| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (string, 1 distinct value) | license (string, 15 distinct values) | size (int64, 6–947k) | score (float64, 0–0.34) |
|---|---|---|---|---|---|
Each row's prefix (0–8.16k chars), middle (3–512 chars), and suffix (0–8.17k chars) fields follow the row as a single code listing.
| 2ndy/RaspIM | usr/lib/python2.6/lib2to3/fixer_util.py | Python | gpl-2.0 | 14,225 | 0.002812 |
"""Utility functions, node construction macros, etc."""
# Author: Collin Winter
# Local imports
from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp
###########################################################
### Common node-construction "macros"
###########################################################
def KeywordArg(keyword, value):
return Node(syms.argument,
[keyword, Leaf(token.EQUAL, u'='), value])
def LParen():
return Leaf(token.LPAR, u"(")
def RParen():
return Leaf(token.RPAR, u")")
def Assign(target, source):
"""Build an assignment statement"""
if not isinstance(target, list):
target = [target]
if not isinstance(source, list):
source.prefix = u" "
source = [source]
return Node(syms.atom,
target + [Leaf(token.EQUAL, u"=", prefix=u" ")] + source)
def Name(name, prefix=None):
"""Return a NAME leaf"""
return Leaf(token.NAME, name, prefix=prefix)
def Attr(obj, attr):
"""A node tuple for obj.attr"""
return [obj, Node(syms.trailer, [Dot(), attr])]
def Comma():
"""A comma leaf"""
return Leaf(token.COMMA, u",")
def Dot():
"""A period (.) leaf"""
return Leaf(token.DOT, u".")
def ArgList(args, lparen=LParen(), rparen=RParen()):
"""A parenthesised argument list, used by Call()"""
node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
if args:
node.insert_child(1, Node(syms.arglist, args))
return node
def Call(func_name, args=None, prefix=None):
"""A function call"""
node = Node(syms.power, [func_name, ArgList(args)])
if prefix is not None:
node.prefix = prefix
return node
def Newline():
"""A newline literal"""
return Leaf(token.NEWLINE, u"\n")
def BlankLine():
"""A blank line"""
return Leaf(token.NEWLINE, u"")
def Number(n, prefix=None):
return Leaf(token.NUMBER, n, prefix=prefix)
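# Hedged usage sketch (not part of the original module): composing the
# macros above. With lib2to3's pytree importable, this builds the source
# text "foo(1)":
#   node = Call(Name(u"foo"), [Number(u"1")])
#   str(node)  # -> u"foo(1)"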
def Subscript(index_node):
"""A numeric or string subscript"""
return Node(syms.trailer, [Leaf(token.LBRACE, u'['),
index_node,
Leaf(token.RBRACE, u']')])
def String(string, prefix=None):
"""A string leaf"""
    return Leaf(token.STRING, string, prefix=prefix)
def ListComp(xp, fp, it, test=None):
"""A list comprehension of the form [xp for fp in it if test].
If test is None, the "if test" part is omitted.
"""
xp.prefix = u""
fp.prefix = u" "
it.prefix = u" "
for_leaf = Leaf(token.NAME, u"for")
for_leaf.prefix = u" "
in_leaf = Leaf(token.NAME, u"in")
in_leaf.prefix = u" "
inner_args = [for_leaf, fp, in_leaf, it]
if test:
test.prefix = u" "
if_leaf = Leaf(token.NAME, u"if")
if_leaf.prefix = u" "
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
return Node(syms.atom,
[Leaf(token.LBRACE, u"["),
inner,
Leaf(token.RBRACE, u"]")])
def FromImport(package_name, name_leafs):
""" Return an import statement in the form:
from package import name_leafs"""
# XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
#assert package_name == '.' or '.' not in package_name, "FromImport has "\
# "not been tested with dotted package names -- use at your own "\
# "peril!"
for leaf in name_leafs:
# Pull the leaves out of their old tree
leaf.remove()
children = [Leaf(token.NAME, u'from'),
Leaf(token.NAME, package_name, prefix=u" "),
Leaf(token.NAME, u'import', prefix=u" "),
Node(syms.import_as_names, name_leafs)]
imp = Node(syms.import_from, children)
return imp
###########################################################
### Determine whether a node represents a given literal
###########################################################
def is_tuple(node):
"""Does the node represent a tuple literal?"""
if isinstance(node, Node) and node.children == [LParen(), RParen()]:
return True
return (isinstance(node, Node)
and len(node.children) == 3
and isinstance(node.children[0], Leaf)
and isinstance(node.children[1], Node)
and isinstance(node.children[2], Leaf)
and node.children[0].value == u"("
and node.children[2].value == u")")
def is_list(node):
"""Does the node represent a list literal?"""
return (isinstance(node, Node)
and len(node.children) > 1
and isinstance(node.children[0], Leaf)
and isinstance(node.children[-1], Leaf)
and node.children[0].value == u"["
and node.children[-1].value == u"]")
###########################################################
### Misc
###########################################################
def parenthesize(node):
return Node(syms.atom, [LParen(), node, RParen()])
consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
"min", "max"])
def attr_chain(obj, attr):
"""Follow an attribute chain.
    If you have a chain of objects where a.foo -> b, b.foo -> c, etc.,
use this to iterate over all objects in the chain. Iteration is
terminated by getattr(x, attr) is None.
Args:
obj: the starting object
attr: the name of the chaining attribute
Yields:
Each successive object in the chain.
"""
next = getattr(obj, attr)
while next:
yield next
next = getattr(next, attr)
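# Hedged usage sketch (not part of the original module): attr_chain walks
# any linked structure by repeatedly reading one attribute until it is
# falsy. The stub class below stands in for lib2to3's pytree nodes.
class _ChainStub(object):
    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent

_root = _ChainStub("root")
_leaf = _ChainStub("leaf", parent=_ChainStub("mid", parent=_root))
# Ancestors of _leaf, nearest first:
assert [n.name for n in attr_chain(_leaf, "parent")] == ["mid", "root"]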
p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p1 = """
power<
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
'any' | 'all' | (any* trailer< '.' 'join' >) )
trailer< '(' node=any ')' >
any*
>
"""
p2 = """
power<
'sorted'
trailer< '(' arglist<node=any any*> ')' >
any*
>
"""
pats_built = False
def in_special_context(node):
""" Returns true if node is in an environment where all that is required
of it is being itterable (ie, it doesn't matter if it returns a list
or an itterator).
See test_map_nochange in test_fixers.py for some examples and tests.
"""
global p0, p1, p2, pats_built
if not pats_built:
p1 = patcomp.compile_pattern(p1)
p0 = patcomp.compile_pattern(p0)
p2 = patcomp.compile_pattern(p2)
pats_built = True
patterns = [p0, p1, p2]
for pattern, parent in zip(patterns, attr_chain(node, "parent")):
results = {}
if pattern.match(parent, results) and results["node"] is node:
return True
return False
def is_probably_builtin(node):
"""
Check that something isn't an attribute or function name etc.
"""
prev = node.prev_sibling
if prev is not None and prev.type == token.DOT:
# Attribute lookup.
return False
parent = node.parent
if parent.type in (syms.funcdef, syms.classdef):
return False
if parent.type == syms.expr_stmt and parent.children[0] is node:
# Assignment.
return False
if parent.type == syms.parameters or \
(parent.type == syms.typedargslist and (
(prev is not None and prev.type == token.COMMA) or
parent.children[0] is node
)):
# The name of an argument.
return False
return True
###########################################################
### The following functions are to find bindings in a suite
###########################################################
def make_suite(node):
if node.type == syms.suite:
return node
node = node.clone()
parent, node.parent = node.parent, None
suite = Node(syms.suite, [node])
suite.parent = parent
return suite
def find_root(node):
"""Find the top level namespace."""
# Scamper up to the top level namespace
while node.type != syms.file_input:
        assert node.parent, ("Tree is insane! root found before "
                             "file_input node was found.")
        node = node.parent
    return node
| toastdriven/alligator | alligator/gator.py | Python | bsd-3-clause | 10,077 | 0 |
from .constants import ALL
from .tasks import Task
from .utils import import_attr
class Gator(object):
def __init__(
self, conn_string, queue_name=ALL, task_class=Task, backend_class=None
):
"""
        A coordinator for scheduling & processing tasks.
Handles creating tasks (with options), using the backend to place tasks
in the queue & pulling/processing tasks off the queue.
Ex::
from alligator import Gator
def add(a, b):
return a + b
gator = Gator('locmem://')
gator.task(add, 3, 7)
Args:
conn_string (str): A DSN for connecting to the queue. Passed along
to the backend.
queue_name (str): Optional. The name of the queue the tasks
should be placed in. Defaults to ``ALL``.
task_class (class): Optional. The class to use for instantiating
tasks. Defaults to ``Task``.
backend_class (class): Optional. The class to use for
instantiating the backend. Defaults to ``None`` (DSN
detection).
"""
self.conn_string = conn_string
self.queue_name = queue_name
self.task_class = task_class
self.backend_class = backend_class
if not backend_class:
self.backend = self.build_backend(self.conn_string)
else:
self.backend = backend_class(self.conn_string)
def build_backend(self, conn_string):
"""
Given a DSN, returns an instantiated backend class.
Ex::
backend = gator.build_backend('locmem://')
# ...or...
backend = gator.build_backend('redis://127.0.0.1:6379/0')
Args:
conn_string (str): A DSN for connecting to the queue. Passed along
to the backend.
Returns:
Client: A backend ``Client`` instance
"""
backend_name, _ = conn_string.split(":", 1)
backend_path = "alligator.backends.{}_backend".format(backend_name)
client_class = import_attr(backend_path, "Client")
return client_class(conn_string)
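    # Hedged illustration (not in the original): how the DSN scheme above
    # maps to a backend module path:
    #   'locmem://'                -> 'alligator.backends.locmem_backend'
    #   'redis://127.0.0.1:6379/0' -> 'alligator.backends.redis_backend'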
def len(self):
"""
Returns the number of remaining queued tasks.
Returns:
int: A count of the remaining tasks
"""
return self.backend.len(self.queue_name)
def push(self, task, func, *args, **kwargs):
"""
Pushes a configured task onto the queue.
Typically, you'll favor using the ``Gator.task`` method or
``Gator.options`` context manager for creating a task. Call this
only if you have specific needs or know what you're doing.
If the ``Task`` has the ``is_async = False`` option, the task will be
        run immediately (in-process). This is useful for development and
        testing.
Ex::
task = Task(is_async=False, retries=3)
finished = gator.push(task, increment, incr_by=2)
Args:
task (Task): A mostly-configured task
func (callable): The callable with business logic to execute
args (list): Positional arguments to pass to the callable task
kwargs (dict): Keyword arguments to pass to the callable task
Returns:
Task: The fleshed-out ``Task`` instance
"""
task.to_call(func, *args, **kwargs)
data = task.serialize()
if task.is_async:
task.task_id = self.backend.push(
self.queue_name,
task.task_id,
data,
delay_until=task.delay_until,
)
else:
self.execute(task)
return task
def pop(self):
"""
Pops a task off the front of the queue & runs it.
Typically, you'll favor using a ``Worker`` to handle processing the
queue (to constantly consume). However, if you need to custom-process
the queue in-order, this method is useful.
Ex::
# Tasks were previously added, maybe by a different process or
# machine...
finished_topmost_task = gator.pop()
Returns:
Task: The completed ``Task`` instance
"""
data = self.backend.pop(self.queue_name)
if data:
task = self.task_class.deserialize(data)
return self.execute(task)
def get(self, task_id):
"""
Gets a specific task, by ``task_id`` off the queue & runs it.
Using this is not as performant (because it has to search the queue),
but can be useful if you need to specifically handle a task
*right now*.
Ex::
# Tasks were previously added, maybe by a different process or
# machine...
finished_task = gator.get('a-specific-uuid-here')
Args:
task_id (str): The identifier of the task to process
Returns:
Task: The completed ``Task`` instance
"""
data = self.backend.get(self.queue_name, task_id)
if data:
task = self.task_class.deserialize(data)
return self.execute(task)
def cancel(self, task_id):
"""
Takes an existing task & cancels it before it is processed.
Returns the canceled task, as that could be useful in creating a new
task.
Ex::
task = gator.task(add, 18, 9)
# Whoops, didn't mean to do that.
gator.cancel(task.task_id)
Args:
            task_id (str): The identifier of the task to process
Returns:
Task: The canceled ``Task`` instance
"""
data = self.backend.get(self.queue_name, task_id)
if data:
task = self.task_class.deserialize(data)
task.to_canceled()
return task
def execute(self, task):
"""
Given a task instance, this runs it.
This includes handling retries & re-raising exceptions.
Ex::
task = Task(is_async=False, retries=5)
task.to_call(add, 101, 35)
finished_task = gator.execute(task)
Args:
            task (Task): The task instance to run
Returns:
Task: The completed ``Task`` instance
"""
try:
return task.run()
except Exception:
if task.retries > 0:
task.retries -= 1
task.to_retrying()
if task.is_async:
# Place it back on the queue.
data = task.serialize()
task.task_id = self.backend.push(
self.queue_name, task.task_id, data
)
else:
return self.execute(task)
else:
raise
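    # Hedged note (not in the original): with retries=2 and is_async=False,
    # a task that always fails is attempted three times in total (each
    # failure decrements retries and recurses into execute), and the final
    # failure re-raises the exception.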
def task(self, func, *args, **kwargs):
"""
Pushes a task onto the queue.
This will instantiate a ``Gator.task_class`` instance, configure
the callable & its arguments, then push it onto the queue.
You'll typically want to use either this method or the
``Gator.options`` context manager (if you need to configure the
``Task`` arguments, such as retries, is_async, task_id, etc.)
Ex::
on_queue = gator.task(increment, incr_by=2)
Args:
func (callable): The callable with business logic to execute
args (list): Positional arguments to pass to the callable task
kwargs (dict): Keyword arguments to pass to the callable task
Returns:
Task: The ``Task`` instance
"""
task = self.task_class()
return self.push(task, func, *args, **kwargs)
def options(self, **kwargs):
"""
Allows specifying advanced ``Task`` options to control how the task
runs.
This returns a context manager which will create ``Task`` instances
with the supplied options. See ``Task.__init__`` for the available
arguments.
Ex::
d
| jordanemedlock/psychtruths | temboo/core/Library/Utilities/XML/__init__.py | Python | apache-2.0 | 319 | 0.00627 |
from temboo.Library.Utilities.XML.GetValuesFromXML import GetValuesFromXML, GetValuesFromXMLInputSet, GetValuesFromXMLResultSet, GetValuesFromXMLChoreographyExecution
from temboo.Library.Utilities.XML.RunXPathQuery import RunXPathQuery, RunXPathQueryInputSet, RunXPathQueryResultSet, RunXPathQueryChoreographyExecution
| andreafrittoli/eowyn | eowyn/api.py | Python | apache-2.0 | 4,364 | 0.000229 |
# Copyright 2015 Andrea Frittoli <andrea.frittoli@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import flask
from flask import request
import flask_restful
import functools
import sys
import eowyn.exceptions as eowyn_exc
from eowyn.model import managers
app = flask.Flask(__name__)
api = flask_restful.Api(app)
manager = None
def handle_validate(f):
"""A decorator to apply handle data validation errors"""
@functools.wraps(f)
def wrapper(self, *func_args, **func_kwargs):
try:
return f(self, *func_args, **func_kwargs)
except eowyn_exc.InvalidDataException as ida:
flask_restful.abort(400, message=str(ida))
return wrapper
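# Hedged illustration (not in the original): applying @handle_validate to a
# resource method is equivalent to wrapping its body in
#   try:
#       ...
#   except eowyn_exc.InvalidDataException as ida:
#       flask_restful.abort(400, message=str(ida))
# so validation errors surface as HTTP 400 responses.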
class Subscription(flask_restful.Resource):
@handle_validate
def get(self, topic, username):
# Get next message on a topic
try:
message = manager.pop_message(topic=topic, username=username)
return message, 200
except eowyn_exc.NoMessageFoundException:
# If no message is found simply return 204
return '', 204
except eowyn_exc.SubscriptionNotFoundException as snfe:
flask_restful.abort(404, message=str(snfe))
@handle_validate
def post(self, topic, username):
# Subscribe to a topic
try:
manager.create_subscription(topic=topic, username=username)
return '', 200
except eowyn_exc.SubscriptionAlreadyExistsException:
# NOTE(andreaf) This is not specified, but it seemed a
# reasonable code to return in this case
return '', 201
@handle_validate
def delete(self, topic, username):
# Unsubscribe from a topic
try:
manager.delete_subscription(topic=topic, username=username)
return '', 200
except eowyn_exc.SubscriptionNotFoundException as snfe:
flask_restful.abort(404, message=str(snfe))
class Message(flask_restful.Resource):
@handle_validate
def post(self, topic):
# Post a message to a topic
# There's no content type set for messages, they're plain text.
# Because of that we need to extract the text from the
        # ImmutableMultiDict returned by flask
# message = request.form.keys()[0]
message = request.data
try:
manager.publish_message(topic, message)
except eowyn_exc.TopicNotFoundException:
            # NOTE(andreaf) When the topic is not found it means no subscription
# exists so the message is discarded right away. We still need to
# capture this exception, and do nothing for now. We may have
# logging or reporting logic in future here.
pass
return '', 200
# Handle Subscriber API (subscribe, un-subscribe and get message)
api.add_resource(Subscription, '/<string:topic>/<string:username>')
# Handle Publisher API (post message)
api.add_resource(Message, '/<string:topic>')
def main():
config = ConfigParser.ConfigParser()
try:
# Read config file as first command line parameter
config_file = sys.argv[1]
config.read(config_file)
# Use the configured manager
manager_type = config.get('default', 'manager')
manager_configs = config.items(manager_type)
        # In case of duplicated configs, the last one wins
manager_configs = {k: v for (k, v) in manager_configs}
# Other configs
debug = config.get('default', 'debug')
except IndexError:
# Or else use defaults
debug = False
manager_type = 'redis'
manager_configs = {'host': 'localhost', 'port': 6379}
global manager
manager = managers.get_manager(manager_type, **manager_configs)
    app.run(debug=debug)
if __name__ == '__main__':
main()
| stevecassidy/pyunitgrading | tests/bad/single/43684882/comp249-psst-starter-master/users.py | Python | bsd-3-clause | 815 | 0.008589 |
"""
@author:
"""
import bottle
# this variable MUST be used as the name for the cookie used by this application
COOKIE_NAME = 'sessionid'
def check_login(db, usernick, password):
"""returns True if password matches stored"""
def generate_session(db, usernick):
"""create a new session and add a cookie to the request object (bottle.request)
    user must be a valid user in the database, if not, return None
There should only be one session per user at any time, if there
is already a session active, use the existing sessionid in the cookie
"""
def delete_session(db, usernick):
"""remove all session table entries for this user"""
def session_user(db):
"""try to
retrieve the user from the sessions table
return usernick or None if no valid session is present"""
| mzdanieltest/pex | tests/test_bdist_pex.py | Python | apache-2.0 | 1,373 | 0.00874 |
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import subprocess
import sys
from textwrap import dedent
from twitter.common.contextutil import pushd
from pex.testing import temporary_content
def assert_entry_points(entry_points):
setup_py = dedent("""
from setuptools import setup
setup(
name='my_app',
version='0.0.0',
zip_safe=True,
packages=[''],
entry_points=%(entry_points)r,
)
""" % dict(entry_points=entry_points))
my_app = dedent("""
def do_something():
print("hello world!")
""")
  with temporary_content({'setup.py': setup_py, 'my_app.py': my_app}) as project_dir:
with pushd(project_dir):
subprocess.check_call([sys.executable, 'setup.py', 'bdist_pex'])
      process = subprocess.Popen([os.path.join(project_dir, 'dist', 'my_app-0.0.0.pex')],
stdout=subprocess.PIPE)
stdout, _ = process.communicate()
assert 0 == process.returncode
assert stdout == b'hello world!\n'
def test_entry_points_dict():
assert_entry_points({'console_scripts': ['my_app = my_app:do_something']})
def test_entry_points_ini_string():
assert_entry_points(dedent("""
[console_scripts]
my_app=my_app:do_something
"""))
| scikit-build/scikit-ci | ci/utils.py | Python | apache-2.0 | 1,327 | 0 |
# -*- coding: utf-8 -*-
"""This module defines functions generally useful in scikit-ci."""
import os
from .constants import SERVICES, SERVICES_ENV_VAR
def current_service():
for service, env_var in SERVICES_ENV_VAR.items():
if os.environ.get(env_var, 'false').lower() == 'true':
return service
raise LookupError(
"unknown service: None of the environment variables
|
{} are set "
"to 'true' or 'True'".format(", ".join(SERVICES_ENV_VAR.values()))
)
def current_operating_system(service):
return os.environ[SERVICES[service]] if SERVICES[service] else None
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines
|
in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
Copied from textwrap.py available in python 3 (cpython/cpython@a2d2bef)
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
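# Hedged usage sketch (not part of the original module): by default only
# non-blank lines are prefixed; a predicate can prefix every line.
assert indent("a\n\nb\n", "  ") == "  a\n\n  b\n"
assert indent("a\n\nb\n", "> ", predicate=lambda line: True) == "> a\n> \n> b\n"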
| KirillMysnik/obs-ws-rc | examples/2. Make requests/make_requests.py | Python | mit | 1,156 | 0 |
"""Example shows how to send requests and get responses."""
import asyncio
from obswsrc import OBSWS
from obswsrc.requests import ResponseStatus, StartStreamingRequest
from obswsrc.types import Stream, StreamSettings
async def main():
async with OBSWS('localhost', 4444, "password") as obsws:
        # We can send an empty StartStreaming request (in that case the plugin
# will use OBS configuration), but let's provide some settings as well
stream_settings = StreamSettings(
server="rtmp://example.org/my_application",
key="secret_stream_key",
use_auth=False
)
stream = Stream(
settings=stream_settings
|
,
type="rtmp_custom",
)
# Now let's actually perform a request
response = await obsws.require(StartStreamingRequest(stream=stream))
# Check if everything is OK
if response.status == ResponseStatus.OK:
print("Streaming has started")
else:
print("Couldn't start the stream! Reason:", response.error)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
| Rustem/toptal-blog-celery-toy-ex | celery_uncovered/tricks/utils.py | Python | mit | 347 | 0 |
from json import loads
import codecs
import environ
FIXTURE_PATH = (environ.Path(__file__) - 1).path('fixtures')
def read_json(fpath):
    with codecs.open(fpath, 'rb', encoding='utf-8') as fp:
return loads(fp.read())
def read_fixture(*subpath):
fixture_file = str(FIXTURE_PATH.path(*subpath))
    return read_json(fixture_file)
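# Hedged usage sketch (not in the original): read_fixture resolves a path
# under the module-relative ./fixtures directory and parses it as JSON.
#   data = read_fixture('payload.json')   # 'payload.json' is hypothetical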
| bastings/neuralmonkey | neuralmonkey/runners/beamsearch_runner.py | Python | bsd-3-clause | 5,107 | 0.000587 |
from typing import Callable, List, Dict, Optional
import numpy as np
from typeguard import check_argument_types
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.decoders.beam_search_decoder import (BeamSearchDecoder,
SearchStepOutput)
from neuralmonkey.runners.base_runner import (BaseRunner, Executable,
ExecutionResult, NextExecute)
from neuralmonkey.vocabulary import Vocabulary, END_TOKEN
class BeamSearchExecutable(Executable):
def __init__(self,
rank: int,
all_encoders: List[ModelPart],
bs_outputs: SearchStepOutput,
vocabulary: Vocabulary,
postprocess: Optional[Callable]) -> None:
self._rank = rank
self._all_encoders = all_encoders
self._bs_outputs = bs_outputs
self._vocabulary = vocabulary
self._postprocess = postprocess
self.result = None # type: Optional[ExecutionResult]
def next_to_execute(self) -> NextExecute:
return self._all_encoders, {'bs_outputs': self._bs_outputs}, {}
def collect_results(self, results: List[Dict]) -> None:
if len(results) > 1:
raise ValueError("Beam search runner does not support ensembling.")
evaluated_bs = results[0]['bs_outputs']
max_time = evaluated_bs.scores.shape[0]
# pick the end of the hypothesis based on its rank
hyp_index = np.argpartition(
-evaluated_bs.scores[-1], self._rank - 1)[self._rank - 1]
bs_score = evaluated_bs.scores[-1][hyp_index]
# now backtrack
output_tokens = [] # type: List[str]
for time in reversed(range(max_time)):
token_id = evaluated_bs.token_ids[time][hyp_index]
token = self._vocabulary.index_to_word[token_id]
output_tokens.append(token)
hyp_index = evaluated_bs.parent_ids[time][hyp_index]
output_tokens.reverse()
before_eos_tokens = [] # type: List[str]
for tok in output_tokens:
if tok == END_TOKEN:
break
before_eos_tokens.append(tok)
if self._postprocess is not None:
decoded_tokens = self._postprocess([before_eos_tokens])
else:
decoded_tokens = [before_eos_tokens]
self.result = ExecutionResult(
outputs=decoded_tokens,
losses=[bs_score],
scalar_summaries=None,
histogram_summaries=None,
image_summaries=None)
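# Hedged, self-contained illustration (not part of the original runner): the
# parent_ids backtracking used in collect_results, on toy [time][hyp] arrays.
_toy_token_ids = np.array([[2, 3], [5, 7]])
_toy_parent_ids = np.array([[0, 0], [0, 1]])
_hyp, _tokens = 1, []
for _t in reversed(range(2)):
    _tokens.append(int(_toy_token_ids[_t][_hyp]))
    _hyp = int(_toy_parent_ids[_t][_hyp])
_tokens.reverse()
assert _tokens == [3, 7]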
class BeamSearchRunner(BaseRunner):
def __init__(self,
output_series: str,
decoder: BeamSearchDecoder,
rank: int = 1,
postprocess: Callable[[List[str]], List[str]] = None) -> None:
super(BeamSearchRunner, self).__init__(output_series, decoder)
check_argument_types()
if rank < 1 or rank > decoder.beam_size:
raise ValueError(
("Rank of output hypothesis must be between 1 and the beam "
"size ({}), was {}.").format(decoder.beam_size, rank))
self._rank = rank
self._postprocess = postprocess
def get_executable(self,
compute_losses: bool = False,
summaries: bool = True) -> BeamSearchExecutable:
return BeamSearchExecutable(
self._rank, self.all_coders, self._decoder.outputs,
self._decoder.vocabulary, self._postprocess)
@property
def loss_names(self) -> List[str]:
return ["beam_search_score"]
@property
def decoder_data_id(self) -> Optional[str]:
return None
def beam_search_runner_range(output_series: str,
decoder: BeamSearchDecoder,
                             max_rank: int = None,
                             postprocess: Callable[[List[str]], List[str]] = None
) -> List[BeamSearchRunner]:
"""A list of beam search runners for a range of ranks from 1 to max_rank.
This means there is max_rank output series where the n-th series contains
the n-th best hypothesis from the beam search.
Args:
output_series: Prefix of output series.
decoder: Beam search decoder shared by all runners.
max_rank: Maximum rank of the hypotheses.
postprocess: Series-level postprocess applied on output.
Returns:
List of beam search runners getting hypotheses with rank from 1 to
max_rank.
"""
check_argument_types()
if max_rank is None:
max_rank = decoder.beam_size
if max_rank > decoder.beam_size:
raise ValueError(
("The maximum rank ({}) cannot be "
"bigger than beam size {}.").format(
max_rank, decoder.beam_size))
return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
decoder, r, postprocess)
for r in range(1, max_rank + 1)]
| mrniranjan/python-scripts | reboot/practice6.py | Python | gpl-2.0 | 149 | 0.033557 |
class MyStuff(object):
def __init__(self):
        self.tangerine = "And now a thousand years between"
    def apple(self):
print "I am classy apples!"
| bigswitch/neutron | neutron/tests/functional/agent/test_ovs_flows.py | Python | apache-2.0 | 19,181 | 0.000209 |
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import fixtures
import mock
import testscenarios
from neutron_lib import constants as n_const
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import importutils
from testtools.content import text_content
from neutron.agent.common import utils
from neutron.agent.linux import ip_lib
from neutron.cmd.sanity import checks
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_neutron_agent as ovsagt
from neutron.tests import base as tests_base
from neutron.tests.common import base as common_base
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent import test_ovs_lib
from neutron.tests.functional import base
from neutron.tests import tools
OVS_TRACE_FINAL_FLOW = 'Final flow'
OVS_TRACE_DATAPATH_ACTIONS = 'Datapath actions'
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
'common.config')
class OVSAgentTestBase(test_ovs_lib.OVSBridgeTestBase,
base.BaseSudoTestCase):
scenarios = testscenarios.multiply_scenarios([
('ofctl', {'main_module': ('neutron.plugins.ml2.drivers.openvswitch.'
'agent.openflow.ovs_ofctl.main')}),
('native', {'main_module': ('neutron.plugins.ml2.drivers.openvswitch.'
'agent.openflow.native.main')})],
test_ovs_lib.OVSBridgeTestBase.scenarios)
def setUp(self):
super(OVSAgentTestBase, self).setUp()
self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
self.of_interface_mod = importutils.import_module(self.main_module)
self.br_int_cls = None
self.br_tun_cls = None
self.br_phys_cls = None
self.br_int = None
self.init_done = False
self.init_done_ev = eventlet.event.Event()
self.main_ev = eventlet.event.Event()
self.addCleanup(self._kill_main)
retry_count = 3
while True:
cfg.CONF.set_override('of_listen_port',
net_helpers.get_free_namespace_port(
n_const.PROTO_NAME_TCP),
group='OVS')
self.of_interface_mod.init_config()
self._main_thread = eventlet.spawn(self._kick_main)
# Wait for _kick_main -> of_interface main -> _agent_main
# NOTE(yamamoto): This complexity came from how "native"
# of_interface runs its openflow controller. "native"
# of_interface's main routine blocks while running the
# embedded openflow controller. In that case, the agent
# rpc_loop runs in another thread. However, for FT we
# need to run setUp() and test_xxx() in the same thread.
# So I made this run of_interface's main in a separate
# thread instead.
try:
while not self.init_done:
self.init_done_ev.wait()
break
except fixtures.TimeoutException:
self._kill_main()
retry_count -= 1
if retry_count < 0:
raise Exception('port allocation failed')
def _run_trace(self, brname, spec):
required_keys = [OVS_TRACE_FINAL_FLOW, OVS_TRACE_DATAPATH_ACTIONS]
t = utils.execute(["ovs-appctl", "ofproto/trace", brname, spec],
run_as_root=True)
trace = {}
trace_lines = t.splitlines()
for line in trace_lines:
(l, sep, r) = line.partition(':')
if not sep:
continue
elif l in required_keys:
trace[l] = r
for k in required_keys:
if k not in trace:
self.fail("%s not found in trace %s" % (k, trace_lines))
return trace
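    # Hedged illustration (not in the original): the partition-based parsing
    # above, applied to one fabricated trace line:
    #   "Final flow: in_port=1".partition(':')
    #       -> ('Final flow', ':', ' in_port=1')
    # so trace maps each required key to the text after its first colon.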
def _kick_main(self):
with mock.patch.object(ovsagt, 'main', self._agent_main):
self.of_interface_mod.main()
def _kill_main(self):
self.main_ev.send()
self._main_thread.wait()
def _agent_main(self, bridge_classes):
self.br_int_cls = bridge_classes['br_int']
self.br_phys_cls = bridge_classes['br_phys']
self.br_tun_cls = bridge_classes['br_tun']
self.br_int = self.br_int_cls(self.br.br_name)
self.br_int.set_secure_mode()
self.br_int.setup_controllers(cfg.CONF)
self.br_int.setup_default_table()
# signal to setUp()
self.init_done = True
self.init_done_ev.send()
self.main_ev.wait()
class ARPSpoofTestCase(OVSAgentTestBase):
def setUp(self):
# NOTE(kevinbenton): it would be way cooler to use scapy for
# these but scapy requires the python process to be running as
# root to bind to the ports.
self.addOnException(self.collect_flows_and_ports)
super(ARPSpoofTestCase, self).setUp()
self.skip_without_arp_support()
self.src_addr = '192.168.0.1'
self.dst_addr = '192.168.0.2'
self.src_namespace = self.useFixture(
net_helpers.NamespaceFixture()).name
self.dst_namespace = self.useFixture(
net_helpers.NamespaceFixture()).name
self.src_p = self.useFixture(
net_helpers.OVSPortFixture(self.br, self.src_namespace)).port
self.dst_p = self.useFixture(
net_helpers.OVSPortFixture(self.br, self.dst_namespace)).port
# wait to add IPs until after anti-spoof rules to ensure ARP doesn't
# happen before
def collect_flows_and_ports(self, exc_info):
nicevif = lambda x: ['%s=%s' % (k, getattr(x, k))
for k in ['ofport', 'port_name', 'switch',
'vif_id', 'vif_mac']]
nicedev = lambda x: ['%s=%s' % (k, getattr(x, k))
for k in ['name', 'namespace']] + x.addr.list()
details = {'flows': self.br.dump_all_flows(),
'vifs': map(nicevif, self.br.get_vif_ports()),
'src_ip': self.src_addr,
'dest_ip': self.dst_addr,
                   'source_port': nicedev(self.src_p),
'dest_port': nicedev(self.dst_p)}
self.addDetail('arp-test-state',
text_content(jsonutils.dumps(details, indent=5)))
@common_base.no_skip_on_missing_deps
def skip_without_arp_support(self):
if not checks.arp_header_match_supported():
self.skipTest("ARP header matching not supported")
def test_arp_spoof_doesnt_block_normal_traffic(self):
        self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr])
self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr])
self.src_p.addr.add('%s/24' % self.src_addr)
self.dst_p.addr.add('%s/24' % self.dst_addr)
net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2)
def test_mac_spoof_blocks_wrong_mac(self):
        self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr])
self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr])
self.src_p.addr.add('%s/24' % self.src_addr)
self.dst_p.addr.add('%s/24' % self.dst_addr)
net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2)
# changing the allowed mac should stop the port from working
self._setup_arp_spoof_for_port(self.src_p.na
| nickofbh/kort2 | app/dashboard/views.py | Python | mit | 1,414 | 0.019095 |
from app.app_and_db import app
from flask import Blueprint, jsonify, render_template
import datetime
import random
import requests
dashboard = Blueprint('dashboard', __name__)
cumtd_endpoint = 'https://developer.cumtd.com/api/{0}/{1}/{2}'
cumtd_endpoint = cumtd_endpoint.format('v2.2', 'json', 'GetDeparturesByStop')
wunderground_endpoint = 'http://api.wunderground.com/api/{0}/hourly/q/{1}/{2}.json'
wunderground_endpoint = wunderground_endpoint.format(app.config['WUNDERGROUND_API_KEY'], 'IL', 'Champaign')
@dashboard.route('/')
def index():
time=datetime.datetime.now().time().strftime('%I:%M').lstrip('0')
    return render_template('pages/dashboard.html', image_number=random.randrange(1, 9), time=time)
# Query no more than once a minute
@dashboard.route('/bus')
def bus_schedule():
params = {'key' : app.config['CUMTD_API_KEY'],
'stop_id' : 'GRN4TH',
'count' : '5'}
response = requests.get(cumtd_endpoint, params=params)
json = response.json()
departures = []
for departure in json['departures'] :
if departure['trip']['direction'] == 'East':
departures.append(departure)
return jsonify(departures=departures)
#Query no more than once every three minutes
@dashboard.route('/weather')
def weather():
response = requests.get(wunderground_endpoint)
json = response.json()
return jsonify(json)
app.register_blueprint(dashboard, url_prefix='/dashboard')
| edespino/gpdb | gpAux/gpdemo/gpsegwalrep.py | Python | apache-2.0 | 24,714 | 0.005422 |
#! /usr/bin/env python
"""
Initialize, start, stop, or destroy WAL replication mirror segments.
============================= DISCLAIMER =============================
This is a developer tool to assist with development of WAL replication
for mirror segments. This tool is not meant to be used in production.
It is suggested to only run this tool against a gpdemo cluster that
was initialized with no FileRep mirrors.
Example:
WITH_MIRRORS=false make create-demo-cluster
======================================================================
Assumptions:
1. Greenplum cluster was compiled with --enable-segwalrep
2. Greenplum cluster was initialized without mirror segments.
3. Cluster is all on one host
4. Greenplum environment is all setup (greenplum_path.sh, MASTER_DATA_DIRECTORY, PGPORT, etc.)
5. Greenplum environment is started
6. Greenplum environment is the same throughout tool usage
Assuming all of the above, you can just run the tool as so:
./gpsegwalrep.py [init|start|stop|destroy]
"""
import argparse
import os
import sys
import subprocess
import threading
import datetime
import time
from gppylib.db import dbconn
PRINT_LOCK = threading.Lock()
THREAD_LOCK = threading.Lock()
def runcommands(commands, thread_name, command_finish, exit_on_error=True):
output = []
for command in commands:
try:
output.append('%s: Running command... %s' % (datetime.datetime.now(), command))
with THREAD_LOCK:
output = output + subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True).split('\n')
except subprocess.CalledProcessError, e:
output.append(str(e))
output.append(e.output)
if exit_on_error:
with PRINT_LOCK:
for line in output:
print '%s: %s' % (thread_name, line)
print ''
sys.exit(e.returncode)
output.append('%s: %s' % (datetime.datetime.now(), command_finish))
with PRINT_LOCK:
for line in output:
print '%s: %s' % (thread_name, line)
print ''
def displaySegmentConfiguration():
commands = []
commands.append("psql postgres -c \"select * from gp_segment_configuration order by content, dbid\"")
runcommands(commands, "", "")
class InitMirrors():
''' Initialize the WAL replication mirror segment '''
def __init__(self, cluster_config, hostname, init=True):
self.clusterconfig = cluster_config
self.segconfigs = cluster_config.get_seg_configs()
self.hostname = hostname
self.init = init
def initThread(self, segconfig, user):
commands = []
if self.init:
primary_port = segconfig.port
primary_dir = segconfig.fselocation
mirror_dir = cluster_config.get_pair_dir(segconfig)
mirror_port = cluster_config.get_pair_port(segconfig)
else:
primary_port = cluster_config.get_pair_port(segconfig)
primary_dir = cluster_config.get_pair_dir(segconfig)
mirror_dir = segconfig.fselocation
mirror_port = segconfig.port
mirror_contentid = segconfig.content
if self.init:
commands.append("echo 'host replication %s samenet trust' >> %s/pg_hba.conf" % (user, primary_dir))
commands.append("pg_ctl -D %s reload" % primary_dir)
# 1. create base backup
commands.append("rm -rf %s" % mirror_dir);
commands.append("pg_basebackup -x -R -c fast -E ./pg_log -E ./db_dumps -E ./gpperfmon/data -E ./gpperfmon/logs -D %s -h %s -p %d" % (mirror_dir, self.hostname, primary_port))
commands.append("mkdir %s/pg_log; mkdir %s/pg_xlog/archive_status" % (mirror_dir, mirror_dir))
if self.init:
# 2. update catalog
catalog_update_query = "select pg_catalog.gp_add_segment_mirror(%d::int2, '%s', '%s', %d, -1, '{pg_system, %s}')" % (mirror_contentid, self.hostname, self.hostname, mirror_port, mirror_dir)
commands.append("PGOPTIONS=\"-c gp_session_role=utility\" psql postgres -c \"%s\"" % catalog_update_query)
thread_name = 'Mirror content %d' % mirror_contentid
command_finish = 'Initialized mirror at %s' % mirror_dir
runcommands(commands, thread_name, command_finish)
def run(self):
        # Assume db user is current user
        user = subprocess.check_output(["whoami"]).rstrip('\n')
initThreads = []
for segconfig in self.segconfigs:
assert(segconfig.content != GpSegmentConfiguration.MASTER_CONTENT_ID)
if self.init:
assert(segconfig.role == GpSegmentConfiguration.ROLE_PRIMARY)
else:
assert(segconfig.role == GpSegmentConfiguration.ROLE_MIRROR)
            thread = threading.Thread(target=self.initThread, args=(segconfig, user))
thread.start()
initThreads.append(thread)
for thread in initThreads:
thread.join()
class StartInstances():
''' Start a greenplum segment '''
def __init__(self, cluster_config, host, wait=False):
self.clusterconfig = cluster_config
self.segconfigs = cluster_config.get_seg_configs()
self.host = host
self.wait = wait
def startThread(self, segconfig):
commands = []
waitstring = ''
dbid = segconfig.dbid
contentid = segconfig.content
segment_port = segconfig.port
segment_dir = segconfig.fselocation
segment_role = StartInstances.getRole(contentid)
# Need to set the dbid to 0 on segments to prevent use in mmxlog records
if contentid != GpSegmentConfiguration.MASTER_CONTENT_ID:
dbid = 0
opts = ("-p %d --gp_dbid=%d --silent-mode=true -i -M %s --gp_contentid=%d --gp_num_contents_in_cluster=%d" %
(segment_port, dbid, segment_role, contentid, self.clusterconfig.get_num_contents()))
# Arguments for the master. -x sets the dbid for the standby master. Hardcoded to 0 for now, but may need to be
# refactored when we start to focus on the standby master.
#
# -E in GPDB will set Gp_entry_postmaster = true;
# to start master in utility mode, need to remove -E and add -c gp_role=utility
#
# we automatically assume people want to start in master only utility mode
# if the self.clusterconfig.get_num_contents() is 0
if contentid == GpSegmentConfiguration.MASTER_CONTENT_ID:
opts += " -x 0"
if self.clusterconfig.get_num_contents() == 0:
opts += " -c gp_role=utility"
else:
opts += " -E"
if self.wait:
waitstring = "-w -t 180"
commands.append("pg_ctl -D %s %s -o '%s' start" % (segment_dir, waitstring, opts))
commands.append("pg_ctl -D %s status" % segment_dir)
if contentid == GpSegmentConfiguration.MASTER_CONTENT_ID:
segment_label = 'master'
elif segconfig.preferred_role == GpSegmentConfiguration.ROLE_PRIMARY:
segment_label = 'primary'
else:
segment_label = 'mirror'
thread_name = 'Segment %s content %d' % (segment_label, contentid)
command_finish = 'Started %s segment with content %d and port %d at %s' % (segment_label, contentid, segment_port, segment_dir)
runcommands(commands, thread_name, command_finish)
@staticmethod
def getRole(contentid):
if contentid == GpSegmentConfiguration.MASTER_CONTENT_ID:
return 'master'
else:
return 'mirrorless'
def run(self):
startThreads = []
for segconfig in self.segconfigs:
thread = threading.Thread(target=self.startThread, args=(segconfig,))
thread.start()
startThreads.append(thread)
for thread in startThreads:
thread.join()
class StopInstances():
''' Stop all segments'''
def __init__(self, cluster_config):
self.clusterconfig = cluster_config
self.segconfigs = cluster_config.get_seg_configs()
de
| dionaea-honeypot/dionaea | modules/python/dionaea/echo.py | Python | gpl-2.0 | 1,366 | 0.012445 |
# This file is part of the dionaea honeypot
#
# SPDX-FileCopyrightText: 2009 Paul Baecher & Markus Koetter & Mark Schloesser
#
# SPDX-License-Identifier: GPL-2.0-or-later
from dionaea.core import connection
class echo(connection):
def __init__ (self, proto=None):
print("echo init")
connection.__init__(self,proto)
self.timeouts.idle = 5.
self.timeouts.sustain = 10.
def handle_origin(self, parent):
print("origin!")
print("parent {:s} {:s}:{:d}".format(
parent.protocol, parent.local.host,parent.local.port))
print("self {:s} {:s}:{:d} -> {:s}:{:d}".format(self.protocol,
self.local.host,self.local.port, self.remote.host,self.remote.port))
def handle_established(self):
print("new connection to serve!")
self.send('welcome to reverse world!\n')
def handle_timeout_idle(self):
self.send("you are idle!\n")
return True
def handle_timeout_sustain(self):
self.send("your sustain timeouted!\n")
return False
def handle_disconnect(self):
self.send("disconnecting you!\n")
def handle_io_in(self,data):
print('py_io_in\n')
self.send(data[::-1][1:] + b'\n')
return len(data)
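    # Hedged note (not in the original): data[::-1][1:] reverses the incoming
    # bytes and drops the first byte of the reversed result, i.e. the
    # client's trailing newline, before appending a fresh b'\n':
    #   b'hello\n'[::-1][1:]  ->  b'olleh'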
#
#e = echo(proto='tcp')
#e.bind('0.0.0.0',4713,'')
#e.listen()
| konstantinoskostis/sqlalchemy-utils | sqlalchemy_utils/types/ltree.py | Python | bsd-3-clause | 3,375 | 0 |
from __future__ import absolute_import
from sqlalchemy import types
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.dialects.postgresql.base import ischema_names, PGTypeCompiler
from sqlalchemy.sql import expression
from ..primitives import Ltree
from .scalar_coercible import ScalarCoercible
class LtreeType(types.Concatenable, types.UserDefinedType, ScalarCoercible):
"""Postgresql LtreeType type.
The LtreeType datatype can be used for representing labels of data stored
    in hierarchical tree-like structure. For more detailed information please
refer to http://www.postgresql.org/docs/current/static/ltree.html
::
from sqlalchemy_utils import LtreeType
class DocumentSection(Base):
__tablename__ = 'document_section'
id = sa.Column(sa.Integer, autoincrement=True)
path = sa.Column(LtreeType)
section = DocumentSection(name='Countries.Finland')
session.add(section)
session.commit()
section.path # Ltree('Countries.Finland')
.. note::
Using :class:`LtreeType`, :class:`LQUERY` and :class:`LTXTQUERY` types
may require installation of Postgresql ltree extension on the server
side. Please visit http://www.postgres.org for details.
"""
class comparator_factory(types.Concatenable.Comparator):
def ancestor_of(self, other):
if isinstance(other, list):
return self.op('@>')(expression.cast(other, ARRAY(LtreeType)))
else:
return self.op('@>')(other)
def descendant_of(self, other):
if isinstance(other, list):
return self.op('<@')(expression.cast(other, ARRAY(LtreeType)))
else:
return self.op('<@')(other)
def lquery(self, other):
if isinstance(other, list):
return self.op('?')(expression.cast(other, ARRAY(LQUERY)))
else:
return self.op('~')(other)
def ltxtquery(self, other):
return self.op('@')(other)
def bind_processor(self, dialect):
def process(value):
if value:
return value.path
return process
def result_processor(self, dialect, coltype):
def process(value):
return self._coerce(value)
return process
def literal_processor(self, dialect):
def process(value):
value = value.replace("'", "''")
return "'%s'" % value
return process
__visit_name__ = 'LTREE'
def _coerce(self, value):
if value:
return Ltree(value)
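# Hedged usage sketch (not part of the original module): an LtreeType column
# accepts path strings and round-trips them as Ltree instances.
#   path = sa.Column(LtreeType)
#   section = DocumentSection(path='Countries.Finland')  # model from docstring
#   section.path  # -> Ltree('Countries.Finland') after a round trip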
class LQUERY(types.TypeEngine):
"""Postresql LQUERY type.
See :class:`LTREE` for details.
"""
__visit_name__ = 'LQUERY'
class LTXTQUERY(types.TypeEngine):
"""Postresql LTXTQUERY type.
See :class:`LTREE` for details.
"""
__visit_name__ = 'LTXTQUERY'
ischema_names['ltree'] = LtreeType
ischema_names['lquery'] = LQUERY
ischema_names['ltxtquery'] = LTXTQUERY
def visit_LTREE(self, type_, **kw):
return 'LTREE'
def visit_LQUERY(self, type_, **kw):
return 'LQUERY'
def visit_LTXTQUERY(self, type_, **kw):
return 'LTXTQUERY'
PGTypeCompiler.visit_LTREE = visit_LTREE
PGTypeCompiler.visit_LQUERY = visit_LQUERY
PGTypeCompiler.visit_LTXTQUERY = visit_LTXTQUERY
| agendaTCC/AgendaTCC | tccweb/apps/website/urls.py | Python | gpl-2.0 | 603 | 0.008292 |
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required, user_passes_test
urlpatterns = patterns('',
url(r'^$', 'website.views.index', name='website_index'),
url(r'^termos/$',TemplateView.as_view(template_name='website/termos_de_uso.html'), name='website_termos'),
url(r'^sobre/$', TemplateView.as_view(template_name='website/sobre.html'), name='website_sobre'),
url(r'^relatorios/$', login_required(TemplateView.as_view(template_name='website/relatorios.html')), name='website_relatorios'),
)
| jw/imagery | imagery/impart/migrations/0005_auto_20170117_0922.py | Python | mit | 621 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-17 09:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("impart", "0004_auto_20170117_0916")]
operations = [
migrations.AlterField(
model_name="contact
|
",
name="artist",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="impart.Artist",
),
)
]
| zsiciarz/django-pgallery | tests/factories.py | Python | mit | 805 | 0 |
from django.conf import settings
import factory
from pgallery.models import Gallery, Photo
class UserFactory(factory.django.DjangoModelFactory):
    username = factory.Sequence(lambda n: "user_%d" % n)
email = factory.Sequence(lambda n: "user_%d@example.com" % n)
class Meta:
model = settings.AUTH_USER_MODEL
class GalleryFactory(factory.django.DjangoModelFactory):
author = factory.SubFactory(UserFactory)
slug = factory.Sequence(lambda n: "gallery_%d" % n)
class Meta:
        model = Gallery
class PhotoFactory(factory.django.DjangoModelFactory):
gallery = factory.SubFactory(GalleryFactory)
author = factory.LazyAttribute(lambda obj: obj.gallery.author)
image = factory.django.ImageField(width=1024, height=768)
class Meta:
model = Photo
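# Hedged usage sketch (not in the original): factories compose, so one call
# builds a photo together with its gallery and author (needs a configured
# Django test database):
#   photo = PhotoFactory()
#   photo.author is photo.gallery.author  # True, via LazyAttribute above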
| dssg/energywise | Code/clean_brecs.py | Python | mit | 947 | 0.01056 |
from utils import *
import sys
def clean_rec(d):
kwhs, kwhs_oriflag = d["kwhs"]
temps, temps_oriflag = d["temps"]
for i in range(len(temps_oriflag)):
t = temps[i]
if t < -60:
temps_oriflag[i] = False #Ain't no way that reading's real
temps[i] = 0
for i in range(len(kwhs_oriflag)):
k = kwhs[i]
if k < -5:
kwhs_oriflag[i] = False #Ain't no way that reading's real
kwhs[i] = 0
d["temps"] = (t
|
emps, temps_oriflag)
d["kwhs"] = (kwhs, kwhs_oriflag)
if __name__ == "__main__":
args = sys.argv
if len(args) > 1:
the_year = int(args[1])
brecs, desc = qload("state_b_records_" + str(the_y
|
ear) + "_updated_with_temps.pkl")
for d in brecs:
clean_rec(d)
qdump((brecs, desc + "(Plus we cleaned out curiously low values (noise))"),
"state_b_records_" + str(the_year) + "_with_temps_cleaned.pkl")
| gunan/tensorflow | tensorflow/python/types/core.py | Python | apache-2.0 | 1,707 | 0.005858 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core TensorFlow types."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(mdan): Consider adding ABC once the dependence on isinstance is reduced.
# TODO(mdan): Add type annotations.
class Tensor(object):
"""The base class of all dense Tensor objects.
A dense tensor has a static data type (dtype), and may have a static rank and
shape. Tensor objects are immutable. Mutable objects may be backed by a Tensor
which holds the unique handle that identifies the mutable object.
"""
@property
def dtype(self):
pass
@property
def shape(self):
pass
class Symbol(Tensor):
"""Symbolic "graph" Tensor.
These objects represent the output of an op definition and do not carry a
value.
"""
pass
class Value(Tensor):
"""Tensor that can be associated with a value (aka "eager tensor").
These objects represent the (usually future) output of executing an op
immediately.
"""
def numpy(self):
pass
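# Hedged sketch (not part of the original module): the intended subtyping of
# the classes above; both graph (Symbol) and eager (Value) tensors are Tensors.
assert issubclass(Symbol, Tensor) and issubclass(Value, Tensor)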
| mattcaldwell/zipline | zipline/protocol.py | Python | apache-2.0 | 14,487 | 0 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import iteritems, iterkeys
import pandas as pd
from . utils.protocol_utils import Enum
from zipline.finance.trading import with_environment
from zipline.utils.algo_instance import get_algo_instance
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = Enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = ['id', 'payment_sid', 'cash_amount', 'share_count']
def dividend_payment(data=None):
"""
Take a dictionary whose values are in DIVIDEND_PAYMENT_FIELDS and return a
series representing the payment of a dividend.
Ids are assigned to each historical dividend in
PerformanceTracker.update_dividends. They are guaranteed to be unique
integers with the context of a single simulation. If @data is non-empty, a
id is required to identify the historical dividend associated with this
payment.
Additionally, if @data is non-empty, either data['cash_amount'] should be
nonzero or data['payment_sid'] should be a security identifier and
data['share_count'] should be nonzero.
The returned Series is given its id value as a name so that concatenating
payments results in a DataFrame indexed by id. (Note, however, that the
name value is not used to construct an index when this series is returned
by function passed to `DataFrame.apply`. In such a case, pandas preserves
the index of the DataFrame on which `apply` is being called.)
"""
return pd.Series(
data=data,
name=data['id'] if data is not None else None,
index=DIVIDEND_PAYMENT_FIELDS,
dtype=object,
)
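# Hedged usage sketch (not in the original): dividend_payment() with no data
# returns a Series named None over DIVIDEND_PAYMENT_FIELDS with missing
# values, while
#   dividend_payment({'id': 1, 'payment_sid': 8,
#                     'cash_amount': 50.0, 'share_count': 0})
# returns a Series named 1, so concatenated payments index by id.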
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__ = initial_values
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
class Order(Event):
pass
class Portfolio(object):
def __init__(self):
self.capital_used = 0.0
self.starting_cash = 0.0
self.portfolio_value = 0.0
self.pnl = 0.0
self.returns = 0.0
self.cash = 0.0
self.positions = Positions()
self.start_date = None
self.positions_value = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
class Account(object):
'''
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
'''
def __init__(self):
self.settled_cash = 0.0
self.accrued_interest = 0.0
self.buying_power = float('inf')
self.equity_with_loan = 0.0
self.total_positions_value = 0.0
self.regt_equity = 0.0
self.regt_margin = float('inf')
self.initial_margin_requirement = 0.0
self.maintenance_margin_requirement = 0.0
self.available_funds = 0.0
self.excess_liquidity = 0.0
self.cushion = 0.0
self.day_trades_remaining = float('inf')
self.leverage = 0.0
self.net_liquidation = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Account({0})".format(self.__dict__)
def _get_state(self):
return 'Account', self.__dict__
def _set_state(self, saved_state):
self.__dict__.update(saved_state)
class Position(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Position({0})".format(self.__dict__)
class Positions(dict):
def __missing__(self, key):
pos = Position(key)
self[key] = pos
return pos
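# Hedged usage sketch (not in the original): Positions behaves like a
# defaultdict; looking up an unknown sid creates an empty Position for it.
_positions = Positions()
assert _positions[24].amount == 0 and 24 in _positions  # 24 is an example sid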
class SIDData(object):
# Cache some data on the class so that this is shared for all instances of
# siddata.
# The dt where we cached the history.
_history_cache_dt = None
    # _history_cache is a dict mapping fields to pd.DataFrames. This is the
# most data we have for a given field for the _history_cache_dt.
_history_cache = {}
# This is the cache that is used for returns. This will have a different
# structure than the other history cache as this is always daily.
_returns_cache_dt = None
_returns_cache = None
# The last dt that we needed to cache the number of minutes.
_minute_bar_cache_dt = None
# If we are in minute mode, there is some cost associated with computing
# the number of minutes that we need to pass to the bar count of history.
# This will remain constant for a given bar and day count.
# This maps days to number of minutes.
_minute_bar_cache = {}
def __init__(self, sid, initial_values=None):
self._sid = sid
self._freqstr = None
# To check if we have data, we use the __len__ which depends on the
        # __dict__. Because we are forward-defining the attributes needed, we
        # need to account for their entries in the __dict__.
# We will add 1 because we need to account for the _initial_len entry
# itself.
self._initial_len = len(self.__dict__) + 1
if initial_values:
self.__dict__.update(initial_values)
@property
def datetime(self):
"""
Provides an alias from data['foo'].datetime -> data['foo'].dt
        `datetime` was previously provided by adding a separate `datetime`
member of the SIDData object via a generator that wrapped the incoming
        data feed and added the field to each equity event.
This alias is intended to be temporary, to provide backwards
compatibility with existing algorithms, but should be considered
deprecated, and may be removed in the future.
"""
return self.dt
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __getitem__(self, name):
return self.__dict__[name]
def __setitem__(self, name, value):
self.__dict__[name] = value
def __len__(self):
return len(self.__dict__) - self._initial_len
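    # Added note: _initial_len counts the bookkeeping attributes set up in
    # __init__, so __len__ reports only the per-bar data fields that were
    # attached afterwards.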
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "SIDData({0})".format(self.__dict__)
def _get_buffer(self, bars, field='price'):
"""
Gets the result of history f
|
zmarvel/playground
|
sound/testplay.py
|
Python
|
mit
| 3,152
| 0.005076
|
import alsaaudio
from math import pi, sin, pow
import getch
SAMPLE_RATE = 44100
FORMAT = alsaaudio.PCM_FORMAT_U8
PERIOD_SIZE = 512
N_SAMPLES = 1024
notes = "abcdefg"
frequencies = {}
for i, note in enumerate(notes):
    # Equal-temperament semitone spacing (an assumed intent); the original
    # pow(pow(2, 1/2), i) used integer division (1/2 == 0 in Python 2), which
    # made every note 440 Hz.
    frequencies[note] = 440 * pow(2, i / 12.0)
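# Worked example (added): with semitone spacing, frequencies['a'] == 440.0 and
# frequencies['b'] == 440 * 2 ** (1 / 12.0), roughly 466.16 Hz.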
# Generate one period of a sine wave: 1024 samples with amplitude 127; the
# +128 offset into the unsigned 0-255 range is applied when buffers are built.
sine_wave = [int(sin(x * 2*pi/N_SAMPLES) * 127) for x in range(0, N_SAMPLES)]
square_wave = []
sawtooth_wave = []
triangle_wave = []
for i in range(0, N_SAMPLES):
    # Parentheses matter here: without them, "% 2*pi" parses as (x % 2) * pi.
    phase = (i * 2*pi / N_SAMPLES) % (2*pi)
if phase < pi:
square_wave.append(127)
else:
square_wave.append(-128)
    # Use true division; int() already truncates, and floor-dividing by pi
    # first would distort the ramps.
    sawtooth_wave.append(int(127 - (127 / pi * phase)))
    if phase < pi:
        triangle_wave.append(int(-127 + (2 * 127 * phase / pi)))
    else:
        triangle_wave.append(int(3 * 127 - (2 * 127 * phase / pi)))
def main():
buf = bytearray(PERIOD_SIZE)
# alsaaudio setup
dev = alsaaudio.PCM(type=alsaaudio.PCM_PLAYBACK)
dev.setchannels(1)
dev.setrate(SAMPLE_RATE)
dev.setformat(FORMAT)
dev.setperiodsize(PERIOD_SIZE)
#load_buf(buf, 440)
f = 440
w_half = [x//2 + 128 for x in make_wave(sine_wave, f)]
#w_o1 = [x//4 for x in make_wave(f*2)]
#w_o2 = [x//6 for x in make_wave(f*3)]
#w_o3 = [x//8 for x in make_wave(f*4)]
#w_o4 = [x//10 for x in make_wave(f*5)]
    #w_o4 = [x//12 for x in make_wave(f*6)]
#w_o5 = [x//14 for x in make_wave(f*7)]
#w_o6 = [x//16 for x in make_wave(f*8)]
#for i, samp in enumerate(w_o1):
# w[i] += samp + w_o2[i] + w_o3[i] + w_o4[i] + w_o5[i] + w_o6[i] + 128
# print(w[i])
#buf = bytearray(w)
#for i, samp in enumerate(w):
# if samp > 0:
# samp = 127
# else:
# samp = -128
    w = [x + 128 for x in make_wave(square_wave, 440)]
buf = bytearray(w)
char = getch.getch()
last = 'q'
while char != 'q':
if char != last:
if char == '1':
w = [x//2 + 128 for x in make_wave(sine_wave, 440)]
buf = bytearray(w)
elif char == '2':
w = [x//2 + 128 for x in make_wave(square_wave, 440)]
buf = bytearray(w)
elif char == '3':
w = [x//2 + 128 for x in make_wave(sawtooth_wave, 440)]
buf = bytearray(w)
elif char == '4':
w = [x//2 + 128 for x in make_wave(triangle_wave, 440)]
buf = bytearray(w)
elif char == '5':
buf = bytearray(w_half)
dev.write(buf)
dev.write(buf)
dev.write(buf)
last = char
char = getch.getch()
return 0
#def load_buf(buf, frequency):
# step = N_SAMPLES * frequency // SAMPLE_RATE
# for i in range(0, PERIOD_SIZE):
# buf[i] = wave[(step * i * N_SAMPLES // PERIOD_SIZE) % N_SAMPLES]
# return buf
def make_wave(wave, frequency):
step = N_SAMPLES * frequency // SAMPLE_RATE
w = []
for i in range(0, PERIOD_SIZE):
w.append(wave[(step * i * N_SAMPLES // PERIOD_SIZE) % N_SAMPLES])
return w
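# Illustrative check (added, not in the original): make_wave resamples the
# 1024-sample wavetable at `step` table indices per output sample, and always
# returns exactly one buffer of PERIOD_SIZE samples regardless of frequency.
assert len(make_wave(sine_wave, 440)) == PERIOD_SIZE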
if __name__ == '__main__':
main()
|
SamK/check_iftraffic_nrpe.py
|
setup.py
|
Python
|
gpl-3.0
| 524
| 0.015267
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='check_iftraffic_nrpe',
version='0.12.1',
description='Nagios NRPE plugin to check Linux network traffic',
scripts = ['check_iftraffic_nrpe.py'],
author='Samuel Krieg',
      author_email='samuel.krieg+github@gmail.com',
url='https://github.com/SamK/check_iftraffic_nrpe.py',
      download_url = 'https://github.com/SamK/check_iftraffic_nrpe.py/tarball/0.12.1',
keywords = ['nagios', 'traffic', 'nrpe', 'monitoring']
)
|
aabmass/CIS4301-Project-GUL
|
backend/loaddb/createtables.py
|
Python
|
mit
| 534
| 0.007491
|
#!/usr/bin/env python2
from dbutil import *
def createTables():
""" Populate the array with names of sql DDL files """
for sqlFileName in ["Address.sql", "Electricity.sql", "CodeViolationsReport.sql",
"FireRescueEMSResponse.sql", "NaturalGasReport.sql",
"WaterReport.sql"]:
try:
runSqlFile("create/" + sqlFileName)
print "Created table '{}'".
|
format(sqlFileName.split(".sql")[0]
|
)
        except Exception:
            # Ignore failures (e.g. the table already exists) and move on.
            pass
createTables()
|
jlcarmic/producthunt_simulator
|
venv/lib/python2.7/site-packages/scipy/optimize/_lsq/common.py
|
Python
|
mit
| 20,742
| 0.001736
|
"""Functions used by least-squares algorithms."""
from math import copysign
import numpy as np
from numpy.linalg import norm
from scipy.linalg import cho_factor, cho_solve, LinAlgError
from scipy.sparse import issparse
from scipy.sparse.linalg import LinearOperator, aslinearoperator
EPS = np.finfo(float).eps
# Functions related to a trust-region problem.
def intersect_trust_region(x, s, Delta):
"""Find the intersection of a line with the boundary of a trust region.
This function solves the quadratic equation with respect to t
||(x + s*t)||**2 = Delta**2.
Returns
-------
t_neg, t_pos : tuple of float
Negative and positive roots.
Raises
------
ValueError
If `s` is zero or `x` is not within the trust region.
"""
a = np.dot(s, s)
if a == 0:
raise ValueError("`s` is zero.")
b = np.dot(x, s)
c = np.dot(x, x) - Delta**2
if c > 0:
raise ValueError("`x` is not within the trust region.")
d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.
# Computations below avoid loss of significance, see "Numerical Recipes".
q = -(b + copysign(d, b))
t1 = q / a
t2 = c / q
if t1 < t2:
return t1, t2
else:
return t2, t1
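# Illustrative check (added, not part of the original module): starting at the
# origin and moving along the first axis with Delta = 2, the line crosses the
# boundary at t = -2 and t = 2.
_t_neg, _t_pos = intersect_trust_region(np.zeros(2), np.array([1.0, 0.0]), 2.0)
assert (_t_neg, _t_pos) == (-2.0, 2.0)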
def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
rtol=0.01, max_iter=10):
"""Solve a trust-region problem arising in least-squares minimization.
This function implements a method described by J. J. More [1]_ and used
in MINPACK, but it relies on a single SVD of Jacobian instead of series
of Cholesky decompositions. Before running this function, compute:
``U, s, VT = svd(J, full_matrices=False)``.
Parameters
----------
n : int
Number of variables.
m : int
Number of residuals.
uf : ndarray
Computed as U.T.dot(f).
s : ndarray
Singular values of J.
V : ndarray
Transpose of VT.
Delta : float
Radius of a trust region.
initial_alpha : float, optional
Initial guess for alpha, which might be available from a previous
iteration. If None, determined automatically.
rtol : float, optional
Stopping tolerance for the root-finding procedure. Namely, the
solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
max_iter : int, optional
Maximum allowed number of iterations for the root-finding procedure.
Returns
-------
p : ndarray, shape (n,)
Found solution of a trust-region problem.
alpha : float
Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
Sometimes called Levenberg-Marquardt parameter.
n_iter : int
Number of iterations made by root-finding procedure. Zero means
that Gauss-Newton step was selected as the solution.
References
----------
.. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
"""
def phi_and_derivative(alpha, suf, s, Delta):
"""Function of which to find zero.
It is defined as "norm of regularized (by alpha) least-squares
        solution minus `Delta`". Refer to [1]_.
"""
denom = s**2 + alpha
        p_norm = norm(suf / denom)
phi = p_norm - Delta
phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
return phi, phi_prime
suf = s * uf
# Check if J has full rank and try Gauss-Newton step.
if m >= n:
threshold = EPS * m * s[0]
full_rank = s[-1] > threshold
else:
full_rank = False
if full_rank:
p = -V.dot(uf / s)
if norm(p) <= Delta:
return p, 0.0, 0
alpha_upper = norm(suf) / Delta
if full_rank:
phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
alpha_lower = -phi / phi_prime
else:
alpha_lower = 0.0
if initial_alpha is None or not full_rank and initial_alpha == 0:
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
else:
alpha = initial_alpha
for it in range(max_iter):
if alpha < alpha_lower or alpha > alpha_upper:
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)
if phi < 0:
alpha_upper = alpha
ratio = phi / phi_prime
alpha_lower = max(alpha_lower, alpha - ratio)
alpha -= (phi + Delta) * ratio / Delta
if np.abs(phi) < rtol * Delta:
break
p = -V.dot(suf / (s**2 + alpha))
    # Make the norm of p equal to Delta; p is changed only slightly during
    # this. It is done to prevent p from lying outside the trust region
    # (which can cause problems later).
p *= Delta / norm(p)
return p, alpha, it + 1
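# Illustrative usage sketch (added, not part of the original module): SVD the
# Jacobian once, then pass the factors in as the docstring above prescribes.
_J = np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
_f = np.array([1.0, 1.0, 1.0])
_U, _sv, _VT = np.linalg.svd(_J, full_matrices=False)
_p, _alpha, _n_iter = solve_lsq_trust_region(2, 3, _U.T.dot(_f), _sv, _VT.T, 0.1)
assert abs(norm(_p) - 0.1) < 1e-10  # step sits on the trust-region boundary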
def solve_trust_region_2d(B, g, Delta):
"""Solve a general trust-region problem in 2 dimensions.
The problem is reformulated as a 4-th order algebraic equation,
the solution of which is found by numpy.roots.
Parameters
----------
B : ndarray, shape (2, 2)
Symmetric matrix, defines a quadratic term of the function.
g : ndarray, shape (2,)
Defines a linear term of the function.
Delta : float
Radius of a trust region.
Returns
-------
p : ndarray, shape (2,)
Found solution.
newton_step : bool
Whether the returned solution is the Newton step which lies within
the trust region.
"""
try:
R, lower = cho_factor(B)
p = -cho_solve((R, lower), g)
if np.dot(p, p) <= Delta**2:
return p, True
except LinAlgError:
pass
a = B[0, 0] * Delta**2
b = B[0, 1] * Delta**2
c = B[1, 1] * Delta**2
d = g[0] * Delta
f = g[1] * Delta
coeffs = np.array(
[-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
t = np.roots(coeffs) # Can handle leading zeros.
t = np.real(t[np.isreal(t)])
p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
i = np.argmin(value)
p = p[:, i]
return p, False
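# Illustrative check (added): with B = I and g = (-1, 0), the Newton step
# p = (1, 0) lies exactly on the Delta = 1 boundary and is accepted directly.
_p2d, _newton = solve_trust_region_2d(np.eye(2), np.array([-1.0, 0.0]), 1.0)
assert _newton and np.allclose(_p2d, [1.0, 0.0])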
def update_tr_radius(Delta, actual_reduction, predicted_reduction,
step_norm, bound_hit):
"""Update the radius of a trust region based on the cost reduction.
Returns
-------
Delta : float
New radius.
ratio : float
Ratio between actual and predicted reductions. Zero if predicted
reduction is zero.
"""
if predicted_reduction > 0:
ratio = actual_reduction / predicted_reduction
else:
ratio = 0
if ratio < 0.25:
Delta = 0.25 * step_norm
elif ratio > 0.75 and bound_hit:
Delta *= 2.0
return Delta, ratio
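# Worked example (added): a poor step (ratio = 0.1 / 0.5 = 0.2 < 0.25) shrinks
# the radius to a quarter of the step norm; a good step that hit the boundary
# (ratio > 0.75) doubles it.
assert update_tr_radius(1.0, 0.1, 0.5, 0.4, False) == (0.1, 0.2)
assert update_tr_radius(1.0, 0.9, 1.0, 1.0, True) == (2.0, 0.9)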
# Construction and minimization of quadratic functions.
def build_quadratic_1d(J, g, s, diag=None, s0=None):
"""Parameterize a multivariate quadratic function along a line.
The resulting univariate quadratic function is given as follows:
::
f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
g.T * (s0 + s*t)
Parameters
----------
    J : ndarray, sparse matrix or LinearOperator, shape (m, n)
Jacobian matrix, affects the quadratic term.
g : ndarray, shape (n,)
Gradient, defines the linear term.
s : ndarray, shape (n,)
Direction vector of a line.
diag : None or ndarray with shape (n,), optional
        Additional diagonal part, affects the quadratic term.
If None, assumed to be 0.
s0 : None or ndarray with shape (n,), optional
Initial point. If None, assumed to be 0.
Returns
-------
a : float
Coefficient for t**2.
b : float
Coefficient for t.
c : float
Free term. Returned only if `s0` is provided.
"""
v = J.dot(s)
a = np.dot(v, v)
if diag is not None:
|
pyrocko/pyrocko
|
src/apps/fomosto.py
|
Python
|
gpl-3.0
| 36,133
| 0
|
#!/usr/bin/env python
# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
from __future__ import print_function
import sys
import re
import os.path as op
import logging
import copy
import shutil
from optparse import OptionParser
from pyrocko import util, trace, gf, cake, io, config, fomosto
from pyrocko.gui import marker
from pyrocko.util import mpl_show
logger = logging.getLogger('pyrocko.apps.fomosto')
km = 1e3
def d2u(d):
return dict((k.replace('-', '_'), v) for (k, v) in d.items())
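# Added note: e.g. d2u({'some-key': 1}) -> {'some_key': 1}, letting dashed
# subcommand names be used as format/keyword arguments below.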
subcommand_descriptions = {
'init': 'create a new empty GF store',
'build': 'compute GFs and fill into store',
'stats': 'print information about a GF store',
'check': 'check for problems in GF store',
    'decimate': 'build decimated variant of a GF store',
'redeploy': 'copy traces from one GF store into another',
'view': 'view selected traces',
'extract': 'extract selected traces',
'import': 'convert Kiwi GFDB to GF store format',
'export': 'convert GF store to Kiwi GFDB format',
'ttt': 'create travel time tables',
    'tttview': 'plot travel time table',
'tttextract': 'extract selected travel times',
'tttlsd': 'fix holes in travel time tables',
'server': 'run seismosizer server',
'download': 'download GF store from a server',
'modelview': 'plot earthmodels',
'upgrade': 'upgrade store format to latest version',
'addref': 'import citation references to GF store config',
'qc': 'quality check',
'report': 'report for Green\'s Function databases',
}
subcommand_usages = {
'init': ['init <type> <store-dir> [options]',
'init redeploy <source> <destination> [options]'],
'build': 'build [store-dir] [options]',
'stats': 'stats [store-dir] [options]',
'check': 'check [store-dir] [options]',
'decimate': 'decimate [store-dir] <factor> [options]',
'redeploy': 'redeploy <source> <destination> [options]',
'view': 'view [store-dir] ... [options]',
'extract': 'extract [store-dir] <selection>',
'import': 'import <source> <destination> [options]',
'export': 'export [store-dir] <destination> [options]',
'ttt': 'ttt [store-dir] [options]',
'tttview': 'tttview [store-dir] <phase-ids> [options]',
'tttextract': 'tttextract [store-dir] <phase> <selection>',
'tttlsd': 'tttlsd [store-dir] <phase>',
'server': 'server [options] <store-super-dir> ...',
'download': 'download [options] <site> <store-id>',
'modelview': 'modelview <selection>',
'upgrade': 'upgrade [store-dir] ...',
'addref': 'addref [store-dir] ... <filename> ...',
'qc': 'qc [store-dir]',
'report': 'report <subcommand> <arguments>... [options]'
}
subcommands = subcommand_descriptions.keys()
program_name = 'fomosto'
usage = program_name + ''' <subcommand> <arguments> ... [options]
Subcommands:
init %(init)s
build %(build)s
stats %(stats)s
check %(check)s
decimate %(decimate)s
redeploy %(redeploy)s
view %(view)s
extract %(extract)s
import %(import)s
export %(export)s
ttt %(ttt)s
tttview %(tttview)s
tttextract %(tttextract)s
tttlsd %(tttlsd)s
server %(server)s
download %(download)s
modelview %(modelview)s
upgrade %(upgrade)s
addref %(addref)s
qc %(qc)s
report %(report)s
To get further help and a list of available options for any subcommand run:
fomosto <subcommand> --help
''' % d2u(subcommand_descriptions)
def add_common_options(parser):
parser.add_option(
'--loglevel',
action='store',
dest='loglevel',
type='choice',
choices=('critical', 'error', 'warning', 'info', 'debug'),
default='info',
help='set logger level to '
'"critical", "error", "warning", "info", or "debug". '
'Default is "%default".')
def process_common_options(options):
util.setup_logging(program_name, options.loglevel)
def cl_parse(command, args, setup=None, details=None):
usage = subcommand_usages[command]
descr = subcommand_descriptions[command]
if isinstance(usage, str):
usage = [usage]
susage = '%s %s' % (program_name, usage[0])
for s in usage[1:]:
susage += '\n%s%s %s' % (' '*7, program_name, s)
description = descr[0].upper() + descr[1:] + '.'
if details:
description = description + ' %s' % details
parser = OptionParser(usage=susage, description=description)
parser.format_description = lambda formatter: description
if setup:
setup(parser)
add_common_options(parser)
(options, args) = parser.parse_args(args)
process_common_options(options)
return parser, options, args
def die(message, err=''):
sys.exit('%s: error: %s \n %s' % (program_name, message, err))
def fomo_wrapper_module(name):
try:
if not re.match(gf.meta.StringID.pattern, name):
raise ValueError('invalid name')
words = name.split('.', 1)
if len(words) == 2:
name, variant = words
else:
name = words[0]
variant = None
name_clean = re.sub(r'[.-]', '_', name)
modname = '.'.join(['pyrocko', 'fomosto', name_clean])
mod = __import__(modname, level=0)
return getattr(mod.fomosto, name_clean), variant
except ValueError:
die('invalid modelling code wrapper name')
except ImportError:
die('''modelling code wrapper "%s" not available or not installed
(module probed: "%s")''' % (name, modname))
def command_init(args):
details = '''
Available modelling backends:
%s
More information at
https://pyrocko.org/docs/current/apps/fomosto/backends.html
''' % '\n'.join([' * %s' % b for b in fomosto.AVAILABLE_BACKENDS])
parser, options, args = cl_parse(
'init', args,
details=details)
if len(args) == 0:
sys.exit(parser.format_help())
if args[0] == 'redeploy':
if len(args) != 3:
parser.error('incorrect number of arguments')
source_dir, dest_dir = args[1:]
try:
source = gf.Store(source_dir)
except gf.StoreError as e:
die(e)
config = copy.deepcopy(source.config)
config.derived_from_id = source.config.id
try:
config_filenames = gf.store.Store.create_editables(
dest_dir, config=config)
except gf.StoreError as e:
die(e)
try:
dest = gf.Store(dest_dir)
except gf.StoreError as e:
die(e)
for k in source.extra_keys():
source_fn = source.get_extra_path(k)
dest_fn = dest.get_extra_path(k)
shutil.copyfile(source_fn, dest_fn)
logger.info(
'(1) configure settings in files:\n %s'
% '\n '.join(config_filenames))
logger.info(
'(2) run "fomosto redeploy <source> <dest>", as needed')
else:
if len(args) != 2:
parser.error('incorrect number of arguments')
(modelling_code_id, store_dir) = args
module, variant = fomo_wrapper_module(modelling_code_id)
try:
config_filenames = module.init(store_dir, variant)
except gf.StoreError as e:
die(e)
logger.info('(1) configure settings in files:\n %s'
% '\n '.join(config_filenames))
logger.info('(2) run "fomosto ttt" in directory "%s"' % store_dir)
logger.info('(3) run "fomosto build" in directory "%s"' % store_dir)
def get_store_dir(args):
if len(args) == 1:
store_dir = op.abspath(args.pop(0))
else:
|
10177591/BnB-bot
|
utils/FileDownloader.py
|
Python
|
gpl-3.0
| 2,539
| 0.002757
|
import logging
from logging import config
import paramiko
import os
from read_config import *
class FileDownloader(object):
ip = None
port = None
user = None
password = None
local_file_path = None
remote_file_path = None
abs_file_list = []
ssh = None
logger = None
def __init__(self, config):
logging.config.fileConfig('../config/logging.conf')
self.logger = logging.getLogger('fileLogger')
self.ip = config.get_serverip()
self.port = config.get_serverport()
self.user = config.get_username()
self.password = config.get_password()
self.remote_file_path = config.get_srcdir()
self.local_file_path = config.get_dstdir()
def download(self):
sftp = self.ssh.open_sftp()
self.list_remote_file(self.remote_file_path)
for remote_file in self.abs_file_list:
sub_path_name = remote_file[remote_file.index(self.remote_file_path) + len(self.remote_file_path):]
local_file = self.local_file_path + sub_path_name
dir_name = os.path.dirname(local_file)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
sftp.get(remote_file, local_file)
self.logger.info('Download ' + remote_file + ' successfully.')
def list_remote_file(self, remote_folder):
sftp = self.ssh.open_sftp()
file_list = sftp.listdir(remote_folder)
for file in file_list:
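            # Added note: this shells out to `file` and greps for "directory"
            # to decide whether to recurse; command failures are not checked.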
cmd = 'file ' + remote_folder + '/' + file + '|grep directory|wc -l'
stdin, stdout, stderr = self.ssh.exec_command(cmd)
            res = stdout.readline().strip()
if res == "1":
self.list_remote_file(remote_folder + '/' + file)
else:
self.abs_file_list.append(remote_folder + '/' + file)
def connect(self):
self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.ip, int(self.port), self.user, self.password)
self.logger.info('Connect to ' + self.ip + ' successfully.')
def close(self):
self.ssh.close()
self.logger.info('Disconnect to ' + self.ip + ' successfully.')
if __name__ == '__main__':
config = ConfigLoader().load_config('../config/product_config.json')
downloader = FileDownloader(config)
downloader.connect()
downloader.download()
downloader.close()
|
jean/sentry
|
src/sentry/south_migrations/0159_auto__add_field_authidentity_last_verified__add_field_organizationmemb.py
|
Python
|
bsd-3-clause
| 52,679
| 0.000835
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AuthIdentity.last_verified'
db.add_column(
'sentry_authidentity',
'last_verified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False
)
# Adding field 'OrganizationMember.flags'
db.add_column(
'sentry_organizationmember',
'flags',
self.gf('django.db.models.fields.BigIntegerField')(default=0),
keep_default=False
)
def backwards(self, orm):
# Deleting field 'AuthIdentity.last_verified'
db.delete_column('sentry_authidentity', 'last_verified')
# Deleting field 'OrganizationMember.flags'
db.delete_column('sentry_organizationmember', 'flags')
models = {
'sentry.accessgroup': {
'Meta': {
'unique_together': "(('team', 'name'),)",
'object_name': 'AccessGroup'
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.User']",
'symmetrical': 'False'
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'symmetrical': 'False'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '50'
})
},
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.alert': {
'Meta': {
'object_name': 'Alert'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
                'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
                    'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'related_groups': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'related_alerts'",
'symmetrical': 'False',
'through': "orm['sentry.AlertRelatedGroup']",
'to': "orm['sentry.Group']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.alertrelatedgroup': {
'Meta': {
'unique_together': "(('group', 'alert'),)",
'object_name': 'AlertRelatedGroup'
},
'alert': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Alert']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
|
hangpark/kaistusc
|
apps/manager/admin.py
|
Python
|
bsd-2-clause
| 1,268
| 0.003061
|
"""
Admin page configuration for the site management tools.
"""
from django.contrib import admin
from modeltranslation.admin import TranslationAdmin
from .models import Category, GroupServicePermission, Service, TopBanner
class CategoryAdmin(TranslationAdmin):
"""
    Custom admin for the :class:`Category` model.
    Inherits :class:`TranslationAdmin` from `django-modeltranslation` to make
    editing the multilingual fields more user-friendly.
"""
pass
class ServiceAdmin(TranslationAdmin):
"""
    Custom admin for the `Service` model.
    Inherits :class:`TranslationAdmin` from `django-modeltranslation` to make
    editing the multilingual fields more user-friendly.
"""
pass
class TopBannerAdmin(TranslationAdmin):
"""
    Custom admin for the `TopBanner` model.
    Inherits :class:`TranslationAdmin` from `django-modeltranslation` to make
    editing the multilingual fields more user-friendly.
"""
pass
admin.site.register(Category, CategoryAdmin)
admin.site.register(Service, ServiceAdmin)
admin.site.register(TopBanner, TopBannerAdmin)
admin.site.register(GroupServicePermission)
|
shunliz/test
|
python/scikit/linear.py
|
Python
|
apache-2.0
| 505
| 0.011881
|
from sklearn import datasets
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
loaded_data = datasets.load_boston()
data_X = loaded_data.data
data_y = loaded_data.target
model = LinearRegression()
model.fit(data_X, data_y)
print(model.predict(data_X[:4,:]))
print(data_y[:4])
print(model.coef_)
print(model.intercept_)
print(model.score(data_X, data_y))
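# Illustrative addition (not in the original script): model.score above is the
# coefficient of determination R^2 = 1 - SS_res / SS_tot; recomputing it by
# hand should reproduce the printed score.
import numpy as np
pred = model.predict(data_X)
print(1 - ((data_y - pred) ** 2).sum() / ((data_y - data_y.mean()) ** 2).sum())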
#X, y = datasets.make_regression(n_samples=100, n_features=1, n_targets=1, noise=20)
#plt.scatter(X,y)
#plt.show()
|
kantale/MutationInfo
|
web/web/settings.py
|
Python
|
mit
| 2,056
| 0
|
"""
Django settings for web project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a(g^11#$6jyys)0mjl3zv4=r029or=v*ldq=)44866(re!nmg)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'MutationInfoApp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'web.urls'
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
bitprophet/pytest-relaxed
|
tasks.py
|
Python
|
bsd-2-clause
| 1,883
| 0
|
from invoke import task, Collection
from invocations.checks import blacken
from invocations.packaging import release
from invocations import docs, pytest as pytests, travis
@task
def coverage(c, html=True):
"""
Run coverage with coverage.py.
"""
# NOTE: this MUST use coverage itself, and not pytest-cov, because the
# latter is apparently unable to prevent pytest plugins from being loaded
# before pytest-cov itself is able to start up coverage.py! The result is
# that coverage _always_ skips over all module level code, i.e. constants,
# 'def' lines, etc. Running coverage as the "outer" layer avoids this
# problem, thus no need for pytest-cov.
# NOTE: this does NOT hold true for NON-PYTEST code, so
# pytest-relaxed-USING modules can happily use pytest-cov.
c.run("coverage run --source=pytest_relaxed -m pyt
|
est")
    if html:
        c.run("coverage html")
c.run("open htmlcov/index.html")
# TODO: good candidate for builtin-to-invoke "just wrap <other task> with a
# tiny bit of behavior", and/or args/kwargs style invocations
@task
def test(
c,
verbose=True,
color=True,
capture="sys",
opts="",
x=False,
k=None,
module=None,
):
"""
Run pytest with given options.
Wraps ``invocations.pytests.test``. See its docs for details.
"""
# TODO: could invert this & have our entire test suite manually _enable_
# our own plugin, but given pytest's options around plugin setup, this
# seems to be both easier and simpler.
opts += " -p no:relaxed"
pytests.test(
c,
verbose=verbose,
color=color,
capture=capture,
opts=opts,
x=x,
k=k,
module=module,
)
ns = Collection(blacken, coverage, docs, test, travis, release)
ns.configure({"blacken": {"find_opts": "-and -not -path './build*'"}})
|
CloudBrewery/duplicity-swiftkeys
|
duplicity/tempdir.py
|
Python
|
gpl-2.0
| 9,197
| 0.001414
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Provides temporary file handling centered around a single top-level
securely created temporary directory.
The public interface of this module is thread-safe.
"""
import os
import threading
import tempfile
from duplicity import log
from duplicity import util
from duplicity import globals
# Set up state related to managing the default temporary directory
# instance
_defaultLock = threading.Lock()
_defaultInstance = None
def default():
"""
Obtain the global default instance of TemporaryDirectory, creating
it first if necessary. Failures are propagated to caller. Most
callers are expected to use this function rather than
instantiating TemporaryDirectory directly, unless they explicitly
    desire to have their "own" directory for some reason.
This function is thread-safe.
"""
global _defaultLock
global _defaultInstance
_defaultLock.acquire()
try:
if _defaultInstance is None or _defaultInstance.dir() is None:
_defaultInstance = TemporaryDirectory(temproot = globals.temproot)
return _defaultInstance
finally:
_defaultLock.release()
class TemporaryDirectory:
"""
A temporary directory.
An instance of this class is backed by a directory in the file
system created securely by the use of tempfile.mkdtemp(). Said
instance can be used to obtain unique filenames inside of this
directory for cases where mktemp()-like semantics is desired, or
(recommended) an fd,filename pair for mkstemp()-like semantics.
See further below for the security implications of using it.
Each instance will keep a list of all files ever created by it, to
    facilitate deletion of such files and rmdir() of the directory
itself. It does this in order to be able to clean out the
directory without resorting to a recursive delete (ala rm -rf),
which would be risky. Calling code can optionally (recommended)
notify an instance of the fact that a tempfile was deleted, and
thus need not be kept track of anymore.
This class serves two primary purposes:
Firstly, it provides a convenient single top-level directory in
which all the clutter ends up, rather than cluttering up the root
of the system temp directory itself with many files.
Secondly, it provides a way to get mktemp() style semantics for
temporary file creation, with most of the risks
gone. Specifically, since the directory itself is created
securely, files in this directory can be (mostly) safely created
non-atomically without the usual mktemp() security
implications. However, in the presence of tmpwatch, tmpreaper, or
similar mechanisms that will cause files in the system tempdir to
expire, a security risk is still present because the removal of
the TemporaryDirectory managed directory removes all protection it
offers.
For this reason, use of mkstemp() is greatly preferred above use
of mktemp().
In addition, since cleanup is in the form of deletion based on a
list of filenames, completely independently of whether someone
else already deleted the file, there exists a race here as
well. The impact should however be limited to the removal of an
'attackers' file.
"""
def __init__(self, temproot = None):
"""
Create a new TemporaryDirectory backed by a unique and
securely created file system directory.
tempbase - The temp root directory, or None to use system
default (recommended).
"""
self.__dir = tempfile.mkdtemp("-tempdir", "duplicity-", temproot)
log.Info(_("Using temporary directory %s") % util.ufn(self.__dir))
# number of mktemp()/mkstemp() calls served so far
self.__tempcount = 0
# dict of paths pending deletion; use dict even though we are
        # not concerned with association, because it is unclear whether
# sets are O(1), while dictionaries are.
self.__pending = {}
self.__lock = threading.Lock() # protect private resources *AND* mktemp/mkstemp calls
def dir(self):
"""
Returns the absolute pathname of the temp folder.
"""
return self.__dir
def __del__(self):
"""
Perform cleanup.
"""
global _defaultInstance
if _defaultInstance is not None:
self.cleanup()
def mktemp(self):
"""
Return a unique filename suitable for use for a temporary
file. The file is not created.
Subsequent calls to this method are guaranteed to never return
the same filename again. As a result, it is safe to use under
concurrent conditions.
NOTE: mkstemp() is greatly preferred.
"""
filename = None
self.__lock.acquire()
try:
self.__tempcount = self.__tempcount + 1
suffix = "-%d" % (self.__tempcount,)
filename = tempfile.mktemp(suffix, "mktemp-", self.__dir)
log.Debug(_("Registering (mktemp) temporary file %s") % util.ufn(filename))
self.__pending[filename] = None
finally:
self.__lock.release()
return filename
def mkstemp(self):
"""
Returns a filedescriptor and a filename, as per os.mkstemp(),
but located in the temporary directory and subject to tracking
and automatic cleanup.
"""
fd = None
filename = None
self.__lock.acquire()
try:
self.__tempcount = self.__tempcount + 1
suffix = "-%d" % (self.__tempcount,)
fd, filename = tempfile.mkstemp(suffix, "mkstemp-", self.__dir)
log.Debug(_("Registering (mkstemp) temporary file %s") % util.ufn(filename))
self.__pending[filename] = None
finally:
self.__lock.release()
return fd, filename
def mkstemp_file(self):
"""
Convenience wrapper around mkstemp(), with the file descriptor
converted into a file object.
"""
fd, filename = self.mkstemp()
return os.fdopen(fd, "r+"), filename
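    # Usage sketch (added comment): callers typically do
    #   fobj, fname = tempdir.default().mkstemp_file()
    # and call forget(fname) once the file has been deleted, per the
    # docstring of forget() below.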
def forget(self, fname):
"""
Forget about the given filename previously obtained through
mktemp() or mkstemp(). This should be called *after* the file
has been deleted, to stop a future cleanup() from trying to
delete it.
Forgetting is only needed for scaling purposes; that is, to
        avoid n tempfile creations from implying that n filenames are
        kept in memory. Typically this would never matter in
duplicity, but for niceness sake callers are recommended to
use this method whenever possible.
"""
self.__lock.acquire()
try:
if fname in self.__pending:
log.Debug(_("Forgetting temporary file %s") % util.ufn(fname))
del(self.__pending[fname])
else:
log.Warn(_("Attempt to
|
forget unknown tempfile %s - this is probably a bug.") % util.ufn(fname))
pass
finally:
self.__lock.release()
def cleanup(self):
"""
        Cleanup any files created in the temporary directory (that
have not bee
|
Rogentos/legacy-anaconda
|
storage/zfcp.py
|
Python
|
gpl-2.0
| 16,651
| 0.003784
|
#
# zfcp.py - mainframe zfcp configuration install data
#
# Copyright (C) 2001, 2002, 2003, 2004 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Karsten Hopp <karsten@redhat.com>
#
import string
import os
from constants import *
from udev import udev_settle
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import logging
log = logging.getLogger("anaconda")
import warnings
def loggedWriteLineToFile(fn, value):
f = open(fn, "w")
log.debug("echo %s > %s" % (value, fn))
f.write("%s\n" % (value))
f.close()
zfcpsysfs = "/sys/bus/ccw/drivers/zfcp"
scsidevsysfs = "/sys/bus/scsi/devices"
class ZFCPDevice:
def __init__(self, devnum, wwpn, fcplun):
self.devnum = self.sanitizeDeviceInput(devnum)
self.wwpn = self.sanitizeWWPNInput(wwpn)
self.fcplun = self.sanitizeFCPLInput(fcplun)
if not self.checkValidDevice(self.devnum):
raise ValueError, _("You have not specified a device number or the number is invalid")
if not self.checkValidWWPN(self.wwpn):
raise ValueError, _("You have not specified a worldwide port name or the name is invalid.")
if not self.checkValidFCPLun(self.fcplun):
raise ValueError, _("You have not specified a FCP LUN or the number is invalid.")
def __str__(self):
return "%s %s %s" %(self.devnum, self.wwpn, self.fcplun)
def sanitizeDeviceInput(self, dev):
if dev is None or dev == "":
return None
dev = dev.lower()
bus = dev[:string.rfind(dev, ".") + 1]
dev = dev[string.rfind(dev, ".") + 1:]
dev = "0" * (4 - len(dev)) + dev
if not len(bus):
return "0.0." + dev
else:
return bus + dev
def sanitizeWWPNInput(self, id):
if id is None or id == "":
return None
id = id.lower()
if id[:2] != "0x":
return "0x" + id
return id
# ZFCP LUNs are usually entered as 16 bit, sysfs accepts only 64 bit
# (#125632), expand with zeroes if necessary
def sanitizeFCPLInput(self, lun):
if lun is None or lun == "":
return None
lun = lun.lower()
if lun[:2] == "0x":
lun = lun[2:]
lun = "0x" + "0" * (4 - len(lun)) + lun
lun = lun + "0" * (16 - len(lun) + 2)
return lun
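    # Worked example (added comment): sanitizeFCPLInput("4010") lower-cases
    # the input, prefixes "0x", left-pads to 4 hex digits, then right-pads
    # with zeros to 64 bits, yielding "0x4010000000000000".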
def _hextest(self, hex):
try:
int(hex, 16)
return True
except TypeError:
return False
def checkValidDevice(self, id):
if id is None or id == "":
return False
if len(id) != 8: # p.e. 0.0.0600
return False
if id[0] not in string.digits or id[2] not in string.digits:
return False
if id[1] != "." or id[3] != ".":
return False
return self._hextest(id[4:])
def checkValid64BitHex(self, hex):
if hex is None or hex == "":
return False
if len(hex) != 18:
return False
return self._hextest(hex)
checkValidWWPN = checkValidFCPLun = checkValid64BitHex
def onlineDevice(self):
online = "%s/%s/online" %(zfcpsysfs, self.devnum)
portadd = "%s/%s/port_add" %(zfcpsysfs, self.devnum)
portdir = "%s/%s/%s" %(zfcpsysfs, self.devnum, self.wwpn)
unitadd = "%s/unit_add" %(portdir)
unitdir = "%s/%s" %(portdir, self.fcplun)
failed = "%s/failed" %(unitdir)
try:
if not os.path.exists(online):
loggedWriteLineToFile("/proc/cio_ignore",
"free %s" %(self.devnum,))
udev_settle()
except IOError as e:
raise ValueError, _("Could not free zFCP device %(devnum)s from "
"device ignore list (%(e)s).") \
% {'devnum': self.devnum, 'e': e}
if not os.path.exists(online):
raise ValueError, _(
"zFCP device %s not found, not even in device ignore list."
%(self.devnum,))
try:
f = open(online, "r")
devonline = f.readline().strip()
f.close()
if devonline != "1":
loggedWriteLineToFile(online, "1")
else:
log.info("zFCP device %s already online." %(self.devnum,))
except IOError as e:
raise ValueError, _("Could not set zFCP device %(devnum)s "
"online (%(e)s).") \
% {'devnum': self.devnum, 'e': e}
if not os.path.exists(portdir):
if os.path.exists(portadd):
# older zfcp sysfs interface
try:
loggedWriteLineToFile(portadd, self.wwpn)
udev_settle()
except IOError as e:
raise ValueError, _("Could not add WWPN %(wwpn)s to zFCP "
"device %(devnum)s (%(e)s).") \
% {'wwpn': self.wwpn,
'devnum': self.devnum,
'e': e}
else:
# newer zfcp sysfs interface with auto port scan
raise ValueError, _("WWPN %(wwpn)s not found at zFCP device "
"%(devnum)s.") % {'wwpn': self.wwpn,
'devnum': self.devnum}
else:
if os.path.exists(portadd):
# older zfcp sysfs interface
log.info("WWPN %(wwpn)s at zFCP device %(devnum)s already "
"there.") % {'wwpn': self.wwpn,
'devnum': self.devnum}
if not os.path.exists(unitdir):
try:
loggedWriteLineToFile(unitadd, self.fcplun)
udev_settle()
except IOError as e:
raise ValueError, _("Could not add LUN %(fcplun)s to WWPN "
"%(wwpn)s on zFCP device %(devnum)s "
"(%(e)s).") \
% {'fcplun': self.fcplun, 'wwpn': self.wwpn,
'devnum': self.devnum, 'e': e}
else:
raise ValueError, _("LUN %(fcplun)s at WWPN %(wwpn)s on zFCP "
"device %(devnum)s already configured.") \
% {'fcplun': self.fcplun,
'wwpn': self.wwpn,
'devnum': self.devnum}
fail = "0"
try:
f = open(failed, "r")
fail = f.readline().strip()
f.close()
except IOError as e:
raise ValueError, _("Could not read failed attribute of LUN "
"%(fcplun)s at WWPN %(wwpn)s on zFCP device "
"%(devnum)s (%(e)s).") \
% {'fcplun': self.fcplun,
'wwpn': self.wwpn,
'devnum': self.devnum,
'e': e}
if fail != "0":
self.offlineDevice()
raise ValueError, _("Failed LUN %(fcplun)s at WWPN %(wwpn)s on "
"zFCP device %(devnum)s removed again.") \
% {'fcplun': self.fcplun,
'wwpn':
|
ChristosChristofidis/h2o-3
|
h2o-py/tests/testdir_misc/pyunit_vec_show.py
|
Python
|
apache-2.0
| 516
| 0.00969
|
import sys
sys.path.insert(1, "../../")
import h2o
def vec_show(ip,port):
# Connect to h2o
    h2o.init(ip,port)
iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
print "iris:"
iris.show()
###################################################################
res = 2 - iris
res2 = res[0]
print "res2:"
res2.show()
res3 = res[1]
print "res3:"
    res3.show()
iris[2].show()
if __name__ == "__main__":
h2o.run_test(sys.argv, vec_show)
|
bdcht/masr
|
masr/plugins/graph/main.py
|
Python
|
gpl-2.0
| 5,512
| 0.02812
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010 Axel Tillequin (bdcht3@gmail.com)
# This code is part of Masr
# published under GPLv2 license
import gtk
from grandalf.graphs import Vertex,Edge,Graph
from grandalf.layouts import SugiyamaLayout
from grandalf.routing import *
from grandalf.utils import median_wh,Dot
from .items import *
# start is called when Masr is 'run', to modify GUI/Canvas elements
# with plugin-specific menus, keybindings, canvas options, etc.
def start(pfunc,app,**kargs):
app.screen.gui.message("plugin graph started by %s"%pfunc)
al = kargs['args']
sg = comp = 0
step = False
cons = False
N=1
for i,arg in enumerate(al):
if arg.endswith(Session.filetype):
if not app.session:
app.session = Session(arg,app)
if arg == '-sg':
sg = int(al[i+1])
if arg == '-c':
comp = int(al[i+1])
if arg == '-s':
step = True
if arg == '-N':
N = int(al[i+1])
if arg == '-ce':
cons=True
if app.session:
assert sg<len(app.session.L)
app.session.g = ast2Graph(app.session.L[sg])
assert comp<len(app.session.g.C)
app.session.cg = CGraph(app.screen.canvas,app.session.g.C[comp])
app.session.cg.Draw(N,stepflag=step,constrained=cons)
def end(pfunc,app,**kargs):
pass
# Session class allows Masr GUIs' File menu to Open a file with matching
# extensions for a new plugin session on this file's data.
class Session(object):
filetype = ('.dot',)
def __init__(self,filename,app):
self.app = app
self.filename = filename
self.dot = Dot()
self.L = self.dot.read(filename)
self.scene = None
def info(self):
for s in self.L:
print s
def ast2Graph(ast):
V={}
E=[]
  # create Vertex and Vertex.view for each node in ast:
for k,x in ast.nodes.iteritems():
try:
label = x.attr['label']
except (KeyError,AttributeError):
label = x.name
v = dotnode(label.strip('"\n'))
V[x.name] = v
edgelist = []
# create Edge and Edge_basic for each edge in ast:
for e in ast.edges: edgelist.append(e)
for edot in edgelist:
v1 = V[edot.n1.name]
v2 = V[edot.n2.name]
e = Edge(v1,v2)
e.view = Edge_basic(v1.view,v2.view,head=True)
e.view.set_properties(line_width = 2)
E.append(e)
return Graph(V.values(),E)
def dotnode(seq):
_start = Vertex(seq)
v = _start.view = Node_codeblock(_start.data.replace('\l','\n'))
v.w,v.h = v.get_wh()
return _start
#------------------------------------------------------------------------------
# CGraph is simply a SugiyamaLayout extended with adding nodes and edges views
# on the current canvas and dealing with mouse/keyboard events.
class CGraph(SugiyamaLayout):
def __init__(self,c,g):
self.parent = c
SugiyamaLayout.__init__(self,g)
self.route_edge = route_with_lines
self.dx,self.dy = 5,5
self.dirvh=0
c.parent.connect_object("button-press-event",CGraph.eventhandler,self)
c.parent.connect_object("button-release-event",CGraph.eventhandler,self)
c.parent.connect_object("key-press-event",CGraph.eventhandler,self)
c.parent.connect_object("key-release-event",CGraph.eventhandler,self)
def Draw(self,N=1,stepflag=False,constrained=False):
self.init_all(cons=constrained)
if stepflag:
self.drawer=self.draw_step()
self.greens=[]
else:
self.draw(N)
for e in self.alt_e: e.view.set_properties(stroke_color='red')
for v in self.g.sV: self.connect_add(v.view)
for e in self.g.sE:
self.parent.root.add_child(e.view)
# move edge start/end to CX points:
e.view.update_points()
def connect_add(self,item):
self.parent.root.add_child(item)
def disconnect(self):
self.parent.parent.disconnect_by_func(CGraph.eventhandler)
def remove(self,item):
#import gc
#gc.set_debug(gc.DEBUG_LEAK)
#gc.collect()
Blit.remove(self,item)
for e in item.cx.registered[:]:
for cx in e.cx: cx.unregister(e)
self.c.root.remove(self,e)
def clean(self):
for v in self.g.sV:
self.c.root.remove(v.view)
# Scene-Wide (default) event handler on items events:
def eventhandler(self,e):
if e.type == gtk.gdk.KEY_PRESS:
if e.keyval == ord('p'):
for l in self.layers:
for v in l:
v.view.xy = (self.grx[v].x[self.dirvh],v.view.xy[1])
self.draw_edges()
self.dirvh = (self.dirvh+1)%4
if e.keyval == ord('W'):
self.xspace += 1
self.setxy()
self.draw_edges()
if e.keyval == ord('w'):
self.xspace -= 1
self.setxy()
self.draw_edges()
if e.keyval == ord('H'):
self.yspace += 1
self.setxy()
self.draw_edges()
if e.keyval == ord('h'):
self.yspace -= 1
self.setxy()
self.draw_edges()
if e.keyval == ord(' '):
try:
s,mvmt = self.drawer.next()
print s,len(mvmt)
for x in self.greens:
x.view.shadbox.set_properties(fill_color='grey44')
self.greens=[]
for x in mvmt:
if hasattr(x.view,'shadbox'):
x.view.shadbox.set_properties(fill_color='green')
self.greens.append(x)
except StopIteration:
print 'drawer terminated'
del self.drawer
del self.greens
except AttributeError:
print 'drawer created'
self.drawer=self.draw_step()
self.greens=[]
|
x3ro/RIOT
|
tests/gnrc_sock_dns/tests/01-run.py
|
Python
|
lgpl-2.1
| 12,071
| 0.000331
|
#!/usr/bin/env python3
# Copyright (C) 2018 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import base64
import os
import re
import socket
import sys
import subprocess
import threading
import time
from scapy.all import DNS, DNSQR, DNSRR, Raw, raw
from testrunner import run
SERVER_TIMEOUT = 5
SERVER_PORT = 5335  # 53 requires root and 5353 is used by e.g. Chrome for MDNS
DNS_RR_TYPE_A = 1
DNS_RR_TYPE_AAAA = 28
DNS_RR_TYPE_A_DLEN = 4
DNS_RR_TYPE_AAAA_DLEN = 16
DNS_MSG_COMP_MASK = b"\xc0"
TEST_NAME = "example.org"
TEST_A_DATA = "10.0.0.1"
TEST_AAAA_DATA = "2001:db8::1"
TEST_QDCOUNT = 2
TEST_ANCOUNT = 2
class Server(threading.Thread):
def __init__(self, family=socket.AF_INET, type=socket.SOCK_DGRAM,
proto=0, bind_addr=None, bind_port=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.socket = socket.socket(family, type, proto)
if bind_port is not None:
if (bind_addr is not None):
res = socket.getaddrinfo(bind_addr, bind_port)
sockaddr = res[0][4]
else:
sockaddr = ("", bind_port)
self.socket.bind(sockaddr)
self.stopped = False
self.enter_loop = threading.Event()
def run(self):
while True:
self.enter_loop.wait()
self.enter_loop.clear()
if self.stopped:
return
p, remote = self.socket.recvfrom(1500)
p = DNS(raw(p))
# check received packet for correctness
assert(p is not None)
assert(p[DNS].qr == 0)
assert(p[DNS].opcode == 0)
# has two queries
assert(p[DNS].qdcount == TEST_QDCOUNT)
qdcount = p[DNS].qdcount
# both for TEST_NAME
assert(p[DNS].qd[0].qname == TEST_NAME.encode("utf-8") + b".")
assert(p[DNS].qd[1].qname == TEST_NAME.encode("utf-8") + b".")
assert(any(p[DNS].qd[i].qtype == DNS_RR_TYPE_A
for i in range(qdcount))) # one is A
assert(any(p[DNS].qd[i].qtype == DNS_RR_TYPE_AAAA
for i in range(qdcount))) # one is AAAA
if self.reply is not None:
self.socket.sendto(raw(self.reply), remote)
self.reply = None
def listen(self, reply=None):
self.reply = reply
self.enter_loop.set()
def stop(self):
self.stopped = True
self.enter_loop.set()
self.socket.close()
self.join()
server = None
def check_and_search_output(cmd, pattern, res_group, *args, **kwargs):
output = subprocess.check_output(cmd, *args, **kwargs).decode("utf-8")
for line in output.splitlines():
m = re.search(pattern, line)
if m is not None:
return m.group(res_group)
return None
def get_bridge(tap):
res = check_and_search_output(
["bridge", "link"],
r"{}.+master\s+(?P<master>[^\s]+)".format(tap),
"master"
)
return tap if res is None else res
def get_host_lladdr(tap):
res = check_and_search_output(
["ip", "addr", "show", "dev", tap, "scope", "link"],
r"inet6\s+(?P<lladdr>[0-9A-Fa-f:]+)/\d+",
"lladdr"
)
if res is None:
raise AssertionError(
"Can't find host link-local address on interface {}".format(tap)
)
else:
return res
def dns_server(child, server, port=53):
child.sendline("dns server {} {:d}".format(server, port))
child.sendline("dns server")
child.expect(r"DNS server: \[{}\]:{:d}".format(server, port))
def successful_dns_request(child, name, exp_addr=None):
child.sendline("dns request {}".format(name))
res = child.expect(["error resolving {}".format(name),
"{} resolves to {}".format(name, exp_addr)],
timeout=3)
return ((res > 0) and (exp_addr is not None))
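# Added note: successful_dns_request returns True only when the "resolves to"
# branch matched and an expected address was supplied, so the negative tests
# below assert on a False return.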
def test_success(child):
server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=TEST_ANCOUNT,
qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) /
DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)),
an=(DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_AAAA,
rdlen=DNS_RR_TYPE_AAAA_DLEN,
rdata=TEST_AAAA_DATA) /
DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_A,
rdlen=DNS_RR_TYPE_A_DLEN, rdata=TEST_A_DATA))))
assert(successful_dns_request(child, TEST_NAME, TEST_AAAA_DATA))
def test_timeout(child):
# listen but send no reply
server.listen()
assert(not successful_dns_request(child, TEST_NAME, TEST_AAAA_DATA))
def test_too_short_response(child):
server.listen(Raw(b"\x00\x00\x81\x00"))
assert(not successful_dns_request(child, TEST_NAME))
def test_qdcount_too_large1(child):
# as reported in https://github.com/RIOT-OS/RIOT/issues/10739
server.listen(base64.b64decode("AACEAwkmAAAAAAAAKioqKioqKioqKioqKioqKioqKio="))
assert(not successful_dns_request(child, TEST_NAME))
def test_qdcount_too_large2(child):
server.listen(DNS(qr=1, qdcount=40961, ancount=TEST_ANCOUNT,
qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) /
DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)),
an=(DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_AAAA,
rdlen=DNS_RR_TYPE_AAAA_DLEN,
rdata=TEST_AAAA_DATA) /
DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_A,
rdlen=DNS_RR_TYPE_A_DLEN, rdata=TEST_A_DATA))))
assert(not successful_dns_request(child, TEST_NAME))
def test_ancount_too_large1(child):
server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=2714,
qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) /
DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)),
an=(DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_AAAA,
rdlen=DNS_RR_TYPE_AAAA_DLEN,
rdata=TEST_AAAA_DATA) /
DNSRR(rrname=TEST_NAME, type=DNS_RR_TYPE_A,
rdlen=DNS_RR_TYPE_A_DLEN, rdata=TEST_A_DATA))))
assert(not successful_dns_request(child, TEST_NAME, TEST_AAAA_DATA))
def test_ancount_too_large2(child):
server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=19888,
qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) /
DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)),
an="\0"))
assert(not successful_dns_request(child, TEST_NAME))
def test_bad_compressed_message_query(child):
server.listen(DNS(qr=1, qdcount=1, ancount=1,
qd=DNS_MSG_COMP_MASK))
assert(not successful_dns_request(child, TEST_NAME))
def test_bad_compressed_message_answer(child):
server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=TEST_ANCOUNT,
qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) /
DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_A)),
an=DNS_MSG_COMP_MASK))
assert(not successful_dns_request(child, TEST_NAME))
def test_malformed_hostname_query(child):
server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=0,
qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) /
# need to use byte string here to induce wrong label
# lengths
b"\xafexample\x03org\x00\x00\x1c\x00\x01")))
assert(not successful_dns_request(child, TEST_NAME))
def test_malformed_hostname_answer(child):
server.listen(DNS(qr=1, qdcount=TEST_QDCOUNT, ancount=TEST_ANCOUNT,
qd=(DNSQR(qname=TEST_NAME, qtype=DNS_RR_TYPE_AAAA) /
|
mattcongy/piprobe
|
imports/pyTemperature.py
|
Python
|
mit
| 507
| 0.013807
|
import time
from datetime import datetime
class pyTemperature(object):
def __init__(self, date = datetime.now(), temp=None,pressure=None,humidity=None):
self.date = date
self.temperature = temp
        self.pressure = pressure
self.humidity = humidity
def printTemperature(self):
        print(self.date)
print("Temp: ")
print(self.temperature)
print("Press: ")
print(self.pressure)
print("Humidity: ")
print(self.humidity)
levilucio/SyVOLT | GM2AUTOSAR_MM/merge_inter_layer_rules/Himesis/HReconnectMatchElementsRHS.py | Python | mit | 6,605 | 0.008176
from core.himesis import Himesis, HimesisPostConditionPattern
import cPickle as pickle
from uuid import UUID
class HReconnectMatchElementsRHS(HimesisPostConditionPattern):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HReconnectMatchElementsRHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HReconnectMatchElementsRHS, self).__init__(name='HReconnectMatchElementsRHS', num_nodes=3, edges=[])
# Add the edges
self.add_edges([(2, 0), (0, 1)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_post__GM2AUTOSAR_MM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_action__"] = """#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
"""
self["name"] = """"""
self["GUID__"] = UUID('ce9c5429-6e4c-4782-a83a-17e240381cb6')
# Set the node attributes
self.vs[0]["mm__"] = """MT_post__match_contains"""
self.vs[0]["MT_label__"] = """3"""
self.vs[0]["GUID__"] = UUID('789662d8-ab7d-4640-a710-abbc847de320')
self.vs[1]["mm__"] = """MT_post__MetaModelElement_S"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[1]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[1]["GUID__"] = UUID('7e5e306f-cb65-40df-9e60-63b9fe83b79b')
self.vs[2]["mm__"] = """MT_post__MatchModel"""
self.vs[2]["MT_label__"] = """1"""
self.vs[2]["GUID__"] = UUID('3c85bf70-be4a-40d8-9bcb-c138195ad20e')
from HReconnectMatchElementsLHS import HReconnectMatchElementsLHS
self.pre = HReconnectMatchElementsLHS()
def action(self, PostNode, graph):
"""
Executable constraint code.
@param PostNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
def execute(self, packet, match):
"""
Transforms the current match of the packet according to the rule %s.
Pivots are also assigned, if any.
@param packet: The input packet.
@param match: The match to rewrite.
"""
graph = packet.graph
# Build a dictionary {label: node index} mapping each label of the pattern to a node in the graph to rewrite.
# Because of the uniqueness property of labels in a rule, we can store all LHS labels
# and subsequently add the labels corresponding to the nodes to be created.
labels = match.copy()
#===============================================================================
# Update attribute values
        #===============================================================================
#===============================================================================
# Create new nodes
#===============================================================================
# match_contains3
new_node = graph.add_node()
labels['3'] = new_node
graph.vs[new_node][Himesis.Constants.META_MODEL] = 'match_contains'
#===============================================================================
# Create new edges
#===============================================================================
# MatchModel1 -> match_contains3
graph.add_edges([(labels['1'], labels['3'])])
# match_contains3 -> MetaModelElement_S2
graph.add_edges([(labels['3'], labels['2'])])
#===============================================================================
# Set the output pivots
#===============================================================================
#===============================================================================
# Perform the post-action
#===============================================================================
try:
self.action(lambda i: graph.vs[labels[i]], graph)
except Exception, e:
raise Exception('An error has occurred while applying the post-action', e)
#===============================================================================
# Finally, delete nodes (this will automatically delete the adjacent edges)
#===============================================================================
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_sensor_regression.py | Python | bsd-3-clause | 2,578 | 0
"""
=====================================
Sensor space least squares regression
=====================================
Predict single trial activity from a continuous variable.
A single-trial regression is performed in each sensor and timepoint
individually, resulting in an Evoked object which contains the
regression coefficient (beta value) for each combination of sensor
and timepoint. Example also shows the T statistics and the associated
p-values.
Note that this example is for educational purposes and that the data used
here do not contain any significant effect.
(See Hauk et al. (2006). The time course of visual word recognition as
revealed by linear regression analysis of ERP data. Neuroimage.)
"""
# Authors: Tal Linzen <linzen@nyu.edu>
# Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.stats.regression import linear_regression
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, aud_r=2)
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False,
eog=False, exclude='bads')
# Reject some epochs based on amplitude
reject = dict(mag=5e-12)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=reject)
###############################################################################
# Run regression
names = ['intercept', 'trial-count']
intercept = np.ones((len(epochs),), dtype=float)  # np.float is a deprecated alias of the builtin
design_matrix = np.column_stack([intercept, # intercept
np.linspace(0, 1, len(intercept))])
# also accepts source estimates
lm = linear_regression(epochs, design_matrix, names)
def plot_topomap(x, units):
x.plot_topomap(ch_type='mag', scalings=1., size=1.5, vmax=np.max,
units=units, times=np.linspace(0.1, 0.2, 5))
trial_count = lm['trial-count']
plot_topomap(trial_count.beta, units='z (beta)')
plot_topomap(trial_count.t_val, units='t')
plot_topomap(trial_count.mlog10_p_val, units='-log10 p')
plot_topomap(trial_count.stderr, units='z (error)')
kkamkou/gitmostwanted.com | gitmostwanted/tasks/github.py | Python | mit | 1,028 | 0.002918
from gitmostwanted.app import celery, db
from gitmostwanted.lib.github.api import user_starred, user_starred_star
from gitmostwanted.models.repo import Repo
from gitmostwanted.models.user import UserAttitude
@celery.task()
def repo_starred_star(user_id: int, access_token: str):
starred, code = user_starred(access_token)
if not starred:
return False
attitudes = UserAttitude.list_liked_by_user(user_id)
lst_in = [repo_like(s['full_name'], user_id) for s in starred
if not [a for a in attitudes
if s['full_name'] == a.repo.full_name]]
lst_out = [user_starred_star(r.repo.full_name, access_token) for r in attitudes
               if not [x for x in starred if x['full_name'] == r.repo.full_name]]
return len(lst_out), len(list(filter(None, lst_in)))
def repo_like(repo_name: str, uid: int):
repo = Repo.get_one_by_full_name(repo_name)
if not repo:
return None
db.session.merge(UserAttitude.like(uid, repo.id))
db.session.commit()
return repo.id
ioGrow/iogrowCRM | crm/iomodels/notes.py | Python | agpl-3.0 | 13,724 | 0.001749
import endpoints  # provides endpoints.NotFoundException used in get_schema below
from crm import model
from endpoints_proto_datastore.ndb import EndpointsModel
from google.appengine.api import search
from google.appengine.ext import ndb
from crm.model import Userinfo
from protorpc import messages
from crm.iograph import Edge
# The message class that defines the author schema
class AuthorSchema(messages.Message):
google_user_id = messages.StringField(1)
display_name = messages.StringField(2)
google_public_profile_url = messages.StringField(3)
photo = messages.StringField(4)
edgeKey = messages.StringField(5)
email = messages.StringField(6)
class DiscussionAboutSchema(messages.Message):
kind = messages.StringField(1)
id = messages.StringField(2)
name = messages.StringField(3)
class NoteSchema(messages.Message):
id = messages.StringField(1)
entityKey = messages.StringField(2)
title = messages.StringField(3)
content = messages.StringField(4)
comments = messages.IntegerField(5)
about = messages.MessageField(DiscussionAboutSchema, 6)
created_by = messages.MessageField(AuthorSchema, 7)
created_at = messages.StringField(8)
updated_at = messages.StringField(9)
class TopicSchema(messages.Message):
id = messages.StringField(1)
entityKey = messages.StringField(2)
last_updater = messages.MessageField(AuthorSchema, 3, required=True)
title = messages.StringField(4, required=True)
excerpt = messages.StringField(5)
topic_kind = messages.StringField(6)
created_at = messages.StringField(7)
updated_at = messages.StringField(8)
class TopicListResponse(messages.Message):
items = messages.MessageField(TopicSchema, 1, repeated=True)
nextPageToken = messages.StringField(2)
class Topic(EndpointsModel):
_message_fields_schema = (
'id', 'title', 'entityKey', 'last_updater', 'updated_at', 'excerpt', 'discussionId', 'created_at')
last_updater = ndb.StructuredProperty(Userinfo)
created_at = ndb.DateTimeProperty(auto_now_add=True)
updated_at = ndb.DateTimeProperty(auto_now=True)
title = ndb.StringProperty()
# about 100 characters from the beginning of this topic
excerpt = ndb.TextProperty()
# number of comments in this topic
comments = ndb.IntegerProperty(default=0)
# A Topic is attached to an object for example Account or Opportunity..
about_kind = ndb.StringProperty()
about_item = ndb.StringProperty()
# a key reference to the account's organization
# Should be required
discussionKind = ndb.StringProperty()
discussionId = ndb.StringProperty()
organization = ndb.KeyProperty()
class Note(EndpointsModel):
# _message_fields_schema = ('id','title')
author = ndb.StructuredProperty(Userinfo)
# Sharing fields
owner = ndb.StringProperty()
collaborators_list = ndb.StructuredProperty(model.Userinfo, repeated=True)
collaborators_ids = ndb.StringProperty(repeated=True)
created_at = ndb.DateTimeProperty(auto_now_add=True)
updated_at = ndb.DateTimeProperty(auto_now=True)
title = ndb.StringProperty(required=True)
content = ndb.TextProperty()
# number of comments in this topic
comments = ndb.IntegerProperty(default=0)
# A Topic is attached to an object for example Account or Opportunity..
about_kind = ndb.StringProperty()
about_item = ndb.StringProperty()
# a key reference to the account's organization
# Should be required
organization = ndb.KeyProperty()
# public or private
access = ndb.StringProperty()
def put(self, **kwargs):
ndb.Model.put(self, **kwargs)
self._setup()
try:
self.put_index()
        except Exception:
            print 'error on saving document index'
    def set_perm(self):
about_item = str(self.key.id())
perm = model.Permission(about_kind='Note',
about_item=about_item,
type='user',
role='owner',
value=self.owner)
perm.put()
def put_index(self, data=None):
"""
|
index the element at each"""
empty_string = lambda x: x if x else ""
collaborators = " ".join(self.collaborators_ids)
organization = str(self.organization.id())
if data:
search_key = ['topics', 'tags']
for key in search_key:
if key not in data.keys():
data[key] = ""
my_document = search.Document(
doc_id=str(data['id']),
fields=[
search.TextField(name=u'type', value=u'Note'),
search.TextField(name='organization', value=empty_string(organization)),
search.TextField(name='access', value=empty_string(self.access)),
search.TextField(name='owner', value=empty_string(self.owner)),
search.TextField(name='collaborators', value=collaborators),
search.TextField(name='title', value=empty_string(self.title)),
search.TextField(name='content', value=empty_string(self.content)),
search.TextField(name='about_kind', value=empty_string(self.about_kind)),
search.TextField(name='about_item', value=empty_string(self.about_item)),
search.DateField(name='created_at', value=self.created_at),
search.DateField(name='updated_at', value=self.updated_at),
search.NumberField(name='comments', value=self.comments),
search.TextField(name='tags', value=data['tags']),
search.TextField(name='topics', value=data['topics']),
])
else:
my_document = search.Document(
doc_id=str(self.key.id()),
fields=[
search.TextField(name=u'type', value=u'Note'),
search.TextField(name='organization', value=empty_string(organization)),
search.TextField(name='access', value=empty_string(self.access)),
search.TextField(name='owner', value=empty_string(self.owner)),
search.TextField(name='collaborators', value=collaborators),
search.TextField(name='title', value=empty_string(self.title)),
search.TextField(name='content', value=empty_string(self.content)),
search.TextField(name='about_kind', value=empty_string(self.about_kind)),
search.TextField(name='about_item', value=empty_string(self.about_item)),
search.DateField(name='created_at', value=self.created_at),
search.DateField(name='updated_at', value=self.updated_at),
search.NumberField(name='comments', value=self.comments),
])
my_index = search.Index(name="GlobalIndex")
my_index.put(my_document)
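        # Illustrative query sketch (not part of the original file): documents
        # indexed above can later be fetched through the same App Engine Search
        # API, e.g. (the query string below is made up):
        #   results = search.Index(name="GlobalIndex").search('type:Note AND owner:"user@example.com"')
        #   for doc in results:
        #       print doc.doc_id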
@classmethod
def get_schema(cls, user_from_email, request):
note = cls.get_by_id(int(request.id))
if note is None:
raise endpoints.NotFoundException('Note not found.')
author = AuthorSchema(
google_user_id=note.author.google_user_id,
display_name=note.author.display_name,
google_public_profile_url=note.author.google_public_profile_url,
photo=note.author.photo
)
about = None
edge_list = Edge.list(start_node=note.key, kind='parents')
for edge in edge_list['items']:
about_kind = edge.end_node.kind()
parent = edge.end_node.get()
if parent:
if about_kind == 'Contact' or about_kind == 'Lead':
if parent.lastname and parent.firstname:
about_name = parent.firstname + ' ' + parent.lastname
else:
if parent.lastname:
about_name = parent.lastname
else:
if parent.firstname:
about_name = parent.firstname
else:
IAPark/PITherm | src/shared/models/Mongo/state_change_repeating.py | Python | mit | 2,592 | 0.003086
from bson import ObjectId
from . import repeating_schedule
from state_change import StateChange
class StateChangeRepeating(StateChange):
def __init__(self, seconds_into_week, AC_target, heater_target, fan, id=None):
self.id = id
self.seconds_into_week = seconds_into_week
self.AC_target = AC_target
self.heater_target = heater_target
self.fan = fan
        # NOTE: the original checks here read `type(x) is int or long`, which
        # always passed: that parses as `(type(x) is int) or long`, and `long`
        # is truthy. isinstance() expresses the intended check.
        assert isinstance(seconds_into_week, (int, long))
        assert isinstance(AC_target, (int, float))
        assert isinstance(heater_target, (int, float))
        assert isinstance(fan, (int, float))
@classmethod
def from_dictionary(cls, json):
seconds_into_week = json["week_time"]
AC_target = json["state"]["AC_target"]
heater_target = json["state"]["heater_target"]
fan = json["state"]["fan"]
try:
id = ObjectId(json["_id"]["$oid"])
except KeyError:
id = None
except TypeError:
try:
id = ObjectId(json["_id"])
except:
id = None
return cls(seconds_into_week, AC_target, heater_target, fan, id=id)
@classmethod
def get_current(cls, now):
week_time = now.weekday() * 24 * 60 ** 2 + (now.hour * 60 + now.minute) * 60
result = repeating_schedule.aggregate(
[
{"$project": {
"time_delta": {"$mod": [{"$add": [{"$subtract": [week_time, "$week_time"]}, 24 * 7 * 60 ** 2]},
24 * 7 * 60 ** 2]},
"state": 1,
"week_time": 1}
},
{"$sort": {"time_delta": 1}}
]).next()
return cls.from_dictionary(result)
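    # Worked example of the aggregation above (illustrative, not part of the
    # original file): sorting ascending on (now - entry + WEEK) % WEEK picks the
    # schedule entry most recently in the past, wrapping across the week boundary.
    #   WEEK = 24 * 7 * 60 ** 2
    #   now_s, entries = 3600, [600000, 100]  # Monday 01:00, two stored entries
    #   min(entries, key=lambda e: (now_s - e + WEEK) % WEEK)  # -> 100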
def save(self):
delayed_state_change = {
"week_time": self.seconds_into_week,
"state": {"AC_target": self.AC_target, "heater_target": self.heater_target, "fan": self.fan}
}
if self.id is not None:
delayed_state_change["_id"] = self.id
return repeating_schedule.save(delayed_state_change)
def to_dictionary(self):
return {"week_time": self.seconds_into_week,
"_id": str(self.id),
"state": {"AC_target": self.AC_target,
"heater_target": self.heater_target,
"fan": self.fan}}
@classmethod
def get_all_dic(cls):
all_items = cls.get_all()
result = []
for item in all_items:
result.append(item.to_dictionary())
return result
NeCTAR-RC/horizon | openstack_dashboard/test/integration_tests/tests/test_users.py | Python | apache-2.0 | 1,687 | 0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import decorators
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestUser(helpers.AdminTestCase):
USER_NAME = helpers.gen_random_resource_name("user")
@decorators.skip_because(bugs=['1774697'])
def test_create_delete_user(self):
users_page = self.home_pg.go_to_identity_userspage()
password = self.TEST_PASSWORD
users_page.create_user(self.USER_NAME, password=password,
project='admin', role='admin')
self.assertTrue(users_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(users_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(users_page.is_user_present(self.USER_NAME))
users_page.delete_user(self.USER_NAME)
self.assertTrue(users_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(users_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(users_page.is_user_present(self.USER_NAME))
joehalloran/shoppinglist_project | shoppinglist/core/models.py | Python | apache-2.0 | 368 | 0.027174
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class TimeStampedModel(models.Model):
"""
    An abstract base class model that provides self-updating "created" and "modified" fields.
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
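# Illustrative subclass sketch (not part of the original app; the model and
# field are made up): concrete models inherit the timestamp fields for free.
class ShoppingItem(TimeStampedModel):
    name = models.CharField(max_length=100)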
kevbradwick/rockyroad | rockyroad/driver.py | Python | bsd-3-clause | 7,103 | 0.001971
import errno
import glob
import platform
import re
import sys
import tempfile
import zipfile
from contextlib import contextmanager
from distutils.version import StrictVersion
import os
import requests
from xml.etree import ElementTree
IS_64_BIT = sys.maxsize > 2**32
IS_LINUX = platform.system().lower() == 'linux'
IS_WINDOWS = platform.system().lower() == 'windows'
IS_MAC = platform.system().lower() == 'darwin'
UNKNOWN_PLATFORM = not IS_LINUX and not IS_WINDOWS
REPO_DIR = os.path.join(os.path.expanduser('~'), '.rockyroad')
@contextmanager
def download_file(url):
"""
Download a remote file to a temporary location.
:param url: the file url
"""
resp = requests.get(url, stream=True)
with tempfile.NamedTemporaryFile(delete=False) as fp:
name = fp.name
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fp.write(chunk)
yield name
fp.close()
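# Illustrative usage sketch (not part of the original file); the URL below is
# a placeholder:
#   with download_file('https://example.com/archive.zip') as tmp_name:
#       print(os.path.getsize(tmp_name))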
def _mkdirp(dirpath):
try:
os.makedirs(dirpath)
except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(dirpath):
            pass
        else:
            raise  # don't silently swallow unrelated OS errors
def _get_xml_ns(uri):
m = re.match(r'\{.*?\}', uri)
return m.group(0) if m else ''
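# Example (illustrative, not part of the original file): for a namespaced tag
# such as '{http://doc.s3.amazonaws.com/2006-03-01}ListBucketResult',
# _get_xml_ns returns '{http://doc.s3.amazonaws.com/2006-03-01}', which is then
# prefixed to child tag names in the findall() calls below.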
class Driver:
version = None
bit = None
repo_dir = os.path.join(os.path.expanduser('~'), '.rockyroad')
def __init__(self, version=None, bit=None):
if version:
self.version = str(version)
if not bit:
self.bit = '64' if IS_64_BIT else '32'
else:
self.bit = str(bit)
if hasattr(self, 'setup'):
self.setup()
def download(self):
"""Download the driver binary"""
raise NotImplementedError('You must implement download()')
def binary_path(self):
"""The absolute path to the driver binary"""
raise NotImplementedError('You must implement binary_path()')
def path(self):
"""
The absolute path to the driver
:return:
"""
if not os.path.exists(self.binary_path()):
self.download()
return self.binary_path()
class ChromeDriver(Driver):
versions = {}
_bin_path = None
def setup(self):
url = 'https://chromedriver.storage.googleapis.com/'
resp = requests.get(url)
tree = ElementTree.fromstring(resp.content)
ns = _get_xml_ns(tree.tag)
for elem in tree.findall('%sContents' % ns):
key = elem.find('%sKey' % ns)
            m = re.match(r'^([\d.]+?)/chromedriver_(linux|mac|win)(32|64)', key.text)
if m:
v = m.group(1) # version
p = m.group(2) # platform
b = m.group(3) # bit
if v not in self.versions:
self.versions[v] = {}
if p not in self.versions[v]:
self.versions[v][p] = {}
self.versions[v][p][b] = url + key.text
@property
def _platform(self):
if IS_WINDOWS:
return 'win'
elif IS_LINUX:
return 'linux'
elif IS_MAC:
return 'mac'
else:
raise RuntimeError('Unable to detect current platform')
def binary_path(self):
if self._bin_path:
return self._bin_path
if self.version and self.version not in self.versions:
raise RuntimeError('Chromedriver %s does not exist' % self.version)
if not self.version:
numbers = list(self.versions.keys())
numbers.sort(key=StrictVersion, reverse=True)
self.version = numbers[0]
bin_name = 'chromedriver.exe' if IS_WINDOWS else 'chromedriver'
self._bin_path = os.path.join(REPO_DIR, 'chromedriver', '%s-%s%s' %
(self.version, self._platform, self.bit,),
bin_name)
return self._bin_path
def download(self):
url = self.versions[self.version][self._platform][self.bit]
        destination_dir = os.path.dirname(self._bin_path)  # the old pathsep split/join left the path unchanged
with download_file(url) as name:
_mkdirp(destination_dir)
z = zipfile.ZipFile(name, 'r')
z.extractall(destination_dir)
z.close()
for filename in glob.iglob(destination_dir + '/*'):
                os.chmod(filename, 0o777)  # octal mode; decimal 777 sets the wrong bits
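# Illustrative usage sketch (not part of the original file): path() from the
# Driver base class downloads the binary on first use and returns its cached
# location afterwards.
#   driver_path = ChromeDriver().path()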
def download_chromedriver(version=None, bit=None):
"""
Download the chromedriver binary.
If version is not set, then it will get the latest one. If the bit value is
not set then it will use the same value as the current system
"""
url = 'https://chromedriver.storage.googleapis.com/'
resp = requests.get(url)
tree = ElementTree.fromstring(resp.content)
ns = _get_xml_ns(tree.tag)
if version:
version = str(version)
if bit:
bit = str(bit)
else:
bit = '64' if IS_64_BIT else '32'
versions = {}
for elem in tree.findall('%sContents' % ns):
key = elem.find('%sKey' % ns)
    m = re.match(r'^([\d.]+?)/chromedriver_(linux|mac|win)(32|64)', key.text)
if m:
v = m.group(1) # version
p = m.group(2) # platform
b = m.group(3) # bit
if v not in versions:
versions[v] = {}
if p not in versions[v]:
versions[v][p] = {}
versions[v][p][b] = url + key.text
if version and version not in versions:
raise RuntimeError('Chromedriver %s is not a valid version' % version)
if IS_WINDOWS:
p = 'win'
elif IS_LINUX:
p = 'linux'
elif IS_MAC:
p = 'mac'
else:
raise RuntimeError('Unable to detect current platform')
    if version:
        # `bit` is always set above, so the old `if bit is None` branch was
        # dead code (and would have raised a KeyError anyway).
        if bit not in versions[version][p]:
            raise RuntimeError('Invalid bit value %s' % bit)
        download_url = versions[version][p][bit]
else:
# get latest version
numbers = list(versions.keys())
numbers.sort(key=StrictVersion, reverse=True)
version = numbers[0]
download_url = versions[version][p][bit]
destination_dir = os.path.join(REPO_DIR, 'chromedriver',
'%s-%s%s' % (version, p, bit,))
    if os.path.isdir(destination_dir):
return destination_dir
# download an unzip to repo directory
with download_file(download_url) as name:
_mkdirp(destination_dir)
z = zipfile.ZipFile(name, 'r')
z.extractall(destination_dir)
z.close()
    for filename in glob.iglob(destination_dir + '/*'):
        os.chmod(filename, 0o777)  # octal mode; decimal 777 sets the wrong bits
return destination_dir
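# Illustrative usage sketch (not part of the original file): both arguments are
# optional; omitting them selects the newest release for the current machine.
#   chromedriver_dir = download_chromedriver()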
def get_binary(name, arch=None, version=None):
"""
Get the driver binary.
This will check the cache location to see if it has already been downloaded
and return its path. If it is not in the cache then it will be downloaded.
:param name: the binary name chromedriver,
:param arch:
:param version:
:return:
"""
Ouranosinc/Magpie | tests/test_magpie_ui.py | Python | apache-2.0 | 50,956 | 0.006084
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_magpie_ui
----------------------------------
Tests for :mod:`magpie.ui` module.
"""
import re
import unittest
from typing import TYPE_CHECKING
from six.moves.urllib.parse import urlparse
# NOTE: must be imported without 'from', otherwise the interface's test cases are also executed
import tests.interfaces as ti
from magpie.constants import get_constant
from magpie.models import Route, UserGroupStatus
from magpie.permissions import Access, Permission, PermissionSet, PermissionType, Scope
from magpie.services import ServiceAPI, ServiceWPS
from tests import runner, utils
from tests.utils import TestVersion
if TYPE_CHECKING:
from typing import Union
from magpie.typedefs import Str
@runner.MAGPIE_TEST_UI
@runner.MAGPIE_TEST_LOCAL
class TestCase_MagpieUI_NoAuth_Local(ti.Interface_MagpieUI_NoAuth, unittest.TestCase):
# pylint: disable=C0103,invalid-name
"""
Test any operation that do not require any AuthN/AuthZ (``MAGPIE_ANONYMOUS_GROUP`` & ``MAGPIE_ANONYMOUS_USER``).
Use a local Magpie test application.
"""
__test__ = True
@classmethod
def setUpClass(cls):
cls.app = utils.get_test_magpie_app()
cls.url = cls.app # to simplify calls of TestSetup (all use .url)
cls.cookies = None
# note: admin credentials to setup data on test instance as needed, but not to be used for these tests
cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME", raise_missing=False, raise_not_set=False)
cls.pwd = get_constant("MAGPIE_TEST_ADMIN_PASSWORD", raise_missing=False, raise_not_set=False)
cls.setup_admin()
cls.test_user_name = get_constant("MAGPIE_TEST_USER", default_value="unittest-no-auth_ui-user-local",
raise_missing=False, raise_not_set=False)
cls.test_group_name = get_constant("MAGPIE_TEST_GROUP", default_value="unittest-no-auth_ui-group-local",
raise_missing=False, raise_not_set=False)
cls.test_service_type = ServiceWPS.service_type
cls.test_service_name = "magpie-unittest-service-wps"
@runner.MAGPIE_TEST_UI
@runner.MAGPIE_TEST_LOCAL
class TestCase_MagpieUI_UsersAuth_Local(ti.Interface_MagpieUI_UsersAuth, unittest.TestCase):
# pylint: disable=C0103,invalid-name
"""
Test any operation that require logged user AuthN/AuthZ, but lower than ``MAGPIE_ADMIN_GROUP``.
Use a local Magpie test application.
"""
__test__ = True
@classmethod
def setUpClass(cls):
cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME", raise_missing=False, raise_not_set=False)
cls.pwd = get_constant("MAGPIE_TEST_ADMIN_PASSWORD", raise_missing=False, raise_not_set=False)
cls.app = utils.get_test_magpie_app()
cls.url = cls.app # to simplify calls of TestSetup (all use .url)
cls.version = utils.TestSetup.get_Version(cls)
cls.setup_admin()
cls.login_admin()
cls.test_user_name = get_constant("MAGPIE_TEST_USER", default_value="unittest-user-auth_ui-user-local",
raise_missing=False, raise_not_set=False)
cls.test_group_name = get_constant("MAGPIE_TEST_GROUP", default_value="unittest-user-auth_ui-group-local",
raise_missing=False, raise_not_set=False)
@runner.MAGPIE_TEST_UI
@runner.MAGPIE_TEST_LOCAL
class TestCase_MagpieUI_AdminAuth_Local(ti.Interface_MagpieUI_AdminAuth, unittest.TestCase):
# pylint: disable=C0103,invalid-name
"""
Test any operation that require at least ``MAGPIE_ADMIN_GROUP`` AuthN/AuthZ.
Use a local Magpie test application.
"""
__test__ = True
@classmethod
def setUpClass(cls):
cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME", raise_missing=False, raise_not_set=False)
cls.pwd = get_constant("MAGPIE_TEST_ADMIN_PASSWORD", raise_missing=False, raise_not_set=False)
cls.app = utils.get_test_magpie_app()
cls.url = cls.app # to simplify calls of TestSetup (all use .url)
cls.cookies = None
cls.version = utils.TestSetup.get_Version(cls)
cls.headers, cls.cookies = utils.check_or_try_login_user(cls.url, cls.usr, cls.pwd, use_ui_form_submit=True)
cls.require = "cannot run tests without logged in user with '{}' permissions".format(cls.grp)
cls.setup_admin()
cls.login_admin()
cls.test_user_name = get_constant("MAGPIE_TEST_USER", default_value="unittest-admin-auth_ui-user-local",
raise_missing=False, raise_not_set=False)
cls.test_group_name = get_constant("MAGPIE_TEST_GROUP", default_value="unittest-admin-auth_ui-group-local",
raise_missing=False, raise_not_set=False)
cls.test_service_type = ServiceAPI.service_type
cls.test_service_name = "magpie-unittest-ui-admin-local-service"
cls.test_resource_type = Route.resource_type_name
cls.test_service_parent_resource_type = ServiceAPI.service_type
cls.test_service_parent_resource_name = "magpie-unittest-ui-tree-parent"
cls.test_service_child_resource_type = Route.resource_type_name
cls.test_service_child_resource_name = "magpie-unittest-ui-tree-child"
@runner.MAGPIE_TEST_STATUS
@runner.MAGPIE_TEST_FUNCTIONAL
def test_EditService_Goto_AddChild_BackTo_EditService(self):
"""
Verifies that UI button redirects are working for the following workflow:
0. Starting on "Service View", press "Add Child" button (redirects to "New Resource" form)
1. Fill form and press "Add" button (creates the service resource and redirects to "Service View")
2. Back on "Service View", <new-resource> is visible in the list.
Note:
Only implemented locally with form submission of ``TestApp``.
"""
try:
# make sure any sub-resource are all deleted to avoid conflict, then recreate service to add sub-resource
utils.TestSetup.delete_TestService(self, override_service_name=self.test_service_parent_resource_name)
body = utils.TestSetup.create_TestService(self,
override_service_name=self.test_service_parent_resource_name,
override_service_type=self.test_service_parent_resource_type)
svc_res_id = body["service"]["resource_id"]
form = {"add_child": None, "resource_id": str(svc_res_id)}
path = "/ui/services/{}/{}".format(self.test_service_parent_resource_type,
self.test_service_parent_resource_name)
resp = utils.TestSetup.check_FormSubmit(self, form_match=form, form_submit="add_child", path=path)
utils.check_val_is_in("New Resource", resp.text, msg=utils.null) # add resource page reached
data = {
"resource_name": self.test_service_child_resource_name,
"resource_type": self.test_service_child_resource_type,
}
resp = utils.TestSetup.check_FormSubmit(self, form_match="add_resource_form", form_submit="add_child",
form_data=data, previous_response=resp)
            for res_name in (self.test_service_parent_resource_name,
                             self.test_service_child_resource_name):
if TestVersion(self.version) <= TestVersion("3.20.1"):
if TestVersion(self.version) >= TestVersion("3.0"):
find = "<div class=\"tree-key\">{}</div>".format(res_name)
text = resp.text.replace("\n", "").replace(" ", "") # ignore formatting of source file
else:
find = "<div class=\"tree-item\">{}</div>".format(res_name)
text
dbentley/pants | src/python/pants/backend/docgen/tasks/markdown_to_html.py | Python | apache-2.0 | 8,705 | 0.00919
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import codecs
import os
import re
from pkg_resources import resource_string
from pygments.formatters.html import HtmlFormatter
from pygments.styles import get_all_styles
from pants.backend.docgen.targets.doc import Page
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.generator import Generator
from pants.base.workunit import WorkUnitLabel
from pants.binaries import binary_util
from pants.build_graph.address import Address
from pants.task.task import Task
from pants.util.dirutil import safe_mkdir
def util():
"""Indirection function so we can lazy-import our utils.
It's an expensive import that invokes re.compile a lot (via markdown and pygments),
so we don't want to incur that cost unless we must.
"""
from pants.backend.docgen.tasks import markdown_to_html_utils
return markdown_to_html_utils
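# The same lazy-import indirection in miniature (illustrative, not part of
# pants): the import cost is paid on first call instead of at module load.
#   def heavy_utils():
#       import json  # stand-in for an expensive module
#       return json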
class MarkdownToHtml(Task):
"""Generate HTML from Markdown docs."""
@classmethod
def register_options(cls, register):
register('--code-style', choices=list(get_all_styles()), default='friendly',
fingerprint=True,
help='Use this stylesheet for code highlights.')
register('--open', type=bool,
help='Open the generated documents in a browser.')
register('--fragment', type=bool,
fingerprint=True,
help='Generate a fragment of html to embed in a page.')
register('--ignore-failure', type=bool,
fingerprint=True,
help='Do not consider rendering errors to be build errors.')
@classmethod
def product_types(cls):
return ['markdown_html', 'wiki_html']
def __init__(self, *args, **kwargs):
super(MarkdownToHtml, self).__init__(*args, **kwargs)
self._templates_dir = os.path.join('templates', 'markdown')
self.open = self.get_options().open
self.fragment = self.get_options().fragment
self.code_style = self.get_options().code_style
def execute(self):
# TODO(John Sirois): consider adding change detection
outdir = os.path.join(self.get_options().pants_distdir, 'markdown')
css_path = os.path.join(outdir, 'css', 'codehighlight.css')
css = util().emit_codehighlight_css(css_path, self.code_style)
if css:
self.context.log.info('Emitted {}'.format(css))
def is_page(target):
return isinstance(target, Page)
roots = set()
interior_nodes = set()
if self.open:
dependencies_by_page = self.context.dependents(on_predicate=is_page, from_predicate=is_page)
roots.update(dependencies_by_page.keys())
for dependencies in dependencies_by_page.values():
interior_nodes.update(dependencies)
roots.difference_update(dependencies)
for page in self.context.targets(is_page):
      # There are no in or out edges so we need to show this isolated page.
if not page.dependencies and page not in interior_nodes:
roots.add(page)
with self.context.new_workunit(name='render', labels=[WorkUnitLabel.MULTITOOL]):
plaingenmap = self.context.products.get('markdown_html')
wikigenmap = self.context.products.get('wiki_html')
show = []
for page in self.context.targets(is_page):
def process_page(key, outdir, url_builder, genmap, fragment=False):
if page.format == 'rst':
with self.context.new_workunit(name='rst') as workunit:
html_path = self.process_rst(
workunit,
page,
os.path.join(outdir, util().page_to_html_path(page)),
os.path.join(page.payload.sources.rel_path, page.source),
self.fragment or fragment,
)
else:
with self.context.new_workunit(name='md'):
html_path = self.process_md(
os.path.join(outdir, util().page_to_html_path(page)),
os.path.join(page.payload.sources.rel_path, page.source),
self.fragment or fragment,
url_builder,
css=css,
)
self.context.log.info('Processed {} to {}'.format(page.source, html_path))
relpath = os.path.relpath(html_path, outdir)
genmap.add(key, outdir, [relpath])
return html_path
def url_builder(linked_page):
dest = util().page_to_html_path(linked_page)
src_dir = os.path.dirname(util().page_to_html_path(page))
return linked_page.name, os.path.relpath(dest, src_dir)
page_path = os.path.join(outdir, 'html')
html = process_page(page, page_path, url_builder, plaingenmap)
if css and not self.fragment:
          plaingenmap.add(page, self.workdir, [css_path])  # list() on a str would split it into characters
if self.open and page in roots:
show.append(html)
if page.provides:
for wiki in page.provides:
basedir = os.path.join(self.workdir, str(hash(wiki)))
process_page((wiki, page), basedir, wiki.wiki.url_builder, wikigenmap, fragment=True)
if show:
binary_util.ui_open(*show)
PANTS_LINK = re.compile(r'''pants\(['"]([^)]+)['"]\)(#.*)?''')
def process_md(self, output_path, source, fragmented, url_builder, css=None):
def parse_url(spec):
match = self.PANTS_LINK.match(spec)
if match:
address = Address.parse(match.group(1), relative_to=get_buildroot())
page = self.context.build_graph.get_target(address)
anchor = match.group(2) or ''
if not page:
raise TaskError('Invalid markdown link to pants target: "{}". '.format(match.group(1)) +
'Is your page missing a dependency on this target?')
alias, url = url_builder(page)
return alias, url + anchor
else:
return spec, spec
def build_url(label):
components = label.split('|', 1)
if len(components) == 1:
return parse_url(label.strip())
else:
alias, link = components
_, url = parse_url(link.strip())
return alias, url
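    # Illustrative examples (not part of the original file) of the label formats
    # handled above; the resolved aliases/paths are made up:
    #   build_url("pants('src/docs:readme')")         -> ('readme', '../docs/readme.html')
    #   build_url("Intro | pants('src/docs:intro')")  -> ('Intro ', '../docs/intro.html')
    #   build_url("https://example.com")              -> ('https://example.com', 'https://example.com')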
wikilinks = util().WikilinksExtension(build_url)
safe_mkdir(os.path.dirname(output_path))
with codecs.open(output_path, 'w', 'utf-8') as output:
source_path = os.path.join(get_buildroot(), source)
with codecs.open(source_path, 'r', 'utf-8') as source_stream:
md_html = util().markdown.markdown(
source_stream.read(),
extensions=['codehilite(guess_lang=False)',
'extra',
'tables',
'toc',
wikilinks,
util().IncludeExcerptExtension(source_path)],
)
if fragmented:
style_css = (HtmlFormatter(style=self.code_style)).get_style_defs('.codehilite')
template = resource_string(__name__,
os.path.join(self._templates_dir, 'fragment.mustache'))
generator = Generator(template, style_css=style_css, md_html=md_html)
generator.write(output)
else:
style_link = os.path.relpath(css, os.path.dirname(output_path))
template = resource_string(__name__, os.path.join(self._templates_dir, 'page.mustache'))
generator = Generator(template, style_link=style_link, md_html=md_html)
generator.write(output)
return output.name
def process_rst(self, workunit, page, output_path, source, fragmented):
source_path = os.path.join(get_buildroot(), source)
with codecs.open(source_path, 'r', 'utf-8') as source_stream:
rst_html, returncode = util().rst_to_html(source_stream.read(),
stderr=workunit.output('stderr'))
if returncode != 0:
message = '{} rendered with errors.'.format(source_path)
if self.get_options().ignore_failure:
self.context.log.warn(message
jakubroztocil/httpie | tests/test_output.py | Python | bsd-3-clause | 18,705 | 0.00016
import argparse
from pathlib import Path
from unittest import mock
import json
import os
import io
from urllib.request import urlopen
import pytest
import requests
import responses
from httpie.cli.argtypes import (
PARSED_DEFAULT_FORMAT_OPTIONS,
parse_format_options,
)
from httpie.cli.definition import parser
from httpie.encoding import UTF8
from httpie.output.formatters.colors import get_lexer
from httpie.status import ExitStatus
from .fixtures import XML_DATA_RAW, XML_DATA_FORMATTED
from .utils import COLOR, CRLF, HTTP_OK, MockEnvironment, http, DUMMY_URL
@pytest.mark.parametrize('stdout_isatty', [True, False])
def test_output_option(tmp_path, httpbin, stdout_isatty):
output_filename = tmp_path / 'test_output_option'
url = httpbin + '/robots.txt'
r = http('--output', str(output_filename), url,
env=MockEnvironment(stdout_isatty=stdout_isatty))
assert r == ''
expected_body = urlopen(url).read().decode()
actual_body = output_filename.read_text(encoding=UTF8)
assert actual_body == expected_body
class TestQuietFlag:
QUIET_SCENARIOS = [('--quiet',), ('-q',), ('--quiet', '--quiet'), ('-qq',)]
@pytest.mark.parametrize('quiet_flags', QUIET_SCENARIOS)
def test_quiet(self, httpbin, quiet_flags):
env = MockEnvironment(
stdin_isatty=True,
stdout_isatty=True,
devnull=io.BytesIO()
)
r = http(*quiet_flags, 'GET', httpbin.url + '/get', env=env)
assert env.stdout is env.devnull
assert env.stderr is env.devnull
assert HTTP_OK in r.devnull
assert r == ''
assert r.stderr == ''
def test_quiet_with_check_status_non_zero(self, httpbin):
r = http(
'--quiet', '--check-status', httpbin + '/status/500',
tolerate_error_exit_status=True,
)
assert 'http: warning: HTTP 500' in r.stderr
def test_quiet_with_check_status_non_zero_pipe(self, httpbin):
r = http(
'--quiet', '--check-status', httpbin + '/status/500',
tolerate_error_exit_status=True,
env=MockEnvironment(stdout_isatty=False)
)
        assert 'http: warning: HTTP 500' in r.stderr
def test_quiet_quiet_with_check_status_non_zero(self, httpbin):
r = http(
'--quiet', '--quiet', '--check-status', httpbin + '/status/500',
tolerate_error_exit_status=True,
)
assert not r.stderr
    def test_quiet_quiet_with_check_status_non_zero_pipe(self, httpbin):
r = http(
'--quiet', '--quiet', '--check-status', httpbin + '/status/500',
tolerate_error_exit_status=True,
env=MockEnvironment(stdout_isatty=False)
)
assert 'http: warning: HTTP 500' in r.stderr
@pytest.mark.parametrize('quiet_flags', QUIET_SCENARIOS)
@mock.patch('httpie.cli.argtypes.AuthCredentials._getpass',
new=lambda self, prompt: 'password')
def test_quiet_with_password_prompt(self, httpbin, quiet_flags):
"""
Tests whether httpie still prompts for a password when request
requires authentication and only username is provided
"""
env = MockEnvironment(
stdin_isatty=True,
stdout_isatty=True,
devnull=io.BytesIO()
)
r = http(
*quiet_flags, '--auth', 'user', 'GET',
httpbin.url + '/basic-auth/user/password',
env=env
)
assert env.stdout is env.devnull
assert env.stderr is env.devnull
assert HTTP_OK in r.devnull
assert r == ''
assert r.stderr == ''
@pytest.mark.parametrize('quiet_flags', QUIET_SCENARIOS)
@pytest.mark.parametrize('output_options', ['-h', '-b', '-v', '-p=hH'])
def test_quiet_with_explicit_output_options(self, httpbin, quiet_flags, output_options):
env = MockEnvironment(stdin_isatty=True, stdout_isatty=True)
r = http(*quiet_flags, output_options, httpbin.url + '/get', env=env)
assert env.stdout is env.devnull
assert env.stderr is env.devnull
assert r == ''
assert r.stderr == ''
@pytest.mark.parametrize('quiet_flags', QUIET_SCENARIOS)
@pytest.mark.parametrize('with_download', [True, False])
def test_quiet_with_output_redirection(self, tmp_path, httpbin, quiet_flags, with_download):
url = httpbin + '/robots.txt'
output_path = Path('output.txt')
env = MockEnvironment()
orig_cwd = os.getcwd()
output = requests.get(url).text
extra_args = ['--download'] if with_download else []
os.chdir(tmp_path)
try:
assert os.listdir('.') == []
r = http(
*quiet_flags,
'--output', str(output_path),
*extra_args,
url,
env=env
)
assert os.listdir('.') == [str(output_path)]
assert r == ''
assert r.stderr == ''
assert env.stderr is env.devnull
if with_download:
assert env.stdout is env.devnull
else:
assert env.stdout is not env.devnull # --output swaps stdout.
assert output_path.read_text(encoding=UTF8) == output
finally:
os.chdir(orig_cwd)
class TestVerboseFlag:
def test_verbose(self, httpbin):
r = http('--verbose',
'GET', httpbin.url + '/get', 'test-header:__test__')
assert HTTP_OK in r
assert r.count('__test__') == 2
def test_verbose_raw(self, httpbin):
r = http('--verbose', '--raw', 'foo bar',
'POST', httpbin.url + '/post')
assert HTTP_OK in r
assert 'foo bar' in r
def test_verbose_form(self, httpbin):
# https://github.com/httpie/httpie/issues/53
r = http('--verbose', '--form', 'POST', httpbin.url + '/post',
'A=B', 'C=D')
assert HTTP_OK in r
assert 'A=B&C=D' in r
def test_verbose_json(self, httpbin):
r = http('--verbose',
'POST', httpbin.url + '/post', 'foo=bar', 'baz=bar')
assert HTTP_OK in r
assert '"baz": "bar"' in r
def test_verbose_implies_all(self, httpbin):
r = http('--verbose', '--follow', httpbin + '/redirect/1')
assert 'GET /redirect/1 HTTP/1.1' in r
assert 'HTTP/1.1 302 FOUND' in r
assert 'GET /get HTTP/1.1' in r
assert HTTP_OK in r
class TestColors:
@pytest.mark.parametrize(
'mime, explicit_json, body, expected_lexer_name',
[
('application/json', False, None, 'JSON'),
('application/json+foo', False, None, 'JSON'),
('application/foo+json', False, None, 'JSON'),
('application/json-foo', False, None, 'JSON'),
('application/x-json', False, None, 'JSON'),
('foo/json', False, None, 'JSON'),
('foo/json+bar', False, None, 'JSON'),
('foo/bar+json', False, None, 'JSON'),
('foo/json-foo', False, None, 'JSON'),
('foo/x-json', False, None, 'JSON'),
('application/vnd.comverge.grid+hal+json', False, None, 'JSON'),
('text/plain', True, '{}', 'JSON'),
('text/plain', True, 'foo', 'Text only'),
]
)
def test_get_lexer(self, mime, explicit_json, body, expected_lexer_name):
lexer = get_lexer(mime, body=body, explicit_json=explicit_json)
assert lexer is not None
assert lexer.name == expected_lexer_name
def test_get_lexer_not_found(self):
assert get_lexer('xxx/yyy') is None
class TestPrettyOptions:
"""Test the --pretty handling."""
def test_pretty_enabled_by_default(self, httpbin):
env = MockEnvironment(colors=256)
r = http('GET', httpbin.url + '/get', env=env)
assert COLOR in r
def test_pretty_enabled_by_default_unless_stdout_redirected(self, httpbin):
r = http('GET', httpbin.url + '/get')
assert COLOR not in r
def test_force_pretty(self, httpbin):
env = MockEnvironment(stdout_isa
olexiim/edx-platform | common/djangoapps/student/tests/test_roles.py | Python | agpl-3.0 | 7,708 | 0.001816
"""
Tests of student.roles
"""
import ddt
from django.test import TestCase
from courseware.tests.factories import UserFactory, StaffFactory, InstructorFactory
from student.tests.factories import AnonymousUserFactory
from student.roles import (
GlobalStaff, CourseRole, CourseStaffRole, CourseInstructorRole,
OrgStaffRole, OrgInstructorRole, RoleCache, CourseBetaTesterRole
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class RolesTestCase(TestCase):
"""
Tests of student.roles
"""
def setUp(self):
self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
self.course_loc = self.course_key.make_usage_key('course', '2012_Fall')
self.anonymous_user = AnonymousUserFactory()
self.student = UserFactory()
self.global_staff = UserFactory(is_staff=True)
self.course_staff = StaffFactory(course_key=self.course_key)
        self.course_instructor = InstructorFactory(course_key=self.course_key)
def test_global_staff(self):
self.assertFalse(GlobalStaff().has_user(self.student))
        self.assertFalse(GlobalStaff().has_user(self.course_staff))
self.assertFalse(GlobalStaff().has_user(self.course_instructor))
self.assertTrue(GlobalStaff().has_user(self.global_staff))
def test_group_name_case_sensitive(self):
uppercase_course_id = "ORG/COURSE/NAME"
lowercase_course_id = uppercase_course_id.lower()
uppercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(uppercase_course_id)
lowercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(lowercase_course_id)
role = "role"
lowercase_user = UserFactory()
CourseRole(role, lowercase_course_key).add_users(lowercase_user)
uppercase_user = UserFactory()
CourseRole(role, uppercase_course_key).add_users(uppercase_user)
self.assertTrue(CourseRole(role, lowercase_course_key).has_user(lowercase_user))
self.assertFalse(CourseRole(role, uppercase_course_key).has_user(lowercase_user))
self.assertFalse(CourseRole(role, lowercase_course_key).has_user(uppercase_user))
self.assertTrue(CourseRole(role, uppercase_course_key).has_user(uppercase_user))
def test_course_role(self):
"""
Test that giving a user a course role enables access appropriately
"""
self.assertFalse(
CourseStaffRole(self.course_key).has_user(self.student),
"Student has premature access to {}".format(self.course_key)
)
CourseStaffRole(self.course_key).add_users(self.student)
self.assertTrue(
CourseStaffRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# remove access and confirm
CourseStaffRole(self.course_key).remove_users(self.student)
self.assertFalse(
CourseStaffRole(self.course_key).has_user(self.student),
"Student still has access to {}".format(self.course_key)
)
def test_org_role(self):
"""
Test that giving a user an org role enables access appropriately
"""
self.assertFalse(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student has premature access to {}".format(self.course_key.org)
)
OrgStaffRole(self.course_key.org).add_users(self.student)
self.assertTrue(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key.org))
)
# remove access and confirm
OrgStaffRole(self.course_key.org).remove_users(self.student)
if hasattr(self.student, '_roles'):
del self.student._roles
self.assertFalse(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student still has access to {}".format(self.course_key.org)
)
def test_org_and_course_roles(self):
"""
Test that Org and course roles don't interfere with course roles or vice versa
"""
OrgInstructorRole(self.course_key.org).add_users(self.student)
CourseInstructorRole(self.course_key).add_users(self.student)
self.assertTrue(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key.org))
)
self.assertTrue(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# remove access and confirm
OrgInstructorRole(self.course_key.org).remove_users(self.student)
self.assertFalse(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student still has access to {}".format(self.course_key.org)
)
self.assertTrue(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# ok now keep org role and get rid of course one
OrgInstructorRole(self.course_key.org).add_users(self.student)
CourseInstructorRole(self.course_key).remove_users(self.student)
self.assertTrue(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student lost has access to {}".format(self.course_key.org)
)
self.assertFalse(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
def test_get_user_for_role(self):
"""
test users_for_role
"""
role = CourseStaffRole(self.course_key)
role.add_users(self.student)
self.assertGreater(len(role.users_with_role()), 0)
def test_add_users_doesnt_add_duplicate_entry(self):
"""
Tests that calling add_users multiple times before a single call
to remove_users does not result in the user remaining in the group.
"""
role = CourseStaffRole(self.course_key)
role.add_users(self.student)
self.assertTrue(role.has_user(self.student))
# Call add_users a second time, then remove just once.
role.add_users(self.student)
role.remove_users(self.student)
self.assertFalse(role.has_user(self.student))
@ddt.ddt
class RoleCacheTestCase(TestCase):
IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
NOT_IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2013_Fall')
ROLES = (
(CourseStaffRole(IN_KEY), ('staff', IN_KEY, 'edX')),
(CourseInstructorRole(IN_KEY), ('instructor', IN_KEY, 'edX')),
(OrgStaffRole(IN_KEY.org), ('staff', None, 'edX')),
(OrgInstructorRole(IN_KEY.org), ('instructor', None, 'edX')),
(CourseBetaTesterRole(IN_KEY), ('beta_testers', IN_KEY, 'edX')),
)
def setUp(self):
self.user = UserFactory()
@ddt.data(*ROLES)
@ddt.unpack
def test_only_in_role(self, role, target):
role.add_users(self.user)
cache = RoleCache(self.user)
self.assertTrue(cache.has_role(*target))
for other_role, other_target in self.ROLES:
if other_role == role:
continue
self.assertFalse(cache.has_role(*other_target))
@ddt.data(*ROLES)
@ddt.unpack
def test_empty_cache(self, role, target):
cache = RoleCache(self.user)
self.assertFalse(cache.has_role(*target))
MrYsLab/razmq | hardware_baseline/encoders/left_encoder.py | Python | gpl-3.0 | 491 | 0.004073
import pigpio
import time
class LeftEncoder:
def __init__(self, pin=24):
self.pi = pigpio.pi()
self.pin = pin
        self.pi.set_mode(pin, pigpio.INPUT)
self.pi.set_pull_up_down(pin, pigpio.PUD_UP)
        self.cb1 = self.pi.callback(pin, pigpio.EITHER_EDGE, self.cbf)  # keep a handle so it can be cancelled later
self.tick = 0
def cbf(self, gpio, level, tick):
# print(gpio, level, tick)
print(self.tick)
self.tick += 1
e = LeftEncoder()
while True:
time.sleep(.01)
mapycz/mapnik | scons/scons-local-3.0.1/SCons/Tool/latex.py | Python | lgpl-2.1 | 2,759 | 0.004349
"""SCons.Tool.latex
Tool-specific initialization for LaTeX.
Generates .dvi files from .latex or .ltx files
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/latex.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog"
import SCons.Action
import SCons.Defaults
import SCons.Scanner.LaTeX
import SCons.Util
import SCons.Tool
import SCons.Tool.tex
def LaTeXAuxFunction(target = None, source= None, env=None):
result = SCons.Tool.tex.InternalLaTeXAuxAction( SCons.Tool.tex.LaTeXAction, target, source, env )
if result != 0:
SCons.Tool.tex.check_file_error_message(env['LATEX'])
return result
LaTeXAuxAction = SCons.Action.Action(LaTeXAuxFunction,
                              strfunction=SCons.Tool.tex.TeXLaTeXStrFunction)
def generate(env):
"""Add Builders and construction variables for LaTeX to an Environment."""
env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
from . import dvi
dvi.generate(env)
from . import pdf
pdf.generate(env)
bld = env['BUILDERS']['DVI']
bld.add_action('.ltx', LaTeXAuxAction)
bld.add_action('.latex', LaTeXAuxAction)
bld.add_emitter('.ltx', SCons.Tool.tex.tex_eps_emitter)
bld.add_emitter('.latex', SCons.Tool.tex.tex_eps_emitter)
SCons.Tool.tex.generate_common(env)
def exists(env):
SCons.Tool.tex.generate_darwin(env)
return env.Detect('latex')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
valentin-krasontovitsch/ansible | lib/ansible/modules/cloud/google/gcp_compute_target_tcp_proxy.py | Python | gpl-3.0 | 12,924 | 0.003327
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_target_tcp_proxy
description:
- Represents a TargetTcpProxy resource, which is used by one or more global forwarding
rules to route incoming TCP requests to a Backend service.
short_description: Creates a GCP TargetTcpProxy
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
description:
description:
- An optional description of this resource.
required: false
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
proxy_header:
description:
- Specifies the type of proxy header to append before sending data to the backend,
either NONE or PROXY_V1. The default is NONE.
required: false
choices:
- NONE
- PROXY_V1
service:
description:
- A reference to the BackendService resource.
- 'This field represents a link to a BackendService resource in GCP. It can be
specified in two ways. First, you can place the selfLink of the resource
here as a string. Alternatively, you can add `register: name-of-resource` to
a gcp_compute_backend_service task and then set this service field to "{{ name-of-resource
}}"'
required: true
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/latest/targetTcpProxies)'
- 'Setting Up TCP proxy for Google Cloud Load Balancing: U(https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy)'
'''
EXAMPLES = '''
- name: create an instance group
gcp_compute_instance_group:
name: "instancegroup-targettcpproxy"
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: instancegroup
- name: create a health check
gcp_compute_health_check:
name: "healthcheck-targettcpproxy"
type: TCP
tcp_health_check:
port_name: service-health
request: ping
response: pong
healthy_threshold: 10
timeout_sec: 2
unhealthy_threshold: 5
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: healthcheck
- name: create a backend service
gcp_compute_backend_service:
name: "backendservice-targettcpproxy"
backends:
- group: "{{ instancegroup }}"
health_checks:
- "{{ healthcheck.selfLink }}"
protocol: TCP
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: backendservice
- name: create a target tcp proxy
gcp_compute_target_tcp_proxy:
name: "test_object"
proxy_header: PROXY_V1
service: "{{ backendservice }}"
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the backend,
either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
service:
description:
- A reference to the BackendService resource.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
name=dict(required=True, type='str'),
proxy_header=dict(type='str', choices=['NONE', 'PROXY_V1']),
service=dict(required=True),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#targetTcpProxy'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind, fetch)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
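# Editor's note: the fetch/create/update/delete flow above is the standard
# Magic Modules idempotency pattern: read the current state, diff it against
# the requested resource, and only then mutate, reporting 'changed' accordingly.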
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
if response.get('proxyHeader') != request.get('proxyHeader'):
proxy_header_update(module, request, response)
if response.get('service') != request.get('service'):
|
rdkdd/tp-spice
|
spice/lib/vm_actions_rv.py
|
Python
|
gpl-2.0
| 14,208
| 0.000352
|
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
"""Connect with remote-viewer from client VM to guest VM.
Client requires
---------------
- remote-viewer
TODO
----
- Update properties and add functionality to test others
Other properties:
- username
- version
- title
- toggle-fullscreen (key combo)
- release-cursor (key combo)
- smartcard-insert
- smartcard-remove
- enable-smartcard
- enable-usbredir
- color-depth
- disable-effects
- usb-filter
- secure-channels
- delete-this-file (0,1)
"""
import os
import logging
import socket
import time
import aexpect
from spice.lib import utils
from spice.lib import deco
from spice.lib import act
from spice.lib import reg
from spice.lib import ios
logger = logging.getLogger(__name__)
WINDOW_TITLE = "'vm1 (1) - Remote Viewer'"
RV_WIN_NAME_AUTH = "Authentication required"
"""Expected window caption."""
RV_WIN_NAME = "Remote Viewer"
"""Expected window caption."""
RV_WM_CLASS = "remote-viewer"
class RVSessionError(Exception):
"""Exception for remote-viewer session. Root exception for the RV Sessiov.
"""
def __init__(self, test, *args, **kwargs):
super(RVSessionError, self).__init__(args, kwargs)
if test.cfg.pause_on_fail or test.cfg.pause_on_end:
# 10 hours
seconds = 60 * 60 * 10
logger.error("Test %s has failed. Do nothing for %s seconds.",
test.cfg.id, seconds)
time.sleep(seconds)
class RVSessionNotImplemented(RVSessionError):
"""Used to show that this part of code is not implemented.
"""
class RVSessionConnect(RVSessionError):
"""Exception for remote-viewer session.
"""
#TODO: pass env variables
@reg.add_action(req=[ios.IOSystem])
def rv_connect(vmi, ssn, env=None):
"""Establish connection between client and guest based on test parameters
supplied in the Cartesian config.
Notes
-----
There are three possible methods to connect from client to guest:
* Cmdline + parameters
* Cmdline + rv file
* remote-viewer menu URL
Parameters
----------
vmi : VmInfo
VM that runs RV.
ssn : xxx
Session object, as an exec-layer to VM.
env : dict
Dictionary of env variables to be passed before remote-viewer starts.
Returns
-------
None
"""
env = env or {}
method = vmi.cfg.rv_parameters_from
if method == 'cmd':
act.info(vmi, "Connect to VM using command line.")
rv_connect_cmd(vmi, ssn, env)
elif method == 'menu':
act.info(vmi, "Connect to VM using menu.")
rv_connect_menu(vmi, ssn, env)
elif method == 'file':
act.info(vmi, "Connect to VM using .vv file.")
rv_connect_file(vmi, ssn, env)
else:
raise RVSessionConnect(vmi.test, "Wrong connect method.")
#TODO: pass env variables
@reg.add_action(req=[ios.ILinux])
def rv_connect_cmd(vmi, ssn, env):
cmd = act.rv_basic_opts(vmi)
url = act.rv_url(vmi)
cmd.append(url)
cmd = utils.combine(cmd, "2>&1")
act.info(vmi, "Final RV command: %s", cmd)
utils.set_ticket(vmi.test)
act.rv_run(vmi, cmd, ssn)
act.rv_auth(vmi)
#TODO: pass env variables
@reg.add_action(req=[ios.ILinux])
def rv_connect_menu(vmi, ssn, env):
cmd = act.rv_basic_opts(vmi)
utils.set_ticket(vmi.test)
cmd = utils.combine(cmd, "2>&1")
act.info(vmi, "Final RV command: %s", cmd)
act.rv_run(vmi, cmd, ssn)
url = act.rv_url(vmi)
act.str_input(vmi, url)
act.rv_auth(vmi)
@reg.add_action(req=[ios.ILinux])
def rv_connect_file(vmi, ssn, env):
cmd = utils.Cmd(vmi.cfg.rv_binary)
vv_file_host = act.gen_vv_file(vmi)
with open(vv_file_host, 'r') as rvfile:
file_contents = rvfile.read()
act.info(vmi, "RV file contents:\n%s", file_contents)
vv_file_client = act.cp_file(vmi, vv_file_host)
cmd.append(vv_file_client)
utils.set_ticket(vmi.test)
cmd = utils.combine(cmd, "2>&1")
act.info(vmi, "Final RV command: %s", cmd)
act.rv_run(vmi, cmd, ssn)
@reg.add_action(req=[ios.ILinux])
def rv_basic_opts(vmi):
"""Command line parameters for RV.
"""
cfg = vmi.cfg
rv_cmd = utils.Cmd()
rv_cmd.append(cfg.rv_binary)
if cfg.rv_debug:
rv_cmd.append("--spice-debug")
if cfg.full_screen:
rv_cmd.append("--full-screen")
if cfg.disable_audio:
rv_cmd.append("--spice-disable-audio")
if cfg.smartcard:
rv_cmd.append("--spice-smartcard")
if cfg.certdb:
rv_cmd.append("--spice-smartcard-db")
rv_cmd.append(cfg.certdb)
if cfg.gencerts:
rv_cmd.append("--spice-smartcard-certificates")
rv_cmd.append(cfg.gencerts)
if cfg.usb_redirection_add_device:
logger.info("Auto USB redirect for devices class == 0x08.")
opt = r'--spice-usbredir-redirect-on-connect="0x08,-1,-1,-1,1"'
rv_cmd.append(opt)
if utils.is_yes(vmi.test.kvm_g.spice_ssl):
cacert_host = utils.cacert_path_host(vmi.test)
cacert_client = act.cp_file(vmi, cacert_host)
opt = "--spice-ca-file=%s" % cacert_client
rv_cmd.append(opt)
if cfg.spice_client_host_subject:
host_subj = utils.get_host_subj(vmi.test)
opt = '--spice-host-subject=%s' % host_subj
rv_cmd.append(opt)
return rv_cmd
@reg.add_action(req=[ios.ILinux])
def rv_url(vmi):
"""Cacert subj is in format for create certificate(with '/' delimiter)
remote-viewer needs ',' delimiter. And also is needed to remove first
character (it's '/').
If it's invalid implicit, a remote-viewer connection will be attempted
with the hostname, since ssl certs were generated with the ip address.
"""
test = vmi.test
port = test.kvm_g.spice_port
tls_port = test.kvm_g.spice_tls_port
#escape_char = test.cfg_c.shell_escape_char or '\\'
host_ip = utils.get_host_ip(test)
# SSL
if utils.is_yes(vmi.test.kvm_g.spice_ssl):
if vmi.cfg.ssltype == "invalid_implicit_hs" or \
"explicit" in vmi.cfg.ssltype:
hostname = socket.gethostname()
url = "spice://%s?tls-port=%s&port=%s" % (hostname, tls_port,
port)
else:
url = "spice://%s?tls-port=%s&port=%s" % (host_ip, tls_port,
port)
return url
# No SSL
url = "spice://%s?port=%s" % (host_ip, port)
return url
@reg.add_action(req=[ios.ILinux])
def rv_auth(vmi):
"""Client waits for user authentication if spice_password is set use qemu
monitor password if set, else, if set, try normal password.
Only for cmdline. File console.rv should have a password.
"""
if vmi.cfg.ticket_send:
# Wait for remote-viewer to launch.
act.wait_for_win(vmi, RV_WIN_NAME_AUTH)
act.str_input(vmi, vmi.cfg.ticket_send)
@reg.add_action(req=[ios.IOSystem])
def gen_vv_file(vmi):
"""Generates vv file for remote-viewer.
Parameters
----------
vmi : VmInfo
    VM info object.
"""
test = vmi.test
cfg = vmi.cfg
host_dir = os.path.expanduser('~')
fpath = os.path.join(host_dir, cfg.rv_file)
rv_file = open(fpath, 'w')
rv_file.write("[virt-viewer]\n")
rv_file.write("type=%s\n" % cfg.display)
rv_file.write("host=%s\n" % utils.get_host_ip(test))
rv_file.write("port=%s\n" % test.kvm_g.spice_port)
if cfg.ticket_send:
rv_file.write("password=%s\n" % cfg.ticket_send)
if utils.is_yes(test.kvm_g.spice_ssl):
rv_file.write("tls-port=%s\n"
|
pedrobaeza/odoo
|
addons/mrp_byproduct/mrp_byproduct.py
|
Python
|
agpl-3.0
| 8,828
| 0.006004
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class mrp_subproduct(osv.osv):
_name = 'mrp.subproduct'
_description = 'Byproduct'
_columns={
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'subproduct_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Quantity Type', required=True, help="Define how the quantity of byproducts will be set on the production orders using this BoM.\
'Fixed' depicts a situation where the quantity of created byproduct is always equal to the quantity set on the BoM, regardless of how many are created in the production order.\
In contrast, 'Variable' means that the quantity will be computed as\
'(quantity of byproduct set on the BoM / quantity of manufactured product set on the BoM * quantity of manufactured product in the production order.)'"),
'bom_id': fields.many2one('mrp.bom', 'BoM', ondelete='cascade'),
}
_defaults={
'subproduct_type': 'variable',
'product_qty': lambda *a: 1.0,
}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Changes UoM if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
v = {'product_uom': prod.uom_id.id}
return {'value': v}
return {}
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value':{}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
class mrp_bom(osv.osv):
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit='mrp.bom'
_columns={
'sub_products':fields.one2many('mrp.subproduct', 'bom_id', 'Byproducts'),
}
class mrp_production(osv.osv):
_description = 'Production'
_inherit= 'mrp.production'
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order and calculates quantity based on subproduct_type.
@return: Newly generated picking Id.
"""
picking_id = super(mrp_production,self).action_confirm(cr, uid, ids, context=context)
product_uom_obj = self.pool.get('product.uom')
for production in self.browse(cr, uid, ids):
source = production.product_id.property_stock_production.id
if not production.bom_id:
continue
for sub_product in production.bom_id.sub_products:
product_uom_factor = product_uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.bom_id.product_uom.id)
qty1 = sub_product.product_qty
qty2 = production.product_uos and production.product_uos_qty or False
product_uos_factor = 0.0
if qty2 and production.bom_id.product_uos.id:
product_uos_factor = product_uom_obj._compute_qty(cr, uid, production.product_uos.id, production.product_uos_qty, production.bom_id.product_uos.id)
if sub_product.subproduct_type == 'variable':
if production.product_qty:
qty1 *= product_uom_factor / (production.bom_id.product_qty or 1.0)
if production.product_uos_qty:
qty2 *= product_uos_factor / (production.bom_id.product_uos_qty or 1.0)
data = {
'name': 'PROD:'+production.name,
'date': production.date_planned,
'product_id': sub_product.product_id.id,
'product_uom_qty': qty1,
'product_uom': sub_product.product_uom.id,
'product_uos_qty': qty2,
'product_uos': production.product_uos and production.product_uos.id or False,
'location_id': source,
'location_dest_id': production.location_dest_id.id,
'move_dest_id': production.move_prod_id.id,
'state': 'waiting',
'production_id': production.id
}
self.pool.get('stock.move').create(cr, uid, data)
return picking_id
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
"""Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but with
the module mrp_byproduct installed it can differ for byproducts having type 'variable'.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Identify the product to produce.
:return: The factor to apply to the quantity that we should produce for the given production order and stock move.
"""
sub_obj = self.pool.get('mrp.subproduct')
move_obj = self.pool.get('stock.move')
production_obj = self.pool.get('mrp.production')
production_browse = production_obj.browse(cr, uid, production_id, context=context)
move_browse = move_obj.browse(cr, uid, move_id, context=context)
subproduct_factor = 1
sub_id = sub_obj.search(cr, uid,[('product_id', '=', move_browse.product_id.id),('bom_id', '=', production_browse.bom_id.id), ('subproduct_type', '=', 'variable')], context=context)
if sub_id:
subproduct_record = sub_obj.browse(cr ,uid, sub_id[0], context=context)
if subproduct_record.bom_id.product_qty:
subproduct_factor = subproduct_record.product_qty / subproduct_record.bom_id.product_qty
return subproduct_factor
return super(mrp_production, self)._get_subproduct_factor(cr, uid, production_id, move_id, context=context)
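# Editor's worked example (assumed numbers): a 'variable' byproduct with
# product_qty=2 on a BoM whose product_qty=10 gives
# subproduct_factor = 2 / 10 = 0.2, so a production order for 50 units
# creates 50 * 0.2 = 10 units of the byproduct.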
class change_production_qty(osv.osv_memory):
_inherit = 'change.production.qty'
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
bom_obj = self.pool.get('mrp.bom')
move_lines_obj = self.pool.get('stock.move')
prod_obj = self.pool.get('mrp.production')
for m in prod.move_created_ids:
if m.product_id.id == prod.product_id.id:
|
wavesoft/yaco
|
yaco/wsgi.py
|
Python
|
gpl-3.0
| 1,413
| 0.000708
|
"""
WSGI config for yaco project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "yaco.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yaco.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
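# Editor's addition: a minimal sketch for serving this module locally with
# the standard library, assuming no production server is available.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    make_server('127.0.0.1', 8000, application).serve_forever()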
|
DevHugo/zds-site
|
zds/forum/migrations/0005_auto_20151119_2224.py
|
Python
|
gpl-3.0
| 594
| 0.001684
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
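# Editor's note: SeparateDatabaseAndState lets the database operations
# (renaming the table to notification_topicfollowed) diverge from the state
# operations (dropping TopicFollowed from this app's model state), the usual
# recipe for handing a model over to another app without touching its rows.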
class Migration(migrations.Migration):
dependencies = [
('forum', '0004_topic_update_index_date'),
]
database_operations = [
migrations.AlterModelTable('TopicFollowed', 'notification_topicfollowed')
]
state_operations = [
migrations.DeleteModel('TopicFollowed')
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=database_operations,
state_operations=state_operations)
]
|
iceout/python_koans_practice
|
python2/koans/about_sets.py
|
Python
|
mit
| 1,706
| 0.001758
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutSets(Koan):
def test_sets_make_keep_lists_unique(self):
highlanders = ['MacLeod', 'Ramirez', 'MacLeod', 'Matunas',
'MacLeod', 'Malcolm', 'MacLeod']
there_can_only_be_only_one = set(highlanders)
self.assertEqual(set(['MacLeod', 'Ramirez', 'Matunas', 'Malcolm']), there_can_only_be_only_one)
def test_sets_are_unordered(self):
self.assertEqual(set(['1', '2', '3', '5', '4']), set('12345'))
def test_convert_the_set_into_a_list_to_sort_it(self):
self.assertEqual(['1', '2', '3', '4', '5'], sorted(set('13245')))
# ------------------------------------------------------------------
def test_set_have_arithmetic_operators(self):
scotsmen = set(['MacLeod', 'Wallace', 'Willie'])
warriors = set(['MacLeod', 'Wallace', 'Leonidas'])
self.assertEqual(set(['Willie']), scotsmen - warriors)
self.assertEqual(set(['MacLeod', 'Wallace', 'Willie', 'Leonidas']), scotsmen | warriors)
self.assertEqual(set(['MacLeod', 'Wallace']), scotsmen & warriors)
self.assertEqual(set(['Willie', 'Leonidas']), scotsmen ^ warriors)
# ------------------------------------------------------------------
def test_we_can_query_set_membership(self):
self.assertEqual(True, 127 in set([127, 0, 0, 1]))
self.assertEqual(True, 'cow' not in set('apocalypse now'))
def test_we_can_compare_subsets(self):
self.assertEqual(True, set('cake') <= set('cherry cake'))
self.assertEqual(True, set('cake').issubset(set('cherry cake')))
self.assertEqual(False, set('cake') > set('pie'))
|
h4/fuit-webdev
|
projects/logger/manage.py
|
Python
|
mit
| 249
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "logger.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
jhcodeh/my-doku
|
my_doku_application/my_doku_application/wsgi.py
|
Python
|
mit
| 413
| 0.002421
|
"""
WSGI config for my_doku_application project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_doku_application.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
naziris/HomeSecPi
|
picamera/bcm_host.py
|
Python
|
apache-2.0
| 2,448
| 0.001634
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python header conversion
# Copyright (c) 2013,2014 Dave Hughes <dave@waveform.org.uk>
#
# Original headers
# Copyright (c) 2012, Broadcom Europe Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import ctypes as ct
import warnings
_lib = ct.CDLL('libbcm_host.so')
# bcm_host.h #################################################################
bcm_host_init = _lib.bcm_host_init
bcm_host_init.argtypes = []
bcm_host_init.restype = None
bcm_host_deinit = _lib.bcm_host_deinit
bcm_host_deinit.argtypes = []
bcm_host_deinit.restype = None
graphics_get_display_size = _lib.graphics_get_display_size
graphics_get_display_size.argtypes = [ct.c_uint16, ct.POINTER(ct.c_uint32), ct.POINTER(ct.c_uint32)]
graphics_get_display_size.restype = ct.c_int32
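# Editor's usage sketch: query the size of display 0 through the bindings
# declared above (Raspberry Pi only; the display number is an assumption).
def _example_display_size():
    width, height = ct.c_uint32(), ct.c_uint32()
    bcm_host_init()
    try:
        if graphics_get_display_size(0, ct.byref(width), ct.byref(height)) >= 0:
            print('display 0: %dx%d' % (width.value, height.value))
    finally:
        bcm_host_deinit()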
|
emsrc/daeso-dutch
|
lib/daeso_nl/ga/kb/baseline.py
|
Python
|
gpl-3.0
| 8,138
| 0.010445
|
"""
simple alignment baselines
"""
# TODO:
# - better implementations
from daeso.pair import Pair
def greedy_align_equal_words(corpus):
for graph_pair in corpus:
graph_pair.clear()
graphs = graph_pair.get_graphs()
target_nodes = graphs.target.terminals(with_punct=False,
with_empty=False)
target_words = [ graphs.target.node[tn]["word"].lower()
for tn in target_nodes ]
for sn in graphs.source.terminals_iter(with_punct=False,
with_empty=False):
sw = graphs.source.node[sn]["word"].lower()
try:
j = target_words.index(sw)
except ValueError:
continue
tn = target_nodes[j]
graph_pair.add_align(Pair(sn, tn), "equals")
del target_nodes[j]
del target_words[j]
def greedy_align_equal_words_roots(corpus):
# if words are equal, align as equals
# elif roots are equals, align as restates
for graph_pair in corpus:
graph_pair.clear()
graphs = graph_pair.get_graphs()
target_nodes = graphs.target.terminals(with_punct=False,
with_empty=False)
target_words = [ graphs.target.node[tn]["word"].lower()
for tn in target_nodes ]
target_roots = [ graphs.target.node[tn]["root"]
for tn in target_nodes ]
for sn in graphs.source.terminals_iter(with_punct=False,
with_empty=False):
sw = graphs.source.node[sn]["word"].lower()
sr = graphs.source.node[sn]["root"]
try:
j = target_words.index(sw)
except ValueError:
try:
j = target_roots.index(sr)
except ValueError:
continue
else:
relation = "restates"
else:
relation = "equals"
tn = target_nodes[j]
graph_pair.add_align(Pair(sn, tn), relation)
del target_nodes[j]
del target_words[j]
del target_roots[j]
def greedy_align_words(corpus):
# if words are equal -> equals
# if roots are equals -> restates
# if source in target root and len(source)>3 -> generalizes
# if target in source root and len(target)>3-> specifies
# if target and source root share a morph segment ->intersects
for graph_pair in corpus:
graph_pair.clear()
graphs = graph_pair.get_graphs()
target_nodes = graphs.target.terminals(with_punct=False,
with_empty=False)
target_words = [ graphs.target.node[tn]["word"].lower()
for tn in target_nodes ]
target_roots = [ graphs.target.node[tn]["root"]
for tn in target_nodes ]
for sn in graphs.source.terminals_iter(with_punct=False,
with_empty=False):
sw = graphs.source.node[sn]["word"].lower()
relation = None
# align identical words
for i, tw in enumerate(target_words):
if sw == tw:
relation = "equals"
break
if not relation:
sr = graphs.source.node[sn]["root"]
# align identical roots
for i, tr in enumerate(target_roots):
if sr == tr:
relation = "restates"
break
if not relation:
sparts = set(sr.split("_"))
# check for spec, gen, or intersect
for i, tr in enumerate(target_roots):
tw = target_words[i]
if sr in tr and len(sw) > 3:
relation = "generalizes"
break
elif tr in sr and len(tw) > 3:
relation = "specifies"
break
# check if roots share a morphological segment
elif sparts.intersection(tr.split("_")):
relation = "intersec
|
ts"
break
if relation:
tn = target_nodes[i]
graph_pair.add_align(Pair(sn, tn), relation)
del target_nodes[i]
del target_words[i]
del target_roots[i]
#=====================================================================
# Full tree alignment
#=====================================================================
def lc_roots(graph, n):
"""
Return the list of the lower-cased roots of the terminals in
the yield of node n.
Store list in attribute "_lc_roots" of node n.
Also recursively calls lc_roots for all nodes dominated by node n.
"""
try:
# node already seen (should not happen in trees)
return graph.node[n]["_lc_roots"]
except KeyError:
graph.node[n]["_lc_roots"] = []
if graph.node_is_terminal(n, with_empty=False, with_punct=False):
root = graph.node[n].get("root", "").lower()
if root:
graph.node[n]["_lc_roots"].append(root)
else:
# punct and empty nodes end here
for m in graph.successors(n):
graph.node[n]["_lc_roots"] += lc_roots(graph, m)
return graph.node[n]["_lc_roots"]
def greedy_align_phrases(corpus):
# greedy align phrases with the same lower-cased words as strings and with
# the same lower-cased roots as restates
for graph_pair in corpus:
graph_pair.clear()
graphs = graph_pair.get_graphs()
lc_roots(graphs.source, graphs.source.root)
lc_roots(graphs.target, graphs.target.root)
target_nodes = [ tn for tn in graphs.target
if ( not graphs.target.node_is_punct(tn) and
not graphs.target.node_is_empty(tn) ) ]
target_words = [ graphs.target.get_node_token_string(tn).lower()
for tn in target_nodes ]
target_roots = [ graphs.target.node[tn].get("_lc_roots", [])
for tn in target_nodes ]
for sn in graphs.source:
if ( graphs.source.node_is_punct(sn) or
graphs.source.node_is_empty(sn) ):
continue
sw = graphs.source.get_node_token_string(sn).lower()
sr = graphs.source.node[sn].get("_lc_roots")
try:
j = target_words.index(sw)
except ValueError:
try:
j = target_roots.index(sr)
except ValueError:
continue
else:
tn = target_nodes[j]
graph_pair.add_align(Pair(sn, tn), "restates")
#print "RESTATES"
#print " ".join(sr)
#print " ".join(target_roots[j])
del target_nodes[j]
del target_words[j]
del target_roots[j]
else:
tn = target_nodes[j]
graph_pair.add_align(Pair(sn, tn), "equals")
#print "EQUALS"
#print sw
#print target_words[j]
del target_nodes[j]
del target_words[j]
del target_roots[j]
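# Editor's note on the greedy scheme above: every matched target node is
# deleted from target_nodes/target_words/target_roots, so each target can be
# aligned at most once; e.g. with source "the the" and target "the", only the
# first source token receives an 'equals' alignment.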
|
natano/python-git-orm
|
git_orm/__init__.py
|
Python
|
isc
| 758
| 0.006596
|
__version__ = '0.4'
__author__ = 'Martin Natano <natano@natano.net>'
_repository = None
_branch = 'git-orm'
_remote = 'origin'
class GitError(Exception): pass
def set_repository(value):
from pygit2 import discover_repository, Repository
global _repository
if value is None:
_repository = None
return
try:
path = discover_repository(value)
except KeyError:
raise GitError('no repository found in "{}"'.format(value))
_repository = Repository(path)
def get_repository():
return _repository
def set_branch(value):
global _branch
_branch = value
def get_branch():
return _branch
def set_remote(value):
global _remote
_remote = value
def get_remote():
return _remote
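# Editor's usage sketch: wire the module to a working copy before using the
# ORM ('/path/to/repo' is a placeholder; raises GitError if none is found).
set_repository('/path/to/repo')
set_branch('my-data-branch')
assert get_repository() is not None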
|
google-research/language
|
language/labs/memory/synthetic_dataset.py
|
Python
|
apache-2.0
| 7,696
| 0.007277
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates sequence of binary patterns or kv pairs to test associative memory.
Pattern task: Given N patterns, retrieve the right pattern via its degraded
version, where some of the bits are set to 0.
Symbolic key-value task: Given a string of concatenated key-value pairs,
retrieve the right value given the key.
See [Miconi et al. 2018] Differentiable Plasticity
(https://arxiv.org/abs/1804.02464) and [Ba et al. 2016] Using Fast Weights to
to Attend to the Recent Past (https://arxiv.org/abs/1610.06258v1) for details
of task design.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import string
import numpy as np
import tensorflow.compat.v1 as tf
def generate_pattern_data(num_patterns, pattern_size):
"""Generates a sequence of patterns followed by a degraded query pattern.
Args:
num_patterns (int): Number of unique patterns in the sequence
pattern_size (int): Dimensionality of each pattern
Returns:
seq: Numpy array, the sequence of patterns to be presented
shape [num_patterns + 1, pattern_size]
target: Numpy array, the pattern we expect to retrieve for the degraded
query pattern
shape [pattern_size,]
target_idx (int): The index into the list of unique patterns for the target
patterns: List of np arrays, all unique patterns
"""
patterns = []
for _ in range(num_patterns):
pattern = np.random.choice([-1, 1], size=(pattern_size,), p=[.5, .5])
patterns.append(pattern)
# Choose one pattern to degrade
target_idx = random.choice(range(num_patterns))
target = patterns[target_idx]
degraded = target.copy()
degraded_idxs = np.random.choice(
pattern_size, pattern_size // 2, replace=False)
degraded[degraded_idxs] = 0
patterns.append(degraded)
seq = np.array(patterns)
return seq, target, target_idx, patterns
def generate_pattern_data_selective(num_patterns, num_patterns_store,
pattern_size):
"""Generates a sequence of patterns followed by a degraded query pattern.
Args:
num_patterns (int): Number of unique patterns in the sequence
num_patterns_store (int): Number of patterns we actually have to store
pattern_size (int): Dimensionality of each pattern
Returns:
seq: Numpy array, the sequence of patterns to be presented.
shape [num_patterns + 1, pattern_size]
target: Numpy array, the pattern we expect to retrieve for the degraded
query pattern.
shape [pattern_size,]
target_idx (int): The index into the list of unique patterns for the target.
patterns: List of np arrays, all unique patterns.
Patterns we need to remember (that may be queried) have their last bit set
to 1, otherwise 0.
"""
patterns = []
for _ in range(num_patterns):
pattern = np.random.choice([-1, 1], size=(pattern_size,), p=[.5, .5])
patterns.append(pattern)
# Choose patterns that are important to remember
remember_idxs = np.random.choice(
range(num_patterns), size=num_patterns_store, replace=False)
patterns = [
np.append(p, [1]) if i in remember_idxs else np.append(p, [0])
for i, p in enumerate(patterns)
]
# Choose one pattern to degrade
target_idx = random.choice(range(num_patterns))
target = patterns[target_idx]
degraded = target.copy()
degraded_idxs = np.random.choice(
pattern_size, pattern_size // 2, replace=False)
degraded[degraded_idxs] = 0
patterns.append(degraded)
seq = np.array(patterns)
return seq, target, target_idx, patterns
def generate_symbolic_data(num_pairs):
"""Generates a sequence of key-value pairs followed by a query key.
Args:
num_pairs (int): Number of pairs
Returns:
seq_text (str): Sequence of kv pairs, followed by a ?,
followed by the query key.
seq_encoded (numpy arr): Sequence of kv pairs, encoded into vocab indices.
target_val (str): Digit, the value we expect to retrieve for the key.
target_val_encoded (int): Encoded target_val
target_idx (int): The index into the list of pairs for the target
"""
pairs = list(zip(
    np.random.choice(list(string.ascii_lowercase), num_pairs, replace=False),
    np.random.choice(list("0123456789"), num_pairs)
))  # list() so pairs supports indexing under Python 3, where zip is lazy
vocab = get_symbolic_vocab()
# Choose a query key
target_idx = random.choice(range(num_pairs))
target_key, target_val_text = pairs[target_idx]
target_val_encoded = vocab.index(target_val_text)
seq_text = "".join([k + v for k, v in pairs]) + "?" + target_key
seq_encoded = [vocab.index(char) for char in seq_text]
return seq_text, seq_encoded, target_val_text, target_val_encoded, target_idx
def get_pattern_dataset(n=100000,
num_patterns=3,
pattern_size=50,
selective=False,
num_patterns_store=None):
"""Generates a dataset of sequences of patterns and retrieval targets.
Args:
n: Number of examples
num_patterns: Number of unique patterns in the sequence
pattern_size: Dimensionality of each pattern
selective (bool): True if only a subset of patterns needs to be stored.
num_patterns_store: Number of patterns to store if selective=True.
Returns:
A tf.data.Dataset created from a dict with property "seqs,"
containing the sequences of randomly generated binary patterns, and
"targets," containing the ground-truth pattern to retrieve for the last
degraded query pattern in the sequence.
"""
seqs = []
targets = []
for _ in range(n):
if selective:
if num_patterns_store is None:
num_patterns_store = num_patterns // 10
seq, target, _, _ = generate_pattern_data_selective(
num_patterns, num_patterns_store, pattern_size)
else:
seq, target, _, _ = generate_pattern_data(num_patterns, pattern_size)
seqs.append(seq)
targets.append(target)
return tf.data.Dataset.from_tensor_slices({
"seqs": np.array(seqs, dtype=np.float32),
"targets": np.array(targets, dtype=np.int32)
})
def get_symbolic_dataset(_, n=100000, num_pairs=5):
"""Generates a dataset of sequences of key-value pairs and retrieval targets.
Args:
n: Number of examples
num_pairs: Number of pairs in each sequence
Returns:
A tf.data.Dataset created from a dict with property "seqs,"
containing the sequences of randomly generated key-value pairs, and
"targets," containing the ground-truth value to retrieve for the query key.
"""
seqs = []
targets = []
for _ in range(n):
_, seq_encoded, _, target_encoded, _ = generate_symbolic_data(num_pairs)
seqs.append(seq_encoded)
targets.append(target_encoded)
return tf.data.Dataset.from_tensor_slices({
"seqs": np.array(seqs, dtype=np.int32),
"targets": np.array(targets, dtype=np.int32)
})
def get_symbolic_vocab():
"""Gets the vocabulary for the symbolic task.
Returns:
A list with a-z, 0-9, and ?.
"""
return list(string.ascii_lowercase) + list(string.digits + "?")
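# Editor's usage sketch: build a tiny pattern dataset and inspect one batch.
# Assumes eager execution is enabled (e.g. TF2 behavior); under graph mode an
# iterator would be needed instead.
def _example_pattern_dataset():
    ds = get_pattern_dataset(n=8, num_patterns=3, pattern_size=16).batch(4)
    for batch in ds.take(1):
        print(batch['seqs'].shape, batch['targets'].shape)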
|
chetan/cherokee
|
admin/plugins/proxy.py
|
Python
|
gpl-2.0
| 10,677
| 0.018638
|
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import Handler
import Cherokee
import Balancer
import validations
from util import *
from consts import *
URL_APPLY = '/plugin/proxy/apply'
HELPS = [('modules_handlers_proxy', N_("Reverse Proxy"))]
NOTE_REUSE_MAX = N_("Maximum number of connections per server that the proxy can try to keep opened.")
NOTE_ALLOW_KEEPALIVE = N_("Allow the server to use Keep-alive connections with the back-end servers.")
NOTE_PRESERVE_HOST = N_("Preserve the original \"Host:\" header sent by the client. (Default: No)")
NOTE_PRESERVE_SERVER = N_("Preserve the \"Server:\" header sent by the back-end server. (Default: No)")
VALS = [
('.+?!reuse_max', validations.is_number_gt_0),
]
def commit():
# New Rewrite
for e in ('in_rewrite_request', 'out_rewrite_request'):
key = CTK.post.pop ('tmp!new!%s!key'%(e))
regex = CTK.post.pop ('tmp!new!%s!regex'%(e))
subst = CTK.post.pop ('tmp!new!%s!substring'%(e))
if regex and subst:
next = CTK.cfg.get_next_entry_prefix ('%s!%s'%(key, e))
CTK.cfg['%s!regex'%(next)] = regex
CTK.cfg['%s!substring'%(next)] = subst
return CTK.cfg_reply_ajax_ok()
# New Header
for e in ('in_header_add', 'out_header_add'):
key = CTK.post.pop ('tmp!new!%s!key'%(e))
header = CTK.post.pop ('tmp!new!%s!header'%(e))
value = CTK.post.pop ('tmp!new!%s!value'%(e))
if header and value:
CTK.cfg['%s!%s!%s'%(key, e, header)] = value
return CTK.cfg_reply_ajax_ok()
# New Hide
for e in ('in_header_hide', 'out_header_hide'):
key = CTK.post.pop ('tmp!new!%s!key'%(e))
hide = CTK.post.pop ('tmp!new!%s!hide'%(e))
if hide:
next = CTK.cfg.get_next_entry_prefix ('%s!%s'%(key, e))
CTK.cfg[next] = hide
return CTK.cfg_reply_ajax_ok()
# Modification
return CTK.cfg_apply_post()
class URL_Rewrite (CTK.Container):
class Content (CTK.Container):
def __init__ (self, refresh, key):
CTK.Container.__init__ (self)
keys = CTK.cfg.keys(key)
if keys:
table = CTK.Table()
table.set_header(1)
table += [CTK.RawHTML(x) for x in (_('Regular Expression'), _('Substitution'))]
for k in CTK.cfg.keys(key):
regex = CTK.TextCfg ('%s!%s!regex'%(key,k), False)
subst = CTK.TextCfg ('%s!%s!substring'%(key,k), False)
remove = CTK.ImageStock('del')
remove.bind('click', CTK.JS.Ajax (URL_APPLY, data={'%s!%s'%(key,k): ''},
complete = refresh.JS_to_refresh()))
table += [regex, subst, remove]
submit = CTK.Submitter (URL_APPLY)
submit += table
self += CTK.Indenter (submit)
def __init__ (self, key, key_entry):
CTK.Container.__init__ (self)
# List
refresh = CTK.Refreshable ({'id': 'proxy_%s'%(key_entry)})
refresh.register (lambda: self.Content(refresh, '%s!%s'%(key, key_entry)).Render())
self += refresh
# New
new_regex = CTK.TextCfg('tmp!new!%s!regex'%(key_entry), False, {'class': 'noauto'})
new_subst = CTK.TextCfg('tmp!new!%s!substring'%(key_entry), True, {'class': 'noauto'})
add_button = CTK.SubmitterButton(_('Add'))
table = CTK.Table()
table.set_header(1)
table += [CTK.RawHTML(x) for x in (_('Add RegEx'), _('Substitution'))]
table += [new_regex, new_subst, add_button]
submit = CTK.Submitter (URL_APPLY)
submit += CTK.Indenter (table)
submit += CTK.Hidden ('tmp!new!%s!key'%(key_entry), key)
submit.bind ('submit_success', refresh.JS_to_refresh())
self += submit
class Header_List (CTK.Container):
class Content (CTK.Container):
def __init__ (self, refresh, key):
CTK.Container.__init__ (self)
keys = CTK.cfg.keys(key)
if keys:
table = CTK.Table()
table.set_header(1)
table += [CTK.RawHTML(x) for x in (_('Header'), _('Value'))]
for k in CTK.cfg.keys(key):
value = CTK.TextCfg ('%s!%s'%(key,k), False)
remove = CTK.ImageStock('del')
remove.bind('click', CTK.JS.Ajax (URL_APPLY, data={'%s!%s'%(key,k): ''},
complete = refresh.JS_to_refresh()))
table += [CTK.RawHTML(k), value, remove]
submit = CTK.Submitter (URL_APPLY)
submit += table
self += CTK.Indenter (submit)
def __init__ (self, key, key_entry):
CTK.Container.__init__ (self)
# List
refresh = CTK.Refreshable ({'id': 'proxy_%s'%(key_entry)})
refresh.register (lambda: self.Content(refresh, '%s!%s'%(key, key_entry)).Render())
self += refresh
# New
new_regex = CTK.TextCfg('tmp!new!%s!header'%(key_entry), False, {'class': 'noauto'})
new_subst = CTK.TextCfg('tmp!new!%s!value'%(key_entry), False, {'class': 'noauto'})
add_button = CTK.SubmitterButton(_('Add'))
table = CTK.Table()
table.set_header(1)
table += [CTK.RawHTML(x) for x in (_('Add Header Entry'), _('Value'))]
table += [new_regex, new_subst, add_button]
submit = CTK.Submitter (URL_APPLY)
submit += CTK.Indenter (table)
submit += CTK.Hidden ('tmp!new!%s!key'%(key_entry), key)
submit.bind ('submit_success', refresh.JS_to_refresh())
self += submit
class Header_Hide (CTK.Container):
class Content (CTK.Container):
def __init__ (self, refresh, key):
CTK.Container.__init__ (self)
keys = CTK.cfg.keys(key)
if keys:
table = CTK.Table()
table.set_header(1)
table += [CTK.RawHTML(_('Header'))]
for k in CTK.cfg.keys(key):
remove = CTK.ImageStock('del')
remove.bind('click', CTK.JS.Ajax (URL_APPLY, data={'%s!%s'%(key,k): ''},
complete = refresh.JS_to_refresh()))
table += [CTK.RawHTML(CTK.cfg.get_val('%s!%s'%(key,k))), remove]
submit = CTK.Submitter (URL_APPLY)
submit += table
self += CTK.Indenter (submit)
def __init__ (self, key, key_entry):
CTK.Container.__init__ (self)
# List
refresh = CTK.Refreshable ({'id': 'proxy_%s'%(key_entry)})
refresh.register (lambda: self.Content(refresh, '%s!%s'%(key, key_entry)).Render())
self += refresh
# New
new_hide = CTK.TextCfg('tmp!new!%s!hide'%(key_entry), False, {'class': 'noauto'})
add_button = CTK.SubmitterButton(_('Add'))
table = CTK.Table()
table.set_header(1)
table += [CTK.RawHTML (_('Hide Header'))]
table += [new_hide, add_button]
submit = CTK.Submitter (URL_APPLY)
submit += CTK.Indenter (table)
submit += CTK.Hidden ('tmp!new!%s!key'%(key_entry), key)
submit.bind ('submit_success', refresh.JS_to_refresh())
self += submit
|
RNAcentral/rnacentral-import-pipeline
|
tests/databases/intact/parser_test.py
|
Python
|
apache-2.0
| 8,022
| 0.002618
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from datetime import date
import collections as coll
import attr
import pytest
from rnacentral_pipeline.databases import data
from rnacentral_pipeline.databases.intact import parser
from rnacentral_pipeline.databases.helpers import publications as pubs
@pytest.fixture(scope="module")
def sample():
with open("data/intact/sample.txt", "r") as raw:
return list(parser.parse(raw, os.environ["PGDATABASE"]))
def test_can_parse_all_data(sample):
assert len(sample) == 4
def test_creates_entries_with_expected_ids(sample):
ids = sorted(e.primary_id for e in sample)
assert ids == [
"INTACT:URS0000077671_559292",
"INTACT:URS0000182FAB_559292",
"INTACT:URS000020D517_559292",
"INTACT:URS00002AFD52_559292",
]
def test_correctly_groups_data(sample):
val = {e.primary_id: len(e.interactions) for e in sample}
assert val == {
"INTACT:URS0000077671_559292": 1,
"INTACT:URS0000182FAB_559292": 1,
"INTACT:URS000020D517_559292": 1,
"INTACT:URS00002AFD52_559292": 4,
}
def test_produces_correct_data(sample):
with open("data/intact/sample.txt", "r") as raw:
val = next(parser.parse_interactions(raw))
i1 = data.Interactor(
id=data.InteractionIdentifier("intact", "EBI-10921362", None),
alt_ids=[
data.InteractionIdentifier("rnacentral", "URS00002AFD52_559292", None)
],
aliases=[
data.InteractionIdentifier("psi-mi", "snr18 yeast", "display_short"),
data.InteractionIdentifier("psi-mi", "EBI-10921362", "display_long"),
],
taxid=559292,
biological_role=[
data.InteractionIdentifier("psi-mi", "MI:0499", "unspecified role")
],
experimental_role=[data.InteractionIdentifier("psi-mi", "MI:0498", "prey")],
interactor_type=[
data.InteractionIdentifier("psi-mi", "MI:0609", "small nucleolar rna")
],
xrefs=[],
annotations="-",
features=[data.InteractionIdentifier("32p radiolabel", "?-?", None)],
stoichiometry=None,
participant_identification=[
data.InteractionIdentifier("psi-mi", "MI:0396", "predetermined participant")
],
)
i2 = data.Interactor(
id=data.InteractionIdentifier("uniprotkb", "P15646", None),
alt_ids=[
data.InteractionIdentifier("intact", "EBI-6838", None),
data.InteractionIdentifier("uniprotkb", "P89890", None),
data.InteractionIdentifier("uniprotkb", "D6VRX5", None),
],
aliases=[
data.InteractionIdentifier("psi-mi", "fbrl_yeast", "display_long"),
data.InteractionIdentifier("uniprotkb", "NOP1", "gene name"),
data.InteractionIdentifier("psi-mi", "NOP1", "display_short"),
data.InteractionIdentifier("uniprotkb", "LOT3", "gene name synonym"),
data.InteractionIdentifier("uniprotkb", "YDL014W", "locus name"),
data.InteractionIdentifier("uniprotkb", "D2870", "orf name"),
data.InteractionIdentifier(
"uniprotkb",
"U3 small nucleolar RNA-associated protein NOP1",
"gene name synonym",
),
data.InteractionIdentifier(
"uniprotkb", "Histone-glutamine methyltransferase", "gene name synonym"
),
],
taxid=559292,
biological_role=[
data.InteractionIdentifier("psi-mi", "MI:0499", "unspecified role")
],
experimental_role=[data.InteractionIdentifier("psi-mi", "MI:0498", "prey")],
interactor_type=[data.InteractionIdentifier("psi-mi", "MI:0326", "protein")],
xrefs=[
data.InteractionIdentifier(
"go", "GO:0008649", "rRNA methyltransferase activity"
),
data.InteractionIdentifier("go", "GO:0031428", "box C/D snoRNP complex"),
data.InteractionIdentifier("go", "GO:0032040", "small-subunit processome"),
data.InteractionIdentifier("go", "GO:0003723", "RNA binding"),
data.InteractionIdentifier(
"go", "GO:0000494", "box C/D snoRNA 3'-end processing"
),
data.InteractionIdentifier("refseq", "NP_010270.1", None),
data.InteractionIdentifier("sgd", "S000002172", None),
data.InteractionIdentifier("interpro", "IPR000692", "Fibrillarin"),
data.InteractionIdentifier("interpro", "IPR020813", None),
data.InteractionIdentifier("rcsb pdb", "5WYJ", None),
data.InteractionIdentifier("rcsb pdb", "5WYK", None),
data.InteractionIdentifier(
"go", "GO:0006356", "regulation of transcription by RNA polymerase I"
),
data.InteractionIdentifier(
"go", "GO:1990259", "histone-glutamine methyltransferase activity"
),
data.InteractionIdentifier("go", "GO:0005730", "nucleolus"),
data.InteractionIdentifier("go", "GO:0006364", "rRNA processing"),
data.InteractionIdentifier("go", "GO:0031167", "rRNA methylation"),
data.InteractionIdentifier("go", "GO:0043144", "snoRNA processing"),
data.InteractionIdentifier(
"go", "GO:1990258", "histone glutamine methylation"
),
data.InteractionIdentifier("interpro", "IPR029063", None),
data.InteractionIdentifier("mint", "P15646", None),
data.InteractionIdentifier("go", "GO:0000451", "rRNA 2'-O-methylation"),
data.InteractionIdentifier("go", "GO:0005654", "nucleoplasm"),
data.InteractionIdentifier(
"go", "GO:0008171", "O-methyltransferase activity"
),
data.InteractionIdentifier("go", "GO:0015030", "Cajal body"),
data.InteractionIdentifier("reactome", "R-SCE-6791226", None),
data.InteractionIdentifier("dip", "DIP-698N", None),
data.InteractionIdentifier("rcsb pdb", "5WLC", None),
data.InteractionIdentifier("go", "GO:0030686", "90S preribosome"),
data.InteractionIdentifier("rcsb pdb", "6ND4", None),
],
annotations="crc64:56A8B958A7B6066E",
features=[data.InteractionIdentifier("protein a tag", "n-n", None)],
stoichiometry=None,
participant_identification=[
data.InteractionIdentifier("psi-mi", "MI:0396", "predetermined participant")
],
)
assert attr.asdict(val) == attr.asdict(
data.Interaction(
ids=[data.InteractionIdentifier("intact", "EBI-11665247", None)],
interactor1=i1,
interactor2=i2,
methods=[],
types=[
data.InteractionIdentifier("psi-mi", "MI:0915", "physical association")
],
xrefs=[],
annotations=[
data.InteractionIdentifier("figure legend", "Fig 2, Fig 3B", None)
],
confidence=[data.InteractionIdentifier("intact-miscore", "0.74", None)],
source_database=[data.InteractionIdentifier("psi-mi", "MI:0471", "MINT")],
is_negative=False,
publications=[pubs.reference(11726521)],
create_date=date(2003, 7, 8),
update_date=date(2016, 3, 23),
host_organisms=-1,
)
)
|
leitelm/RISE_scada
|
wsgi.py
|
Python
|
apache-2.0
| 189
| 0.021164
|
# coding=utf-8
# Run a test server.
from app import app
import sys
reload(sys)  # Python 2 only: reload restores the setdefaultencoding hook
sys.setdefaultencoding('utf-8')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=7000, debug=True)
|
dongjoon-hyun/tensorflow
|
tensorflow/python/training/basic_loops.py
|
Python
|
apache-2.0
| 2,343
| 0.004695
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic loop for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.basic_train_loop")
def basic_train_loop(supervisor, train_step_fn, args=None,
kwargs=None, master=""):
"""Basic loop to train a model.
Calls `train_step_fn` in a loop to train a model. The function is called as:
```python
train_step_fn(session, *args, **kwargs)
```
It is passed a `tf.Session` in addition to `args` and `kwargs`. The function
typically runs one training step in the session.
Args:
supervisor: `tf.train.Supervisor` to run the training services.
train_step_fn: Callable to execute one training step. Called
repeatedly as `train_step_fn(session, *args, **kwargs)`.
args: Optional positional arguments passed to `train_step_fn`.
kwargs: Optional keyword arguments passed to `train_step_fn`.
master: Master to use to create the training session. Defaults to
`""` which causes the session to be created in the local process.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
should_retry = True
while should_retry:
try:
should_retry = False
with supervisor.managed_session(master) as sess:
while not supervisor.should_stop():
train_step_fn(sess, *args, **kwargs)
except errors.AbortedError:
# Always re-run on AbortedError as it indicates a restart of one of the
# distributed tensorflow servers.
should_retry = True
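# Hedged driver sketch (not part of the original file): assumes a TF1-style
# graph in which `train_op` and `loss` were already built; names illustrative.
#
#   import tensorflow as tf
#
#   def my_train_step(session, history):
#       _, loss_value = session.run([train_op, loss])  # one optimization step
#       history.append(loss_value)
#
#   sv = tf.train.Supervisor(logdir="/tmp/train_logs")
#   tf.train.basic_train_loop(sv, my_train_step, args=[[]])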
|
sdavlenov/is210-week-05-warmup
|
task_03.py
|
Python
|
mpl-2.0
| 142
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""module that copies from another module"""
from task_01.peanut import BUTTER
JELLY = BUTTER
|
allieus/pylockfile
|
lockfile/sqlitelockfile.py
|
Python
|
mit
| 5,541
| 0.000722
|
from __future__ import absolute_import, division
import time
import os
try:
unicode
except NameError:
unicode = str
from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
class SQLiteLockFile(LockBase):
"Demonstrate SQL-based locking."
testdb = None
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = SQLiteLockFile('somefile')
>>> lock = SQLiteLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
if SQLiteLockFile.testdb is None:
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
SQLiteLockFile.testdb = testdb
import sqlite3
self.connection = sqlite3.connect(SQLiteLockFile.testdb)
c = self.connection.cursor()
try:
c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteLockFile.testdb)
def acquire(self, timeout=None):
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
            # Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
                    # Someone else has the lock and we are impatient.
raise AlreadyLocked("%s is already locked" % self.path)
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me (by %s)" %
(self.unique_name, self._who_is_locking()))
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
return not not rows
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
return not not cursor.fetchall()
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
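# Hedged usage sketch (not part of the original file): acquire and release a
# lock around a critical section; the filename is illustrative.
if __name__ == '__main__':
    lock = SQLiteLockFile('somefile', threaded=False)
    lock.acquire(timeout=5)  # raises LockTimeout if not acquired within 5 s
    try:
        pass  # ... work with 'somefile' while holding the lock ...
    finally:
        lock.release()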
|
npp/npp-api
|
data/management/commands/import_fcna_spending.py
|
Python
|
mit
| 1,982
| 0.007064
|
from django import db
from django.conf import settings
from django.core.management.base import NoArgsCommand
from data.models import FCNASpending
import csv
# National Priorities Project Data Repository
# import_fcna_spending.py
# Updated 7/23/2010, Joshua Ruihley, Sunlight Foundation
# Imports Federal Child Nutrition Act Spending Data
# source info: http://nces.ed.gov/ccd/bat/index.asp (accurate as of 7/23/2010)
# npp csv: http://assets.nationalpriorities.org/raw_data/education/fcna_spending.csv (updated 7/23/2010)
# destination model: FCNASpending
# HOWTO:
# 1) Download source files from url listed above
# 2) Convert source file to .csv with same formatting as npp csv
# 3) change SOURCE_FILE variable to the path of the source file you just created
# 4) change 'amount' column in data_FCNASpending table to type 'bigint'
# 5) Run as Django management command from your project path "python manage.py import_fcna_spending"
SOURCE_FILE = '%s/education/fcna_spending.csv' % (settings.LOCAL_DATA_ROOT)
class Command(NoArgsCommand):
    def handle_noargs(self, **options):
        def clean_int(value):
if value=='':
value=None
return value
data_reader = csv.reader(open(SOURCE_FILE))
for i, row in enumerate(data_reader):
if i == 0:
                year_row = row
else:
state = row[0]
agency_name = row[1]
agency_id = row[2]
for j,col in enumerate(row):
if j > 2:
record = FCNASpending()
record.year = year_row[j]
record.state = state
record.agency_name = agency_name
record.agency_id = agency_id
record.amount = clean_int(col)
record.save()
db.reset_queries()
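# For reference, the loop above implies a source CSV shaped like this
# (values illustrative, not real data): the first row carries the year for
# each amount column (index 3 onward), and an empty cell becomes None via
# clean_int().
#
#   state,agency_name,agency_id,2007,2008
#   NY,Example Agency,0001,1000000,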
|
slint/zenodo
|
zenodo/modules/support/config.py
|
Python
|
gpl-2.0
| 8,030
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2017 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Configuration for Zenodo Support."""
from __future__ import absolute_import, print_function
#: Maximum size of attachment in contact form.
SUPPORT_ATTACHMENT_MAX_SIZE = 1000 * 1000 * 10 # 10 MB
#: Description maximum length.
SUPPORT_DESCRIPTION_MAX_LENGTH = 5000
#: Description minimum length.
SUPPORT_DESCRIPTION_MIN_LENGTH = 20
#: Email body template.
SUPPORT_EMAIL_BODY_TEMPLATE = 'zenodo_support/email_body.html'
#: Email title template.
SUPPORT_EMAIL_TITLE_TEMPLATE = 'zenodo_support/email_title.html'
#: Support confirmation email body.
SUPPORT_EMAIL_CONFIRM_BODY = """Thank you for contacting Zenodo support.
We have received your message, and we will do our best to get back to you as \
soon as possible.
This is an automated confirmation of your request, please do not reply to this\
email.
Zenodo Support
https://zenodo.org
"""
#: Support confirmation email title.
SUPPORT_EMAIL_CONFIRM_TITLE = 'Zenodo Support'
#: Issue category for contact form.
SUPPORT_ISSUE_CATEGORIES = [
{
'key': 'file-modification',
'title': 'File modification',
'description': (
'All requests related to updating files in already published '
'record(s). This includes new file addition, file removal or '
'file replacement. '
'Before sending a request, please consider creating a '
'<a href="http://help.zenodo.org/#versioning">new version</a> '
'of your upload. Please first consult our '
'<a href="http://help.zenodo.org/#general">FAQ</a> to get familiar'
' with the file update conditions, to see if your case is '
'eligible.<br /><br />'
            'Your request has to contain <u>all</u> of the points below:'
'<ol>'
            '<li>Provide a justification for the file change in the '
'description.</li>'
'<li>Mention any use of the record(s) DOI in publications or '
'online, e.g.: list papers that cite your record and '
'provide links to posts on blogs and social media. '
'Otherwise, state that to the best of your knowledge the DOI has '
'not been used anywhere.</li>'
'<li>Specify the record(s) you want to update <u>by the Zenodo'
' URL</u>, e.g.: "https://zenodo.org/record/8428".<br />'
"<u>Providing only the record's title, publication date or a "
"screenshot with search result is not explicit enough</u>.</li>"
'<li>If you want to delete or update a file, specify it '
'<u>by its filename</u>, and mention if you want the name to '
'remain as is or changed (by default the filename of the new '
'file will be used).</li>'
'<li>Upload the new files below or provide a publicly-accessible '
'URL(s) with the files in the description.</li>'
'</ol>'
'<b><u>Not providing full information on any of the points above '
'will significantly slow down your request resolution</u></b>, '
'since our support staff will have to reply back with a request '
'for missing information.'
),
'recipients': ['info@zenodo.org'],
},
{
'key': 'upload-quota',
'title': 'File upload quota increase',
'description': (
'All requests for a quota increase beyond the 50GB limit. '
'Please include the following information with your request:'
'<ol>'
'<li>The total size of your dataset, number of files and the '
'largest file in the dataset. When referring to file sizes'
' use <a href="https://en.wikipedia.org/wiki/IEEE_1541-2002">'
'SI units</a></li>'
'<li>Information related to the organization, project or grant '
'which was involved in the research, which produced the '
'dataset.</li>'
'<li>Information on the currently in-review or future papers that '
'will cite this dataset (if applicable). If possible specify the '
'journal or conference.</li>'
'</ol>'
),
'recipients': ['info@zenodo.org'],
},
{
'key': 'record-inactivation',
'title': 'Record inactivation',
'description': (
'Requests related to record inactivation, either by the record '
'owner or a third party. Please specify the record(s) in question '
'by the URL(s), and reason for the inactivation.'
),
'recipients': ['info@zenodo.org'],
},
{
'key': 'openaire',
'title': 'OpenAIRE',
'description': (
'All questions related to OpenAIRE reporting and grants. '
'Before sending a request, make sure your problem was not '
'already resolved, see OpenAIRE '
'<a href="https://www.openaire.eu/support/faq">FAQ</a>. '
'For questions unrelated to Zenodo, you should contact OpenAIRE '
'<a href="https://www.openaire.eu/support/helpdesk">'
'helpdesk</a> directly.'
),
'recipients': ['info@zenodo.org'],
},
{
'key': 'partnership',
'title': 'Partnership, outreach and media',
'description': (
'All questions related to possible partnerships, outreach, '
            'invited talks and other official inquiries by media. '
'If you are a journal, organization or conference organizer '
'interested in using Zenodo as archive for your papers, software '
'or data, please provide details for your usecase.'
),
'recipients': ['info@zenodo.org'],
},
{
'key': 'tech-support',
'title': 'Security issue, bug or spam report',
'description': (
            'Report a technical issue or spam content on Zenodo. '
'Please provide details on how to reproduce the bug. '
'Upload any screenshots or files which are relevant to the issue '
'or to means of reproducing it. Include error messages and '
'error codes you might be getting in the description.<br /> '
'For REST API errors, provide a minimal code which produces the '
'issues. Use external services for scripts and long text'
', e.g.: <a href="https://gist.github.com/">GitHub Gist</a>. '
'<strong>Do not disclose your password or REST API access tokens.'
'</strong>'
),
'recipients': ['info@zenodo.org'],
},
{
'key': 'other',
'title': 'Other',
'description': (
'Questions which do not fit into any other category.'),
'recipients': ['info@zenodo.org'],
},
]
#: Email address of sender.
SUPPORT_SENDER_EMAIL = 'info@zenodo.org'
#: Name of the sender
SUPPORT_SENDER_NAME = 'Zenodo'
#: Email address for support.
SUPPORT_SUPPORT_EMAIL = ['info@zenodo.org']
|
171121130/SWI
|
venv/Lib/site-packages/tablib/formats/_json.py
|
Python
|
mit
| 1,312
| 0
|
# -*- coding: utf-8 -*-
""" Tablib - JSON Support
"""
import decimal
import tablib
try:
import ujson as json
except ImportError:
import json
title = 'json'
extensions = ('json', 'jsn')
def date_handler(obj):
if isinstance(obj, decimal.Decimal):
return str(obj)
elif hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
return obj
# return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def export_set(dataset):
"""Returns JSON representation of Dataset."""
return json.dumps(dataset.dict, default=date_handler)
def export_book(databook):
"""Returns JSON representation of Databook."""
return json.dumps(databook._package(), default=date_handler)
def import_set(dset, in_stream):
"""Returns dataset from JSON stream."""
dset.wipe()
dset.dict = json.loads(in_stream)
def import_book(dbook, in_stream):
"""Returns databook from JSON stream."""
dbook.wipe()
for sheet in json.loads(in_stream):
data = tablib.Dataset()
        data.title = sheet['title']
data.dict = sheet['data']
        dbook.add_sheet(data)
def detect(stream):
"""Returns True if given stream is valid JSON."""
try:
json.loads(stream)
return True
except ValueError:
return False
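# Hedged round-trip sketch (not part of the original file), using the
# module-level helpers above with a small tablib Dataset:
if __name__ == '__main__':
    data = tablib.Dataset()
    data.headers = ['name', 'qty']
    data.append(['widget', 3])
    as_json = export_set(data)  # roughly '[{"name": "widget", "qty": 3}]'
    assert detect(as_json)
    restored = tablib.Dataset()
    import_set(restored, as_json)  # wipes `restored`, then loads the rows
    assert restored.dict == data.dict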
|
berjc/aus-senate-audit
|
aus_senate_audit/senate_election/simulated_senate_election.py
|
Python
|
apache-2.0
| 3,665
| 0.004366
|
# -*- coding: utf-8 -*-
""" Implements a Class for Representing a Simulated Senate Election. """
from collections import Counter
from random import random
from random import seed as set_seed
from time import asctime
from time import localtime
from aus_senate_audit.senate_election.base_senate_election import BaseSenateElection
class SimulatedSenateElection(BaseSenateElection):
""" Implements a class for representing a simulated senate election.
:ivar int _sample_increment_size: The number of ballots to add to the growing sample during each audit stage.
NOTE: The :attr:`_candidates` and :attr:`_candidate_ids` instance attributes are set as a [1, ..., :attr:`_m`].
"""
TYPE = 'Simulated'
DEFAULT_ID = 'SimulatedElection{}'
def __init__(self, seed, n, m, sample_increment_size):
""" Initializes a :class:`SimulatedSenateElection` object.
The number of seats in a simulated senate election is equal to the floor of the number of candidates in the
election divided by two.
:param int seed: The starting value for the random number generator.
:param int n: The total number of ballots cast in the election.
:param int m: The total number of candidates in the election.
:param int sample_increment_size: The number of ballots to add to the growing sample during each audit stage.
"""
super(SimulatedSenateElection, self).__init__()
self._n = n
self._m = m
self._seats = int(self._m / 2)
self._candidates = list(range(1, self._m + 1))
self._candidate_ids = list(range(1, self._m + 1))
self._election_id = SimulatedSenateElection.DEFAULT_ID.format(asctime(localtime()))
self._sample_increment_size = sample_increment_size
set_seed(seed) # Set the initial value of the RNG.
def draw_ballots(self):
""" Adds simulated ballots to the sample of ballots drawn thus far.
These ballots are biased so (1, 2, ..., m) is likely to be the winner. More precisely, each ballot candidate `i`
is given a value `i + v * U` where `U = uniform(0, 1)` and `v` is the level of noise. Then the candidates are
sorted into increasing order by these values. Note that the total number of ballots drawn may not exceed the
total number of cast votes, :attr:`_n`.
"""
v = self._m / 2.0 # Noise level to control position variance.
        batch_size = min(self._sample_increment_size, self._n - self._num_ballots_drawn)
for _ in range(batch_size):
candidate_values = [(i + v * random(), cid) for i, cid in enumerate(self._candidate_ids)]
ballot = tuple(cid for val, cid in sorted(candidate_values))
self.add_ballot(ballot, 1)
def get_outcome(self, ballot_weights):
""" Returns the outcome of a senate election with the given ballot weights.
The social choice function used in the simulated senate election is Borda count.
:param :class:`Counter` ballot_weights: A mapping from a ballot type to the number of ballots drawn of that
type.
:returns: The IDs of the candidates elected to the available seats, sorted in lexicographical order.
:rtype: tuple
"""
counter = Counter()
for ballot, weight in ballot_weights.items():
for i, cid in enumerate(ballot):
counter[cid] += weight * i
# Get the :attr:`_seat` candidates with the lowest Borda counts in increasing order.
winners = counter.most_common()[-self._seats:][::-1]
return tuple(sorted([cid for cid, count in winners]))
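# Worked sketch of the Borda tally in get_outcome (not part of the original
# file): lower counts are better, so candidate 1 takes the single seat here.
if __name__ == '__main__':
    weights = Counter({(1, 2, 3): 2, (2, 1, 3): 1})
    tally = Counter()
    for ballot, weight in weights.items():
        for i, cid in enumerate(ballot):
            tally[cid] += weight * i
    # tally == Counter({3: 6, 2: 2, 1: 1}); one seat -> lowest count wins
    winners = tally.most_common()[-1:][::-1]
    assert tuple(sorted(cid for cid, count in winners)) == (1,)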
|
atilag/qiskit-sdk-py
|
qiskit/extensions/qasm_simulator_cpp/snapshot.py
|
Python
|
apache-2.0
| 2,505
| 0
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
local_qiskit_simulator command to snapshot the quantum state.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import QuantumCircuit
from qiskit._instructionset import InstructionSet
from qiskit._quantumregister import QuantumRegister
from qiskit.qasm import _node as node
class SnapshotGate(Gate):
"""Simulator snapshot operation."""
def __init__(self, m, qubit, circ=None):
"""Create new snapshot gate."""
super().__init__("snapshot", [m], [qubit], circ)
def qasm(self):
"""Return OPENQASM string."""
qubit = self.arg[0]
m = self.param[0]
return self._qasmif("snapshot(%d) %s[%d];" % (m,
qubit[0].name,
qubit[1]))
def inverse(self):
"""Invert this gate."""
return self # self-inverse
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.snapshot(self.param[0], self.arg[0]))
def snapshot(self, m, q):
"""Cache the quantum state of local_qiskit_simulator."""
if isinstance(q, QuantumRegister):
gs = InstructionSet()
for j in range(q.size):
gs.add(self.snapshot(m, (q, j)))
return gs
self._check_qubit(q)
return self._attach(SnapshotGate(m, q, self))
# Add to QuantumCircuit and CompositeGate classes
QuantumCircuit.snapshot = snapshot
CompositeGate.snapshot = snapshot
# cache quantum state (identity)
QuantumCircuit.definitions["snapshot"] = {
"print": True,
"opaque": False,
"n_args": 1,
"n_bits": 1,
"args": ["m"],
"bits": ["a"],
# gate snapshot(m) a { }
"body": node.GateBody([])
}
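# Hedged usage sketch (not part of the original file); register and slot
# names are illustrative:
#
#   from qiskit import QuantumRegister, QuantumCircuit
#
#   q = QuantumRegister(2, 'q')
#   circ = QuantumCircuit(q)
#   circ.h(q[0])
#   circ.snapshot(0, q)  # snapshot slot 0, applied to every qubit in `q`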
|
martini97/django-ecommerce
|
checkout/urls.py
|
Python
|
gpl-3.0
| 276
| 0
|
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^carrinho/adicionar/(?P<slug>[\w_-]+)/$',
        views.CreateCartItemView.as_view(), name='create_cartitem'),
url(r'^carrinho/$', views.CartItemView.as_view(),
name='cart_item')
]
|
jorisvandenbossche/geopandas
|
geopandas/tests/test_explore.py
|
Python
|
bsd-3-clause
| 29,278
| 0.001366
|
import geopandas as gpd
import numpy as np
import pandas as pd
import pytest
from distutils.version import LooseVersion
folium = pytest.importorskip("folium")
branca = pytest.importorskip("branca")
matplotlib = pytest.importorskip("matplotlib")
mapclassify = pytest.importorskip("mapclassify")
import matplotlib.cm as cm # noqa
import matplotlib.colors as colors # noqa
from branca.colormap import StepColormap # noqa
BRANCA_05 = str(branca.__version__) > LooseVersion("0.4.2")
class TestExplore:
def setup_method(self):
self.nybb = gpd.read_file(gpd.datasets.get_path("nybb"))
self.world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
self.cities = gpd.read_file(gpd.datasets.get_path("naturalearth_cities"))
self.world["range"] = range(len(self.world))
self.missing = self.world.copy()
np.random.seed(42)
self.missing.loc[np.random.choice(self.missing.index, 40), "continent"] = np.nan
self.missing.loc[np.random.choice(self.missing.index, 40), "pop_est"] = np.nan
def _fetch_map_string(self, m):
out = m._parent.render()
out_str = "".join(out.split())
return out_str
def test_simple_pass(self):
"""Make sure default pass"""
self.nybb.explore()
self.world.explore()
self.cities.explore()
self.world.geometry.explore()
def test_choropleth_pass(self):
"""Make sure default choropleth pass"""
self.world.explore(column="pop_est")
def test_map_settings_default(self):
"""Check default map settings"""
m = self.world.explore()
assert m.location == [
pytest.approx(-3.1774349999999956, rel=1e-6),
pytest.approx(2.842170943040401e-14, rel=1e-6),
]
assert m.options["zoom"] == 10
assert m.options["zoomControl"] is True
assert m.position == "relative"
assert m.height == (100.0, "%")
assert m.width == (100.0, "%")
assert m.left == (0, "%")
assert m.top == (0, "%")
assert m.global_switches.no_touch is False
assert m.global_switches.disable_3d is False
assert "openstreetmap" in m.to_dict()["children"].keys()
def test_map_settings_custom(self):
"""Check custom map settings"""
m = self.nybb.explore(
zoom_control=False,
width=200,
height=200,
)
assert m.location == [
pytest.approx(40.70582377450201, rel=1e-6),
pytest.approx(-73.9778006856748, rel=1e-6),
]
assert m.options["zoom"] == 10
assert m.options["zoomControl"] is False
assert m.height == (200.0, "px")
assert m.width == (200.0, "px")
# custom XYZ tiles
m = self.nybb.explore(
            zoom_control=False,
width=200,
height=200,
tiles="https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}",
attr="Google",
)
out_str = self._fetch_map_string(m)
s = '"https://mt1.google.com/vt/lyrs=m\\u0026x={x}\\u0026y={y}\\u0026z={z}"'
assert s in out_str
assert '"attribution":"Google"' in out_str
m = self.nybb.explore(location=(40, 5))
assert m.location == [40, 5]
assert m.options["zoom"] == 10
m = self.nybb.explore(zoom_start=8)
assert m.location == [
pytest.approx(40.70582377450201, rel=1e-6),
pytest.approx(-73.9778006856748, rel=1e-6),
]
assert m.options["zoom"] == 8
m = self.nybb.explore(location=(40, 5), zoom_start=8)
assert m.location == [40, 5]
assert m.options["zoom"] == 8
def test_simple_color(self):
"""Check color settings"""
# single named color
m = self.nybb.explore(color="red")
out_str = self._fetch_map_string(m)
assert '"fillColor":"red"' in out_str
# list of colors
colors = ["#333333", "#367324", "#95824f", "#fcaa00", "#ffcc33"]
m2 = self.nybb.explore(color=colors)
out_str = self._fetch_map_string(m2)
for c in colors:
assert f'"fillColor":"{c}"' in out_str
# column of colors
df = self.nybb.copy()
df["colors"] = colors
m3 = df.explore(color="colors")
out_str = self._fetch_map_string(m3)
for c in colors:
assert f'"fillColor":"{c}"' in out_str
# line GeoSeries
m4 = self.nybb.boundary.explore(color="red")
out_str = self._fetch_map_string(m4)
assert '"fillColor":"red"' in out_str
def test_choropleth_linear(self):
"""Check choropleth colors"""
# default cmap
m = self.nybb.explore(column="Shape_Leng")
out_str = self._fetch_map_string(m)
assert 'color":"#440154"' in out_str
assert 'color":"#fde725"' in out_str
assert 'color":"#50c46a"' in out_str
assert 'color":"#481467"' in out_str
assert 'color":"#3d4e8a"' in out_str
# named cmap
m = self.nybb.explore(column="Shape_Leng", cmap="PuRd")
out_str = self._fetch_map_string(m)
assert 'color":"#f7f4f9"' in out_str
assert 'color":"#67001f"' in out_str
assert 'color":"#d31760"' in out_str
assert 'color":"#f0ecf5"' in out_str
assert 'color":"#d6bedc"' in out_str
def test_choropleth_mapclassify(self):
"""Mapclassify bins"""
# quantiles
m = self.nybb.explore(column="Shape_Leng", scheme="quantiles")
out_str = self._fetch_map_string(m)
assert 'color":"#21918c"' in out_str
assert 'color":"#3b528b"' in out_str
assert 'color":"#5ec962"' in out_str
assert 'color":"#fde725"' in out_str
assert 'color":"#440154"' in out_str
# headtail
m = self.world.explore(column="pop_est", scheme="headtailbreaks")
out_str = self._fetch_map_string(m)
assert '"fillColor":"#3b528b"' in out_str
assert '"fillColor":"#21918c"' in out_str
assert '"fillColor":"#5ec962"' in out_str
assert '"fillColor":"#fde725"' in out_str
assert '"fillColor":"#440154"' in out_str
# custom k
m = self.world.explore(column="pop_est", scheme="naturalbreaks", k=3)
out_str = self._fetch_map_string(m)
assert '"fillColor":"#21918c"' in out_str
assert '"fillColor":"#fde725"' in out_str
assert '"fillColor":"#440154"' in out_str
def test_categorical(self):
"""Categorical maps"""
# auto detection
m = self.world.explore(column="continent")
out_str = self._fetch_map_string(m)
assert 'color":"#9467bd","continent":"Europe"' in out_str
assert 'color":"#c49c94","continent":"NorthAmerica"' in out_str
assert 'color":"#1f77b4","continent":"Africa"' in out_str
assert 'color":"#98df8a","continent":"Asia"' in out_str
assert 'color":"#ff7f0e","continent":"Antarctica"' in out_str
assert 'color":"#9edae5","continent":"SouthAmerica"' in out_str
assert 'color":"#7f7f7f","continent":"Oceania"' in out_str
assert 'color":"#dbdb8d","continent":"Sevenseas(openocean)"' in out_str
# forced categorical
m = self.nybb.explore(column="BoroCode", categorical=True)
out_str = self._fetch_map_string(m)
assert 'color":"#9edae5"' in out_str
assert 'color":"#c7c7c7"' in out_str
assert 'color":"#8c564b"' in out_str
assert 'color":"#1f77b4"' in out_str
assert 'color":"#98df8a"' in out_str
# pandas.Categorical
df = self.world.copy()
df["categorical"] = pd.Categorical(df["name"])
m = df.explore(column="categorical")
out_str = self._fetch_map_string(m)
for c in np.apply_along_axis(colors.to_hex, 1, cm.tab20(range(20))):
assert f'"fillColor":"{c}"' in out_str
# custom cmap
m = self.nybb.explore(column="BoroName", cmap="Set1")
out_str = self._fetch_map_string(m)
assert 'color":"#999999"' in out_str
assert 'color":"#a65628"' in out_str
|
nerosketch/djing
|
tariff_app/views.py
|
Python
|
unlicense
| 5,574
| 0.001435
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db.models import Count
from django.urls import reverse_lazy
from django.utils.translation import ugettext as _
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.views.generic import DeleteView
from guardian.decorators import permission_required_or_403 as permission_required
from djing.global_base_views import OrderedFilteredList
from djing.lib.mixins import LoginAdminMixin
from abonapp.models import Abon
from .models import Tariff, PeriodicPay
from djing import lib
from djing.lib.decorators import only_admins
from . import forms
login_decs = login_required, only_admins
class TariffsListView(LoginAdminMixin, PermissionRequiredMixin, OrderedFilteredList):
"""
Show Services(Tariffs) list
"""
permission_required = 'tariff_app.view_tariff'
template_name = 'tariff_app/tarifs.html'
context_object_name = 'tariflist'
model = Tariff
queryset = Tariff.objects.annotate(usercount=Count('linkto_tariff__abon'))
@login_required
@only_admins
@permission_required('tariff_app.change_tariff')
def edit_tarif(request, tarif_id=0):
tarif_id = lib.safe_int(tarif_id)
if tarif_id == 0:
if not request.user.has_perm('tariff_app.add_tariff'):
raise PermissionDenied
tarif = None
else:
if not request.user.has_perm('tariff_app.change_tariff'):
raise PermissionDenied
tarif = get_object_or_404(Tariff, pk=tarif_id)
if request.method == 'POST':
frm = forms.TariffForm(request.POST, instance=tarif)
if frm.is_valid():
service = frm.save()
if tarif is None:
request.user.log(request.META, 'csrv', '"%(title)s", "%(descr)s", %(amount).2f' % {
'title': service.title or '-',
'descr': service.descr or '-',
'amount': service.amount or 0.0
})
messages.success(request, _('Service has been saved'))
return redirect('tarifs:edit', tarif_id=service.pk)
else:
            messages.warning(request, _('Some fields were filled incorrectly, please try again'))
else:
frm = forms.TariffForm(instance=tarif)
return render(request, 'tariff_app/editTarif.html', {
'form': frm,
'tarif_id': tarif_id
})
class TariffDeleteView(LoginAdminMixin, PermissionRequiredMixin, DeleteView):
permission_required = 'tariff_app.delete_tariff'
model = Tariff
pk_url_kwarg = 'tid'
success_url = reverse_lazy('tarifs:home')
def delete(self, request, *args, **kwargs):
res = super().delete(request, *args, **kwargs)
        request.user.log(request.META, 'dsrv', '"%(title)s", "%(descr)s", %(amount).2f' % {
            'title': self.object.title or '-',
            'descr': self.object.descr or '-',
'amount': self.object.amount or 0.0
})
messages.success(request, _('Service has been deleted'))
return res
def get_context_data(self, **kwargs):
kwargs['tid'] = self.kwargs.get('tid')
return super().get_context_data(**kwargs)
class PeriodicPaysListView(LoginAdminMixin, PermissionRequiredMixin, OrderedFilteredList):
permission_required = 'tariff_app.view_periodicpay'
context_object_name = 'pays'
model = PeriodicPay
template_name = 'tariff_app/periodic_pays/list.html'
@login_required
@only_admins
def periodic_pay(request, pay_id=0):
if pay_id != 0:
pay_inst = get_object_or_404(PeriodicPay, pk=pay_id)
if not request.user.has_perm('tariff_app.change_periodicpay'):
raise PermissionDenied
else:
pay_inst = None
if not request.user.has_perm('tariff_app.add_periodicpay'):
raise PermissionDenied
if request.method == 'POST':
frm = forms.PeriodicPayForm(request.POST, instance=pay_inst)
if frm.is_valid():
new_periodic_pay = frm.save()
if pay_inst is None:
comment = _('New periodic pay successfully created')
else:
comment = _('Periodic pay has been changed')
messages.success(request, comment)
return redirect('tarifs:periodic_pay_edit', new_periodic_pay.pk)
else:
            messages.error(request, _('Some fields were filled incorrectly, please try again'))
else:
frm = forms.PeriodicPayForm(instance=pay_inst)
return render(request, 'tariff_app/periodic_pays/add_edit.html', {
'pay_instance': pay_inst,
'form': frm
})
class ServiceUsers(LoginAdminMixin, OrderedFilteredList):
template_name = 'tariff_app/service_users.html'
model = Abon
def get_queryset(self):
tarif_id = self.kwargs.get('tarif_id')
return Abon.objects.filter(current_tariff__tariff__id=tarif_id).select_related('group')
def get_context_data(self, **kwargs):
if hasattr(self, 'tariff'):
tariff = getattr(self, 'tariff')
else:
tarif_id = self.kwargs.get('tarif_id')
tariff = get_object_or_404(Tariff, pk=tarif_id)
setattr(self, 'tariff', tariff)
self.tariff = tariff
context = {
'tariff': tariff,
'total': self.object_list.count()
}
context.update(kwargs)
return super().get_context_data(**context)
|
scribblemaniac/MCEdit2Blender
|
blocks/__init__.py
|
Python
|
gpl-3.0
| 132
| 0.015152
|
__all__ = ["Block", "Unknown", "Multitextured", "DataValues", "Stairs", "MultitexturedStairs", "Slab", "MultitexturedSlab",
|
"Log"]
|
|
vanhonit/xmario_center
|
test/gtk3/test_custom_lists.py
|
Python
|
gpl-3.0
| 1,958
| 0.005618
|
#!/usr/bin/python
from gi.repository import Gtk, GObject
import time
import unittest
from testutils import setup_test_env
setup_test_env()
from softwarecenter.enums import XapianValues, ActionButtons
TIMEOUT=300
class TestCustomLists(unittest.TestCase):
def _debug(self, index, model, needle):
print ("Expected '%s' at index '%s', " +
"and custom list contained: '%s'") % (
needle, index, model[index][0].get_value(XapianValues.PKGNAME))
def assertPkgInListAtIndex(self, index, model, needle):
doc = model[index][0]
self.assertEqual(doc.get_value(XapianValues.PKGNAME),
needle, self._debug(index, model, needle))
def test_custom_lists(self):
from softwarecenter.ui.gtk3.panes.availablepane import get_test_window
win = get_test_window()
pane = win.get_data("pane")
self._p()
pane.on_search_terms_changed(None, "ark,artha,software-center")
self._p()
model = pane.app_view.tree_view.get_model()
# custom list should return three items
self.assertTrue(len(model) == 3)
# check package names, ordering is default "by relevance"
        self.assertPkgInListAtIndex(0, model, "ark")
self.assertPkgInListAtIndex(1, model, "software-center")
self.assertPkgInListAtIndex(2, model, "artha")
# check that the status bar offers to install the packages
install_button = pane.action_bar.get_button(ActionButtons.INSTALL)
self.assertNotEqual(install_button, None)
GObject.timeout_add(TIMEOUT, lambda: win.destroy())
Gtk.main()
    def _p(self):
        for i in range(10):
time.sleep(0.1)
while Gtk.events_pending():
Gtk.main_iteration()
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.INFO)
unittest.main()
|
veltzer/riddling
|
scripts/wrapper_lacheck.py
|
Python
|
gpl-3.0
| 939
| 0
|
#!/usr/bin/python
"""
This is a wrapper to run the 'lacheck(1)' tool from the 'lacheck' package.
Why do we need this wrapper?
- lacheck does NOT report in its exit status whether it had warnings or not.
- it is too verbose when there are no warnings.
"""
import sys # for argv, exit, stderr
import subprocess # for Popen
def main():
out = subprocess.check_output([
'lacheck',
sys.argv[1],
])
errors = False
remember = None
printed_remember = False
for line in out.split('\n'):
if line.startswith('**'):
remember = line
printed_remember = False
continue
if line == '':
continue
# this is a warning or error
errors = True
if not printed_remember:
print(remember)
printed_remember = True
print(line)
if errors:
sys.exit(1)
if __name__ == '__main__':
main()
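# Intended invocation (filename illustrative); the wrapper exits non-zero
# exactly when lacheck emitted warnings, and stays quiet otherwise:
#
#   python wrapper_lacheck.py paper.tex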
|
DirkHoffmann/indico
|
indico/modules/categories/compat.py
|
Python
|
gpl-3.0
| 1,374
| 0.002183
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import re
from flask import abort, redirect, request
from werkzeug.exceptions import NotFound
from indico.modules.categories.models.legacy_mapping import LegacyCategoryMapping
from indico.web.flask.util import url_for
from indico.web.rh import RHSimple
@RHSimple.wrap_function
def compat_category(legacy_category_id, path=None):
if not re.match(r'^\d+l\d+$', legacy_category_id):
abort(404)
mapping = LegacyCategoryMapping.query.filter_by(legacy_category_id=legacy_category_id).first()
if mapping is None:
        raise NotFound(f'Legacy category {legacy_category_id} does not exist')
view_args = request.view_args.copy()
view_args['legacy_category_id'] = mapping.category_id
# To create the same URL with the proper ID we take advantage of the
# fact that the legacy endpoint works perfectly fine with proper IDs
# too (you can pass an int for a string argument), but due to the
    # weight of the `int` converter used for new endpoints, the URL will
# then be handled by the proper endpoint instead of this one.
return redirect(url_for(request.endpoint, **dict(request.args.to_dict(), **view_args)), 301)
|
flav-io/flavio
|
flavio/physics/zdecays/test_zdecays.py
|
Python
|
mit
| 6,570
| 0.002435
|
import unittest
import flavio
from math import sqrt, pi
from flavio.physics.zdecays.gammazsm import Zobs, pb
from flavio.physics.zdecays.gammaz import GammaZ_NP
from flavio.physics.zdecays import smeftew
par = flavio.default_parameters.get_central_all()
class TestGammaZ(unittest.TestCase):
def test_obs_sm(self):
# check the SM predictions
self.assertAlmostEqual(flavio.sm_prediction('GammaZ'),
2.4950, delta=0.0015)
self.assertAlmostEqual(flavio.sm_prediction('GammaZ'),
1 / par['tau_Z'], delta=0.0015)
self.assertAlmostEqual(flavio.sm_prediction('sigma_had') / pb / 1e3,
41.488, delta=0.05)
self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->ee)'),
83.966e-3, delta=0.001e-3)
self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->mumu)'),
83.966e-3, delta=0.001e-3)
self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->tautau)'),
83.776e-3, delta=0.001e-3)
self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->uu)'),
299.936e-3, delta=0.04e-3)
self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->cc)'),
299.860e-3, delta=0.04e-3)
self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->dd)'),
382.770e-3, delta=0.04e-3)
self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->ss)'),
382.770e-3, delta=0.04e-3)
self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->bb)'),
375.724e-3, delta=0.02e-3)
self.assertAlmostEqual(flavio.sm_prediction('Gamma(Z->nunu)'),
167.157e-3, delta=0.01e-3)
self.assertAlmostEqual(flavio.sm_prediction('R_l'),
20750.9e-3, delta=2e-3)
self.assertAlmostEqual(flavio.sm_prediction('R_c'),
172.23e-3, delta=0.01e-3)
self.assertAlmostEqual(flavio.sm_prediction('R_b'),
215.80e-3, delta=0.01e-3)
self.assertAlmostEqual(flavio.sm_prediction('R_e'),
20.743, delta=0.01)
self.assertAlmostEqual(flavio.sm_prediction('R_mu'),
20.743, delta=0.01)
self.assertAlmostEqual(flavio.sm_prediction('R_tau'),
20.743, delta=0.05)
self.assertAlmostEqual(flavio.sm_prediction('R_uc'),
0.1724, delta=0.0002)
self.assertEqual(flavio.sm_prediction('R_uc'),
(flavio.sm_prediction('R_u') + flavio.sm_prediction('R_c')) / 2)
def test_r_sm(self):
        # check that the SM predictions for the Ri agree with the Gammas
par = flavio.default_parameters.get_central_all()
mh = par['m_h']
mt = par['m_t']
als = par['alpha_s']
Da = 0.059
mZ = par['m_Z']
arg = (mh, mt, als, Da, mZ)
Rl = Zobs('Rl', *arg)
Rc = Zobs('Rc', *arg)
Rb = Zobs('Rb', *arg)
Ge = Zobs('Gammae,mu', *arg)
Gmu = Zobs('Gammae,mu', *arg)
Gtau = Zobs('Gammatau', *arg)
Gu = Zobs('Gammau', *arg)
Gd = Zobs('Gammad,s', *arg)
Gs = Zobs('Gammad,s', *arg)
Gc = Zobs('Gammac', *arg)
Gb = Zobs('Gammab', *arg)
Ghad = Gu + Gd + Gc + Gs + Gb
Gl = (Ge + Gmu + Gtau) / 3.
self.assertAlmostEqual(Rl, Ghad / Gl, delta=1e-4)
self.assertAlmostEqual(Rc, Gc / Ghad, delta=1e-4)
self.assertAlmostEqual(Rb, Gb / Ghad, delta=1e-4)
def test_obs_sm_fv(self):
# check the SM predictions for LFV decays
self.assertEqual(flavio.sm_prediction('BR(Z->emu)'), 0)
self.assertEqual(flavio.sm_prediction('BR(Z->etau)'), 0)
        self.assertEqual(flavio.sm_prediction('BR(Z->mutau)'), 0)
def test_Gamma_NP(self):
# compare NP contributions to A.49-A.52 from 1706.08945
GF, mZ, s2w_eff = par['GF'], par['m_Z'], par['s2w']*1.0010
d_gV = 0.055
d_gA = 0.066
# A.49-A.52 from 1706.08945
        dGamma_Zll = sqrt(2)*GF*mZ**3/(6*pi) * (-d_gA + (-1+4*s2w_eff)*d_gV)
dGamma_Znn = sqrt(2)*GF*mZ**3/(6*pi) * (d_gA + d_gV)
dGamma_Zuu = sqrt(2)*GF*mZ**3/(pi) * (d_gA -1/3*(-3+8*s2w_eff)*d_gV) /2
dGamma_Zdd = sqrt(2)*GF*mZ**3/(pi) * (-3/2*d_gA +1/2*(-3+4*s2w_eff)*d_gV) /3
# term squared in d_gV and d_gA not included in 1706.08945
d_g_squared = sqrt(2)*GF*mZ**3/(3*pi)*(abs(d_gV)**2+abs(d_gA)**2)
self.assertAlmostEqual(
dGamma_Zll + d_g_squared,
GammaZ_NP(par, 1, smeftew.gV_SM('e', par), d_gV,
smeftew.gA_SM('e', par), d_gA)
)
self.assertAlmostEqual(
dGamma_Znn + d_g_squared,
GammaZ_NP(par, 1, smeftew.gV_SM('nue', par), d_gV,
smeftew.gA_SM('nue', par), d_gA)
)
self.assertAlmostEqual(
dGamma_Zuu + 3*d_g_squared,
GammaZ_NP(par, 3, smeftew.gV_SM('u', par), d_gV,
smeftew.gA_SM('u', par), d_gA)
)
self.assertAlmostEqual(
dGamma_Zdd + 3*d_g_squared,
GammaZ_NP(par, 3, smeftew.gV_SM('d', par), d_gV,
smeftew.gA_SM('d', par), d_gA)
)
class TestAFBZ(unittest.TestCase):
def test_afbz_sm(self):
for l in ['e', 'mu', 'tau']:
self.assertAlmostEqual(flavio.sm_prediction('A(Z->{}{})'.format(l, l)),
0.1472, delta=0.0002, msg="Failed for {}".format(l))
self.assertAlmostEqual(flavio.sm_prediction('AFB(Z->{}{})'.format(l, l)),
0.0163, delta=0.0002, msg="Failed for {}".format(l))
self.assertAlmostEqual(flavio.sm_prediction('A(Z->bb)'),
0.935, delta=0.001)
self.assertAlmostEqual(flavio.sm_prediction('A(Z->cc)'),
0.668, delta=0.001)
self.assertAlmostEqual(flavio.sm_prediction('A(Z->ss)'),
0.935, delta=0.001)
self.assertAlmostEqual(flavio.sm_prediction('AFB(Z->bb)'),
0.1032, delta=0.0002)
self.assertAlmostEqual(flavio.sm_prediction('AFB(Z->cc)'),
0.0738, delta=0.0002)
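# For reference, the first NP shift coded in test_Gamma_NP above is eq. A.49
# of arXiv:1706.08945, which in LaTeX reads (the term quadratic in d_gV, d_gA
# is the extra piece noted in the comment there):
#
#   \delta\Gamma_{Z\to\ell\ell} = \frac{\sqrt{2}\,G_F m_Z^3}{6\pi}
#       \left[-\delta g_A + \left(-1 + 4 s^2_{W,\mathrm{eff}}\right)\delta g_V\right]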
|
jespino/urwintranet
|
urwintranet/ui/views/__init__.py
|
Python
|
apache-2.0
| 108
| 0
|
# -*- coding: utf-8 -*-
"""
urwintranet.ui.views
~~~~~~~~~~~~~~~~~~
"""
from . import (auth, home, parts)
|
thisisshi/cloud-custodian
|
tests/test_airflow.py
|
Python
|
apache-2.0
| 3,504
| 0.001427
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
import jmespath
class TestApacheAirflow(BaseTest):
def test_airflow_environment_value_filter(self):
session_factory = self.replay_flight_data('test_airflow_environment_value_filter')
p = self.load_policy(
{
"name": "airflow-name-filter",
"resource": "airflow",
"filters": [
{
"type": "value",
"key": "Name",
"op": "eq",
"value": "testEnvironment",
}
]
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['Name'], 'testEnvironment')
self.assertEqual(resources[0]['c7n:MatchedFilters'], ['Name'])
def test_airflow_environment_kms_filter(self):
session_factory = self.replay_flight_data('test_airflow_environment_kms_filter')
kms = session_factory().client('kms')
expression = 'KmsKey'
p = self.load_policy(
{
"name": "airflow-kms-filter",
"resource": "airflow",
"filters": [
{
"type": "kms-key",
"key": "c7n:AliasName",
"value": "alias/mwaa",
}
]
},
session_factory=session_factory,
)
resources = p.run()
        self.assertEqual(len(resources), 1)
aliases = kms.list_aliases(KeyId=(jmespath.search(expression, resources[0])))
self.assertEqual(aliases['Aliases'][0]['AliasName'], 'alias/mwaa')
def test_airflow_environment_tag(self):
session_factory = self.replay_flight_data('test_airflow_environment_tag')
new_tag = {'env': 'dev'}
p = self.load_policy(
{
'name': 'airflow-tag',
'resource': 'airflow',
'filters': [{
'tag:env': 'absent'
}],
'actions': [{
'type': 'tag',
'tags': new_tag
}]
},
session_factory=session_factory
)
resources = p.run()
self.assertEqual(1, len(resources))
name = resources[0].get('Name')
airflow = session_factory().client('mwaa')
call = airflow.get_environment(Name=name)
        self.assertEqual(new_tag, call['Environment'].get('Tags'))
def test_airflow_environment_untag(self):
        session_factory = self.replay_flight_data('test_airflow_environment_untag')
p = self.load_policy(
{
'name': 'airflow-untag',
'resource': 'airflow',
'filters': [{
'tag:env': 'dev'
}],
'actions': [{
'type': 'remove-tag',
'tags': ['env']
}]
},
session_factory=session_factory
)
resources = p.run()
self.assertEqual(1, len(resources))
name = resources[0].get('Name')
airflow = session_factory().client('mwaa')
call = airflow.get_environment(Name=name)
self.assertEqual({}, call['Environment'].get('Tags'))
|
MikeLaptev/sandbox_python
|
mera/selenium_training_automation/pages/page.py
|
Python
|
apache-2.0
| 913
| 0.001095
|
"""
Created on Sep 14, 2015
@author: Mikhail
"""
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import visibility_of_element_located, visibility_of
from selenium.common.exceptions import TimeoutException
__author__ = 'Mikhail'
class Page(object):
def __init__(self, driver, url):
self.driver = driver
self.url = url
self.wait = WebDriverWait(self.driver, 5)
def open_page(self, url):
self.driver.get(url)
def is_element_visible_by_locator(self, locator):
try:
self.wait.until(visibility_of_element_located(locator))
except TimeoutException:
return False
return True
def is_element_visible(self, element):
try:
self.wait.until(visibility_of(element))
except TimeoutException:
return False
return True
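# Hedged usage sketch (not part of the original file); driver choice, URL and
# locator are illustrative:
#
#   from selenium import webdriver
#   from selenium.webdriver.common.by import By
#
#   driver = webdriver.Firefox()
#   page = Page(driver, 'http://example.com')
#   page.open_page(page.url)
#   assert page.is_element_visible_by_locator((By.TAG_NAME, 'body'))
#   driver.quit()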
|
Erotemic/local
|
misc/code/TAing/fixcmake.py
|
Python
|
gpl-3.0
| 478
| 0
|
import os
import fnmatch
def find_files(directory, pattern):
    for root, dirs, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
for fname in find_files('.', 'CMakeLists.txt'):
f = open(fname, 'a')
f.write(
        '\ninclude_directories(${OPENGL_INCLUDE_PATH} ${GLUT_INCLUDE_PATH})')
f.close()
|
dever860/cabot
|
cabot/cabotapp/models.py
|
Python
|
mit
| 30,857
| 0.00188
|
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from polymorphic import PolymorphicModel
from django.db.models import F
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from celery.exceptions import SoftTimeLimitExceeded
from .jenkins import get_job_status
from .alert import (send_alert, AlertPlugin, AlertPluginUserData, update_alert_plugins)
from .calendar import get_events
from .graphite import parse_metric
from .graphite import get_data
from .tasks import update_service, update_instance
from datetime import datetime, timedelta
from django.utils import timezone
import json
import re
import time
import os
import subprocess
import requests
from celery.utils.log import get_task_logger
RAW_DATA_LIMIT = 5000
logger = get_task_logger(__name__)
CHECK_TYPES = (
('>', 'Greater than'),
('>=', 'Greater than or equal'),
('<', 'Less than'),
('<=', 'Less than or equal'),
('==', 'Equal to'),
)
def serialize_recent_results(recent_results):
if not recent_results:
return ''
def result_to_value(result):
if result.succeeded:
return '1'
else:
return '-1'
vals = [result_to_value(r) for r in recent_results]
vals.reverse()
return ','.join(vals)
def calculate_debounced_passing(recent_results, debounce=0):
"""
`debounce` is the number of previous failures we need (not including this)
    to mark a check as passing or failing
Returns:
True if passing given debounce factor
False if failing
"""
if not recent_results:
return True
debounce_window = recent_results[:debounce + 1]
for r in debounce_window:
if r.succeeded:
return True
return False
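# Hedged illustration of the debounce semantics above (not part of the
# original file): results are most-recent-first, and a single success inside
# the window keeps the check passing.
#
#   class _FakeResult(object):
#       def __init__(self, ok):
#           self.succeeded = ok
#
#   assert calculate_debounced_passing([_FakeResult(False), _FakeResult(True)], debounce=1)
#   assert not calculate_debounced_passing([_FakeResult(False), _FakeResult(False)], debounce=1)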
class CheckGroupMixin(models.Model):
class Meta:
abstract = True
PASSING_STATUS = 'PASSING'
WARNING_STATUS = 'WARNING'
ERROR_STATUS = 'ERROR'
CRITICAL_STATUS = 'CRITICAL'
CALCULATED_PASSING_STATUS = 'passing'
CALCULATED_INTERMITTENT_STATUS = 'intermittent'
CALCULATED_FAILING_STATUS = 'failing'
STATUSES = (
(CALCULATED_PASSING_STATUS, CALCULATED_PASSING_STATUS),
(CALCULATED_INTERMITTENT_STATUS, CALCULATED_INTERMITTENT_STATUS),
(CALCULATED_FAILING_STATUS, CALCULATED_FAILING_STATUS),
)
IMPORTANCES = (
(WARNING_STATUS, 'Warning'),
(ERROR_STATUS, 'Error'),
(CRITICAL_STATUS, 'Critical'),
)
name = models.TextField()
users_to_notify = models.ManyToManyField(
User,
blank=True,
help_text='Users who should receive alerts.',
)
alerts_enabled = models.BooleanField(
default=True,
help_text='Alert when this service is not healthy.',
)
status_checks = models.ManyToManyField(
'StatusCheck',
blank=True,
help_text='Checks used to calculate service status.',
)
last_alert_sent = models.DateTimeField(
null=True,
blank=True,
)
alerts = models.ManyToManyField(
'AlertPlugin',
blank=True,
help_text='Alerts channels through which you wish to be notified'
)
email_alert = models.BooleanField(default=False)
hipchat_alert = models.BooleanField(default=True)
sms_alert = models.BooleanField(default=False)
telephone_alert = models.BooleanField(
default=False,
help_text='Must be enabled, and check importance set to Critical, to receive telephone alerts.',
)
overall_status = models.TextField(default=PASSING_STATUS)
old_overall_status = models.TextField(default=PASSING_STATUS)
hackpad_id = models.TextField(
null=True,
blank=True,
verbose_name='Recovery instructions',
help_text='Gist, Hackpad or Refheap js embed with recovery instructions e.g. https://you.hackpad.com/some_document.js'
)
def __unicode__(self):
return self.name
def most_severe(self, check_list):
failures = [c.importance for c in check_list]
if self.CRITICAL_STATUS in failures:
return self.CRITICAL_STATUS
if self.ERROR_STATUS in failures:
return self.ERROR_STATUS
if self.WARNING_STATUS in failures:
return self.WARNING_STATUS
return self.PASSING_STATUS
@property
def is_critical(self):
"""
Break out separately because it's a bit of a pain to
get wrong.
"""
if self.old_overall_status != self.CRITICAL_STATUS and self.overall_status == self.CRITICAL_STATUS:
return True
return False
def alert(self):
if not self.alerts_enabled:
return
if self.overall_status != self.PASSING_STATUS:
# Don't alert every time
if self.overall_status == self.WARNING_STATUS:
if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.NOTIFICATION_INTERVAL)) < self.last_alert_sent:
return
elif self.overall_status in (self.CRITICAL_STATUS, self.ERROR_STATUS):
if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.ALERT_INTERVAL)) < self.last_alert_sent:
return
self.last_alert_sent = timezone.now()
else:
# We don't count "back to normal" as an alert
self.last_alert_sent = None
self.save()
self.snapshot.did_send_alert = True
self.snapshot.save()
send_alert(self, duty_officers=get_duty_officers())
@property
def recent_snapshots(self):
snapshots = self.snapshots.filter(
time__gt=(timezone.now() - timedelta(minutes=60 * 24)))
snapshots = list(snapshots.values())
for s in snapshots:
s['time'] = time.mktime(s['time'].timetuple())
return snapshots
def graphite_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='graphitestatuscheck')
def http_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='httpstatuscheck')
def jenkins_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='jenkinsstatuscheck')
def active_graphite_status_checks(self):
return self.graphite_status_checks().filter(active=True)
def active_http_status_checks(self):
return self.http_status_checks().filter(active=True)
def active_jenkins_status_checks(self):
return self.jenkins_status_checks().filter(active=True)
def active_status_checks(self):
return self.status_checks.filter(active=True)
def inactive_status_checks(self):
return self.status_checks.filter(active=False)
def all_passing_checks(self):
return self.active_status_checks().filter(calculated_status=self.CALCULATED_PASSING_STATUS)
def all_failing_checks(self):
return self.active_status_checks().exclude(calculated_status=self.CALCULATED_PASSING_STATUS)
class Service(CheckGroupMixin):
def update_status(self):
self.old_overall_status = self.overall_status
# Only active checks feed into our calculation
status_checks_failed_count = self.all_failing_checks().count()
self.overall_status = self.most_severe(self.all_failing_checks())
self.snapshot = ServiceStatusSnapshot(
service=self,
num_checks_active=self.active_status_checks().count(),
num_checks_passing=self.active_status_checks(
).count() - status_checks_failed_count,
num_checks_failing=status_checks_failed_count,
overall_status=self.overall_status,
time=timezone.now(),
)
self.snapshot.save()
self.save()
if not (self.overall_status == Service.PASSING_STATUS and self.old_overall_status == Service.PASSING_STATUS):
self.alert()
instances = models.ManyToManyField(
'Instance',
blank=True,
help_text='Instances this service is running on.',
)
url = models.TextField(
|
wolfram74/flask_exploration
|
run.py
|
Python
|
mit
| 66
| 0
|
from project import app
if __name__ == '__main__':
    app.run()
|
longnow/panlex-tools
|
libpython/gary/sh_parser.py
|
Python
|
mit
| 1,521
| 0.007242
|
from collections import defaultdict, namedtuple
import regex as re
from gary import ignore_parens_list
Record = namedtuple('Record', ['dob', 'eng', 'pos', 'phn'])
@ignore_parens_list
def split_words(text:str) -> list:
return re.split('\s*;\s*', text)
class ShParser:
def __init__(self, text):
self.entries = []
pattern = re.compile('^\\\\(\w+)\s+(.*)$')
self.entries = []
curr = defaultdict(list)
for line in text.splitlines():
match = pattern.search(line)
if match and not match[1].startswith('_'):
if match[1].strip() == 'lx' and len(curr) > 0:
self.entries.append(curr)
curr = defaultdict(list)
curr['lx'] = match[2]
curr['ps'] = ''
if match[1] == 'ps':
if len(curr['ge']) > 0:
self.entries.append(curr)
curr['ge'] = []
curr['ps'] = match[2]
if match[1] == 'ge':
word_list = split_words(match[2])
for word in word_list:
curr['ge'].append(word)
def getEntries(self):
        for entry in self.entries:
if 'lx' in entry:
dob = entry['lx']
else:
dob = ''
            eng = '‣'.join(entry['ge'])
pos = entry['ps']
phn = entry['ph']
yield Record(dob,eng,pos,phn)
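# Hedged usage sketch (not part of the original file) with a tiny inline SFM
# fragment; note an entry is flushed only when the next \lx line arrives.
if __name__ == '__main__':
    sample = "\\lx kawa\n\\ps n\n\\ge water; river\n\\lx api\n"
    for rec in ShParser(sample).getEntries():
        print(rec.dob, rec.pos, rec.eng)  # -> kawa n water‣river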
|
cupy/cupy
|
cupyx/jit/_cuda_types.py
|
Python
|
mit
| 3,661
| 0
|
import numpy
from cupy._core._scalar import get_typename
# Base class for cuda types.
class TypeBase:
def __str__(self):
raise NotImplementedError
def declvar(self, x):
return f'{self} {x}'
class Void(TypeBase):
def __init__(self):
pass
def __str__(self):
return 'void'
class Scalar(TypeBase):
def __init__(self, dtype):
self.dtype = numpy.dtype(dtype)
def __str__(self):
dtype = self.dtype
if dtype == numpy.float16:
# For the performance
dtype = numpy.dtype('float32')
return get_typename(dtype)
def __eq__(self, other):
return isinstance(other, Scalar) and self.dtype == other.dtype
def __hash__(self):
return hash(self.dtype)
class ArrayBase(TypeBase):
def __init__(self, child_type: TypeBase, ndim: int):
assert isinstance(child_type, TypeBase)
self.child_type = child_type
self.ndim = ndim
class CArray(ArrayBase):
def __init__(self, dtype, ndim, is_c_contiguous, index_32_bits):
self.dtype = dtype
self._c_contiguous = is_c_contiguous
self._index_32_bits = index_32_bits
super().__init__(Scalar(dtype), ndim)
@classmethod
def from_ndarray(cls, x):
return CArray(x.dtype, x.ndim, x._c_contiguous, x._index_32_bits)
def __str__(self):
        ctype = get_typename(self.dtype)
c_contiguous = get_cuda_code_from_constant(self._c_contiguous, bool_)
index_32_bits = get_cuda_code_from_constant(self._index_32_bits, bool_)
        return f'CArray<{ctype}, {self.ndim}, {c_contiguous}, {index_32_bits}>'
def __eq__(self, other):
return (
isinstance(other, CArray) and
self.dtype == other.dtype and
self.ndim == other.ndim and
self._c_contiguous == other._c_contiguous and
self._index_32_bits == other._index_32_bits
)
def __hash__(self):
return hash(
(self.dtype, self.ndim, self._c_contiguous, self._index_32_bits))
class SharedMem(ArrayBase):
def __init__(self, child_type, size):
if not (isinstance(size, int) or size is None):
            raise ValueError('size of shared_memory must be integer or `None`')
self._size = size
super().__init__(child_type, 1)
def declvar(self, x):
if self._size is None:
return f'extern __shared__ {self.child_type} {x}[]'
return f'__shared__ {self.child_type} {x}[{self._size}]'
class Ptr(ArrayBase):
def __init__(self, child_type):
super().__init__(child_type, 1)
def __str__(self):
return f'{self.child_type}*'
class Tuple(TypeBase):
def __init__(self, types):
self.types = types
def __str__(self):
types = ', '.join([str(t) for t in self.types])
return f'thrust::tuple<{types}>'
def __eq__(self, other):
return isinstance(other, Tuple) and self.types == other.types
void = Void()
bool_ = Scalar(numpy.bool_)
int32 = Scalar(numpy.int32)
uint32 = Scalar(numpy.uint32)
_suffix_literals_dict = {
'float64': '',
'float32': 'f',
'int64': 'll',
'int32': '',
'uint64': 'ull',
'uint32': 'u',
'bool': '',
}
def get_cuda_code_from_constant(x, ctype):
dtype = ctype.dtype
suffix_literal = _suffix_literals_dict.get(dtype.name)
if suffix_literal is not None:
s = str(x).lower()
return f'{s}{suffix_literal}'
ctype = str(ctype)
if dtype.kind == 'c':
return f'{ctype}({x.real}, {x.imag})'
if ' ' in ctype:
return f'({ctype}){x}'
return f'{ctype}({x})'
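# Hedged usage sketch of the helpers above (not part of the original file):
if __name__ == '__main__':
    print(get_cuda_code_from_constant(True, bool_))              # -> true
    print(get_cuda_code_from_constant(7, Scalar(numpy.uint64)))  # -> 7ull
    print(Ptr(Scalar(numpy.float32)))                            # -> float*
    print(Tuple([int32, uint32]))  # -> thrust::tuple<int, unsigned int>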
|
emogenet/ghdl
|
libraries/openieee/build_numeric.py
|
Python
|
gpl-2.0
| 32,279
| 0.004058
|
#!/usr/bin/env python
# Generate the body of ieee.numeric_std and numeric_bit from a template.
# The implementation is based only on the specification and on testing (as
# the specifications are often ambiguous).
# The algorithms are very simple: carry ripple adder, restoring division.
# This file is part of GHDL.
# Both this file and the outputs of this file are copyrighted.
# Copyright (C) 2015 Tristan Gingold
#
# GHDL is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2, or (at your option) any later
# version.
#
# GHDL is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING2. If not see
# <http://www.gnu.org/licenses/>.
import re
import sys
# My Python 'style' and knowledge are basic... Do not hesitate to comment.
binary_funcs = [ "and", "nand", "or", "nor", "xor" ]
compare_funcs = [ "=", "/=", ">", ">=", "<", "<=" ]
vec_types = ['UNSIGNED', 'SIGNED']
logics = ['bit', 'std']
logic_types = {'bit' : 'bit', 'std': 'sl_x01' }
logic_undefs = {'bit' : "'0'", 'std': "'X'" }
logic = 'xx' # Current logic, either bit or std
v93=False
# Stream to write.
out=sys.stdout
def w(s):
"Write S to the output"
out.write(s)
def logic_type():
return logic_types[logic]
def logic_undef():
return logic_undefs[logic]
def disp_vec_binary(func, typ):
"Generate the body of a vector binary logic function"
res = """
function "{0}" (l, r : {1}) return {1}
is
subtype res_type is {1} (l'length - 1 downto 0);
alias la : res_type is l;
alias ra : {1} (r'length - 1 downto 0) is r;
variable res : res_type;
begin
if la'left /= ra'left then
assert false
report "NUMERIC_STD.""{0}"": arguments are not of the same length"
severity failure;
res := (others => """ + logic_undef() + """);
else
for I in res_type'range loop
res (I) := la (I) {0} ra (I);
end loop;
end if;
return res;
end "{0}";\n"""
w (res.format(func, typ))
def disp_non_logical_warning(func):
return """
assert NO_WARNING
report "NUMERIC_STD.""{0}"": non logical value detected"
severity warning;""".format(func)
def conv_bit(expr):
if logic == 'std':
return "sl_to_x01 (" + expr + ")"
else:
return expr
def extract_bit(name):
res = "{0}b := " + conv_bit ("{0}a (i)") + ";"
return res.format(name)
def init_carry(func):
if func == '+':
return """
carry := '0';"""
else:
return """
carry := '1';"""
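# The '1' carry-in implements two's-complement subtraction: a - b is
# computed as a + not(b) + 1. A plain-Python sanity check of the identity
# (illustrative only, not used by the generator):
#   a, b, mask = 5, 3, (1 << 4) - 1
#   assert (a + (~b & mask) + 1) & mask == (a - b) & mask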
def extract_extend_bit(name,typ):
res = """
if i > {0}a'left then
{0}b := """
if typ == 'UNSIGNED':
res += "'0';"
else:
res += "{0} ({0}'left);"
res += """
else
""" + extract_bit(name) + """
end if;"""
return res.format(name)
def disp_vec_vec_binary(func, typ):
"Generate vector binary function body"
res = """
function "{0}" (l, r : {1}) return {1}
is
constant lft : integer := MAX (l'length, r'length) - 1;
subtype res_type is {1} (lft downto 0);
alias la : {1} (l'length - 1 downto 0) is l;
alias ra : {1} (r'length - 1 downto 0) is r;
variable res : res_type;
variable lb, rb, carry : """ + logic_type () + """;
begin
if la'left < 0 or ra'left < 0 then
return null_{1};
end if;"""
res += init_carry(func)
res += """
for i in 0 to lft loop"""
res += extract_extend_bit('l', typ)
res += extract_extend_bit('r', typ)
if logic == 'std':
res += """
if lb = 'X' or rb = 'X' then""" + \
disp_non_logical_warning(func) + """
res := (others => 'X');
exit;
end if;"""
if func == '-':
res += """
rb := not rb;"""
res += """
res (i) := compute_sum (carry, rb, lb);
carry := compute_carry (carry, rb, lb);
end loop;
return res;
end "{0}";
"""
w (res.format (func, typ))
def declare_int_var(name, typ):
res = """
variable {0}1, {0}2 : {1};
      variable {0}d : nat1;"""
if typ == "INTEGER":
res += """
constant {0}msb : nat1 := boolean'pos({0} < 0);"""
return res.format(name, typ)
def init_int_var(name, typ):
return """
      {0}1 := {0};""".format(name)
def extract_int_lsb(name, typ):
res = """
{0}2 := {0}1 / 2;"""
if typ == "INTEGER":
res += """
if {0}1 < 0 then
{0}d := 2 * {0}2 - {0}1;
{0}1 := {0}2 - {0}d;
else
{0}d := {0}1 - 2 * {0}2;
{0}1 := {0}2;
end if;"""
else:
res += """
{0}d := {0}1 - 2 * {0}2;
{0}1 := {0}2;"""
res += """
{0}b := nat1_to_01 ({0}d);"""
return res.format(name,typ)
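# The INTEGER branch above recovers floor division from VHDL's truncating
# "/" so the LSBs of negative operands come out right. For n1 = -5: VHDL
# gives n2 = -2, then d = 2*(-2) - (-5) = 1 and n1 becomes -2 - 1 = -3,
# matching Python's divmod(-5, 2) == (-3, 1).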
def check_int_truncated(func, name, typ):
if typ == "INTEGER":
v = "-{0}msb".format(name)
else:
v = "0"
return """
if {1}1 /= {2} then
assert NO_WARNING
report "NUMERIC_STD.""{0}"": vector is truncated"
severity warning;
end if;""".format(func, name, v)
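# Sketch of the check: once the ripple loop has consumed res'length bits,
# the integer operand must be exhausted (0, or -1 via {0}msb for a negative
# INTEGER). E.g. adding r = 9 into a 3-bit UNSIGNED leaves r1 = 1, so the
# "vector is truncated" warning fires.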
def create_vec_int_dict(func, left, right):
if left in vec_types:
dic = {'vtype': left,
'itype': right,
'vparam': 'l',
'iparam': 'r'}
else:
dic = {'vtype': right,
'itype': left,
'vparam': 'r',
'iparam': 'l'}
dic.update({'ltype': left,
'rtype': right,
'func': func,
'logic': logic_type()})
return dic
def disp_vec_int_binary(func, left, right):
"Generate vector binary function body"
dic = create_vec_int_dict(func, left, right)
res = """
function "{func}" (l : {ltype}; r : {rtype}) return {vtype}
is
subtype res_type is {vtype} ({vparam}'length - 1 downto 0);
alias {vparam}a : res_type is {vparam};""" + \
declare_int_var (dic["iparam"], dic["itype"]) + """
variable res : res_type;
variable lb, rb, carry : {logic};
begin
if res'length < 0 then
return null_{vtype};
end if;"""
# Initialize carry. For subtraction, use 2-complement.
res += init_carry(func)
res += init_int_var(dic['iparam'], dic['itype']) + """
for i in res'reverse_range loop
""" + extract_bit(dic['vparam']) + "\n" + \
          extract_int_lsb(dic['iparam'], dic['itype'])
if logic == 'std':
res += """
if {vparam}b = 'X' then""" + \
disp_non_logical_warning(func) + """
res := (others => 'X');
{iparam}1 := 0;
exit;
end if;"""
# 2-complement for subtraction
if func == '-':
res += """
rb := not rb;"""
res += """
res (i) := compute_sum (carry, rb, lb);
carry := compute_carry (carry, rb, lb);
end loop;""" + \
check_int_truncated(func, dic['iparam'], dic['itype']) + """
return res;
end "{func}";\n"""
w(res.format (**dic))
def disp_vec_int_gcompare(func, left, right):
"Generate comparison function"
    dic = create_vec_int_dict(func, left, right)
res = """
function {func} (l : {ltype}; r : {rtype}) return compare_type
is
subtype res_type is {vtype} ({vparam}'length - 1 downto 0);
alias la : res_type is l;""" + \
declare_int_var (dic['iparam'], dic['itype']) + """
variable lb, rb : {logic};
variable res : compare_type;
begin
    res := compare_eq;"""
    res += init_int_var(dic['iparam'], dic['itype']) + """
         for i in {vparam}a'reverse_range loop
    """ + extract_bit (dic['vparam']) + \
extract_int_lsb("r", right)
if logic == 'std':
res += """
if {vparam}b = 'X' then
return compare_unknown;
end if;"""
res += """
if lb = '1' and rb = '0' then
res := compare_gt;
elsif lb = '0' and rb = '1' then
|
pastpages/wordpress-memento-plugin
|
fabfile/migrate.py
|
Python
|
mit
| 268
| 0
|
from venv import _venv
from fabric.api import task
@task
def migrate():
"""
Run Django's migrate command
"""
_venv("python manage.py migrate")
@task
def syncdb():
"""
Run Django's syncdb command
"""
_venv("python manage.py syncdb")
|
jut-io/jut-python-tools
|
tests/jut_run_tests.py
|
Python
|
mit
| 3,794
| 0.002636
|
"""
basic set of `jut run` tests
"""
import json
import unittest
from tests.util import jut
BAD_PROGRAM = 'foo'
BAD_PROGRAM_ERROR = 'Error line 1, column 1 of main: Error: no such sub: foo'
class JutRunTests(unittest.TestCase):
    def test_jut_run_syntactically_incorrect_program_reports_error_with_format_json(self):
"""
verify an invalid program reports the failure correctly when using json
output format
"""
process = jut('run', BAD_PROGRAM, '-f', 'json')
process.expect_status(255)
process.expect_error(BAD_PROGRAM_ERROR)
    def test_jut_run_syntactically_incorrect_program_reports_error_with_format_text(self):
"""
verify an invalid program reports the failure correctly when using text
output format
"""
process = jut('run', BAD_PROGRAM, '-f', 'text')
process.expect_status(255)
process.expect_error(BAD_PROGRAM_ERROR)
    def test_jut_run_syntactically_incorrect_program_reports_error_with_format_csv(self):
        """
        verify an invalid program reports the failure correctly when using csv
        output format
        """
        process = jut('run', BAD_PROGRAM, '-f', 'csv')
process.expect_status(255)
process.expect_error(BAD_PROGRAM_ERROR)
def test_jut_run_emit_to_json(self):
"""
use jut to run the juttle program:
emit -from :2014-01-01T00:00:00.000Z: -limit 5
and verify the output is in the expected JSON format
"""
process = jut('run',
'emit -from :2014-01-01T00:00:00.000Z: -limit 5')
process.expect_status(0)
points = json.loads(process.read_output())
process.expect_eof()
self.assertEqual(points,
[
{'time': '2014-01-01T00:00:00.000Z'},
{'time': '2014-01-01T00:00:01.000Z'},
{'time': '2014-01-01T00:00:02.000Z'},
{'time': '2014-01-01T00:00:03.000Z'},
{'time': '2014-01-01T00:00:04.000Z'}
])
def test_jut_run_emit_to_text(self):
"""
use jut to run the juttle program:
emit -from :2014-01-01T00:00:00.000Z: -limit 5
and verify the output is in the expected text format
"""
process = jut('run',
'--format', 'text',
'emit -from :2014-01-01T00:00:00.000Z: -limit 5')
process.expect_status(0)
stdout = process.read_output()
process.expect_eof()
self.assertEqual(stdout, '2014-01-01T00:00:00.000Z\n'
'2014-01-01T00:00:01.000Z\n'
'2014-01-01T00:00:02.000Z\n'
'2014-01-01T00:00:03.000Z\n'
'2014-01-01T00:00:04.000Z\n')
def test_jut_run_emit_to_csv(self):
"""
use jut to run the juttle program:
emit -from :2014-01-01T00:00:00.000Z: -limit 5
and verify the output is in the expected csv format
"""
process = jut('run',
                      '--format', 'csv',
'emit -from :2014-01-01T00:00:00.000Z: -limit 5')
process.expect_status(0)
stdout = process.read_output()
process.expect_eof()
self.assertEqual(stdout, '#time\n'
'2014-01-01T00:00:00.000Z\n'
'2014-01-01T00:00:01.000Z\n'
'2014-01-01T00:00:02.000Z\n'
'2014-01-01T00:00:03.000Z\n'
'2014-01-01T00:00:04.000Z\n')
|
OmkarPathak/Python-Programs
|
CompetitiveProgramming/HackerEarth/DataStructures/Arrays/P02_Mark-The-Answer.py
|
Python
|
gpl-3.0
| 1,372
| 0.008824
|
# Our friend Monk has an exam that has quite weird rules. Each question has a difficulty level in the form of an
# Integer. Now, Monk can only solve the problems that have difficulty level not greater than X. Now the rules are-
#
# Score of the student is equal to the maximum number of answers he/she has attempted without skipping a question.
# Student is allowed to skip just "one" question that will not be counted in the continuity of the questions.
# Note- Assume the student knows the solution to the problem he/she attempts and always starts the paper from first
# question.
#
# Given the number of Questions, N ,the maximum difficulty level of the problem Monk can solve , X ,and the difficulty
# level of each question, Ai can you help him determine his maximum score?
#
# Input Format
# First line contains two integers: N, the number of questions, and X, the maximum difficulty Monk can solve.
# Next line contains N integers, Ai denoting the difficulty level of each question.
#
# Output Format
# Maximum score Monk can achieve in the exam.
#
# Constraints
# 1 ≤ N ≤ 10^5
# 1 ≤ X ≤ 10^9
# 1 ≤ Ai ≤ 10^9
#
# SAMPLE INPUT
# 7 6
# 4 3 7 6 7 2 2
#
# SAMPLE OUTPUT
# 3
n, x = map(int, input().split())
questions = list(map(int, input().split()))
count = 0
skip = 0
for i in range(n):
    if questions[i] > x:
        # Too difficult: skip it (only the first skip is free).
        skip += 1
    else:
        if skip == 2:
            # A second skip already broke the continuity; stop counting.
            break
        count += 1
print(count)
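# Trace of the sample (X = 6) over 4 3 7 6 7 2 2:
#   4, 3 -> count = 2; 7 -> first (free) skip; 6 -> count = 3;
#   7 -> second skip, so the next solvable question stops the run -> 3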
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/GL/ARB/robustness_isolation.py
|
Python
|
lgpl-3.0
| 1,604
| 0.015586
|
'''OpenGL extension ARB.robustness_isolation
This module customises the behaviour of the
OpenGL.raw.GL.ARB.robustness_isolation to provide a more
Python-friendly API
Overview (from the spec)
GL_ARB_robustness and supporting window system extensions allow
creating an OpenGL context supporting graphics reset notification
behavior. GL_ARB_robustness_isolation provides stronger
guarantees about the possible side-effects of a graphics reset.
It is expected that there may be a performance cost associated
with isolating an application or share group from other contexts
on the GPU. For this reason, GL_ARB_robustness_isolation is
phrased as an opt-in mechanism, with a new context creation bit
defined in the window system bindings. It is expected that
implementations might only advertise the strings in this extension
if both the implementation supports the desired isolation
properties, and the context was created with the appropriate reset
isolation bit.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/robustness_isolation.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.robustness_isolation import *
from OpenGL.raw.GL.ARB.robustness_isolation import _EXTENSION_NAME
def glInitRobustnessIsolationARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
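# A minimal usage sketch (a current GL context must be bound before the
# extension query is meaningful):
#   if glInitRobustnessIsolationARB():
#       pass  # the reset-isolation guarantees of this extension apply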
|
its-dirg/svs
|
setup.py
|
Python
|
apache-2.0
| 1,148
| 0.000871
|
from setuptools import find_packages
from setuptools import setup
setup(
name='svs',
version='1.0.0',
    description='The InAcademia Simple validation Service allows for the easy validation of affiliation (Student, '
                'Faculty, Staff) of a user in Academia',
license='Apache 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
],
author='Rebecka Gulliksson',
author_email='tech@inacademia.org',
zip_safe=False,
url='http://www.inacademia.org',
packages=find_packages('src'),
package_dir={'': 'src'},
package_data={
'svs': [
'data/i18n/locale/*/LC_MESSAGES/*.mo',
'templates/*.mako',
'site/static/*',
],
},
message_extractors={
'src/svs': [
('**.py', 'python', None),
('templates/**.mako', 'mako', None),
('site/**', 'ignore', None)
]
},
install_requires=[
'satosa==3.3.1',
'Mako',
'gunicorn',
'Werkzeug'
]
)
|
dayatz/taiga-back
|
taiga/export_import/services/store.py
|
Python
|
agpl-3.0
| 29,644
| 0.002699
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This makes all code that imports services work and
# is not the worst practice ;)
import os
import uuid
from unidecode import unidecode
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from taiga.projects.history.services import make_key_from_model_object, take_snapshot
from taiga.projects.models import Membership
from taiga.projects.references import sequences as seq
from taiga.projects.references import models as refs
from taiga.projects.userstories.models import RolePoints
from taiga.projects.services import find_invited_user
from taiga.timeline.service import build_project_namespace
from taiga.users import services as users_service
from .. import exceptions as err
from .. import validators
########################################################################
## Manage errors
########################################################################
_errors_log = {}
def get_errors(clear=True):
_errors = _errors_log.copy()
if clear:
_errors_log.clear()
return _errors
def add_errors(section, errors):
if section in _errors_log:
_errors_log[section].append(errors)
else:
_errors_log[section] = [errors]
def reset_errors():
_errors_log.clear()
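# A minimal sketch of the error-log flow used by the store functions below:
#   add_errors("roles", {"name": ["This field is required."]})
#   add_errors("roles", {"slug": ["Invalid value."]})
#   get_errors()  # -> {"roles": [<both error dicts>]}, clearing the log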
########################################################################
## Store functions
########################################################################
## PROJECT
def store_project(data):
project_data = {}
for key, value in data.items():
excluded_fields = [
"default_points", "default_us_status", "default_task_status",
"default_priority", "default_severity", "default_issue_status",
"default_issue_type", "default_epic_status",
"memberships", "points",
"epic_statuses", "us_statuses", "task_statuses", "issue_statuses",
"priorities", "severities",
"issue_types",
"epiccustomattributes", "userstorycustomattributes",
"taskcustomattributes", "issuecustomattributes",
"roles", "milestones",
"wiki_pages", "wiki_links",
"notify_policies",
"epics", "user_stories", "issues", "tasks",
"is_featured"
]
if key not in excluded_fields:
project_data[key] = value
validator = validators.ProjectExportValidator(data=project_data)
if validator.is_valid():
validator.object._importing = True
validator.object.save()
validator.save_watchers()
return validator
add_errors("project", validator.errors)
return None
## MISC
def _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes, values):
ret = {}
for attr in custom_attributes:
value = values.get(attr["name"], None)
if value is not None:
ret[str(attr["id"])] = value
return ret
def _store_custom_attributes_values(obj, data_values, obj_field, serializer_class):
data = {
obj_field: obj.id,
"attributes_values": data_values,
}
try:
custom_attributes_values = obj.custom_attributes_values
serializer = serializer_class(custom_attributes_values, data=data)
except ObjectDoesNotExist:
serializer = serializer_class(data=data)
if serializer.is_valid():
serializer.save()
return serializer
add_errors("custom_attributes_values", serializer.errors)
return None
def _store_attachment(project, obj, attachment):
validator = validators.AttachmentExportValidator(data=attachment)
if validator.is_valid():
validator.object.content_type = ContentType.objects.get_for_model(obj.__class__)
validator.object.object_id = obj.id
validator.object.project = project
if validator.object.owner is None:
validator.object.owner = validator.object.project.owner
validator.object._importing = True
validator.object.size = validator.object.attached_file.size
validator.object.name = os.path.basename(validator.object.attached_file.name)
validator.save()
return validator
add_errors("attachments", validator.errors)
return validator
def _store_history(project, obj, history):
validator = validators.HistoryExportValidator(data=history, context={"project": project})
if validator.is_valid():
validator.object.key = make_key_from_model_object(obj)
if validator.object.diff is None:
validator.object.diff = []
validator.object.project_id = project.id
validator.object._importing = True
validator.save()
return validator
add_errors("history", validator.errors)
return validator
## ROLES
def _store_role(project, role):
validator = validators.RoleExportValidator(data=role)
if validator.is_valid():
validator.object.project = project
validator.object._importing = True
validator.save()
return validator
add_errors("roles", validator.errors)
return None
def store_roles(project, data):
results = []
for role in data.get("roles", []):
validator = _store_role(project, role)
if validator:
results.append(validator)
return results
## MEMBERSHIPS
def _store_membership(project, membership):
validator = validators.MembershipExportValidator(data=membership, context={"project": project})
if validator.is_valid():
validator.object.project = project
validator.object._importing = True
validator.object.token = str(uuid.uuid1())
validator.object.user = find_invited_user(validator.object.email,
default=validator.object.user)
validator.save()
return validator
add_errors("memberships", validator.errors)
return None
def store_memberships(project, data):
results = []
for membership in data.get("memberships", []):
results.append(_store_membership(project, membership))
return results
## PROJECT ATTRIBUTES
def _store_project_attribute_value(project, data, field, serializer):
validator = serializer(data=data)
if validator.is_valid():
validator.object.project = project
validator.object._importing = True
validator.save()
return validator.object
add_errors(field, validator.errors)
return None
def store_project_attributes_values(project, data, field, serializer):
result = []
for choice_data in data.get(field, []):
result.append(_store_project_attribute_value(project, choice_data, field, serializer))
return result
## DEFAULT PROJECT ATTRIBUTES VALUES
def store_default_project_attributes_values(project, data):
def helper(project, field, related, data):
if field in data:
value = related.all().get(name=data[field])
else:
value = related.all().first()
setattr(project, field, value)
helper(project, "default_points", project.points, data)
    helper(project, "default_issue_type", project.issue_types, data)
|
christianholz/QuickShuttle
|
bookings.py
|
Python
|
gpl-3.0
| 3,213
| 0.012138
|
#!/usr/bin/env python
"""list all previously made bookings"""
import os
import sys
import cgi
import datetime
import json
import shuttle
import shconstants
import smtplib
import shcookie
print "Content-type: text/html\r\n"
shuttle.do_login(shcookie.u, shcookie.p)
form = cgi.FieldStorage()
if 'action' in form:
act = form.getvalue("action")
if act == "cancel":
id = form.getvalue("id")
shuttle.cancel_booking(id)
show_all_routes = 'ar' in form
bookings = shuttle.get_bookings()
print '''<html>
<head>
<title>Connector bookings for %s</title>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no" />
<link href="style.css" rel="stylesheet" />
</head>
<body>''' % (shcookie.u)
alldata = json.load(open("all.json"))
routes = [r[:-3] for r in alldata["true"].keys()]
routes.sort()
routes = [[r, alldata["true"][r + " AM"][2]] for r in routes if len(shcookie.routes) == 0 or show_all_routes or alldata["true"][r + " AM"][2] in shcookie.routes]
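# routes now holds [name, id] pairs (the id taken from the route's AM entry),
# limited to the user's preferred routes unless none are configured or
# "all routes" was requested.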
# header bar
print '<div id="newbar"><div id="newbarin">'
for r in routes:
print '''<span class="newbutton">
<a href="new.py?r=%s" class="l">%s</a>
</span>''' % (r[1], r[0])
if len(shcookie.routes) != 0 and not show_all_routes:
print '''<span class="newbutton"><a href="bookings.py?ar=1" class="l">all routes</a></span>'''
print '</div></div>'
# list of rides
if 'cal' in form:
cal = form.getvalue("cal")
print '''<div id="outlook">
<a href="outlook.py?cal=%s">download booked trip</a>
</div>''' % (cal)
print '<div id="bookings">'
for b in bookings:
past = False
dt = datetime.datetime.strptime(b['dd'] + ' ' + b['dt'], "%m/%d/%Y %I:%M %p")
if dt < datetime.datetime.now() - datetime.timedelta(hours=2) - datetime.timedelta(minutes=60):
continue
if "PM" in b['dt']:
csspm = " pm"
else:
csspm = ""
if dt < datetime.datetime.now() - datetime.timedelta(hours=2) - datetime.timedelta(minutes=1):
past = True
csspm += " past"
print '''<div class="booking%s">
<span class="t">%s</span>
<span class="r">%s</span>
<span class="dt">%s</span><span class="dl">%s</span>
<span class="gt">%s</span><span class="gl">%s</span>''' % (
        csspm, dt.strftime("%A, %b %d"), b['r'], b['dt'], b['dl'], b['gt'], b['gl'])
if 'cn' in b:
print ' <span class="cn">Connector %s</span>' % (b['cn'])
if not past:
loc = shuttle.get_shuttle_location(b['r'], b['cn'])
        if loc is not None:
            stop = shuttle.get_stop_gps(b['r'], b['dl'])
            if stop is not None:
dst = shuttle.get_maps_eta((loc['lat'], loc['lon']), (stop[0], stop[1]))
print ' <span class="et">ETA: %s (<a href="https://www.google.com/maps?q=%f,%f">%s</a>)</span>' % (
dst[1], loc['lat'], loc['lon'], dst[0])
if 'cl' in b:
print ''' <form method="post" action="%s" onsubmit="return confirm('Cancel?');">
<input type="hidden" name="action" value="cancel"/>
<input type="hidden" name="id" value="%s"/>
<input type="submit" value="cancel"/>
</form>''' % (os.environ["SCRIPT_NAME"], b['cl'])
print '</div>'
print '</div></body><!--'
# print datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
print '--></html>'
|