| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
Nachtfeuer/pipeline
|
tests/components/test_shell_config.py
|
Python
|
mit
| 2,422
| 0.001652
|
"""Testing of module config."""
# pylint: disable=no-self-use, invalid-name, redundant-unittest-assert
import unittest
from hamcrest import assert_that, equal_to, contains_string
from ddt import ddt, data
from spline.components.config import ShellConfig
@ddt
class TestShellConfig(unittest.TestCase):
"""Testing of class ShellConfig."""
def test_minimal_valid(self):
"""Testing to provide mandatory parameters only."""
config = ShellConfig(script='echo "hello world"')
assert_that(config.script, equal_to('echo "hello world"'))
assert_that(config.title, equal_to(''))
assert_that(config.model, equal_to({}))
assert_that(config.env, equal_to({}))
assert_that(config.item, equal_to(None))
assert_that(config.dry_run, equal_to(False))
assert_that(config.debug, equal_to(False))
assert_that(config.strict, equal_to(False))
assert_that(config.variables, equal_to({}))
@data({'dry_run': True}, {'debug': True}, {'dry_run': False}, {'item': 'hello'},
{'env': {'message': 'hello'}}, {'model': {'foo': 123}}, {'title': 'test'},
{'variables': {'output': 'hello'}}, {'strict': True})
def test_individual_valid(self, kwargs):
"""Testing to provide mandatory and all optional parameters."""
# defaults
final_kwargs = {'script': 'echo "hello world"', 'title': '', 'debug': False, 'strict': False,
'dry_run'
|
: False, 'item': None, 'env': {}, 'model': {}, 'variables': {}}
final_kwargs.update(kwargs)
config = ShellConfig(**final_kwargs)
for key, value in final_kwargs.i
|
tems():
assert_that(key in config.__dict__, equal_to(True))
assert_that(config.__dict__[key], equal_to(value))
def test_missing_mandatory(self):
"""Testing invalid parameter."""
try:
ShellConfig()
self.assertFalse("RuntimeError expected")
except RuntimeError as exception:
assert_that(str(exception), equal_to("Missing keys: 'script'"))
def test_undefined_parameter(self):
"""Testing undefined parameter."""
try:
ShellConfig(script='echo "hello world"', XYZ='foo and bar')
self.assertFalse("RuntimeError expected")
except RuntimeError as exception:
assert_that(str(exception), contains_string("Wrong keys 'XYZ'"))
|
ipostuma/MCNPtools
|
WriteTally.py
|
Python
|
gpl-3.0
| 1,830
| 0.004372
|
from MCNPtools import Gen
# example usage of the module
# first you initialize the tally by defining the bins: segment (surface number), angle (cosine) and energy (MeV)
cos = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
seg = [666,667]
erg =[1.000E
|
-10, 1.259E-10, 1.585E-10, 1.995E-10, 2.512E-10, 3.162E-10,
3.981E-10, 5.012E-10, 6.310E-10, 7.943E-10, 1.000E-09, 1.259E-09,
1.585E-09, 1.995E-09, 2.512E-09, 3.162E-09, 3.981E-09, 5.012E-09,
6.310E-09, 7.943E-09, 1.000E-08, 1.259E-08, 1.585E-08, 1.995E-08,
2.512E-08, 3.162E-08, 3.981E-08, 5.012E-08, 6.310E-08, 7.943E-08,
1.000E-07, 1.259E-07, 1.585E-07, 1.995E-07, 2.512E-07, 3.162E-07,
3.981E-07, 5.012E-07, 6.310E-07, 7.943E-07, 1.0
|
00E-06, 1.259E-06,
1.585E-06, 1.995E-06, 2.512E-06, 3.162E-06, 3.981E-06, 5.012E-06,
6.310E-06, 7.943E-06, 1.000E-05, 1.259E-05, 1.585E-05, 1.995E-05,
2.512E-05, 3.162E-05, 3.981E-05, 5.012E-05, 6.310E-05, 7.943E-05,
1.000E-04, 1.259E-04, 1.585E-04, 1.995E-04, 2.512E-04, 3.162E-04,
3.981E-04, 5.012E-04, 6.310E-04, 7.943E-04, 1.000E-03, 1.259E-03,
1.585E-03, 1.995E-03, 2.512E-03, 3.162E-03, 3.981E-03, 5.012E-03,
6.310E-03, 7.943E-03, 1.000E-02, 1.259E-02, 1.585E-02, 1.995E-02,
2.512E-02, 3.162E-02, 3.981E-02, 5.012E-02, 6.310E-02, 7.943E-02,
1.000E-01, 1.259E-01, 1.585E-01, 1.995E-01, 2.512E-01, 3.162E-01,
3.981E-01, 5.012E-01, 6.310E-01, 7.943E-01, 1.000E+00, 1.259E+00,
1.585E+00, 1.995E+00, 2.512E+00, 3.162E+00, 3.981E+00, 5.012E+00,
6.310E+00, 7.943E+00, 1.000E+01, 1.259E+01, 1.585E+01, 1.995E+01]
myTally = Gen.Tally(seg,cos,erg)
# Once the object is initlialized it can be used to print out the tally needed by MCNP giving:
# surface -> 999
# title -> test
# normFactor -> 1
myTally.PrintTally(681,"test",9.6E+13)
|
2ndQuadrant/ansible
|
lib/ansible/plugins/action/pause.py
|
Python
|
gpl-3.0
| 10,780
| 0.001299
|
# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import signal
import sys
import termios
import time
import tty
from os import (
getpgrp,
isatty,
tcgetpgrp,
)
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import PY3
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
try:
import curses
# Nest the try except since curses.error is not available if curses did not import
try:
curses.setupterm()
HAS_CURSES = True
except curses.error:
HAS_CURSES = False
except ImportError:
HAS_CURSES = False
if HAS_CURSES:
MOVE_TO_BOL = curses.tigetstr('cr')
CLEAR_TO_EOL = curses.tigetstr('el')
else:
MOVE_TO_BOL = b'\r'
CLEAR_TO_EOL = b'\x1b[K'
class AnsibleTimeoutExceeded(Exception):
pass
def timeout_handler(signum, frame):
raise AnsibleTimeoutExceeded
def clear_line(stdout):
stdout.write(b'\x1b[%s' % MOVE_TO_BOL)
stdout.write(b'\x1b[%s' % CLEAR_TO_EOL)
def is_interactive(fd=None):
if fd is None:
return False
if isatty(fd):
# Compare the current process group to the process group associated
# with terminal of the given file descriptor to determine if the process
# is running in the background.
return getpgrp() == tcgetpgrp(fd)
else:
return False
class ActionModule(ActionBase):
''' pauses execution for a length or time, or until input is received '''
BYPASS_HOST_LOOP = True
_VALID_ARGS = frozenset(('echo', 'minutes', 'prompt', 'seconds'))
def run(self, tmp=None, task_vars=None):
''' run the pause action module '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
duration_unit = 'minutes'
prompt = None
seconds = None
echo = True
echo_prompt = ''
result.update(dict(
changed=False,
rc=0,
stderr='',
stdout='',
start=None,
stop=None,
delta=None,
echo=echo
))
# Should keystrokes be echoed to stdout?
if 'echo' in self._task.args:
try:
echo = boolean(self._task.args['echo'])
except TypeError as e:
result['failed'] = True
result['msg'] = to_native(e)
return result
# Add a note saying the output is hidden if echo is disabled
if not echo:
echo_prompt = ' (output is hidden)'
# Is 'prompt' a key in 'args'?
if 'prompt' in self._task.args:
prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), self._task.args['prompt'], echo_prompt)
else:
# If no custom prompt is specified, set a default prompt
prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)
# Are 'minutes' or 'seconds' keys that exist in 'args'?
if 'minutes' in self._task.args or 'seconds' in self._task.args:
try:
if 'minutes' in self._task.args:
# The time() command operates in seconds so we need to
# recalculate for minutes=X values.
seconds = int(self._task.args['minutes']) * 60
else:
seconds = int(self._task.args['seconds'])
duration_unit = 'seconds'
except ValueError as e:
result['failed'] = True
result['msg'] = u"non-integer value given for prompt duration:\n%s" % to_text(e)
return result
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = to_text(datetime.datetime.now())
result['user_input'] = b''
stdin_fd = None
old_settings = None
try:
if seconds is not
|
None:
if s
|
econds < 1:
seconds = 1
# setup the alarm handler
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(seconds)
# show the timer and control prompts
display.display("Pausing for %d seconds%s" % (seconds, echo_prompt))
display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),
# show the prompt specified in the task
if 'prompt' in self._task.args:
display.display(prompt)
else:
display.display(prompt)
# save the attributes on the existing (duped) stdin so
# that we can restore them later after we set raw mode
stdin_fd = None
stdout_fd = None
try:
if PY3:
stdin = self._connection._new_stdin.buffer
stdout = sys.stdout.buffer
else:
stdin = self._connection._new_stdin
stdout = sys.stdout
stdin_fd = stdin.fileno()
stdout_fd = stdout.fileno()
except (ValueError, AttributeError):
# ValueError: someone is using a closed file descriptor as stdin
# AttributeError: someone is using a null file descriptor as stdin on windoze
stdin = None
interactive = is_interactive(stdin_fd)
if interactive:
# grab actual Ctrl+C sequence
try:
intr = termios.tcgetattr(stdin_fd)[6][termios.VINTR]
except Exception:
# unsupported/not present, use default
intr = b'\x03' # value for Ctrl+C
# get backspace sequences
try:
backspace = termios.tcgetattr(stdin_fd)[6][termios.VERASE]
except Exception:
backspace = [b'\x7f', b'\x08']
old_settings = termios.tcgetattr(stdin_fd)
tty.setraw(stdin_fd)
# Only set stdout to raw mode if it is a TTY. This is needed when redirecting
# stdout to a file since a file cannot be set to raw mode.
if isatty(stdout_fd):
tty.setraw(stdout_fd)
# Only echo input if no timeout is specified
if not seconds and echo:
new_settings = termios.tcgetattr(stdin_fd)
new_settings[3] = new_settings[3] | termios.ECHO
termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)
# flush the buffer to make sure no previous key presses
# are read in below
termios.tcflush(stdin, termios.TCIFLUSH)
while True:
if not interactive:
if seconds is None:
display.warning("Not waiting for response to prompt as stdin is not interactive")
if seconds is not None:
# Gi
|
ryandub/simpl
|
simpl/exceptions.py
|
Python
|
apache-2.0
| 3,556
| 0.000281
|
# Copyright (c) 2011-2015 Rackspace US, Inc.
#
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Simpl exceptions and warnings.
Warnings can be imported and subsequently disabled by
calling the disable() classmethod.
TODO(sam): NoGroupForOption
"""
import warnings
__all__ = (
'GitWarning',
'SimplException',
'SimplGitError',
'SimplGitCommandError',
'SimplGitNotRepo',
'SimplCalledProcessError',
)
class GitWarning(RuntimeWarning):
"""The local git program is missing or may be incompatible."""
@classmethod
def disable(cls):
"""Disable warnings of this type."""
return warnings.simplefilter('ignore', cls)
# shown until proven ignored :)
warnings.simplefilter('always', GitWarning)
class SimplException(Exception):
"""Base exception for all exceptions raised by the simpl package."""
class SimplGitError(SimplException):
"""Base class for errors from the git module."""
class SimplGitCommandError(SimplGitError):
"""Raised when an error occurs while trying a git command."""
def __init__(self, returncode, cmd, output=None, oserror=None):
"""Customize Exception Constructor."""
super(SimplGitCommandError, self).__init__()
self.returncode = returncode
self.cmd = cmd
self.output = output
self.oserror = oserror
def __str__(self):
"""Include custom data in string."""
return ("The command `%s` returned non-zero exit status %d and "
"produced the following output: \"%s\""
% (self.cmd, self.returncode, self.output))
def __repr__(self):
"""Include custom data in representation."""
rpr = ('SimplGitCommandError(%d, `%s`, output="%s"'
% (self.returncode, self.cmd, self.output))
if self.oserror:
rpr += ', oserror=%s
|
' % repr(self.oserror)
rpr += ')'
return rpr
class Simp
|
lGitNotRepo(SimplGitError):
"""The directory supplied is not a git repo."""
class SimplCalledProcessError(SimplException):
"""Raised when a process run by execute() returns non-zero.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
"""Customize Exception Constructor."""
super(SimplCalledProcessError, self).__init__()
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
"""Include custom data in string."""
return ("Command '%s' returned non-zero exit status %d"
% (self.cmd, self.returncode))
class SimplConfigException(SimplException):
"""Errors raised by simpl/config."""
class SimplConfigUnknownOption(SimplConfigException):
"""An option defined in the specified source has no match.
For example, a specified ini file has an option with no corresponding
config.Option.
"""
|
matslindh/codingchallenges
|
adventofcode2021/day10.py
|
Python
|
mit
| 2,156
| 0.000928
|
from math import floor
def score_syntax_errors(program_lines):
points = {')': 3, ']': 57, '}': 1197, '>': 25137}
s = 0
scores_auto = []
for line in program_lines:
corrupted, stack = corrupted_character(line)
if corrupted:
s += points[corrupted]
else:
scores_auto.append(score_autocomplete(stack))
return s, sorted(scores_auto)[floor(l
|
en(scores_auto)/2)]
def corrupted_character(inp):
stack = []
lookup = {'(': ')', '[': ']', '{': '}', '<': '>'}
lookup_close = {v: k for k, v in lookup.items()}
def stack_converter(st):
return [lookup[element] for element in st[::-1]]
for char in inp:
if char in lookup:
stack.append(char)
elif char in lookup_close:
expected = stack.pop()
if expected != lookup_close[char]:
return
|
char, stack_converter(stack)
else:
print(f"INVALID {char}")
return None, stack_converter(stack)
def score_autocomplete(stack):
points_autocomplete = {')': 1, ']': 2, '}': 3, '>': 4}
s_auto = 0
for char in stack:
s_auto *= 5
s_auto += points_autocomplete[char]
return s_auto
def test_corrupted_character():
assert corrupted_character('{([(<{}[<>[]}>{[]{[(<()>')[0] == '}'
assert corrupted_character('[[<[([]))<([[{}[[()]]]')[0] == ')'
assert corrupted_character('[{[{({}]{}}([{[{{{}}([]')[0] == ']'
assert corrupted_character('[<(<(<(<{}))><([]([]()')[0] == ')'
assert corrupted_character('<{([([[(<>()){}]>(<<{{')[0] == '>'
def test_score_syntax_errors():
assert score_syntax_errors(open('input/10.test').read().splitlines()) == (26397, 288957)
def test_corrupted_character_stack():
assert corrupted_character('[({(<(())[]>[[{[]{<()<>>')[1] == ['}', '}', ']', ']', ')', '}', ')', ']']
def test_scoring_autocomplete():
assert score_autocomplete('}}]])})]') == 288957
assert score_autocomplete(')}>]})') == 5566
assert score_autocomplete('}}>}>))))') == 1480781
if __name__ == '__main__':
print(score_syntax_errors(open('input/10').read().splitlines()))
|
akshayarora2009/vayu
|
vayu/routes/projects.py
|
Python
|
mit
| 7,845
| 0.002549
|
from flask import Blueprint, render_template, request, make_response, jsonify
import os
import vayu.core.local_utils as lutils
import vayu.core.fabric_scripts.utils as futils
from vayu.core.constants.model import machine_info
from vayu.core.VayuException import VayuException
import vayu.core.constants.local as constants
import re
project_app = Blueprint('project_app', __name__)
machine_info = machine_info("root","139.59.35.6","ahjvayu2017")
@project_app.route('/projects', methods=['GET'])
def projects():
all_projects = lutils.get_list_of_projects()
if all_projects is None:
all_projects = dict()
return render_template("projects.html", data={'projects': all_projects})
@project_app.route('/projects/new', methods=['POST'])
def new_project():
errors = []
project_id = request.form["project_id"]
if not project_id:
errors.append("Project Id seems to be empty")
project_path = request.form["project_path"]
if "use_gitignore" in request.form:
use_gitignore = request.form["use_gitignore"]
else:
use_gitignore = "off"
if not os.path.isdir(project_path):
print("Not a valid path to dir")
errors.append("The path to directory is invalid")
if use_gitignore == "on":
if not os.path.isfile(project_path + '/.gitignore'):
errors.append("You specify to use gitignore, but gitignore is not present in specified directory")
else:
if not os.path.isfile(project_path + '/.vayuignore'):
open(project_path + '/.vayuignore', 'w')
details = dict()
details["path"] = project_path
details["use_gitignore"] = use_gitignore
if not errors:
try:
created_id = lutils.add_new_project(project_id, details)
except ValueError as e:
errors.append(str(e))
if not errors:
return make_response(created_id, 200)
else:
v = VayuException(400, "Please correct the errors", errors)
return make_response(v.to_json(), 400)
@project_app.route("/projects/delete", methods=['POST'])
def delete_project():
"""
Delete a project with particular id
:return:
"""
project_id = request.form["project_id"]
lutils.delete_project(project_id)
return make_response("Success", 200)
@project_app.route("/<uuid>", methods=['POST'])
def deploy_project(uuid):
"""
Deploy a project with particular id
:return:
"""
project_id = request.form["project_id"]
project_path = request.form["project_path"]
futils.moveProject(machine_info , project_path , project_id)
futils.deployNodeJs(machine_info,project_id,"server.js")
return make_response("Success", 200)
@project_app.route('/projects/<project_id>')
@project_app.route('/projects/<project_id>/overview')
def project_overview(project_id):
print project_id
return render_template("project_overview.html" , project_id = project_id)
@project_app.route('/projects/<project_id>/fleet')
def project_fleet(project_id):
fleet_details = lutils.get_fleet_details(project_id)
if not fleet_details:
fleet_details = dict()
# for key, value in fleet_details.items():
# print(value['hosts'])
for key, value in fleet_details.items():
print key
print value
return render_template("project_fleet.html", data={constants.FLEET: fleet_details})
@project_app.route('/projects/<project_id>/new-data-center', methods=['POST'])
def new_data_center(project_id):
"""
Adds a new data center for the given project id
:param project_id:
:return:
"""
errors = []
data_center_id = request.form[constants.DATA_CENTER_ID]
if not data_center_id:
errors.append("Data Center ID seems to be empty")
data_center_name = request.form[constants.DATA_CENTER_NAME]
if not data_center_name:
errors.append("Data Center Name seems to be empty")
center_details = dict()
center_details[constants.DATA_CENTER_ID] = data_center_id
center_details[constants.DATA_CENTER_NAME] = data_center_name
if not errors:
try:
lutils.add_new_data_center(project_id, center_details)
except ValueError
|
as e:
errors.append(str(e))
if not errors:
return make_response("OK", 200)
else:
v = VayuException(400, "Please corr
|
ect the errors", errors)
return make_response(v.to_json(), 400)
@project_app.route('/projects/<project_id>/delete-data-center', methods=['POST'])
def delete_data_center(project_id):
"""
This method deletes a data center associated with a particular project_id
:param project_id:
:return:
"""
data_center_id = request.form[constants.DATA_CENTER_ID]
if data_center_id:
lutils.delete_data_center(project_id, data_center_id)
return make_response("OK", 200)
@project_app.route('/projects/<project_id>/host/new', methods=['POST'])
def add_new_host_to_data_center(project_id):
"""
Adds a new host to the data center associated with the project
:param project_id:
:return:
"""
errors = []
data_center_id = request.form[constants.DATA_CENTER_ID]
host_id = request.form[constants.HOST_ID]
host_alias = request.form[constants.HOST_ALIAS]
auth_method = request.form[constants.AUTH_METHOD]
host_auth_user = request.form[constants.HOST_AUTH_USER]
host_auth_password = request.form[constants.HOST_AUTH_PASSWORD]
req = dict()
req[constants.HOST_ID] = host_id
req[constants.HOST_ALIAS] = host_alias
req[constants.AUTH_METHOD] = auth_method
req[constants.HOST_AUTH_USER] = host_auth_user
req[constants.HOST_AUTH_PASSWORD] = host_auth_password
errors = lutils.validate_new_host_details(req)
if not data_center_id:
errors.append("Data Center ID cannot be empty")
req[constants.DATA_CENTER_ID] = data_center_id
host_details_known = request.form[constants.HOST_DETAILS]
if host_details_known != 'true':
errors.append("You must connect with the Host before adding it permanently")
# TODO Implement SSH key based login as well. See upvoted answer to a stackoverflow question
if not errors:
try:
hid = lutils.add_new_host(host_id, req)
lutils.add_host_to_data_center(project_id, data_center_id, host_id)
except ValueError, e:
errors.append(str(e))
if not errors:
return make_response("OK", 201)
else:
v = VayuException(400, "Please Correct the errors", errors)
return make_response(v.to_json(), v.status_code)
@project_app.route('/hosts', methods=['GET'])
def get_host_list():
"""
Returns the list of all configured hosts.
This endpoint is usually used by client side by an AJAX call
:return:
"""
hosts = lutils.get_list_of_hosts()
return make_response(jsonify(hosts), 200)
@project_app.route('/projects/<project_id>/host/delete', methods=['POST'])
def delete_host_association(project_id):
"""
Endpoint to delete an association of a particular data center with a host
:param project_id:
:return:
"""
data_center_id = request.form[constants.DATA_CENTER_ID]
host_id = request.form[constants.HOST_ID]
lutils.delete_association_of_host_with_datacenter(project_id, data_center_id, host_id)
return make_response("OK", 201)
@project_app.route('/projects/<project_id>/host/existing', methods=['POST'])
def add_existing_host(project_id):
"""
Adds an existing host to the data center specified for the given project
:param project_id:
:return:
"""
data_center_id = request.form[constants.DATA_CENTER_ID]
host_id = request.form["existing_host_id"]
lutils.add_host_to_data_center(project_id, data_center_id, host_id)
return make_response("OK", 201)
|
ErickMurillo/ciat_plataforma
|
ciat_plataforma/wsgi.py
|
Python
|
mit
| 403
| 0.002481
|
"""
WSGI config for ciat_plaforma project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "c
|
iat_plataforma.settings")
from django.core.wsgi import get_wsgi_application
|
application = get_wsgi_application()
|
liqd/a4-meinberlin
|
tests/offlineevents/dashboard_components/test_views_project_offlineevents.py
|
Python
|
agpl-3.0
| 3,388
| 0
|
import pytest
from dateutil.parser import parse
from django.urls import reverse
from adhocracy4.dashboard import components
from adhocracy4.test.helpers import assert_template_response
from adhocracy4.test.helpers import redirect_target
from adhocracy4.test.helpers import setup_phase
from meinberlin.apps.ideas.phases import CollectFeedbackPhase
from meinberlin.apps.offlineevents.models import OfflineEvent
component = components.projects.get('offlineevents')
@pytest.mark.django_db
def test_edit_view(client, phase_factory, offline_event_factory):
phase, module, project, item = setup_phase(
phase_factory, None, CollectFeedbackPhase)
offline_event_factory(project=project)
initiator = module.project.organisation.initiators.first()
url = component.get_base_url(project)
client.login(username=initiator.email, password='password')
response = client.get(url)
assert_template_response(
response, 'meinberlin_offlineevents/offlineevent_list.html')
@pytest.mark.django_db
def test_offlineevent_create_view(client, phase_factory):
phase, module, project, item = setup_phase(
phase_factory, None, CollectFeedbackPhase)
initiator = module.project.organisation.initiators.first()
url = reverse('a4dashboard:offlineevent-create',
kwargs={'project_slug': project.slug})
data = {
'name': 'name',
'event_type': 'event_type',
'description': 'desc',
'date_0': '2013-01-01',
'date_1': '18:00',
}
client.login(username=initiator.email, password='password')
response = client.post(url, data)
assert redirect_target(response) == 'offlineevent-list'
event = OfflineEvent.objects.get(name=data.get('name'))
assert event.description == data.get('description')
assert event.date == parse("2013-01-01 17:00:00 UTC")
@pytest.mark.django_db
def test_offlineevent_update_view(
client, phase_factory, offline_event_factory):
phase, module, project, item = setup_phase(
phase_factory, None, CollectFeedbackPhase)
initiator = module.project.organisation.initiators.first()
event = offline_event_factory(project=project)
url = reverse('a4dashboard:offlineevent-update',
|
kwargs={'slug': event.slug})
data = {
'name': 'name',
'event_type': 'event_type',
'description': 'desc',
'date_0': '2013-01-01',
|
'date_1': '18:00',
}
client.login(username=initiator.email, password='password')
response = client.post(url, data)
assert redirect_target(response) == 'offlineevent-list'
event.refresh_from_db()
assert event.description == data.get('description')
assert event.date == parse("2013-01-01 17:00:00 UTC")
@pytest.mark.django_db
def test_offlineevent_delete_view(
client, phase_factory, offline_event_factory):
phase, module, project, item = setup_phase(
phase_factory, None, CollectFeedbackPhase)
initiator = module.project.organisation.initiators.first()
event = offline_event_factory(project=project)
url = reverse('a4dashboard:offlineevent-delete',
kwargs={'slug': event.slug})
client.login(username=initiator.email, password='password')
response = client.delete(url)
assert redirect_target(response) == 'offlineevent-list'
assert not OfflineEvent.objects.exists()
|
aljim/deploymentmanager-samples
|
examples/v2/waiter/instance.py
|
Python
|
apache-2.0
| 2,634
| 0.004176
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissio
|
ns and
# limitations under the License.
"""Creates a VM with the provided name, metadata, and auth scopes."""
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
def GlobalComputeUrl(project, collection, name):
return ''.join([COMPUTE_URL_BASE, 'projects/', project,
'/global/', collection, '/', name])
|
def ZonalComputeUrl(project, zone, collection, name):
return ''.join([COMPUTE_URL_BASE, 'projects/', project,
'/zones/', zone, '/', collection, '/', name])
def GenerateConfig(context):
"""Generate configuration."""
base_name = context.properties['instanceName']
items = []
for key, value in context.properties['metadata'].iteritems():
items.append({
'key': key,
'value': value
})
metadata = {'items': items}
# Properties for the container-based instance.
instance = {
'zone': context.properties['zone'],
'machineType': ZonalComputeUrl(
context.env['project'], context.properties['zone'], 'machineTypes',
'f1-micro'),
'metadata': metadata,
'serviceAccounts': [{
'email': 'default',
'scopes': context.properties['scopes']
}],
'disks': [{
'deviceName': 'boot',
'type': 'PERSISTENT',
'autoDelete': True,
'boot': True,
'initializeParams': {
'diskName': base_name + '-disk',
'sourceImage': GlobalComputeUrl(
'debian-cloud', 'images',
''.join(['backports-debian', '-7-wheezy-v20151104']))
},
}],
'networkInterfaces': [{
'accessConfigs': [{
'name': 'external-nat',
'type': 'ONE_TO_ONE_NAT'
}],
'network': GlobalComputeUrl(
context.env['project'], 'networks', 'default')
}]
}
# Resources and output to return.
return {
'resources': [{
'name': base_name,
'type': 'compute.v1.instance',
'properties': instance
}]
}
|
Islandman93/reinforcepy
|
examples/ALE/DQN_Async/run_a3c.py
|
Python
|
gpl-3.0
| 1,478
| 0.00406
|
import json
import datetime
from reinforcepy.environments import ALEEnvironment
from reinforcepy.networks.dqn.tflow.nstep_a3c import NStepA3C
from reinforcepy.learners.dqn.asynchronous.q_thread_learner import QThreadLearner
from reinforcepy.learners.dqn.asynchronous.async_thread_host import AsyncThreadHost
|
def main(rom_args, learner_args, network_args, num_threads, epochs, logdir, save_interval):
# create envs for each thread
environments = [ALEEnvironment(**rom_args) for _ in range(num_threads)]
# create shared network
num_actions = environments[0].get_num_actions()
input_shape = [learner_args['phi_length']] + environments[0].get_stat
|
e_shape()
network = NStepA3C(input_shape, num_actions, **network_args)
# create thread host
thread_host = AsyncThreadHost(network, log_dir=logdir)
# create threads
threads = [QThreadLearner(environments[t], network, thread_host.shared_dict, **learner_args) for t in range(num_threads)]
reward_list = thread_host.run_epochs(epochs, threads, save_interval=save_interval)
import matplotlib.pyplot as plt
plt.plot([x[1] for x in reward_list], [x[0] for x in reward_list], '.')
plt.savefig(logdir + 'rewards.png')
plt.show()
return max([x[0] for x in reward_list])
if __name__ == '__main__':
CONFIG = json.load(open('a3c_cfg.json'))
run_date = datetime.datetime.now().strftime("%m-%d-%Y-%H-%M")
CONFIG['logdir'] += '_' + run_date + '/'
main(**CONFIG)
|
google-research/google-research
|
aloe/aloe/rfill/utils/program_struct.py
|
Python
|
apache-2.0
| 6,121
| 0.008822
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Program tree representation."""
# pylint: skip-file
import numpy as np
from aloe.rfill.utils.rfill_consts import RFILL_EDGE_TYPES, RFILL_NODE_TYPES
class ProgNode(object):
"""Token as node in program tree/graph."""
def __init__(self, syntax, value=None, subtrees=None):
"""Initializer.
Args:
syntax: string representation of syntax
value: string representation of actual value
subtrees: list of tuple(edge_type, subtree
|
nodes or single node)
"""
self.syntax = syntax
self.value = value
self.children = []
if subtrees is not None:
|
for e_type, children in subtrees:
if isinstance(children, list):
for c in children:
add_edge(parent_node=self, child_node=c, edge_type=e_type)
else:
add_edge(parent_node=self, child_node=children, edge_type=e_type)
def get_name(self):
if self.value is None:
return self.syntax
return self.syntax + '-' + str(self.value)
def add_child(self, edge_type, child_node):
self.children.append((edge_type, child_node))
def pprint(self, tab_cnt=0):
st = ' ' * tab_cnt + self.get_name()
print(st)
for _, c in self.children:
c.pprint(tab_cnt=tab_cnt + 1)
def __str__(self):
st = '(' + self.get_name()
for _, c in self.children:
st += c.__str__()
st += ')'
return st
class AbsRFillNode(ProgNode):
"""abstract Subclass of RFillNode."""
def pprint(self, tab_cnt=0):
if self.syntax == 'RegexTok' or self.syntax == 'ConstTok':
st = ' ' * tab_cnt + self.syntax + '('
_, p1 = self.children[0]
_, p2 = self.children[1]
_, direct = self.children[2]
name = p1.value
st += '%s, %d, %s)' % (name, p2.value, direct.value)
print(st)
return
st = ' ' * tab_cnt + self.get_name()
print(st)
for _, c in self.children:
c.pprint(tab_cnt=tab_cnt + 1)
def filter_tree_nodes(root_node, key_set, out_list=None):
if out_list is None:
out_list = []
if root_node.syntax in key_set:
out_list.append(root_node)
for _, c in root_node.children:
filter_tree_nodes(c, key_set, out_list=out_list)
return out_list
def add_edge(parent_node, child_node, edge_type):
parent_node.add_child(edge_type, child_node)
class ProgGraph(object):
"""Program graph"""
def __init__(self, tree_root, node_types=RFILL_NODE_TYPES, edge_types=RFILL_EDGE_TYPES, add_rev_edge=True):
"""Initializer.
Args:
tree_root: ProgNode type; the root of tree representation
node_types: dict of nodetype to index
edge_types: dict of edgetype to index
add_rev_edge: whether add reversed edge
"""
self.tree_root = tree_root
self.add_rev_edge = add_rev_edge
self.node_types = node_types
self.edge_types = edge_types
# list of tree nodes
self.node_list = []
# node feature index
self.node_feats = []
# list of (from_idx, to_idx, etype_int) tuples
self.edge_list = []
self.last_terminal = None # used for linking terminals
self.build_graph(self.tree_root)
self.num_nodes = len(self.node_list)
self.num_edges = len(self.edge_list)
# unzipped version of edge list
# self.from_list, self.to_list, self.edge_feats = \
# [np.array(x, dtype=np.int32) for x in zip(*self.edge_list)]
self.node_feats = np.array(self.node_feats, dtype=np.int32)
self.subexpr_ids = []
for _, c in self.tree_root.children:
self.subexpr_ids.append(c.index)
def render(self, render_path):
"""Render the program graph to specified path."""
import pygraphviz as pgv
ag = pgv.AGraph(directed=True)
e_idx2name = {}
for key in self.edge_types:
e_idx2name[self.edge_types[key]] = key
for i, node in enumerate(self.node_list):
ag.add_node(str(i) + '-' + node.get_name())
for e in self.edge_list:
x, y, et = e
ename = e_idx2name[et]
if ename.startswith('rev-'):
continue
x = str(x) + '-' + self.node_list[x].get_name()
y = str(y) + '-' + self.node_list[y].get_name()
ag.add_edge(x, y)
ag.layout(prog='dot')
ag.draw(render_path)
def add_bidir_edge(self, from_idx, to_idx, etype_str):
assert etype_str in self.edge_types
self.edge_list.append((from_idx, to_idx, self.edge_types[etype_str]))
if self.add_rev_edge:
# add reversed edge
rev_etype_str = 'rev-' + etype_str
assert rev_etype_str in self.edge_types
self.edge_list.append((to_idx, from_idx, self.edge_types[rev_etype_str]))
def build_graph(self, cur_root):
"""recursively build program graph from program tree.
Args:
cur_root: current root of (sub)program
Returns:
index: index of this cur_root node
"""
cur_root.index = len(self.node_list)
self.node_list.append(cur_root)
name = cur_root.get_name()
if name not in self.node_types:
raise NotImplementedError
type_idx = self.node_types[name]
cur_root.node_type = type_idx
self.node_feats.append(type_idx)
if len(cur_root.children): # pylint: disable=g-explicit-length-test
for e_type, c in cur_root.children:
child_idx = self.build_graph(c)
self.add_bidir_edge(cur_root.index, child_idx, e_type)
else: # add possible links between adjacent terminals
if self.last_terminal is not None:
self.add_bidir_edge(self.last_terminal.index, cur_root.index, 'succ')
self.last_terminal = cur_root
return cur_root.index
|
openstack/ceilometer
|
ceilometer/tests/unit/publisher/test_prometheus.py
|
Python
|
apache-2.0
| 4,934
| 0
|
#
# Copyright 2016 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/prometheus.py"""
import datetime
from unittest import mock
import uuid
from oslotest import base
import requests
from urllib import parse as urlparse
from ceilometer.publisher import prometheus
from ceilometer import sample
from ceilometer import service
class TestPrometheusPublisher(base.BaseTestCase):
resource_id = str(uuid.uuid4())
sample_data = [
sample.Sample(
name='alpha',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id=resource_id,
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'nam
|
e': 'TestPublish'},
),
sample.Sample(
name='beta',
type=sample.TYPE_DELTA,
unit='',
volume=3,
user_id='test',
project_id='test',
resource_id=resource_id,
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='gamma',
type=sample.TYPE_GAUGE,
unit='',
|
volume=5,
user_id='test',
project_id='test',
resource_id=resource_id,
timestamp=datetime.datetime.now().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='delta.epsilon',
type=sample.TYPE_GAUGE,
unit='',
volume=7,
user_id='test',
project_id='test',
resource_id=resource_id,
timestamp=datetime.datetime.now().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
]
def setUp(self):
super(TestPrometheusPublisher, self).setUp()
self.CONF = service.prepare_service([], [])
def test_post_samples(self):
"""Test publisher post."""
parsed_url = urlparse.urlparse(
'prometheus://localhost:90/metrics/job/os')
publisher = prometheus.PrometheusPublisher(self.CONF, parsed_url)
res = requests.Response()
res.status_code = 200
with mock.patch.object(requests.Session, 'post',
return_value=res) as m_req:
publisher.publish_samples(self.sample_data)
data = """# TYPE alpha counter
alpha{resource_id="%s", project_id="test"} 1
beta{resource_id="%s", project_id="test"} 3
# TYPE gamma gauge
gamma{resource_id="%s", project_id="test"} 5
# TYPE delta_epsilon gauge
delta_epsilon{resource_id="%s", project_id="test"} 7
""" % (self.resource_id, self.resource_id, self.resource_id, self.resource_id)
expected = [
mock.call('http://localhost:90/metrics/job/os',
auth=None,
cert=None,
data=data,
headers={'Content-type': 'plain/text'},
timeout=5,
verify=True)
]
self.assertEqual(expected, m_req.mock_calls)
def test_post_samples_ssl(self):
"""Test publisher post."""
parsed_url = urlparse.urlparse(
'prometheus://localhost:90/metrics/job/os?ssl=1')
publisher = prometheus.PrometheusPublisher(self.CONF, parsed_url)
res = requests.Response()
res.status_code = 200
with mock.patch.object(requests.Session, 'post',
return_value=res) as m_req:
publisher.publish_samples(self.sample_data)
data = """# TYPE alpha counter
alpha{resource_id="%s", project_id="test"} 1
beta{resource_id="%s", project_id="test"} 3
# TYPE gamma gauge
gamma{resource_id="%s", project_id="test"} 5
# TYPE delta_epsilon gauge
delta_epsilon{resource_id="%s", project_id="test"} 7
""" % (self.resource_id, self.resource_id, self.resource_id, self.resource_id)
expected = [
mock.call('https://localhost:90/metrics/job/os',
auth=None,
cert=None,
data=data,
headers={'Content-type': 'plain/text'},
timeout=5,
verify=True)
]
self.assertEqual(expected, m_req.mock_calls)
|
ttreeagency/PootleTypo3Org
|
pootle/apps/pootle_notifications/views.py
|
Python
|
gpl-2.0
| 8,843
| 0.000565
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2012 Zuza Software Foundation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from pootle.i18n.gettext import tr_lang
from pootle_app.models import Directory
from pootle_app.models.permissions import (get_matching_permissions,
check_permission,
check_profile_permission)
from pootle_misc.mail import send_mail
from pootle_notifications.forms import form_factory
from pootle_notifications.models import Notice
from pootle_profile.models import get_profile, PootleProfile
from pootle_translationproject.models import TranslationProject
def view(request, path):
#FIXME: why do we have leading and trailing slashes in pootle_path?
pootle_path = '/%s' % path
directory = get_object_or_404(Directory, pootle_path=pootle_path)
# Set permissions on request in order to allow check them later using
# different functions.
request.permissions = get_matching_permissions(get_profile(request.user),
directory)
if request.GET.get('all', False):
criteria = {
'directory__pootle_path__startswith': directory.pootle_path,
}
else:
criteria = {
'directory': directory,
|
}
# Find lan
|
guage and project defaults, passed to handle_form
proj = None
lang = None
if not directory.is_language() and not directory.is_project():
trans_proj = directory.translation_project
lang = trans_proj.language
proj = trans_proj.project
elif directory.is_language():
lang = directory.language
elif directory.is_project():
proj = directory.project
template_vars = {
'path': path,
'directory': directory,
'title': directory_to_title(directory),
'notices': Notice.objects.filter(**criteria) \
.select_related('directory')[:30],
'language': lang,
'project': proj,
}
if check_permission('administrate', request):
template_vars['form'] = handle_form(request, directory, proj, lang,
template_vars)
return render_to_response('notices.html', template_vars,
context_instance=RequestContext(request))
def directory_to_title(directory):
"""Figures out if directory refers to a Language or TranslationProject and
returns appropriate string for use in titles.
"""
if directory.is_language():
trans_vars = {
'language': tr_lang(directory.language.fullname),
}
return _('News for %(language)s', trans_vars)
elif directory.is_project():
trans_vars = {
'project': directory.project.fullname,
}
return _('News for %(project)s', trans_vars)
elif directory.is_translationproject():
trans_vars = {
'language': tr_lang(directory.translationproject.language.fullname),
'project': directory.translationproject.project.fullname,
}
return _('News for the %(project)s project in %(language)s', trans_vars)
return _('News for %(path)s', {'path': directory.pootle_path})
def create_notice(creator, message, directory):
profile = get_profile(creator)
if not check_profile_permission(profile, 'administrate', directory):
raise PermissionDenied
new_notice = Notice(directory=directory, message=message)
new_notice.save()
return new_notice
def get_recipients(restrict_to_active_users, directory):
to_list = PootleProfile.objects.all()
# Take into account 'only active users' flag from the form.
if restrict_to_active_users:
to_list = to_list.exclude(submission=None).exclude(suggestion=None) \
.exclude(suggester=None)
recipients = []
for person in to_list:
# Check if the User profile has permissions in the directory.
if not check_profile_permission(person, 'view', directory):
continue
if person.user.email:
recipients.append(person.user.email)
return recipients
def handle_form(request, current_directory, current_project, current_language,
template_vars):
if request.method != 'POST':
# Not a POST method. Return a default starting state of the form
return form_factory(current_directory)()
# Reconstruct the NoticeForm with the user data.
form = form_factory(current_directory)(request.POST)
if not form.is_valid():
return form
message = form.cleaned_data['message']
languages = form.cleaned_data.get('language_selection', [])
projects = form.cleaned_data.get('project_selection', [])
publish_dirs = []
template_vars['notices_published'] = []
# Figure out which directories, projects, and languages are involved
if current_language and current_project:
# The current translation project
publish_dirs = [current_directory]
languages = [current_language]
projects = [current_project]
elif current_language:
languages = [current_language]
if form.cleaned_data['project_all']:
# The current language
publish_dirs = [current_language.directory]
else:
# Certain projects in the current language
translation_projects = TranslationProject.objects.filter(
language=current_language, project__in=projects)
publish_dirs = [tp.directory for tp in translation_projects]
elif current_project:
projects = [current_project]
if form.cleaned_data['language_all']:
# The current project
publish_dirs = [current_project.directory]
else:
# Certain languages in the current project
translation_projects = TranslationProject.objects.filter(
language__in=languages, project=current_project)
publish_dirs = [tp.directory for tp in translation_projects]
else:
# The form is top-level (server-wide)
if form.cleaned_data['project_all']:
if form.cleaned_data['language_all']:
# Publish at server root
publish_dirs = [current_directory]
else:
# Certain languages
publish_dirs = [l.directory for l in languages]
else:
if form.cleaned_data['language_all']:
# Certain projects
publish_dirs = [p.directory for p in projects]
else:
# Specific translation projects
translation_projects = TranslationProject.objects.filter(
language__in=languages, project__in=projects)
publish_dirs = [tp.directory for tp in translation_projects]
# RSS (notices)
if form.cleaned_data['publish_rss']:
for d in publish_dirs:
new_notice = create_notice(request.user, message, d)
template_vars['notices_published'].append(new_notice)
# E-mail
if form.cleaned_data['send_email']:
email_header = form.cleaned_data['email_header']
recipients = get_recipients(
|
hduongtrong/hyperemble
|
hyperemble/neural_net/tests/test_neural_net.py
|
Python
|
bsd-2-clause
| 760
| 0
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from keras.datasets import mnist
from hyperemble.neural_net import VanillaNeuralNet
def test_vanilla_neural_net():
(X_train, y_train),
|
(X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
clf = VanillaNeuralNet(n_layers=2, hidden_dim=200,
keep_prob=0.8, loss_func="auto",
verbose=1, batch_size=128, r
|
andom_state=1)
clf.fit(X_train, y_train)
res = clf.score(X_test, y_test)
assert res > 0.92
|
eteq/ginga
|
ginga/misc/plugins/IRAF.py
|
Python
|
bsd-3-clause
| 32,083
| 0.00106
|
"""
The IRAF plugin implements a remote control interface for the Ginga FITS
viewer from an IRAF session. In particular it supports the use of the
IRAF 'display' and 'imexamine' commands.
Instructions for use:
Set the environment variable IMTDEV appropriately, e.g.
$ export IMTDEV=inet:45005 (or)
$ export IMTDEV=unix:/tmp/.imtg45
Ginga will try to use the default value if none is assigned.
Start IRAF plugin (Plugins->Start IRAF).
From Ginga you can load images and then use 'imexamine' from IRAF to load
them, do photometry, etc. You can also use the 'display' command from IRAF
to show images in Ginga. The 'IRAF' tab will show the mapping from Ginga
channels to IRAF numerical 'frames'.
When using imexamine, the plugin disables normal UI processing on the
channel image so that keystrokes, etc. are passed through to IRAF. You can
toggle back and forth between local Ginga control and IRAF control using
the radio buttons at the top of the tab or using the space bar.
IRAF commands that have been tested: display, imexam, rimcur and tvmark.
"""
import sys, os
import logging
import threading
import socket
import ginga.util.six as six
if six.PY2:
import Queue
else:
import queue as Queue
import array
import numpy
import time
from ginga import GingaPlugin, AstroImage
from ginga import cmap, imap
from ginga.gw import Widgets, Viewers
from ginga.misc import Bunch
# XImage protocol support
import IIS_DataListener as iis
class IRAF(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(IRAF, self).__init__(fv)
self.keyqueue = Queue.Queue()
self.keyevent = threading.Event()
self.keymap = {
'comma': ',',
}
self.ctrldown = False
self.layertag = 'iraf-canvas'
# this will be set in initialize()
self.canvas = None
self.dc = fv.getDrawClasses()
self.addr = iis.get_interface()
self.ev_quit = self.fv.ev_quit
self.dataTask = None
# Holds frame buffers
self.fb = {}
self.current_frame = 0
# cursor position
self.cursor_x = 1.0
self.cursor_y = 1.0
self.mode = 'ginga'
self.imexam_active = False
self.imexam_chname = None
# init the first frame(frame 0)
self.init_frame(0)
# colormap for use with IRAF displays
self.cm_iis = cmap.ColorMap('iis_iraf', cmap_iis_iraf)
self.im_iis = imap.get_imap('ultrasmooth')
fv.add_callback('add-channel', self.add_channel)
fv.add_callback('delete-channel', self.delete_channel)
#fv.set_callback('active-image', self.focus_cb)
self.gui_up = False
def build_gui(self, container):
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(False)
## canvas.set_callback('none-move', self.cursormotion)
canvas.add_callback('key-press', self.window_key_press)
canvas.add_callback('key-release', self.window_key_release)
self.canvas = canvas
vbox = Widgets.VBox()
fr = Widgets.Frame("IRAF")
captions = [
("Addr:", 'label', "Addr", 'llabel', 'Restart', 'button'),
("Set Addr:", 'label', "Set Addr", 'entry'),
("Control", 'hbox'),
("Channel:", 'label', 'Channel', 'llabel'),
]
w, b = Widgets.build_info(captions)
self.w.update(b)
addr = str(self.addr.name)
b.addr.set_text(addr)
b.restart.set_tooltip("Restart the server")
b.restart.add_callback('activated', self.restart_cb)
b.set_addr.set_length(100)
b.addr.set_text(addr)
b.set_addr.set_tooltip("Set address to run remote control server")
b.set_addr.add_callback('activated', self.set_addr_cb)
self.w.mode_d = {}
btn1 = Widgets.RadioButton("Ginga")
btn1.set_state(True)
btn1.add_callback('activated', lambda w, val: self.switchMode('ginga'))
self.w.mode_d['ginga'] = btn1
self.w.control.add_widget(btn1)
btn2 = Widgets.RadioButton("IRAF", group=btn1)
btn2.add_callback('activated', lambda w, val: self.switchMode('iraf'))
self.w.mode_d['iraf'] = btn2
self.w.control.add_widget(btn2)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
fr = Widgets.Frame("Frame/Channel")
lbl = Widgets.Label("")
self.w.frch = lbl
fr.set_widget(lbl)
vbox.add_widget(fr, stretch=0)
# stretch
vbox.add_widget(Widgets.Label(''), stretch=1)
btns = Widgets.HBox()
btns.set_spacing(4)
btns.set_border_width(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn)
btns.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns)
container.add_widget(vbox, stretch=1)
self.gui_up = True
fmap = self.get_channel_frame_mapping()
self.update_chinfo(fmap)
def update_chinfo(self, fmap):
if not self.gui_up:
return
# Update the GUI with the new frame/channel mapping
fmap.sort(lambda x, y: x[1] - y[1])
s = ["%2d: %s" % (num, name) for (name, num) in fmap]
self.w.frch.set_text("\n".join(s))
def _setMode(self, modeStr, chname):
modeStr = modeStr.lower()
self.w.mode_d[modeStr].set_state(True)
self.w.channel.set_text(chname)
self.switchMode(modeStr)
def setMode(self, modeStr, chname):
self.imexam_chname = chname
self.fv.gui_do(self._setMode, modeStr, chname)
def toggleMode(self):
isIRAF = self.w.mode_d['iraf'].get_state()
chname = self.imexam_chname
if isIRAF:
self.logger.info("setting mode to Ginga")
self.setMode('Ginga', chname)
else:
self.logger.
|
info("setting mode to IRAF")
self.setMode('IRAF', chname)
def add_channel(self, viewer, chinfo):
self.logger.debug("channel %s added." % (chinfo.name))
n = self.channel_to_frame(chinfo.name)
if n is None:
found = len(self.fb)
for n, fb in self.fb.ite
|
ms():
if fb.chname is None:
found = n
fb = self.init_frame(found)
fb.chname = chinfo.name
fmap = self.get_channel_frame_mapping()
self.fv.gui_do(self.update_chinfo, fmap)
chinfo.fitsimage.add_callback('image-set', self.new_image_cb,
chinfo)
def delete_channel(self, viewer, chinfo):
self.logger.debug("delete channel %s" % (chinfo.name))
n = self.channel_to_frame(chinfo.name)
if n is not None:
self.fb[n].chname = None
fmap = self.get_channel_frame_mapping()
self.fv.gui_do(self.update_chinfo, fmap)
def switchMode(self, modeStr):
modeStr = modeStr.lower()
chname = self.imexam_chname
chinfo = self.fv.get_channelInfo(chname)
if modeStr == 'iraf':
self.ui_disable(chinfo.fitsimage)
else:
self.ui_enable(chinfo.fitsimage)
def start(self):
try:
if self.addr.prot == 'unix':
os.remove(self.addr.path)
except:
pass
# start the data listener task, if appropriate
ev_quit = threading.Event()
self.dataTask = iis.IIS_DataListener(
self.addr, controller=self,
ev_quit=ev_quit, logger=self.logger)
self.fv.nongui_do(self.dataTask.mainloop)
def stop(self):
if self.dataTask:
self.dataTask.stop()
self.gui_up = False
def restart_cb(self, w):
# restart server
if self.dataTask:
self.dataTask.stop()
self.start()
def set_addr_cb(self, w):
# get and parse address
addr = w.get_text()
self.addr = iis.get_interface(addr=addr)
addr = str(self.addr.name)
self.w.addr.set_text(addr
|
p4lang/p4factory
|
targets/l2_switch/tests/of-tests/openflow.py
|
Python
|
apache-2.0
| 7,004
| 0.004997
|
"""
Openflow tests on an l2 table
"""
import sys
import os
import logging
from oftest import config
import oftest.base_tests as base_tests
import ofp
from oftest.testutils import *
from oftest.parse import parse_mac
import openflow_base_tests
sys.path.append(os.path.join(sys.path[0], '..', '..', '..', '..',
'testutils'))
from utils import *
sys.path.append(os.path.join(sys.path[0], '..', '..', '..', '..',
'targets', 'l2_switch', 'build', 'thrift'))
from p4_pd_rpc.ttypes import *
from res_pd_rpc.ttypes import *
import sys
import os
import time
sys.path.append(os.path.join(sys.path[0], '..', '..', '..', '..',
'targets', 'l2_switch', 'openflow_mapping'))
from l2 import *
### TODO: generate expected packets
# common shorthands
flow_add = ofp.message.flow_add
flow_delete = ofp.message.flow_delete
group_add = ofp.message.group_add
group_mod = ofp.message.group_mod
buf = ofp.OFP_NO_BUFFER
# dmac table fields
eth_dst_addr = "ethernet_dstAddr"
def get_oxm(field_obj):
"""
Returns an oxm and an arg-dict for updating an arg-list to
simple_tcp_packet
"""
if field_obj.field == "OFPXMT_OFB_VLAN_VID":
return (ofp.oxm.vlan_vid(field_obj.testval),
{"vlan_vid": field_obj.testval, "dl_vlan_enable": True})
elif field_obj.field == "OFPXMT_OFB_ETH_DST":
return (ofp.oxm.eth_dst(parse_mac(field_obj.testval)),
{"eth_dst": field_obj.testval})
def get_match(match_fields):
"""
Returns a packet and an OXM list that the packet matches,
according to match_fields.
"""
match, args = ofp.match(), {}
for _, field_obj in match_fields.items():
oxm, pkt_arg = get_oxm(field_obj)
match.oxm_list.append(oxm)
args.update(pkt_arg)
return (str(simple_tcp_packet(**args)), match)
def get_action(action, arg):
if action == "OUTPUT":
ofpaction = ofp.action.output(arg, ofp.OFPCML_NO_BUFFER)
elif action == "GROUP":
ofpaction = ofp.action.group(arg)
else:
logging.info("No get_action for %s", action)
exit(1)
return ofpaction
def get_apply_actions(actions):
"""
Returns a 1 element list of APPLY_ACTIONS instructions,
with actions specified in actions.
"""
instruction = ofp.instruction.apply_actions()
for action, arg in actions.items():
instruction.actions.append(get_action(action, arg))
return [instruction]
def get_group_all(gid, action_sets):
buckets = []
for b in action_sets:
buckets.append(ofp.bucket(actions=[get_action(a, arg) for a, arg in b.items()]))
return group_add(group_type=ofp.OFPGT_ALL, group_id=gid, buckets=buckets)
def get_group_mod(gid, action_sets):
buckets = []
for b in action_sets:
buckets.append(ofp.bucket(actions=[get_action(a, arg) for a, arg in b.items()]))
return group_mod(group_type=ofp.OFPGT_ALL, group_id=gid, buckets=buckets)
def setup_default_table_configurations(client, sess_hdl, dev_tgt):
result = client.smac_set_default_action_mac_learn(sess_hdl, dev_tgt)
assert result == 0
result = client.dmac_set_default_action_broadcast(sess_hdl, dev_tgt)
assert result == 0
result = client.mcast_src_pruning_set_default_action__nop(sess_hdl, dev_tgt)
assert result == 0
def setup_pre(mc, sess_hdl, dev_tgt):
mgrp_hdl = mc.mc_mgrp_create(sess_hdl, dev_tgt.dev_id, 1)
port_map = [0] * 32
lag_map = [0] * 32
# port 1, port 2, port 3
port_map[0] = (1 << 1) | (1 << 2) | (1 << 3)
node_hdl = mc.mc_node_create(sess_hdl, dev_tgt.dev_id, 0,
bytes_to_string(port_map),
bytes_to_string(lag_map))
mc.mc_associate_node(sess_hdl, dev_tgt.dev_id, mgrp_hdl, node_hdl)
def repopulate_openflow_defaults(client, sess_hdl, dev_tgt):
result = client.packet_out_set_default_action_nop(sess_hdl, dev_tgt)
match_spec = l2_switch_packet_out_match_spec_t(
fabric_header_packetType = 5)
result = client.packet_out_table_add_with_terminate_cpu_packet(
sess_hdl, dev_tgt, match_spec)
match_spec = l2_switch_packet_out_match_spec_t(
fabric_header_packetType = 2)
result = client.packet_out_table_add_with_terminate_fabric_multicast_packet(
sess_hdl, dev_tgt, match_spec)
result = client.ofpat_group_egress_set_default_action_nop(
sess_hdl, dev_tgt)
def setup(self):
sess_hdl = self.conn_mgr.client_init()
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
self.client.clean_all(sess_hdl, dev_tgt)
setup_default_table_configurations(self.client, sess_hdl, dev_tgt)
setup_pre(self.mc, sess_hdl, dev_tgt)
repopulate_openflow_defaults(self.client, sess_hdl, dev_tgt)
class Output(openflow_base_tests.OFTestInterface):
"""
    Forwards a packet, relies on PDSetup being run first
"""
def __init__(self):
openflow_base_tests.OFTestInterface.__init__(self, "l2_switch")
def runTest(self):
setup(self)
ports = sorted(config["port_map"].keys())
table, out_port = openflow_tables["dmac"], ports[1]
table.match_fields[eth_dst_addr].testval = "00:01:02:03:04:05"
pkt, match = get_match(table.match_fields)
output = {
"OUTPUT": out_port
}
instr = get_apply_actions(output)
req = flow_add(table_id=table.id, match=match, instructions=instr,
buffer_id=buf, priority=1, cookie=41)
exp_pkt = simple_tcp_packet()
self.controller.message_send(req)
do_barrier(self.controller)
self.dataplane.send(ports[-1], pkt)
verify_packet(self, exp_pkt, out_port)
req = flow_delete(cookie=41, table_id=0)
self.controller.message_send(req)
do_barrier(self.controller)
class PacketIn(openflow_base_tests.OFTestInterface):
"""
"""
def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "l2_switch")
def runTest(self):
setup(self)
ports = sorted(config["port_map"].keys())
in_port = ports[0]
table = openflow_tables["dmac"]
table.match_fields[eth_dst_addr].testval = "00:01:02:03:04:05"
pkt, match = get_match(table.match_fields)
output = {
"OUTPUT": ofp.const.OFPP_CON
|
TROLLER
}
instr = get_apply_actions(output)
req = flow_add(table_id=table.id, match=match, instructions=instr,
buffer_id=buf, priority=1, cookie=42)
self.controller.message_send(req)
do_barrier(self.controller)
self.dataplane.send(in_port, pkt)
verify_packet_in(self, str(pkt), in_port, ofp.const.OFPR_ACTION,
controller=self.controller)
req = flow_delete(cookie=42, table_id=0)
self.controller.message_send(req)
do_barrier(self.controller)
|
PyListener/CF401-Project-1---PyListener
|
pylistener/models/mymodel.py
|
Python
|
mit
| 1,872
| 0.001068
|
from sqlalchemy import (
Column,
Index,
Integer,
Text,
LargeBinary,
Unicode,
ForeignKey,
Table
)
from sqlalchemy.orm import relationship
from .meta import Base
class User(Base):
"""This class defines a User model."""
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(Unicode, unique=True)
password = Column(Unicode)
sub_user = Column(Unicode)
address_rel = relationship('AddressBook')
attr_assoc_rel = relationship('UserAttributeLink')
class AddressBook(Base):
__tablename__ = 'addresses'
id = Column(Integer, primary_key=True)
name = Column(Unicode)
phone = Column(Unicode)
email = Column(Unicode)
picture = Column(LargeBinary)
pic_mime = Column(Text)
user = Column(Integer, ForeignKey('users.id'))
class Category(Base):
__tablename__ = 'categories'
id = Column(Integer, primary_key=True)
label = Column(Unicode)
desc = Column(Unicode)
picture = Column(LargeBinary)
pic_mime = Column(Text)
children = relationship('Attribute')
class Attribute(Base):
__tablename__ = 'attributes'
id = Column(Integer, primary_key=True)
label = Column(Unicode)
desc = Column(Unicode)
picture = Column(LargeBinary)
pic_mime = Column(Text)
cat_id = Column(Integer, ForeignKey('categories.id'))
user_assoc_rel = relationship('UserAttributeLink')
class UserAttributeLink(Base):
__tablename__ = "users_attributes_link"
user_id = Column(Integer, ForeignKey('users.id'), nullable=False, primary_key=True)
    attr_id = Column(Integer, ForeignKey('attributes.id'), nullable=False, primary_key=True)
priority = Column(Integer, default=1, nullable=False)
num_hits = Column(Integer, default=0, nullable=False)
user_rel = relationship("User")
attr_rel = relationship("Attribute")
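# Hedged usage sketch (not part of the original module): with a configured
# SQLAlchemy session, the association model above could be queried like this.
# `session` and `user` are assumed to exist elsewhere.
#
#     links = (session.query(UserAttributeLink)
#              .filter_by(user_id=user.id)
#              .order_by(UserAttributeLink.priority.desc())
#              .all())
#     for link in links:
#         print(link.attr_rel.label, link.num_hits)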
|
cpennington/course-discovery
|
course_discovery/urls.py
|
Python
|
agpl-3.0
| 1,899
| 0.00158
|
"""course_discovery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
import os
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import logout
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from course_discovery.apps.core import views as core_views
admin.autodiscover()
# pylint: disable=invalid-name
# Always login via edX OpenID Connect
login = RedirectView.as_view(url=reverse_lazy('social:begin', args=['edx-oidc']), permanent=False, query_string=True)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include('course_discovery.apps.api.urls', namespace='api')),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^auto_auth/$', core_views.AutoAuth.as_view(), name='auto_auth'),
url(r'^health/$', core_views.health, name='health'),
url(r'^login/$', login, name='login'),
url(r'^logout/$', logout, name='logout'),
url('', include('social.apps.django_app.urls', namespace='social')),
]
if settings.DEBUG and os.environ.get('ENABLE_DJANGO_TOOLBAR', False): # pragma: no cover
import debug_toolbar # pylint: disable=import-error
urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
|
mrachinskiy/booltron
|
lib.py
|
Python
|
gpl-3.0
| 1,452
| 0.001377
|
# SPDX-License-Identifier: GPL-3.0-or-later
# Copyright 2014-2022 Mikhail Rachinskiy
import random
from typing import Optional
import bpy
from bpy.types import Object, Operator
from mathutils import Vector
def object_offset(obs: list[Object], offset: float) -> None:
for ob in obs:
x = random.uniform(-offset, offset)
y = random.uniform(-offset, offset)
z = random.uniform(-offset, offset)
ob.matrix_basis.translation += Vector((x, y, z))
class ModUtils:
__slots__ = ("is_destructive", "solver", "threshold", "use_self", "use_hole_tolerant")
def __init__(self, op: Operator) -> None:
for prop in self.__slots__:
setattr(self, prop, getattr(op, prop))
def add(self, ob1: Object, ob2: Object, mode: str, name: str = "Boolean", remove_ob2: Optional[bool] = None) -> None:
if remove_ob2 is None:
remove_ob2 = self.is_destructive
md = ob1.modifiers.new(name, "BOOLEAN")
md.show_viewport = not self.is_destructive
md.operation = mode
md.solver = self.solver
md.use_self = self.use_self
md.use_hole_tolerant = self.use_hole_tolerant
md.double_threshold = self.threshold
md.object = ob2
if self.is_destructive:
override = {"obj
|
ect": ob1}
bpy.ops.object.modifier_apply(override, modifier=md.name)
if remove_ob2:
                bpy.data.meshes.remove(ob2.data)
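# Hedged usage sketch (not part of the original file): inside a Blender operator
# whose properties match ModUtils.__slots__, a destructive boolean could be
# applied roughly like this; `ob_a` and `ob_b` are assumed to be mesh objects.
#
#     mod = ModUtils(self)
#     mod.add(ob_a, ob_b, "DIFFERENCE")  # "DIFFERENCE" is one of Blender's boolean modes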
|
mac389/LVSI
|
src/main.py
|
Python
|
apache-2.0
| 885
| 0.035028
|
import json
import utils as tech
import numpy as np
cols_with_grades = [1,2,3]
pathologists = open('../data/rater-names','rb').read().splitlines()
contingency_tables = {}
possible_values = list(np.array(cols_with_grades)-1)
#No stains
f1 = '../data/no-stain.xls'
f2 = '../data/no-stain.xls'
contingency_tables['no-stain'] = tech.kappa(f1,f2,pathologists,cols_with_grades,'lvsi-grades-s-stains',possible_values)
#Stains
f1 = '../data/stains.xls'
f2 = '../data/stains.xls'
contingency_tables['stain'] = tech.kappa(f1,f2,pathologists,cols_with_grades,'lvsi-grades-c-stains',possible_values)
#Intra-rater relaibility
#Stains
f1 = '../data/stains.xls'
f2 = '../data/no-stain.xls'
contingency_tables['Intra-rater'] = tech.kappa(f1,f2,pathologists,cols_with_grades,'intra-rater-reliability',possible_values)
json.dump(contingency_tables,open('../data/contingency_tables.json','wb'))
|
mmerce/python
|
bigml/tests/read_statistical_tst_steps.py
|
Python
|
apache-2.0
| 923
| 0.002167
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nose.tools import eq_
from .world import world
from bigml.api import HTTP_OK
#@step(r'I get the test "(.*)"')
def i_get_the_tst(step, resource):
resource = world.api.get_statistical_test(resource)
world.status = resource['code']
eq_(world.status, HTTP_OK)
world.statistical_test = resource['object']
|
richardkiss/pycoin
|
tests/bloomfilter_test.py
|
Python
|
mit
| 2,635
| 0.004934
|
import binascii
import unittest
from pycoin.bloomfilter import filter_size_required, hash_function_count_required, BloomFilter, murmur3
from pycoin.symbols.btc import network
Spendable = network.tx.Spendable
h2b = binascii.unhexlify
class BloomFilterTest(unittest.TestCase):
def test_filter_size_required(self):
for ec, fpp, ev in [
(1, 0.00001, 3),
(1, 0.00000001, 5),
            (100, 0.000001, 360),
]:
fsr = filter_size_required(ec, fpp)
self.assertEqual(fsr, ev)
def test_hash_function_count_required(self):
for fs, ec, ev in [
(1, 1, 6),
(3, 1, 17),
(5, 1, 28),
(360, 100, 20),
]:
av = hash_function_count_required(fs, ec)
self.assertEqual(av, ev)
def test_BloomFilter(self):
bf = BloomFilter(20, hash_function_count=5, tweak=127)
bf.add_hash160(h2b("751e76e8199196d454941c45d1b3a323f1433bd6"))
tx_hash = h2b("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798")
spendable = Spendable(coin_value=1000, script=b'foo', tx_hash=tx_hash, tx_out_index=1)
bf.add_spendable(spendable)
self.assertEqual(bf.filter_bytes, h2b("0000400000000008011130000000101100000000"))
def test_murmur3(self):
# test vectors from https://stackoverflow.com/questions/14747343/murmurhash3-test-vectors
TEST_VECTORS = [
(b'', 0, 0),
(b'', 1, 0x514E28B7),
(b'', 0xffffffff, 0x81F16F39), # | make sure your seed uses unsigned 32-bit math
(h2b("FFFFFFFF"), 0, 0x76293B50), # | make sure 4-byte chunks use unsigned math
(h2b("21436587"), 0, 0xF55B516B), # | Endian order. UInt32 should end up as 0x87654321
(h2b("21436587"), 0x5082EDEE, 0x2362F9DE), # | Special seed value eliminates initial key with xor
(h2b("214365"), 0, 0x7E4A8634), # | Only three bytes. Should end up as 0x654321
(h2b("2143"), 0, 0xA0F7B07A), # | Only two bytes. Should end up as 0x4321
(h2b("21"), 0, 0x72661CF4), # | Only one byte. Should end up as 0x21
(h2b("00000000"), 0, 0x2362F9DE), # | Make sure compiler doesn't see zero and convert to null
(h2b("000000"), 0, 0x85F0B427), #
(h2b("0000"), 0, 0x30F4C306),
(h2b("00"), 0, 0x514E28B7),
]
for data, seed, expected_value in TEST_VECTORS:
actual_value = murmur3(data, seed=seed)
self.assertEqual(expected_value, actual_value)
if __name__ == "__main__":
unittest.main()
|
mnazim/django-rest-kickstart
|
users/views.py
|
Python
|
mit
| 362
| 0
|
from rest_framework.decorators import list_route
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets
from .models import User
from .serializers import UserSerializer
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
    http_method_names = ['get', 'patch']
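# Typical wiring for the viewset above with a DRF router (illustrative, not part
# of the original file; the URL prefix is an assumption):
#
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register(r'users', UserViewSet)
#     urlpatterns = router.urls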
|
mvaled/sentry
|
src/sentry/api/endpoints/project_user_stats.py
|
Python
|
bsd-3-clause
| 1,034
| 0.000967
|
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from rest_framework.response import Response
from sentry.app import tsdb
from sentry.api.base import EnvironmentMixin
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import Environment
class ProjectUserStatsEndpoint(EnvironmentMixin, ProjectEndpoint):
def get(self, request, project):
try:
environment_id = self._get_environment_id_from_request(request, project.organization_id)
except Environment.DoesNotExist:
raise ResourceDoesNotExist
now = timezone.now()
then = now - timedelta(days=30)
results = tsdb.get_distinct_counts_series(
tsdb.models.users_affected_by_project,
(project.id,),
then,
now,
rollup=3600 * 24,
environment_id=environment_id,
)[project.id]
return Response(results)
|
karthikdevel/fit
|
gui/wxpython/fitlistbox.py
|
Python
|
mit
| 546
| 0.001832
|
import wx
class FitListBox(wx.ListBox):
def GetDropList(self, itemlist):
        selections = [self.GetString(i) for i in self.GetSelections()]
drop_list = []
for i in itemlist:
if i not in selections:
                drop_list.append(i)
return drop_list
def GetSelList(self, itemlist):
selections = [self.GetString(i) for i in self.GetSelections()]
sel_list = []
for i in itemlist:
if i in selections:
sel_list.append(i)
return sel_list
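# Hedged usage sketch (not part of the original file): with a running wx.App and
# a FitListBox `box` populated from `items`, the two helpers partition the list
# into selected and unselected entries.
#
#     items = ["alpha", "beta", "gamma"]
#     selected = box.GetSelList(items)    # entries currently selected in the box
#     remaining = box.GetDropList(items)  # entries from the list that are not selected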
|
benreynwar/fpga-sdrlib
|
python/fpga_sdrlib/b100.py
|
Python
|
mit
| 3,118
| 0.002886
|
"""
Synthesise a QA module into the B100 FPGA.
"""
import os
import shutil
import subprocess
from jinja2 import Environment, FileSystemLoader
from fpga_sdrlib import config
from fpga_sdrlib.config import uhddir, miscdir, fpgaimage_fn
b100dir = os.path.join(uhddir, 'fpga', 'usrp2', 'top', 'B100')
custom_src_dir = os.path.join(config.verilogdir, 'uhd')
def set_image(fn):
shutil.copyfile(fn, fpgaimage_fn)
def make_defines_file(builddir, defines):
fn = os.path.join(builddir, 'global_defines.vh')
f = open(fn, 'w')
f.write(make_defines_prefix(defines))
f.close
return fn
def make_defines_prefix(defines):
lines = []
for k, v in defines.items():
if v is False:
pass
elif v is True:
lines.append('`define {0}'.format(k))
else:
lines.append('`define {0} {1}'.format(k, v))
txt = '\n'.join(lines)
txt += '\n'
return txt
def prefix_defines(fn, defines):
f = open(fn)
contents = f.read()
f.close()
prefix = make_defines_prefix(defines)
f = open(fn, 'w')
f.write(prefix)
f.write(contents)
f.close()
def make_make(name, builddir, inputfiles, defines):
header = make_defines_file(builddir, defines)
#shutil.copy(header, os.path.join(config.builddir, 'message'))
inputfiles = [header] + inputfiles
output_fn = os.path.join(builddir, 'Make.B100_{name}'.format(name=name))
template_fn = 'Make.B100_qa.t'
env = Environment(loader=FileSystemLoader(miscdir))
template = env.get_template(template_fn)
f_out = open(output_fn, 'w')
output_dir = os.path.join(builddir, 'build-B100_{name}'.format(name=name))
custom_defs = []
for k, v in defines.items():
if k == 'DEBUG':
custom_defs.append(k)
else:
custom_defs.append("{0}={1}".format(k, v))
custom_defs = " | ".join(custom_defs)
f_out.write(template.render(build_dir=output_dir,
custom_src_dir=custom_src_dir,
inputfiles=inputfiles,
#custom_defs=custom_defs,
))
f_out.close()
def synthesise(name, builddir):
output_dir = os.path.join(builddir, 'build-B100_{name}'.format(name=name))
    # Synthesise
currentdir = os.getcwd()
os.chdir(b100dir)
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
make_fn = os.path.join(builddir, 'Make.B100_{0}'.format(name))
logfile_fn = os.path.join(builddir, 'Make.B100_{0}.log'.format(name))
logfile = open(logfile_fn, 'w')
p = subprocess.Popen(['make', '-f', make_fn],
stdout=logfile, stderr=logfile)
p.wait()
logfile.flush()
logfile.close()
# Check if last line is
# All constraints were met
f = open(logfile_fn, 'r')
lines = f.readlines()
lastline = lines[-2]
if lastline != 'All constraints were met.\n':
raise StandardError("Synthesis failed: see {0}".format(logfile_fn))
f.close()
os.chdir(currentdir)
return os.path.join(output_dir, 'B100.bin')
|
jplusplus/wikileaks-cg-analyser
|
ngrams.py
|
Python
|
gpl-3.0
| 991
| 0.009082
|
# -*- coding: utf-8 -*-
import argparse
def ngrams(input='', n_min=0, n_max=5):
input = input.split(' ')
output = {}
end = n_max
for n in range(n_min+1, end+n_min+1):
for i in range(len(input)-n+1):
token = " ".join(input[i:i+n])
# Count the ngram
output[token] = output.get(token, 0) + 1
return output
def main(tokens='', n_min=0, n_max=5):
# Print out ngrams
return ngrams( str(tokens), int(n_min), int(n_max) )
# Command-line execution of the module
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Command arguments
    parser.add_argument('-m', '--min', help="Minimum gram to extract.", dest="n_min", default=0)
parser.add_argument('-x', '--max', help="Maximum gram to extract.", dest="n_max", default=5)
parser.add_argument('tokens', help="String to analyze.")
# Parse arguments
args = parser.parse_args()
# Print out the main function
print main( **vars(args) )
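# Quick sanity check of the ngrams helper above (illustrative, not part of the
# original file; key order of the returned dict is not guaranteed):
#
#     ngrams('the quick brown fox', n_min=0, n_max=2)
#     # {'the': 1, 'quick': 1, 'brown': 1, 'fox': 1,
#     #  'the quick': 1, 'quick brown': 1, 'brown fox': 1}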
|
koolfreak/volunteer_planner
|
scheduler/templatetags/number_registrations.py
|
Python
|
agpl-3.0
| 260
| 0
|
import datetime
from django import template
from registration.models import RegistrationProfile
register = template.Library()
@register.simple_tag
def get_volunteer_number():
    volunteers = RegistrationProfile.objects.all().count()
return volunteers
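# Hedged template usage (not part of the original file); the load name matches
# this module's file name:
#
#     {% load number_registrations %}
#     <p>Registered volunteers: {% get_volunteer_number %}</p>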
|
jessefeinman/FintechHackathon
|
python-getting-started/nlp/classificationTools.py
|
Python
|
bsd-2-clause
| 2,927
| 0.003075
|
import nltk
import os
from random import shuffle
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from datetime import datetime
from nltk import classify, NaiveBayesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
import pickle
def listOfFiles(flagged):
files = os.listdir(os.getcwd() + "/emailNames" + flagged)
listToReturn = []
for file in files:
with open("emailNames" + flagged + "/" + file, 'r') as names:
            listToReturn.append(([word[:-1].lower() for word in names], flagged))
names.close()
return listToReturn
documents = listOfFiles("Flagged") + listOfFiles("NotFlagged")
shuffle(documents)
all_words = []
for document in documents:
all_words.extend(document[0])
word_features = nltk.FreqDist(all_words)
def find_features(document):
words = set(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
featuresets = [(find_features(rev), category) for (rev, category) in documents]
try:
d = SklearnClassifier(MultinomialNB())
d.train(featuresets[:300])
print(classify.accuracy(d, featuresets[300:]))
except:
print("d")
try:
a = NaiveBayesClassifier.train(featuresets[:300])
print(classify.accuracy(a, featuresets[300:]))
except:
print("a")
try:
e = SklearnClassifier(LinearSVC())
e.train(featuresets[:300])
print(classify.accuracy(e, featuresets[300:]))
except:
print("e")
try:
f = SklearnClassifier(SVC(), sparse=False)
f.train(featuresets[:300])
print(classify.accuracy(f, featuresets[300:]))
except:
print("f")
try:
g = SklearnClassifier(LinearSVC())
g.train(featuresets[:300])
print(classify.accuracy(g, featuresets[300:]))
except:
print("g")
try:
h = nltk.classify.DecisionTreeClassifier.train(featuresets[:300], entropy_cutoff=0, support_cutoff=0)
print(classify.accuracy(h, featuresets[300:]))
except:
print("h")
def saveClassifier(classifier):
    pickleClassifier = open(type(classifier).__name__ + datetime.now().strftime('%H:%M:%S') + ".pickle", "wb")
pickle.dump(classifier, pickleClassifier)
pickleClassifier.close()
return classifier
def loadClassifier(name):
pickledClassifier = open(name, "rb")
classifier = pickle.load(pickledClassifier)
pickledClassifier.close()
return classifier
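# Hedged round-trip of the pickle helpers above (the file name is illustrative;
# the exact name depends on what saveClassifier generates at run time):
#
#     clf = saveClassifier(a)                        # pickles and returns the classifier
#     restored = loadClassifier("<generated-name>.pickle")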
|
marlboromoo/basinboa
|
basinboa/system/config.py
|
Python
|
mit
| 869
| 0.003452
|
#!/usr/bin/env python
"""
server config.
"""
from basinboa.system.loader import YamlLoader
class Config(object):
"""docstr
|
ing for Config"""
def __init__(self, name):
super(Config, self).__init__()
self.name = name
self.items = 0
def __repr__(self):
return "Config: %s, items: %s" % \
(self.name, self.items)
class ConfigLoader(YamlLoader):
"""docstring for ConfigLoader"""
SERVER_CONFIG = 'server'
def __init__(self, data_dir):
super(ConfigLoader, self).__init__(data_dir)
def get(self, name):
"""docstring for get"""
data = self.load(name)
if data:
return self.register_attr(Config(name), data)
return None
def get_server_config(self):
"""docstring for get"""
return self.get(self.SERVER_CONFIG)
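# Hedged usage sketch (not part of the original file), assuming the YamlLoader
# base class can resolve 'server' to a YAML file inside the given data_dir:
#
#     loader = ConfigLoader("data/")
#     server_cfg = loader.get_server_config()  # a Config named 'server', or None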
|
astrodsg/django-stock-up
|
stock_up/settings/__init__.py
|
Python
|
mit
| 36
| 0.027778
|
from stock_up.settings.base import *
|
hds-lab/textvisdrg-prototype
|
textvis/topics/urls.py
|
Python
|
mit
| 545
| 0.007339
|
from django.conf.urls import patterns, include, url
import views
urlpatterns = patterns('',
url(r'^$', views.TopicModelIndexView.as_view(), name='topics_models'),
    url(r'^model/(?P<model_id>\d+)/$', views.TopicModelDetailView.as_view(), name='topics_model'),
url(r'^model/(?P<model_id>\d+)/topic/(?P<topic_id>\d+)/$', views.TopicDetailView.as_view(), name='topics_topic'),
    url(r'^model/(?P<model_id>\d+)/topic/(?P<topic_id>\d+)/word/(?P<word_id>\d+)/$', views.TopicWordDetailView.as_view(),
name='topics_topic_word'),
)
|
catapult-project/catapult
|
third_party/gsutil/third_party/pyasn1-modules/tests/test_rfc2314.py
|
Python
|
bsd-3-clause
| 2,078
| 0.000481
|
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
import sys
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc2314
try:
import unittest2 as unittest
except ImportError:
import unittest
class CertificationRequestTestCase(unittest.TestCase):
pem_text = """\
MIIDATCCAekCAQAwgZkxCzAJBgNVBAYTAlJVMRYwFAYDVQQIEw1Nb3Njb3cgUmVn
aW9uMQ8wDQYDVQQHEwZNb3Njb3cxGjAYBgNVBAoTEVNOTVAgTGFib3JhdG9yaWVz
MQwwCgYDVQQLFANSJkQxFTATBgNVBAMTDHNubXBsYWJzLmNvbTEgMB4GCSqGSIb3
DQEJARYRaW5mb0Bzbm1wbGFicy5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
ggEKAoIBAQC9n2NfGS98JDBmAXQn+vNUyPB3QPYC1cwpX8UMYh9MdAmBZJCnvXrQ
Pp14gNAv6AQKxefmGES1b+Yd+1we9HB8AKm1/8xvRDUjAvy4iO0sqFCPvIfSujUy
pBcfnR7QE2itvyrMxCDSEVnMhKdCNb23L2TptUmpvLcb8wfAMLFsSu2yaOtJysep
oH/mvGqlRv2ti2+E2YA0M7Pf83wyV1XmuEsc9tQ225rprDk2uyshUglkDD2235rf
0QyONq3Aw3BMrO9ss1qj7vdDhVHVsxHnTVbEgrxEWkq2GkVKh9QReMZ2AKxe40j4
og+OjKXguOCggCZHJyXKxccwqCaeCztbAgMBAAGgIjAgBgkqhkiG9w0BCQIxExMR
U05NUCBMYWJvcmF0b3JpZXMwDQYJKoZIhvcNAQEFBQADggEBAAihbwmN9M2bsNNm
9KfxqiGMqqcGCtzIlpDz/2NVwY93cEZsbz3Qscc0QpknRmyTSoDwIG+1nUH0vzkT
Nv8sBmp9I1GdhGg52DIaWwL4t9O5WUHgfHSJpPxZ/zMP2qIsdPJ+8o19BbXRlufc
73c03H1piGeb9VcePIaulSHI622xukI6f4Sis49vkDaoi+jadbEEb6TYkJQ3AMRD
WdApGGm0BePdLqboW1Yv70WRRFFD8sxeT7Yw4qrJojdnq0xMHPGfKpf6dJsqWkHk
b5DRbjil1Zt9pJuF680S9wtBzSi0hsMHXR9TzS7HpMjykL2nmCVY6A78MZapsCzn
GGbx7DI=
"""
def setUp(self):
self.asn1Spec = rfc2314.CertificationRequest()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
assert not rest
assert asn1Object.prettyPrint()
assert der_encoder.encode(asn1Object) == substrate
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
meta1203/Trigonometry-Programlets
|
vector_dimensions.py
|
Python
|
apache-2.0
| 525
| 0.013333
|
import stdutils, math
from stdutils import inputAsDict, cTheta, niceRoot, cApprox
vals = inputAsDict(('size',cTheta))
real = math.cos(vals[cTheta])
comp = math.sin(vals[cTheta])
#str1 = '{} ({} + i {})'.format(niceRoot(vals['size']).getString(),niceRoot(real).getString(),niceRoot(comp).getString())
str2 = '<{}, {}>'.format(niceRoot(vals['size']*real).getString(),niceRoot(vals['size']*comp).getString())
str3 = '<{:.6g}, {:.6g}>'.format(vals['size']*real, vals['size']*comp)
print('{} {} {}'.format(str2, cApprox, str3))
|
ContinuumIO/ashiba
|
enaml/docs/source/sphinxext/refactordoc/fields.py
|
Python
|
bsd-3-clause
| 8,370
| 0.00359
|
# -*- coding: UTF-8 -*-
#------------------------------------------------------------------------------
# file: fields.py
# License: LICENSE.TXT
# Author: Ioannis Tziakos
#
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
import collections
import re
from line_functions import add_indent, is_empty, remove_indent, replace_at
class Field(collections.namedtuple('Field', ('name','signature','desc'))):
""" A docstring field.
    The class is based on the namedtuple class and represents the logic
    to check, parse and refactor a docstring field.
    Attributes
    ----------
    name : str
        The name of the field, usually the name of a parameter or attribute.
    signature : str
        The signature of the field. Commonly it is the class type of an argument
or the signature of a function.
desc : str
The description of the field. Given the type of the field this is a
single paragraph or a block of rst source.
"""
@classmethod
def is_field(cls, line, indent=''):
""" Check if the line is a field header.
"""
regex = indent + r'\*?\*?\w+\s:(\s+|$)'
match = re.match(regex, line)
return match
@classmethod
def parse(cls, lines):
"""Parse a field definition for a set of lines.
The field is assumed to be in one of the following formats::
<name> : <type>
<description>
or::
<name> :
<description>
or::
<name>
<description>
Arguments
---------
lines :
docstring lines of the field without any empty lines before or
after.
Returns
-------
field :
Field or subclass of Field
"""
header = lines[0].strip()
if ' :' in header:
arg_name, arg_type = re.split('\s\:\s?', header, maxsplit=1)
else:
arg_name, arg_type = header, ''
if len(lines) > 1:
lines = [line.rstrip() for line in lines]
return cls(arg_name.strip(), arg_type.strip(), lines[1:])
else:
            return cls(arg_name.strip(), arg_type.strip(), [''])
def to_rst(self, indent=4):
""" Outputs field in rst as an itme in a definition list.
Arguments
---------
indent : int
The indent to use for
|
the decription block.
Returns
-------
lines : list
A list of string lines of formated rst.
Example
-------
>>> Field('Ioannis', 'Ιωάννης', 'Is the greek guy.')
>>> print Field.to_rst()
Ioannis (Ιωάννης)
Is the greek guy.
"""
lines = []
header = '{0} ({1})'.format(self.name, self.signature)
lines.append(header)
lines += add_indent(self.desc, indent)
return lines
class AttributeField(Field):
""" Field for the argument function docstrings """
def to_rst(self, indent=4):
""" Outputs field in rst using the ``:param:`` role.
Arguments
---------
indent : int
            The indent to use for the description block.
Example
-------
>>> Field('indent', 'int', 'The indent to use for the decription block.')
>>> print Field.to_rst()
:param indent: The indent to use for the description block
:type indent: int
"""
lines = []
_type = self.signature
annotation = '{0} :annotation: = {1}'
type_str = '' if is_empty(_type) else annotation.format(indent * ' ', _type)
directive = '{0}.. attribute:: {1}'
lines += [directive.format(indent * ' ', self.name), type_str]
if type_str != '':
lines.append('')
lines += self.desc
lines.append('')
return lines
class ArgumentField(Field):
""" Field for the argument function docstrings """
def to_rst(self, indent=4):
""" Outputs field in rst using the ``:param:`` role.
Arguments
---------
indent : int
            The indent to use for the description block.
Example
-------
>>> Field('indent', 'int', 'The indent to use for the decription block.')
>>> print Field.to_rst()
:param indent: The indent to use for the description block
:type indent: int
"""
lines = []
name = self.name.replace('*','\*') # Fix cases like *args and **kwargs
indent_str = ' ' * indent
param_str = '{0}:param {1}: {2}'.format(indent_str, name, self.desc[0].strip())
type_str = '{0}:type {1}: {2}'.format(indent_str, name, self.signature)
lines.append(param_str)
lines += self.desc[1:]
if len(self.signature) > 0:
lines.append(type_str)
return lines
class ListItemField(Field):
""" Field that in rst is formated as an item in the list ignoring any
field.type information.
"""
def to_rst(self, indent=4, prefix=''):
""" Outputs field in rst using as items in an list.
Arguments
---------
indent : int
The indent to use for the decription block.
prefix : str
The prefix to use. For example if the item is part of a numbered
list then ``prefix='# '``.
Example
-------
Note
----
        The field description is reformatted into a line.
"""
indent_str = ' ' * indent
rst_pattern = '{0}{1}**{2}**{3}' if is_empty(self.desc[0]) else \
'{0}{1}**{2}** -- {3}'
description = '' if is_empty(self.desc[0]) else \
' '.join(remove_indent(self.desc))
return [rst_pattern.format(indent_str, prefix, self.name, description)]
class ListItemWithTypeField(Field):
""" Field for the return section of the function docstrings """
def to_rst(self, indent=4, prefix=''):
indent_str = ' ' * indent
_type = '' if self.signature == '' else '({0})'.format(self.signature)
rst_pattern = '{0}{1}**{2}** {3}{4}' if is_empty(self.desc[0]) else \
'{0}{1}**{2}** {3} -- {4}'
description = '' if is_empty(self.desc[0]) else \
' '.join(remove_indent(self.desc))
return [rst_pattern.format(indent_str, prefix, self.name, _type, description)]
class FunctionField(Field):
""" A field that represents a function """
@classmethod
def is_field(cls, line, indent=''):
regex = indent + r'\w+\(.*\)\s*'
match = re.match(regex, line)
return match
def to_rst(self, length, first_column, second_column):
split_result = re.split('\((.*)\)', self.name)
method_name = split_result[0]
method_text = ':meth:`{0} <{1}>`'.format(self.name, method_name)
summary = ' '.join([line.strip() for line in self.desc])
line = ' ' * length
line = replace_at(method_text, line, first_column)
line = replace_at(summary, line, second_column)
return [line]
MethodField = FunctionField
#------------------------------------------------------------------------------
# Functions to work with fields
#------------------------------------------------------------------------------
def max_name_length(method_fields):
""" Find the max length of the function name in a list of method fields.
Arguments
---------
fields : list
The list of the parsed fields.
"""
return max([field[0].find('(') for field in method_fields])
def max_header_length(fields):
""" Find the max length of the header in a list of fields.
Arguments
---------
fields : list
The list of the parsed fields.
"""
return max([len(field[0]) for field in fields])
def max_desc_length(fields):
""" Find the max length of the description in a list of fields.
Arguments
-----
|
MarHai/ScrapeBot
|
setup.py
|
Python
|
gpl-2.0
| 13,133
| 0.004797
|
import platform
import os
import getpass
import sys
import traceback
from crontab import CronTab
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from scrapebot.configuration import Configuration
from scrapebot.database import base, User, Instance
def main():
print('Welcome to the ScrapeBot setup')
config = get_config()
instance_name = check_minimal_config(config)
print('Continuing to the database')
print('- connecting to ' + config.get('Database', 'host', fallback='localhost'))
try:
engine = get_engine(config)
base.metadata.create_all(engine)
db = get_db(engine)
except:
print('- uh, there is a problem with connecting to your database ...')
exit(3)
print('- read tables: ' + ', '.join(base.metadata.tables.keys()))
users = db.query(User).order_by(User.created).all()
user = None
if len(users) == 0:
print('- the database currently does not contain any users, so we will create a default one')
username = read_forcefully('- what name should this user listen to', 'root')
email = read_forcefully('- and what is this user\'s email address')
user = create_user(db, username, email)
else:
print('- one or many users available')
user = db.query(User).filter(User.name == 'root').first()
if user is None:
user = users[0]
while read_bool_forcefully('Do you want to create another user'):
username = read_forcefully('- what name should this user listen to')
email = read_forcefully('- and what is this user\'s email address')
create_user(db, username, email)
print('Checking this instance')
this_instance = db.query(Instance).filter(Instance.name == instance_name)
print('- it is called ' + instance_name)
if this_instance.count() == 0:
db.add(Instance(name=instance_name, owner_uid=user.uid))
db.commit()
print('- instance newly registered and ascribed to user "' + user.name + '"')
else:
print('- instance name already registered, meaning that it has been used elsewhere')
if read_bool_forcefully('- is this on purpose'):
print('- okay, fair enough, proceeding ...')
else:
instance_name = read_forcefully('- so how should this instance be called')
config.add_value('Instance', 'Name', instance_name)
config.write()
print('- alright, updated "config.ini"')
db.add(Instance(name=instance_name, owner_uid=user.uid))
db.commit()
print('- instance newly registered and ascribed to user "' + user.name + '"')
print('- browser-wise this instance will use ' + config.get('Instance', 'browser', fallback='Firefox'))
print('Finishing up')
print('- instance should be ready to use')
print('- to run it once, use the script "scrapebot.py"')
if platform.system() == 'Linux':
print('- to run it regularly and since you are using Linux, I recommend a cronjob')
os_user = getpass.getuser()
if read_bool_forcefully('- install cronjob for ' + os_user + ' now'):
cron = CronTab(user=os_user)
cron.remove_all(comment='ScrapeBot // ' + instance_name)
cronjob = cron.new(command='cd ' + os.getcwd() + ' && ' + sys.executable +
' scrapebot.py >> scrapebot_cron.log',
comment='ScrapeBot // ' + instance_name)
cronjob.minute.every(2)
cron.write()
else:
print('- to run it regularly (which is what you want), you may want to use Windows Task Scheduler or the like')
print('---------')
print('Thanks for using; please direct any questions and pull requests to https://github.com/marhai/scrapebot')
db.close()
def create_user(db, username, email):
email = email.lower()
user = db.query(User).filter(User.email == email).first()
if user is None:
temp_user = User(name=username, email=email)
password = temp_user.create_password()
db.add(temp_user)
db.commit()
user = db.query(User).filter(User.email == email).one()
print('- user "' + user.name + '" with password "' + password + '" (no quotes) created')
return user
else:
print('- a user with this email address already exists so no new user was created')
return user
def get_config(create_if_necessary=True):
if not os.access('config.ini', os.R_OK) and create_if_necessary:
config = setup_config()
print('Reading newly created config.ini')
return config
elif not os.access('config.ini', os.R_OK):
        print('Configuration (config.ini) not found (have you tried running "setup.py" first?)')
exit(3)
else:
print('Configuration file "config.ini" found')
return Configuration()
def setup_config():
print('No configuration file ("config.ini") found, so let\'s create it in six easy steps:')
config = Configuration()
    print('(1) We first need access to the main MySQL database. ' +
          'Specify all the necessary credentials on where to find it!')
config.add_value('Database', 'Host', read_forcefully('- Database: Host', 'localhost'))
config.add_value('Database', 'User', read_forcefully('- Database: User', 'root'))
    config.add_value('Database', 'Password', read_forcefully('- Database: Password'))
config.add_value('Database', 'Database', read_forcefully('- Database: Database Name', 'scrapebot'))
if read_bool_forcefully('- Recipes sometimes take their time. In case your MySQL server has short timeouts set, ' \
'you may want ScrapeBot to renew database connections every now and then. Do you'):
print('- Okay, to check your MySQL server\'s timeout in seconds, you may run the following query:')
print(' SHOW SESSION VARIABLES LIKE \'wait_timeout\';')
config.add_value('Database', 'Timeout',
read_numeric_forcefully('- Enter the number of seconds after which to renew the connection'))
print('(2) Next, we need to specify this very instance.')
config.add_value('Instance', 'Name', read_forcefully('- Instance name'))
print('(3) Assuming you have installed all necessary prerequisites, what browser will this instance run.')
browser = read_forcefully('- Browser', 'Firefox')
config.add_value('Instance', 'Browser', browser)
if read_bool_forcefully('- Do you want to specify the path to ' + browser + '\'s binary'):
config.add_value('Instance', 'BrowserBinary', read_forcefully('- Path to binary'))
if read_bool_forcefully('- Do you want to change this browser\'s default user-agent string'):
config.add_value('Instance', 'BrowserUserAgent', read_forcefully('- Full user-agent string'))
config.add_value('Instance', 'BrowserLanguage', read_forcefully('- What language should the browser use', 'en'))
config.add_value('Instance', 'BrowserWidth', read_numeric_forcefully('- Browser width [in pixel]', 1024))
config.add_value('Instance', 'BrowserHeight', read_numeric_forcefully('- Browser height [in pixel]', 768))
print('(4) Also, to simulate human surf behavior, this instance introduces random delays. ' +
'Well, they are not completely random, though. You can set an approximate delay in seconds.')
config.add_value('Instance', 'Timeout', read_numeric_forcefully('- Rough browser delay [in seconds]', 1))
print('(5) When taking screenshots, should these be stored locally or in an Amazon S3 bucket (i.e., the cloud)?')
if read_bool_forcefully('- Do you want to upload them to an Amazon S3 bucket'):
config.add_value('Database', 'AWSaccess', read_forcefully('- Enter your AWS Access Key'))
config.add_value('Database', 'AWSsecret', read_forcefully('- Along with this, what is your AWS Secret Key'))
config.add_value('Database', 'AWSbucket', read_forcefully('- Finally, enter the name of your bucket'))
else:
screenshot_dir = read_forcefully('- Okay, so store them locall
|
ajb/pittsburgh-purchasing-suite
|
migrations/versions/3de73bde77e_fix_opportunities_model.py
|
Python
|
bsd-3-clause
| 4,373
| 0.009376
|
"""fix opportunities model
Revision ID: 3de73bde77e
Revises: 22cc439cd89
Create Date: 2015-06-25 00:55:51.895965
"""
# revision identifiers, used by Alembic.
revision = '3de73bde77e'
down_revision = '22cc439cd89'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('category_opportunity_association',
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('opportunity_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['category.id'], ondelete='SET NULL'),
sa.ForeignKeyConstraint(['opportunity_id'], ['opportunity.id'], ondelete='SET NULL')
)
op.create_index(op.f('ix_category_opportunity_association_category_id'), 'category_opportunity_association', ['category_id'], unique=False)
op.create_index(op.f('ix_category_opportunity_association_opportunity_id'), 'category_opportunity_association', ['opportunity_id'], unique=False)
op.create_table('document',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('display_name', sa.String(length=255), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('form_href', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_documents_id'), 'document', ['id'], unique=False)
op.add_column(u'opportunity', sa.Column('contact_id', sa.Integer(), nullable=False))
op.add_column(u'opportunity', sa.Column('planned_deadline', sa.DateTime(), nullable=True))
op.add_column(u'opportunity', sa.Column('planned_open', sa.DateTime(), nullable=True))
op.add_column(u'opportunity', sa.Column('created_by', sa.Integer(), nullable=False))
op.add_column(u'opportunity', sa.Column('document', sa.String(length=255), nullable=True))
op.add_column(u'opportunity', sa.Column('document_href', sa.String(length=255), nullable=True))
op.add_column(u'opportunity', sa.Column('created_from_id', sa.Integer(), nullable=True))
op.add_column(u'opportunity', sa.Column('is_public', sa.Boolean(), nullable=True))
op.add_column(u'opportunity', sa.Column('documents_needed', postgresql.ARRAY(sa.Integer()), nullable=True))
op.create_foreign_key('opportunity_created_by_user_id_fkey', 'opportunity', 'users', ['created_by'], ['id'])
op.drop_constraint(u'opportunity_category_id_fkey', 'opportunity', type_='foreignkey')
op.create_foreign_key('opportunity_user_id_fkey', 'opportunity', 'users', ['contact_id'], ['id'])
op.drop_column(u'opportunity', 'category_id')
op.drop_column(u'opportunity', 'bid_open')
op.drop_column(u'opportunity', 'contract_id')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column(u'opportunity', sa.Column('bid_open', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.add_column(u'opportunity', sa.Column('category_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column(u'opportunity', sa.Column('contract_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint('opportunity_user_id_fkey', 'opportunity', type_='foreignkey')
op.create_foreign_key(u'opportunity_category_id_fkey', 'opportunity', 'category', ['category_id'], ['id'])
op.drop_constraint('opportunity_created_by_user_id_fkey', 'opportunity', type_='foreignkey')
op.drop_column(u'opportunity', 'created_from_id')
op.drop_column(u'opportunity', 'document_href')
op.drop_column(u'opportunity', 'document')
op.drop_column(u'opportunity', 'created_by')
op.drop_column(u'opportunity', 'planned_open')
op.drop_column(u'opportunity', 'planned_deadline')
op.drop_column(u'opportunity', 'contact_id')
op.drop_column(u'opportunity', 'is_public')
op.drop_column(u'opportunity', 'documents_needed')
op.drop_index(op.f('ix_category_opportunity_association_opportunity_id'), table_name='category_opportunity_association')
    op.drop_index(op.f('ix_category_opportunity_association_category_id'), table_name='category_opportunity_association')
op.drop_table('category_opportunity_association')
op.drop_index(op.f('ix_documents_id'), table_name='document')
op.drop_table('document')
    ### end Alembic commands ###
|
msipos/lasernotes
|
backend/webapp/tests.py
|
Python
|
agpl-3.0
| 3,156
| 0.001901
|
from django.test import TestCase
from webapp.forms import ItemForm
from webapp.models import User, Collection, CollectionPermission, Item, Accessor, OWNER, GUEST
from webapp.util import server_side_md
class ModelPermissionsTest(TestCase):
def setUp(self):
self.user1 = User.objects.create_user('a', 'a@a.com', 'a')
self.user2 = User.objects.create_user('b', 'b@b.com', 'b')
self.coll1 = Collection.objects.create(user=self.user1, name='coll A')
self.coll2 = Collection.objects.create(user=self.user1, name='coll B')
self.item1 = Item.objects.create(collection=self.coll1, title='foo', content='bar')
self.item2 = Item.objects.create(collection=self.coll2, title='faz', content='bez')
self.perm1 = CollectionPermission.objects.create(user=self.user1, collection=self.coll1, permission=OWNER)
self.perm2 = CollectionPermission.objects.create(user=self.user1, collection=self.coll2, permission=OWNER)
self.perm3 = CollectionPermission.objects.create(user=self.user2, collection=self.coll1, permission=GUEST)
def test_simple_access(self):
a1 = Accessor(self.user1)
cs1 = a1.query_collections()
assert len(cs1) == 2
assert cs1[0] == self.coll1
assert cs1[1] == self.coll2
is1 = a1.query_items()
assert len(is1) == 2
assert self.item1 in is1
assert self.item2 in is1
def test_new_permissions(self):
a1 = Accessor(self.user1)
a2 = Accessor(self.user2)
assert len(a1.query_collections()) == 2
assert len(a2.query_collections()) == 1
assert self.coll1 in a2.query_collections()
assert len(a1.query_collections(owner=True)) == 2
assert len(a2.query_collections(owner=True)) == 0
assert len(a1.query_items()) == 2
assert self.item1 in a1.query_items()
assert self.item2 in a1.query_items()
assert len(a2.query_items()) == 1
assert self.item1 in a2.query_items()
        assert len(a1.query_items(owner=True)) == 2
assert len(a2.query_items(owner=True)) == 0
class ValidationTest(TestCase):
def test_url_validator(self):
data = {
'title': 'My title',
'notes': '',
            'content': 'Some stuff',
'typ': 'E'
}
f = ItemForm(data)
assert f.is_valid()
data['typ'] = 'U'
f = ItemForm(data)
assert not f.is_valid()
data['content'] = 'https://lasernotes.com'
f = ItemForm(data)
assert f.is_valid()
class MarkdownTest(TestCase):
def test_xss(self):
evil_content = '''
This is evil content.
First we have a script tag:
<script>alert('foo')</script>
Then we also have an evil link:
<a href="javascript:alert('bar')">Linky</a>
Finally we have an [evil md link](javascript:3+3) and a good
[good md link](foo/bar/baz?q=foo).
'''
rendered = server_side_md(evil_content)
assert '<script>' not in rendered
assert '3+3' not in rendered
assert 'foo/bar/baz?q=foo' in rendered
assert "alert('bar')" not in rendered
|
zouzhberk/ambaridemo
|
demo-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_server.py
|
Python
|
apache-2.0
| 1,523
| 0.009192
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
from resource_management import *
from webhcat import webhcat
from webhcat_service import webhcat_service
class WebHCatServer(Script):
def install(self, env):
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
webhcat()
def start(self, env):
import params
env.set_params(params)
self.configure(env) # FOR SECURITY
webhcat_service(action = 'start')
def stop(self, env):
import params
env.set_params(params)
    webhcat_service(action = 'stop')
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.webhcat_pid_file)
if __name__ == "__main__":
WebHCatServer().execute()
|
LTD-Beget/sprutio
|
app/modules/webdav/actions/files/copy.py
|
Python
|
gpl-3.0
| 934
| 0.004283
|
from core import FM
from core.FMOperation import FMOperation
class CopyFiles(FM.BaseAction):
def __init__(self, request, paths, session, target, overwrite, **kwargs):
super(CopyFiles, self).__init__(request=request, **kwargs)
self.paths = paths
self.session = session
self.target = target
self.overwrite = overwrite
def run(self):
        request = self.get_rpc_request()
operation = FMOperation.create(FM.Action.COPY, FMOperation.STATUS_WAIT)
result = request.request('webdav/copy_files', login=self.request.get_current_user(),
password=self.request.get_current_password(), status_id=operation.id,
                                 source=self.session, target=self.target, paths=self.paths, overwrite=self.overwrite)
answer = self.process_result(result)
answer["data"] = operation.as_dict()
return answer
|
liangsun/me
|
webapp/models/user.py
|
Python
|
mit
| 573
| 0.001745
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
class User(object):
def __init__(self, id, email, passwd, session, session_expire_time, ctime, rtime):
self.id = str(id)
self.email = email
self.passwd = passwd
        self.session = session
self.session_expire_time = session_expire_time
        self.ctime = ctime
        self.rtime = rtime
@classmethod
def get(cls, id):
pass
@classmethod
def get_by_email(cls, email):
pass
@classmethod
def register(cls, email, passwd):
pass
def login_user(email, passwd):
|
wbvalid/python2
|
aioSqlQuery.py
|
Python
|
unlicense
| 1,328
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pdb
import time
import cx_Oracle
import multiprocessing
ipList = ("192.168.7.121",
"192.168.7.122",
"192.168.7.123",
"192.168.7.124",
"192.168.7.125",
"192.168.7.126",
"192.168.7.127",
"192.168.7.128",
"192.168.7.129",
"192.168.7.130",
"192.168.7.131",
"192.168.7.132",
"192.168.7.133",
"192.168.7.134",
"192.168.7.135")
sqlstr = ''
with open('testSql.sql', 'r') as fs:
sqlstr = fs.read().strip('\n')
def doQuery(ip, sql):
result = []
conn = cx_Oracle.connect("fhnsdb/fhnsdb@%s:1521/ora11g" % ip)
c = conn.cursor()
x = c.execute(sql)
result = x.fetchall()
c.close()
conn.close()
return result
# cpus = multiprocessing.cpu_count()
pool = multiprocessing.Pool()
result = []
for ips in ipList:
# pdb.set_trace()
rtmp = pool.apply_async(doQuery, args=(ips, sqlstr))
result.append([ips, rtmp])
pool.close()
pool.join()
with open("Sqlout.log", "w") as fd:
    for i in result:
for j in i[1].get():
strresult = ''
# pdb.set_trace()
for k in j:
strresult += '%s,' % str(k)
fd.write("%s\n" % strresult.rstrip(','))
|
cheer021/BikesPrediction_DP
|
bikes_prediction/manage.py
|
Python
|
mit
| 259
| 0.003861
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bikes_prediction.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
SamaraCardoso27/eMakeup
|
backend/test/course_test.py
|
Python
|
mit
| 950
| 0.012632
|
from base import GAETestCase
from config.template_middleware import TemplateResponse
from routes.courses import new
from tekton.gae.middleware.redirect import RedirectResponse
from student.student_model import Course
__author__ = 'samara'
class NewTeste(GAETestCase):
def test_sucesso(self):
resposta = new.salvar(name='bla')
self.assertIsInstance(resposta, RedirectResponse)
self.assertEqual('/courses',resposta.context)
cursos = Course.query().fetch()
self.assertEqual(1,len(cursos))
curso = cursos[0]
self.assertEqual('bla',curso.name)
def test_validacao(self):
resposta = new.salvar()
self.assertIsInstance(resposta,TemplateResponse)
self.assert_can_render(resposta)
self.assertIsNone(Course.query().get())
self.maxDiff = True
self.assertDictEqual({u'course':{},u'erros':{'name':u'Required field'}},resposta.context)
|
madj4ck/ansible
|
lib/ansible/plugins/lookup/file.py
|
Python
|
gpl-3.0
| 2,558
| 0.003518
|
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import codecs
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if not isinstance(terms, list):
terms = [ terms ]
ret = []
for term in terms:
basedir_path = self._loader.path_dwim(term)
relative_path = None
playbook_path = None
# Special handling of the file lookup, used primarily when the
# lookup is done from a role. If the file isn't found in the
# basedir of the current file, use dwim_relative to look in the
# role/files/ directory, and finally the playbook directory
# itself (which will be relative to the current working dir)
            if 'role_path' in variables:
                relative_path = self._loader.path_dwim_relative(variables['role_path'], 'files', term, check=False)
# FIXME: the original file stuff still needs to be worked out, but the
# playbook_dir stuff should be able to be removed as it should
# be covered by the fact that the loader contains that info
#if 'playbook_dir' in variables:
# playbook_path = os.path.join(variables['playbook_dir'], term)
for path in (basedir_path, relative_path, playbook_path):
try:
contents, show_data = self._loader._get_file_contents(path)
ret.append(contents.rstrip())
break
except AnsibleParserError:
continue
else:
raise AnsibleError("could not locate file in lookup: %s" % term)
return ret
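# Typical playbook usage of this lookup, shown here only as a hedged comment
# (the YAML task and file path are illustrative, not part of this plugin):
#
#     - debug:
#         msg: "{{ lookup('file', 'files/motd.txt') }}"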
|
sa2ajj/DistroTracker
|
pts/mail/tests/tests_control.py
|
Python
|
gpl-2.0
| 90,050
| 0.000233
|
# -*- coding: utf-8 -*-
# Copyright 2013 The Distro Tracker Developers
# See the COPYRIGHT file at the top-level directory of this distribution and
# at http://deb.li/DTAuthors
#
# This file is part of Distro Tracker. It is subject to the license terms
# in the LICENSE file found in the top-level directory of this
# distribution and at http://deb.li/DTLicense. No part of Distro Tracker,
# including this file, may be copied, modified, propagated, or distributed
# except according to the terms contained in the LICENSE file.
"""
Tests for :mod:`pts.mail.pts_control`.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.test import TestCase
from django.core import mail
from django.utils.encoding import force_bytes
from pts.mail import control
from pts.core.utils import pts_render_to_string
from pts.core.utils import extract_email_address_from_header
from pts.core.utils import get_or_none
from pts.core.models import PackageName, EmailUser, Subscription
from pts.core.models import Keyword
from pts.core.models import Team
from pts.core.models import BinaryPackageName
from pts.core.models import SourcePackageName
from pts.core.models import SourcePackage
from pts.accounts.models import User
from pts.mail.models import CommandConfirmation
from pts.mail.control.commands import UNIQUE_COMMANDS
from email import encoders
from email.message import Message
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.utils import make_msgid
from datetime import timedelta
import re
PTS_CONTACT_EMAIL = settings.PTS_CONTACT_EMAIL
PTS_CONTROL_EMAIL = settings.PTS_CONTROL_EMAIL
class EmailControlTest(TestCase):
def control_process(self):
"""
Helper method. Passes the constructed control message to the control
processor.
"""
control.process(force_bytes(self.message.as_string(), 'utf-8'))
def setUp(self):
self.reset_message()
def set_default_headers(self):
"""
Helper method which adds the default headers for each test message.
"""
self.message.add_header('From', 'John Doe <john.doe@unknown.com>')
self.message.add_header('To', PTS_CONTROL_EMAIL)
self.message.add_header('Subject', 'Commands')
self.message.add_header('Message-ID', make_msgid())
    def set_header(self, header_name, header_value):
"""
Helper method which sets the given value for the given header.
:param header_name: The name of the header to set
:param header_value: The value of the header to set
"""
if header_name in self.message:
del self.message[header_name]
self.message.add_header(header_name, header_value)
def set_input_lines(self, lines):
"""
Sets the lines of the message body which represent sent commands.
:param lines: All lines of commands
:param type: iterable
"""
payload = '\n'.join(lines)
if self.multipart:
plain_text = MIMEText('plain')
plain_text.set_payload(payload)
self.message.attach(plain_text)
else:
self.message.set_payload(payload)
def make_multipart(self, alternative=False):
"""
Helper method which converts the test message into a multipart message.
"""
if alternative:
self.message = MIMEMultipart('alternative')
else:
self.message = MIMEMultipart()
self.set_default_headers()
self.multipart = True
def add_part(self, mime_type, subtype, data):
"""
Adds the given part to the test message.
:param mime_type: The main MIME type of the new part
:param subtype: The MIME subtype of the new part
:param data: The payload of the part
"""
part = MIMEBase(mime_type, subtype)
part.set_payload(data)
if mime_type != 'text':
encoders.encode_base64(part)
self.message.attach(part)
def reset_message(self):
"""
Helper method resets any changes made to the test message.
"""
self.message = Message()
self.multipart = False
self.set_default_headers()
def make_comment(self, text):
"""
Helper function which creates a comment from the given text.
"""
return '# ' + text
def assert_response_sent(self, number_of_responses=1):
"""
Helper method which asserts that the expected number of responses is
sent.
:param number_of_responses: The expected number of responses.
"""
self.assertEqual(len(mail.outbox), number_of_responses)
def assert_response_not_sent(self):
"""
Helper method which asserts that no responses were sent.
"""
self.assertEqual(len(mail.outbox), 0)
def assert_in_response(self, text, response_number=-1):
"""
Helper method which asserts that the given text is found in the given
response message.
:param text: The text which needs to be found in the response.
:param response_number: The index number of the response message.
Standard Python indexing applies, which means that -1 means the
last sent message.
"""
self.assertTrue(mail.outbox)
out_mail = mail.outbox[response_number]
self.assertIn(text, out_mail.body)
def assert_line_in_response(self, line, response_number=-1):
"""
Helper method which asserts that the given full line of text is found
in the given response message.
:param line: The line of text which needs to be found in the response.
:param response_number: The index number of the response message.
Standard Python indexing applies, which means that -1 means the
last sent message.
"""
self.assertTrue(mail.outbox)
out_mail = mail.outbox[response_number]
self.assertIn(line, out_mail.body.splitlines())
def assert_line_not_in_response(self, line, response_number=-1):
"""
Helper method which asserts that the given full line of text is not
found in the given response message.
        :param line: The line of text which should not be found in the response.
:param response_number: The index number of the response message.
Standard Python indexing applies, which means that -1 means the
last sent message.
"""
self.assertTrue(mail.outbox)
out_mail = mail.outbox[response_number]
self.assertNotIn(line, out_mail.body.splitlines())
def get_list_item(self, item, bullet='*'):
"""
Helper method which returns a representation of a list item.
:param item: The list item's content
:type item: string
:param bullet: The character used as the "bullet" of the list.
"""
return bullet + ' ' + str(item)
def assert_list_in_response(self, items, bullet='*'):
"""
Helper method which asserts that a list of items is found in the
response.
"""
self.assert_in_response('\n'.join(
self.get_list_item(item, bullet)
for item in items
))
def assert_list_item_in_response(self, item, bullet='*'):
"""
Helper method which asserts that a single list item is found in the
response.
"""
self.assert_line_in_response(self.get_list_item(item, bullet))
def assert_list_item_not_in_response(self, item, bullet='*'):
"""
Helper method which asserts that a single list item is not found in the
response.
"""
self.assert_line_not_in_response(self.get_list_item(item, bullet))
def assert_not_in_response(self, text, response_number=-1):
"""
Helper method which asserts that the given text is not found in the
given response message.
        :param text: The text which should not be found in the response.
:param response_numb
|
pkill-nine/qutebrowser
|
tests/unit/misc/test_readline.py
|
Python
|
gpl-3.0
| 11,925
| 0
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.misc.readline."""
import re
import inspect
from PyQt5.QtWidgets import QLineEdit, QApplication
import pytest
from qutebrowser.misc import readline
# Some functions aren't 100% readline compatible:
# https://github.com/qutebrowser/qutebrowser/issues/678
# Those are marked with fixme and have another value marked with '# wrong'
# which marks the current behavior.
fixme = pytest.mark.xfail(reason='readline compatibility - see #678')
class LineEdit(QLineEdit):
"""QLineEdit with some methods to make testing easier."""
def _get_index(self, haystack, needle):
"""Get the index of a char (needle) in a string (haystack).
Return:
The position where needle was found, or None if it wasn't found.
"""
try:
return haystack.index(needle)
except ValueError:
return None
def set_aug_text(self, text):
"""Set a text with </> markers for selected text and | as cursor."""
real_text = re.sub('[<>|]', '', text)
self.setText(real_text)
cursor_pos = self._get_index(text, '|')
sel_start_pos = self._get_index(text, '<')
sel_end_pos = self._get_index(text, '>')
if sel_start_pos is not None and sel_end_pos is None:
raise ValueError("< given without >!")
if sel_start_pos is None and sel_end_pos is not None:
raise ValueError("> given without <!")
if cursor_pos is not None:
if sel_start_pos is not None or sel_end_pos is not None:
raise ValueError("Can't mix | and </>!")
self.setCursorPosition(cursor_pos)
elif sel_start_pos is not None:
if sel_start_pos > sel_end_pos:
raise ValueError("< given after >!")
sel_len = sel_end_pos - sel_start_pos - 1
self.setSelection(sel_start_pos, sel_len)
def aug_text(self):
"""Get a text with </> markers for selected text and | as cursor."""
text = self.text()
chars = list(text)
cur_pos = self.cursorPosition()
assert cur_pos >= 0
chars.insert(cur_pos, '|')
if self.hasSelectedText():
selected_text = self.selectedText()
sel_start = self.selectionStart()
sel_end = sel_start + len(selected_text)
assert sel_start > 0
assert sel_end > 0
assert sel_end > sel_start
assert cur_pos == sel_end
assert text[sel_start:sel_end] == selected_text
chars.insert(sel_start, '<')
chars.insert(sel_end + 1, '>')
return ''.join(chars)
def _validate_deletion(lineedit, bridge, method, text, deleted, rest):
"""Run and validate a text deletion method on the ReadLine bridge.
Args:
lineedit: The LineEdit instance.
bridge: The ReadlineBridge instance.
method: Reference to the method on the bridge to test.
text: The starting 'augmented' text (see LineEdit.set_aug_text)
deleted: The text that should be deleted when the method is invoked.
rest: The augmented text that should remain after method is invoked.
"""
lineedit.set_aug_text(text)
method()
assert bridge._deleted[lineedit] == deleted
assert lineedit.aug_text() == rest
lineedit.clear()
bridge.rl_yank()
assert lineedit.aug_text() == deleted + '|'
@pytest.fixture
def lineedit(qtbot, monkeypatch):
"""Fixture providing a LineEdit."""
le = LineEdit()
qtbot.add_widget(le)
monkeypatch.setattr(QApplication.instance(), 'focusWidget', lambda: le)
return le
@pytest.fixture
def bridge():
"""Fixture providing a ReadlineBridge."""
return readline.ReadlineBridge()
def test_none(bridge, qtbot):
"""Call each rl_* method with a None focusWidget."""
assert QApplication.instance().focusWidget() is None
for name, method in inspect.getmembers(bridge, inspect.ismethod):
if name.startswith('rl_'):
method()
@pytest.mark.parametrize('text, expected', [('f<oo>bar', 'fo|obar'),
('|foobar', '|foobar')])
def test_rl_backward_char(text, expected, lineedit, bridge):
"""Test rl_backward_char."""
lineedit.set_aug_text(text)
bridge.rl_backward_char()
assert lineedit.aug_text() == expected
@pytest.mark.parametrize('text, expected', [('f<oo>bar', 'foob|ar'),
('foobar|', 'foobar|')])
def test_rl_forward_char(text, expected, lineedit, bridge):
"""Test rl_forward_char."""
lineedit.set_aug_text(text)
bridge.rl_forward_char()
assert lineedit.aug_text() == expected
@pytest.mark.parametrize('text, expected', [('one <tw>o', 'one |two'),
('<one >two', '|one two'),
('|one two', '|one two')])
def test_rl_backward_word(text, expected, lineedit, bridge):
"""Test rl_backward_word."""
lineedit.set_aug_text(text)
bridge.rl_backward_word()
assert lineedit.aug_text() == expected
@pytest.mark.parametrize('text, expected', [
pytest.param('<o>ne two', 'one| two', marks=fixme),
('<o>ne two', 'one |two'), # wrong
pytest.param('<one> two', 'one two|', marks=fixme),
('<one> two', 'one |two'), # wrong
('one t<wo>', 'one two|')
])
def test_rl_forward_word(text, expected, lineedit, bridge):
"""Test rl_forward_word."""
lineedit.set_aug_text(text)
bridge.rl_forward_word()
assert lineedit.aug_text() == expected
def test_rl_beginning_of_line(lineedit, bridge):
"""Test rl_beginning_of_line."""
lineedit.set_aug_text('f<oo>bar')
bridge.rl_beginning_of_line()
assert lineedit.aug_text() == '|foobar'
def test_rl_end_of_line(lineedit, bridge):
"""Test rl_end_of_line."""
lineedit.set_aug_text('f<oo>bar')
bridge.rl_end_of_line()
assert lineedit.aug_text() == 'foobar|'
@pytest.mark.parametrize('text, expected', [('foo|bar', 'foo|ar'),
('foobar|', 'foobar|'),
('|foobar', '|oobar'),
('f<oo>bar', 'f|bar')])
def test_rl_delete_char(text, expected, lineedit, bridge):
"""Test rl_delete_char."""
lineedit.set_aug_text(text)
bridge.rl_delete_char()
assert lineedit.aug_text() == expected
@pytest.mark.parametrize('text, expected', [('foo|bar', 'fo|bar'),
('foobar|', 'fooba|'),
('|foobar', '|foobar'),
('f<oo>bar', 'f|bar')])
def test_rl_backward_delete_char(text, expected, lineedit, bridge):
"""Test rl_backward_delete_char."""
lineedit.set_aug_text(text)
bridge.rl_backward_delete_char()
assert lineedit.aug_text() == expected
@pytest.mark.parametrize('text, deleted, rest', [
('delete this| test', 'delete this', '| test'),
pytest.param('delete <this> test', 'delete this', '| test', marks=fixme),
    ('delete <this> test', 'delete ', '|this test'),  # wrong
pytest.param('f<oo>bar', 'foo', '|bar', marks=fixme),
('f<oo>bar', 'f', '|oobar'), # wrong
])
def test_rl_unix_line_discard(lineedit, bridge, text, deleted, rest):
"""Delete from the curso
|
Ninjakow/TrueSkill
|
frc_trueskill.py
|
Python
|
gpl-3.0
| 3,512
| 0.005125
|
from trueskill import TrueSkill, Rating, rate
import argparse
import requests
from datetime import datetime
#from pytba import api as tba
class FrcTrueSkill:
def __init__(self):
self.env = TrueSkill(draw_probability=0.02)
self.trueskills = {}
self.events = {}
self.get_previous_matches()
#for team in self.trueskills.keys():
# print team, self.skill(team)
def update(self, red_alliance, red_score, blue_alliance, blue_score):
# Calculate teams per alliance
for alliance in [red_alliance, blue_alliance]:
for team in alliance:
if not team in self.trueskills:
self.trueskills[team] = self.env.Rating()
# Update ratings based on result
if red_score == blue_score: # Tied
if red_score == -1:
return # No result yet
ranks = [0, 0]
elif red_score > blue_score: # Red beat blue
ranks = [0, 1] # Lower is better
else:
ranks = [1, 0]
new_red, new_blue = self.env.rate([[self.trueskills[number] for number in red_alliance],
[self.trueskills[number] for number in blue_alliance]], ranks)
# Store the new values
new_ratings = new_red + new_blue
for rating, team_number in zip(new_ratings, red_alliance + blue_alliance):
self.trueskills[team_number] = rating
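    # Illustrative example (not part of the original class): calling
    #   update(['frc254', 'frc971', 'frc1678'], 400, ['frc118', 'frc2056', 'frc1114'], 250)
    # passes ranks [0, 1] to TrueSkill (lower rank is better), so the red alliance
    # members' ratings rise and the blue alliance members' ratings fall.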
def predict(self, red_alliance, blue_alliance):
proba = self.env.quality([[self.trueskills[team] for team in red_alliance],
[self.trueskills[team] for team in blue_alliance]])
return round(proba*100)
def skill(self, team):
return self.env.expose(self.trueskills[team])
def get_previous_matches(self):
started_events = []
all_matches = []
events = requests.get("https://www.thebluealliance.com/api/v2/events/2017")
events = events.json()
for event in events:
if event['event_type'] > 5:
continue
if event['start_date'] < str(datetime.date(datetime.today())):
started_events.append(event["key"])
teams = requests.get("https://www.thebluealliance.com/api/v2/event/"+event['key']+"/teams", headers={"X-TBA-App-Id":"frc-4774:TrueSkill:1.0"})
teams = teams.json()
self.events[event['key']] = teams
for event in started_events:
matches = requests.get("https://www.thebluealliance.com/api/v2/event/"+event+"/matches", headers={"X-TBA-App-Id":"frc-4774:TrueSkill:1.0"})
matches = matches.json()
all_matches += matches
all_matches.sort(key=lambda m: m['time'])
for match in all_matches:
score = match['score_breakdown']
if score is None:
continue
red_stats = score['red']
blue_stats = score['blue']
alliances = match['alliances']
red = alliances['red']
blue = alliances['blue']
if red_stats["rotor3Engaged"]:
red['score'] += 100
if red_stats["kPaRankingPointAchieved"]:
red['score'] += 20
if blue_stats["rotor3Engaged"]:
                blue['score'] += 100
            if blue_stats["kPaRankingPointAchieved"]:
blue['score'] += 20
self.update(red['teams'], red['score'], blue['teams'], blue['score'])
|
DayGitH/Python-Challenges
|
DailyProgrammer/DP20130422.py
|
Python
|
mit
| 818
| 0.00489
|
"""
[04/22/13] REMINDER: Week-Long Challenge #1 due today!
https://www.reddit.com/r/dailyprogrammer/comments/1cv8oo/042213_reminder_weeklong_challenge_1_due_today/
Hey r/DailyProgrammers,
As a friendly reminder, our first week-long challenge (making a (tiny) video game) concludes today, at midnight, here
in the American Pacific Time zone (so that's GMT - 7:00).
We've got about 6 submissions, so half of all submissions at-this-moment are essentially guaranteed winning already! If
enough people post at the last minute, I may extend submissions by a few hours up to 24 hours at most.
Don't forget the prizes: **We will award the most unique game, most impressive game, and best demo with +1 gold ribbons
to your flair. Winners get Reddit Gold.**
"""
def main():
pass
if __name__ == "__main__":
main()
|
mapycz/mapnik
|
plugins/input/gdal/build.py
|
Python
|
lgpl-2.1
| 2,194
| 0.001823
|
#
# This file is part of Mapnik (c++ mapping toolkit)
#
# Copyright (C) 2015 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
Import ('plugin_base')
Import ('env')
from copy import copy
PLUGIN_NAME = 'gdal'
plugin_env = plugin_base.Clone()
plugin_sources = Split(
"""
%(PLUGIN_NAME)s_datasource.cpp
%(PLUGIN_NAME)s_featureset.cpp
""" % locals()
)
plugin_env['LIBS'] = []
plugin_env.Append(LIBS=env['PLUGINS']['gdal']['lib'])
if env['RUNTIME_LINK'] == 'static':
cmd = '%s --dep-libs' % plugin_env['GDAL_CONFIG']
plugin_env.ParseConfig(cmd)
# Link Library to Dependencies
libraries = copy(plugin_env['LIBS'])
if env['PLUGIN_LINKING'] == 'shared':
libraries.insert(0,env['MAPNIK_NAME'])
libraries.append(env['ICU_LIB_NAME'])
libraries.append(env['BOOST_LIB_PATHS']['system'])
TARGET = plugin_env.SharedLibrary('../%s' % PLUGIN_NAME,
SHLIBPREFIX='',
SHLIBSUFFIX='.input',
source=plugin_sources,
LIBS=libraries)
# if the plugin links to libmapnik ensure it is built first
Depends(TARGET, env.subst('../../../src/%s' % env['MAPNIK_LIB_NAME']))
if 'uninstall' not in COMMAND_LINE_TARGETS:
env.Install(env['MAPNIK_INPUT_PLUGINS_DEST'], TARGET)
env.Alias('install', env['MAPNIK_INPUT_PLUGINS_DEST'])
plugin_obj = {
'LIBS': libraries,
'SOURCES': plugin_sources,
}
Return('plugin_obj')
|
geoffjay/opendcs-core
|
examples/minimal.py
|
Python
|
lgpl-3.0
| 353
| 0.005666
|
#!/usr/bin/python3
from gi.repository import GLib
from gi.repository import OpenDCS
import sys
class DcsExample(object):
def __init__(self):
self.dcsobject = OpenDCS.Object()
self.dcsobject.set_property('id', 'test')
print("Object hash: ", self.dcsobject.get_property('hash'))
if __name__ == "__main__":
DcsExample()
|
googleads/google-ads-python
|
google/ads/googleads/v9/enums/types/change_event_resource_type.py
|
Python
|
apache-2.0
| 1,596
| 0.000627
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"ChangeEventResourceTypeEnum",},
)
class ChangeEventResourceTypeEnum(proto.Message):
r"""Container for enum describing supported resource types for
the ChangeEvent resource.
"""
class ChangeEventResourceType(proto.Enum):
r"""Enum listing the resource types support by the ChangeEvent
resource.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AD = 2
AD_GROUP = 3
AD_GROUP_CRITERION = 4
CAMPAIGN = 5
CAMPAIGN_BUDGET = 6
        AD_GROUP_BID_MODIFIER = 7
CAMPAIGN_CRITERION = 8
FEED = 9
FEED_ITEM = 10
CAMPAIGN_FEED = 11
AD_GROUP_FEED = 12
AD_GROUP_AD = 13
ASSET = 14
CUSTOMER_ASSET = 15
        CAMPAIGN_ASSET = 16
AD_GROUP_ASSET = 17
__all__ = tuple(sorted(__protobuf__.manifest))
|
debalance/hp
|
hp/blog/querysets.py
|
Python
|
gpl-3.0
| 1,576
| 0.003807
|
# -*- coding: utf-8 -*-
#
# This file is part of the jabber.at homepage (https://github.com/jabber-at/hp).
#
# This project is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-xmpp-account.
# If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.db import models
from django.utils import timezone
class BasePageQuerySet(models.QuerySet):
def slug(self, slug):
"""Filters for a given slug in any language."""
query = None
for lang, _name in settings.LANGUAGES:
if query is None:
query = models.Q(**{'slug_%s' % lang: slug})
else:
query |= models.Q(**{'slug_%s' % lang: slug})
return self.filter(query)
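    # Sketch of the resulting query (assuming, for illustration, LANGUAGES of 'en' and 'de'):
    # slug('contact') filters with models.Q(slug_en='contact') | models.Q(slug_de='contact'),
    # so a page matches if the slug is set in any configured language.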
class PageQuerySet(BasePageQuerySet):
pass
class BlogPostQuerySet(BasePageQuerySet):
def published(self, now=None):
if now is None:
now = timezone.now()
return self.filter(published=True, publication_date__lt=now)
def blog_order(self):
return self.order_by('-sticky', '-created')
|
kleetus/bitcoin
|
qa/rpc-tests/wallet.py
|
Python
|
mit
| 13,538
| 0.006943
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
target_fee = fee_per_byte * tx_size
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
# allow the node's estimation to be at most 2 bytes off
if fee > fee_per_byte * (tx_size + 2):
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
return curr_balance
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
print "Mining blocks..."
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 50)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 0)
# Send 21 BTC from 0 to 2 using sendtoaddress call.
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 100 btc in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 100-21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
        node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] - 3
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 94)
assert_equal(self.nodes[2].getbalance("from1"), 94-21)
# Send 10 BTC normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 BTC with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 10 BTC
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 BTC with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
|
kido113/a-byte-of-python-learning-records
|
module_using_sys.py
|
Python
|
gpl-3.0
| 127
| 0.031496
|
import sys
print('The command line arguments are:')
for i in sys.argv:
print(i)
print('\n\nThe PYTHONPATH is',sys.path,'\n')
| |
JoePelz/SAM
|
sam/pages/nodes.py
|
Python
|
gpl-3.0
| 4,239
| 0.002359
|
import re
import base
import sam.models.nodes
from sam import errors
from sam import common
# This class is for getting the child nodes of all nodes in a node list, for the map
class Nodes(base.headless_post):
"""
The expected GET data includes:
        'address': comma-separated list of dotted-decimal IP addresses.
Each address is only as long as the subnet,
so 12.34.0.0/16 would be written as 12.34
A request for 1.2.3.0/24, 192.168.0.0/16, and 21.0.0.0/8
would be "1.2.3,192.168,21"
:return: A JSON-encoded dictionary where
the keys are the supplied addresses (or _ if no address) and
the values are a list of child nodes.
POST Expects a query string including:
node: ip address
like "189.179.4.0/24"
or "189.179" ( == 189.179.0.0/16)
or "189.2.3/8" ( == 189.0.0.0/8)
alias: (optional) new alias string for the node
tags: (optional) comma separated string of tags to associate with this node
env: (optional) string, this host's environment category
:return:
"""
def __init__(self):
base.HeadlessPost.__init__(self)
self.flatmode_tolerance = 256
self.nodesModel = sam.models.nodes.Nodes(common.db, self.page.user.viewing)
def check_flat_tolerance(self):
endpoints = self.nodesModel.get_all_endpoints()
count = len(endpoints)
return count <= self.flatmode_tolerance
def decode_get_request(self, data):
addresses = []
address_str = data.get('address')
if address_str:
addresses = address_str.split(',')
addresses = filter(lambda x: bool(x), addresses)
flat = data.get('flat', 'false').lower() == 'true'
if 'ds' in data:
ds_match = re.search('(\d+)', data['ds'])
if ds_match:
ds = int(ds_match.group())
else:
raise errors.MalformedRequest("Could not read data source ('ds')")
else:
raise errors.RequiredKey('data source', 'ds')
return {'addresses': addresses, 'flat': flat, 'ds': ds}
def perform_get_command(self, request):
self.page.require_group('read')
if request['flat']:
if self.check_flat_tolerance():
response = {'flat': self.nodesModel.get_flat_nodes(request['ds'])}
else:
response = {'error': 'Flat mode is not supported once a graph has exceeded {} hosts.'.format(self.flatmode_tolerance)}
elif len(request['addresses']) == 0:
response = {'_': self.nodesModel.get_root_nodes()}
else:
response = {address: self.nodesModel.get_children(address) for address in request['addresses']}
return response
def encode_get_response(self, response):
return response
def decode_post_request(self, data):
node = data.get('node')
if not node:
raise errors.RequiredKey('node', 'node')
alias = data.get('alias')
tags = data.get('tags')
        env = data.get('env')
request = {'node': node}
if alias is not None:
request['alias'] = alias
if tags is not None:
request['tags'] = tags
if env is not None:
request['env'] = env
return request
def perform_post_command(self, request):
self.page.require_group('write')
        node = request.pop('node')
for key, value in request.iteritems():
if key == 'alias':
self.nodesModel.set_alias(node, value)
elif key == 'tags':
tags = filter(lambda x: bool(x), value.split(','))
self.nodesModel.set_tags(node, tags)
elif key == 'env':
if value:
self.nodesModel.set_env(node, value)
else:
self.nodesModel.set_env(node, None)
else:
print("Error in nodeinfo, unrecognized assignment {0} = {1}".format(key, value))
return 0, "Success"
def encode_post_response(self, response):
return {'code': response[0], 'message': response[1]}
|
skosukhin/spack
|
var/spack/repos/builtin/packages/gromacs/package.py
|
Python
|
lgpl-2.1
| 3,939
| 0.000254
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gromacs(CMakePackage):
"""GROMACS (GROningen MAchine for Chemical Simulations) is a molecular
dynamics package primarily designed for simulations of proteins, lipids
and nucleic acids. It was originally developed in the Biophysical
Chemistry department of University of Groningen, and is now maintained
by contributors in universities and research centers across the world.
GROMACS is one of the fastest and most popular software packages
available and can run on CPUs as well as GPUs. It is free, open source
released under the GNU General Public License. Starting from version 4.6,
GROMACS is released under the GNU Lesser General Public License.
"""
homepage = 'http://www.gromacs.org'
url = 'http://ftp.gromacs.org/gromacs/gromacs-5.1.2.tar.gz'
version('2016.4', '19c8b5c85f3ec62df79d2249a3c272f8')
version('2016.3', 'e9e3a41bd123b52fbcc6b32d09f8202b')
version('5.1.4', 'ba2e34d59b3982603b4935d650c08040')
version('5.1.2', '614d0be372f1a6f1f36382b7a6fcab98')
version('develop', git='https://github.com/gromacs/gromacs', branch='master')
variant('mpi', default=True, description='Activate MPI support')
variant('shared', default=True,
description='Enables the build of shared libraries')
variant(
'double', default=False,
description='Produces a double precision version of the executables')
variant('plumed', default=False, description='Enable PLUMED support')
variant('cuda', default=False, description='Enable CUDA support')
variant('build_type', default='RelWithDebInfo',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel',
'Reference', 'RelWithAssert', 'Profile'))
depends_on('mpi', when='+mpi')
depends_on('plumed+mpi', when='+plumed+mpi')
depends_on('plumed~mpi', when='+plumed~mpi')
depends_on('fftw')
depends_on('cmake@2.8.8:', type='build')
depends_on('cuda', when='+cuda')
def patch(self):
if '+plumed' in self.spec:
self.spec['plumed'].package.apply_patch(self)
def cmake_args(self):
options = []
if '+mpi' in self.spec:
options.append('-DGMX_MPI:BOOL=ON')
if '+double' in self.spec:
            options.append('-DGMX_DOUBLE:BOOL=ON')
if '~shared' in self.spec:
options.append('-DBUILD_SHARED_LIBS:BOOL=OFF')
        if '+cuda' in self.spec:
options.append('-DGMX_GPU:BOOL=ON')
options.append('-DCUDA_TOOLKIT_ROOT_DIR:STRING=' +
self.spec['cuda'].prefix)
return options
|
meng89/hooky
|
hooky.py
|
Python
|
mit
| 6,757
| 0.000296
|
# coding=utf-8
from collections import MutableMapping, Sequence
import copy
__version__ = '0.5.0'
class Hook(object):
def _before_add(self, key, item):
"""
        called before an item is added to the object.
example: obj[key] = item
"""
def _after_add(self, key, item):
"""
like _before_add, but after add.
"""
def _before_del(self, key, item):
"""
        called before an item is deleted from the object.
example: del obj[key]
"""
def _after_del(self, key, item):
"""
like _before_del, but after del.
"""
class Check:
def _add_check(self, key, value):
pass
def _del_check(self, key, value):
pass
class Garud:
def _add_garud(self, key, value):
return key, value
def _del_garud(self, key):
return key
class List(Hook, Sequence):
"""
list like.
"""
def __init__(self, initlist=None, hook_when_init=True):
"""
:param initlist: iterable object
:param hook_when_init: run hook points when it is True
"""
self._data = []
if initlist:
if hook_when_init:
self.extend(initlist)
else:
self._data.extend(initlist)
def __setitem__(self, i, item): # x[i] = item, del and add
if isinstance(i, slice):
if not hasattr(item, '__contains__'):
raise TypeError('can only assign an iterable')
start, stop, step = i.indices(len(self))
########################################################
if step == 1:
for one in range(stop - start):
del self[start]
_i = start
for one in item:
self.insert(_i, one)
_i += 1
else:
if step > 1:
slice_size = (stop - start) // step
if 0 < (stop - start) % step < step:
slice_size += 1
else:
slice_size = (start - stop) // abs(step)
if 0 < (start - stop) % abs(step) < abs(step):
slice_size += 1
slice_size = 0 if slice_size < 0 else slice_size
if slice_size != len(item):
raise ValueError('attempt to assign sequence of size {} to extended slice of size {}'.format(
len(item), slice_size
))
_i = start
for one in item:
self[_i] = one
_i += step
else:
del self[i]
self.insert(i, item)
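    # Worked example of the extended-slice branch above (illustrative, not in the original):
    # for l = List([0, 1, 2, 3, 4]), the slice l[::2] selects indices 0, 2 and 4, so
    # l[::2] = ['a', 'b', 'c'] succeeds element by element, while l[::2] = ['a', 'b']
    # raises ValueError because 2 items cannot fill an extended slice of size 3.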
# all del action will be here
def __delitem__(self, i): # del x[i], del
item = self[i]
self._before_del(key=i, item=item)
del self._data[i]
self._after_del(key=i, item=item)
def append(self, item): # add
self.insert(len(self), item)
# all add action will be here
def insert(self, i, item):
self._before_add(key=i, item=item)
self._data.insert(i, item)
self._after_add(key=i, item=item)
def pop(self, i=-1): # del
x = self[i]
del self[i]
return x
def remove(self, item): # del
i = self.index(item)
del self[i]
def clear(self): # del
for i in range(len(self)):
self.pop()
def extend(self, other): # add
for item in other:
self.append(item)
def __iadd__(self, other): # x += y, add
self.extend(other)
return self
def __imul__(self, n): # x *= y, add
if not isinstance(n, int):
raise TypeError("can't multiply sequence by non-int of type '{}'".format(type(n)))
if n <= 0:
self.clear()
if n == 1:
pass
elif n > 1:
old_data = copy.copy(self._data)
for time in range(n - 1):
self.extend(old_data)
return self
def __getitem__(self, i): return self._data[i]
def __len__(self): return len(self._data)
def __repr__(self): return repr(self._data)
def __lt__(self, other): return self._data < self.__cast(other)
def __le__(self, other): return self._data <= self.__cast(other)
def __eq__(self, other): return self._data == self.__cast(other)
def __gt__(self, other): return self._data > self.__cast(other)
def __ge__(self, other): return self._data >= self.__cast(other)
def __cast(self, other):
return other._data if isinstance(other, List) else other
def reverse(self): self._data.reverse()
def sort(self, *args, **kwds): self._data.sort(*args, **kwds)
class Dict(Hook, MutableMapping):
def __init__(*args, **kwargs):
if not args:
raise TypeError("descriptor '__init__' of 'Dict' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
if args:
dict_ = args[0]
elif 'dict' in kwargs:
dict_ = kwargs.pop('dict')
import warnings
warnings.warn("Passing 'dict' as keyword argument is deprecated",
PendingDeprecationWarning, stacklevel=2)
else:
dict_ = None
self._data = {}
if dict_ is not None:
self.update(dict_)
if len(kwargs):
self.update(kwargs)
    # all set action will be here
def __setitem__(self, key, item):
if key in self.keys():
del self[key]
self._before_add(key=key, item=item)
self._data[key] = item
self._after_add(key=key, item=item)
# all del action will be here
def __delitem__(self, key):
item = self[key]
self._before_del(key=key, item=item)
del self._data[key]
self._after_del(key=key, item=item)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
if key in self._data:
return self._data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __len__(self): return len(self._data)
def copy(self):
if self.__class__ is Dict:
return Dict(self._data.copy())
import copy
data = self._data
try:
self._data = {}
c = copy.copy(self)
finally:
self._data = data
c.update(self)
return c
|
alien4cloud/alien4cloud-cloudify3-provider
|
alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/storage/wrapper/LinuxFileSystem_3/tosca.interfaces.node.lifecycle.Standard/start/_a4c_start.py
|
Python
|
apache-2.0
| 15,688
| 0.004717
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_attribute_user(ctx):
if get_attribute(ctx, 'user'):
return get_attribute(ctx, 'user')
else:
return get_attribute(ctx, 'cloudify_agent')['user']
def get_attribute_key(ctx):
if get_attribute(ctx, 'key'):
return get_attribute(ctx, 'key')
else:
return get_attribute(ctx, 'cloudify_agent')['key']
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
ctx.logger.info('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
            ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
    # Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
ctx.logger.info('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relations
|
jcbalmeida/onecloud-store
|
store/computing/serializers.py
|
Python
|
mit
| 816
| 0.002451
|
from rest_framework import serializers
from .models import (
Provider,
OsFamily,
OperatingSystem,
Instance,
ServerPlan
)
class ProviderSerializer(serializers.ModelSerializer):
class Meta:
model = Provider
class OsFamilySerializer(serializers.ModelSerializer):
class Meta:
        model = OsFamily
class OperatingSystemSerializer(serializers.ModelSerializer):
family = OsFamilySerializer()
class Meta:
model = OperatingSystem
class InstanceSerializer(serializers.ModelSerializer):
operating_system = OperatingSystemSerializer(many=True)
class Meta:
model = Instance
class ServerPlanSerializer(serializers.ModelSerializer):
instance = InstanceSerializer()
provider = ProviderSerializer()
class Meta:
model = ServerPlan
|
dgary50/eovsa
|
adc_plot_og.py
|
Python
|
gpl-2.0
| 8,417
| 0.022573
|
import roach as r
import struct
import numpy as np
import matplotlib.pyplot as plt
import threading
from time import sleep,time
from util import Time
import matplotlib.animation as animation
#set up threading
class AGC_Thread (threading.Thread):
def __init__(self, threadID):
threading.Thread.__init__(self)
self.threadID = threadID #set thread ID
self.name = "roach" + str(threadID) #set the thread name
self.sd = np.empty((4,50),float) #set up the standard deviation array
self.iter_n = 0
self.stop = False
def run(self):
threadLock.acquire(0) #set threadlock
while not self.stop:
start = time() #used for computing execution time
f, s = np.modf(start)
waittime = (np.floor(s/60)+1)*60 - start
sleep(waittime)
start=time()
self.sd = np.std(self.grab_all(),axis=2) #calculate standard deviation from the roach
            print self.name+" Execution Time= "+str(time()-start) #display the execution time
def grab_all(self):
roachname = self.name
buf = [] #adc buffer for single slot an channel
self.iter_n += 1
fh = open(roachname+'-'+str(self.iter_n)+'.dat','wb')
        udata = np.zeros((4,50,8192),float) #numpy array of data to return in form of [chan,slot,data]
done = np.zeros((4,50),bool) #values in done array are true if a given slot and channel have been processed
lasttm = time() #last time is needed to ensure processing does not hang (gets stuck on slot 29 for some reason)
rn = r.Roach(roachname) #set up the roach
#loop until all slots and channels processed or stop signal sent
while not np.all(done) and not self.stop:
f, s = np.modf(time()) #get fractional and integer part of the time
tfrac = (np.floor(f*50)+1.0)/50.0 #compute the fractional part of the time for the next adc read
slot = int(tfrac*50) % 50 #get the slot number for the next read
sleep(tfrac-f) #wait till next read
chan, = np.where(done[:,slot] == False) # get list of unprocessed channels
if (chan.size>0): #if still channels to process
#print "Processing Slot "+str(slot)+" Chan "+str(chan[0]) #display slot and channel (used for error checking)
#read in the adc values
rn.fpga.write_int('swreg_snap_select',chan[0])
rn.fpga.write_int('adc_data_adc_ctrl',0)
# Calculate slot to be read, from time at instant before trigger is written
t1 = time()
rn.fpga.write_int('adc_data_adc_ctrl',7)
t2 = time()
f, s = np.modf(t1)
slot_read = int(f*50)
buf = rn.fpga.read('adc_data_adc_bram', 2048*4, 0)
udata[chan[0],slot_read] = np.array(struct.unpack('>8192b', buf))
done[chan[0],slot] = True #set value of done for current slot and channel to true
lasttm = time() #update lasttm
x = np.array([slot_read, t1, t2])
fh.write(x)
            if time()-lasttm>4.0: #if more than 4 seconds since last read, then exit the loop
break
#go through each slot to see if any left to process
if not self.stop:
for slot in range(50):
chan, = np.where(done[:,slot] == False) #get channels to process for current slot
if chan.size>0: #if channels left to be processed
for c in chan: #go through each channel and get get adc values
f, s = np.modf(time()) #get fractional and integer part of the time
dt = slot*0.02 #get fractional part of time for current slot
#wait for time to read slot
if f<dt:
sleep(dt-f)
else:
sleep(dt+1.0-f)
#process the slot as above
#print "Processing Slot "+str(slot)+" Chan "+str(c)
rn.fpga.write_int('swreg_snap_select',c)
rn.fpga.write_int('adc_data_adc_ctrl',0)
t1 = time()
rn.fpga.write_int('adc_data_adc_ctrl',7)
t2 = time()
f, s = np.modf(t1)
slot_read = int(f*50)
buf = rn.fpga.read('adc_data_adc_bram', 2048*4, 0)
udata[c,slot_read] = np.array(struct.unpack('>8192b', buf))
done[c,slot] = True
x = np.array([slot_read, t1, t2])
fh.write(x)
rn.fpga.stop()
fh.close()
return udata
tt=time()
stdev=np.empty((120,7,4,50),float)
stdev[:,:,:,:]=np.nan
started=False
#set threading
threadLock = threading.Lock()
threads = []
#set the threads
for t in range(1,8):
threads.append(AGC_Thread(t))
#Start new Threads
for t in threads:
t.start()
#set up plot
figure, ax = plt.subplots(4,7,figsize=(15, 8))
plt.suptitle("ADC Standard Deviations", fontsize=20)
polstr = [' X',' Y',' X',' Y']
for i in range(7):
for chan in range(4):
ax[chan,i].text(25,75,'Ant '+str(i*2 + chan/2 + 1)+polstr[chan],horizontalalignment='center')
ax[chan,i].plot([0,50],[32,32],'k--')
ax[chan,i].text(0,34,'target',fontsize=9)
ax[chan,i].set_ylim(0,100)
if chan == 3:
ax[chan,i].set_xlabel('Band')
if i == 0:
ax[chan,i].set_ylabel('St. Dev.')
# Open file for saving output stdevs
t = Time.now().iso[:19].replace('-','').replace(':','').replace(' ','')
fh = open('stdout_'+t+'.dat','wb')
start=time()
waittime = np.ceil(start/60)*60 - start + 45
print "Waiting for "+str(waittime)+" seconds"
sleep(waittime)
def animate(j):
t = Time.now().iso
print t
# Loop over threads and get sd (4,50) for each
stdall = np.zeros((7,4,50),float)
for i in range(7):
stdev = threads[i].sd
stdall[i] = stdev
for chan in range(4):
ax[chan,i].plot(stdev[chan],'.')
#Remove previous n-3 line (note lines[0] is the "target" line, which we want to keep)
if len(ax[chan,i].lines) > 4: ax[chan,i].lines[1].remove()
ax[0,6].set_title(t[11:19])
# Save stdevs for this time
fh.write(stdall)
# Create animation, new frame every 60 seconds (60,000 ms)
ani = animation.FuncAnimation(figure, animate, interval=60000)
plt.show(block=True) #block=True is to prevent program proceeding until animation window closed
|
PacketPerception/pyjuggling
|
tests/siteswap_tests.py
|
Python
|
mit
| 2,919
| 0.001028
|
import unittest
from juggling.notation import siteswap
class SiteswapUtilsTests(unittest.TestCase):
def test_siteswap_char_to_int(self):
self.assertEqual(siteswap.siteswap_char_to_int('0'), 0)
self.assertEqual(siteswap.siteswap_char_to_int('1'), 1)
self.assertEqual(siteswap.siteswap_char_to_int('a'), 10)
self.assertEqual(siteswap.siteswap_char_to_int('f'), 15)
self.assertEqual(siteswap.siteswap_char_to_int('z'), 35)
def test_invalid_char(self):
self.assertRaises(ValueError, siteswap.siteswap_char_to_int, [3])
self.assertRaises(ValueError, siteswap.siteswap_char_to_int, 10)
self.assertRaises(ValueError, siteswap.siteswap_char_to_int, '#')
self.assertRaises(ValueError, siteswap.siteswap_char_to_int, 'multichar')
def test_siteswap_int_to_char(self):
        self.assertEqual(siteswap.siteswap_int_to_char(9), '9')
self.assertEqual(siteswap.siteswap_int_to_char(0), '0')
self.assertEqual(siteswap.siteswap_int_to_char(10), 'a')
        self.assertEqual(siteswap.siteswap_int_to_char(15), 'f')
self.assertEqual(siteswap.siteswap_int_to_char(35), 'z')
def test_invalid_int(self):
self.assertRaises(ValueError, siteswap.siteswap_int_to_char, ['3'])
self.assertRaises(ValueError, siteswap.siteswap_int_to_char, 'a')
self.assertRaises(ValueError, siteswap.siteswap_int_to_char, 36)
self.assertRaises(ValueError, siteswap.siteswap_int_to_char, -1)
class SiteSwapSyntaxValidationTests(unittest.TestCase):
def test_valid_syntax(self):
solo_patterns = [
'441',
'(6x,4)(4,6x)',
'(6x,4)*',
'[64]020',
'[33](3,3)123',
'(4,2)(2x,[44x])',
]
for pattern in solo_patterns:
self.assertTrue(siteswap.is_valid_siteswap_syntax(pattern))
passing_patterns = [
('<4p|3><2|3p>', 2),
('<2|3p><2p|3><[3p22]|3p><3|3>', 2),
('<(2p3,4x)|(2xp3,4p1)|(2xp2,4xp2)>', 3)
]
for pattern, num_jugglers in passing_patterns:
self.assertTrue(siteswap.is_valid_siteswap_syntax(pattern, num_jugglers))
def test_return_match(self):
import re
sre_match_object = type(re.match('', ''))
self.assertTrue(siteswap.is_valid_siteswap_syntax('441', return_match=False))
_, match = siteswap.is_valid_siteswap_syntax('441', return_match=True)
self.assertIsInstance(match, sre_match_object)
_, match = siteswap.is_valid_siteswap_syntax('###', return_match=True)
self.assertIsNone(match)
def test_invalid_syntax(self):
solo_patterns = [
'#!j',
'((3232,3)',
'(3232,3))',
'[(3232,3)])',
]
for pattern in solo_patterns:
self.assertFalse(siteswap.is_valid_siteswap_syntax(pattern))
|
zag2me/plugin.program.video.node.editor
|
addon.py
|
Python
|
gpl-2.0
| 42,963
| 0.035451
|
# coding=utf-8
import os, sys, shutil, unicodedata, re, types
from htmlentitydefs import name2codepoint
import xbmc, xbmcaddon, xbmcplugin, xbmcgui, xbmcvfs
import xml.etree.ElementTree as xmltree
import urllib
from unidecode import unidecode
from urlparse import parse_qs
from traceback import print_exc
if sys.version_info < (2, 7):
import simplejson
else:
import json as simplejson
__addon__ = xbmcaddon.Addon()
__addonid__ = __addon__.getAddonInfo('id').decode( 'utf-8' )
__addonversion__ = __addon__.getAddonInfo('version')
__language__ = __addon__.getLocalizedString
__cwd__ = __addon__.getAddonInfo('path').decode("utf-8")
__addonname__ = __addon__.getAddonInfo('name').decode("utf-8")
__resource__ = xbmc.translatePath( os.path.join( _
|
_cwd__, 'resources', 'lib' ) ).decode("utf-8")
__datapath__ = os.path.join( xbmc.translatePath( "special://profile/" ).decode( 'utf-8' ), "addon_data", __addonid__ )
sys.path.append(__resource__)
import rules, viewattrib, orderby
RULE = rules.RuleFunctions()
ATTRIB = viewattrib.ViewAttribFunctions()
ORDERBY = orderby.OrderByFunctions()
# character entity reference
CHAR_ENTITY_REXP = re.compile('&(%s);' % '|'.join(name2codepoint))
# decimal character reference
DECIMAL_REXP = re.compile('&#(\d+);')
# hexadecimal character reference
HEX_REXP = re.compile('&#x([\da-fA-F]+);')
REPLACE1_REXP = re.compile(r'[\']+')
REPLACE2_REXP = re.compile(r'[^-a-z0-9]+')
REMOVE_REXP = re.compile('-{2,}')
def log(txt):
if isinstance (txt,str):
txt = txt.decode('utf-8')
message = u'%s: %s' % (__addonid__, txt)
xbmc.log(msg=message.encode('utf-8'), level=xbmc.LOGDEBUG)
class Main:
# MAIN ENTRY POINT
def __init__(self):
self._parse_argv()
# If there are no custom video nodes in the profile directory, copy them from the XBMC install
targetDir = os.path.join( xbmc.translatePath( "special://profile".decode('utf-8') ), "library", "video" )
try:
if not os.path.exists( targetDir ):
xbmcvfs.mkdirs( targetDir )
originDir = os.path.join( xbmc.translatePath( "special://xbmc".decode( "utf-8" ) ), "system", "library", "video" )
dirs, files = xbmcvfs.listdir( originDir )
self.copyNode( dirs, files, targetDir, originDir )
except:
xbmcgui.Dialog().ok(__addonname__, __language__( 30400 ) )
                print_exc()
xbmcplugin.endOfDirectory(handle=int(sys.argv[1]))
return
# Create data if not exists
if not os.path.exists(__datapath__):
xbmcvfs.mkdir(__datapath__)
if "type" in self.PARAMS:
# We're performing a specific action
if self.PARAMS[ "type" ] == "delete":
message = __language__( 30401 )
if self.PARAMS[ "actionPath" ] == targetDir:
# Ask the user is they want to reset all nodes
message = __language__( 30402 )
result = xbmcgui.Dialog().yesno(__addonname__, message )
if result:
if self.PARAMS[ "actionPath" ].endswith( ".xml" ):
# Delete single file
xbmcvfs.delete( self.PARAMS[ "actionPath" ] )
else:
# Delete folder
RULE.deleteAllNodeRules( self.PARAMS[ "actionPath" ] )
shutil.rmtree( self.PARAMS[ "actionPath" ] )
if self.PARAMS[ "type" ] == "deletenode":
result = xbmcgui.Dialog().yesno(__addonname__, __language__( 30403 ) )
if result:
self.changeViewElement( self.PARAMS[ "actionPath" ], self.PARAMS[ "node" ], "" )
if self.PARAMS[ "type" ] == "editlabel":
if self.PARAMS[ "label" ].isdigit():
label = xbmc.getLocalizedString( int( self.PARAMS[ "label" ] ) )
else:
label = self.PARAMS[ "label" ]
# Get new label from keyboard dialog
keyboard = xbmc.Keyboard( label, __language__( 30300 ), False )
keyboard.doModal()
if ( keyboard.isConfirmed() ):
newlabel = keyboard.getText().decode( "utf-8" )
if newlabel != "" and newlabel != label:
# We've got a new label, update the xml file
self.changeViewElement( self.PARAMS[ "actionPath" ], "label", newlabel )
if self.PARAMS[ "type" ] == "editvisibility":
currentVisibility = self.getRootAttrib( self.PARAMS[ "actionPath" ], "visible" )
# Get new visibility from keyboard dialog
keyboard = xbmc.Keyboard( currentVisibility, __language__( 30301 ), False )
keyboard.doModal()
if ( keyboard.isConfirmed() ):
newVisibility = keyboard.getText()
if newVisibility != currentVisibility:
# We've got a new label, update the xml file
self.changeRootAttrib( self.PARAMS[ "actionPath" ], "visible", newVisibility )
if self.PARAMS[ "type" ] == "editorder":
currentOrder = self.getRootAttrib( self.PARAMS[ "actionPath" ], "order" )
# Get new label from keyboard dialog
neworder = xbmcgui.Dialog().numeric( 0, __language__( 30302 ), currentOrder )
if neworder != "" and neworder != currentOrder:
# We've got a new label, update the xml file
self.changeRootAttrib( self.PARAMS[ "actionPath" ], "order", neworder )
if self.PARAMS[ "type" ] == "newView":
# Get new view name from keyboard dialog
keyboard = xbmc.Keyboard( "", __language__( 30316 ), False )
keyboard.doModal()
if ( keyboard.isConfirmed() ):
newView = keyboard.getText().decode( "utf-8" )
if newView != "":
# Ensure filename is unique
filename = self.slugify( newView.lower().replace( " ", "" ) )
if os.path.exists( os.path.join( self.PARAMS[ "actionPath" ], filename + ".xml" ) ):
count = 0
while os.path.exists( os.path.join( self.PARAMS[ "actionPath" ], filename + "-" + str( count ) + ".xml" ) ):
count += 1
filename = filename + "-" + str( count )
# Create a new xml file
tree = xmltree.ElementTree( xmltree.Element( "node" ) )
root = tree.getroot()
subtree = xmltree.SubElement( root, "label" ).text = newView
# Add any node rules
RULE.addAllNodeRules( self.PARAMS[ "actionPath" ], root )
# Write the xml file
self.indent( root )
tree.write( os.path.join( self.PARAMS[ "actionPath" ], filename + ".xml" ), encoding="UTF-8" )
if self.PARAMS[ "type" ] == "newNode":
# Get new node name from the keyboard dialog
keyboard = xbmc.Keyboard( "", __language__( 30303 ), False )
keyboard.doModal()
if ( keyboard.isConfirmed() ):
newNode = keyboard.getText().decode( "utf8" )
if newNode == "":
return
# Ensure foldername is unique
foldername = self.slugify( newNode.lower().replace( " ", "" ) )
if os.path.exists( os.pa
|
pennersr/django-allauth
|
allauth/socialaccount/providers/foursquare/provider.py
|
Python
|
mit
| 967
| 0.001034
|
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class FoursquareAccount(ProviderAccount):
def get_profile_url(self):
return "https://foursquare.com/user/" + self.account.extra_data.get("id")
def get_avatar_url(self):
return self.account.extra_data.get("photo")
def to_str(self):
|
dflt = super(FoursquareAccount, self).to_str()
return self.account.extra_data.get("name", dflt)
class FoursquareProvider(OAuth2Provider):
id = "foursquare"
name = "Foursquare"
account_class = FoursquareAccount
def extract_uid(self, data):
return str(data["id"]
|
)
def extract_common_fields(self, data):
return dict(
first_name=data.get("firstname"),
last_name=data.get("lastname"),
email=data.get("contact").get("email"),
)
provider_classes = [FoursquareProvider]
|
e-q/scipy
|
tools/refguide_check.py
|
Python
|
bsd-3-clause
| 32,497
| 0.001169
|
#!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are m
|
issing;
the output of this script does need to be checked manually. In some cases
objects are lef
|
t out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --doctests optimize
"""
import copy
import doctest
import glob
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
import docutils.core
import numpy as np
import sphinx
from docutils.parsers.rst import directives
from pkg_resources import parse_version
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
if parse_version(sphinx.__version__) >= parse_version('1.5'):
# Enable specific Sphinx directives
from sphinx.directives.other import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
else:
# Remove sphinx directives that don't run without Sphinx environment.
# Sphinx < 1.5 installs all directives on import...
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fft',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'signal.windows',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'spatial.transform',
'special',
'stats',
'stats.mstats',
'stats.contingency',
'stats.qmc',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
'scipy.optimize.show_options',
'scipy.integrate.quad_explain',
'io.rst', # XXX: need to figure out how to deal w/ mat files
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
r'scipy\.special\..*_roots', # old aliases for scipy.special.*_roots
r'scipy\.special\.jn', # alias for jv
r'scipy\.ndimage\.sum', # alias for sum_labels
r'scipy\.integrate\.simps', # alias for simpson
r'scipy\.integrate\.trapz', # alias for trapezoid
r'scipy\.integrate\.cumtrapz', # alias for cumulative_trapezoid
r'scipy\.linalg\.solve_lyapunov', # deprecated name
r'scipy\.stats\.contingency\.chi2_contingency',
r'scipy\.stats\.contingency\.expected_freq',
r'scipy\.stats\.contingency\.margins',
r'scipy\.stats\.reciprocal',
r'scipy\.stats\.trapz', # alias for trapezoid
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
'kaiser', 'nuttall', 'parzen', 'triang', 'tukey'):
REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
|
cctaylor/googleads-python-lib
|
examples/adwords/v201409/extensions/add_site_links_using_feeds.py
|
Python
|
apache-2.0
| 9,573
| 0.00491
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License
|
at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a sitelinks feed and associates it with a campaign.
To a
|
dd sitelinks using the simpler ExtensionSetting services, see:
add_sitelinks.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CampaignFeedService.mutate, FeedItemService.mutate
Tags: FeedMappingService.mutate, FeedService.mutate
Api: AdWordsOnly
"""
__author__ = 'Joseph DiLallo'
import uuid
from googleads import adwords
from googleads import errors
# See the Placeholder reference page for a list of all the placeholder types and
# fields.
# https://developers.google.com/adwords/api/docs/appendix/placeholders.html
PLACEHOLDER_SITELINKS = '1'
PLACEHOLDER_FIELD_SITELINK_LINK_TEXT = '1'
PLACEHOLDER_FIELD_SITELINK_FINAL_URL = '5'
PLACEHOLDER_FIELD_LINE_1_TEXT = '3'
PLACEHOLDER_FIELD_LINE_2_TEXT = '4'
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
# Initialize appropriate service.
feed_service = client.GetService('FeedService', version='v201409')
feed_item_service = client.GetService('FeedItemService', version='v201409')
feed_mapping_service = client.GetService(
'FeedMappingService', version='v201409')
campaign_feed_service = client.GetService(
'CampaignFeedService', version='v201409')
sitelinks_data = {}
# Create site links feed first.
site_links_feed = {
'name': 'Feed For Site Links #%s' % uuid.uuid4(),
'attributes': [
{'type': 'STRING', 'name': 'Link Text'},
{'type': 'URL_LIST', 'name': 'Link Final URLs'},
{'type': 'STRING', 'name': 'Line 1 Description'},
{'type': 'STRING', 'name': 'Line 2 Description'}
]
}
response = feed_service.mutate([
{'operator': 'ADD', 'operand': site_links_feed}
])
if 'value' in response:
feed = response['value'][0]
link_text_feed_attribute_id = feed['attributes'][0]['id']
final_url_feed_attribute_id = feed['attributes'][1]['id']
line_1_feed_attribute_id = feed['attributes'][2]['id']
line_2_feed_attribute_id = feed['attributes'][3]['id']
print ('Feed with name \'%s\' and ID \'%s\' was added with' %
(feed['name'], feed['id']))
print ('\tText attribute ID \'%s\' and Final URL attribute ID \'%s\'.' %
(link_text_feed_attribute_id, final_url_feed_attribute_id))
print ('\tLine 1 attribute ID \'%s\' and Line 2 attribute ID \'%s\'.' %
(line_1_feed_attribute_id, line_2_feed_attribute_id))
sitelinks_data['feedId'] = feed['id']
sitelinks_data['linkTextFeedId'] = link_text_feed_attribute_id
sitelinks_data['finalUrlFeedId'] = final_url_feed_attribute_id
sitelinks_data['line1FeedId'] = line_1_feed_attribute_id
sitelinks_data['line2FeedId'] = line_2_feed_attribute_id
else:
raise errors.GoogleAdsError('No feeds were added.')
# Create site links feed items.
items_data = [
{'text': 'Home', 'url': 'http://www.example.com',
'line1': 'Home line 1', 'line2': 'Home line 2'},
{'text': 'Stores', 'url': 'http://www.example.com/stores',
'line1': 'Stores line 1', 'line2': 'Stores line 2'},
{'text': 'On Sale', 'url': 'http://www.example.com/sale',
'line1': 'On Sale line 1', 'line2': 'On Sale line 2'},
{'text': 'Support', 'url': 'http://www.example.com/support',
'line1': 'Support line 1', 'line2': 'Support line 2'},
{'text': 'Products', 'url': 'http://www.example.com/products',
'line1': 'Products line 1', 'line2': 'Products line 2'},
{'text': 'About', 'url': 'http://www.example.com/about',
'line1': 'About line 1', 'line2': 'About line 2'}
]
feed_items = []
for item in items_data:
feed_items.append({
'feedId': sitelinks_data['feedId'],
'attributeValues': [
{
'feedAttributeId': sitelinks_data['linkTextFeedId'],
'stringValue': item['text']
},
{
'feedAttributeId': sitelinks_data['finalUrlFeedId'],
'stringValue': item['url']
},
{
'feedAttributeId': sitelinks_data['line1FeedId'],
'stringValue': item['line1']
},
{
'feedAttributeId': sitelinks_data['line2FeedId'],
'stringValue': item['line2']
}
],
# Optional: use the 'startTime' and 'endTime' keys to specify the time
# period for the feed to deliver. The example below will make the feed
# start now and stop in one month.
# Make sure you specify the datetime in the customer's time zone. You
# can retrieve this from customer['dateTimeZone'].
#
# ['startTime']: datetime.datetime.now().strftime('%Y%m%d %H%M%S')
# ['endTime']: (datetime.datetime.now() +
# relativedelta(months=1)).strftime('%Y%m%d %H%M%S')
# Optional: use the 'scheduling' key to specify time and days of the
# week for feed to deliver. This is a Beta feature.
})
feed_items_operations = [{'operator': 'ADD', 'operand': item} for item
in feed_items]
response = feed_item_service.mutate(feed_items_operations)
if 'value' in response:
sitelinks_data['feedItemIds'] = []
for feed_item in response['value']:
print 'Feed item with ID %s was added.' % feed_item['feedItemId']
sitelinks_data['feedItemIds'].append(feed_item['feedItemId'])
else:
raise errors.GoogleAdsError('No feed items were added.')
# Create site links feed mapping.
feed_mapping = {
'placeholderType': PLACEHOLDER_SITELINKS,
'feedId': sitelinks_data['feedId'],
'attributeFieldMappings': [
{
'feedAttributeId': sitelinks_data['linkTextFeedId'],
'fieldId': PLACEHOLDER_FIELD_SITELINK_LINK_TEXT
},
{
'feedAttributeId': sitelinks_data['finalUrlFeedId'],
'fieldId': PLACEHOLDER_FIELD_SITELINK_FINAL_URL
},
{
'feedAttributeId': sitelinks_data['line1FeedId'],
'fieldId': PLACEHOLDER_FIELD_LINE_1_TEXT
},
{
'feedAttributeId': sitelinks_data['line2FeedId'],
'fieldId': PLACEHOLDER_FIELD_LINE_2_TEXT
}
]
}
response = feed_mapping_service.mutate([
{'operator': 'ADD', 'operand': feed_mapping}
])
if 'value' in response:
feed_mapping = response['value'][0]
print ('Feed mapping with ID %s and placeholder type %s was saved for feed'
' with ID %s.' %
(feed_mapping['feedMappingId'], feed_mapping['placeholderType'],
feed_mapping['feedId']))
else:
raise errors.GoogleAdsError('No feed mappings were added.')
# Create site links campaign feed.
operands = []
for feed_item_id in sitelinks_data['feedItemIds']:
operands.append({
'xsi_type': 'ConstantOperand',
'type': 'LONG',
'longValue': feed_item_id
})
feed_item_function = {
'operator': 'IN',
'lhsOperand': [
{'xsi_type': 'RequestContextOperand', 'contextType': 'FEED_ITEM_ID'}
],
'rhsOperand': operands
}
  # Optional: to target a platform, define a function and 'AND' it with the
# feed item ID link:
platform_function = {
'operator': 'EQUALS',
'lhsOperand': [
{
|
CanopyIQ/gmail_client
|
gmail_client/apis/users_api.py
|
Python
|
mit
| 459,442
| 0.003489
|
# coding: utf-8
"""
Gmail
Access Gmail mailboxes including sending user email.
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class UsersApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def gmail_users_drafts_create(self, user_id, **kwargs):
"""
Creates a new draft with the DRAFT label.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.gmail_users_drafts_create(user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str user_id: The user's email address. The special value me can be used to indicate the authenticated user. (required)
:param str alt: Data format for the response.
:param str fields: Selector specifying which fields to include in a partial response.
:param str key: API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
:param str oauth_token: OAuth 2.0 token for the current user.
:param bool pretty_print: Returns response with indentations and line breaks.
:param str quota_user: Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
:param str user_ip: IP address of the site where the request originates. Use this if you want to enforce per-user limits.
:param Draft body:
:return: Draft
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return
|
_http_data_only'] = True
if kwargs.get('callback'):
return self.gmail_users_drafts_create_with_http_info(user_id, **kwargs)
else:
(data) = self.gmail_users_drafts_create_with_http_info(user_id, **kwargs)
return data
def gmail_users_drafts_create_with_http_info(self, user_id, **kwargs):
"""
Creates a new draft with the DRAFT label.
This method makes a synchronous HTTP request by default. To make an
async
|
hronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.gmail_users_drafts_create_with_http_info(user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str user_id: The user's email address. The special value me can be used to indicate the authenticated user. (required)
:param str alt: Data format for the response.
:param str fields: Selector specifying which fields to include in a partial response.
:param str key: API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
:param str oauth_token: OAuth 2.0 token for the current user.
:param bool pretty_print: Returns response with indentations and line breaks.
:param str quota_user: Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.
:param str user_ip: IP address of the site where the request originates. Use this if you want to enforce per-user limits.
:param Draft body:
:return: Draft
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_id', 'alt', 'fields', 'key', 'oauth_token', 'pretty_print', 'quota_user', 'user_ip', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method gmail_users_drafts_create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in params) or (params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `gmail_users_drafts_create`")
collection_formats = {}
resource_path = '/{userId}/drafts'.replace('{format}', 'json')
path_params = {}
if 'user_id' in params:
path_params['userId'] = params['user_id']
query_params = {}
if 'alt' in params:
query_params['alt'] = params['alt']
if 'fields' in params:
query_params['fields'] = params['fields']
if 'key' in params:
query_params['key'] = params['key']
if 'oauth_token' in params:
query_params['oauth_token'] = params['oauth_token']
if 'pretty_print' in params:
query_params['prettyPrint'] = params['pretty_print']
if 'quota_user' in params:
query_params['quotaUser'] = params['quota_user']
if 'user_ip' in params:
query_params['userIp'] = params['user_ip']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['message/rfc822'])
# Authentication setting
auth_settings = ['Oauth2']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Draft',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def gmail_users_drafts_delete(self, user_id, id, **kwargs):
"""
Immediately and permanently deletes the specified draft. Does not simply trash it.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be
|
diogo149/simbo
|
simbo/base/api.py
|
Python
|
mit
| 1,542
| 0
|
import abc
import json
import flask
class ApiBase(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def add_feature(self, feature_name, distribution, **kwargs):
return
@abc.abstractmethod
def remove_feature(self, feature_name, **kwargs):
return
@abc.abstractmethod
def add_input_variable(self, variable_name, distribution, **kwargs):
return
@abc.abstractmethod
def remove_input_variable(self, variable_name, **kwargs):
return
@abc.abstractmethod
def get_candidate(self, variables, **kwargs):
return
@abc.abstractmethod
def save_result(self, candidate_id, result, **kwargs):
return
def api_dispatch(self, data):
method = data["method"]
if not hasattr(self, method):
msg = "Method %s not found" % method
            raise ValueError(msg)
        result = getattr(self, method)(**data)
|
return json.dumps(result or {})
def start_server(self):
app = flask.Flask(__name__)
@app.route('/', methods=['POST'])
def api_call():
data = flask.request.json
print("Received: %s" % data)
try:
return self.api_dispatch(data)
except Exception as e:
msg = str(e)
print("ERROR: " + msg)
return msg
app.run()
if __name__ == "__main__":
class Foo
|
(ApiBase):
def add_feature(self, **kwargs):
return "foooo"
foo = Foo()
foo.start_server()
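# Editor's note: a hypothetical client helper for the server started above. It
# assumes the server is already running on Flask's default address
# http://127.0.0.1:5000/ and that the `requests` package is installed (it is not
# a dependency of this module). The payload layout (a "method" key plus keyword
# arguments) mirrors what api_dispatch expects.
def _example_client_call(method="add_feature", **kwargs):
    import requests
    payload = dict(kwargs, method=method)
    return requests.post("http://127.0.0.1:5000/", json=payload).text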
|
sim0629/irc
|
irc/tests/test_schedule.py
|
Python
|
lgpl-2.1
| 1,442
| 0.034674
|
import random
import datetime
import pytest
from irc import schedule
def test_delayed_command_order():
"""
delayed commands should be sorted by delay time
"""
null = lambda: None
delays = [random.randint(0, 99) for x in range(5)]
cmds = sorted([
schedule.DelayedCommand.after(delay, null)
for delay in delays
])
assert [c.delay.seconds for c in cmds] == sorted(delays)
def test_periodic_command_delay():
"A PeriodicCommand must have a positive, non-zero delay."
with pytest.raises(ValueError) as exc_info:
schedule.PeriodicCommand.after(0, None)
assert str(exc_info.value) == test_periodic_command_delay.__doc__
def test_periodic_command_fixed_delay():
"""
Test that we can construct a p
|
eriodic command with a fixed initial
delay.
"""
fd = sch
|
edule.PeriodicCommandFixedDelay.at_time(
at = datetime.datetime.now(),
delay = datetime.timedelta(seconds=2),
function = lambda: None,
)
assert fd.due() == True
assert fd.next().due() == False
class TestCommands(object):
def test_command_at_noon(self):
"""
Create a periodic command that's run at noon every day.
"""
when = datetime.time(12,0)
cmd = schedule.PeriodicCommandFixedDelay.daily_at(when, function=None)
assert cmd.due() is False
next_cmd = cmd.next()
daily = datetime.timedelta(days=1)
day_from_now = datetime.datetime.now() + daily
two_days_from_now = day_from_now + daily
assert day_from_now < next_cmd < two_days_from_now
|
google/edward2
|
experimental/attentive_uncertainty/contextual_bandits/pretrain.py
|
Python
|
apache-2.0
| 9,412
| 0.007969
|
# coding=utf-8
# Copyright 2022 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrains structured neural processes for the wheel bandit task.
"""
from experimental.attentive_uncertainty import regressor # local file import
from experimental.attentive_uncertainty import utils # local file import
import numpy as np
import tensorflow.compat.v1 as tf
tf.compat.v1.enable_eager_execution()
def sample_single_wheel_bandit_data(num_datapoints,
num_actions,
context_dim,
delta,
mean_v,
std_v,
mu_large,
std_large):
"""Samples from Wheel bandit game (see Riquelme et al. (2018)).
Args:
num_datapoints: Number (n) of (context, action, reward) triplets to sample.
num_actions: (a) Number of actions.
context_dim: (c) Number of dimensions in the context.
delta: Exploration parameter: high reward in one region if norm above delta.
mean_v: Mean reward for each action if context norm is below delta.
std_v: Gaussian reward std for each action if context norm is below delta.
mu_large: Mean reward for optimal action if context norm is above delta.
std_large: Reward std for optimal action if context norm is above delta.
Returns:
context_action_pairs: Sampled (context, action) matrix of size (n, c + a).
rewards: Sampled reward matrix of size (n, 1).
"""
data = []
actions = []
rewards = []
# Sample uniform contexts in unit ball.
while len(data) < num_datapoints:
raw_data = np.random.uniform(-1, 1, (int(num_datapoints / 3), context_dim))
for i in range(raw_data.shape[0]):
if np.linalg.norm(raw_data[i, :]) <= 1:
data.append(raw_data[i, :])
contexts = np.stack(data)[:num_datapoints, :]
# Sample rewards and random actions.
for i in range(num_datapoints):
r = [np.random.normal(mean_v[j], std_v[j]) for j in range(num_actions)]
if np.linalg.norm(contexts[i, :]) >= delta:
# Large reward in the right region for the context.
r_big = np.random.normal(mu_large, std_large)
if contexts[i, 0] > 0:
if contexts[i, 1] > 0:
r[0] = r_big
else:
r[1] = r_big
else:
if contexts[i, 1] > 0:
r[2] = r_big
else:
r[3] = r_big
one_hot_vector = np.zeros((5))
random_action = np.random.randint(num_actions)
one_hot_vector[random_action] = 1
actions.append(one_hot_vector)
rewards.append(r[random_action])
rewards = np.expand_dims(np.array(rewards), -1)
context_action_pairs = np.hstack([contexts, actions])
perm = np.random.permutation(len(rewards))
return context_a
|
ction_pairs[perm, :], rewards[perm, :]
def get_single_wheel_data(num_datapoints, num_actions, context_dim, delta):
"""Samples data for a single wheel with default benchmark configuration.
Args:
|
num_datapoints: Number (n) of (context, action, reward) triplets to sample.
num_actions: (a) Number of actions.
context_dim: (c) Number of dimensions in the context.
delta: Exploration parameter: high reward in one region if norm above delta.
Returns:
context_action_pairs: Sampled (context, action) matrix of size (n, c + a).
rewards: Sampled reward matrix of size (n, 1).
"""
mean_v = [1.0, 1.0, 1.0, 1.0, 1.2]
std_v = [0.01, 0.01, 0.01, 0.01, 0.01]
mu_large = 50
std_large = 0.01
context_action_pairs, rewards = sample_single_wheel_bandit_data(
num_datapoints,
num_actions,
context_dim,
delta,
mean_v,
std_v,
mu_large,
std_large)
return context_action_pairs, rewards
def procure_dataset(hparams, num_wheels, seed=0):
"""Samples the full dataset for pretraining GNPs."""
np.random.seed(seed)
all_context_action_pairs, all_rewards = [], []
for _ in range(num_wheels):
delta = np.random.uniform()
context_action_pairs, rewards = get_single_wheel_data(
hparams.num_target + hparams.num_context,
hparams.num_actions,
hparams.context_dim,
delta)
all_context_action_pairs.append(context_action_pairs)
all_rewards.append(rewards)
all_context_action_pairs = np.stack(all_context_action_pairs)
all_rewards = np.stack(all_rewards)
return all_context_action_pairs, all_rewards
def get_splits(dataset, n_context, batch_size, points_perm=True):
"""Splits the dataset into target and context sets."""
full_x, full_y = dataset
dataset_perm = np.random.permutation(len(full_x))[:batch_size]
if points_perm:
datapoints_perm = np.random.permutation(full_x.shape[1])
else:
datapoints_perm = np.arange(full_x.shape[1])
target_x = tf.to_float(full_x[dataset_perm[:, None], datapoints_perm])
target_y = tf.to_float(full_y[dataset_perm[:, None], datapoints_perm])
context_x = target_x[:, :n_context, :]
context_y = target_y[:, :n_context, :]
unseen_targets = target_y[:, n_context:]
return context_x, context_y, target_x, target_y, unseen_targets
def training_loop(train_dataset,
valid_dataset,
model,
hparams):
"""Trains an SNP for a fixed number of iterations."""
optimizer_config = {'optimizer': hparams.optimizer(hparams.lr),
'max_grad_norm': hparams.max_grad_norm}
num_context = hparams.num_context
best_mse = np.inf
step = tf.function(utils.mse_step.python_function) # pytype: disable=module-attr
for it in range(hparams.num_iterations):
batch_train_data = get_splits(
train_dataset,
num_context,
hparams.batch_size,
points_perm=True)
nll, mse, local_z_kl, global_z_kl = step(
model,
batch_train_data,
optimizer_config)
if it % hparams.print_every == 0:
(batch_context_x,
batch_context_y,
batch_target_x,
batch_target_y,
batch_unseen_targets) = get_splits(valid_dataset,
num_context,
hparams.batch_size,
points_perm=False)
prediction = model(batch_context_x,
batch_context_y,
batch_target_x,
batch_target_y)
batch_unseen_predictions = prediction[:, num_context:]
valid_nll = utils.nll(batch_unseen_targets, batch_unseen_predictions)
valid_mse = utils.mse(batch_unseen_targets, batch_unseen_predictions)
if model.local_variational:
valid_local_kl = tf.reduce_mean(
tf.reduce_sum(model.losses[-1][:, num_context:], axis=[1, 2]))
else:
valid_local_kl = 0.
valid_global_kl = tf.reduce_mean(tf.reduce_sum(model.losses[-2], axis=-1))
print('it: {}, train nll: {}, mse: {}, local kl: {} global kl: {} '
'valid nll: {}, mse: {}, local kl: {} global kl: {}'
.format(it, nll, mse, local_z_kl, global_z_kl,
valid_nll, valid_mse, valid_local_kl, valid_global_kl))
if valid_mse.numpy() < best_mse:
best_mse = valid_mse.numpy()
print('Saving best model with MSE', best_mse)
model.save_weights(hparams.save_path)
def train(data_hparams,
model_hparams,
training_hparams):
"""Executes the training pipeline for SNPs."""
all_context_action_pairs, all_rewards = procure_dataset(
data_hparams,
num_wheels=100,
seed=0)
train_dataset = (all_context_action_pairs, all_rewards)
all_co
|
hmustafamail/digitalimageprocessing
|
HW 5 - Frequency Filtering/Spatial Filtering/spatialFiltering.py
|
Python
|
gpl-2.0
| 2,087
| 0.01677
|
# Mustafa Hussain
# Digital Image Processing with Dr. Anas Salah Eddin
# FL Poly, Spring 2015
#
# Homework 3: Spatial Filtering
#
# USAGE NOTES:
#
# Written in Python 2.7
#
# Please ensure that the script is run from the same directory as the images
# directory!
import cv2
import copy
#import matplotlib.pyplot as plt
import numpy
import math
#from skimage import exposure
INPUT_DIRECTORY = 'input/'
OUTPUT_DIRECTORY = 'output/'
IMAGE_FILE_EXTENSION = '.JPG'
MAX_INTENSITY = 255 # 8-bit images
def laplacianFilter(image):
"""Approximates the second derivative, bringing out edges.
Referencing below zero wraps around, so top and left sides will be sharpened.
We are not bothering with the right and bottom edges, because referencing
above the image size results in a boundary error.
"""
width, height = image.shape
filteredImage = copy.deepcopy(image)
originalImage = copy.deepcopy(image)
# Avoid right, bottom edges.
for i in range(width - 1):
for j in range(height - 1):
# Mask from homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm
total = 0.0
total += -1 * float(image[i][j + 1])
total += -1 * float(image[i - 1][j])
total += 4 * float(image[i][j])
total += -
|
1 * float(image[i + 1][j])
total += -1 * float(image[i][j - 1])
filteredImage[i][j] = total / 9.0
filteredImage = (filteredImage / numpy.max(filteredImage)) * MAX_INTENSITY
return filteredImage
def saveImage(image, filename):
"""Saves the image in the output directory with the filename
|
given.
"""
cv2.imwrite(OUTPUT_DIRECTORY + filename + IMAGE_FILE_EXTENSION, image)
def openImage(fileName):
"""Opens the image in the input directory with the filename given.
"""
return cv2.imread(INPUT_DIRECTORY + fileName + IMAGE_FILE_EXTENSION, 0)
# Input images
inputForSharpening = 'testImage1'
# Import image.
imageForSharpening = openImage(inputForSharpening)
print("Laplacian Filter...")
filtered = laplacianFilter(imageForSharpening)
saveImage(filtered, inputForSharpening + 'Laplace')
print("Done.")
|
horczech/coala-bears
|
tests/hypertext/HTMLLintBearTest.py
|
Python
|
agpl-3.0
| 863
| 0
|
from bears.hypertext.HTMLLintBear import HTMLLintBear
from coalib.testing.LocalBearTestHelper import verify_local_bear
test_file = """
<html>
<body>
<h1>Hello, world!</h1>
</body>
</html>
"""
HTMLLintBearTest = verify_local_bear(HTMLLintBear,
valid_files=(),
invalid_files=(test_file,),
tempfile_kwargs={'suffix': '.html'})
HTMLLintBearIgnoreTest = verify_loc
|
al_bear(
HTMLLintBear,
valid_files=(test_file,),
invalid_files=(),
settings={'htmllint_ignore': 'optional_tag'},
tempfile_kwargs={'suffix': '.html'})
HTMLLintBearIgnoreQuotationTest = verify_local_bear(
HTMLLintBear,
v
|
alid_files=(),
invalid_files=(test_file,),
settings={'htmllint_ignore': 'quotation'},
tempfile_kwargs={'suffix': '.html'})
|
davidcox/genson
|
test/test_extra_kwargs.py
|
Python
|
mit
| 225
| 0.004444
|
from genson.functions impor
|
t GridGenerator
def test_extra_kwargs():
raised = False
try:
g = GridGenerator(1, 2, 3, spurious_kwarg=4)
except ValueError:
raised = True
|
assert(raised == True)
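# Editor's note: an equivalent, more compact form of the check above, assuming
# pytest is available as the test runner; behaviour is unchanged.
import pytest

def test_extra_kwargs_pytest_style():
    with pytest.raises(ValueError):
        GridGenerator(1, 2, 3, spurious_kwarg=4)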
|
infobloxopen/heat-infoblox
|
heat_infoblox/resources/ospf.py
|
Python
|
apache-2.0
| 7,481
| 0
|
# Copyright (c) 2016 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat_infoblox import constants
from heat_infoblox import resource_utils
LOG = logging.getLogger(__name__)
class Ospf(resource.Resource):
"""A resource which represents an OSPF settings.
This is used to configure OSPF parameters for the member.
"""
PROPERTIES = (
GRID_MEMBERS, ADVERTISE_INTERFACE_VLAN, AREA_ID, AREA_TYPE,
AUTHENTICATION_KEY, AUTHENTICATION_TYPE, AUTO_CALC_COST_ENABLED,
COMMENT, COST, DEAD_INTERVAL, HELLO_INTERVAL, INTERFACE,
IS_IPV4, KEY_ID, RETRANSMIT_INTERVAL, TRANSMIT_DELAY
) = (
'grid_members', 'advertise_interface_vlan', 'area_id', 'area_type',
'authentication_key', 'authentication_type', 'auto_calc_cost_enabled',
'comment', 'cost', 'dead_interval', 'hello_interval', 'interface',
'is_ipv4', 'key_id', 'retransmit_interval', 'transmit_delay'
)
AREA_TYPES = (
'NSSA',
'STANDARD',
'STUB'
)
AUTHENTICATION_TYPES = (
'MESSAGE_DIGEST',
'NONE',
'SIMPLE'
)
INTERFACES = (
'IP',
'LAN_HA'
)
DELIM = '/'
properties_schema = {
constants.CONNECTION:
resource_utils.connection_schema(constants.DDI),
GRID_MEMBERS: properties.Schema(
properties.Schema.LIST,
_('List of Grid Member Names for Anycast IP address'),
schema=properties.Schema(
properties.Schema.STRING
),
update_allowed=True,
required=True
|
),
ADVERTISE_INTERFACE_VLAN: properties.Schema(
properties.Schema.STRING,
_('The VLAN used as the advertising interface '
'for sending OSPF announcements.'),
update_allowed=True),
AREA_ID: properties.Schema(
properties.Schema.STRING,
_('The area ID value of the OSPF settings.'),
u
|
pdate_allowed=True,
required=True),
AREA_TYPE: properties.Schema(
properties.Schema.STRING,
_('The OSPF area type.'),
update_allowed=True,
constraints=[
constraints.AllowedValues(AREA_TYPES)
]),
AUTHENTICATION_KEY: properties.Schema(
properties.Schema.STRING,
_('The authentication password to use for OSPF.'),
update_allowed=True),
AUTHENTICATION_TYPE: properties.Schema(
properties.Schema.STRING,
_('The authentication type used for the OSPF advertisement.'),
update_allowed=True,
constraints=[
constraints.AllowedValues(AUTHENTICATION_TYPES)
]),
AUTO_CALC_COST_ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('Determines if auto calculate cost is enabled or not.'),
update_allowed=True,
required=True),
COMMENT: properties.Schema(
properties.Schema.STRING,
_('A descriptive comment of the OSPF configuration.'),
update_allowed=True),
COST: properties.Schema(
properties.Schema.INTEGER,
_('The cost metric associated with the OSPF advertisement.'),
update_allowed=True),
DEAD_INTERVAL: properties.Schema(
properties.Schema.INTEGER,
_('The dead interval value of OSPF (in seconds).'),
update_allowed=True),
HELLO_INTERVAL: properties.Schema(
properties.Schema.INTEGER,
_('The hello interval value of OSPF.'),
update_allowed=True,),
INTERFACE: properties.Schema(
properties.Schema.STRING,
_('The interface that sends out OSPF advertisement information.'),
update_allowed=True,
constraints=[
constraints.AllowedValues(INTERFACES)
]),
IS_IPV4: properties.Schema(
properties.Schema.BOOLEAN,
_('The OSPF protocol version. '),
update_allowed=True,
required=True),
KEY_ID: properties.Schema(
properties.Schema.INTEGER,
_('The hash key identifier to use for'
' "MESSAGE_DIGEST" authentication.'),
update_allowed=True),
RETRANSMIT_INTERVAL: properties.Schema(
properties.Schema.INTEGER,
_('The retransmit interval time of OSPF (in seconds).'),
update_allowed=True),
TRANSMIT_DELAY: properties.Schema(
properties.Schema.INTEGER,
_('The transmit delay value of OSPF (in seconds).'),
update_allowed=True),
}
@property
def infoblox(self):
if not getattr(self, 'infoblox_object', None):
conn = self.properties[constants.CONNECTION]
self.infoblox_object = resource_utils.connect_to_infoblox(conn)
return self.infoblox_object
def handle_create(self):
ospf_options_dict = {
name: self.properties.get(name) for name in self.PROPERTIES}
for member_name in self.properties[self.GRID_MEMBERS]:
self.infoblox.create_ospf(member_name,
ospf_options_dict)
self.resource_id_set(self.properties[self.AREA_ID])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
new_members = set(tmpl_diff['Properties']['grid_members'])
if 'grid_members' in prop_diff:
old_members = set(self.properties.get('grid_members'))
to_remove = old_members - new_members
for member in to_remove:
self.infoblox.delete_ospf(self.properties[self.AREA_ID],
member)
if len(prop_diff) > 1:
# OSPF setting was changed, so need to update all members
to_add = new_members
else:
                    # OSPF settings did not change, so only add the new members
to_add = new_members - old_members
else:
# OSPF setting was changed, so need to update all members
to_add = new_members
for member in to_add:
self.infoblox.create_ospf(
member,
tmpl_diff['Properties'],
old_area_id=self.properties[self.AREA_ID])
def handle_delete(self):
for member in self.properties[self.GRID_MEMBERS]:
self.infoblox.delete_ospf(self.properties[self.AREA_ID], member)
def resource_mapping():
return {
'Infoblox::Grid::Ospf': Ospf,
}
|
JacekPierzchlewski/RxCS
|
examples/signals/gaussNoise_ex1.py
|
Python
|
bsd-2-clause
| 1,774
| 0.002255
|
"""
This script is an example of how to use the random gaussian noise generator
module. |br|
In this example only one signal is generated.
Both the minimum and the maximum frequency components in the signal are regulated.
After the generation, the spectrum of the signal is analyzed with a Welch analysis
and plotted.
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <jap@es.aau.dk>
*Version*:
1.0 | 03-SEP-2015 : * Version 1.0 released. |br|
*License*:
BSD 2-Clause
"""
import rxcs
import scipy.signal as scsig
import matplotlib.pyplot as plt
def _gaussNoise_ex1():
# Things on the table:
gaussNoise = rxcs.sig.gaussNoise() # Gaussian noise generator
# Configure the generator...
gaussNoise.fR = 1e6 # Representation sampling frequency [1 MHz]
gaussNoise.tS = 1 # Time [1 sec]
gaussNoise.fMin =
|
100e3 # Minimum frequency component [100 kHz]
gaussNoise.fMax = 200e3 # Maximum frequency component [200 kHz]
gaussNoise.run() # ... and run it!
vSig = gaussNoise.mSig[0, :] # take the generated signal
# -----------------------------------------------------------------
# Analyze the signal and plot it
(vFxx, vPxx) = scsig.welch(vSig, fs=gaussNoise.fR, nperseg=1024, noverlap=5
|
12)
hFig1 = plt.figure(1)
hSubPlot1 = hFig1.add_subplot(111)
hSubPlot1.grid(True)
hSubPlot1.set_title('Spectrum of the signal (psd)')
hSubPlot1.set_xlabel('Frequency [kHz]')
hSubPlot1.plot(vFxx/1e3, vPxx, '-')
plt.show(block=True)
# =====================================================================
# Trigger when run as a script
# =====================================================================
if __name__ == '__main__':
_gaussNoise_ex1()
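# Editor's note: a hypothetical helper showing how one could verify that most of
# the Welch power estimated above falls inside the configured 100-200 kHz band;
# numpy is an assumed import here (it is not used by the example itself).
import numpy as np

def _band_fraction(vFxx, vPxx, fMin=100e3, fMax=200e3):
    # Fraction of the total PSD mass inside [fMin, fMax].
    mask = (vFxx >= fMin) & (vFxx <= fMax)
    return np.sum(vPxx[mask]) / np.sum(vPxx)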
|
artwr/airflow
|
tests/contrib/operators/test_gcp_sql_operator_system.py
|
Python
|
apache-2.0
| 2,927
| 0.002392
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limi
|
tations
# under the License.
import os
import unittest
from airflow import AirflowException
from tests.contrib.utils.base_gcp_system_test_case import \
SKIP_TEST_WARNING, DagGcpSystemTestCase
from tests.contrib.operators.test_gcp_sql_operator_system_helper import \
CloudSqlQueryTestHelper
from tests.contrib.utils.gcp_authenticator import GCP_CLOUDSQL_KEY
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'project-id')
SQL_QUERY_TE
|
ST_HELPER = CloudSqlQueryTestHelper()
@unittest.skipIf(DagGcpSystemTestCase.skip_check(GCP_CLOUDSQL_KEY), SKIP_TEST_WARNING)
class CloudSqlExampleDagsIntegrationTest(DagGcpSystemTestCase):
def __init__(self, method_name='runTest'):
super(CloudSqlExampleDagsIntegrationTest, self).__init__(
method_name,
dag_id='example_gcp_sql',
gcp_key=GCP_CLOUDSQL_KEY)
def tearDown(self):
        # Delete instances just in case the test failed and did not clean up after itself
self.gcp_authenticator.gcp_authenticate()
try:
SQL_QUERY_TEST_HELPER.delete_instances(instance_suffix="-failover-replica")
SQL_QUERY_TEST_HELPER.delete_instances(instance_suffix="-read-replica")
SQL_QUERY_TEST_HELPER.delete_instances()
SQL_QUERY_TEST_HELPER.delete_instances(instance_suffix="2")
SQL_QUERY_TEST_HELPER.delete_service_account_acls()
finally:
self.gcp_authenticator.gcp_revoke_authentication()
super(CloudSqlExampleDagsIntegrationTest, self).tearDown()
def test_run_example_dag_cloudsql(self):
try:
self._run_dag()
except AirflowException as e:
self.log.warning(
"In case you see 'The instance or operation is not in an appropriate "
"state to handle the request' error - you "
"can remove '.random' file from airflow folder and re-run "
"the test. This will generate random name of the database for next run "
"(the problem is that Cloud SQL keeps names of deleted instances in "
"short-term cache).")
raise e
|
grap/OCB
|
addons/project/project.py
|
Python
|
agpl-3.0
| 77,400
| 0.005969
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, date
from lxml import etree
import time
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons.base_status.base_stage import base_stage
from openerp.addons.resource.faces import task as Task
_TASK_STATE = [('draft', 'New'),('open', 'In Progress'),('pending', 'Pending'), ('done', 'Done'), ('cancelled', 'Cancelled')]
class project_task_type(osv.osv):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Stage Name', required=True, size=64, translate=True),
'description': fields.text('Description'),
'sequence': fields.integer('Sequence'),
'case_default': fields.boolean('Default for New Projects',
help="If you check this field, this stage will be proposed by default on each new project. It will not assign this stage to existing projects."),
'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
'state': fields.selection(_TASK_STATE, 'Related Status', required=True,
help="The status of your document is automatically changed regarding the selected stage. " \
"For example, if a stage is related to the status 'Close', when your document reaches this stage, it is automatically closed."),
'fold': fields.boolean('Folded by Default',
help="This stage is not visible, for example in status bar or kanban view, when there are no records in that stage to display."),
}
def _get_default_project_id(self, cr, uid, ctx={}):
proj = ctx.get('default_project_id', False)
if type(proj) is int:
return [proj]
return proj
_defaults = {
'sequence': 1,
'state': 'open',
'fold': False,
'case_default': False,
'project_ids': _get_default_project_id
}
_order = 'sequence'
def short_name(name):
"""Keep first word(s) of name to make it small enough
but distinctive"""
if not name: return name
# keep 7 chars + end of the last word
keep_words = name[:7].strip().split()
return ' '.join(name.split()[:len(keep_words)])
class project(osv.osv):
_name = "project.project"
_description = "Project"
_inherits = {'account.analytic.account': "analytic_account_id",
"mail.alias": "alias_id"}
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, project.project """
# create aliases for all projects and avoid constraint errors
alias_context = dict(context, alias_model_name='project.task')
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(project, self)._auto_init,
self._columns['alias_id'], 'id', alias_prefix='project+', alias_defaults={'project_id':'id'}, context=alias_context)
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
if user == 1:
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
if context and context.get('user_preference'):
cr.execute("""SELECT project.id FROM project_project project
LEFT JOIN account_analytic_account account ON account.id = project.analytic_account_id
LEFT JOIN project_user_rel rel ON rel.project_id = project.id
WHERE (account.user_id = %s or rel.uid = %s)"""%(user, user))
return [(r[0]) for r in cr.fetchall()]
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order,
context=context, count=count)
def _complete_name(self, cr, uid, ids, name, args, context=None):
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = (m.parent_id and (m.parent_id.name + '/') or '') + m.name
return res
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
partner_obj = self.pool.get('res.partner')
if not part:
return {'value':{}}
val = {}
if 'pricelist_id' in self.fields_get(cr, uid, context=context):
pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
val['pricelist_id'] = pricelist_id
return {'value': val}
def _get_projects_from_tasks(self, cr, uid, task_ids, context=None):
tasks = self.pool.get('project.task').browse(cr, uid, task_ids, context=context)
project_ids = [task.project_id.id for task in tasks if task.project_id]
return self.pool.get('project.project')._get_project_and_parents(cr, uid, project_ids, context)
def _get_project_and_parents(self, cr, uid, ids, context=None):
""" return the project ids and all their parent projects """
res = set(ids)
while ids:
cr.execute("""
SELECT DISTINCT parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND project.id IN %s
|
""", (tuple(ids),))
ids = [t[0] for t in cr.fetchall()]
res.update(ids)
return list(res)
# Deprecated; the _progress_rate method does not use this anymore
def _get_project_and_children(self, cr, uid, ids, context=None):
""" retrieve all children projects of project ids;
return a dictionary mapping each project to its parent project (or None)
|
"""
res = dict.fromkeys(ids, None)
while ids:
cr.execute("""
SELECT project.id, parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND parent.id IN %s
""", (tuple(ids),))
dic = dict(cr.fetchall())
res.update(dic)
ids = dic.keys()
return res
def _progress_rate(self, cr, uid, ids, names, arg, context=None):
# compute planned_hours, total_hours, effective_hours specific to each project
        # How this works: the WITH RECURSIVE statement will create a union line
# for each parent project with the hours of each child project; the final
# SUM(...) ensures we get a total of hours by project.
cr.execute("""
WITH RECURSIVE recur_table(project_id,
parent_id,
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/build/protoc_java.py
|
Python
|
mit
| 2,330
| 0.009013
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate java source files from protobuf files.
This is a helper file for the genproto_java action in protoc_java.gypi.
It performs the following steps:
1. Deletes all old sources (ensures deleted classes are not part of new jars).
2. Creates source directory.
3. Generates Java files using protoc (output into either --java-out-dir or
--srcjar).
4. Creates a new stamp file.
"""
import os
import optparse
import shutil
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "android", "gyp"))
from util import build_utils
def main(argv):
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option("--protoc", help="Path to protoc binary.")
parser.add_option("--proto-path", help="Path to proto directory.")
parser.add_option("--java-out-dir",
help="Path to output directory for java files.")
parser.add_option("--srcjar", help="Path to output srcjar.")
parser.add_option("--stamp", help="File to touch on success.")
options, args = parser.parse_args(argv)
build_utils.CheckOptions(options, parser, ['protoc', 'proto_path'])
if not options.java_out_dir and not options.srcjar:
print 'One of --java-out-dir or --srcjar must be specified.'
return 1
with build_utils.TempDir() as temp_dir:
# Specify arguments to the generator.
generator_args = ['optional_field_style=reftypes',
'store_unknown_fields=true']
out_arg = '--javanano_out=' + ','.join(generator_args) + ':' + temp_dir
# Generate Java files using protoc.
build_utils.CheckOutput(
[options.
|
protoc, '--proto_path', options.proto_path, out_arg]
+ args)
if options.java_out_dir:
build_utils.DeleteDirectory(options.java_out_dir)
shutil.copytree(temp_dir, options.java_out_dir)
else:
build_utils.ZipDir(options.srcjar, temp_dir)
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
args + [options.protoc] + build_utils.GetPyt
|
honDependencies())
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
sdgdsffdsfff/monitor-core
|
gmond/python_modules/memcached/memcached.py
|
Python
|
bsd-3-clause
| 13,648
| 0.010111
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import traceback
import os
import threading
import time
import socket
import select
descriptors = list()
Desc_Skel = {}
_Worker_Thread = None
_Lock = threading.Lock() # synchronization lock
Debug = False
def dprint(f, *v):
if Debug:
print >>sys.stderr, "DEBUG: "+f % v
def floatable(str):
try:
float(str)
return True
except:
return False
class UpdateMetricThread(threading.Thread):
def __init__(self, params):
threading.Thread.__init__(self)
self.running = False
self.shuttingdown = False
self.refresh_rate = 15
if "refresh_rate" in params:
self.refresh_rate = int(params["refresh_rate"])
self.metric = {}
self.last_metric = {}
self.timeout = 2
self.host = "localhost"
self.port = 11211
if "host" in params:
self.host = params["host"]
if "port" in params:
self.port = int(params["port"])
self.type = params["type"]
self.mp = params["metrix_prefix"]
def shutdown(self):
self.shuttingdown = True
if not self.running:
return
try:
self.join()
except:
pass
def run(self):
self.running = True
while not self.shuttingdown:
_Lock.acquire()
self.update_metric()
_Lock.release()
time.sleep(self.refresh_rate)
self.running = False
def update_metric(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
msg = ""
self.last_metric = self.metric.copy()
try:
dprint("connect %s:%d", self.host, self.port)
sock.connect((self.host, self.port))
sock.send("stats\r\n")
while True:
rfd, wfd, xfd = select.select([sock], [], [], self.timeout)
if not rfd:
print >>sys.stderr, "ERROR: select timeout"
break
for fd in rfd:
if fd == sock:
data = fd.recv(8192)
msg += data
if msg.find("END"):
break
sock.close()
except socket.error, e:
print >>sys.stderr, "ERROR: %s" % e
for m in msg.split("\r\n"):
d = m.split(" ")
if len(d) == 3 and d[0] == "STAT" and floatable(d[2]):
self.metric[self.mp+"_"+d[1]] = float(d[2])
def metric_of(self, name):
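        # Return the raw stat value, or, for names ending in "_rate", the delta of
        # the base stat divided by the elapsed "<prefix>_time" delta since the last poll.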
val = 0
mp = name.split("_")[0]
if name.rsplit("_",1)[1] == "rate" and name.rsplit("_",1)[0] in self.metric:
_Lock.acquire()
name = name.rsplit("_",1)[0]
if name in self.last_metric:
num = self.metric[name]-self.last_metric[name]
period = self.metric[mp+"_time"]-self.last_metric[mp+"_time"]
try:
val = num/period
except ZeroDivisionError:
val = 0
_Lock.release()
elif name in self.metric:
_Lock.acquire()
val = self.metric[name]
_Lock.release()
        # Value should never be negative. If it is, the counters wrapped, e.g. due to a memcached restart
if val < 0:
val = 0
return val
def metric_init(params):
global descriptors, Desc_Skel, _Worker_Thread, Debug
print '[memcached] memcached protocol "stats"'
if "type" not in params:
params["type"] = "memcached"
if "metrix_prefix" not in params:
if params["type"] == "memcached":
params["metrix_prefix"] = "mc"
elif params["type"] == "Tokyo Tyrant":
params["metrix_prefix"] = "tt"
print params
# initialize skeleton of descriptors
Desc_Skel = {
'name' : 'XXX',
'call_back' : metric_of,
'time_max' : 60,
'value_type' : 'float',
'format' : '%.0f',
'units' : 'XXX',
'slope' : 'XXX', # zero|positive|negative|both
'description' : 'XXX',
'groups' : params["type"],
}
if "refresh_rate" not in params:
params["refresh_rate"] = 15
if "debug" in params:
Debug = params["debug"]
dprint("%s", "Debug mode on")
_Worker_Thread = UpdateMetricThread(params)
_Worker_Thread.start()
# IP:HOSTNAME
if "spoof_host" in params:
Desc_Skel["spoof_host"] = params["spoof_host"]
mp = params["metrix_prefix"]
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_curr_items",
"units" : "items",
"slope" : "both",
"description": "Current number of items stored",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_cmd_get",
"units" : "commands",
"slope" : "positive",
"description": "Cumulative number of retrieval reqs",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_cmd_set",
"units" : "commands",
"slope" : "positive",
"description": "Cumulative number of storage reqs",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_bytes_read",
"units" : "bytes",
"slope" : "positive",
"description": "Total number of bytes read by this server from network",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_bytes_written",
"units" : "bytes",
"slope" : "positive",
"description": "Total number of bytes sent by this server to network",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_bytes",
"units" : "bytes",
"slope" : "both",
"description": "Current number of bytes used to store items",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_limit_maxbytes",
"units" : "bytes",
"slope" : "both",
"description": "Number of bytes this server is allowed to use for storage",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_curr_connections",
"units" : "connections",
"slope" : "both",
"description": "Number of open connections",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_decr_hits",
"units" : "items",
"slope" : "positive",
"description": "Number of keys that have been decremented and found present ",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_decr_misses",
"units" : "items",
"slope" : "positive",
"description": "Number of items that have been decremented and not found",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_delete_hits",
|
"units" : "items",
"slope" : "positive",
"description": "Number of keys that have been deleted and found present ",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_delete_misses",
"units" : "items",
"slope" : "positive",
|
"description": "Number of items that have been deleted and not found",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : mp+"_evictions",
"units"
|
DannySapz/is210-week-04-warmup
|
task_05.py
|
Python
|
mpl-2.0
| 285
| 0
|
#!/usr/bin/
|
env python
# -*- coding: utf-8 -*-
"""task 05"""
def defaults(my_required, my_optional=True):
"""1. ``my_optional`` which has a default value of True
2. ``my_required`` which is a required param and has no default value"""
return my_optional is my_req
|
uired
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/lookup/redis_kv.py
|
Python
|
bsd-3-clause
| 2,846
| 0.003514
|
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: redis_kv
author: Jan-Piet Mens <jpmens(at)gmail.com>
version_added: "0.9"
short_description: fetch data from Redis
description:
      - this lookup returns the value stored in Redis for the given key; each term is a comma-separated 'url,key' pair
requirements:
- redis (python library https://github.com/andymccurdy/redis-py/)
options:
_terms:
description: Two element comma separated strings composed of ur
|
l of the Redis server and key to query
options:
_url:
description: location of redis host in url format
default: 'redis://localhost:6379'
_key:
description: key to query
required: True
"""
EXAMPLES = """
- name: query redis for somekey
debug: msg="{{ lookup('redis_kv', 'redis://localhost:6379,somekey') }} is value in Redis for somekey"
"""
RETURN = """
_raw:
description: values stored i
|
n Redis
"""
import os
import re
HAVE_REDIS = False
try:
import redis
HAVE_REDIS = True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not HAVE_REDIS:
raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
ret = []
for term in terms:
(url, key) = term.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
ret.append(res)
except:
ret.append("") # connection failed or key not found
return ret
|
ishay2b/tensorflow
|
tensorflow/python/kernel_tests/variable_scope_test.py
|
Python
|
apache-2.0
| 48,184
| 0.009298
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class VariableScopeTest(test.TestCase):
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 =
|
vs.get_variable("v", [1])
self.assertEqual(v, v1)
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so
|
we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set([v.name for v in vs._vars.values()]))
def testVarScopeInitializer(self):
with self.test_session() as sess:
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("tower") as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
sess.run(variables_lib.initialize_variables([v]))
self.assertAllClose(v.eval(), 0.3)
with variable_scope.variable_scope(tower, initializer=init):
w = variable_scope.get_variable("w", [])
sess.run(variables_lib.initialize_variables([w]))
self.assertAllClose(w.eval(), 0.3)
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower") as tower:
with variable_scope.variable_scope("foo", constraint=constraint):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.constraint, constraint)
with variable_scope.variable_scope(tower, constraint=constraint):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.constraint, constraint)
def testVarScopeDType(self):
with self.test_session():
with variable_scope.variable_scope("tower") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, dtypes.float16)
with variable_scope.variable_scope(tower, dtype=dtypes.float16):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, dtypes.float16)
def testInitFromNonTensorValue(self):
with self.test_session() as sess:
v = variable_scope.get_variable("v", initializer=4, dtype=dtypes.int32)
sess.run(variables_lib.initialize_variables([v]))
self.assertAllClose(v.eval(), 4)
w = variable_scope.get_variable(
"w", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
sess.run(variables_lib.initialize_variables([w]))
self.assertAllClose(w.eval(), [1, 2, 3])
with self.assertRaises(TypeError):
variable_scope.get_variable("x", initializer={})
def testInitFromNonInitializer(self):
with self.test_session():
# Test various dtypes with zeros initializer as following:
types = [
dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
dtypes.int64, dtypes.bool
]
# Use different variable_name to distinguish various dtypes
for (i, dtype) in enumerate(types):
x = variable_scope.get_variable(
name="x%d" % i, shape=(3, 4), dtype=dtype)
y = variable_scope.get_variable(
name="y%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=init_ops.zeros_initializer(dtype=dtype))
variables_lib.global_variables_initializer().run()
self.assertAllEqual(x.eval(), y.eval())
def testVarScopeCachingDevice(self):
with self.test_session():
caching_device = "/job:moo"
with variable_scope.variable_scope("tower"):
with variable_scope.variable_scope(
"caching", caching_device=caching_device):
v = variable_scope.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with variable_scope.variable_scope("child"):
v2 = variable_scope.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with variable_scope.variable_scope("not_cached", caching_device=""):
v2_not_cached = variable_scope.get_variable("v", [])
self.assertFalse(v2_not_cached.value().device.startswith(
caching_device))
with variable_scope.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = variable_scope.get_variable("v", [])
self.assertFalse(v2_identity_device.value().device.startswith(
caching_device))
with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = variable_scope.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
def testVarScopeRegularizer(self):
with self.test_session() as sess:
init = init_ops.constant_initializer(0.3)
def regularizer1(v):
return math_ops.reduce_mean(v) + 0.1
def regularizer2(v):
return math_ops.reduce_mean(v) + 0.2
with variable_scope.variable_scope(
"tower", regularizer=regularizer1) as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
sess.run(variables_lib.initialize_variables([v]))
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(losses[0].eval(), 0.4)
with variable_scope.variable_scope(tower, initializer=init) as vs:
u = variable_scope.get_variable("u", [])
v
|
harikishen/addons-server
|
src/olympia/migrations/532-unleash-payments-in-usa.py
|
Python
|
bsd-3-clause
| 266
| 0
|
from olympia import amo
import mkt
from mkt.webapps.models import AddonExcludedRegion
def run():
|
"""Unleash payments in USA."""
(AddonExcludedRegion.objects
.exclude(addon__premium_type=amo.ADDON_FREE)
.filter(region=mkt.regions.US.
|
id).delete())
|
intelligent-agent/redeem
|
redeem/Stepper.py
|
Python
|
gpl-3.0
| 15,869
| 0.01002
|
#!/usr/bin/env python
"""
A Stepper Motor Driver class for Replicape.
Author: Elias Bakken
email: elias(dot)bakken(at)gmail(dot)com
Website: http://www.thing-printer.com
License: GNU GPL v3: http://www.gnu.org/copyleft/gpl.html
Redeem is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Redeem is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Redeem. If not, see <http://www.gnu.org/licenses/>.
"""
import Adafruit_BBIO.GPIO as GPIO
import logging
import time
from threading import Thread
from .Alarm import Alarm
from .DAC import DAC, PWM_DAC
from .Key_pin import Key_pin
from .Printer import Printer
from .ShiftRegister import ShiftRegister
class Stepper(object):
printer = None
def __init__(self, step_pin, dir_pin, fault_key, dac_channel, shiftreg_nr, name):
""" Init """
self.dac_channel = dac_channel # Which channel on the dac is connected to this stepper
self.step_pin = step_pin
self.dir_pin = dir_pin
self.fault_key = fault_key
self.name = name
self.enabled = False
self.in_use = False
self.steps_pr_mm = 1
self.microsteps = 1.0
self.microstepping = 0
self.direction = 1
self.current_disabled = False
# Set up the Shift register
ShiftRegister.make(8)
self.shift_reg = ShiftRegister.registers[shiftreg_nr]
# Set up the GPIO pins - we just have to initialize them so the PRU can flip them
# terrible hack to cover a bug in Adafruit
dir_name = "EHRPWM2A" if dir_pin == "GPIO0_22" else dir_pin
try:
GPIO.setup(dir_name, GPIO.OUT)
GPIO.setup(step_pin, GPIO.OUT)
except ValueError:
logging.warning("*** Stepper {} Pin {} initialization failure:".format(self.name, dir_name))
# Add a key code to the key listener
# Steppers have an nFAULT pin, so callback on falling
Key_pin(name, fault_key, Key_pin.FALLING, self.fault_callback)
def get_state(self):
""" Returns the current state """
|
return self.state & 0xFF # Return the state of the serial to parallel
def update(self):
""" Commits the changes """
ShiftRegister.commit() # Commit the serial to parallel
# Higher level commands
def set_steps_pr_mm(self, steps_pr_mm):
""" Set the number of steps pr mm. """
self.steps_pr_mm = steps_pr_mm
self.mmPrStep = 1.0 / (steps_pr_mm * self.microsteps)
def get_steps_pr_meter(self):
""" Get the number of steps pr meter """
return self.steps_pr_
|
mm * self.microsteps * 1000.0
def get_step_bank(self):
""" The pin that steps, it looks like GPIO1_31 """
return int(self.step_pin[4:5])
def get_step_pin(self):
""" The pin that steps, it looks like GPIO1_31 """
return int(self.step_pin[6:])
def get_dir_bank(self):
""" Get the dir pin shifted into position """
return int(self.dir_pin[4:5])
def get_dir_pin(self):
""" Get the dir pin shifted into position """
return int(self.dir_pin[6:])
def get_direction(self):
return self.direction
@staticmethod
def commit():
pass
def fault_callback(self, key, event):
Alarm(Alarm.STEPPER_FAULT,
"Stepper {}<br>Most likely the stepper is over heated.".format(self.name))
"""
The bits in the shift register are as follows (Rev B1):
Bit - name - init val
D0 = - = X (or servo enable)
D1 = CFG5 = 0 (Chopper blank time)
  D2 = CFG4 = 0 (Chopper hysteresis)
D3 = CFG0 = 0 (Chopper off time)
D4 = CFG2 = 0 (microstepping)
D5 = CFG2-Z = 0 (microstepping)
D6 = CFG1 = 0 (microstepping)
D7 = CFG1-Z = 0 (microstepping)
"""
class Stepper_00B1(Stepper):
def __init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name):
Stepper.__init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name)
self.dac = PWM_DAC(dac_channel)
self.state = 0 # The initial state of shift register
def set_microstepping(self, value, force_update=False):
if not value in range(9):
logging.warning("Tried to set illegal microstepping value: {0} for stepper {1}".format(
value, self.name))
return
EN_CFG1 = (1 << 7)
DIS_CFG1 = (0 << 7)
EN_CFG2 = (1 << 5)
DIS_CFG2 = (0 << 5)
CFG2_H = (1 << 4)
CFG2_L = (0 << 4)
CFG1_H = (1 << 6)
CFG1_L = (0 << 6)
if value == 0: # GND, GND
state = EN_CFG2 | CFG2_L | EN_CFG1 | CFG1_L
self.microsteps = 1
elif value == 1: # GND, VCC
state = EN_CFG2 | CFG2_L | EN_CFG1 | CFG1_H
self.microsteps = 2
elif value == 2: # GND, open
state = EN_CFG2 | CFG2_L | DIS_CFG1 | CFG1_L
self.microsteps = 2
elif value == 3: # VCC, GND
state = EN_CFG2 | CFG2_H | EN_CFG1 | CFG1_L
self.microsteps = 4
elif value == 4: # VCC, VCC
state = EN_CFG2 | CFG2_H | EN_CFG1 | CFG1_H
self.microsteps = 16
elif value == 5: # VCC, open
state = EN_CFG2 | CFG2_H | DIS_CFG1 | CFG1_L
self.microsteps = 4
elif value == 6: # open, GND
state = DIS_CFG2 | CFG2_L | EN_CFG1 | CFG1_L
self.microsteps = 16
elif value == 7: # open, VCC
state = DIS_CFG2 | CFG2_L | EN_CFG1 | CFG1_H
self.microsteps = 4
elif value == 8: # open, open
state = DIS_CFG2 | CFG2_L | DIS_CFG1 | CFG1_L
self.microsteps = 16
self.shift_reg.set_state(state, 0xF0)
self.mmPrStep = 1.0 / (self.steps_pr_mm * self.microsteps)
logging.debug("Updated stepper " + self.name + " to microstepping " + str(value) + " = " +
str(self.microsteps))
self.microstepping = value
def set_current_value(self, i_rms):
""" Current chopping limit (This is the value you can change) """
self.current_value = i_rms
v_iref = 2.5 * (i_rms / 1.92)
if (v_iref > 2.5):
logging.warning("Current ref for stepper " + self.name +
" above limit (2.5 V). Setting to 2.5 V")
v_iref = 2.5
logging.debug("Setting votage to " + str(v_iref) + " for " + self.name)
self.dac.set_voltage(v_iref)
def set_disabled(self, force_update=False):
if hasattr(Stepper, "printer"):
Stepper.printer.enable.set_disabled()
def set_enabled(self, force_update=False):
if hasattr(Stepper, "printer"):
Stepper.printer.enable.set_enabled()
def set_decay(self, value):
EN_CFG0 = (1 << 3)
DIS_CFG0 = (0 << 3)
EN_CFG4 = (1 << 2)
DIS_CFG4 = (0 << 2)
EN_CFG5 = (1 << 1)
DIS_CFG5 = (0 << 1)
if value == 0: # GND, GND, GND
state = DIS_CFG0 | DIS_CFG4 | DIS_CFG5
elif value == 1: # GND, GND, VCC
state = DIS_CFG0 | DIS_CFG4 | EN_CFG5
elif value == 2: # GND, VCC, GND
state = DIS_CFG0 | EN_CFG4 | DIS_CFG5
elif value == 3: # GND, VCC, VCC
state = DIS_CFG0 | EN_CFG4 | EN_CFG5
elif value == 4: # VCC, GND, GND
state = EN_CFG0 | DIS_CFG4 | DIS_CFG5
elif value == 5: # VCC, GND, VCC
state = EN_CFG0 | DIS_CFG4 | EN_CFG5
elif value == 6: # VCC, VCC, GND
state = EN_CFG0 | EN_CFG4 | DIS_CFG5
elif value == 7: # VCC, VCC, VCC
state = EN_CFG0 | EN_CFG4 | EN_CFG5
else:
logging.warning("Tried to set illegal value for stepper decay: " + str(value))
return
self.shift_reg.set_state(state, 0x0E)
self.decay = value # For saving the setting with M500
def reset(self):
self.set_disabled()
self.set_enabled()
class Stepper_00B2(Stepper_00B1):
def __init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name):
Stepper_00B1.__init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name)
self.dac = PWM_DAC(dac_channel)
if name in ["X", "E", "H"]:
self.state = 0x1 # The initial state of shift register
else:
self.state = 0x0
self.shift_reg.set_stat
|
joseamaita/guiprog-python
|
wxpython/files/generictable.py
|
Python
|
lgpl-3.0
| 787
| 0.001271
|
import wx
import wx.grid
class GenericTable(wx.grid.GridTableBase):
def __init__(self, data, rowLabels=None, colLabels=None):
wx.grid.GridTableBase.__init__(self)
self.data = data
self.rowLabels = rowLabels
self.colLabels = colLabels
def GetNumberRows(self):
return len(self.data)
def GetNumberCols(self):
return len(self.data[0])
def GetColLabelValue(self, col):
|
if self.colLabels:
return self.colLabels[col]
def GetRowLabelValue(self, row):
if self.rowLabels:
return self.rowLabels[row]
def IsEmptyCell(self, row, col):
|
return False
def GetValue(self, row, col):
return self.data[row][col]
def SetValue(self, row, col, value):
pass
|
monouno/site
|
judge/widgets/select2.py
|
Python
|
agpl-3.0
| 7,832
| 0.001149
|
# -*- coding: utf-8 -*-
"""
Select2 Widgets based on https://github.com/applegrew/django-select2.
These components are responsible for rendering
the necessary HTML data markups. Since this whole
package is to render choices using Select2 JavaScript
library, hence these components are meant to be used
with choice fields.
Widgets are generally of two types:
1. **Light** --
They are not meant to be used when there
are too many options, say, in thousands.
This is because all those options would
have to be pre-rendered onto the page
and JavaScript would be used to search
through them. Said that, they are also one
the most easiest to use. They are a
drop-in-replacement for Django's default
select widgets.
2. **Heavy** --
They are suited for scenarios when the number of options
are large and need complex queries (from maybe different
sources) to get the options.
This dynamic fetching of options undoubtedly requires
Ajax communication with the server. Django-Select2 includes
a helper JS file which is included automatically,
so you need not worry about writing any Ajax related JS code.
Although on the server side you do need to create a view
specifically to respond to the queries.
Heavy widgets have the word 'Heavy' in their name.
Light widgets are normally named, i.e. there is no
'Light' word in their names.
"""
from __future__ import absolute_import, unicode_literals
from copy import copy
from itertools import chain
from django import forms
from django.conf import settings
from django.core import signing
from django.core.urlresolvers import reverse_lazy
from django.forms.models import ModelChoiceIterator
DEFAULT_SELECT2_JS = '//cdnjs.cloudflare.com/ajax/libs/select2/4.0.3/js/select2.min.js'
DEFAULT_SELECT2_CSS = '//cdnjs.cloudflare.com/ajax/libs/select2/4.0.3/css/select2.min.css'
__all__ = ['Select2Widget', 'Select2MultipleWidget', 'Select2TagWidget',
'HeavySelect2Widget', 'HeavySelect2MultipleWidget', 'HeavySelect2TagWidget']
class Select2Mixin(object):
"""
The base mixin of all Select2 widgets.
This mixin is responsible for rendering the necessary
data attributes for select2 as well as adding the static
form media.
"""
def build_attrs(self, base_attrs, extra_attrs=None):
"""Add select2 data attributes."""
attrs = super(Select2Mixin, self).build_attrs(base_attrs, extra_attrs)
if self.is_required:
attrs.setdefault('data-allow-clear', 'false')
else:
attrs.setdefault('data-allow-clear', 'true')
attrs.setdefault('data-placeholder', '')
attrs.setdefault('data-minimum-input-length', 0)
if 'class' in attrs:
attrs['class'] += ' django-select2'
else:
attrs['class'] = 'django-select2'
return attrs
def optgroups(self, name, value, attrs=None):
"""Add empty option for clearable selects."""
if not self.is_required and not self.allow_multiple_selected:
self.choices = list(chain([('', '')], self.choices))
return super(Select2Mixin, self).optgroups(name, value, attrs=attrs)
def _get_media(self):
"""
Construct Media as a dynamic property.
.. Note:: For more information visit
https://docs.djangoproject.com/en/1.8/topics/forms/media/#media-as-a-dynamic-property
"""
return forms.Media(
js=(getattr(settings, 'SELECT2_JS_URL', DEFAULT_SELECT2_JS),
'django_select2.js'),
css={'screen': (getattr(settings, 'SELECT2_CSS_URL', DEFAULT_SELECT2_CSS),)}
)
media = property(_get_media)
class Select2TagMixin(object):
"""Mixin to add select2 tag functionality."""
|
def build_attrs(self, base_attrs, extra_attrs=None):
"""Add select2's tag attributes."""
|
extra_attrs = extra_attrs or {}
extra_attrs.setdefault('data-minimum-input-length', 1)
extra_attrs.setdefault('data-tags', 'true')
extra_attrs.setdefault('data-token-separators', [",", " "])
return super(Select2TagMixin, self).build_attrs(base_attrs, extra_attrs)
class Select2Widget(Select2Mixin, forms.Select):
"""
Select2 drop in widget.
Example usage::
class MyModelForm(forms.ModelForm):
class Meta:
model = MyModel
fields = ('my_field', )
widgets = {
'my_field': Select2Widget
}
or::
class MyForm(forms.Form):
my_choice = forms.ChoiceField(widget=Select2Widget)
"""
pass
class Select2MultipleWidget(Select2Mixin, forms.SelectMultiple):
"""
Select2 drop in widget for multiple select.
Works just like :class:`.Select2Widget` but for multi select.
"""
pass
class Select2TagWidget(Select2TagMixin, Select2Mixin, forms.SelectMultiple):
"""
    Select2 drop-in widget for tagging.
Example for :class:`.django.contrib.postgres.fields.ArrayField`::
class MyWidget(Select2TagWidget):
def value_from_datadict(self, data, files, name):
                values = super(MyWidget, self).value_from_datadict(data, files, name)
return ",".join(values)
"""
pass
class HeavySelect2Mixin(Select2Mixin):
"""Mixin that adds select2's ajax options."""
def __init__(self, attrs=None, choices=(), **kwargs):
self.choices = choices
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
self.data_view = kwargs.pop('data_view', None)
self.data_url = kwargs.pop('data_url', None)
if not (self.data_view or self.data_url):
            raise ValueError('You must either specify "data_view" or "data_url".')
def get_url(self):
"""Return url from instance or by reversing :attr:`.data_view`."""
if self.data_url:
return self.data_url
return reverse_lazy(self.data_view)
def build_attrs(self, base_attrs, extra_attrs=None):
"""Set select2's ajax attributes."""
attrs = super(HeavySelect2Mixin, self).build_attrs(base_attrs, extra_attrs)
        # sign the widget instance id (signing.dumps signs, it does not encrypt)
self.widget_id = signing.dumps(id(self))
attrs['data-field_id'] = self.widget_id
attrs.setdefault('data-ajax--url', self.get_url())
attrs.setdefault('data-ajax--cache', "true")
attrs.setdefault('data-ajax--type', "GET")
attrs.setdefault('data-minimum-input-length', 2)
attrs['class'] += ' django-select2-heavy'
return attrs
def format_value(self, value):
result = super(HeavySelect2Mixin, self).format_value(value)
if isinstance(self.choices, ModelChoiceIterator):
chosen = copy(self.choices)
chosen.queryset = chosen.queryset.filter(pk__in=[
int(i) for i in result if isinstance(i, (int, long)) or i.isdigit()
])
self.choices = set(chosen)
return result
class HeavySelect2Widget(HeavySelect2Mixin, forms.Select):
"""
Select2 widget with AJAX support.
Usage example::
class MyWidget(HeavySelectWidget):
data_view = 'my_view_name'
or::
class MyForm(forms.Form):
my_field = forms.ChoicesField(
widget=HeavySelectWidget(
data_url='/url/to/json/response'
)
)
"""
pass
class HeavySelect2MultipleWidget(HeavySelect2Mixin, forms.SelectMultiple):
"""Select2 multi select widget similar to :class:`.HeavySelect2Widget`."""
pass
class HeavySelect2TagWidget(Select2TagMixin, HeavySelect2MultipleWidget):
"""Select2 tag widget."""
pass
|
artscoop/django-basemix
|
basemix/mixins/content/content.py
|
Python
|
bsd-3-clause
| 1,190
| 0.002521
|
# coding: utf-8
from django.db import models
from django.
|
utils.translation import ugettext_lazy as _, pgettext_lazy
class ContentBase(models.Model):
"""
Base class for models that share content attributes
The attributes added by this mixin are ``title``, ``description``,
``content`` and ``is_visible``.
Attributes:
:is_visible: whether the content should be displayed by normal users
:title: title of the content, at most 192 characters
:description: most c
|
ontent objects have a description, with an unlimited size
:content: actual content of the object, with an unlimited size
"""
# Fields
is_visible = models.BooleanField(default=True, verbose_name=pgettext_lazy('content', "visible"))
title = models.CharField(blank=False, max_length=192, verbose_name=_("title"))
description = models.TextField(blank=True, verbose_name=_("description"))
content = models.TextField(blank=False, verbose_name=_("content"))
# Metadata
class Meta:
abstract = True
def save(self, *args, **kwargs):
""" Save the object to the database """
        super(ContentBase, self).save(*args, **kwargs)  # super(self.__class__, ...) would recurse infinitely in subclasses
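# A hedged usage sketch (hypothetical concrete model, not part of the original module):
#
#   class Article(ContentBase):
#       """Inherits is_visible, title, description and content."""
#       published_on = models.DateField(null=True, blank=True)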
|
Aalto-LeTech/a-plus-rst-tools
|
directives/annotated.py
|
Python
|
mit
| 15,406
| 0.00767
|
# -*- coding: utf-8 -*-
import docutils
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from html import escape
from collections import Counter
import re
import os
from sphinx.directives.code import CodeBlock
from sphinx.errors import SphinxError
from sphinx.util.fileutil import copy_asset
from sphinx.util import logging
from operator import itemgetter
import aplus_nodes
CSS_FILE = 'css/annotated.css'
JS_FILE = 'js/annotated.js'
assets_path = 'static'
logger = logging.getLogger(__name__)
annotated_section_counts = Counter()
class AnnotationError(SphinxError):
category = 'Annotation error'
def clean_path(path):
return re.sub(r"[/\\ :]+", "", path).replace(".rst", "")
def new_annotated_section_id(source_file_path):
idprefix = clean_path(source_file_path).replace(clean_path(os.getcwd()), "")
global annotated_section_counts
annotated_section_counts[idprefix] += 1
return "%s_%s" % (idprefix, str(annotated_section_counts[idprefix]))
def slicer(stringList):
for i in range(0, len(stringList)):
yield stringList[i:i+1]
class annotated_node(nodes.General, nodes.Element): pass
inline_anno_pattern = r"\[\[\[([^¶]+?)(?:¶(.*?))?\]\]\]" # [[[annotation]]] or [[[annotation¶replacement]]]
class AnnotatedSection(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
option_spec = { }
def run(self):
self.assert_has_content()
env = self.state.document.settings.env
env.annotated_name = new_annotated_section_id(self.state_machine.get_source_and_line(self.lineno)[0])
env.annotated_annotation_count = 0
env.annotated_now_within = True
node = annotated_node()
for slice in slicer(self.content):
if '.. code-block' in slice[0]:
slice[0] = slice[0].replace('.. code-block', '.. altered-code-block')
highest_annotation = self.assert_sanity(self.block_text)
if not highest_annotation:
return [self.state.document.reporter.error('Invalid annotation markers embedded in ' + self.block_text)]
# Inline annotations numbered first (before nested_parse deals with annotation directives)
inline_anno_count = len(re.findall(inline_anno_pattern, self.block_text))
env.annotated_annotation_count += inline_anno_count
self.state.nested_parse(self.content, 0, node)
node['name'] = env.annotated_name
if env.annotated_annotation_count != highest_annotation:
return [self.state.document.reporter.error('Mismatching number of annotation captions (n=%s) and the embedded annotation markers (n=%s) in %s' % (env.annotated_annotation_count, highest_annotation, self.block_text))]
env.annotated_now_within = False
return [node]
def assert_sanity(self, content):
annotation_numbers_present = set(map(lambda matching: int(matching[0]), re.findall("\d«", content)))
highest_present = max(annotation_numbers_present)
all_until_highest = set(range(1, highest_present + 1))
if annotation_numbers_present != all_until_highest:
return None
else:
return highest_present
def visit_annotated_node(self, node):
self.body.append('<div class="annotated ex-%s">\n' % (node['name']))
env = self.builder.env
env.redirect = self.body # store original output
self.body = [] # create an empty one to receive the contents of the feedback line
def depart_annotated_node(self, node):
env = self.builder.env
parsed_html = self.body # extract generated feedback line
self.body = env.redirect # restore original output
postprocessed_html = postprocess_annotation_tags(''.join(parsed_html), node['name'])
postprocessed_html = postprocess_inline_annotations(postprocessed_html, node['name'])
self.body.append(postprocessed_html)
self.body.append("</div>\n")
def postprocess_inline_annotations(html, annotated_section_id):
inline_anno_count = 0
def make_annotation_span(match):
nonlocal inline_anno_count
inline_anno_count += 1
annotation_text = match.group(1)
bit_to_insert = match.group(2)
replacement_attrib = ' data-replacement="' + bit_to_insert + '"' if bit_to_insert else ""
html_bits = (annotated_section_id, inline_anno_count, replacement_attrib, annotation_text)
return '<span class="codecomment comment-%s-%s"%s>%s</span>' % html_bits
return re.sub(inline_anno_pattern, make_annotation_span, html)
def postprocess_annotation_tags(html, annotation_id):
processed = []
openstack = []
selfclosing = []
for part in re.split('(\d«» |\d«|»|\n)', html):
if '«» ' in part:
if (len(part) != 4) or (not part[0].isdigit()) or (part[3] != ' '):
raise AnnotationError("Encountered illegal self-closing annotation tag in %s." % (annotation_id))
processed.append('<span class="ex-%s loc%s">' % (annotation_id, part[0]))
openstack.append(part[0])
selfclosing.append(part[0])
elif '«' in part:
if (len(part) != 2) or (not part[0].isdigit()):
raise AnnotationError("Encountered illegal annotation open tag in %s." % (annotation_id))
processed.append('<span class="ex-%s loc%s">' % (annotation_id, part[0]))
openstack.append(part[0])
elif part == '»':
if len(openstack) == 0:
raise AnnotationError("Unbalanced annotation markers in %s." % (annotation_id))
openstack.pop()
processed.append('</span>')
elif part == '\n':
for tag in selfclosing:
if len(openstack) == 0:
raise AnnotationError("Unbalanced annotation markers in %s." % (annotation_id))
openstack.pop()
processed.append('</span>')
selfclosing = []
processed.append(part)
else:
if ('«' in part) or ('»' in part):
raise AnnotationError("Encountered illegal annotation tag in %s." % (annotation_id))
processed.append(part)
if len(openstack) != 0:
raise AnnotationError("Unbalanced annotation markers in %s." % (annotation_id)) ##
return ''.join(processed)
class annotation_node(nodes.General, nodes.Ele
|
ment): pass
class Annotation(Directive):
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = { }
def run(self):
self.assert_has_content()
env = self.state.document.settings.env
if not env.annotated_now_within:
return [self.state.document.reporter.error('Not within an "annotated" directive:' + self.block_text.replace('\n', ' '))]
node = annotation_nod
|
e()
self.state.nested_parse(self.content, 0, node)
env.annotated_annotation_count += 1
node['annotation-number'] = env.annotated_annotation_count
node['name-of-annotated-section'] = env.annotated_name
if self.arguments:
node['replacement'] = self.arguments[0]
return [node]
def visit_annotation_node(self, node):
if 'replacement' in node:
replacement_attrib = ' data-replacement="' + escape(node['replacement']) + '"'
else:
replacement_attrib = ""
html_bits = (node['name-of-annotated-section'], node['annotation-number'], replacement_attrib)
self.body.append('<div class="container codecomment comment-%s-%s"%s>' % html_bits)
def depart_annotation_node(self, node):
self.body.append("</div>\n")
class altered_node(nodes.General, nodes.Element): pass
class AlteredCodeBlock(CodeBlock):
def run(self):
openstack = []
selfclosing = []
annotations = []
line_num = 0
loc = 0
for line in slicer(self.content):
processed = []
for part in re.split('(\d«» |\d«|»)', line[0]):
if '«» ' in part:
openstack.append((part[0], line_num, loc))
selfclosing.append(part[0])
|
pybuilder/pybuilder
|
src/main/python/pybuilder/_vendor/pkg_resources/_vendor/packaging/markers.py
|
Python
|
apache-2.0
| 8,485
| 0.000236
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from ...extern.pyparsing import ( # noqa: N817
Forward,
Group,
Literal as L,
ParseException,
ParseResults,
QuotedString,
ZeroOrMore,
stringEnd,
stringStart,
)
from .specifiers import InvalidSpecifier, Specifier
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
Operator = Callable[[str, str], bool]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node:
def __init__(self, value: Any) -> None:
self.value = value
def __str__(self) -> str:
return str(self.value)
def __repr__(self) -> str:
return f"<{self.__class__.__name__}('{self}')>"
def serialize(self) -> str:
raise NotImplementedError
class Variable(Node):
def serialize(self) -> str:
return str(self)
class Value(Node):
def serialize(self) -> str:
return f'"{self}"'
class Op(Node):
def serialize(self) -> str:
return str(self)
VARIABLE = (
L("implementation_version")
| L("platform_python_implementation")
| L("implementation_name")
| L("python_full_version")
| L("platform_release")
| L("platform_version")
| L("platform_machine")
| L("platform_system")
| L("python_version")
| L("sys_platform")
| L("os_name")
| L("os.name") # PEP-345
| L("sys.platform") # PEP-345
| L("platform.version") # PEP-345
| L("platform.machine") # PEP-345
| L("platform.python_implementation") # PEP-345
| L("python_implementation") # undocumented setuptools legacy
| L("extra") # PEP-508
)
ALIASES = {
"os.name": "os_name",
"sys.platform": "sys_platform",
"platform.version": "platform_version",
"platform.machine": "platform_machine",
"platform.python_implementation": "platform_python_implementation",
"python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(
marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
) -> str:
assert isinstance(marker, (list, tuple, str))
# Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself its own list. In that case we want to skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators: Dict[str, Operator] = {
"in": lambda lhs, rhs: lh
|
s in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(
|
lhs)
oper: Optional[Operator] = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
return oper(lhs, rhs)
class Undefined:
pass
_undefined = Undefined()
def _get_env(environment: Dict[str, str], name: str) -> str:
value: Union[str, Undefined] = environment.get(name, _undefined)
if isinstance(value, Undefined):
raise UndefinedEnvironmentName(
f"{name!r} does not exist in evaluation environment."
)
return value
def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
groups: List[List[bool]] = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, str))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info: "sys._version_info") -> str:
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment() -> Dict[str, str]:
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
class Marker:
def __init__(self, marker: str) -> None:
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
raise InvalidMarker(
f"Invalid marker: {marker!r}, parse error at "
f"{marker[e.loc : e.loc + 8]!r}"
)
def __str__(self) -> str:
return _format_marker(self._markers)
def __repr__(self) -> str:
return f"<Marker('{self}')>"
def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined en
|
eickenberg/scikit-learn
|
sklearn/dummy.py
|
Python
|
bsd-3-clause
| 14,149
| 0
|
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Maheshakya Wijewardena<maheshakya.10@cse.mrt.ac.lk>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.validation import check_array
from sklearn.utils import deprecated
class DummyClassifier(BaseEstimator, ClassifierMixin):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
class
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use.
constant : int or str or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
`classes_` : array or list of array of shape = [n_classes]
Class labels for each output.
`n_classes_` : array or list of array of shape = [n_classes]
Number of label for each output.
`class_prior_` : array or list of array of shape = [n_classes]
Probability of each class for each output.
`n_outputs_` : int,
Number of outputs.
`outputs_2d_` : bool,
True if the output at fit is 2d, else false.
"""
def __init__(self, strategy="stratified", random_state=None,
constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y, sample_weight=None):
"""Fit the random classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform",
"constant"):
raise ValueError("Unknown strategy type.")
y = np.atleast_1d(y)
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
self.classes_ = []
self.n_classes_ = []
self.class_prior_ = []
if self.strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
for k in xrange(self.n_outputs_):
classes, y_k = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
self.n_classes_.append(classes.shape[0])
class_prior = np.bincount(y_k, weights=sample_weight)
self.class_prior_.append(class_prior / class_prior.sum())
# Checking in case of constant strategy if the constant provided
# by the user is in y.
if self.strategy == "constant":
if constant[k] not in self.classes_[k]:
raise ValueError("The constant target value must be "
"present in training data")
if self.n_outputs_ == 1 and not self.output_2d_:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""
Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [
|
classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self.strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
|
proba = [proba]
y = []
for k in xrange(self.n_outputs_):
if self.strategy == "most_frequent":
ret = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
elif self.strategy == "stratified":
ret = proba[k].argmax(axis=1)
elif self.strategy == "uniform":
ret = rs.randint(n_classes_[k], size=n_samples)
elif self.strategy == "constant":
ret = np.ones(n_samples, dtype=int) * (
np.where(classes_[k] == constant[k]))
y.append(classes_[k][ret])
y = np.vstack(y).T
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
        P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1 and not self.output_2d_:
# Get same type even for self.n_outputs_ == 1
n_class
|
anguoyang/SMQTK
|
OLD_ROOT/WebUI/QueryRecommend/query_recommend.py
|
Python
|
bsd-3-clause
| 7,183
| 0.002088
|
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
import os
import numpy as np
import json
thispath = os.path.dirname(os.path.abspath(__file__))
# commonly used words in event descriptions
additional_stop_words = ['event', 'name', 'explication', 'evidential', 'description', 'scene',
'objects', 'people', 'activities', 'audio']
# zero-shot queries for E006 ~ E015
queries = dict()
queries['E000'] = ''
queries['E006'] = 'sc.person sc.enclosed_area sc.electric_or_indoor_lighting sc.has_audience sc.congregating' \
' ob.light_source ob.person'
queries['E007'] = 'sc.transporting sc.manmade sc.using_tools sc.asphalt ob.round_shape ob.car'
queries['E008'] = 'sc.congregating sc.has_audience ob.person sc.pavement' \
' ob.large_group_of_people ob.crowd ob.small_group_of_people ob.railing ob.floor'
queries['E009'] = 'sc.dirty sc.natural_light sc.natural ob.large_open_area sc.sunny sc.trees' \
                  ' ob.truck ob.car ob.large_open_area ob.outdoor'
queries['E010'] = 'sc.working sc.dirty sc.enclosed_area'
queries['E011'] = 'sc.enclosed_area sc.wood_not_part_of_tree sc.electric_or_indoor_lighting'
queries['E012'] = 'sc.congregating sc.has_audience sc.asphalt sc.pavement' \
' ob.person ob.large_group_of_people ob.tree ob.sports_venue ob.crowd' \
' ob.small_group_of_people ob.railing ob.floor'
queries['E013'] = 'sc.asphalt sc.trees sc.natural_light sc.open_area' \
' ob.large_open_area ob.tree ob.rectangular_shape ob.door'
queries['E014'] = 'sc.using_tools sc.working sc.learning ob.round_shape'
queries['E015'] = 'sc.person sc.enclosed_area sc.electric_or_indoor_lighting'
queries['E021'] = 'sc.trees sc.vegetation sc.natural sc.open_area sc.pavement sc.asphalt sc.natural_light' \
' ob.tree ob.large_open_area ob.cloud ob.outdoor ob.sports_venue ob.sky ob.truck '
queries['E022'] = 'sc.learning sc.working sc.enclosed_area sc.dirty sc.using_tools sc.electric_or_indoor_lighting'
queries['E023'] = 'sc.asphalt sc.pavement sc.clouds' \
' ob.cloud ob.small_group_of_people ob.floor ob.sports_venue ob.railing'
queries['E024'] = 'sc.transporting sc.asphalt sc.trees sc.pavement ob.rectangular_shape ob.door'
queries['E025'] = 'sc.person ob.small_group_of_people ob.vertical_pattern'
queries['E026'] = 'sc.wood_not_part_of_tree sc.enclosed_area sc.working sc.using_tools sc.dirty' \
' ob.door ob.vertical_pattern ob.rectangular_shape ob.railing '
queries['E027'] = 'sc.natural sc.dirty sc.open_area sc.trees sc.natural_light' \
' ob.large_group_of_people ob.tree ob.outdoor ob.vertical_pattern ob.crowd ob.person '
queries['E028'] = 'sc.person sc.has_audience sc.enclosed_area ob.rectangular_shape ob.crowd'
queries['E029'] = 'sc.sunny sc.still_water sc.open_area sc.pavement sc.trees sc.manmade sc.asphalt' \
' ob.large_open_area ob.sports_venue ob.outdoor ob.horizontal_pattern'
queries['E030'] = 'sc.using_tools sc.working sc.dirty ob.railing ob.floor ob.face'
def read_words(_words):
words = []
with open(_words, 'r') as fid_stop_words:
for line in fid_stop_words:
if line[-1]=='\n':
line = line[:-1]
if line != '':
words.append(line)
return words
def preprocess(string, stop_words=None, special_char=None):
if stop_words is None:
_stop = thispath + '/stop_words.txt'
stop_words = read_words(_stop)
if special_char is None:
_special = thispath + '/special_characters.txt'
special_char = read_words(_special)
string = string.lower()
string = string.replace('\n', ' ')
string = string.replace('\t', ' ')
for schar in special_char:
string = string.replace(schar.decode("utf8"), '')
words = string.split(' ')
words_out = []
for w in words:
if not (w in stop_words) and len(w) > 0:
words_out.append(w)
return words_out
def generate_bow(string, dictionary):
bow = np.zeros(len(dictionary))
words = preprocess(string)
for w in words:
try:
bow[dictionary[w]] += 1
except KeyError:
# A word doesn't exist in the dictionary, so ignore it.
continue
if np.sum(bow) > 0:
bow /= np.sum(bow)
return bow
def build_dictionary():
_stop = thispath + '/stop_words.txt'
_special = thispath + '/special_characters.txt'
stop_words = read_words(_stop) + additional_stop_words
special_char = read_words(_special)
words = []
for eid in range(6, 16) + range(21, 31):
string = ""
with open('./eventtexts/E%03d.txt' % eid, 'r') as fid_event:
for line in fid_event:
string += line
words += preprocess(string, stop_words, special_char)
words = sorted(list(set(words)))
dictionary = dict()
for idx, w in enumerate(words):
dictionary[w] = idx
np.save('dictionary_event_description.npy', dictionary)
def generate_event_bow():
dictionary = np.load(thispath + '/dictionary_event_description.npy').item()
for eid in range(6, 16) + range(21, 31):
string = ""
with open(thispath + '/eventtexts/E%03d.txt' % eid, 'r') as fid_event:
for line in fid_event:
string += line
bow_eid = generate_bow(string, dictionary)
np.save(thispath + '/eventbow/E%03d.npy' % eid, bow_eid)
def recommend_query(string):
'''
Return zero-shot queries based on event description
@param string: Event description in a string format
@return: Queries in a string format
'''
dictionary = np.load(thispath + '/dictionary_event_description.npy').item()
bow = generate_bow(string, dictionary)
min_dist = 1
detected_eid = 0 # if description matching fails, it will return an empty query.
for eid in range(6, 16) + range(21, 31):
bow_eid = np.load(thispath + '/eventbow/E%03d.npy' % eid)
dist = np.sqrt(np.sum((bow - bow_eid)**2))
if min_dist > dist:
min_dist = dist
detected_eid = eid
return queries['E%03d' % detected_eid]
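# A standalone sketch (illustrative values, not executed here) of the matching rule used by
# recommend_query(): the description is turned into a normalized bag-of-words vector and
# compared, by Euclidean distance, against the stored vector of every known event; the
# closest event's zero-shot query string is returned.
#
#     toy_dictionary = {'bike': 0, 'repair': 1, 'trick': 2}    # hypothetical dictionary
#     bow = generate_bow('bike trick trick', toy_dictionary)   # -> array([1/3, 0, 2/3])
#     dist = np.sqrt(np.sum((bow - stored_bow) ** 2))          # smaller = closer match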
if __name__ == '__main__':
# build_dictionary()
# generate_event_bow()
string = 'AExplication: Bikes are normally ridden with a person sitting down on ' \
'seat and holding onto the handlebars and steering with their hands. ' \
'Tricks consist of difficult ways of riding the bike, such as on ' \
'one wheel, steering with feet or standing on the seat; or intentional ' \
'motions made with the bike that are not simply slowing down/stopping ' \
'the bike, propelling it forward, or steering the bike as it'
q = recommend_query(string)
print q
|
KellyChan/python-examples
|
python/data_science/Titanic/complexHeuristic.py
|
Python
|
mit
| 2,546
| 0.007463
|
import numpy
import pandas
import statsmodels.api as sm
def complex_heuristic(file_path):
'''
    You are given a list of Titanic passengers and their associated
information. More information about the data can be seen at the link below:
http://www.kaggle.com/c/titanic-gettingStarted/data
For this exercise, you need to write a more sophisticated heuristic
    that will use the passengers' gender and their socio-economical class and age
    to predict if they survived the Titanic disaster.
    Your prediction should be 79% accurate or higher.
    If the passenger is female or if his/her socio-economical status is high AND
    if the passenger is under 18, you should assume the passenger survived.
Otherwise, you should assume the passenger perished in the disaster.
Or more specifically in code terms: female or (high status and under 18)
You can access the gender of a passenger via passenger['Sex'].
If the passenger is male, passenger['Sex'] will return a string "male".
If the passenger is female, passenger['Sex'] will return a string "female".
You can access the socio-economical status of a passenger via passenger['Pclass']:
High socio-economical status -- passenger['Pclass'] is 1
Medium socio-economical status -- passenger['Pclass'] is 2
Low socio-economical status -- passenger['Pclass'] is 3
    You can access the age of a passenger via passenger['Age'].
Write your prediction back into the "predictions" dictionary. The
key of the dictionary should be the Passenger's id (which can be accessed
    via passenger["PassengerId"]) and the associating value should be 1 if the
    passenger survived or 0 otherwise.
For example, if a passenger survived:
passenger_id = passenger['PassengerId']
predictions[passenger_id] = 1
Or if a passenger perished in the disaster:
passenger_id = passenger['PassengerId']
predictions[passenger_id] = 0
    You can also look at the Titanic data that you will be working with
at the link below:
https://www.dropbox.com/s/r5f9aos8p9ri9sa/titanic_data.csv
'''
predictions = {}
df = pandas.read_csv(file_path)
for passenger_index, passenger in df.iterrows():
#
# your code here
#
if (passenger['Sex']=='female') or (passenger['Pclass']==1 and passenger['Age']<18):
predictions[passenger['PassengerId']] = 1
else:
predictions[passenger['PassengerId']] = 0
return predictions
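# A minimal usage sketch; the CSV file name below is hypothetical and must point at the
# Kaggle Titanic data described in the docstring above.
if __name__ == '__main__':
    predictions = complex_heuristic('titanic_data.csv')
    survivors = sum(predictions.values())
    print('predicted %d survivors out of %d passengers' % (survivors, len(predictions)))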
|
wking/pygrader
|
pygrader/email.py
|
Python
|
gpl-3.0
| 10,392
| 0.000771
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 W. Trevor King <wking@tremily.us>
#
# This file is part of pygrader.
#
# pygrader is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pygrader is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pygrader. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from email.header import Header as _Header
from email.header import decode_header as _decode_header
from email.mime.message import MIMEMessage as _MIMEMessage
from email.mime.multipart import MIMEMultipart as _MIMEMultipart
import email.utils as _email_utils
import logging as _logging
import smtplib as _smtplib
import pgp_mime as _pgp_mime
from . import ENCODING as _ENCODING
from . import LOG as _LOG
from .model.person import Person as _Person
def test_smtp(smtp, author, targets, msg=None):
"""Test the SMTP connection by sending a message to `target`
"""
if msg is None:
msg = _pgp_mime.encodedMIMEText('Success!')
msg['Date'] = _email_utils.formatdate()
msg['From'] = author
msg['Reply-to'] = msg['From']
msg['To'] = ', '.join(targets)
msg['Subject'] = 'Testing pygrader SMTP connection'
_LOG.info('send test message to SMTP server')
smtp.send_message(msg=msg)
test_smtp.__test__ = False # not a test for nose
def send_emails(emails, smtp=None, debug_target=None, dry_run=False):
"""Iterate through `emails` and mail them off one-by-one
>>> from email.mime.text import MIMEText
>>> from sys import stdout
>>> emails = []
>>> for target in ['Moneypenny <mp@sis.gov.uk>', 'M <m@sis.gov.uk>']:
... msg = MIMEText('howdy!', 'plain', 'us-ascii')
... msg['From'] = 'John Doe <jdoe@a.gov.ru>'
... msg['To'] = target
... msg['Bcc'] = 'James Bond <007@sis.gov.uk>'
... emails.append(
... (msg,
... lambda status: stdout.write('SUCCESS: {}\\n'.format(status))))
>>> send_emails(emails, dry_run=True)
... # doctest: +REPORT_UDIFF, +NORMALIZE_WHITESPACE
SUCCESS: None
SUCCESS: None
"""
local_smtp = smtp is None
for msg,callback in emails:
sources = [
_email_utils.formataddr(a) for a in _pgp_mime.email_sources(msg)]
author = sources[0]
targets = [
_email_utils.formataddr(a) for a in _pgp_mime.email_targets(msg)]
_pgp_mime.strip_bcc(msg)
if _LOG.level <= _logging.DEBUG:
# TODO: remove convert_content_transfer_encoding?
#if msg.get('content-transfer-encoding', None) == 'base64':
# convert_content_transfer_encoding(msg, '8bit')
_LOG.debug('\n{}\n'.format(msg.as_string()))
_LOG.info('sending message to {}...'.format(targets))
if not dry_run:
try:
if local_smtp:
smtp = _smtplib.SMTP('localhost')
if debug_target:
targets = [debug_target]
smtp.sendmail(author, targets, msg.as_string())
if local_smtp:
smtp.quit()
except:
_LOG.warning('failed to send message to {}'.format(targets))
if callback:
callback(False)
raise
else:
_LOG.info('sent message to {}'.format(targets))
if callback:
callback(True)
else:
_LOG.info('dry run, so no message sent to {}'.format(targets))
if callback:
callback(None)
class Responder (object):
def __init__(self, *args, **kwargs):
self.args = args
if kwargs is None:
kwargs = {}
self.kwargs = kwargs
def __call__(self, message):
send_emails([(message, None)], *self.args, **self.kwargs)
def get_address(person):
r"""
>>> from pygrader.model.person import Person as Person
>>> p = Person(name='Jack', emails=['a@b.net'])
>>> get_address(p)
'Jack <a@b.net>'
Here's a simple unicode example. The name portion of the address
is encoded following RFC 2047.
>>> p.name = '✉'
>>> get_address(p)
'=?utf-8?b?4pyJ?= <a@b.net>'
Note that the address is in the clear. Otherwise you can have
trouble when your mailer tries to decode the name following
:RFC:`2822`, which limits the locations in which encoded words may
appear.
"""
encoding = _pgp_mime.guess_encoding(person.name)
return _email_utils.formataddr(
(person.name, person.emails[0]), charset=encoding)
def construct_email(author, targets, subject, message, cc=None):
if author.pgp_key:
signers = [author.pgp_key]
else:
signers = []
recipients = [p.pgp_key for p in targets if p.pgp_key]
encrypt = True
for person in targets:
if not person.pgp_key:
encrypt = False # cannot encrypt to every recipient
break
if cc:
recipients.extend([p.pgp_key for p in cc if p.pgp_key])
for person in cc:
if not person.pgp_key:
encrypt = False
break
if not recipients:
        encrypt = False  # no one to encrypt to
if signers and encrypt:
if author.pgp_key not in recipients:
            recipients.append(author.pgp_key)
        message = _pgp_mime.sign_and_encrypt(
            message=message, signers=signers, recipients=recipients,
always_trust=True)
elif signers:
message = _pgp_mime.sign(message=message, signers=signers)
elif encrypt:
message = _pgp_mime.encrypt(message=message, recipients=recipients)
message['Date'] = _email_utils.formatdate()
message['From'] = get_address(author)
message['Reply-to'] = message['From']
message['To'] = ', '.join(
get_address(target) for target in targets)
if cc:
message['Cc'] = ', '.join(
get_address(target) for target in cc)
subject_encoding = _pgp_mime.guess_encoding(subject)
if subject_encoding == 'us-ascii':
message['Subject'] = subject
else:
message['Subject'] = _Header(subject, subject_encoding)
return message
def construct_text_email(author, targets, subject, text, cc=None):
r"""Build a text/plain email using `Person` instances
>>> from pygrader.model.person import Person as Person
>>> author = Person(name='Джон Доу', emails=['jdoe@a.gov.ru'])
>>> targets = [Person(name='Jill', emails=['c@d.net'])]
>>> cc = [Person(name='H.D.', emails=['hd@wall.net'])]
>>> msg = construct_text_email(author, targets, cc=cc,
... subject='Once upon a time', text='Bla bla bla...')
>>> print(msg.as_string()) # doctest: +REPORT_UDIFF, +ELLIPSIS
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: inline
Date: ...
From: =?utf-8?b?0JTQttC+0L0g0JTQvtGD?= <jdoe@a.gov.ru>
Reply-to: =?utf-8?b?0JTQttC+0L0g0JTQvtGD?= <jdoe@a.gov.ru>
To: Jill <c@d.net>
Cc: "H.D." <hd@wall.net>
Subject: Once upon a time
<BLANKLINE>
Bla bla bla...
With unicode text:
>>> msg = construct_text_email(author, targets, cc=cc,
... subject='Once upon a time', text='Funky ✉.')
>>> print(msg.as_string()) # doctest: +REPORT_UDIFF, +ELLIPSIS
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: base64
Content-Disposition: inline
Date: ...
From: =?utf-8?b?0JTQttC+0L0g0JTQvtGD?= <jdoe@a.gov.ru>
Reply-to: =?utf-8?b?0JTQttC+0L0g0JTQvtGD?= <jdoe@a.gov.ru>
To: Jill <c@d.net>
Cc: "H.D." <hd@wall.net>
Subject: Once upon a time
|
nijel/weblate
|
weblate/addons/autotranslate.py
|
Python
|
gpl-3.0
| 2,345
| 0.000427
|
#
# Copyright © 2012–2022 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from datetime import date
from django.conf import settings
from django.db import transaction
from django.utils.translation import gettext_lazy as _
from weblate.addons.base import BaseAddon
from weblate.addons.events import EVENT_COMPONENT_UPDATE, EVENT_DAILY
from weblate.addons.forms import AutoAddonForm
from weblate.trans.tasks import auto_translate_component
class AutoTranslateAddon(BaseAddon):
events = (EVENT_COMPONENT_UPDATE, EVENT_DAILY)
name = "weblate.autotranslate.autotranslate"
    verbose = _("Automatic translation")
description = _(
"Automatically translates strings using machine translation or "
"other components."
)
settings_form = AutoAddonForm
    multiple = True
    icon = "language.svg"
def component_update(self, component):
transaction.on_commit(
lambda: auto_translate_component.delay(
component.pk, **self.instance.configuration
)
)
def daily(self, component):
        # Translate every component less frequently to reduce load.
# The translation is anyway triggered on update, so it should
# not matter that much that we run this less often.
if settings.BACKGROUND_TASKS == "never":
return
today = date.today()
if settings.BACKGROUND_TASKS == "monthly" and component.id % 30 != today.day:
return
if (
settings.BACKGROUND_TASKS == "weekly"
and component.id % 7 != today.weekday()
):
return
self.component_update(component)
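# A standalone sketch (not Weblate code) of the throttling rule implemented in daily() above:
# a component is auto-translated only on "its" day, spreading the work across the month/week.
def _should_run_today(component_id, mode, today):
    if mode == "never":
        return False
    if mode == "monthly":
        return component_id % 30 == today.day
    if mode == "weekly":
        return component_id % 7 == today.weekday()
    return True  # e.g. "daily": run every day

# _should_run_today(42, "weekly", date.today()) is True on exactly one weekday per week.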
|
szha/mxnet
|
python/mxnet/gluon/probability/distributions/cauchy.py
|
Python
|
apache-2.0
| 2,935
| 0.000681
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Cauchy distribution"""
__all__ = ['Cauchy']
from numbers import Number
from numpy import nan, pi
from .constraint import Real
from .distribution import Distribution
from .utils import sample_n_shape_converter
from .... import np
class Cauchy(Distribution):
r"""Create a relaxed Cauchy distribution object.
Parameters
----------
loc : Tensor or scalar, default 0
mode or median of the distribution
scale : Tensor or scalar, default 1
half width at half maximum
"""
# pylint: disable=abstract-method
has_grad = True
support = Real()
arg_constraints = {'loc': Real(), 'scale': Real()}
def __init__(self, loc=0.0, scale=1.0, validate_args=None):
self.loc = loc
self.scale = scale
super(Cauchy, self).__init__(
event_dim=0, validate_args=validate_args)
@property
def mean(self):
return nan
@property
def variance(self):
return nan
def sample(self, size=None):
# TODO: Implement sampling op in the backend.
# `np.zeros_like` does not support scalar at this moment.
        if isinstance(self.loc, Number) and isinstance(self.scale, Number):
u = np.random.uniform(size=size)
else:
u = np.random.uniform(np.zeros_like( # pylint: disable=too-many-function-args
self.loc + self.scale), size=size)
return self.icdf(u)
def sample_n(self, size=None):
return self.sample(sample_n_shape_converter(size))
def log_prob(self, value):
if self._validate_args:
self._validate_samples(value)
return (-np.log(pi) - np.log(self.scale) -
np.log(1 + ((value - self.loc) / self.scale) ** 2))
def cdf(self, value):
if self._validate_args:
self._validate_samples(value)
return np.arctan((value - self.loc) / self.scale) / pi + 0.5
def icdf(self, value):
return np.tan(pi * (value - 0.5)) * self.scale + self.loc
def entropy(self):
return np.log(4 * pi) + np.log(self.scale)
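# A standalone numerical sketch (plain NumPy, not part of this module) of the inverse-CDF
# transform used by icdf()/sample() above: if u ~ Uniform(0, 1), then
# loc + scale * tan(pi * (u - 0.5)) is distributed as Cauchy(loc, scale).
#
#     import numpy as onp   # separate alias; `np` above is mxnet.np
#     u = onp.random.uniform(size=100000)
#     samples = 2.0 + 0.5 * onp.tan(onp.pi * (u - 0.5))
#     onp.median(samples)   # close to loc = 2.0 (the mean and variance are undefined)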
|
kanarinka/UFOScraper
|
scrape.py
|
Python
|
gpl-2.0
| 3,948
| 0.007345
|
import logging, sys, os, codecs
import requests, cache, csv
from bs4 import BeautifulSoup
BASE_URL = "http://www.nuforc.org/webreports/" # needed to reconstruct relative URLs
START_PAGE = "ndxshape.html" # uses the shape page because it needs the fewest requests to get all the data
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # Where the resulting csv file will be created
# set up CSV file
UFO_DATA_CSV = open(os.path.join(BASE_DIR, 'ufodata.csv'), 'wb')
fieldnames = ["date_time", "city", "state", "shape", "duration", "summary", "date_posted"]
csvwriter = csv.DictWriter(UFO_DATA_CSV, delimiter=',', fieldnames=fieldnames)
csvwriter.writeheader()
class DictUnicodeProxy(object):
def __init__(self, d):
self.d = d
def __iter__(self):
return self.d.__iter__()
def get(self, item, default=None):
i = self.d.get(item, default)
if isinstance(i, unicode):
return i.encode('utf-8')
return i
def getColFromIndex(x):
return {
0 : 'date_time',
1: 'city',
2: 'state',
3: 'shape',
4: 'duration',
5: 'summary',
6:'date_posted'
}.get(x, 0)
|
# set up logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# let's scrape
url = BASE_URL + START_PAGE
logger.info("Scraping UFO reports from %s" % url)
# first grab the index page
if not cache.contains(url):
index_page = requests.get(url)
logger.debug("\tadded to cache from %s" % url)
cache.put(url, index_page.text)
content = cache.get(url)
# now pull out all the links to songs
dom = BeautifulSoup(content)
#/html/body/p/table/tbody/tr[1]/td[1]/font/a
link_tags = dom.select("td a")
logger.debug("\tfound %d link tags" % len(link_tags))
links = set([ tag['href'] for tag in link_tags ]) # get all the unique urls
logger.info("\tfound %d links to UFO shapes" % len(links))
# now scrape ufo data from each page that lists reports
tr_count = 0
for ufo_shape_link in links:
shape_url = BASE_URL + ufo_shape_link
if not cache.contains(shape_url):
shape_page = requests.get(shape_url)
logger.debug("\tadded to cache from %s" % shape_url)
cache.put(shape_url,shape_page.text)
content = cache.get(shape_url)
dom = BeautifulSoup(content)
table_rows = dom.select("tr")
tr_count += len(table_rows)
for row in table_rows:
new_row = {}
cols = row.select("td")
i = 0;
for col in cols:
col_name = getColFromIndex(i)
new_row[col_name] = col.getText()
i+=1
csvwriter.writerow(DictUnicodeProxy(new_row))
logger.info("Done (scraped %d rows)!",tr_count)
'''if dom.select("#b p:nth-of-type(2)") is not None and len(dom.select("#b p:nth-of-type(2)")) > 0:
lyrics_tags = dom.select("#b p:nth-of-type(2)")[0].children
lyrics = [child for child in lyrics_tags if child.name!="br"]
for line in lyrics:
lyrics_file.write(line+os.linesep)
line_count = line_count + 1'''
'''
lyrics_file = codecs.open("lyrics-"+artist+".txt", 'w', 'utf-8')
for relative_song_url in links:
song_url = BASE_URL + relative_song_url
if not cache.contains(song_url):
song_page = requests.get(song_url)
logger.debug("\tadded to cache from %s" % song_url)
cache.put(song_url,song_page.text)
content = cache.get(song_url)
dom = BeautifulSoup(content)
if dom.select("#b p:nth-of-type(2)") is not None and len(dom.select("#b p:nth-of-type(2)")) > 0:
lyrics_tags = dom.select("#b p:nth-of-type(2)")[0].children
lyrics = [child for child in lyrics_tags if child.name!="br"]
for line in lyrics:
lyrics_file.write(line+os.linesep)
line_count = line_count + 1
logger.info("Done (scraped %d lines)!",line_count)
'''
|
jumpstarter-io/keystone
|
keystone/tests/unit/test_v3_credential.py
|
Python
|
apache-2.0
| 16,723
| 0
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import json
import uuid
from keystoneclient.contrib.ec2 import utils as ec2_utils
from oslo_config import cfg
from testtools import matchers
from keystone import exception
from keystone.tests.unit import test_v3
CONF = cfg.CONF
class CredentialBaseTestCase(test_v3.RestfulTestCase):
def _create_dict_blob_credential(self):
blob = {"access": uuid.uuid4().hex,
"secret": uuid.uuid4().hex}
credential_id = hashlib.sha256(blob['access']).hexdigest()
credential = self.new_credential_ref(
user_id=self.user['id'],
project_id=self.project_id)
credential['id'] = credential_id
        # Store the blob as a dict *not* JSON (ref bug #1259584).
        # This means we can test that the dict->json workaround, added
        # as part of the bugfix for backwards compatibility, works.
credential['blob'] = blob
credential['type'] = 'ec2'
        # Create directly via the DB API to avoid validation failure
self.credential_api.create_credential(
credential_id,
credential)
expected_blob = json.dumps(blob)
return expected_blob, credential_id
class CredentialTestCase(CredentialBaseTestCase):
"""Test credential CRUD."""
def setUp(self):
super(CredentialTestCase, self).setUp()
self.credential_id = uuid.uuid4().hex
self.credential = self.new_credential_ref(
user_id=self.user['id'],
project_id=self.project_id)
self.credential['id'] = self.credential_id
self.credential_api.create_credential(
self.credential_id,
self.credential)
def test_credential_api_delete_credentials_for_project(self):
self.credential_api.delete_credentials_for_project(self.project_id)
# Test that the credential that we created in .setUp no longer exists
# once we delete all credentials for self.project_id
self.assertRaises(exception.CredentialNotFound,
self.credential_api.get_credential,
credential_id=self.credential_id)
def test_credential_api_delete_credentials_for_user(self):
self.credential_api.delete_credentials_for_user(self.user_id)
# Test that the credential that we created in .setUp no longer exists
# once we delete all credentials for self.user_id
self.assertRaises(exception.CredentialNotFound,
self.credential_api.get_credential,
credential_id=self.credential_id)
def test_list_credentials(self):
"""Call ``GET /credentials``."""
r = self.get('/credentials')
self.assertValidCredentialListResponse(r, ref=self.credential)
def test_list_credentials_filtered_by_user_id(self):
"""Call ``GET /credentials?user_id={user_id}``."""
credential = self.new_credential_ref(
user_id=uuid.uuid4().hex)
self.credential_api.create_credential(
credential['id'], credential)
r = self.get('/credentials?user_id=%s' % self.user['id'])
self.assertValidCredentialListResponse(r, ref=self.credential)
for cred in r.result['credentials']:
self.assertEqual(self.user['id'], cred['user_id'])
def test_create_credential(self):
"""Call ``POST /credentials``."""
ref = self.new_credential_ref(user_id=self.user['id'])
r = self.post(
'/credentials',
body={'credential': ref})
self.assertValidCredentialResponse(r, ref)
def test_get_credential(self):
"""Call ``GET /credentials/{credential_id}``."""
r = self.get(
'/credentials/%(credential_id)s' % {
'credential_id': self.credential_id})
self.assertValidCredentialResponse(r, self.credential)
def test_update_credential(self):
"""Call ``PATCH /credentials/{credential_id}``."""
ref = self.new_credential_ref(
user_id=self.user['id'],
project_id=self.project_id)
del ref['id']
r = self.patch(
'/credentials/%(credential_id)s' % {
'credential_id': self.credential_id},
body={'credential': ref})
self.assertValidCredentialResponse(r, ref)
def test_delete_credential(self):
"""Call ``DELETE /credentials/{credential_id}``."""
self.delete(
'/credentials/%(credential_id)s' % {
'credential_id': self.credential_id})
def test_create_ec2_credential(self):
"""Call ``POST /credentials`` for creating ec2 credential."""
ref = self.new_credential_ref(user_id=self.user['id'],
project_id=self.project_id)
blob = {"access": uuid.uuid4().hex,
"secret": uuid.uuid4().hex}
ref['blob'] = json.dumps(blob)
ref['type'] = 'ec2'
r = self.post(
'/credentials',
body={'credential': ref})
self.assertValidCredentialResponse(r, ref)
# Assert credential id is same as hash of access key id for
# ec2 credentials
self.assertEqual(r.result['credential']['id'],
hashlib.sha256(blob['access']).hexdigest())
# Create second ec2 credential with the same access key id and check
# for conflict.
self.post(
'/credentials',
body={'credential': ref}, expected_status=409)
def test_get_ec2_dict_blob(self):
"""Ensure non-JSON blob data is correctly converted."""
expected_blob, credential_id = self._create_dict_blob_credential()
r = self.get(
'/credentials/%(credential_id)s' % {
'credential_id': credential_id})
self.assertEqual(expected_blob, r.result['credential']['blob'])
def test_list_ec2_dict_blob(self):
"""Ensure non-JSON blob data is correctly converted."""
        expected_blob, credential_id = self._create_dict_blob_credential()
list_r = self.get('/credentials')
list_creds = list_r.result['credentials']
list_ids = [r['id'] for r in list_creds]
self.assertIn(credential_id, list_ids)
for r in list_creds:
if r['id'] == credential_id:
self.assertEqual(expected_blob, r['blob'])
|
def test_create_non_ec2_credential(self):
"""Call ``POST /credentials`` for creating non-ec2 credential."""
ref = self.new_credential_ref(user_id=self.user['id'])
blob = {"access": uuid.uuid4().hex,
"secret": uuid.uuid4().hex}
ref['blob'] = json.dumps(blob)
r = self.post(
'/credentials',
body={'credential': ref})
self.assertValidCredentialResponse(r, ref)
# Assert credential id is not same as hash of access key id for
# non-ec2 credentials
self.assertNotEqual(r.result['credential']['id'],
hashlib.sha256(blob['access']).hexdigest())
def test_create_ec2_credential_with_missing_project_id(self):
"""Call ``POST /credentials`` for creating ec2
credential with missing project_id.
"""
ref = self.new_credential_ref(user_id=self.user['id'])
blob = {"access": uuid.uuid4().hex,
"secret": uuid.uuid4().hex}
ref['blob'] = json.dumps(blob)
ref['type'] = 'ec2'
# Assert 400 status for bad request with missing project_id
self.post(
'/credentials',
body={'crede
|
liuhong1happy/DockerConsoleApp
|
views/login.py
|
Python
|
apache-2.0
| 2,538
| 0.019429
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from services.user import UserService
import tornado.web
import tornado.gen
import tornado.escape
import json
import settings
from util.stmp import send_email
from views import BaseHandler
class LoginHandler(tornado.web.RequestHandler):
def get(self):
self.render("login.html")
class SigninHandler(BaseHandler):
s_user = UserService()
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
name_or_email = self.get_argument("name_or_email")
password = self.get_argument("password")
fields={
"name":True,
"email":True,
"gitlab":True,
"last_time":True,
"login_time":True,
"create_time":True,
"password":True
}
user =yield self.s_user.signin(name_or_email, password,fields=fields)
if user is None:
self.render_error(error_code=404,msg="login failed")
else:
self.set_current_user(user)
self.write_result()
class SignupHandler(BaseHandler):
s_user = UserService()
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
name = self.get_argument("name")
email = self.get_argument("email")
password = self.get_argument("password")
fields={
"name":True,
"email":True,
"gitlab":True,
"last_time":True,
"login_time":True,
"create_time":True,
"password":True
}
hasName =yield self.s_user.find_one({"name":name}, fields = fields)
hasEmail =yield self.s_user.find_one({"email":email}, fields = fields)
if( (hasName is not None) or (hasEmail is not None) ):
            self.render_error(error_code=404, msg='user already exists')
else:
            user = yield self.s_user.signup(name, email, password, fields)
if not user:
self.render_error(error_code=404,msg='signup failed')
else:
self.write_result(data=user)
class ForgetHandler(BaseHandler):
s_user = UserService()
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
        email = self.get_argument("email")
        # Reset the password
        user = yield self.s_user.forget(email)
        # Send the notification email
        send_email()
if not user:
            self.write_result(msg='user does not exist')
else:
self.write_result(data=user)
|
heikoheiko/pyethapp
|
pyethapp/jsonrpc.py
|
Python
|
bsd-3-clause
| 41,771
| 0.001077
|
import time
from copy import deepcopy
from decorator import decorator
from collections import Iterable
import inspect
import ethereum.blocks
from ethereum.utils import (is_numeric, is_string, int_to_big_endian, big_endian_to_int,
encode_hex, decode_hex, sha3, zpad)
import ethereum.slogging as slogging
from ethereum.slogging import LogRecorder
from ethereum.transactions import Transaction
from ethereum import processblock
import gevent
import gevent.wsgi
import gevent.queue
import rlp
from tinyrpc.dispatch import RPCDispatcher
from tinyrpc.dispatch import public as public_
from tinyrpc.exc import BadRequestError, MethodNotFoundError
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol, JSONRPCInvalidParamsError
from tinyrpc.server.gevent import RPCServerGreenlets
from tinyrpc.transports.wsgi import WsgiServerTransport
from devp2p.service import BaseService
from eth_protocol import ETHProtocol
from ethereum.utils import denoms
logger = log = slogging.get_logger('jsonrpc')
slogging.configure(config_string=':debug')
# defaults
default_startgas = 100 * 1000
default_gasprice = 100 * 1000 * denoms.wei
def _fail_on_error_dispatch(self, request):
method = self.get_method(request.method)
# we found the method
result = method(*request.args, **request.kwargs)
return request.respond(result)
RPCDispatcher._dispatch = _fail_on_error_dispatch
# route logging messages
class WSGIServerLogger(object):
_log = slogging.get_logger('jsonrpc.wsgi')
@classmethod
def log(cls, msg):
cls._log.debug(msg.strip())
write = log
@classmethod
def log_error(cls, msg, *args):
cls._log.error(msg % args)
gevent.wsgi.WSGIHandler.log_error = WSGIServerLogger.log_error
# hack to return the correct json rpc error code if the param count is wrong
# (see https://github.com/mbr/tinyrpc/issues/19)
public_methods = dict()
def public(f):
public_methods[f.__name__] = inspect.getargspec(f)
def new_f(*args, **kwargs):
try:
inspect.getcallargs(f, *args, **kwargs)
except TypeError:
raise JSONRPCInvalidParamsError()
else:
return f(*args, **kwargs)
new_f.func_name = f.func_name
new_f.func_doc = f.func_doc
return public_(new_f)
class LoggingDispatcher(RPCDispatcher):
"""A dispatcher that logs every RPC method call."""
def __init__(self):
super(LoggingDispatcher, self).__init__()
self.logger = log.debug
def dispatch(self, request):
if isinstance(request, Iterable):
request_list = request
else:
request_list = [request]
for req in request_list:
self.logger('RPC call', method=req.method, args=req.args, kwargs=req.kwargs,
id=req.unique_id)
response = super(LoggingDispatcher, self).dispatch(request)
if isinstance(response, Iterable):
response_list = response
else:
response_list = [response]
for res in response_list:
            if hasattr(res, 'result'):
self.logger('RPC result', id=res.unique_id, result=res.result)
else:
self.logger('RPC error', id=res.unique_id, error=res.error)
return response
class JSONRPCServer(BaseService):
"""Service providing an HTTP server with JSON RPC interface.
Other services can extend the JSON RPC interface by creating a
:class:`Subdispatcher` and registering it via
`Subdispatcher.register(self.app.services.json_rpc_server)`.
    Alternatively :attr:`dispatcher` can be extended directly (see
https://tinyrpc.readthedocs.org/en/latest/dispatch.html).
"""
name = 'jsonrpc'
default_config = dict(jsonrpc=dict(listen_port=4000, listen_host='127.0.0.1'))
@classmethod
def subdispatcher_classes(cls):
return (Web3, Net, Compilers, DB, Chain, Miner, FilterManager)
def __init__(self, app):
log.debug('initializing JSONRPCServer')
BaseService.__init__(self, app)
self.app = app
self.dispatcher = LoggingDispatcher()
# register sub dispatchers
for subdispatcher in self.subdispatcher_classes():
subdispatcher.register(self)
transport = WsgiServerTransport(queue_class=gevent.queue.Queue)
# start wsgi server as a background-greenlet
self.listen_port = app.config['jsonrpc']['listen_port']
self.listen_host = app.config['jsonrpc']['listen_host']
self.wsgi_server = gevent.wsgi.WSGIServer((self.listen_host, self.listen_port),
transport.handle, log=WSGIServerLogger)
self.rpc_server = RPCServerGreenlets(
transport,
JSONRPCProtocol(),
self.dispatcher
)
self.default_block = 'latest'
def _run(self):
log.info('starting JSONRPCServer', port=self.listen_port)
# in the main greenlet, run our rpc_server
self.wsgi_thread = gevent.spawn(self.wsgi_server.serve_forever)
self.rpc_server.serve_forever()
def stop(self):
log.info('stopping JSONRPCServer')
self.wsgi_thread.kill()
def get_block(self, block_id=None):
"""Return the block identified by `block_id`.
This method also sets :attr:`default_block` to the value of `block_id`
which will be returned if, at later calls, `block_id` is not provided.
        Subdispatchers using this function have to ensure that a
chainmanager is registered via :attr:`required_services`.
:param block_id: either the block number as integer or 'pending',
'earliest' or 'latest', or `None` for the default
block
:returns: the requested block
:raises: :exc:`KeyError` if the block does not exist
"""
assert 'chain' in self.app.services
chain = self.app.services.chain.chain
if block_id is None:
block_id = self.default_block
else:
self.default_block = block_id
if block_id == 'pending':
return self.app.services.chain.chain.head_candidate
if block_id == 'latest':
return chain.head
if block_id == 'earliest':
block_id = 0
if is_numeric(block_id):
# by number
hash_ = chain.index.get_block_by_number(block_id)
else:
# by hash
assert is_string(block_id)
hash_ = block_id
return chain.get(hash_)
class Subdispatcher(object):
"""A JSON RPC subdispatcher which can be registered at JSONRPCService.
:cvar prefix: common prefix shared by all rpc methods implemented by this
subdispatcher
:cvar required_services: a list of names of services the subdispatcher
is built on and will be made available as
instance variables
"""
prefix = ''
required_services = []
@classmethod
def register(cls, json_rpc_service):
"""Register a new instance at ``json_rpc_service.dispatcher``.
The subdispatcher will be able to access all required services as well
as the app object as attributes.
If one of the required services is not available, log this as warning
but don't fail.
"""
dispatcher = cls()
for service_name in cls.required_services:
try:
service = json_rpc_service.app.services[service_name]
except KeyError:
log.warning('No {} registered. Some RPC methods will not be '
'available'.format(service_name))
return
setattr(dispatcher, service_name, service)
dispatcher.app = json_rpc_service.app
dispatcher.json_rpc_server = json_rpc_service
json_rpc_service.dispatcher.register_instance(dispatcher, cls.prefix)
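# A minimal sketch of a custom subdispatcher (illustrative only; the class name, prefix and
# method below are made up and not part of pyethapp). Subclass Subdispatcher, mark RPC
# methods with @public, then register against the running JSONRPCServer instance.
class ExampleDispatcher(Subdispatcher):
    prefix = 'example_'
    required_services = []

    @public
    def echo(self, value):
        # exposed over JSON RPC as "example_echo"
        return value

# ExampleDispatcher.register(app.services.jsonrpc)  # hypothetical registration call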
def quantity_decoder(data):
"""Decode `data` representing a quantity."""
if not is_string(data):
success = False
elif not data.startswith('
|
vayan/external-video
|
app/mpv.py
|
Python
|
mit
| 487
| 0
|
#!/usr/bin/env python2
import sys
import json
import struct
import subprocess
import shlex
def getMessage():
rawLength = sys.stdin.read(4)
if len(rawLength) == 0:
sys.exit(0)
messageLength = struct.unpack('@I', rawLength)[0]
message = sys.stdin.read(messageLength)
return json.loads(message)
while True:
mpv_args = getMessage()
if len(mpv_args) > 1:
args = shlex.split("mpv " + mpv_args)
        subprocess.call(args)
sys.exit(0)
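# A standalone sketch (not part of this host script) of how a caller could frame a message
# for the native-messaging protocol read by getMessage() above: a 4-byte native-endian
# length prefix followed by the JSON payload.
#
#     def encode_message(obj):
#         payload = json.dumps(obj)
#         return struct.pack('@I', len(payload)) + payload
#
#     # e.g. (hypothetical pipe to this script):
#     # host_process.stdin.write(encode_message("--fullscreen https://example.com/video"))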
|