| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
rickyschools/JARVIS
|
ml/learners.py
|
Python
|
mit
| 1,120
| 0.001786
|
from GIDEON.ml.ml_utilties import *
_run_types = ['production', 'diagnostics']
_load_types = [True, False]
class ActionModeler:
def __init__(self):
self._raw_features, self._target = load_action_data()
def run_diagnostics(self, save=True):
d = lambda x: ' ' if save else ' not '
print('Entering diagnostics. The best model will%sbe saved.' % (d(save)))
winning_model = diagnostics(self._raw_features, self._target)
if save:
save_model(winning_model)
print('Exiting diagnostics.')
return winning_model
else:
return winning_model
def predict(self, request, use_saved=True):
request, _ = pre_processing(request)
# print(os.path.isfile(os.path.join(model_loc, model_name)))
if use_saved and os.path.isfile(os.path.join(model_loc, model_name)):
model = read_saved_model()
else:
print('Running diagnostics before making prediction.')
model = self.run_diagnostics()
return model.predict(request)[0], max(model.predict_proba(request)[0])
|
uwcirg/true_nth_usa_portal
|
portal/migrations/versions/2ddfab3c6533_.py
|
Python
|
bsd-3-clause
| 756
| 0.007937
|
"""empty message
Revision ID: 2ddfab3c6533
Revises: 2214f33d3a7b
Create Date: 2016-01-28 12:01:06.774836
"""
# revision identifiers, used by Alembic.
revision = '2ddfab3c6533'
down_revision = '2214f33d3a7b'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column(
'locale', sa.String(length=20), nullable=True))
op.add_column('users', sa.Column(
'timezone', sa.String(length=20), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'timezone')
op.drop_column('users', 'locale')
### end Alembic commands ###
|
LLNL/spack
|
var/spack/repos/builtin/packages/dust/package.py
|
Python
|
lgpl-2.1
| 1,546
| 0.001294
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dust(Package):
"""du + rust = dust. Like du but more intuitive."""
homepage = "https://github.com/bootandy/dust"
url = "https://github.com/bootandy/dust/archive/v0.7.5.tar.gz"
maintainers = ["fangohr"]
version(
"0.7.5",
sha256="f892aaf7a0a7852e12d01b2ced6c2484fb6dc5fe7562abdf0c44a2d08aa52618",
)
depends_on("rust")
sanity_check_is_file = [join_path("bin", "dust")]
def install(self, spec, prefix):
cargo = which("cargo")
cargo("install", "--root", prefix, "--path", ".")
@run_after("install")
def check_install(self):
print("Attempt to call 'dust' with '--ver
|
sion'")
dust = Executable(join_path(self.spec["dust"].prefix.bin, "dust"))
output = dust(
"--version",
output=str.split,
)
print("stdout received fromm dust is '{}".format(output))
assert "Dust " in output
def test(self):
"""Run this
|
smoke test when requested explicitly"""
dustpath = join_path(self.spec["dust"].prefix.bin, "dust")
options = ["--version"]
purpose = "Check dust can execute (with option '--version')"
expected = ["Dust "]
self.run_test(
dustpath, options=options, expected=expected, status=[0], purpose=purpose
)
|
livoras/py-algorithm
|
searching/hash-chain.py
|
Python
|
mit
| 1,550
| 0.002581
|
#-*- coding: utf-8 -*-
class Hash():
def __init__(self):
self.size = 20
self.slots = []
for i in xrange(0, 20):
self.slots.append([])
def __setitem__(self, key, value):
chain = self.slots[self.hash(key)]
for data in chain:
if data[0] == key:
data[1] = value
return True
chain.append([key, value])
def __getitem__(self, key):
chain = self.slots[self.hash(key)]
for data in chain:
if data[0] == key:
return data[1]
return None
def delete(self, key):
slot = self.hash(key)
chain = self.slots[slot]
for i, data in enumerate(chain):
if data[0] == key:
del chain[i]
return True
raise ValueError("Key %s if not found." % key)
def hash(self, key):
return self.stoi(key) % self.size
def stoi(self, key):
inte = 0
for c in key:
inte = inte + ord(c)
return inte
h = Hash()
h["fuck"] = val = {"name": "jerry"}
h["ucfk"] = val2 = {"name": "lucy"}
h["ufck"] = val3 = {"name": "tony"}
h["uckf"] = val4 = {"name": "honey"}
assert h["fuck"] == val
assert h["ucfk"] == val2
assert h["ufck"] == val3
assert h["uckf"] == val4
h["love"] = "you"
h.delete("love")
assert h["love"] == None
h["you"] = "cool"
h["uoy"] = "sucks"
assert h["you"] == "cool"
assert h["uoy"] == "sucks"
h.delete("you")
assert h["you"] == None
h["uoy"] = "Fool"
assert h["uoy"] == "Fool"
|
pjknkda/werkzeug
|
tests/contrib/__init__.py
|
Python
|
bsd-3-clause
| 207
| 0
|
# -*- coding: utf-8 -*-
"""
tests.contrib
~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the contrib modules.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
|
zhangf911/KlayGE
|
build_kfont.py
|
Python
|
gpl-2.0
| 435
| 0.02069
|
#!/usr/bin/env python
#-*- coding: ascii -*-
from __future__ import print_function
import sys
from blib_util import *
def build_kfont(build_info):
for compiler_info in build_info.compilers:
build_a_project("kfont", "kfont", build_info, compiler_info, True)
if __name__ == "__main__":
cfg = cfg_from_argv(sys.argv)
bi = build_info(cfg.compiler, cfg.archs, cfg.cfg)
print("Building kfont...")
build_kfont(bi)
|
huegli/automation
|
picrename/picrename/prnm/renops.py
|
Python
|
mit
| 8,392
| 0.002741
|
import os
import re
import logging
from picrename.prnm import fileops
class DateStrError(Exception):
pass
def exif_to_datetimestr(exif_data_string):
"""
Extracts the date from an EXIF tag and reformats it
"""
dateregex = re.compile(r"""
(?P<year>\d\d\d\d): # match the year
(?P<month>\d\d): # match the month
(?P<day>\d\d) # match the day
\s
(?P<hour>\d\d): # match the hour
(?P<min>\d\d): # match the minute
(?P<sec>\d\d) # match the second
""", re.VERBOSE)
match = re.match(dateregex, exif_data_string)
if match:
year = match.group(1)
month = match.group(2)
day = match.group(3)
hour = match.group(4)
mins = match.group(5)
sec = match.group(6)
return year + month + day + hour + mins + sec
else:
raise DateStrError
def metadata_to_datetimestr(metadata_string):
"""
Extracts the date from a creation metadata tag and reformats it
"""
dateregex = re.compile(r"""
.*
(?P<year>\d\d\d\d)- # match the year
(?P<month>\d\d)- # match the month
(?P<day>\d\d) # match the day
\s
(?P<hour>\d\d): # match the hour
(?P<min>\d\d): # match the minute
(?P<sec>\d\d) # match the second
""", re.VERBOSE)
match = re.match(dateregex, metadata_string)
if match:
year = match.group(1)
month = match.group(2)
day = match.group(3)
hour = match.group(4)
mins = match.group(5)
sec = match.group(6)
return year + month + day + hour + mins + sec
else:
raise DateStrError
def get_fname_ext(fname):
"""
Helper function to get the extension of a filename
"""
splitext = os.path.splitext(fname)
return splitext[-1]
def incr_indexstr(indexstr):
"""
Increments a numerical index in string form.
String will be truncated to original length on roll-over
"""
index = int(indexstr)
length = len(indexstr)
index = index + 1
# fill in leading zero's
newindexstr = str(index).rjust(length, "0")
# maintain original length, truncating on the right if needed
return newindexstr[-length:]
def extract_exif(fname):
"""
Attempt to extract a properly formatted datetimestr from a valid EXIF
tag in the given filename. Return empty string if not successful.
"""
try:
# check if file has EXIF date, exception if not
exif_data = fileops.get_exif_datetimeorig_tag(fname)
# extract the date/time string from EXIF, exception if
# not the proper format
datetimestr = exif_to_datetimestr(exif_data)
logging.debug("Found EXIF Tag %r for file %r", datetimestr,
os.path.basename(fname))
return datetimestr
except fileops.EXIFTagError:
logging.warning("%r does not have a proper EXIF tag",
os.path.basename(fname))
return "";
except DateStrError:
logging.warning("%r EXIF tag not the right format",
os.path.basename(fname))
return "";
def extract_date_metadata(fname):
"""
Attempt to extract a properly formatted datetimestr from a valid date
metadata tag in the given filename. Return empty string if not successful.
"""
try:
# check if file has creation date, exception if not
date_metadata = fileops.get_video_creation_date_metadata(fname)
# extract the date/time string from metadata, exception if
# not the proper format
datetimestr = metadata_to_datetimestr(date_metadata)
logging.debug("Found creation date metadata %r for file %r",
datetimestr, os.path.basename(fname))
return datetimestr
except fileops.VideoMetadataError:
logging.warning(
"%r does not have a proper creation date metadata",
os.path.basename(fname))
return ""
except DateStrError:
logging.warning(
"%r creation data metadata not the right format",
os.path.basename(fname))
return ""
def rename_all(dirpath, startletter, startindex, verbose=1):
"""
Renames all files in a directory that have EXIF data using the
DateTimeOrig tag information. Renamed files will have the format
'YYYYMMDD_<startletter>_<incr index>'.
:param dirpath: Path to do the renaming in
:param startletter: letter that forms part of the renamed filename
:param startindex: incrementing index that forms part of the
renamed filename
:Example:
>>> import os
>>> print os.listdir(".")
['IMG_1234.JPG']
>>> import renops
>>> print renops.rename_all(".", "A", "001")
>>> print os.listdir(".")
['20110217_A_001.JPG']
.. note:: If dirpath contains subdirectories, these are processed
recursively.
"""
if (verbose == 0):
logging.getLogger().setLevel(logging.ERROR)
elif (verbose == 1):
logging.getLogger().setLevel(logging.WARNING)
elif (verbose == 2):
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.DEBUG)
indexstr = startindex
datetimestr_to_fullfname_dict = {}
# iterate over all files in subdirectories from given root directory
for rootdir, alldirs, allfiles in os.walk(dirpath):
for afile in allfiles:
# create the full path to the file
fullfname = os.path.join(rootdir, afile)
# check if there is a valid file
if not (os.path.exists(fullfname) and
os.path.isfile(fullfname)):
logging.warning("Cannot access %r, skipping it", fullfname)
continue
# First try if the file is an image file with EXIF tags
# if so, return valid datetimestr, otherwise try date metadata
datetimestr = extract_exif(fullfname)
if not (datetimestr):
datetimestr = extract_date_metadata(fullfname)
# if valid datetimestr
if (datetimestr):
# this will handle the case when there is already the exact
# same datetimestr in the dictionary(shouldn't happen often)
while (datetimestr in datetimestr_to_fullfname_dict):
datetimestr = datetimestr + '*'
datetimestr_to_fullfname_dict[datetimestr] = fullfname
logging.info(
"Entering datetimestr %r to dictionary", datetimestr)
else:
logging.warning(
"No EXIF or date metadata found in %r, skipping it",
fullfname)
# Go through the alphabetically (and therefore time-stamp sorted)
# list of keys of the dictionary to do the rename
for a_dtstr in sorted(datetimestr_to_fullfname_dict.keys()):
# we discard the time portion as we don't need it for
# the filename
datestr = a_dtstr[:8]
# the file extension from original filename
afileext = get_fname_ext(
datetimestr_to_fullfname_dict[a_dtstr]).upper()
newfname = datestr + "_" + startletter + "_" + indexstr + afileext
# create the new full filename by taking existing path of old
# full filename and combining with new file name
newfullfname = os.path.join(
os.path.dirname(datetimestr_to_fullfname_dict[a_dtstr]),
newfname)
try:
logging.info("Renaming %r -> %r",
datetimestr_to_fullfname_dict[a_dtstr],
newfullfname)
os.rename(datetimestr_to_fullfname_dict[a_dtstr],
newfullfname)
except os.error as o
|
minrk/oauthenticator
|
docs/source/example-oauthenticator.py
|
Python
|
bsd-3-clause
| 3,918
| 0.00051
|
"""
Example OAuthenticator to use with My Service
"""
import json
from jupyterhub.auth import LocalAuthenticator
from oauthenticator.oauth2 import OAuthLoginHandler, OAuthenticator
from tornado.auth import OAuth2Mixin
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient, HTTPError
class MyServiceMixin(OAuth2Mixin):
# authorize is the URL users are redirected to to authorize your service
_OAUTH_AUTHORIZE_URL = "https://myservice.biz/login/oauth/authorize"
# token is the URL JupyterHub accesses to finish the OAuth process
_OAUTH_ACCESS_TOKEN_URL = "https://myservice.biz/login/oauth/access_token"
class MyServiceLoginHandler(OAuthLoginHandler, MyServiceMixin):
pass
class GitHubOAuthenticator(OAuthenticator):
# login_service is the text displayed on the "Login with..." button
login_service = "My Service"
login_handler = MyServiceLoginHandler
async def authenticate(self, handler, data=None):
"""We set up auth_state based on additional GitHub info if we
receive it.
"""
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for an Access Token
# this is the TOKEN URL in your provider
params = dict(
client_id=self.client_id, client_secret=self.client_secret, code=code
)
url = url_concat("https://myservice.biz/login/oauth/access_token", params)
req = HTTPRequest(
url, method="POST", headers={"Accept": "application/json"}, body=''
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
if 'access_token' in resp_json:
access_token = resp_json['access_token']
elif 'error_description' in resp_json:
raise HTTPError(
403,
"An access token was not returned: {}".format(
resp_json['error_description']
),
)
else:
raise HTTPError(500, "Bad response: {}".format(resp))
# Determine who the logged in user is
# by using the new access token to make a request
# check with your OAuth provider for this URL.
# it could also be in the response to the token request,
# making this request unnecessary.
req = HTTPRequest(
"https://myservice.biz/api/user"
|
,
method="GET",
headers={"Authorization": f"Bearer {access_token}"},
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
# check the documentation for what field contains a unique username
# it might not be the 'username'!
username = resp_json["username"]
if not username:
# return None means that no user is authenticated
# and login has failed
return None
# here we can add additional checks such as against team whitelists
# if the OAuth provider has such a concept
# 'name' is the JupyterHub username
user_info = {"name": username}
# We can also persist auth state,
# which is information encrypted in the Jupyter database
# and can be passed to the Spawner for e.g. authenticated data access
# these fields are up to you, and not interpreted by JupyterHub
# see Authenticator.pre_spawn_start for how to use this information
user_info["auth_state"] = auth_state = {}
auth_state['access_token'] = access_token
auth_state['auth_reply'] = resp_json
return user_info
class LocalGitHubOAuthenticator(LocalAuthenticator, GitHubOAuthenticator):
"""A version that mixes in local system user creation"""
pass
|
MingoDynasty/FoscamSort
|
EmailController.py
|
Python
|
mit
| 1,372
| 0
|
import logging # Provides access to logging api.
import smtplib
class EmailController:
def __init__(self, username, password, server, port, sender_name):
self.logger = logging.getLogger(__name__)
self.username = username
self.password = password
self.server = server
self.port = port
self.sender_name = sender_name
return
def __del__(self):
return
def sendEmail(self, send_to, subject, text):
self.logger.debug("Trying smtp...")
server = smtplib.SMTP(self.server, self.port)
self.logger.debug("Trying ehlo...")
server.ehlo()
self.logger.debug("Trying starttls...")
server.starttls()
self.logger.debug("Trying login...")
server.login(self.username, self.password)
body = '\r\n'.join(['To: %s' % send_to,
'From: %s' % self.sender_name,
'Subject: %s' % subject,
'', text])
try:
self.logger.debug("Attempting to send....")
server.sendmail(self.username, [send_to], body)
self.logger.debug("Successfully sent.")
except smtplib.SMTPException:
self.logger.error("Failed to send.")
return False
server.quit()
return True
# end EmailController
|
kaksmet/servo
|
tests/wpt/web-platform-tests/tools/wptserve/wptserve/pipes.py
|
Python
|
mpl-2.0
| 13,913
| 0.000934
|
from cgi import escape
import gzip as gzip_module
import re
import time
import types
import uuid
from cStringIO import StringIO
def resolve_content(response):
rv = "".join(item for item in response.iter_content())
if type(rv) == unicode:
rv = rv.encode(response.encoding)
return rv
class Pipeline(object):
pipes = {}
def __init__(self, pipe_string):
self.pipe_functions = self.parse(pipe_string)
def parse(self, pipe_string):
functions = []
for item in PipeTokenizer().tokenize(pipe_string):
if not item:
break
if item[0] == "function":
functions.append((self.pipes[item[1]], []))
elif item[0] == "argument":
functions[-1][1].append(item[1])
return functions
def __call__(self, request, response):
for func, args in self.pipe_functions:
response = func(request, response, *args)
return response
class PipeTokenizer(object):
def __init__(self):
#This whole class can likely be replaced by some regexps
self.state = None
def tokenize(self, string):
self.string = string
self.state = self.func_name_state
self._index = 0
while self.state:
yield self.state()
yield None
def get_char(self):
if self._index >= len(self.string):
return None
rv = self.string[self._index]
self._index += 1
return rv
def func_name_state(self):
rv = ""
while True:
char = self.get_char()
if char is None:
self.state = None
if rv:
return ("function", rv)
else:
return None
elif char == "(":
self.state = self.argument_state
return ("function", rv)
elif char == "|":
if rv:
return ("function", rv)
else:
rv += char
def argument_state(self):
rv = ""
while True:
char = self.get_char()
if char is None:
self.state = None
return ("argument", rv)
elif char == "\\":
rv += self.get_escape()
if rv is None:
#This should perhaps be an error instead
return ("argument", rv)
elif char == ",":
return ("argument", rv)
elif char == ")":
self.state = self.func_name_state
return ("argument", rv)
else:
rv += char
def get_escape(self):
char = self.get_char()
escapes = {"n": "\n",
"r": "\r",
"t": "\t"}
return escapes.get(char, char)
class pipe(object):
def __init__(self, *arg_converters):
self.arg_converters = arg_converters
self.max_args = len(self.arg_converters)
self.min_args = 0
opt_seen = False
for item in self.arg_converters:
if not opt_seen:
if isinstance(item, opt):
opt_seen = True
else:
self.min_args += 1
else:
if not isinstance(item, opt):
raise ValueError("Non-optional argument cannot follow optional argument")
def __call__(self, f):
def inner(request, response, *args):
if not (self.min_args <= len(args) <= self.max_args):
raise ValueError("Expected between %d and %d args, got %d" %
(self.min_args, self.max_args, len(args)))
arg_values = tuple(f(x) for f, x in zip(self.arg_converters, args))
return f(request, response, *arg_values)
Pipeline.pipes[f.__name__] = inner
#We actually want the undecorated function in the main namespace
return f
class opt(object):
def __init__(self, f):
self.f = f
def __call__(self, arg):
return self.f(arg)
def nullable(func):
def inner(arg):
if arg.lower() == "null":
return None
else:
return func(arg)
return inner
def boolean(arg):
if arg.lower() in ("true", "1"):
return True
elif arg.lower() in ("false", "0"):
return False
raise ValueError
@pipe(int)
def status(request, response, code):
"""Alter the status code.
:param code: Status code to use for the response."""
response.status = code
return response
@pipe(str, str, opt(boolean))
def header(request, response, name, value, append=False):
"""Set a HTTP header.
Replaces any existing HTTP header of the same name unless
append is set, in which case the header is appended without
replacement.
:param name: Name of the header to set.
:param value: Value to use for the header.
:param append: True if existing headers should not be replaced
"""
if not append:
response.headers.set(name, value)
else:
response.headers.append(name, value)
return response
@pipe(str)
def trickle(request, response, delays):
"""Send the response in parts, with time delays.
:param delays: A string of delays and amounts, in bytes, of the
response to send. Each component is separated by
a colon. Amounts in bytes are plain integers, whilst
delays are floats prefixed with a single d e.g.
d1:100:d2
Would cause a 1 second delay, would then send 100 bytes
of the file, and then cause a 2 second delay, before sending
the remainder of the file.
If the last token is of the form rN, instead of sending the
remainder of the file, the previous N instructions will be
repeated until the whole file has been sent e.g.
d1:100:d2:r2
Causes a delay of 1s, then 100 bytes to be sent, then a 2s delay
and then a further 100 bytes followed by a two second delay
until the response has been fully sent.
"""
def parse_delays():
parts = delays.split(":")
rv = []
for item in parts:
if item.startswith("d"):
item_type = "delay"
item = item[1:]
value = float(item)
elif item.startswith("r"):
item_type = "repeat"
value = int(item[1:])
if not value % 2 == 0:
raise ValueError
else:
item_type = "bytes"
value = int(item)
if len(rv) and rv[-1][0] == item_type:
rv[-1][1] += value
else:
rv.append((item_type, value))
return rv
delays = parse_delays()
if not delays:
return response
content = resolve_content(response)
modified_content = []
offset = [0]
def sleep(seconds):
def inner():
time.sleep(seconds)
return ""
return inner
def add_content(delays, repeat=False):
for i, (item_type, value) in enumerate(delays):
if item_type == "bytes":
modified_content.append(content[offset[0]:offset[0] + value])
offset[0] += value
elif item_type == "delay":
modified_content.append(sleep(value))
elif item_type == "repeat":
assert i == len(delays) - 1
while offset[0] < len(content):
add_content(delays[-(value + 1):-1], True)
if not repeat and offset[0] < len(content):
modified_content.append(content[offset[0]:])
add_content(delays)
response.content = modified_content
return response
@pipe(nullable(int), opt(nullable(int)))
def slice(request, response, start, end=None):
"""Send a byte range of the response body
:param start:
|
molmod/zeobuilder
|
zeobuilder/gui/fields/__init__.py
|
Python
|
gpl-3.0
| 1,515
| 0.00264
|
# -*- coding: utf-8 -*-
# Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
import read, edit, faulty, group, composed, optional
|
dylantelford/ccr-summer-internship
|
xmlBuckets/setup.py
|
Python
|
lgpl-3.0
| 395
| 0.005063
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='runBucket',
version='1.0.0',
license='LGPLv3',
author='Dylan Telford',
author_email='dylantelford@gmail.com',
url='https://github.com/ubccr/student-projects/tree/dtelford/dtelford/xmlScripts',
packages=['runBucket'],
scripts=['runBucket/runBucket.py'],
requires=['xml'])
| |
IZSVenezie/VetEpiGIS-Tool
|
plugin/poi_dialog.py
|
Python
|
gpl-2.0
| 6,037
| 0.003313
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'poi_dialog_base.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(382, 236)
self.gridLayout_5 = QtWidgets.QGridLayout(Dialog)
self.gridLayout_5.setObjectName("gridLayout_5")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_3 = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, 0, 0, 1, 1)
self.lineEdit_3 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_3.setMinimumSize(QtCore.QSize(311, 0))
self.lineEdit_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.lineEdit_3.setObjectName("lineEdit_3")
self.gridLayout_2.addWidget(self.lineEdit_3, 0, 1, 1, 1)
self.label_6 = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.gridLayout_2.addWidget(self.label_6, 1, 0, 1, 1)
self.lineEdit_5 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_5.setMinimumSize(QtCore.QSize(311, 0))
self.lineEdit_5.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.lineEdit_5.setObjectName("lineEdit_5")
self.gridLayout_2.addWidget(self.lineEdit_5, 1, 1, 1, 1)
self.gridLayout_5.addLayout(self.gridLayout_2, 0, 0, 1, 3)
spacerItem = QtWidgets.QSpacerItem(20, 7, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem, 1, 0, 1, 1)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(Dialog)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.lineEdit_longitude = QtWidgets.QLineEdit(Dialog)
self.lineEdit_longitude.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lineEdit_longitude.setObjectName("lineEdit")
self.gridLayout.addWidget(self.lineEdit_longitude, 0, 1, 1, 1)
self.toolButton = QtWidgets.QToolButton(Dialog)
self.toolButton.setMinimumSize(QtCore.QSize(0, 59))
self.toolButton.setObjectName("toolButton")
self.gridLayout.addWidget(self.toolButton, 0, 2, 2, 1)
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.lineEdit_2_latitude = QtWidgets.QLineEdit(Dialog)
self.lineEdit_2_latitude.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lineEdit_2_latitude.setObjectName("lineEdit_2")
self.gridLayout.addWidget(self.lineEdit_2_latitude, 1, 1, 1, 1)
self.gridLayout_5.addLayout(self.gridLayout, 2, 0, 1, 3)
spacerItem1 = QtWidgets.QSpacerItem(20, 7, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem1, 3, 1, 1, 1)
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_4 = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.gridLayout_3.addWidget(self.label_4, 0, 0, 1, 1)
self.comboBox = QtWidgets.QComboBox(Dialog)
self.comboBox.setMinimumSize(QtCore.QSize(191, 0))
self.comboBox.setEditable(False)
self.comboBox.setObjectName("comboBox")
self.gridLayout_3.addWidget(self.comboBox, 0, 1, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem2, 0, 2, 1, 1)
self.gridLayout_5.addLayout(self.gridLayout_3, 4, 0, 1, 3)
spacerItem3 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem3, 5, 2, 1, 1)
self.gridLayout_4 = QtWidgets.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_5 = QtWidgets.QLabel(Dialog)
self.label_5.setText("")
self.label_5.setObjectName("label_5")
self.gridLayout_4.addWidget(self.label_5, 0, 0, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Save)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout_4.addWidget(self.buttonBox, 0, 1, 1, 1)
self.gridLayout_5.addLayout(self.gridLayout_4, 6, 0, 1, 3)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label_3.setText(_translate("Dialog", "ID:"))
self.label_6.setText(_translate("Dialog", "Code:"))
self.label.setText(_translate("Dialog", "Longitude:"))
self.toolButton.setText(_translate("Dialog", "dms"))
self.label_2.setText(_translate("Dialog", "Latitude:"))
self.label_4.setText(_translate("Dialog", "Activity:"))
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/tests/type_definition/test_rule_006.py
|
Python
|
gpl-3.0
| 1,173
| 0.004263
|
import os
import unittest
from vsg.rules import type_definition
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_006_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_006_test_input.fixed.vhd'), lExpected)
class test_type_definition_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_006(self):
oRule = type_definition.rule_006()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'type')
self.assertEqual(oRule.identifier, '006')
lExpected = [8]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_006(self):
oRule = type_definition.rule_006()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
|
danielfrg/ec2hosts
|
ec2hosts/cli.py
|
Python
|
apache-2.0
| 1,208
| 0.001656
|
from __future__ import print_function, absolute_import, division
import sys
import click
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
import ec2hosts
def main():
try:
cli(obj={})
except Exception as e:
import traceback
click.echo(traceback.format_exc(), err=True)
sys.exit(1)
@click.group(invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name='Anaconda Cluster', version=ec2hosts.__version__)
@click.pass_context
def cli(ctx):
ctx.obj = {}
if ctx.invoked_subcommand is None:
ctx.invoke(run)
@cli.command(short_help='Run')
@click.pass_context
def run(ctx):
click.echo("New /etc/hosts file:")
content = ec2hosts.gen_file()
click.echo(content)
if click.confirm('Do you want to continue?'):
ec2hosts.write(content)
ec2hosts.move()
@cli.command(short_help='Clean')
@click.pass_context
def clean(ctx):
click.echo("New /etc/hosts file:")
content = ec2hosts.read_file()
content = ec2hosts.clean(ec2hosts.read_file())
click.echo(content)
if click.confirm('Do you want to continue?'):
ec2hosts.write(content)
ec2hosts.move()
|
chriskiehl/Gooey
|
gooey/gui/components/widgets/slider.py
|
Python
|
mit
| 1,260
| 0.001587
|
import wx  # type: ignore
from gooey.gui import formatters
from gooey.gui.components.widgets.bases import TextContainer
from gooey.python_bindings import types as t
class Slider(TextContainer):
"""
An integer input field
"""
widget_class = wx.Slider
def getWidget(self, *args, **options):
widget = self.widget_class(self,
minValue=self._options.get('min', 0),
maxValue=self._options.get('max', 100),
style=wx.SL_MIN_MAX_LABELS | wx.SL_VALUE_LABEL)
return widget
def getWidgetValue(self):
return self.widget.GetValue()
def setValue(self, value):
self.widget.SetValue(value)
def formatOutput(self, metatdata, value):
return formatters.general(metatdata, str(value))
def getUiState(self) -> t.FormField:
widget: wx.Slider = self.widget
return t.Slider(
id=self._id,
type=self.widgetInfo['type'],
value=self.getWidgetValue(),
min=widget.GetMin(),
max=widget.GetMax(),
error=self.error.GetLabel() or None,
enabled=self.IsEnabled(),
visible=self.IsShown()
)
|
mostaphaRoudsari/Honeybee
|
src/Honeybee_Search EP Construction.py
|
Python
|
gpl-3.0
| 4,389
| 0.016405
|
# Filter EnergyPlus Construction
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <mostapha@ladybug.tools>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Search EnergyPlus construction based on Energy modeling standards, climate zone, surface type and building program
-
Provided by Honeybee 0.0.66
Args:
_EPConstrList: List of EPConstructions from Honeybee construction library
_standard: Energy modeling standard [0:"ASHRAE 90.1-2004", 1:"ASHRAE 90.1-2007", 2:"ASHRAE 90.1-2010", 3:"ASHRAE 189.1", 4:"CBECS 1980-2004", 5:"CBECS Before-1980"]
climateZone_: Optional input for climate zone
surfaceType_: Optional input for surface type > 0:'WALL', 1:'ROOF', 2:'FLOOR', 3:'CEILING', 4:'WINDOW'
altBldgProgram_: Optional input for building type > 0:'RESIDENTIAL', 1:'OFFICE', 2:'HOSPITAL'
keyword_: List of optional keywords in the name of the construction (ie. METAL, MASS, WOODFRAME).
Returns:
EPSelectedConstr: List of selected EP constructions that matches the the inputs
"""
ghenv.Component.Name = "Honeybee_Search EP Construction"
ghenv.Component.NickName = 'searchEPConstruction'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "06 | Energy | Material | Construction"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import scriptcontext as sc
import Grasshopper.Kernel as gh
def main(constrList, standard, climateZone, surfaceType, keywords):
# Make sure Honeybee is flying
if not sc.sticky.has_key('honeybee_release'):
print "You should first let Honeybee to fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let Honeybee to fly...")
return -1
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
if sc.sticky['honeybee_release'].isInputMissing(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
" Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
# get the constuction
try:
hb_EPMaterialAUX = sc.sticky["honeybee_EPMaterialAUX"]()
except:
msg = "Failed to load EP constructions!"
ghenv.Component.AddRuntimeMessage(w, msg)
return -1
surfaceTypesDict = {'0':'WALL', '1':'ROOF', '2':'FLOOR', '3':'CEILING', '4':'WINDOW',
'WALL':'WALL', 'ROOF':'ROOF', 'FLOOR':'FLOOR', 'CEILING':'CEILING', 'WINDOW':'WINDOW',
'':'', None:''}
selConstruction = hb_EPMaterialAUX.filterMaterials(constrList, standard, \
climateZone, surfaceTypesDict[surfaceType.upper()], \
"", keywords, ghenv.Component)
# constrList, standard, climateZone, surfaceType, bldgProgram, constructionType, ghenv.Component
return selConstruction
if len(_EPConstrList)!=0 and _standard:
result = main(_EPConstrList, _standard, climateZone_, surfaceType_, keywords_)
if result != -1:
EPSelectedConstr = result
|
minghuascode/pyj
|
examples/flowpanel/FlowPanel.py
|
Python
|
apache-2.0
| 1,863
| 0.010735
|
# -*- coding: iso-8859-1 -*-
import pyjd
#Ui components
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.FlowPanel import FlowPanel
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.Label import Label
from pyjamas.ui.Image import Image
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas import DOM
class FlowPanelDemo:
"""Demos the flow panel. Because of how the Flow
|
Panel works, all elements have to be inline elements.
Divs, tables, etc. can't be used, unless specified with CSS that they are inline or inline-block.
Because of how Vertical Panels work (with tables), we use CSS to display tables as inline-blocks.
IE, excluding IE8, doesn't support inline-blocks, so we have to use a CSS hack
(see http://blog.mozilla.com/webdev/2009/02/20/cross-browser-inline-block/ for more on the hack)
However, we use spans instead of divs for the Label by providing an 'element' argument."""
def __init__(self):
self.root = RootPanel()
#Flow panel taking up 70% of the page. CSS centers it.
self.flow = FlowPanel(Width="70%", StyleName='flow-panel')
for x in range(0, 10):
self.panel = VerticalPanel()
#Label each image with its number in the sequence
title = Label("Item %s" % x, Element=DOM.createElement('span'), StyleName="title item")
#Add a neat-o image.
image = Image('images/pyjamas.png', Width="200px", Height="200px", StyleName="cat-image cat-item")
#Add to the Vertical Panel the image title
self.panel.add(title)
self.panel.add(image)
self.flow.add(self.panel)
self.root.add(self.flow)
if __name__ == "__main__":
pyjd.setup("./public/FlowPanel.html")
FlowPanelDemo()
pyjd.run()
|
mir-dataset-loaders/mirdata
|
scripts/legacy/make_irmas_index.py
|
Python
|
bsd-3-clause
| 6,590
| 0.002276
|
import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items()) # Sort strokes by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
|
gillett-hernandez/project-euler
|
Python/problem_15.py
|
Python
|
mit
| 433
| 0.006928
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Gillett Hernandez
# @Date: 2016-07-14 17:06:40
# @Last Modified by: Gillett Hernandez
# @Last Modified time: 2017-08-10 12:39:06
from euler_funcs import timed
def count_routes(n):
result = 1
for i in range(1, n+1):
result = ((n+i) * result) // i
return result
@timed
def main():
print(count_routes(20))
if __name__ == '__main__':
main()
|
sameerparekh/pants
|
tests/python/pants_test/backend/python/tasks/test_pytest_run.py
|
Python
|
apache-2.0
| 12,057
| 0.00423
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import glob
import os
import xml.dom.minidom as DOM
from textwrap import dedent
import coverage
from pants.backend.python.tasks.pytest_run import PytestRun
from pants.base.exceptions import TestFailedTaskError
from pants.util.contextutil import pushd
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
class PythonTestBuilderTestBase(PythonTaskTestBase):
@classmethod
def task_type(cls):
return PytestRun
def run_tests(self, targets, **options):
test_options = {
'colors': False,
'level': 'info' # When debugging a test failure it may be helpful to set this to 'debug'.
}
test_options.update(options)
self.set_options(**test_options)
context = self.context(target_roots=targets)
pytest_run_task = self.create_task(context)
with pushd(self.build_root):
pytest_run_task.execute()
def run_failing_tests(self, targets, failed_targets, **options):
with self.assertRaises(TestFailedTaskError) as cm:
self.run_tests(targets=targets, **options)
self.assertEqual(set(failed_targets), set(cm.exception.failed_targets))
class PythonTestBuilderTestEmpty(PythonTestBuilderTestBase):
def test_empty(self):
self.run_tests(targets=[])
class PythonTestBuilderTest(PythonTestBuilderTestBase):
def setUp(self):
super(PythonTestBuilderTest, self).setUp()
self.create_file(
'lib/core.py',
dedent("""
def one(): # line 1
return 1 # line 2
# line 3
# line 4
def two(): # line 5
return 2 # line 6
""").strip())
self.add_to_build_file(
'lib',
dedent("""
python_library(
name='core',
sources=[
'core.py'
]
)
"""))
self.create_file(
'tests/test_core_green.py',
dedent("""
import unittest2 as unittest
import core
class CoreGreenTest(unittest.TestCase):
def test_one(self):
self.assertEqual(1, core.one())
"""))
self.create_file(
'tests/test_core_red.py',
dedent("""
import core
def test_two():
assert 1 == core.two()
"""))
self.create_file(
'tests/test_core_red_in_class.py',
dedent("""
import unittest2 as unittest
import core
class CoreRedClassTest(unittest.TestCase):
def test_one_in_class(self):
self.assertEqual(1, core.two())
"""))
self.add_to_build_file(
'tests',
dedent("""
python_tests(
name='green',
sources=[
'test_core_green.py'
],
dependencies=[
'lib:core'
],
coverage=[
'core'
]
)
python_tests(
name='red',
sources=[
'test_core_red.py',
],
dependencies=[
'lib:core'
],
coverage=[
'core'
]
)
python_tests(
name='red_in_class',
sources=[
'test_core_red_in_class.py',
],
dependencies=[
'lib:core'
],
coverage=[
'core'
]
)
python_tests(
name='all',
sources=[
'test_core_green.py',
'test_core_red.py'
],
dependencies=[
'lib:core'
]
)
python_tests(
name='all-with-coverage',
sources=[
'test_core_green.py',
'test_core_red.py'
],
dependencies=[
'lib:core'
],
coverage=[
'core'
]
)
"""))
self.green = self.target('tests:green')
self.red = self.target('tests:red')
self.red_in_class = self.target('tests:red_in_class')
self.all = self.target('tests:all')
self.all_with_coverage = self.target('tests:all-with-coverage')
def test_green(self):
self.run_tests(targets=[self.green])
def test_red(self):
self.run_failing_tests(targets=[self.red], failed_targets=[self.red])
def test_red_test_in_class(self):
# for test in a class, the failure line is in the following format
# F testprojects/tests/python/pants/constants_only/test_fail.py::TestClassName::test_boom
self.run_failing_tests(targets=[self.red_in_class], failed_targets=[self.red_in_class])
def test_mixed(self):
self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])
def test_junit_xml_option(self):
# We expect xml of the following form:
# <testsuite errors=[Ne] failures=[Nf] skips=[Ns] tests=[Nt] ...>
# <testcase classname="..." name="..." .../>
# <testcase classname="..." name="..." ...>
# <failure ...>...</failure>
# </testcase>
# </testsuite>
report_basedir = os.path.join(self.build_root, 'dist', 'junit_option')
self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],
junit_xml_dir=report_basedir)
files = glob.glob(os.path.join(report_basedir, '*.xml'))
self.assertEqual(1, len(files), 'Expected 1 file, found: {}'.format(files))
junit_xml = files[0]
root = DOM.parse(junit_xml).documentElement
self.assertEqual(2, len(root.childNodes))
self.assertEqual(2, int(root.getAttribute('tests')))
self.assertEqual(1, int(root.getAttribute('failures')))
self.assertEqual(0, int(root.getAttribute('errors')))
self.assertEqual(0, int(root.getAttribute('skips')))
children_by_test_name = dict((elem.getAttribute('name'), elem) for elem in root.childNodes)
self.assertEqual(0, len(children_by_test_name['test_one'].childNodes))
self.assertEqual(1, len(children_by_test_name['test_two'].childNodes))
self.assertEqual('failure', children_by_test_name['test_two'].firstChild.nodeName)
def coverage_data_file(self):
return os.path.join(self.build_root, '.coverage')
def load_coverage_data(self, path):
data_file = self.coverage_data_file()
self.assertTrue(os.path.isfile(data_file))
coverage_data = coverage.coverage(data_file=data_file)
coverage_data.load()
_, all_statements, not_run_statements, _ = coverage_data.analysis(path)
return all_statements, not_run_statements
def test_coverage_simple_option(self):
# TODO(John Sirois): Consider eliminating support for "simple" coverage or at least formalizing
# the coverage option value that turns this on to "1" or "all" or "simple" = anything formal.
simple_coverage_kwargs = {'coverage': '1'}
self.assertFalse(os.path.isfile(self.coverage_data_file()))
covered_file = os.path.join(self.build_root, 'lib', 'core.py')
self.run_tests(targets=[self.green], **simple_coverage_kwargs)
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([6], not_run_statements)
self.run_failing_tests(targets=[self.red], failed_targets=[self.red], **simple_coverage_kwargs)
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([2], not_run_statements)
self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],
**simple_coverage_kwargs)
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([], not_run_statements)
# The al
|
mkhuthir/learnPython
|
Book_learning-python-r1.1/ch5/performances.map.py
|
Python
|
mit
| 392
| 0
|
from time import time
mx = 2 * 10 ** 7
t = time()
absloop = []
for n in range(mx):
absloop.append(abs(n))
print('for loop: {:.4f} s'.format(time() - t))
t = time()
abslist = [abs(n) for n in range(mx)]
print('list comprehension: {:.4f} s'.format(time() - t))
t = time()
absmap = list(map(abs, range(mx)))
print('map: {:.4f} s'.format(time() - t))
print(absloop == abslist == absmap)
|
annarev/tensorflow
|
tensorflow/python/kernel_tests/lrn_op_test.py
|
Python
|
apache-2.0
| 5,781
| 0.010552
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for local response normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class LRNOpTest(test.TestCase):
def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
beta=0.5):
"""Compute expected result."""
output = copy.deepcopy(input_image)
batch_size = input_image.shape[0]
rows = input_image.shape[1]
cols = input_image.shape[2]
depth = input_image.shape[3]
for b in range(batch_size):
for r in range(rows):
for c in range(cols):
for d in range(depth):
begin = max(0, d - lrn_depth_radius)
end = min(depth, d + lrn_depth_radius + 1)
patch = input_image[b, r, c, begin:end]
output[b, r, c, d] /= (
np.power(bias + alpha * np.sum(patch * patch), beta))
return output
def _RunAndVerify(self, dtype):
with self.cached_session():
# random shape
shape = np.random.randint(1, 16, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
p = array_ops.placeholder(dtype, shape=shape)
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 2.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 2.0 * np.random.rand()
lrn_t = nn.local_response_normalization(
p,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
params = {p: np.random.rand(*shape).astype("f")}
result = lrn_t.eval(feed_dict=params)
expected = self._LRN(
params[p],
lrn_depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = np.amax(np.abs(result - expected))
print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
err)
if dtype == dtypes.float32:
self.assertTrue(err < 1e-4)
else:
self.assertTrue(err < 1e-2)
self.assertShapeEqual(expected, lrn_t)
@test_util.run_deprecated_v1
def testCompute(self):
for _ in range(2):
self._RunAndVerify(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
if not test.is_gpu_available():
self._RunAndVerify(dtypes.float16)
@test_util.run_deprecated_v1
def testGradientsZeroInput(self):
with self.session():
shape = [4, 4, 4, 4]
p = array_ops.placeholder(dtypes.float32, shape=shape)
inp_array = np.zeros(shape).astype("f")
lrn_op = nn.local_response_normalization(p, 2, 1.0, 0.0, 1.0, name="lrn")
grad = gradients_impl.gradients([lrn_op], [p])[0]
params = {p: inp_array}
r = grad.eval(feed_dict=params)
expected = np.ones(shape).astype("f")
self.assertAllClose(r, expected)
self.assertShapeEqual(expected, grad)
def _RunAndVerifyGradients(self, dtype):
with self.cached_session():
# random shape
shape = np.random.randint(1, 5, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 1.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 1.0 * np.random.rand()
if dtype == dtypes.float32:
inp_array = np.random.rand(*shape).astype(np.float32)
else:
inp_array = np.random.rand(*shape).astype(np.float16)
inp = constant_op.constant(
list(inp_array.ravel(order="C")), shape=shape, dtype=dtype)
lrn_op = nn.local_response_normalization(
inp,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = gradient_checker.compute_gradient_error(inp, shape, lrn_op, shape)
print("LRN Gradient error for bias ", bias, "alpha ", alpha, " beta ", beta,
" is ", err)
if dtype == dtypes.float32:
self.assertLess(err, 1e-4)
else:
self.assertLess(err, 1.0)
@test_util.run_deprecated_v1
def testGradients(self):
for _ in range(2):
self._RunAndVerifyGradients(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
if not test.is_gpu_available():
self._RunAndVerifyGradients(dtypes.float16)
if __name__ == "__main__":
test.main()
|
itorres/junk-drawer
|
2015/lp2pass.py
|
Python
|
unlicense
| 2,627
| 0.000761
|
#!/usr/bin/env python
import csv
import sys
import json
import hashlib
from subprocess import Popen, PIPE
from urlparse import urlparse
DEFAULT_GROUP = "lastpass-import"
class Record:
def __init__(self, d):
self.d = d
self.password = d['password']
if d['grouping'] in [None, "", "(none)"]:
self.group = DEFAULT_GROUP
else:
self.group = d['grouping']
self.d['kind'] = "lastpass imported item"
self.name = d['name']
self.username = d['username']
self.netloc = urlparse(d['url']).netloc
self.text = "{}\n{}".format(
self.password, json.dumps(self.d, sort_keys=True,
indent=2, separators=(',', ': ')))
self.md5 = hashlib.md5(self.text).hexdigest()
if self.name is None or self.name == "":
if self.netloc is None or self.netloc == "":
self.name = self.md5
else:
self.name = self.netloc
if self.username is None or self.username == "":
self.username = "unknown"
self.id = "{}/{}/{}".format(self.group,
self.name.replace('/', '_'),
self.username.replace('/', '_'))
self.items = [self]
def append(self, entry):
        self.items.append(entry)
def writeToPass(self):
if len(self.items) == 1:
process = Popen(["pass", "insert", "-m", self.id], stdin=PIPE,
stdout=PIPE, stderr=None)
self.stdout = process.communicate(str(self))
self.result = process.returncode
else:
for (i, v) in enumerate(self.items):
key = "{}/{}".format(self.id, i)
process = Popen(["pass", "insert", "-m", key],
|
stdin=PIPE, stdout=PIPE, stderr=None)
self.stdout = process.communicate(str(v))
self.result = process.returncode
def __str__(self):
return self.text
class Records:
def __init__(self):
self.d = dict()
def add(self, r):
if r.id not in self.d:
self.d[r.id] = r
else:
self.d[r.id].append(r)
def get(self, k):
return self.d[k]
fn = sys.argv[1]
with open(fn, 'rb') as cf:
lp = csv.DictReader(cf, delimiter=',')
rs = Records()
for l in lp:
r = Record(l)
rs.add(r)
for k, v in rs.d.items():
v.writeToPass()
if v.result != 0:
print "{} {} {}".format(v.result, len(v.items), k)
|
dirn/readthedocs.org
|
readthedocs/vcs_support/backends/git.py
|
Python
|
mit
| 6,947
| 0.00072
|
import re
import logging
import csv
import os
from StringIO import StringIO
from projects.exceptions import ProjectImportError
from vcs_support.backends.github import GithubContributionBackend
from vcs_support.base import BaseVCS, VCSVersion
log = logging.getLogger(__name__)
class Backend(BaseVCS):
supports_tags = True
supports_branches = True
contribution_backends = [GithubContributionBackend]
fallback_branch = 'master' # default branch
def __init__(self, *args, **kwargs):
super(Backend, self).__init__(*args, **kwargs)
self.token = kwargs.get('token', None)
self.repo_url = self._get_clone_url()
def _get_clone_url(self):
if '://' in self.repo_url:
hacked_url = self.repo_url.split('://')[1]
hacked_url = re.sub('.git$', '', hacked_url)
clone_url = 'https://%s' % hacked_url
if self.token:
clone_url = 'https://%s@%s' % (self.token, hacked_url)
return clone_url
# Don't edit URL because all hosts aren't the same
#else:
#clone_url = 'git://%s' % (hacked_url)
return self.repo_url
def set_remote_url(self, url):
return self.run('git', 'remote', 'set-url', 'origin', url)
def update(self):
# Use checkout() to update repo
self.checkout()
def repo_exists(self):
code, out, err = self.run('git', 'status')
return code == 0
def fetch(self):
code, out, err = self.run('git', 'fetch', '--tags', '--prune')
if code != 0:
raise ProjectImportError(
"Failed to get code from '%s' (git fetch): %s\n\nStderr:\n\n%s\n\n" % (
self.repo_url, code, err)
)
def checkout_revision(self, revision=None):
if not revision:
branch = self.default_branch or self.fallback_branch
revision = 'origin/%s' % branch
code, out, err = self.run('git', 'checkout',
'--force', '--quiet', revision)
if code != 0:
log.warning("Failed to checkout revision '%s': %s" % (
revision, code))
return [code, out, err]
def clone(self):
code, out, err = self.run('git', 'clone', '--recursive', '--quiet',
self.repo_url, '.')
if code != 0:
raise ProjectImportError(
"Failed to get code from '%s' (git clone): %s" % (
self.repo_url, code)
)
@property
def tags(self):
retcode, stdout, err = self.run('git', 'show-ref', '--tags')
        # error (or no tags found)
if retcode != 0:
return []
return self.parse_tags(stdout)
def parse_tags(self, data):
"""
Parses output of show-ref --tags, eg:
            3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0
bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1
c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2
a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2
c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1
edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2
        Into VCSVersion objects with the tag name as verbose_name and the commit
hash as identifier.
"""
# parse the lines into a list of tuples (commit-hash, tag ref name)
raw_tags = csv.reader(StringIO(data), delimiter=' ')
vcs_tags = []
for row in raw_tags:
row = filter(lambda f: f != '', row)
if row == []:
continue
commit_hash, name = row
clean_name = name.split('/')[-1]
vcs_tags.append(VCSVersion(self, commit_hash, clean_name))
return vcs_tags
@property
def branches(self):
# Only show remote branches
retcode, stdout, err = self.run('git', 'branch', '-r')
# error (or no tags found)
if retcode != 0:
return []
return self.parse_branches(stdout)
def parse_branches(self, data):
"""
Parse output of git branch -r, eg:
origin/2.0.X
origin/HEAD -> origin/master
origin/develop
origin/master
origin/release/2.0.0
origin/release/2.1.0
"""
clean_branches = []
raw_branches = csv.reader(StringIO(data), delimiter=' ')
for branch in raw_branches:
branch = filter(lambda f: f != '' and f != '*', branch)
# Handle empty branches
if len(branch):
branch = branch[0]
if branch.startswith('origin/'):
cut_len = len('origin/')
slug = branch[cut_len:].replace('/', '-')
if slug in ['HEAD']:
continue
clean_branches.append(VCSVersion(self, branch, slug))
else:
# Believe this is dead code.
slug = branch.replace('/', '-')
clean_branches.append(VCSVersion(self, branch, slug))
return clean_branches
@property
def commit(self):
retcode, stdout, err = self.run('git', 'rev-parse', 'HEAD')
return stdout.strip()
def checkout(self, identifier=None):
self.check_working_dir()
# Clone or update repository
if self.repo_exists():
self.set_remote_url(self.repo_url)
self.fetch()
else:
self.make_clean_working_dir()
self.clone()
# Find proper identifier
if not identifier:
identifier = self.default_branch or self.fallback_branch
identifier = self.find_ref(identifier)
#Checkout the correct identifier for this branch.
code, out, err = self.checkout_revision(identifier)
if code != 0:
return code, out, err
# Clean any remains of previous checkouts
self.run('git', 'clean', '-d', '-f', '-f')
# Update submodules
self.run('git', 'submodule', 'sync')
self.run('git', 'submodule', 'update',
'--init', '--recursive', '--force')
return code, out, err
def find_ref(self, ref):
# Check if ref starts with 'origin/'
if ref.startswith('origin/'):
return ref
# Check if ref is a branch of the origin remote
if self.ref_exists('remotes/origin/' + ref):
return 'origin/' + ref
return ref
def ref_exists(self, ref):
code, out, err = self.run('git', 'show-ref', ref)
return code == 0
@property
def env(self):
env = super(Backend, self).env
env['GIT_DIR'] = os.path.join(self.working_dir, '.git')
return env
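# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): how one line of
# `git show-ref --tags` output is reduced to a (commit hash, tag name) pair,
# assuming output shaped like the parse_tags() docstring sample above. The
# VCSVersion wrapper used by the real method is deliberately left out, and the
# helper name is illustrative only.
def _demo_parse_tag_line(line):
    # split on spaces and drop empty fields, mirroring parse_tags()
    commit_hash, ref_name = [f for f in line.split(' ') if f != '']
    return commit_hash, ref_name.split('/')[-1]
# _demo_parse_tag_line("3b32886c... refs/tags/2.0.0") -> ("3b32886c...", "2.0.0")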
|
p2j/ip_tweet
|
iptweet.py
|
Python
|
mit
| 2,893
| 0.001728
|
import re
import os
import urllib.request
import tweepy
import json
import time
def tweet_ip():
IPTweet().do_update()
return
class IPTweet(object):
def __init__(self):
self.__location__ = os.path.realpath(os.path.join(
os.getcwd(), os.path.dirname(__file__)
))
def get_current_ip(self):
# note: https://wtfismyip.com doesn't care if you automate requests to their service if it is for non-commercial use as long as you rate-limit to at most one request/min/ip address. Failure to comply may result in blockage.
        with urllib.request.urlopen('https://ipv4.wtfismyip.com/text') as url:
self.ip = url.read().decode('utf8').rstrip('\r\n')
return self
    def get_old_ip(self):
try:
with open(os.path.join(self.__location__, 'ip.old')) as f:
self.old_ip = f.readline().rstrip('\r\n')
except:
self.old_ip = '0.0.0.0'
return self
def get_twitter_keys(self):
try:
with open(os.path.join(self.__location__, 'settings.json')) as f:
json_str = f.read()
except FileNotFoundError:
print('settings file not found')
json_data = json.loads(json_str)
self.c_k = json_data['keys']['consumer_key']
self.c_s = json_data['keys']['consumer_secret']
self.a_t = json_data['keys']['access_token']
self.a_s = json_data['keys']['access_token_secret']
self.server_location = json_data['server_location']
self.target_user = json_data['target_user']
return self
def get_tweepy_auth(self):
try:
self.auth = tweepy.OAuthHandler(self.c_k, self.c_s)
self.auth.set_access_token(self.a_t, self.a_s)
self.tweepy_api = tweepy.API(self.auth)
except Exception:
self.tweepy_exception = 'there was an error with tweepy auth'
raise Exception
return self
def send_ip_direct_message(self):
sloc = self.server_location
ip = self.ip
target = self.target_user
t = time.strftime('%c: ')
message = t + 'New external IP for {} is: {}\n\n Old IP was: {}'.format(sloc, ip, self.old_ip)
self.tweepy_api.send_direct_message(user=target, text=message)
def write_new_ip(self):
output = self.ip + '\n'
with open(os.path.join(self.__location__, 'ip.old'), 'w') as f:
f.write(output)
def do_update(self):
# get old ip
self.get_old_ip()
self.get_current_ip()
if self.ip != self.old_ip:
try:
self.get_twitter_keys()
self.get_tweepy_auth()
self.send_ip_direct_message()
except Exception:
raise Exception
self.write_new_ip()
if __name__ == '__main__':
tweet_ip()
|
MegaWale/python-playb0x
|
scrapy/scrapyPlay/properties/properties/spiders/quotes.py
|
Python
|
mpl-2.0
| 641
| 0
|
# -*- coding: utf-8 -*-
import scrapy
class QuotesSpider(scrapy.Spider):
name = 'quotes'
allowed_domains = ['quotes.topscrape.com/tag/humor/']
start_urls = ['http://quotes.topscrape.com/tag/humor/']
def parse(self, response):
for quote in response.css('div.quote'):
yield {
'text': quote.css('span.text::text').extract_first(),
                'author': quote.xpath('span/small/text()').extract_first(),
}
next_page = response.css('li.next a::attr("href")').extract_first()
if next_page is not None:
yield response.follow(next_page, self.parse)
|
efajardo/osg-test
|
osgtest/tests/test_140_lcmaps.py
|
Python
|
apache-2.0
| 1,373
| 0.002185
|
import osgtest.library.core as core
import osgtest.library.files as files
import osgtest.library.osgunittest as osgunittest
class TestLcMaps(osgunittest.OSGTestCase):
    required_rpms = ['lcmaps', 'lcmaps-db-templates', 'vo-client', 'vo-client-lcmaps-voms']
def test_01_configure(self):
core.config['lcmaps.db'] = '/etc/lcmaps.db'
core.config['lcmaps.gsi-authz'] = '/etc/grid-security/gsi-authz.conf'
core.skip_ok_unless_installed(*self.required_rpms)
template = files.read('/usr/share/lcmaps/templates/lcmaps.db.vomsmap',
as_single_string=True)
files.write(core.config['lcmaps.db'], template, owner='lcmaps')
files.write(core.config['lcmaps.gsi-authz'],
"globus_mapping liblcas_lcmaps_gt4_mapping.so lcmaps_callout\n",
owner='lcmaps')
def test_02_old_xrootd_policy(self):
core.skip_ok_unless_installed('xrootd-lcmaps', *self.required_rpms)
self.skip_ok_if(core.PackageVersion('xrootd-lcmaps') >= '1.4.0')
files.append(core.config['lcmaps.db'],
'''xrootd_policy:
verifyproxynokey -> banfile
banfile -> banvomsfile | bad
banvomsfile -> gridmapfile | bad
gridmapfile -> good | vomsmapfile
vomsmapfile -> good | defaultmapfile
defaultmapfile -> good | bad
''',
backup=False)
|
bittercode/pyrrhic-ree
|
modules/onlineDocs.py
|
Python
|
gpl-2.0
| 530
| 0
|
import webbrowser
import platform
def get_version():
version = platform.python_version()
    if len(version) != 3:  # keep only major.minor, dropping the micro version
        version = version[0:3]
return version
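# Editor's note (illustrative, not part of the original module):
# platform.python_version() returns strings like "3.4.1", so get_version()
# yields "3.4", which is the form the docs.python.org URLs below expect.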
def open_doc(url):
webbrowser.open(url)
def open_library():
version = get_version()
url = "http://docs.python.org/
|
{}/library/re.html".format(version)
open_doc(url)
def open_guide():
version = get_version()
url = "http://docs.python.org/{}/howto/regex.html".format(version)
open_doc(url)
|
yamt/neutron
|
quantum/tests/unit/metaplugin/test_metaplugin.py
|
Python
|
apache-2.0
| 13,773
| 0.000073
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
import mox
from oslo.config import cfg
import stubout
import testtools
from quantum import context
from quantum.db import api as db
from quantum.extensions.flavor import (FLAVOR_NETWORK, FLAVOR_ROUTER)
from quantum.openstack.common import uuidutils
from quantum.plugins.metaplugin.meta_quantum_plugin import FlavorNotFound
from quantum.plugins.metaplugin.meta_quantum_plugin import MetaPluginV2
from quantum.tests import base
CONF_FILE = ""
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
META_PATH = "quantum.plugins.metaplugin"
FAKE_PATH = "quantum.tests.unit.metaplugin"
PROXY_PATH = "%s.proxy_quantum_plugin.ProxyPluginV2" % META_PATH
PLUGIN_LIST = """
fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2,proxy:%s
""".strip() % (FAKE_PATH, FAKE_PATH, PROXY_PATH)
L3_PLUGIN_LIST = """
fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2
""".strip() % (FAKE_PATH, FAKE_PATH)
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def setup_metaplugin_conf():
cfg.CONF.set_override('auth_url', 'http://localhost:35357/v2.0',
'PROXY')
cfg.CONF.set_override('auth_region', 'RegionOne', 'PROXY')
cfg.CONF.set_override('admin_user', 'quantum', 'PROXY')
cfg.CONF.set_override('admin_password', 'password', 'PROXY')
cfg.CONF.set_override('admin_tenant_name', 'service', 'PROXY')
cfg.CONF.set_override('plugin_list', PLUGIN_LIST, 'META')
cfg.CONF.set_override('l3_plugin_list', L3_PLUGIN_LIST, 'META')
cfg.CONF.set_override('default_flavor', 'fake2', 'META')
cfg.CONF.set_override('default_l3_flavor', 'fake1', 'META')
cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
#TODO(nati) remove this after subnet quota change is merged
cfg.CONF.set_override('max_dns_nameservers', 10)
cfg.CONF.set_override('rpc_backend',
'quantum.openstack.common.rpc.impl_fake')
class MetaQuantumPluginV2Test(base.BaseTestCase):
"""Class conisting of MetaQuantumPluginV2 unit tests."""
def setUp(self):
super(MetaQuantumPluginV2Test, self).setUp()
db._ENGINE = None
db._MAKER = None
self.fake_tenant_id = uuidutils.generate_uuid()
self.context = context.get_admin_context()
db.configure_db()
setup_metaplugin_conf()
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.client_cls_p = mock.patch('quantumclient.v2_0.client.Client')
client_cls = self.client_cls_p.start()
self.client_inst = mock.Mock()
client_cls.return_value = self.client_inst
self.client_inst.create_network.return_value = \
{'id': 'fake_id'}
self.client_inst.create_port.return_value = \
{'id': 'fake_id'}
self.client_inst.create_subnet.return_value = \
{'id': 'fake_id'}
self.client_inst.update_network.return_value = \
{'id': 'fake_id'}
self.client_inst.update_port.return_value = \
{'id': 'fake_id'}
self.client_inst.update_subnet.return_value = \
{'id': 'fake_id'}
self.client_inst.delete_network.return_value = True
self.client_inst.delete_port.return_value = True
self.client_inst.delete_subnet.return_value = True
self.plugin = MetaPluginV2(configfile=None)
def _fake_network(self, flavor):
data = {'network': {'name': flavor,
'admin_state_up': True,
'shared': False,
'router:external': [],
'tenant_id': self.fake_tenant_id,
FLAVOR_NETWORK: flavor}}
return data
def _fake_port(self, net_id):
return {'port': {'name': net_id,
'network_id': net_id,
'admin_state_up': True,
'device_id': 'bad_device_id',
'device_owner': 'bad_device_owner',
'admin_state_up': True,
'host_routes': [],
'fixed_ips': [],
'mac_address':
self.plugin._generate_mac(self.context, net_id),
'tenant_id': self.fake_tenant_id}}
def _fake_subnet(self, net_id):
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
return {'subnet': {'name': net_id,
'network_id': net_id,
'gateway_ip': '10.0.0.1',
'dns_nameservers': ['10.0.0.2'],
'host_routes': [],
'cidr': '10.0.0.0/24',
'allocation_pools': allocation_pools,
'enable_dhcp': True,
'ip_version': 4}}
def _fake_router(self, flavor):
data = {'router': {'name': flavor, 'admin_state_up': True,
'tenant_id': self.fake_tenant_id,
FLAVOR_ROUTER: flavor,
'external_gateway_info': None}}
return data
def test_create_delete_network(self):
network1 = self._fake_network('fake1')
ret1 = self.plugin.create_network(self.context, network1)
self.assertEqual('fake1', ret1[FLAVOR_NETWORK])
network2 = self._fake_network('fake2')
ret2 = self.plugin.create_network(self.context, network2)
self.assertEqual('fake2', ret2[FLAVOR_NETWORK])
network3 = self._fake_network('proxy')
ret3 = self.plugin.create_network(self.context, network3)
self.assertEqual('proxy', ret3[FLAVOR_NETWORK])
db_ret1 = self.plugin.get_network(self.context, ret1['id'])
self.assertEqual('fake1', db_ret1['name'])
db_ret2 = self.plugin.get_network(self.context, ret2['id'])
self.assertEqual('fake2', db_ret2['name'])
db_ret3 = self.plugin.get_network(self.context, ret3['id'])
self.assertEqual('proxy', db_ret3['name'])
db_ret4 = self.plugin.get_networks(self.context)
self.assertEqual(3, len(db_ret4))
db_ret5 = self.plugin.get_networks(self.context,
{FLAVOR_NETWORK: ['fake1']})
self.assertEqual(1, len(db_ret5))
self.assertEqual('fake1', db_ret5[0]['name'])
self.plugin.delete_network(self.context, ret1['id'])
self.plugin.delete_network(self.context, ret2['id'])
self.plugin.delete_network(self.context, ret3['id'])
def test_create_delete_port(self):
network1 = self._fake_network('fake1')
network_ret1 = self.plugin.create_network(self.context, network1)
network2 = self._fake_network('fake2')
network_ret2 = self.plugin.create_network(self.context, network2)
network3 = self._fake_network('proxy')
network_ret3 = self.plugin.create_network(self.context, network3)
port1 = self._fake_port(network_ret1['id'])
port2 = self._fake_port(network_ret2['id'])
port3 = self._fake_port(network_ret3['id'])
port1_ret = self.plugin.create_port(self.context, port1)
port2_ret = self.plugin.create_port(self.context, port2)
port3_ret = self.plugin.create_port(self.context, port3)
self.assertEqual(network_ret1['id'], port1_ret['network_id'])
|
bearstech/nuka
|
examples/docker_container.py
|
Python
|
gpl-3.0
| 258
| 0
|
# -*- coding: utf-8 -*-
from nuka.hosts import DockerContainer
from nuka.tasks import shell
import nuka
host = DockerContainer(hostname='debian', image='bearstech/nukai')
async def my_tasks(host):
await shell.shell('whoami')
nuka.run(my_tasks(host))
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/shortuuid/__init__.py
|
Python
|
agpl-3.0
| 128
| 0
|
from shortuuid.main import (
encode,
decode,
uuid,
    random,
get_alphabet,
set_alphabet,
ShortUUID,
)
|
djds23/pygotham-1
|
pygotham/schedule/models.py
|
Python
|
bsd-3-clause
| 7,610
| 0.000263
|
"""Schedule models.
Much of this module is derived from the work of Eldarion on the
`Symposion <https://github.com/pinax/symposion>`_ project.
Copyright (c) 2010-2014, Eldarion, Inc. and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Eldarion, Inc. nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from bisect import bisect_left
from itertools import tee
from cached_property import cached_property
from sqlalchemy import func
from pygotham.core import db
__all__ = ('Day', 'Room', 'Slot', 'Presentation')
def pairwise(iterable):
"""Return values from ``iterable`` two at a time.
Recipe from
https://docs.python.org/3/library/itertools.html#itertools-recipes.
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
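# Editor's note (illustrative, not part of the original module):
# list(pairwise([9, 10, 11, 12])) == [(9, 10), (10, 11), (11, 12)]
# Day.__iter__ below relies on this to pair each start time with the next one.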
rooms_slots = db.Table(
'rooms_slots',
db.Column('slot_id', db.Integer, db.ForeignKey('slots.id')),
db.Column('room_id', db.Integer, db.ForeignKey('rooms.id')),
)
class Day(db.Model):
"""Day of talks."""
__tablename__ = 'days'
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.Date)
event_id = db.Column(
db.Integer, db.ForeignKey('events.id'), nullable=False)
event = db.relationship(
'Event', backref=db.backref('days', lazy='dynamic'))
def __str__(self):
"""Return a printable representation."""
return self.date.strftime('%B %d, %Y')
@cached_property
def rooms(self):
"""Return the rooms for the day."""
return Room.query.join(rooms_slots, Slot).filter(
Slot.day == self).order_by(Room.order).all()
def __iter__(self):
"""Iterate over the schedule for the day."""
if not self.rooms:
raise StopIteration
def rowspan(start, end):
"""Find the rowspan for an entry in the schedule table.
This uses a binary search for the given end time from a
sorted list of start times in order to find the index of the
first start time that occurs after the given end time. This
method is used to prevent issues that can occur with
overlapping start and end times being included in the same
list.
"""
return bisect_left(times, end) - times.index(start)
times = sorted({slot.start for slot in self.slots})
# While we typically only care about the start times here, the
# list is iterated over two items at a time. Without adding a
# final element, the last time slot would be omitted. Any value
# could be used here as bisect_left only assumes the list is
# sorted, but using a meaningful value feels better.
times.append(self.slots[-1].end)
slots = db.session.query(
Slot.id,
Slot.content_override,
Slot.kind,
Slot.start,
Slot.end,
func.count(rooms_slots.c.slot_id).label('room_count'),
func.min(Room.order).label('order'),
).join(rooms_slots, Room).filter(Slot.day == self).order_by(
func.count(rooms_slots.c.slot_id), func.min(Room.order)
).group_by(
Slot.id, Slot.content_override, Slot.kind, Slot.start, Slot.end
).all()
for time, next_time in pairwise(times):
row = {'time': time, 'slots': []}
for slot in slots:
if slot.start == time:
slot.rowspan = rowspan(slot.start, slot.end)
slot.colspan = slot.room_count
if not slot.content_override:
slot.presentation = Presentation.query.filter(
Presentation.slot_id == slot.id).first()
row['slots'].append(slot)
if row['slots'] or next_time is None:
yield row
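# Editor's sketch (not part of the original module): the nested rowspan()
# helper above is just a bisect_left lookup over the sorted start times.
# With integers standing in for datetime.time values it reduces to the
# following; bisect_left is already imported at the top of this module and
# the helper name is illustrative only.
def _demo_rowspan(times, start, end):
    """Number of schedule rows covered by a slot running from start to end."""
    return bisect_left(times, end) - times.index(start)
# _demo_rowspan([9, 10, 11, 12], 9, 11) == 2  (the slot spans two rows)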
class Room(db.Model):
"""Room of talks."""
__tablename__ = 'rooms'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False)
order = db.Column(db.Integer, nullable=False)
def __str__(self):
"""Return a printable representation."""
return self.name
class Slot(db.Model):
"""Time slot."""
__tablename__ = 'slots'
id = db.Column(db.Integer, primary_key=True)
kind = db.Column(
db.Enum(
'break', 'meal', 'keynote', 'talk', 'tutorial', name='slotkind'),
nullable=False,
)
    content_override = db.Column(db.Text)
start = db.Column(db.Time, nullable=False)
end = db.Column(db.Time, nullable=False)
    day_id = db.Column(db.Integer, db.ForeignKey('days.id'), nullable=False)
day = db.relationship('Day', backref=db.backref('slots', lazy='dynamic'))
rooms = db.relationship(
'Room',
secondary=rooms_slots,
backref=db.backref('slots', lazy='dynamic'),
order_by=Room.order,
)
def __str__(self):
"""Return a printable representation."""
start = self.start.strftime('%I:%M %p')
end = self.end.strftime('%I:%M %p')
rooms = ', '.join(map(str, self.rooms))
return '{} - {} on {}, {}'.format(start, end, self.day, rooms)
@cached_property
def duration(self):
"""Return the duration as a :class:`~datetime.timedelta`."""
return self.end - self.start
class Presentation(db.Model):
"""Presentation of a talk."""
__tablename__ = 'presentations'
id = db.Column(db.Integer, primary_key=True)
slot_id = db.Column(db.Integer, db.ForeignKey('slots.id'), nullable=False)
slot = db.relationship(
'Slot', backref=db.backref('presentation', uselist=False))
talk_id = db.Column(db.Integer, db.ForeignKey('talks.id'), nullable=False)
talk = db.relationship(
'Talk', backref=db.backref('presentation', uselist=False))
def __str__(self):
"""Return a printable representation."""
return str(self.talk)
def is_in_all_rooms(self):
"""Return whether the instance is in all rooms."""
return self.slot.number_of_rooms == 4
@cached_property
def number_of_rooms(self):
"""Return the number of rooms for the instance."""
return len(self.slot.rooms)
|
haoqili/MozSecWorld
|
apps/msw/admin.py
|
Python
|
bsd-3-clause
| 504
| 0.003968
|
from msw.models import Page, RichText, MembersPostUser, MembersPostText
from django.contrib import admin
# could add more complicated stuff here consult:
# tutorial: https://docs.djangoproject.com/en/dev/intro/tutorial02/#enter-the-admin-site
# tutorial finished admin.py: https://github.com/haoqili/Django-Tutorial-Directory/blob/master/tutorialSite/polls/admin.py
admin.site.register(Page)
admin.site.register(RichText)
admin.site.register(MembersPostUser)
admin.site.register(MembersPostText)
|
samuelwu90/PynamoDB
|
tests/test_consistent_hash_ring.py
|
Python
|
mit
| 2,081
| 0.004805
|
"""
test_consistent_hash_ring.py
~~~~~~~~~~~~
    Tests that ConsistentHashRing's node-hash add, remove, and responsible-node lookups behave as expected.
Run tests with:
clear; python -m unittest discover -v
"""
import unittest
import consistent_hash_ring
import util
import random
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.consistent_hash_ring = consistent_hash_ring.ConsistentHashRing()
self.node_hash = util.get_hash('value')
    def tearDown(self):
pass
def test_add_node_hash(self):
self.consistent_hash_ring.add_node_hash(self.node_hash)
self.assertTrue(self.node_hash in self.consistent_hash_ring._hash_ring)
def test_add_multiple_node_hashes(self):
for x in xrange(10):
node_hash = util.get_hash(str(random.random()))
self.consistent_hash_ring.add_node_hash(node_hash)
self.assertEqual(self.consistent_hash_ring._hash_ring, sorted(self.consistent_hash_ring._hash_ring))
def test_remove_node_hash(self):
self.consistent_hash_ring.add_node_hash(self.node_hash)
self.consistent_hash_ring.remove_node_hash(self.node_hash)
self.assertFalse(self.node_hash in self.consistent_hash_ring._hash_ring)
def test_get_responsible_node_hashes(self):
node_hashes = ['A', 'B', 'C', 'D', 'E', 'F']
self.consistent_hash_ring = consistent_hash_ring.ConsistentHashRing(node_hashes)
responsible_node_hashes = self.consistent_hash_ring.get_responsible_node_hashes('AA', 3)
self.assertEqual(responsible_node_hashes, ['B', 'C', 'D'])
def test_get_responsible_node_hashes_wrap_around(self):
""" Make sure that the list returns wraps around properly, i.e. 'E, F, A' or 'F, A, B'... """
node_hashes = ['A', 'B', 'C', 'D', 'E', 'F']
self.consistent_hash_ring = consistent_hash_ring.ConsistentHashRing(node_hashes)
responsible_node_hashes = self.consistent_hash_ring.get_responsible_node_hashes('EE', 3)
self.assertEqual(responsible_node_hashes, ['F', 'A', 'B'])
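# Editor's sketch (not part of the original test module): the wrap-around rule
# exercised above, reproduced with the stdlib alone. It assumes a sorted ring
# and a "next n node hashes clockwise" policy; the real ConsistentHashRing API
# may differ in detail, and the helper name is illustrative only.
import bisect
def _demo_responsible(ring, key_hash, n):
    ring = sorted(ring)
    start = bisect.bisect_right(ring, key_hash)
    return [ring[(start + i) % len(ring)] for i in range(n)]
# _demo_responsible(['A', 'B', 'C', 'D', 'E', 'F'], 'AA', 3) == ['B', 'C', 'D']
# _demo_responsible(['A', 'B', 'C', 'D', 'E', 'F'], 'EE', 3) == ['F', 'A', 'B']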
|
diogocs1/comps
|
web/addons/l10n_ma/__openerp__.py
|
Python
|
apache-2.0
| 2,154
| 0.004669
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010 kazacube (http://kazacube.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
#     License, or (at your option) any later version.
#
#     This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Maroc - Accounting',
'version' : '1.0',
'author' : 'kazacube',
'category' : 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Maroc.
=================================================================
Ce Module charge le modèle du plan de comptes standard Marocain et permet de
générer les états comptables aux normes marocaines (Bilan, CPC (comptes de
produits et charges), balance générale à 6 colonnes, Grand livre cumulatif...).
L'intégration comptable a été validé avec l'aide du Cabinet d'expertise comptable
Seddik au cours du troisième trimestre 2010.""",
'website': 'http://www.kazacube.com',
'depends' : ['base', 'account'],
'data' : [
'security/ir.model.access.csv',
'account_type.xml',
'account_pcg_morocco.xml',
'l10n_ma_wizard.xml',
'l10n_ma_tax.xml',
'l10n_ma_journal.xml',
],
'demo' : [],
'auto_install': False,
'installable': True,
'images': ['images/config_chart_l10n_ma.jpeg','images/l10n_ma_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ntucllab/striatum
|
striatum/bandit/exp4p.py
|
Python
|
bsd-2-clause
| 6,781
| 0
|
""" EXP4.P: An extention to exponential-weight algorithm for exploration and
exploitation. This module contains a class that implements EXP4.P, a contextual
bandit algorithm with expert advice.
"""
import logging
import six
from six.moves import zip
import numpy as np
from striatum.bandit.bandit import BaseBandit
LOGGER = logging.getLogger(__name__)
class Exp4P(BaseBandit):
r"""Exp4.P with pre-trained supervised learning algorithm.
Parameters
----------
actions : list of Action objects
List of actions to be chosen from.
historystorage: a HistoryStorage object
The place where we store the histories of contexts and rewards.
modelstorage: a ModelStorage object
The place where we store the model parameters.
delta: float, 0 < delta <= 1
        With probability 1 - delta, Exp4.P satisfies the theoretical
regret bound.
p_min: float, 0 < p_min < 1/k
The minimum probability to choose each action.
References
----------
.. [1] Beygelzimer, Alina, et al. "Contextual bandit algorithms with
supervised learning guarantees." International Conference on
           Artificial Intelligence and Statistics (AISTATS). 2011.
"""
def __init__(self, actions, historystorage, modelstorage, delta=0.1,
p_min=None, max_rounds=10000):
super(Exp4P, self).__init__(historystorage, modelstorage, actions)
self.n_total = 0
# number of actions (i.e. K in the paper)
self.n_actions = len(self._actions)
self.max_rounds = max_rounds
# delta > 0
if not isinstance(delta, float):
raise ValueError("delta should be float, the one"
"given is: %f" % p_min)
self.delta = delta
# p_min in [0, 1/k]
if p_min is None:
self.p_min = np.sqrt(np.log(10) / self.n_actions / self.max_rounds)
elif not isinstance(p_min, float):
raise ValueError("p_min should be float, the one"
"given is: %f" % p_min)
elif (p_min < 0) or (p_min > (1. / self.n_actions)):
raise ValueError("p_min should be in [0, 1/k], the one"
"given is: %f" % p_min)
else:
self.p_min = p_min
# Initialize the model storage
model = {
# probability distribution for action recommendation
'action_probs': {},
# weight vector for each expert
'w': {},
}
self._modelstorage.save_model(model)
def _exp4p_score(self, context):
"""The main part of Exp4.P.
"""
advisor_ids = list(six.viewkeys(context))
w = self._modelstorage.get_model()['w']
if len(w) == 0:
for i in advisor_ids:
w[i] = 1
w_sum = sum(six.viewvalues(w))
action_probs_list = []
for action_id in self.action_ids:
weighted_exp = [w[advisor_id] * context[advisor_id][action_id]
for advisor_id in advisor_ids]
prob_vector = np.sum(weighted_exp) / w_sum
action_probs_list.append((1 - self.n_actions * self.p_min)
* prob_vector
+ self.p_min)
action_probs_list = np.asarray(action_probs_list)
action_probs_list /= action_probs_list.sum()
estimated_reward = {}
uncertainty = {}
score = {}
for action_id, action_prob in zip(self.action_ids, action_probs_list):
estimated_reward[action_id] = action_prob
uncertainty[action_id] = 0
score[action_id] = action_prob
self._modelstorage.save_model(
{'action_probs': estimated_reward, 'w': w})
return estimated_reward, uncertainty, score
def get_action(self, context=None, n_actions=1):
"""Return the action to perform
Parameters
----------
context : dictionary
Contexts {expert_id: {action_id: expert_prediction}} of
different actions.
n_actions: int
Number of actions wanted to recommend users.
Returns
-------
history_id : int
The history id of the action.
action_recommendation : list of dictionaries
In each dictionary, it will contains {Action object,
estimated_reward, uncertainty}.
"""
estimated_reward, uncertainty, score = self._exp4p_score(context)
action_recommendation = []
action_recommendation_ids = sorted(score, key=score.get,
reverse=True)[:n_actions]
for action_id in action_recommendation_ids:
            action = self.get_action_with_id(action_id)
action_recommendation.append({
'action': action,
'estimated_reward': estimated_reward[action_id],
'uncertainty': uncertainty[action_id],
'score': score[action_id],
})
self.n_total += 1
history_id = self._historystorage.add_history(
context, action_recommendation, reward=None)
return history_id, action_recommendation
def reward(self, history_id, rewards):
"""Reward the previous action with reward.
Parameters
----------
history_id : int
The history id of the action to reward.
rewards : dictionary
The dictionary {action_id, reward}, where reward is a float.
"""
context = (self._historystorage
.get_unrewarded_history(history_id)
.context)
model = self._modelstorage.get_model()
w = model['w']
action_probs = model['action_probs']
action_ids = list(six.viewkeys(six.next(six.itervalues(context))))
# Update the model
for action_id, reward in six.viewitems(rewards):
y_hat = {}
v_hat = {}
for i in six.viewkeys(context):
y_hat[i] = (context[i][action_id] * reward
/ action_probs[action_id])
v_hat[i] = sum(
[context[i][k] / action_probs[k] for k in action_ids])
w[i] = w[i] * np.exp(
self.p_min / 2
* (y_hat[i] + v_hat[i]
* np.sqrt(np.log(len(context) / self.delta)
/ (len(action_ids) * self.max_rounds))))
self._modelstorage.save_model({
'action_probs': action_probs, 'w': w})
# Update the history
self._historystorage.add_reward(history_id, rewards)
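# Editor's sketch (not part of the original module): the per-action probability
# mix computed in _exp4p_score(), written out with plain floats. The names are
# illustrative; `expert_probs` maps expert id -> probability the expert assigns
# to the action, `w` holds the expert weights.
def _demo_exp4p_action_prob(expert_probs, w, n_actions, p_min):
    w_sum = sum(w.values())
    mixed = sum(w[i] * expert_probs[i] for i in expert_probs) / w_sum
    return (1 - n_actions * p_min) * mixed + p_min
# _demo_exp4p_action_prob({'e1': 0.7, 'e2': 0.5}, {'e1': 1.0, 'e2': 1.0}, 3, 0.05)
# == 0.85 * 0.6 + 0.05 == 0.56   (before the final renormalisation step)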
|
koenedaele/skosprovider
|
examples/dump_jsonld.py
|
Python
|
mit
| 3,074
| 0.000976
|
# -*- coding: utf-8 -*-
'''
This example demonstrates the skosprovider API with a simple
DictionaryProvider containing just three items.
'''
from pyld import jsonld
import json
from skosprovider.providers import DictionaryProvider
from skosprovider.uri import UriPatternGenerator
from skosprovider.skos import ConceptScheme
from skosprovider.jsonld import (
CONTEXT,
jsonld_conceptscheme_dumper,
jsonld_dumper,
)
larch = {
'id': '1',
'uri': 'http://id.trees.org/1',
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'The Larch'},
{'type': 'prefLabel', 'language': 'nl', 'label': 'De Lariks'}
],
'notes': [
{'type': 'definition', 'language': 'en', 'note': 'A type of tree.'}
],
'member_of': ['3'],
'matches': {
'close': ['http://id.python.org/different/types/of/trees/nr/1/the/larch']
}
}
chestnut = {
'id': '2',
'uri': 'http://id.trees.org/2',
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'The Chestnut'},
{'type': 'altLabel', 'language': 'nl', 'label': 'De Paardekastanje'},
{'type': 'altLabel', 'language': 'fr', 'label': 'la châtaigne'}
],
'notes': [
{
'type': 'definition', 'language': 'en',
'note': 'A different type of tree.'
}
],
'member_of': ['3'],
'matches': {
'related': ['http://id.python.org/different/types/of/trees/nr/17/the/other/chestnut']
}
}
species = {
'id': 3,
'uri': 'http://id.trees.org/3',
'labels': [
{'type': 'prefLabel', 'language': 'en', 'label': 'Trees by species'},
{'type': 'prefLabel', 'language': 'nl', 'label': 'Bomen per soort'}
],
'type': 'collection',
'members': ['1', '2'],
'notes': [
{
'type': 'editorialNote',
'language': 'en',
'note': 'As seen in <em>How to Recognise Different Types of Trees from Quite a Long Way Away</em>.',
'markup': 'HTML'
}
]
}
provider = DictionaryProvider(
{
'id': 'TREES',
'default_language': 'nl',
        'subject': ['biology'],
'dataset': {
'uri': 'http://id.trees.org/dataset'
}
},
[larch, chestnut, species],
uri_generator=UriPatternGenerator('http://id.trees.org/types/%s'),
concept_scheme=ConceptScheme('http://id.trees.org')
)
# Generate a JSON-LD doc for the conceptscheme
doc = jsonld_dumper(provider, CONTEXT)
msg = 'Conceptscheme'
print(msg)
print(len(msg) * '=')
print(json.dumps(doc, indent=2))
# Print an expanded doc
expanded = jsonld.expand(doc, CONTEXT)
msg = 'Conceptscheme expanded'
print(msg)
print(len(msg) * '=')
print(json.dumps(expanded, indent=2))
# Compact the doc again
compacted = jsonld.compact(expanded, CONTEXT)
msg = 'Conceptscheme compacted again'
print(msg)
print(len(msg) * '=')
print(json.dumps(compacted, indent=2))
# And now flatten it
flattened = jsonld.flatten(compacted, CONTEXT)
msg = 'Conceptscheme flattened'
print(msg)
print(len(msg) * '=')
print(json.dumps(flattened, indent=2))
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.1/Lib/xml/dom/pulldom.py
|
Python
|
mit
| 10,143
| 0.001577
|
import xml.sax
import xml.sax.handler
import types
try:
_StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
_StringTypes = [types.StringType]
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
_locator = None
document = None
def __init__(self, documentFactory=None):
self.documentFactory = documentFactory
self.firstEvent = [None, None]
self.lastEvent = self.firstEvent
self.elementStack = []
self.push = self.elementStack.append
try:
self.pop = self.elementStack.pop
except AttributeError:
# use class' pop instead
pass
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self.pending_events = []
def pop(self):
        result = self.elementStack[-1]
del self.elementStack[-1]
return result
def setDocumentLocator(self, locator):
self._locator = locator
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix or ''
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts.pop()
def startElementNS(self, name, tagName , attrs):
uri, localname = name
if uri:
# When using namespaces, the reader may or may not
# provide us with the original name. If not, create
# *a* valid tagName from the current context.
if tagName is None:
prefix = self._current_context[uri]
if prefix:
tagName = prefix + ":" + localname
else:
tagName = localname
if self.document:
node = self.document.createElementNS(uri, tagName)
else:
node = self.buildDocument(uri, tagName)
else:
# When the tagname is not prefixed, it just appears as
# localname
if self.document:
node = self.document.createElement(localname)
else:
node = self.buildDocument(None, localname)
for aname,value in attrs.items():
a_uri, a_localname = aname
if a_uri:
prefix = self._current_context[a_uri]
if prefix:
qname = prefix + ":" + a_localname
else:
qname = a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
else:
attr = self.document.createAttribute(a_localname)
node.setAttributeNode(attr)
attr.value = value
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElementNS(self, name, tagName):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def startElement(self, name, attrs):
if self.document:
node = self.document.createElement(name)
else:
node = self.buildDocument(None, name)
for aname,value in attrs.items():
attr = self.document.createAttribute(aname)
attr.value = value
node.setAttributeNode(attr)
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElement(self, name):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def comment(self, s):
if self.document:
node = self.document.createComment(s)
self.lastEvent[1] = [(COMMENT, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(COMMENT, s), None]
self.pending_events.append(event)
def processingInstruction(self, target, data):
if self.document:
node = self.document.createProcessingInstruction(target, data)
self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(PROCESSING_INSTRUCTION, target, data), None]
self.pending_events.append(event)
def ignorableWhitespace(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
self.lastEvent = self.lastEvent[1]
def characters(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(CHARACTERS, node), None]
self.lastEvent = self.lastEvent[1]
def startDocument(self):
if self.documentFactory is None:
import xml.dom.minidom
self.documentFactory = xml.dom.minidom.Document.implementation
def buildDocument(self, uri, tagname):
# Can't do that in startDocument, since we need the tagname
# XXX: obtain DocumentType
node = self.documentFactory.createDocument(uri, tagname, None)
self.document = node
self.lastEvent[1] = [(START_DOCUMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
# Put everything we have seen so far into the document
for e in self.pending_events:
if e[0][0] == PROCESSING_INSTRUCTION:
_,target,data = e[0]
n = self.document.createProcessingInstruction(target, data)
e[0] = (PROCESSING_INSTRUCTION, n)
elif e[0][0] == COMMENT:
n = self.document.createComment(e[0][1])
e[0] = (COMMENT, n)
else:
raise AssertionError("Unknown pending event ",e[0][0])
self.lastEvent[1] = e
self.lastEvent = e
self.pending_events = None
return node.firstChild
def endDocument(self):
self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
self.pop()
def clear(self):
"clear(): Explicitly release parsing structures"
self.document = None
class ErrorHandler:
def warning(self, exception):
print exception
def error(self, exception):
raise exception
def fatalError(self, exception):
raise exception
class DOMEventStream:
def __init__(self, stream, parser, bufsize):
self.stream = stream
self.parser = parser
self.bufsize = bufsize
self.reset()
def reset(self):
self.pulldom = PullDOM()
# This content handler relies on namespace support
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler(self.pulldom)
def __getitem__(self, pos):
rc = self.getEvent()
if rc:
return rc
raise IndexError
def expandNode(self, node):
event = self.getEvent()
parents = [node]
while event:
token, cur_node = event
if cur_node is node:
return
if token != END_ELEMENT:
parents[-1].appendChild(cur_node)
if token == START_ELEMENT:
parents.append(cur_node)
elif token == END_ELEMENT:
del parents[-1]
event = self.getEvent()
def getEvent(self):
if not self.pulldom.firstEvent[1]:
self.pulldom.lastEvent = self.pulldom.firstEvent
while not self.pulldom.firstEvent[1]:
buf = self.stream.read(self.bufsize)
if not buf:
self.parser.close()
return None
self.parser.feed(buf)
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def clear(sel
|
Roger/couchdb-python
|
couchdb/tests/mapping.py
|
Python
|
bsd-3-clause
| 12,137
| 0.001813
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from decimal import Decimal
import doctest
import unittest
from couchdb import design, mapping
from couchdb.tests import testutil
class DocumentTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_mutable_fields(self):
class Test(mapping.Document):
d = mapping.DictField()
a = Test()
b = Test()
a.d['x'] = True
self.assertTrue(a.d.get('x'))
self.assertFalse(b.d.get('x'))
def test_automatic_id(self):
class Post(mapping.Document):
title = mapping.TextField()
post = Post(title='Foo bar')
assert post.id is None
post.store(self.db)
assert post.id is not None
self.assertEqual('Foo bar', self.db[post.id]['title'])
def test_explicit_id_via_init(self):
class Post(mapping.Document):
title = mapping.TextField()
post = Post(id='foo_bar', title='Foo bar')
self.assertEqual('foo_bar', post.id)
post.store(self.db)
self.assertEqual('Foo bar', self.db['foo_bar']['title'])
def test_explicit_id_via_setter(self):
class Post(mapping.Document):
title = mapping.TextField()
post = Post(title='Foo bar')
post.id = 'foo_bar'
self.assertEqual('foo_bar', post.id)
post.store(self.db)
self.assertEqual('Foo bar', self.db['foo_bar']['title'])
def test_change_id_failure(self):
class Post(mapping.Document):
title = mapping.TextField()
post = Post(title='Foo bar')
post.store(self.db)
post = Post.load(self.db, post.id)
try:
post.id = 'foo_bar'
            self.fail('Expected AttributeError')
except AttributeError, e:
self.assertEqual('id can only be set on new documents', e.args[0])
def test_batch_update(self):
class Post(mapping.Document):
title = mapping.TextField()
post1 = Post(title='Foo bar')
post2 = Post(title='Foo baz')
results = self.db.update([post1, post2])
self.assertEqual(2, len(results))
assert results[0][0] is True
assert results[1][0] is True
def test_store_existing(self):
class Post(mapping.Document):
title = mapping.TextField()
post = Post(title='Foo bar')
post.store(self.db)
post.store(self.db)
self.assertEqual(len(list(self.db.view('_all_docs'))), 1)
def test_old_datetime(self):
dt = mapping.DateTimeField()
assert dt._to_python(u'1880-01-01T00:00:00Z')
def test_get_has_default(self):
doc = mapping.Document()
doc.get('foo')
doc.get('foo', None)
class ListFieldTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_to_json(self):
# See <http://code.google.com/p/couchdb-python/issues/detail?id=14>
class Post(mapping.Document):
title = mapping.TextField()
comments = mapping.ListField(mapping.DictField(
mapping.Mapping.build(
author = mapping.TextField(),
content = mapping.TextField(),
)
))
post = Post(title='Foo bar')
post.comments.append(author='myself', content='Bla bla')
post.comments = post.comments
self.assertEqual([{'content': 'Bla bla', 'author': 'myself'}],
post.comments)
def test_proxy_append(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing(numbers=[Decimal('1.0'), Decimal('2.0')])
thing.numbers.append(Decimal('3.0'))
self.assertEqual(3, len(thing.numbers))
self.assertEqual(Decimal('3.0'), thing.numbers[2])
def test_proxy_append_kwargs(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing()
self.assertRaises(TypeError, thing.numbers.append, foo='bar')
def test_proxy_contains(self):
class Thing(mapping.Document):
            numbers = mapping.ListField(mapping.DecimalField)
thing = Thing(numbers=[Decimal('1.0'), Decimal('2.0')])
assert isinstance(thing.numbers, mapping.ListField.Proxy)
assert '1.0' not in thing.numbers
assert Decimal('1.0') in thing.numbers
def test_proxy_count(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing(numbers=[Decimal('1.0'), Decimal('2.0')])
self.assertEqual(1, thing.numbers.count(Decimal('1.0')))
self.assertEqual(0, thing.numbers.count('1.0'))
def test_proxy_index(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing(numbers=[Decimal('1.0'), Decimal('2.0')])
self.assertEqual(0, thing.numbers.index(Decimal('1.0')))
self.assertRaises(ValueError, thing.numbers.index, '3.0')
def test_proxy_insert(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing(numbers=[Decimal('1.0'), Decimal('2.0')])
thing.numbers.insert(0, Decimal('0.0'))
self.assertEqual(3, len(thing.numbers))
self.assertEqual(Decimal('0.0'), thing.numbers[0])
def test_proxy_insert_kwargs(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing()
self.assertRaises(TypeError, thing.numbers.insert, 0, foo='bar')
def test_proxy_remove(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing()
thing.numbers.append(Decimal('1.0'))
thing.numbers.remove(Decimal('1.0'))
def test_proxy_iter(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
self.db['test'] = {'numbers': ['1.0', '2.0']}
thing = Thing.load(self.db, 'test')
assert isinstance(thing.numbers[0], Decimal)
def test_proxy_iter_dict(self):
class Post(mapping.Document):
comments = mapping.ListField(mapping.DictField)
self.db['test'] = {'comments': [{'author': 'Joe', 'content': 'Hey'}]}
post = Post.load(self.db, 'test')
assert isinstance(post.comments[0], dict)
def test_proxy_pop(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing()
thing.numbers = [Decimal('%d' % i) for i in range(3)]
self.assertEqual(thing.numbers.pop(), Decimal('2.0'))
self.assertEqual(len(thing.numbers), 2)
self.assertEqual(thing.numbers.pop(0), Decimal('0.0'))
def test_proxy_slices(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing()
thing.numbers = [Decimal('%d' % i) for i in range(5)]
ll = thing.numbers[1:3]
self.assertEqual(len(ll), 2)
self.assertEqual(ll[0], Decimal('1.0'))
thing.numbers[2:4] = [Decimal('%d' % i) for i in range(6, 8)]
self.assertEqual(thing.numbers[2], Decimal('6.0'))
self.assertEqual(thing.numbers[4], Decimal('4.0'))
self.assertEqual(len(thing.numbers), 5)
del thing.numbers[3:]
self.assertEquals(len(thing.numbers), 3)
def test_mutable_fields(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing.wrap({'_id': 'foo', '_rev': 1}) # no numbers
thing.numbers.append('1.0')
thing2 = Thing(id='thing2')
self.assertEqual([i for i in thing2.numbers], [])
class DocumentSchemaFieldTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_simple(self):
clas
|
iserko/jira2fogbugz
|
src/jira2fogbugz/__init__.py
|
Python
|
mit
| 8,941
| 0.002461
|
#!/usr/bin/env python
import argparse
import sys
import time
import traceback
from jira.client import JIRA
from jira.exceptions import JIRAError
from fogbugz import FogBugz
from fogbugz import FogBugzLogonError, FogBugzConnectionError
RECENTLY_ADDED_CASES = {}
FOGBUGZ_FIELDS = ('ixBug,ixPersonAssignedTo,ixPersonEditedBy,'
'sTitle,sLatestTextSummary,sProject,dtOpened,'
'sCategory,ixBugParent,hrsCurrEst,tags')
def create_issue(jis, email_map, project, default_assignee):
global RECENTLY_ADDED_CASES
data = {}
parent_issue = None
if not getattr(jis.fields, 'assignee', False):
data['ixPersonAssignedTo'] = default_assignee
else:
data['ixPersonAssignedTo'] = email_map[jis.fields.assignee.emailAddress]
data['sTitle'] = jis.fields.summary if jis.fields.summary else 'No title'
description = ''
if jis.fields.description:
description = jis.fields.description
data['sEvent'] = description
data['sProject'] = project
# TODO: use pytz and convert timezone data properly
data['dt'] = jis.fields.created.split('.')[0]+'Z'
data['hrsCurrEst'] = 0
tags = []
if getattr(jis.fields, 'fixVersions', []):
for ver in jis.fields.fixVersions:
tags.append(ver.name)
if getattr(jis.fields, 'labels', []):
for label in jis.fields.labels:
if label != u'export':
tags.append(label)
if tags:
data['sTags'] = ','.join(tags)
if getattr(jis.fields, 'timeoriginalestimate'):
data['hrsCurrEst'] = int(jis.fields.timeoriginalestimate)/60/60
# TODO: these are custom issue types in JIRA imported from Pivotal Tracker
# so you have to make a special mapping
    if jis.fields.issuetype.name in ('Story', 'Improvement', 'Epic', 'Theme', 'Technical task'):
data['sCategory'] = 'Feature'
elif jis.fields.issuetype.name in ('Bug'):
data['sCategory'] = 'Bug'
else:
raise Exception("Unknown issue type: {0}".format(jis.fields.issuetype.name))
if getattr(jis.fields, 'parent', None):
parent = jis.fields.parent
tmp = jira.search_issues('key={0}'.format(parent.key))
if len(tmp) != 1:
raise Exception("Was expecting to find 1 result for key={0}. Got {1}".format(parent.key, len(tmp)))
parent_issue = create_issue(tmp[0])
data['ixBugParent'] = parent_issue
if jis.fields.issuelinks:
for link in jis.fields.issuelinks:
parent = getattr(link, 'outwardIssue', None)
child = getattr(link, 'inwardIssue', None)
if parent:
tmp = jira.search_issues('key={0}'.format(parent.key))
if len(tmp) != 1:
raise Exception("Was expecting to find 1 result for key={0}. Got {1}".format(parent.key, len(tmp)))
parent_issue = create_issue(tmp[0],
email_map,
project,
default_assignee)
data['ixBugParent'] = parent_issue
func = fb.new
tries = 0
count = 0
# TODO: create custom field with JIRA key and JIRA URL then search for them
# before you attempt to create a new case
if RECENTLY_ADDED_CASES.has_key(jis.key):
resp = fb.search(q=RECENTLY_ADDED_CASES[jis.key],
cols=FOGBUGZ_FIELDS)
count = int(resp.cases['count'])
if count != 1:
raise Exception("We should see case {0}".format(RECENTLY_ADDED_CASES[jis.key]))
if count == 1:
case = resp.cases.case
data.pop('sEvent')
sparent_issue = parent_issue if parent_issue else 'n/a'
print "{0} exists as case ID {1: >3} ... parent case {2: >3} Type={3}".format(jis.key, case.ixbug.string, sparent_issue, jis.fields.issuetype.name)
if int(case.ixpersonassignedto.string) == data['ixPersonAssignedTo']:
data.pop('ixPersonAssignedTo')
curr = case.stitle.string
if curr[-3:] == '...':
curr = curr[:-3]
if curr in data['sTitle']:
data.pop('sTitle')
if case.sproject.string == data['sProject']:
data.pop('sProject')
if data.get('ixBugParent', False):
if int(case.ixbugparent.string) == data['ixBugParent']:
data.pop('ixBugParent')
if case.scategory.string == data['sCategory']:
data.pop('sCategory')
if int(case.hrscurrest.string) == data['hrsCurrEst']:
data.pop('hrsCurrEst')
tags = [tag.string for tag in resp.cases.case.tags.childGenerator()]
if data.has_key('sTags'):
new_tags = []
split_tags = data['sTags'].split(',')
for t in split_tags:
if t not in tags:
new_tags.append(t)
if new_tags:
data['sTags'] = ','.join(new_tags)
data.pop('dt')
if not data:
return int(case['ixbug'])
data['ixBug'] = int(case['ixbug'])
func = fb.edit
print "Calling edit with {0}".format(data)
else:
sparent_issue = parent_issue if parent_issue else 'n/a'
print "{0} doesn't exist yet ... parent case {1: >3} Type={2}".format(jis.key, sparent_issue, jis.fields.issuetype.name)
reporter = getattr(jis.fields, 'reporter', None)
if reporter:
data['ixPersonEditedBy'] = email_map[reporter.emailAddress]
else:
data['ixPersonEditedBy'] = default_assignee
print "Creating new"
return 0
def get_jira_issues(server, query):
chunk_size = 100
start_at = 0
while True:
issues = server.search_issues(query,
startAt=start_at,
maxResults=chunk_size)
if not issues:
break
start_at += chunk_size
for issue in issues:
yield issue
def run():
parser = argparse.ArgumentParser(description="JIRA to FogBugz importer")
parser.add_argument('jira_url',
help="JIRA URL, ex. http://jira.example.com")
parser.add_argument('jira_username', help="JIRA username")
parser.add_argument('jira_password', help="JIRA password")
parser.add_argument('fogbugz_url',
help="FogBugz URL, ex. http://example.fogbugz.com")
parser.add_argument('fogbugz_username', help="FogBugz username")
parser.add_argument('fogbugz_password', help="FogBugz password")
parser.add_argument('default_assignee', help="The email of the default assignee")
# TODO: dynamically create projects based on JIRA data
parser.add_argument('project', help="Which FogBugz project to put cases in")
parser.add_argument('-v', '--verbose',
dest="verbose",
action="store_true",
default=False,
help="Get more verbose output")
args = parser.parse_args()
try:
try:
jira = JIRA(options={'server': args.jira_url},
basic_auth=(args.jira_username,
args.jira_password))
except JIRAError, e:
if e.status_code == 403:
sys.stderr.write('Cannot connect to JIRA. Check username/password\n')
sys.exit(1)
else:
msg = "Cannot connect to JIRA (return code={0})".format(e.status_code)
if args.verbose:
msg += "\n{0}".format('Response from JIRA:\n{0}'.format(e.text))
sys.stderr.write(msg+'\n')
sys.exit(1)
try:
fb = FogBugz(args.fogbugz_url)
fb.logon(args.fogbugz_username, args.fogbugz_password)
except FogBugzConnectionError:
sys.stderr.write('Cannot connect to FogBugz\n')
sys.exit(1)
        except FogBugzLogonError:
            sys.stderr.write('Cannot log in to FogBugz. Check username/password\n')
sys.exit(1)
# initialize an email to fogb
|
Nexenta/cinder
|
cinder/tests/unit/volume/drivers/test_tegile.py
|
Python
|
apache-2.0
| 18,078
| 0
|
# Copyright (c) 2015 by Tegile Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver Test for Tegile storage.
"""
import mock
from cinder import context
from cinder.exception import TegileAPIException
from cinder import test
from cinder.volume.drivers import tegile
BASE_DRIVER = tegile.TegileIntelliFlashVolumeDriver
ISCSI_DRIVER = tegile.TegileISCSIDriver
FC_DRIVER = tegile.TegileFCDriver
test_config = mock.Mock()
test_config.san_ip = 'some-ip'
test_config.san_login = 'some-user'
test_config.san_password = 'some-password'
test_config.san_is_local = True
test_config.tegile_default_pool = 'random-pool'
test_config.tegile_default_project = 'random-project'
test_config.volume_backend_name = "unittest"
test_volume = {'host': 'node#testPool',
'name': 'testvol',
'id': 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e',
'_name_id': 'testvol',
'metadata': {'project': 'testProj'},
'provider_location': None,
'size': 10}
test_snapshot = {'name': 'testSnap',
'id': '07ae9978-5445-405e-8881-28f2adfee732',
'volume': {'host': 'node#testPool',
'size': 1,
'_name_id': 'testvol'
}
}
array_stats = {'total_capacity_gb': 4569.199686084874,
'free_capacity_gb': 4565.381390112452,
'pools': [{'total_capacity_gb': 913.5,
'QoS_support': False,
'free_capacity_gb': 911.812650680542,
'reserved_percentage': 0,
'pool_name': 'pyramid'
},
{'total_capacity_gb': 2742.1996604874,
'QoS_support': False,
'free_capacity_gb': 2740.148867149747,
'reserved_percentage': 0,
'pool_name': 'cobalt'
},
{'total_capacity_gb': 913.5,
'QoS_support': False,
'free_capacity_gb': 913.4198722839355,
'reserved_percentage': 0,
'pool_name': 'test'
}]
}
class FakeTegileService(object):
@staticmethod
def send_api_request(method, params=None,
request_type='post',
api_service='v2',
fine_logging=False):
        if method == 'createVolume':
            return ''
        elif method == 'deleteVolume':
            return ''
        elif method == 'createVolumeSnapshot':
            return ''
        elif method == 'deleteVolumeSnapshot':
            return ''
        elif method == 'cloneVolumeSnapshot':
            return ''
        elif method == 'listPools':
            return ''
        elif method == 'resizeVolume':
            return ''
        elif method == 'getVolumeSizeinGB':
            return 25
        elif method == 'getISCSIMappingForVolume':
return {'target_lun': '27',
'target_iqn': 'iqn.2012-02.com.tegile:openstack-cobalt',
'target_portal': '10.68.103.106:3260'
}
        elif method == 'getFCPortsForVolume':
return {'target_lun': '12',
'initiator_target_map':
'{"21000024ff59bb6e":["21000024ff578701",],'
'"21000024ff59bb6f":["21000024ff578700",],}',
'target_wwn': '["21000024ff578700","21000024ff578701",]'}
        elif method == 'getArrayStats':
return array_stats
fake_tegile_backend = FakeTegileService()
class FakeTegileServiceFail(object):
@staticmethod
def send_api_request(method, params=None,
request_type='post',
api_service='v2',
fine_logging=False):
raise TegileAPIException
fake_tegile_backend_fail = FakeTegileServiceFail()
class TegileIntelliFlashVolumeDriverTestCas
|
e(test.TestCase):
def setUp(self):
self.ctxt = context.get_admin_context()
self.configuration = test_config
super(TegileIntelliFlashVolumeDriverTestCase, self).setUp()
def test_create_volume(self):
tegile_driver = self.get_object(self.configuration)
with mock.patch.object(tegile_driver,
'_api_executor',
fake_tegile_backend):
self.assertEqual({
|
'metadata': {'pool': 'testPool',
'project': test_config.tegile_default_project
}
}, tegile_driver.create_volume(test_volume))
def test_create_volume_fail(self):
tegile_driver = self.get_object(self.configuration)
with mock.patch.object(tegile_driver,
'_api_executor',
fake_tegile_backend_fail):
self.assertRaises(TegileAPIException,
tegile_driver.create_volume,
test_volume)
def test_delete_volume(self):
tegile_driver = self.get_object(self.configuration)
with mock.patch.object(tegile_driver,
'_api_executor',
fake_tegile_backend):
tegile_driver.delete_volume(test_volume)
def test_delete_volume_fail(self):
tegile_driver = self.get_object(self.configuration)
with mock.patch.object(tegile_driver,
'_api_executor',
fake_tegile_backend_fail):
self.assertRaises(TegileAPIException,
tegile_driver.delete_volume,
test_volume)
def test_create_snapshot(self):
tegile_driver = self.get_object(self.configuration)
with mock.patch.object(tegile_driver,
'_api_executor',
fake_tegile_backend):
tegile_driver.create_snapshot(test_snapshot)
def test_create_snapshot_fail(self):
tegile_driver = self.get_object(self.configuration)
with mock.patch.object(tegile_driver,
'_api_executor',
fake_tegile_backend_fail):
self.assertRaises(TegileAPIException,
tegile_driver.create_snapshot,
test_snapshot)
def test_delete_snapshot(self):
tegile_driver = self.get_object(self.configuration)
with mock.patch.object(tegile_driver,
'_api_executor',
fake_tegile_backend):
tegile_driver.delete_snapshot(test_snapshot)
def test_delete_snapshot_fail(self):
tegile_driver = self.get_object(self.configuration)
with mock.patch.object(tegile_driver,
'_api_executor',
fake_tegile_backend_fail):
self.assertRaises(TegileAPIException,
tegile_driver.delete_snapshot,
test_snapshot)
def test_create_volume_from_snapshot(self):
tegile_driver = self.get_object(self.configuration)
with mock.patch.object(tegile_driver,
'_api_executor',
fake_tegile_backend)
|
chuanchang/tp-libvirt
|
virttest/utils_misc.py
|
Python
|
gpl-2.0
| 81,782
| 0.000648
|
"""
Virtualization test utility functions.
:copyright: 2008-2009 Red Hat Inc.
"""
import time
import string
import random
import socket
import os
import stat
import signal
import re
import logging
import commands
import fcntl
import sys
import inspect
import tarfile
import shutil
import getpass
import ctypes
from autotest.client import utils, os_dep
from autotest.client.shared import error, logging_config
from autotest.client.shared import git, base_job
import data_dir
import utils_selinux
try:
from staging import utils_koji
except ImportError:
from autotest.client.shared import utils_koji
import platform
ARCH = platform.machine()
class UnsupportedCPU(error.TestError):
pass
# TODO: remove this import when log_last_traceback is moved to autotest
import traceback
# TODO: this function is being moved into autotest. For compatibility
# reasons keep it here too but new code should use the one from base_utils.
def log_last_traceback(msg=None, log=logging.error):
"""
Writes last traceback into specified log.
:warning: This function is being moved into autotest and your code should
use autotest.client.shared.base_utils function instead.
:param msg: Override the default message. ["Original traceback"]
:param log: Where to log the traceback [logging.error]
"""
if not log:
log = logging.error
if msg:
log(msg)
exc_type, exc_value, exc_traceback = sys.exc_info()
if not exc_traceback:
log('Requested log_last_traceback but no exception was raised.')
return
log("Original " +
"".join(traceback.format_exception(exc_type, exc_value,
exc_traceback)))
def aton(sr):
"""
    Transform a string to a number (int or float). If the string is not in
    the form of a number, return False.
    :param sr: string to transform
    :return: int, float, or False if the transform failed
"""
try:
return int(sr)
except ValueError:
try:
return float(sr)
except ValueError:
return False
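# Editor's illustrative note (not part of the original module): aton() tries
# int() first, then float(), and returns False when neither parse succeeds.
# A hedged usage sketch:
#
#     aton("42")    # -> 42
#     aton("3.14")  # -> 3.14
#     aton("abc")   # -> False
#
# Because aton("0") returns 0, which is falsy, callers should compare the
# result against False explicitly instead of testing truthiness.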
def find_substring(string, pattern1, pattern2=None):
"""
    Return the match of pattern1 in string, or the match of pattern2 if
    pattern1 is not matched.
    :param string: string to search in
    :param pattern1: first pattern to match in string; required.
    :param pattern2: second pattern, used only if pattern1 does not match; optional.
    :return: matched substring or None
"""
if not pattern1:
logging.debug("pattern1: get empty string.")
return None
pattern = pattern1
if pattern2:
pattern += "|%s" % pattern2
ret = re.findall(pattern, string)
if not ret:
logging.debug("Could not find matched string with pattern: %s",
pattern)
return None
return ret[0]
def lock_file(filename, mode=fcntl.LOCK_EX):
lockfile = open(filename, "w")
fcntl.lockf(lockfile, mode)
return lockfile
def unlock_file(lockfile):
fcntl.lockf(lockfile, fcntl.LOCK_UN)
lockfile.close()
# Utility functions for dealing with external processes
def unique(llist):
"""
    Return a list of the elements in llist, without duplicates.
    :param llist: List with values.
    :return: List with non-duplicate elements, or None if the elements are unhashable.
"""
n = len(llist)
if n == 0:
return []
u = {}
try:
for x in llist:
u[x] = 1
except TypeError:
return None
else:
return u.keys()
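# Editor's illustrative note (not part of the original module): unique()
# collects the elements as dict keys, so the returned order is arbitrary and
# unhashable elements trigger the TypeError branch, which returns None.
#
#     unique([3, 1, 3, 2, 1])   # -> e.g. [1, 2, 3] (order not guaranteed)
#     unique([[1], [2]])        # -> None (lists are not hashable)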
def find_command(cmd):
"""
Try to find a command in the PATH, paranoid version.
:param cmd: Command to be found.
:raise: ValueError in case the command was not found.
"""
common_bin_paths = ["/usr/libexec", "/usr/local/sbin", "/usr/local/bin",
"/usr/sbin", "/usr/bin", "/sbin", "/bin"]
try:
path_paths = os.environ['PATH'].split(":")
except IndexError:
path_paths = []
path_paths = unique(common_bin_paths + path_paths)
for dir_path in path_paths:
cmd_path = os.path.join(dir_path, cmd)
if os.path.isfile(cmd_path):
return os.path.abspath(cmd_path)
raise ValueError('Missing command: %s' % cmd)
def pid_exists(pid):
"""
Return True if a given PID exists.
:param pid: Process ID number.
"""
try:
os.kill(pid, 0)
return True
except Exception:
return False
def safe_kill(pid, signal):
"""
Attempt to send a signal to a given process that may or may not exist.
    :param pid: Process ID number.
    :param signal: Signal number.
"""
try:
os.kill(pid, signal)
return True
except Exception:
return False
def kill_process_tree(pid, sig=signal.SIGKILL):
"""Signal a process and all of its children.
If the process does not exist -- return.
:param pid: The pid of the process to signal.
:param sig: The signal to send to the processes.
"""
if not safe_kill(pid, signal.SIGSTOP):
return
children = commands.getoutput("ps --ppid=%d -o pid=" % pid).split()
for child in children:
kill_process_tree(int(child), sig)
safe_kill(pid, sig)
safe_kill(pid, signal.SIGCONT)
def kill_process_by_pattern(pattern):
"""S
|
end SIGTERM signal to a process with matched pattern.
:param pattern: normally only matched against the process name
"""
cmd = "pkill -f %s" % pattern
result = utils.run(cmd, ignore_status=True)
if result.exit_status:
logging.error("Failed to run '%s': %s", cmd, result)
else:
        logging.info("Successfully ran '%s'.", cmd)
def get_open_fd
|
s(pid):
return len(os.listdir('/proc/%s/fd' % pid))
def get_virt_test_open_fds():
return get_open_fds(os.getpid())
def process_or_children_is_defunct(ppid):
    """Verify whether any process from PPID is defunct.
    Attempt to verify whether the parent process or any of its children from
    PPID are defunct (zombie) or not.
:param ppid: The parent PID of the process to verify.
"""
defunct = False
try:
pids = utils.get_children_pids(ppid)
except error.CmdError: # Process doesn't exist
return True
for pid in pids:
cmd = "ps --no-headers -o cmd %d" % int(pid)
proc_name = utils.system_output(cmd, ignore_status=True)
if '<defunct>' in proc_name:
defunct = True
break
return defunct
# The following are utility functions related to ports.
def is_port_free(port, address):
"""
Return True if the given port is available for use.
:param port: Port number
"""
try:
s = socket.socket()
#s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if address == "localhost":
s.bind(("localhost", port))
free = True
else:
s.connect((address, port))
free = False
except socket.error:
if address == "localhost":
free = False
else:
free = True
s.close()
return free
def find_free_port(start_port, end_port, address="localhost"):
"""
Return a host free port in the range [start_port, end_port].
:param start_port: First port that will be checked.
:param end_port: Port immediately after the last one that will be checked.
"""
for i in range(start_port, end_port):
if is_port_free(i, address):
return i
return None
def find_free_ports(start_port, end_port, count, address="localhost"):
"""
    Return up to count free host ports in the range [start_port, end_port].
    :param count: Maximum number of free ports to find.
:param start_port: First port that will be checked.
:param end_port: Port immediately after the last one that will be checked.
"""
ports = []
i = start_port
while i < end_port and count > 0:
if is_port_free(i, address):
ports.append(i)
count -= 1
i += 1
return ports
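# Editor's illustrative note (not part of the original module): the meaning
# of "free" in these helpers depends on the address argument -- for
# "localhost" a local bind is attempted, for any other address an outbound
# connect is attempted. A hedged usage sketch:
#
#     port = find_free_port(5000, 6000)            # first free local port, or None
#     ports = find_free_ports(5900, 6000, count=3) # up to three free local ports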
# An easy way to log lines to files when the logging system can't be used
_open_log_files = {}
_log_file_dir = "/tmp"
def log_line(filename, line):
|
blorenz/indie-film-rentals
|
indiefilmrentals/products/migrations/0006_auto__add_field_baseindierentalproduct_brand.py
|
Python
|
bsd-3-clause
| 6,372
| 0.006591
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'BaseIndieRentalProduct.brand'
db.add_column('products_baseindierentalproduct', 'brand',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['products.Brand']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'BaseIndieRentalProduct.brand'
db.delete_column('products_baseindierentalproduct', 'brand_id')
models = {
'base.link': {
'Meta': {'object_name': 'Link'},
'anchor': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '
|
100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'products.baseindierentalproduct': {
'Meta': {'object_name': 'BaseIndieRentalProduct', '_ormbases': ['shop.Product']},
'brand': ('django.db.models.fields.related.ForeignKey',
|
[], {'to': "orm['products.Brand']"}),
'crossSell': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'crossSell_rel_+'", 'null': 'True', 'to': "orm['products.BaseIndieRentalProduct']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['base.Link']", 'null': 'True', 'blank': 'True'}),
'price_tier': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['products.Price_Tier_Package']"}),
'product_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shop.Product']", 'unique': 'True', 'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'})
},
'products.brand': {
'Meta': {'object_name': 'Brand'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'products.camera': {
'Meta': {'object_name': 'Camera', '_ormbases': ['products.BaseIndieRentalProduct']},
'baseindierentalproduct_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['products.BaseIndieRentalProduct']", 'unique': 'True', 'primary_key': 'True'})
},
'products.lens': {
'Meta': {'object_name': 'Lens', '_ormbases': ['products.BaseIndieRentalProduct']},
'baseindierentalproduct_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['products.BaseIndieRentalProduct']", 'unique': 'True', 'primary_key': 'True'})
},
'products.lighting': {
'Meta': {'object_name': 'Lighting', '_ormbases': ['products.BaseIndieRentalProduct']},
'baseindierentalproduct_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['products.BaseIndieRentalProduct']", 'unique': 'True', 'primary_key': 'True'})
},
'products.price_tier': {
'Meta': {'object_name': 'Price_Tier'},
'end_day': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.FloatField', [], {}),
'start_day': ('django.db.models.fields.IntegerField', [], {})
},
'products.price_tier_package': {
'Meta': {'object_name': 'Price_Tier_Package'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tier': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['products.Price_Tier']", 'symmetrical': 'False'})
},
'products.productimage': {
'Meta': {'object_name': 'ProductImage'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['products.BaseIndieRentalProduct']"})
},
'shop.product': {
'Meta': {'object_name': 'Product'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_shop.product_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
}
}
complete_apps = ['products']
|
futurice/schedule
|
schedulesite/schedulesite/settings.py
|
Python
|
bsd-3-clause
| 5,140
| 0.000973
|
"""
Django settings for schedulesite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
GOOGLE_OAUTH2_CLIENT_SECRETS_JSON = '/opt/app/client_secrets.json'
OAUTH_DB_STORAGE = os.getenv('OAUTH_DB_STORAGE', 'false').lower() == 'true'
# production is https, x-forwarded-proto:http ...
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# see README for how to set a SECRET_KEY
#SECRET_KEY = 'dummy'
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'futuschedule',
'rest_framework',
'django_extensions',
)
MIDDLEWARE_CLASSES = (
'middleware.
|
SetUserMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.co
|
ntrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'middleware.CustomHeaderMiddleware',
)
AUTH_USER_MODEL = 'futuschedule.FutuUser'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.RemoteUserBackend',
)
ROOT_URLCONF = 'schedulesite.urls'
WSGI_APPLICATION = 'schedulesite.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
def asset_tag():
try:
return open('/opt/tag').read() + '/'
except Exception as e:
print(e)
return ''
STATIC_URL = '/static/%s'%(asset_tag())
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
'/opt/static/',
]
REST_FRAMEWORK = {
# http://www.django-rest-framework.org/api-guide/pagination
# TODO: what is a good page size?
'PAGE_SIZE': 100,
# http://www.django-rest-framework.org/api-guide/filtering
'DEFAULT_FILTER_BACKENDS': (
'rest_framework.filters.DjangoFilterBackend',
'rest_framework.filters.SearchFilter',
'rest_framework.filters.OrderingFilter',
),
}
FUM_API_URL = os.getenv('FUM_API_URL', None)
FUM_API_TOKEN = os.getenv('FUM_API_TOKEN', None)
SECRET_KEY = os.getenv('SECRET_KEY', 'secret')
CALENDAR_DOMAIN = os.getenv('CALENDAR_DOMAIN', None)
# Used by unit tests which also create 'a_credentials_file' used by the app
TEST_CALENDAR_ID = os.getenv('TEST_CALENDAR_ID', None)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DEBUG', 'false').lower() == 'true'
# SECURITY WARNING: don't run with fake_login turned on in production!
FAKE_LOGIN = os.getenv('FAKE_LOGIN', 'false').lower() == 'true'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Sentry token.
RAVEN_CONFIG = {
'dsn': os.getenv('SENTRY_DSN', 'http://localhost'),
}
if (os.getenv('ENABLE_RAVEN', 'false').lower() == 'true'):
INSTALLED_APPS = INSTALLED_APPS + (
'raven.contrib.django.raven_compat',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DB_NAME', None),
'HOST': os.getenv('DB_HOST', None),
'USER': os.getenv('DB_USER', None),
'PASSWORD': os.getenv('DB_PASSWORD', None),
'PORT': os.getenv('DB_PORT', '5432'),
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'project':{
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False
}
}
}
|
mbrner/funfolding
|
setup.py
|
Python
|
mit
| 1,277
| 0
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='funfolding',
version='0.0.1',
description='Having fun with unfolding.',
long_description=long_description,
url='https://github.com/mbrner/funfolding',
author='Mathis Boerner',
author_email='mathis.boerner@tu-dortmund.de',
license='MIT',
classifie
|
rs=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
|
'Programming Language :: Python :: 3.6',
],
keywords='unfolding',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[
'numpy',
'scikit-learn>=0.18.1',
'emcee',
'pymc3',
'scipy',
'futures',
'matplotlib',
'corner'],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
)
|
gkabbe/Python-Kurs2015
|
Lösungen/Woche1/password.py
|
Python
|
gpl-2.0
| 341
| 0.002933
|
passwort = "geheim"
feh
|
lversuche = 0
while True:
user_input = raw_input("Passwort eingeben:\n")
if user_input == passwort:
print "Hello User!"
break
else:
fehlversuche += 1
print "Fehlversuche:", fehlversuche
if fehlversuche == 3:
|
print "Zu viele Fehlversuche"
break
|
utkbansal/tardis
|
tardis/plasma/properties/nlte.py
|
Python
|
bsd-3-clause
| 9,723
| 0.007199
|
import logging
import os
import numpy as np
import pandas as pd
from tardis.plasma.properties.base import (PreviousIterationProperty,
ProcessingPlasmaProperty)
from tardis.plasma.properties import PhiSahaNebular, PhiSahaLTE
__all__ = ['PreviousElectronDensities', 'PreviousBetaSobolev',
'HeliumNLTE', 'HeliumNumericalNLTE']
logger = logging.getLogger(__name__)
class PreviousElectronDensities(PreviousIterationProperty):
"""
Attributes
----------
previous_electron_densities : The values for the electron densities converged upon in the previous iteration.
"""
outputs = ('previous_electron_densities',)
def set_initial_value(self, kwargs):
initial_value = np.ones(len(kwargs['abundance'].columns))*1000000.0
self._set_initial_value(initial_value)
class PreviousBetaSobolev(PreviousIterationProperty):
"""
Attributes
----------
previous_beta_sobolev : The beta sobolev values converged upon in the previous iteration.
"""
outputs = ('previous_beta_sobolev',)
def set_initial_value(self, kwargs):
try:
lines = len(kwargs['atomic_data'].lines)
except:
lines = len(kwargs['atomic_data']._lines)
initial_value = np.ones((lines,
len(kwargs['abundance'].columns)))
self._set_initial_value(initial_value)
class HeliumNLTE(ProcessingPlasmaProperty):
outputs = ('helium_population',)
def calculate(self, level_boltzmann_factor, electron_densities,
ionization_data, beta_rad, g, g_electron, w, t_rad, t_electrons,
delta, zeta_data, number_density, partition_function):
"""
Updates all of the helium level populations according to the helium NLTE recomb approximation.
"""
helium_population = level_boltzmann_factor.ix[2].copy()
# He I excited states
he_one_population = self.calculate_helium_one(g_electron, beta_rad,
ionization_data, level_boltzmann_factor, electron_densities, g, w)
helium_population.ix[0].update(he_one_population)
#He I metastable states
helium_population.ix[0,1] *= (1 / w)
helium_population.ix[0,2] *= (1 / w)
#He I ground state
helium_population.ix[0,0] = 0.0
#He II excited states
he_two_population = level_boltzmann_factor.ix[2,1].mul(
(g.ix[2,1].ix[0]**(-1)))
helium_population.ix[1].update(he_two_population)
#He II ground state
helium_population.ix[1,0] = 1.0
#He III states
helium_population.ix[2,0] = self.calculate_helium_three(t_rad, w,
zeta_data, t_electrons, delta, g_electron, beta_rad,
ionization_data, electron_densities, g)
unnormalised = helium_population.sum()
normalised = helium_population.mul(number_density.ix[2] / unnormalised)
helium_population.update(normalised)
return helium_population
@staticmethod
def calculate_helium_one(g_electron, beta_rad, ionization_data,
level_boltzmann_factor, electron_densities, g, w):
"""
Calculates the He I level population values, in equilibrium with the He II ground state.
"""
return level_boltzmann_factor.ix[2,0].mul(
g.ix[2,0], axis=0) * (1./(2*g.ix[2,1,0])) * \
(1/g_electron) * (1/(w**2)) * np.exp(
ionization_data.ionization_energy.ix[2,1] * beta_rad) * \
electron_densities
@staticmethod
def calculate_helium_three(t_rad, w, zeta_data, t_electrons, delta,
g_electron, beta_rad, ionization_data, electron_densities, g):
"""
Calculates the He III level population values.
"""
zeta = PhiSahaNebular.get_zeta_values(zeta_data, 2, t_rad)[1]
        he_three_population = (2 / electron_densities) * \
            (float(g.ix[2,2,0])/g.ix[2,1,0]) * g_electron * \
            np.exp(-ionization_data.ionization_energy.ix[2,2] * beta_rad) \
            * w * (delta.ix[2,2] * zeta + w * (1. - zeta)) * \
            (t_electrons / t_rad) ** 0.5
        return he_three_population
class HeliumNumericalNLTE(ProcessingPlasmaProperty):
'''
IMPORTANT: This particular property requires a specific numerical NLTE
solver and a specific atomic dataset (neither of which are distributed
with Tardis) to work.
'''
outputs = ('helium_population',)
def calculate(self, ion_number_density, electron_densities, t_electrons, w,
lines, j_blues, levels, level_boltzmann_factor, t_rad,
zeta_data, g_electron, delta, partition_function, ionization_data,
beta_rad, g):
logger.info('Performing numerical NLTE He calculations.')
if len(j_blues)==0:
return None
heating_rate_data = np.loadtxt(
self.plasma_parent.heating_rate_data_file, unpack=True)
#Outputting data required by SH module
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/shellconditions_{}.txt'.format(zone),
'w') as output_file:
output_file.write(ion_number_density.ix[2].sum()[zone])
output_file.write(electron_densities[zone])
output_file.write(t_electrons[zone])
output_file.write(heating_rate_data[zone])
output_file.write(w[zone])
output_file.write(self.plasma_parent.time_explosion)
output_file.write(t_rad[zon
|
e])
output_file.write(self.plasma_parent.v_inner[zone])
output_file.write(self.plasma_parent.v_outer[zone])
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/abundances_{}.txt'.format(zone), 'w') as \
output_file:
for element in range(1,31):
try:
number_density = ion
|
_number_density[zone].ix[
element].sum()
except:
number_density = 0.0
output_file.write(number_density)
helium_lines = lines[lines['atomic_number']==2]
helium_lines = helium_lines[helium_lines['ion_number']==0]
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/discradfield_{}.txt'.format(zone), 'w') \
as output_file:
j_blues = pd.DataFrame(j_blues, index=lines.index)
helium_j_blues = j_blues[zone].ix[helium_lines.index]
for value in helium_lines.index:
if (helium_lines.level_number_lower.ix[value]<35):
output_file.write(
int(helium_lines.level_number_lower.ix[value]+1),
int(helium_lines.level_number_upper.ix[value]+1),
j_blues[zone].ix[value])
#Running numerical simulations
for zone, _ in enumerate(electron_densities):
os.rename('He_NLTE_Files/abundances{}.txt'.format(zone),
'He_NLTE_Files/abundances_current.txt')
os.rename('He_NLTE_Files/shellconditions{}.txt'.format(zone),
'He_NLTE_Files/shellconditions_current.txt')
os.rename('He_NLTE_Files/discradfield{}.txt'.format(zone),
'He_NLTE_Files/discradfield_current.txt')
os.system("nlte-solver-module/bin/nlte_solvertest >/dev/null")
os.rename('He_NLTE_Files/abundances_current.txt',
'He_NLTE_Files/abundances{}.txt'.format(zone))
os.rename('He_NLTE_Files/shellconditions_current.txt',
'He_NLTE_Files/shellconditions{}.txt'.format(zone))
os.rename('He_NLTE_Files/discradfield_current.txt',
'He_NLTE_Files/discradfield{}.txt'.format(zone))
os.rename('debug_occs.dat', 'He_NLTE_Files/occs{}.txt'.format(zone))
#Reading in populations from files
helium_population = level_boltzmann_factor.ix[2].copy()
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/discra
|
nmayorov/scipy
|
scipy/special/_precompute/wrightomega.py
|
Python
|
bsd-3-clause
| 979
| 0
|
import numpy as np
try:
import mpmath # type: ignore[import]
except ImportError:
pass
def mpmath_wrightomega(x):
return mpmath.lambertw(mpmath.exp(x), mpmath.mpf('-0.5'))
def wrightomega_series_error(x):
series = x
desired = mpmath_wrightomega(x)
return abs(series - desired) / desired
def wrightomega_exp_error(x):
exponential_approx = mpmath.exp(x)
desired = mpmath_wrightomega(x)
return ab
|
s(exponential_approx - desired) / desired
def main():
desired_error = 2 * np.finfo(float).eps
print('Series Error')
for x in [1e5, 1e10, 1e15, 1e20]:
with mpmath.workdps(100):
error = wrightomega_series_error(x)
print(x, error, error < desired_error)
print('Exp error')
for x in [-10, -25, -50, -100, -200, -400, -700, -740]:
with mpmath.workdps(100):
error = wrightomega_exp_error(x)
|
print(x, error, error < desired_error)
if __name__ == '__main__':
main()
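# Editor's illustrative sketch (not part of the original script): the same
# relative-error check can be run against SciPy's own implementation,
# assuming scipy.special.wrightomega is available in the installed build.
#
#     from scipy.special import wrightomega
#     with mpmath.workdps(100):
#         x = 1e5
#         rel_err = abs(wrightomega(x) - mpmath_wrightomega(x)) / mpmath_wrightomega(x)
#     print(rel_err)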
|
frreiss/tensorflow-fred
|
tensorflow/python/autograph/converters/return_statements_test.py
|
Python
|
apache-2.0
| 5,879
| 0.012587
|
# Copy
|
right 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS"
|
BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for return_statements module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import functions
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class SingleReturnTest(converter_testing.TestCase):
def assertTransformedEquivalent(self, f, *inputs):
tr = self.transform(f, (functions, return_statements))
self.assertEqual(f(*inputs), tr(*inputs))
def test_straightline(self):
def f(x):
return x * x
self.assertTransformedEquivalent(f, 2)
def test_superfluous_returns(self):
def f():
retval = 1
return retval
retval = 2 # pylint:disable=unreachable
return retval
self.assertTransformedEquivalent(f)
def test_superfluous_returns_adjacent(self):
def f():
return 1
return 2 # pylint:disable=unreachable
self.assertTransformedEquivalent(f)
def test_conditional(self):
def f(x):
if x > 0:
return x
else:
return x * x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_conditional_missing_else(self):
def f(x):
if x > 0:
return x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_conditional_missing_else_then_default(self):
def f(x):
if x > 0:
return x
return x * x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_conditional_else_only_then_default(self):
def f(x):
if x < 0:
x *= x
else:
return x
return x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_conditional_nested(self):
def f(x):
if x > 0:
if x < 5:
return x
else:
return x * x
else:
return x * x * x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
self.assertTransformedEquivalent(f, 5)
def test_context_manager(self):
def f(x):
with ops.name_scope(''):
return x * x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_context_manager_in_conditional(self):
def f(x):
if x > 0:
with ops.name_scope(''):
return x * x
else:
return x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
  def test_conditional_in_context_manager(self):
def f(x):
with ops.name_scope(''):
if x > 0:
return x * x
else:
return x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_no_return(self):
def f(x):
x *= x
self.assertTransformedEquivalent(f, 2)
def test_nested_function(self):
def f(x):
def inner_fn(y):
if y > 0:
return y * y
else:
return y
return inner_fn(x)
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_nested_function_in_control_flow(self):
def f(x):
if x:
def inner_fn(y):
return y
inner_fn(x)
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_for_loop(self):
def f(n):
for _ in range(n):
return 1
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, 0)
def test_while_loop(self):
def f(n):
i = 0
s = 0
while i < n:
i += 1
s += i
if s > 4:
return s
return -1
self.assertTransformedEquivalent(f, 0)
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, 4)
def test_null_return(self):
def f(n):
if n > 4:
return
return
self.assertTransformedEquivalent(f, 4)
self.assertTransformedEquivalent(f, 5)
def test_nested_multiple_withs(self):
def f(x):
v = []
while x > 0:
x -= 1
with ops.name_scope(''):
if x % 2 == 0:
return v
with ops.name_scope(''):
v.append(x)
v.append(x)
return v
self.assertTransformedEquivalent(f, 0)
self.assertTransformedEquivalent(f, 1)
self.assertTransformedEquivalent(f, 3)
self.assertTransformedEquivalent(f, 4)
def test_multiple_returns_in_nested_scope(self):
def f(a):
v = []
for x in a:
x -= 1
if x > 100:
return v
try:
raise ValueError('intentional')
except ValueError: # pylint:disable=bare-except
return v
v.append(x)
return v
self.assertTransformedEquivalent(f, [])
self.assertTransformedEquivalent(f, [1])
self.assertTransformedEquivalent(f, [2])
self.assertTransformedEquivalent(f, [1, 2, 3])
if __name__ == '__main__':
test.main()
|
akaszynski/vtkInterface
|
examples/01-filter/distance-between-surfaces.py
|
Python
|
mit
| 3,157
| 0.001901
|
"""
Distance Between Two Surfaces
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Compute the average thickness between two surfaces.
For example, you might have two surfaces that represent the boundaries of
lithological layers in a subsurface geological model and you want to know the
average thickness of a unit between those boundaries.
We can compute the thickness between the two surfaces using a few different
methods. First, we will demo a method where we compute the normals of the
bottom surface, and then project a ray to the top surface to compute the
distance along the surface normals. Second, we will use a KDTree to compute
the distance from every point in the bottom mesh to its closest point in
the top mesh.
"""
import pyvista as pv
import numpy as np
# A helper to make a random surface
def hill(seed):
mesh = pv.ParametricRandomHills(randomseed=seed, u_res=50, v_res=50,
hillamplitude=0.5)
mesh.rotate_y(-10) # give the surfaces some tilt
return mesh
h0 = hill(1).elevation()
h1 = hill(10)
# Shift one surface
h1.points[:,-1] += 5
h1 = h1.elevation()
###############################################################################
p = pv.Plotter()
p.add_mesh(h0, smooth_shading=True)
p.add_mesh(h1, smooth_shading=True)
p.show_grid()
p.show()
###############################################################################
# Ray Tracing Distance
# ++++++++++++++++++++
#
# Compute normals of lower surface
h0n = h0.compute_normals(point_normals=True, cell_normals=False,
auto_orient_normals=True)
###############################################################################
# Travel along normals to the other surface and compute the thickness on each
# vector.
h0n["distances"] = np.empty(h0.n_points)
for i in range(h0n.n_points):
p = h0n.points[i]
vec = h0n["Normals"][i] * h0n.length
p0 = p - vec
p1 = p + vec
ip, ic = h1.ray_trace(p0, p1, first_point=True)
dist = np.sqrt(np.sum((ip - p)**2))
h0n["distances"][i] = dist
# Replace zeros with nans
mask = h0n["distances"] == 0
h0n["distances"][mask] = np.nan
np.nanmean(h0n["distances"])
###############################################################################
p = pv.Plotter()
p.add_mesh(h0n, scalars="distances", smooth_shading=True
|
)
p.add_mesh(h1, color=True, opacity=0.75, smooth_shading=True)
p.show()
###############################################################################
# Nearest Neighbor Distance
# +++++++++++++++++++++++++
#
# You could also use a KDTree to compare the distance between each point of the
# upper surface and the nearest neighbor of the lower surface.
# This won't be the exact surface to surface distance, but it w
|
ill be
# noticeably faster than a ray trace, especially for large surfaces.
from scipy.spatial import KDTree
tree = KDTree(h1.points)
d, idx = tree.query(h0.points)
h0["distances"] = d
np.mean(d)
###############################################################################
p = pv.Plotter()
p.add_mesh(h0, scalars="distances", smooth_shading=True)
p.add_mesh(h1, color=True, opacity=0.75, smooth_shading=True)
p.show()
|
gogrean/SurfFit
|
pyxel/prof.py
|
Python
|
gpl-3.0
| 13,240
| 0.002115
|
import numpy as np
import matplotlib.pyplot as plt
from .utils import rotate_point, bin_pix2arcmin, get_bkg_exp
from .messages import ErrorMessages
from .image import Image
class Region(object):
def get_bin_vals(self, counts_img, bkg_img,
exp_img, pixels_in_bin, only_net_cts=False):
"""Calculate the number of counts in a bin."""
if not isinstance(counts_img.data, list):
bkg_corr = [None]
counts_img_data, counts_img_hdr = [], []
counts_img_data.append(counts_img.data)
counts_img_hdr.append(counts_img.hdr)
bkg_img_data, bkg_img_hdr = [], []
exp_img_data = []
if isinstance(bkg_img, Image):
if isinstance(bkg_img.data, list):
raise TypeError('If the counts image is a single map, then \
the background image cannot be a list of maps.')
else:
bkg_img_data.append(bkg_img.data)
bkg_img_hdr.append(bkg_img.hdr)
else:
bkg_img_data.append(bkg_img)
bkg_corr = [1.]
bkg_img_hdr = None
if isinstance(exp_img, Image):
if isinstance(exp_img.data, list):
raise TypeError('If the counts image is a single map, then \
the exposure image cannot be a list of maps.')
else:
exp_img_data.append(exp_img.data)
else:
exp_img_data.append(exp_img)
else:
bkg_corr = [None] * len(counts_img.data)
counts_img_data = counts_img.data
counts_img_hdr = counts_img.hdr
if isinstance(exp_img, Image):
if not isinstance(exp_img.data, list):
                    exp_img_data = [exp_img.data] * len(counts_img.data)
elif len(exp_img.data) != len(counts_img.data):
raise ValueError('Exposure map must be either a single \
image, or a list of images with the same length as \
the length of the list of source images.')
else:
exp_img_data = exp_img.data
else:
exp_img_data = exp_img
if isinstance(bkg_img, Image):
if not isinstance(bkg_img.data, list):
                    bkg_img_data = [bkg_img.data] * len(counts_img.data)
                    bkg_img_hdr = [bkg_img.hdr] * len(counts_img.data)
elif len(bkg_img.data) != len(counts_img.data):
raise ValueError('Background map must be either a single \
image, or a list of images with the same length as \
the length of the list of source images.')
else:
bkg_img_data = bkg_img.data
bkg_img_hdr = bkg_img.hdr
else:
bkg_img_data = bkg_img
bkg_corr = [1.] * len(counts_img_data)
bkg_img_hdr = None
raw_cts, net_cts, bkg_cts = 0., 0., 0.
raw_rate, net_rate, bkg_rate = 0., 0., 0.
err_raw_rate_sq, err_net_rate_sq, err_bkg_rate_sq = 0., 0., 0.
exp_raw = 0.
exp_bkg = 0.
for i in range(len(counts_img_data)):
if 'EXPOSURE' in counts_img_hdr[i]:
counts_img_exp = counts_img_hdr[i]['EXPOSURE']
elif 'ONTIME' in counts_img_hdr[i]:
counts_img_exp = counts_img_hdr[i]['ONTIME']
if bkg_img_hdr is not None:
if 'EXPOSURE' in bkg_img_hdr[i]:
bkg_img_exp = bkg_img_hdr[i]['EXPOSURE']
elif 'ONTIME' in bkg_img_hdr[i]:
bkg_img_exp = bkg_img_hdr[i]['ONTIME']
if not bkg_corr[i]:
if not 'BKGNORM' in bkg_img_hdr[i]:
bkgnorm = 1.
else:
bkgnorm = bkg_img_hdr[i]['BKGNORM']
bkg_corr_i = counts_img_exp * bkgnorm / bkg_img_exp
else:
bkg_corr_i = bkg_corr[i]
bkgnorm = 1.
for pixel in pixels_in_bin:
j, k = pixel[0], pixel[1]
if exp_img_data[i][j, k] == 0:
continue
exp_val = exp_img_data[i][j, k]
exp_val_bkg = exp_img_data[i][j, k] * \
bkg_img_exp / counts_img_exp
raw_cts += counts_img_data[i][j, k]
|
bkg_cts += bkg_img_data[i][j, k]
exp_raw += exp_val
exp_bkg += exp_val_bkg / bkgnorm
net_cts += counts_img_data[i][j, k] - bkg_img_data[i][j, k] * bkg_corr_i
if only_net_cts:
return net_cts
raw_rate = raw_cts / exp_raw
bkg_rate = bkg_cts / exp_bkg
net_rate = raw_rate - bkg_rate
err_raw_rate = np.sqrt(raw_cts) / exp_raw
err_bkg_rate = np.sqrt(bkg_cts) / exp_bkg
e
|
rr_net_rate = np.sqrt(raw_cts / exp_raw**2 + bkg_cts / exp_bkg**2)
return raw_cts, net_cts, bkg_cts, \
raw_rate, err_raw_rate, net_rate, err_net_rate, bkg_rate, err_bkg_rate
def merge_bins(self, counts_img, bkg_img, exp_img,
min_counts, islog=True):
bkg_img, exp_img = get_bkg_exp(counts_img, bkg_img, exp_img)
edges = self.make_edges(islog)
if isinstance(counts_img.data, list):
pixels_in_bins = self.distribute_pixels(edges,
counts_img.data[0].shape[0],
counts_img.data[0].shape[1])
else:
pixels_in_bins = self.distribute_pixels(edges,
counts_img.data.shape[0],
counts_img.data.shape[1])
nbins = len(edges) - 1
npix = len(pixels_in_bins)
bins = []
start_edge = edges[0]
pixels_in_current_bin = []
for i in range(nbins):
end_edge = edges[i+1]
pixels_in_current_bin.extend(
[(pixels_in_bins[j][0], pixels_in_bins[j][1])
for j in range(npix) if pixels_in_bins[j][2] == i])
net_counts = self.get_bin_vals(counts_img, bkg_img, exp_img,
pixels_in_current_bin, only_net_cts=True)
if net_counts < min_counts:
if end_edge == edges[-1] and len(bins) != 0:
bins[-1][2].extend(pixels_in_current_bin)
updated_last_bin = (bins[-1][0], end_edge, bins[-1][2])
bins[-1] = updated_last_bin
elif end_edge == edges[-1] and len(bins) == 0:
error_message = ErrorMessages('001')
raise ValueError(error_message)
else:
continue
else:
bins.append((start_edge, end_edge, pixels_in_current_bin))
start_edge = end_edge
pixels_in_current_bin = []
return bins
def profile(self, counts_img, bkg_img, exp_img, min_counts=50, islog=True):
"""Generate count profiles.
        The box is divided into bins based on a minimum number of counts or a
        minimum S/N, starting from the bottom up, where the bottom is defined
        as the bin starting at the lowest row in
the nonrotated box. E.g., if the box is rotated by 135 deg and it
can be divided into three bins, then the bins will be distributed as:
x
x x
x x
x 1st x
x x x
x x bin x
x 2nd x x
x x x x
x x bin x
x 3rd x x
x x x
x bin x
|
frol/Fling-receiver
|
apps/fling_receiver/views.py
|
Python
|
mit
| 2,528
| 0.007516
|
from coffin.template.response import TemplateResponse
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, redirect
from .forms import FlingReceiverAddForm, FlingReceiverEditForm
from .models import FlingReceiver
def root(request, template_name='root.html'):
return TemplateResponse(request, template_name)
@login_required
def fling_receiver_list(request, template_name='fling_receiver/fling_receiver_list.html'):
fling_receiver_list = FlingReceiver.objects.filter(user=request.user)
return TemplateResponse(request, template_name, {'fling_receiver_list': fling_receiver_list})
@login_required
def fling_receiver_add(re
|
quest, form_class=FlingReceiverAddForm,
template_name='fling_receiver/fling_receiver_add.html'):
form = form_class(request.POST or None, initial={'user': request.user})
if form.is_valid():
form.save()
return redirect('fling_receiver_list')
return TemplateResponse(request, template_name, {'form': form})
@login_required
def fling_receiver_edit(request, fling_receiver_id, form_class=FlingReceiverEditForm,
template_name='fling_receiver/fling_receiver_edit.htm
|
l'):
fling_receiver = get_object_or_404(FlingReceiver, id=fling_receiver_id, user=request.user)
form = form_class(request.POST or None, instance=fling_receiver)
if form.is_valid():
form.save()
return redirect('fling_receiver_list')
return TemplateResponse(request, template_name,
{'fling_receiver': fling_receiver, 'form': form})
@login_required
def fling_receiver_predelete(request, fling_receiver_id,
template_name='fling_receiver/fling_receiver_predelete.html'):
fling_receiver = get_object_or_404(FlingReceiver, id=fling_receiver_id, user=request.user)
return TemplateResponse(request, template_name, {'fling_receiver': fling_receiver})
@login_required
def fling_receiver_delete(request):
try:
fling_receiver_id = int(request.POST['fling_receiver_id'])
except (KeyError, ValueError):
raise Http404
fling_receiver = get_object_or_404(FlingReceiver, id=fling_receiver_id, user=request.user)
fling_receiver.delete()
return redirect('fling_receiver_list')
def fling_receiver_template(request, secret_key,
template_name='fling_receiver/fling_receiver_template.html'):
fling_receiver = get_object_or_404(FlingReceiver, secret_key=secret_key)
return TemplateResponse(request, template_name, {'fling_receiver': fling_receiver})
|
w495/python-video-shot-detector
|
shot_detector/utils/__init__.py
|
Python
|
bsd-3-clause
| 348
| 0
|
# -*- coding: utf8 -*-
"""
Some utils
"""
from __future__ import absolute_import, division, print_function
from .config_arg_par
|
ser import ConfigArgParser
from .lazy_helper import LazyHelper
from .lazy_helper_wrapper import LazyHelperWrapper
from .log_meta import LogMeta
from .log_settings import LogSetting
f
|
rom .repr_dict import ReprDict
|
pyaiot/pyaiot
|
pyaiot/tests/test_messaging.py
|
Python
|
bsd-3-clause
| 2,476
| 0
|
"""pyaiot messaging test module."""
import json
from pytest import mark
from pyaiot.common.messaging import Message
@mark.parametrize('message', [1234, "test", "àéèïôû"])
def test_serialize(message):
serialized = Message.serialize(message)
assert serialized == json.dumps(message, ensure_ascii=False)
def test_new_node():
serialized = Message.new_node('1234')
assert serialized == Message.serialize(
{'type': 'new', 'uid': '1234', 'dst': 'all'})
serialized = Message.new_node('1234', '5678')
assert serialized == Message.serialize(
{'type': 'new', 'uid': '1234', 'dst': '5678'})
def test_out_node():
serialized = Message.out_node('1234')
assert serialized == Message.serialize({'type': 'out', 'uid': '1234'})
def test_reset_node():
serialized = Message.reset_node('1234')
assert ser
|
ialized == Message.serialize({'type': 'reset', 'uid': '1234'})
def test_discover_node():
serialized = Message.discover_node
|
()
assert serialized == Message.serialize({'request': 'discover'})
@mark.parametrize('value', [1234, "test", "àéèïôû"])
def test_update_node(value):
serialized = Message.update_node('1234', 'test', 'value')
assert serialized == Message.serialize(
{'type': 'update', 'uid': '1234', 'endpoint': 'test',
'data': 'value', 'dst': 'all'})
serialized = Message.update_node('1234', 'test', value, '5678')
assert serialized == Message.serialize(
{'type': 'update', 'uid': '1234', 'endpoint': 'test',
'data': value, 'dst': '5678'})
@mark.parametrize('badvalue', [b"test",
bytearray(b"12345"),
bytearray("12345".encode('utf-8')),
'{"test", "test"}',
'{"json": "valid", "content": "invalid"}'])
def test_check_message_bad_json(badvalue):
message, reason = Message.check_message(badvalue)
assert message is None
assert "Invalid message " in reason
def test_check_message_bad_type():
message, reason = Message.check_message('{"type": "test"}')
assert message is None
assert "Invalid message type" in reason
@mark.parametrize('msg_type', ["new", "out", "update", "reset"])
def test_check_message_valid(msg_type):
to_test = json.dumps({"type": msg_type, "data": "test"})
message, reason = Message.check_message(to_test)
assert message is not None
assert reason is None
|
wtbarnes/aia_response
|
make_figures.py
|
Python
|
mit
| 2,177
| 0.034451
|
#name:make_figures.py
#author:Will Barnes
#Description: make some figures useful to notes on AIA response functions
import numpy as np
import matplotlib.pyplot as plt
import seaborn.apionly as sns
def display_aia_response_control_flow():
"""Show the control flow of the IDL programs used to compute AIA response functions"""
|
def plot_aia_response_functions(raw_response_file,fix_response_file):
"""Plot AIA temperature response functions as computed by SSW"""
#Load data
raw_tresp,fix_tresp = np.loadtxt(raw_response_file),np.loadtxt(fix_response_file)
#set labels
aia_labs = [r'$94\,\,\AA$',r'$131\,\,\AA$',r'$171\,\,\AA$',r'$193\,\,\AA$',r'$211\,\,\AA$',r'$335\,\,\AA$']
#Create figure
fig,ax = plt.subplots(1,2,figsize=(16,8))
for i in range(1,7):
#unnormalized
|
ax[0].plot(10**raw_tresp[:,0],raw_tresp[:,i],linewidth=2,linestyle='-',color=sns.color_palette('deep')[i-1],label=aia_labs[i-1])
ax[0].plot(10**fix_tresp[:,0],fix_tresp[:,i],linewidth=2,linestyle='--',color=sns.color_palette('deep')[i-1])
#normalized
ax[1].plot(raw_tresp[:,0],raw_tresp[:,i]/np.max(raw_tresp[:,i]),linewidth=2,linestyle='-',color=sns.color_palette('deep')[i-1])
ax[1].plot(fix_tresp[:,0],fix_tresp[:,i]/np.max(fix_tresp[:,i]),linewidth=2,linestyle='--',color=sns.color_palette('deep')[i-1])
#set plot options
ax[0].set_xscale('log')
ax[0].set_yscale('log')
ax[0].set_xlim([10**5.,10**8.])
ax[0].set_ylim([1e-28,1e-23])
ax[1].set_xlim([5,8])
ax[1].set_ylim([0,1])
#labels
ax[0].set_xlabel(r'$T\,\,\mathrm{(K)}$',fontsize=22)
ax[0].set_ylabel(r'Temperature Response $(\mathrm{DN}\,\mathrm{cm}^{-5}\,\mathrm{s}^{-1}\,\mathrm{pix}^{-1})$',fontsize=22)
ax[1].set_xlabel(r'$\log{T}\,\,\mathrm{(K)}$',fontsize=22)
ax[1].set_ylabel(r'Normalized Temperature Response',fontsize=22)
#legend
ax[0].legend(loc='best',fontsize=14)
plt.tight_layout()
plt.savefig('figures/aia_response_functions.eps',format='eps')
if __name__=='__main__':
plot_aia_response_functions('aia_sample_data/aia_tresponse_raw.dat','aia_sample_data/aia_tresponse_fix.dat')
|
syci/partner-contact
|
base_location/models/res_city_zip.py
|
Python
|
agpl-3.0
| 1,353
| 0
|
# Copyright 2016 Nicolas Bessi, Camptocamp SA
# Copyright 2018 Aitor Bouzas <aitor.bouzas@adaptivecity.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class ResCityZip(models.Model):
"""City/locations completion object"""
_name = "res.city.zip"
_description = __doc__
_order = "name asc"
_rec_name = "display_name"
name = fields.Char("ZIP", required=True)
city_id = fields.Many2one(
"res.city",
"City",
required=True,
auto_join=True,
ondelete="cascade",
index=True,
)
display_name = fields.Char(
compute="_compute_new_display_name", store=True, index=True
)
_sql_constraints = [
(
"name_city_uniq",
"UNIQUE(name,
|
city_id)",
"You already have a zip with that code in the same city. "
"The zip code must be unique within it's city",
)
]
@api.depends("name", "city_id")
def _compute_new_display_name(self):
for rec in self:
name = [rec.name, rec.city_id.name]
if rec.city_id.state_id:
name.append(rec.city_id.state_id.name)
|
if rec.city_id.country_id:
name.append(rec.city_id.country_id.name)
rec.display_name = ", ".join(name)
|
nuxeh/morph
|
morphlib/localrepocache_tests.py
|
Python
|
gpl-2.0
| 6,502
| 0
|
# Copyright (C) 2012-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import urllib2
import os
import cliapp
import fs.memoryfs
import morphlib
import morphlib.gitdir_tests
class FakeApplication(object):
def __init__(self):
self.settings = {
'verbose': True
}
def status(self, msg):
pass
class LocalRepoCacheTests(unittest.TestCase):
def setUp(self):
aliases = ['upstream=git://example.com/#example.com:%s.git']
repo_resolver = morphlib.repoaliasresolver.RepoAliasResolver(aliases)
tarball_base_url = 'http://lorry.example.com/tarballs/'
self.reponame = 'upstream:reponame'
self.repourl = 'git://example.com/reponame'
escaped_url = 'git___example_com_reponame'
self.tarball_url = '%s%s.tar' % (tarball_base_url, escaped_url)
self.cachedir = '/cache/dir'
self.cache_path = '%s/%s' % (self.cachedir, escaped_url)
self.remotes = {}
self.fetched = []
self.removed = []
self.lrc = morphlib.localrepocache.LocalRepoCache(
FakeApplication(), self.cachedir, repo_resolver, tarball_base_url)
self.lrc.fs = fs.memoryfs.MemoryFS()
self.lrc._git = self.fake_git
self.lrc._fetch = self.not_found
self.lrc._mkdtemp = self.fake_mkdtemp
self.lrc._new_cached_repo_instance = self.new_cached_repo_instance
self._mkdtemp_count = 0
def fake_git(self, args, **kwargs):
if args[0] == 'clone':
self.assertEqual(len(args), 5)
remote = args[3]
local = args[4]
self.remotes['origin'] = {'url': remote, 'updates': 0}
self.lrc.fs.makedir(local, recursive=True)
elif args[0:2] == ['remote', 'set-url']:
remote = args[2]
url = args[3]
self.remotes[remote] = {'url': url}
elif args[0:2] == ['config', 'remote.origin.url']:
remote = 'origin'
url = args[2]
self.remotes[remote] = {'url': url}
elif args[0:2] == ['config', 'remote.origin.mirror']:
remote = 'origin'
elif args[0:2] == ['config', 'remote.origin.fetch']:
remote = 'origin'
else:
raise NotImplementedError()
def fake_mkdtemp(self, dirname):
thing = "foo"+str(self._mkdtemp_count)
self._mkdtemp_count += 1
self.lrc.fs.makedir(dirname+"/"+thing)
return thing
def new_cached_repo_instance(self, *args):
with morphlib.gitdir_tests.allow_nonexistant_git_repos():
return morphlib.cachedrepo.CachedRepo(
FakeApplication(), *args)
def not_found(self, url, path):
raise cliapp.AppException('Not found')
def test_has_not_got_shortened_repo_initially(self):
self.assertFalse(self.lrc.has_repo(self.reponame))
def test_has_not_got_absolute_repo_initially(self):
self.assertFalse(self.lrc.has_repo(self.repourl))
def test_caches_shortened_repository_on_request(self):
self.lrc.cache_repo(self.reponame)
self.assertTrue(self.lrc.has_repo(self.reponame))
self.assertTrue(self.lrc.has_repo(self.repourl))
def test_caches_absolute_repository_on_request(self):
self.lrc.cache_repo(self.repourl)
self.assertTrue(self.lrc.has_repo(self.reponame))
self.assertTrue(self.lrc.has_repo(self.repourl))
def test_cachedir_does_not_exist_initially(self):
self.assertFalse(self.lrc.fs.exists(self.cachedir))
def test_creates_cachedir_if_missing(self):
self.lrc.cache_repo(self.repourl)
self.assertTrue(self.lrc.fs.exists(self.cachedir))
def test_happily_caches_same_repo_twice(self):
self.lrc.cache_repo(self.repourl)
self.lrc.cache_repo(self.repourl)
def test_fails_to_cache_when_remote_does_not_exist(self):
def fail(args, **kwargs):
self.lrc.fs.makedir(args[4])
raise cliapp.AppException('')
self.lrc._git = fail
self.assertRaises(morphlib.localrepocache.NoRemote,
self.lrc.cache_repo, self.repourl)
def test_does_not_mind_a_missing_tarball(self):
self.lrc.cache_repo(self.repourl)
self.assertEqual(self.fetched, [])
def test_fetches_tarball_when_it_exists(self):
self.lrc._fetch = lambda url, path: self.fetched.append(url)
self.unpacked_tar = ""
self.mkdir_path = ""
with morphlib.gitdir_tests.monkeypatch(
morphlib.cachedrepo.CachedRepo, 'update', lambda self: None):
self.lrc.cache_repo(self.repourl)
self.assertEqual(self.fetched, [self.tarball_url])
self.assertFalse(self.lrc.fs.exists(self.cache_path + '.tar'))
self.assertEqual(self.remotes['origin']['url'], self.repourl)
def test_gets_cached_shortened_repo(self):
self.lrc.cache_repo(self.reponame)
cached = self.lrc.get_repo(self.reponame)
self.assertTrue(cached is not None)
def test_gets_cached_absolute_repo(self):
self.lrc.cache_repo(self.repourl)
cached = self.lrc.get_repo(self.repourl)
self.assertTrue(cached is not None)
def test_get_repo_raises_exception_if_repo_is_not_cached(self):
self.assertRaises(Exception, self.lrc.get_repo, self.repourl)
def test_escapes_repourl_as_filename(self):
escaped = self.lrc._escape(self.repourl)
        self.assertFalse('/' in escaped)
    def test_noremote_error_message_contains_repo_name(self):
e = morphlib.localrepocache.NoRemote(self.repourl, [])
self.assertTrue(self.repourl in str(e))
def test_avoids_caching_local_repo(self):
self.lrc.fs.makedir('/local/repo', recursive=True)
self.lrc.cache_repo('file:///local/repo')
cached = self.lrc.get_repo('file:///local/repo')
assert cached.path == '/local/repo'
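
A rough sketch of the URL-to-filename escaping that the escaped_url value in
setUp implies (every character outside [A-Za-z0-9] mapped to an underscore);
this is an assumption for illustration, not the actual morphlib implementation:

import re

def escape_url(url):
    # Map every filename-unsafe character to '_'.
    return re.sub(r'[^A-Za-z0-9]', '_', url)

print(escape_url('git://example.com/reponame'))  # git___example_com_reponame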
|
hoangt/core
|
core/tools/generator/wizard/config.py
|
Python
|
agpl-3.0
| 2,087
| 0.008146
|
from PyQt4 import QtCore, QtGui
import os
class ConfigPage(QtGui.QWizardPage):
def __init__(self, templates, parent=None):
super(ConfigPage, self).__init__(parent)
#self.setTitle("Configuration")
#self.setSubTitle("Alter configuration and build your own platform.")
#self.setPixmap(QtGui.QWizard.WatermarkPixmap,
# QtGui.QPixmap(':/images/watermark1.png'))
self.templates = templates
self.view = QtGui.QTreeView()
self.panel = QtGui.QWidget()
self.info = QtGui.QTextBrowser()
self.hsplit = QtGui.QSplitter(QtCore.Qt.Vertical)
self.vsplit = QtGui.QSplitter(QtCore.Qt.Horizontal)
self.hsplit.addWidget(self.panel)
self.hsplit.addWidget(self.info)
self.vsplit.addWidget(self.view)
self.vsplit.addWidget(self.hsplit)
def click(index):
item = index.internalPointer()
self.info.setText(QtCore.QVariant(item.description).toString())
self.model.clicked(item)
self.view.activated.connect(click)
self.view.entered.connect(click)
self.view.clicked.connect(click)
#self.view.setModel(model)
self.layout = QtGui.QGridLayout()
self.layout.addWidget(self.vsplit)
#self.setStyleSheet("* { background: yellow }")
#self.setMaximumHeight(0xFFFFFF)
#self.vsplit.setMaximumHeight(0xFFFFFF)
#self.hsplit.setMaximumHeight(0xFFFFFF)
#self.view.setMaximumHeight(0xFFFFFF)
self.setLayout(self.layout)
#self.hsplit.moveSplitter(340,0)
def initializePage(self):
self.panel.setParent(None)
        self.panel = QtGui.QWidget()
self.hsplit.insertWidget(0, self.panel)
self.model = self.templates.getModel(self.panel)
self.view.setModel(self.model)
self.view.expandAll()
self.view.setColumnWidth(0, 220)
self.view.setColumnWidth(1, 20)
self.setLayout(self.layout)
#self.vsplit.moveSplitter(280,1)
#self.hsplit.moveSplitter(120,1)
|
chrsrds/scikit-learn
|
examples/impute/plot_missing_values.py
|
Python
|
bsd-3-clause
| 5,503
| 0.001454
|
"""
====================================================
Imputing missing values before building an estimator
====================================================
Missing values can be replaced by the mean, the median or the most frequent
value using the basic :class:`sklearn.impute.SimpleImputer`.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Another option is the :class:`sklearn.impute.IterativeImputer`. This uses
round-robin linear regression, treating every variable as an output in
turn. The version implemented assumes Gaussian (output) variables. If your
features are obviously non-Normal, consider transforming them to look more
Normal so as to potentially improve performance.
In addition to using an imputation method, we can also keep an indication of the
missing information using :func:`sklearn.impute.MissingIndicator` which might
carry some information.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
# To use the experimental IterativeImputer, we need to explicitly ask for it:
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline, make_union
from sklearn.impute import SimpleImputer, IterativeImputer, MissingIndicator
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
N_SPLITS = 5
REGRESSOR = RandomForestRegressor(random_state=0)
def get_scores_for_imputer(imputer, X_missing, y_missing):
estimator = make_pipeline(
make_union(imputer, MissingIndicator(missing_values=0)),
REGRESSOR)
impute_scores = cross_val_score(estimator, X_missing, y_missing,
scoring='neg_mean_squared_error',
cv=N_SPLITS)
return impute_scores
def get_results(dataset):
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
full_scores = cross_val_score(REGRESSOR, X_full, y_full,
scoring='neg_mean_squared_error',
cv=N_SPLITS)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
# Estimate the score after replacing missing values by 0
imputer = SimpleImputer(missing_values=0,
strategy='constant',
fill_value=0)
zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
# Estimate the score after imputation (mean strategy) of the missing values
imputer = SimpleImputer(missing_values=0, strategy="mean")
mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
# Estimate the score after iterative imputation of the missing values
imputer = IterativeImputer(missing_values=0,
random_state=0,
n_nearest_features=5,
sample_posterior=True)
iterative_impute_scores = get_scores_for_imputer(imputer,
X_missing,
y_missing)
return ((full_scores.mean(), full_scores.std()),
(zero_impute_scores.mean(), zero_impute_scores.std()),
(mean_impute_scores.mean(), mean_impute_scores.std()),
(iterative_impute_scores.mean(), iterative_impute_scores.std()))
results_diabetes = np.array(get_results(load_diabetes()))
mses_diabetes = results_diabetes[:, 0] * -1
stds_diabetes = results_diabetes[:, 1]
results_boston = np.array(get_results(load_boston()))
mses_boston = results_boston[:, 0] * -1
stds_boston = results_boston[:, 1]
n_bars = len(mses_diabetes)
xval = np.arange(n_bars)
x_labels = ['Full data',
'Zero imputation',
'Mean Imputation',
'Multivariate Imputation']
colors = ['r', 'g', 'b', 'orange']
# plot diabetes results
plt.figure(figsize=(12, 6))
ax1 = plt.subplot(121)
for j in xval:
ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j],
color=colors[j], alpha=0.6, align='center')
ax1.set_title('Imputation Techniques with Diabetes Data')
ax1.set_xlim(left=np.min(mses_diabetes) * 0.9,
right=np.max(mses_diabetes) * 1.1)
ax1.set_yticks(xval)
ax1.set_xlabel('MSE')
ax1.invert_yaxis()
ax1.set_yticklabels(x_labels)
# plot boston results
ax2 = plt.subplot(122)
for j in xval:
ax2.barh(j, mses_boston[j], xerr=stds_boston[j],
color=colors[j], alpha=0.6, align='center')
ax2.set_title('Imputation Techniques with Boston Data')
ax2.set_yticks(xval)
ax2.set_xlabel('MSE')
ax2.invert_yaxis()
ax2.set_yticklabels([''] * n_bars)
plt.show()
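
A small standalone sketch of the SimpleImputer strategies compared above, run on
a toy array that uses the same convention as the script (0 marks a missing
value):

import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, 2.0],
              [0.0, 4.0],
              [7.0, 0.0]])                      # zeros stand for missing values
mean_imputer = SimpleImputer(missing_values=0, strategy='mean')
print(mean_imputer.fit_transform(X))            # zeros become per-column means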
|
twtrubiks/pillow-examples
|
resizePicture/resize.py
|
Python
|
mit
| 2,818
| 0.021127
|
#coding=utf8
import os, sys
from PIL import Image
from PIL import ImageFile
import glob
output_dir = "resized_images"
def main():
    # find all image files under the current path
types = ('*.png', '*.jpg', '*.jpeg' ,'*.bmp' ,'*.gif') # the tuple of file types
all_image_files = []
for files in types:
all_image_files.extend(glob.glob(files))
# ImageFile.LOAD_TRUNCATED_IMAGES = True
    # create the output directory
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if(sys.argv[1] == "size"):
        # scale images by a percentage ratio  # python resize.py size 200
if(len(sys.argv) == 3):
ratio = sys.argv[2]
setRatio(ratio, all_image_files)
        # resize images to a specific size  # python resize.py size 640 360
if(len(sys.argv) == 4):
basewidth, baseheight = sys.argv[2],sys.argv[3]
setSize(basewidth, baseheight, all_image_files)
    # rotate images  # python resize.py rotate 90
    # default is a counterclockwise rotation; change the value to -90 for clockwise
if(sys.argv[1] == "rotate"):
degree = sys.argv[2]
setRotate(float(degree), all_image_files)
def setSize(basewidth, baseheight, all_image_files):
for image_file in all_image_files:
print "Processing", image_file, "..."
img = Image.open(image_file)
img = img.resize( (int(basewidth), int(baseheight)) ,Image.ANTIALIAS)
# Image.ANTIALIAS (a high-quality downsampling filter). If omitted,
        # or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
image_filename = output_dir + "/" + image_file
print "Save to " + image_filename
img.save(image_filename)
def setRatio(ratio, all_image_files):
for image_file in all_image_files:
print "Processing", image_file, "..."
img = Image.open(image_file)
width, height = img.size
        width = int((width * float(ratio)/100))
height = int((height * float(ratio)/100))
# Image.resize(size, resample=0)
# size – The requested size in pixels, as a 2-tuple: (width, height). width and height type int
img = img.resize( (width ,height) ,Image.ANTIALIAS)
image_filename = output_dir + "/" + image_file
print "Save to " + image_filename
img.save(image_filename)
def setRotate(degree, all_image_files):
for image_file in all_image_files:
print "Processing", image_file, "..."
img = Image.open(image_file)
img_rotate = img.rotate(degree)
image_filename = output_dir + "/" + image_file
print "Save to " + image_filename
img_rotate.save(image_filename)
if __name__ == "__main__":
main()
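
The script above targets Python 2 (print statements). A minimal Python 3 sketch
of the same ratio-based resize, assuming Pillow is installed and a hypothetical
input file name; recent Pillow versions use Image.LANCZOS where the script uses
the deprecated Image.ANTIALIAS:

import os
from PIL import Image

ratio = 50                                    # percent, as in "resize.py size 50"
img = Image.open("input.jpg")                 # hypothetical input file
width = int(img.width * ratio / 100)
height = int(img.height * ratio / 100)
os.makedirs("resized_images", exist_ok=True)  # same output directory as above
img.resize((width, height), Image.LANCZOS).save("resized_images/input.jpg")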
|
ardi69/pyload-0.4.10
|
pyload/plugin/crypter/MegaRapidCz.py
|
Python
|
gpl-3.0
| 779
| 0.017972
|
# -*- coding: utf-8 -*-
from pyload.plugin.internal.SimpleCrypter import SimpleCrypter
class MegaRapidCz(SimpleCrypter):
__name = "MegaRapidCz"
__type = "crypter"
__version = "0.02"
__pattern = r'http://(?:www\.)?(share|mega)rapid\.cz/slozka/\d+/\w+'
__config = [("use_premium" , "bool", "Use premium account if available" , True),
("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
    __description = """Share-Rapid.com folder decrypter plugin"""
__license = "GPLv3"
__authors = [("zoidberg", "zoidberg@mujmail.cz")]
LINK_PATTERN = r'<td class="soubor".*?><a href="(.+?)">'
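
A quick illustration of what LINK_PATTERN extracts, run against a hypothetical
HTML fragment (not taken from the real site):

import re

LINK_PATTERN = r'<td class="soubor".*?><a href="(.+?)">'
html = '<td class="soubor" id="1"><a href="http://megarapid.cz/soubor/42/archive.zip">archive.zip</a></td>'
print(re.findall(LINK_PATTERN, html))   # ['http://megarapid.cz/soubor/42/archive.zip']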
|
Spferical/matrix-chatbot
|
main.py
|
Python
|
mit
| 17,094
| 0.000059
|
#!/usr/bin/env python3
import time
from matrix_client.client import MatrixClient
from matrix_client.api import MatrixRequestError
from requests.exceptions import ConnectionError, Timeout
import argparse
import random
from configparser import ConfigParser
import re
import traceback
import urllib.parse
import logging
import os
import sys
import signal
import queue
import codecs
from database import MarkovDatabaseBrain
COMMANDS = [
'!rate'
]
def sigterm_handler(_signo, _stack_frame):
"""Raises SystemExit(0), causing everything to cleanly shut down."""
sys.exit(0)
class ConfigParser(ConfigParser):
# allow case-sensitive option names
# needed for saving per-room response rates
optionxform = str
class Backend(object):
"""Interface for chat backends."""
def __init__(self, brain_file):
pass
def train_file(self, filename):
"""Trains the chat backend on the given file."""
with codecs.open(filename, encoding='utf8') as train_file:
for line in train_file:
self.learn(line)
def learn(self, line):
"""Updates the chat backend based on the given line of input."""
pass
def save(self):
"""Saves the backend to disk, if needed."""
pass
def reply(self, message):
"""Generates a reply to the given message."""
return "(dummy response)"
class MarkovBackend(Backend):
"""Chat backend using markov chains."""
def __init__(self, brain_file):
self.brain = MarkovDatabaseBrain(brain_file)
def sanitize(self, word):
"""Removes any awkward whitespace characters from the given word.
Removes '\n', '\r', and '\\u2028' (unicode newline character)."""
return word.replace('\n', '').replace('\r', '').replace('\u2028', '')
def train_file(self, filename):
with codecs.open(filename, encoding='utf8') as train_file:
for line in train_file:
self.learn(line)
self.save()
def learn(self, line):
line = line.strip()
words = line.split(' ')
words = [self.sanitize(word) for word in words]
for i in range(len(words) - 2):
prefix = words[i], words[i + 1]
follow = words[i + 2]
self.brain.add(prefix, follow)
def save(self):
self.brain.save()
def get_random_next_link(self, word1, word2):
"""Gives a word that could come after the two provided.
Words that follow the two given words are weighted by how frequently
they appear after them.
"""
possibilities = self.brain.get_followers((word1, word2))
if not possibilities:
return None
total = 0
for p in possibilities:
total += possibilities[p]
num = random.randint(1, total)
total = 0
for p in possibilities:
total += possibilities[p]
if total >= num:
break
return p
def reply(self, message):
if self.brain.is_empty():
return ''
seed = None
# try to seed reply from the message
possible_seed_words = message.split()
while seed is None and possible_seed_words:
message_word = random.choice(possible_seed_words)
seeds = list(self.brain.get_pairs_containing_word_ignoring_case(
message_word))
if seeds:
seed = random.choice(seeds)
else:
possible_seed_words.remove(message_word)
# we couldn't seed the reply from the input
# fall back to random seed
if seed is None:
seed = self.brain.get_three_random_words()
words = list(seed)
while self.brain.contains_pair((words[-2], words[-1])) and \
len(words) < 100:
word = self.get_random_next_link(words[-2], words[-1])
words.append(word)
return ' '.join(words)
class Config(object):
def __init__(self, cfgparser):
self.backend = cfgparser.get('General', 'backend')
self.display_name = cfgparser.get('General', 'display name')
self.learning = cfgparser.getboolean('General', 'learning')
self.username = cfgparser.get('Login', 'username')
self.password = cfgparser.get('Login', 'password')
self.server = cfgparser.get('Login', 'server')
self.default_response_rate = cfgparser.getfloat(
'General', 'default response rate')
self.response_rates = {}
for room_id, rate in cfgparser.items('Response Rates'):
room_id = room_id.replace('-colon-', ':')
self.response_rates[room_id] = float(rate)
def get_response_rate(self, room_id):
"""Returns our response rate for the room with the given room id."""
if room_id in self.response_rates:
return self.response_rates[room_id]
else:
return self.default_response_rate
def write(self):
"""Writes this config back to the file, with any changes reflected."""
cfgparser = ConfigParser()
cfgparser.add_section('General')
cfgparser.set('General', 'default response rate',
str(self.default_response_rate))
cfgparser.set('General', 'backend', self.backend)
cfgparser.set('General', 'display name', self.display_name)
cfgparser.set('General', 'learning', str(self.learning))
cfgparser.add_section('Login')
cfgparser.set('Login', 'username', self.username)
cfgparser.set('Login', 'password', self.password)
cfgparser.set('Login', 'server', self.server)
cfgparser.add_section('Response Rates')
for room_id, rate in list(self.response_rates.items()):
# censor colons because they are a configparser special
# character
room_id = room_id.replace(':', '-colon-')
cfgparser.set('Response Rates', room_id, str(rate))
with open('config.cfg', 'wt') as configfile:
cfgparser.write(configfile)
def get_default_configparser():
"""Returns a ConfigParser object for the default config file."""
config = ConfigParser(allow_no_value=True)
config.add_section('General')
config.set('General', 'default response rate', "0.10")
config.set('General', 'backend', 'markov')
config.set('General', 'display name', 'Markov')
config.set('General', 'learning', 'on')
config.add_section('Login')
config.set('Login', 'username', 'username')
config.set('Login', 'password', 'password')
config.set('Login', 'server', 'http://matrix.org')
config.add_section('Response Rates')
return config
class Bot(object):
"""Handles everything that the bot does."""
def __init__(self, config, chat_backend):
self.config = config
self.client = None
self.chat_backend = chat_backend
self.event_queue = queue.Queue()
self.invite_queue = queue.Queue()
def login(self):
"""Logs onto the server."""
client = MatrixClient(self.config.server)
client.login_with_password_no_sync(
self.config.username, self.config.password)
self.client = client
def get_room(self, event):
"""Returns th
|
e room the given event took place in."""
return self.client.rooms[event['room_id']]
def handle_command(self, event, command, args):
"""Handles the given command, possibly sending a reply to it."""
command = command.lower()
if command == '!rate':
if args:
num = re.match(r'[0-9]*(\.[0-9]+)?(%|)', args[0]).group()
if not num:
self.reply(event, "Error: Could not parse number.")
return
if num[-1] == '%':
rate = float(num[:-1]) / 100
else:
rate = float(num)
self.config.response_rates[event['room_id']] = rate
self.reply(event, "Response rate set to %f." % rate)
else:
rate = self.config.get_response_rate(ev
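
Separately from the bot code above, a standalone sketch of the frequency-weighted
word selection that get_random_next_link describes, expressed here with
random.choices for brevity; the follower counts are hypothetical:

import random

followers = {"cat": 3, "dog": 1}      # follower word -> observed frequency
word = random.choices(list(followers),
                      weights=list(followers.values()), k=1)[0]
print(word)                           # "cat" roughly three times as often as "dog"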
|
val-github/lammps-dev
|
tools/python/neb_combine.py
|
Python
|
gpl-2.0
| 2,172
| 0.020718
|
#!/usr/local/bin/python
# make new dump file by combining snapshots from multiple NEB replica dumps
# Syntax: neb_combine.py -switch arg(s) -switch arg(s) ...
# -o outfile = new dump file
# each snapshot has NEB atoms from all replicas
# -r dump1 dump2 ... = replica dump files of NEB atoms
# can be in any order
# -b dumpfile = background atoms (optional)
# first snapshot in this file used as static non-NEB atoms
import sys,os
path = os.environ["LAMMPS_PYTHON_TOOLS"]
sys.path.append(path)
from dump import dump
# parse args
outfile = ""
backfile = ""
rfiles = []
argv = sys.argv
iarg = 1
narg = len(argv)
while iarg < narg:
if argv[iarg] == "-o":
outfile = argv[iarg+1]
iarg += 2
elif argv[iarg] == "-b":
backfile = argv[iarg+1]
iarg += 2
elif argv[iarg] == "-r":
ilast = iarg + 1
while ilast < narg and argv[ilast][0] != '-': ilast += 1
rfiles = argv[iarg+1:ilast]
iarg = ilast
else: break
if iarg < narg or not outfile or not rfiles:
print "Syntax: neb_combine.py -o outfile -b backfile -r dump1 dump2 ..."
sys.exit()
if os.path.exists(outfile): os.remove(outfile)
# ntotal = total atoms in each snapshot
# reset IDs of atoms in each NEB dump file
ntotal = 0
d = []
for file in rfiles:
one = dump(file)
nnew = one.snaps[0].nselect
idvec = range(ntotal+1,ntotal+nnew+1)
one.setv("id",idvec)
ntotal += nnew
d.append(one)
# nback = additional atoms in each snapshot
# reset IDs of atoms in background file
if backfile:
back = dump(backfile)
t = back.time()
    back.tselect.one(t[0])
nback = back.snaps[0].nselect
idvec = range(ntotal+1,ntotal+nback+1)
back.setv("id",idvec)
else: nback = 0
ntotal += nback
# write out each snapshot
# natoms = ntotal, by overwriting nselect
# add background atoms if requested
times = d[0].time()
for time in times:
d[0].tselect.one(time)
i = d[0].findtime(time)
hold = d[0].snaps[i].nselect
d[0].snaps[i].nselect = ntotal
d[0].write(outfile,1,1)
d[0].snaps[i].nselect = hold
for one in d[1:]:
one.tselect.one(time)
one.write(outfile,0,1)
if backfile: back.write(outfile,0,1)
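
A toy sketch of the ID renumbering idea used above: each replica's atoms receive
a fresh, non-overlapping ID range so the combined snapshot has unique IDs. The
replica sizes here are hypothetical:

replica_sizes = [4, 3, 5]                        # atoms selected per replica dump
ntotal = 0
for nnew in replica_sizes:
    idvec = list(range(ntotal + 1, ntotal + nnew + 1))
    print(idvec)                                 # [1..4], then [5..7], then [8..12]
    ntotal += nnew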
|
crmccreary/openerp_server
|
openerp/tools/test_reports.py
|
Python
|
agpl-3.0
| 13,310
| 0.005259
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Helper functions for reports testing.
Please /do not/ import this file by default, but only explicitly call it
through the code of yaml tests.
"""
import openerp.netsvc as netsvc
import openerp.tools as tools
import logging
import openerp.pooler as pooler
from openerp.tools.safe_eval import safe_eval
from subprocess import Popen, PIPE
import os
import tempfile
_logger = logging.getLogger(__name__)
def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None):
""" Try to render a report <rname> with contents of ids
This function should also check for common pitfalls of reports.
"""
if data is None:
data = {}
if context is None:
context = {}
if rname.startswith('report.'):
rname_s = rname[7:]
else:
rname_s = rname
_logger.log(netsvc.logging.TEST, " - Trying %s.create(%r)", rname, ids)
res = netsvc.LocalService(rname).create(cr, uid, ids, data, context)
if not isinstance(res, tuple):
raise RuntimeError("Result of %s.create() should be a (data,format) tuple, now it is a %s" % \
(rname, type(res)))
(res_data, res_format) = res
if not res_data:
raise ValueError("Report %s produced an empty result!" % rname)
if tools.config['test_report_directory']:
file(os.path.join(tools.config['test_report_directory'], rname+ '.'+res_format), 'wb+').write(res_data)
_logger.debug("Have a %s report for %s, will examine it", res_format, rname)
if res_format == 'pdf':
if res_data[:5] != '%PDF-':
raise ValueError("Report %s produced a non-pdf header, %r" % (rname, res_data[:10]))
res_text = False
try:
fd, rfname = tempfile.mkstemp(suffix=res_format)
os.write(fd, res_data)
os.close(fd)
fp = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', rfname, '-'], shell=False, stdout=PIPE).stdout
res_text = tools.ustr(fp.read())
os.unlink(rfname)
except Exception:
_logger.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")
if res_text is not False:
for line in res_text.split('\n'):
if ('[[' in line) or ('[ [' in line):
_logger.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
# TODO more checks, what else can be a sign of a faulty report?
elif res_format == 'foobar':
# TODO
pass
else:
_logger.warning("Report %s produced a \"%s\" chunk, cannot examine it", rname, res_format)
return False
_logger.log(netsvc.logging.TEST, " + Report %s produced correctly.", rname)
return True
def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
wiz_data=None, wiz_buttons=None,
context=None, our_module=None):
"""Take an ir.action.act_window and follow it until a report is produced
:param action_id: the integer id of an action, or a reference to xml id
of the act_window (can search [our_module.]+xml_id
:param active_model, active_ids: call the action as if it had been launched
from that model+ids (tree/form view action)
:param wiz_data: a dictionary of values to use in the wizard, if needed.
                     They will override (or complete) the default values of the
wizard form.
:param wiz_buttons: a list of button names, or button icon strings, which
should be preferred to press during the wizard.
                        Eg. 'OK' or 'gtk-print'
:param our_module: the name of the calling module (string), like 'account'
"""
if not our_module and isinstance(action_id, basestring):
if '.' in action_id:
our_module = action_id.split('.', 1)[0]
if context is None:
context = {}
else:
context = context.copy() # keep it local
# TODO context fill-up
pool = pooler.get_pool(cr.dbname)
def log_test(msg, *args):
_logger.log(netsvc.logging.TEST, " - " + msg, *args)
datas = {}
if active_model:
datas['model'] = active_model
if active_ids:
datas['ids'] = active_ids
if not wiz_buttons:
wiz_buttons = []
if isinstance(action_id, basestring):
if '.' in action_id:
act_module, act_xmlid = action_id.split('.', 1)
else:
if not our_module:
raise ValueError('You cannot only specify action_id "%s" without a module name' % action_id)
act_module = our_module
act_xmlid = action_id
act_model, act_id = pool.get('ir.model.data').get_object_reference(cr, uid, act_module, act_xmlid)
else:
assert isinstance(action_id, (long, int))
act_model = 'ir.action.act_window' # assume that
act_id = action_id
act_xmlid = '<%s>' % act_id
def _exec_action(action, datas, context):
# taken from client/modules/action/main.py:84 _exec_action()
if isinstance(action, bool) or 'type' not in action:
return
# Updating the context : Adding the context of action in order to use it on Views called from buttons
if datas.get('id',False):
context.update( {'active_id': datas.get('id',False), 'active_ids': datas.get('ids',[]), 'active_model': datas.get('model',False)})
context.update(safe_eval(action.get('context','{}'), context.copy()))
if action['type'] in ['ir.actions.act_window', 'ir.actions.submenu']:
for key in ('res_id', 'res_model', 'view_type', 'view_mode',
'limit', 'auto_refresh', 'search_view', 'auto_search', 'search_view_id'):
datas[key] = action.get(key, datas.get(key, None))
view_id = False
if action.get('views', []):
if isinstance(action['views'],list):
view_id = action['views'][0][0]
datas['view_mode']= action['views'][0][1]
else:
if action.get('view_id', False):
view_id = action['view_id'][0]
elif action.get('view_id', False):
view_id = action['view_id'][0]
assert datas['res_model'], "Cannot use the view without a model"
# Here, we have a view that we need to emulate
log_test("will emulate a %s view: %s#%s",
action['view_type'], datas['res_model'], view_id or '?')
view_res = pool.get(datas['res_model']).fields_view_get(cr, uid, view_id, action['view_type'], context)
assert view_res and view_res.get('arch'), "Did not return any arch for the view"
view_data = {}
if view_res.get('fields',{}).keys():
view_data = pool.get(datas['res_model']).default_get(cr, uid, view_res['fields'].keys(), context)
if datas.get('form'):
view_data.update(datas.get('form'))
if wiz_data:
view_data.update(wiz_data)
_logger.debug("V
|
liberation/django-admin-tools
|
admin_tools/dashboard/modules.py
|
Python
|
mit
| 24,954
| 0.000641
|
"""
Module where admin tools dashboard modules classes are defined.
"""
from django.utils.text import capfirst
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.utils.itercompat import is_iterable
from admin_tools.utils import AppListElementMixin, uniquify
class DashboardModule(object):
"""
Base class for all dashboard modules.
Dashboard modules have the following properties:
    ``enabled``
Boolean that determines whether the module should be enabled in
the dashboard by default or not. Default value: ``True``.
``draggable``
Boolean that determines whether the module can be draggable or not.
Draggable modules can be re-arranged by users. Default value: ``True``.
``collapsible``
Boolean that determines whether the module is collapsible, this
allows users to show/hide module content. Default: ``True``.
``deletable``
Boolean that determines whether the module can be removed from the
dashboard by users or not. Default: ``True``.
``title``
String that contains the module title, make sure you use the django
gettext functions if your application is multilingual.
Default value: ''.
``title_url``
String that contains the module title URL. If given the module
title will be a link to this URL. Default value: ``None``.
``css_classes``
A list of css classes to be added to the module ``div`` class
attribute. Default value: ``None``.
``pre_content``
Text or HTML content to display above the module content.
Default value: ``None``.
``content``
The module text or HTML content. Default value: ``None``.
``post_content``
Text or HTML content to display under the module content.
Default value: ``None``.
``template``
The template to use to render the module.
Default value: 'admin_tools/dashboard/module.html'.
"""
template = 'admin_tools/dashboard/module.html'
enabled = True
draggable = True
collapsible = True
deletable = True
show_title = True
title = ''
title_url = None
css_classes = None
pre_content = None
post_content = None
children = None
id = None
def __init__(self, title=None, **kwargs):
if title is not None:
self.title = title
for key in kwargs:
if hasattr(self.__class__, key):
setattr(self, key, kwargs[key])
self.children = self.children or []
self.css_classes = self.css_classes or []
# boolean flag to ensure that the module is initialized only once
self._initialized = False
def init_with_context(self, context):
"""
Like for the :class:`~admin_tools.dashboard.Dashboard` class, dashboard
modules have a ``init_with_context`` method that is called with a
``django.template.RequestContext`` instance as unique argument.
This gives you enough flexibility to build complex modules, for
example, let's build a "history" dashboard module, that will list the
last ten visited pages::
from admin_tools.dashboard import modules
class HistoryDashboardModule(modules.LinkList):
title = 'History'
def init_with_context(self, context):
request = context['request']
# we use sessions to store the visited pages stack
history = request.session.get('history', [])
for item in history:
self.children.append(item)
# add the current page to the history
history.insert(0, {
'title': context['title'],
'url': request.META['PATH_INFO']
})
if len(history) > 10:
history = history[:10]
request.session['history'] = history
Here's a screenshot of our history item:
.. image:: images/history_dashboard_module.png
"""
pass
def is_empty(self):
"""
Return True if the module has no content and False otherwise.
>>> mod = DashboardModule()
>>> mod.is_empty()
True
>>> mod.pre_content = 'foo'
>>> mod.is_empty()
False
>>> mod.pre_content = None
>>> mod.is_empty()
True
>>> mod.children.append('foo')
>>> mod.is_empty()
False
>>> mod.children = []
>>> mod.is_empty()
True
"""
return self.pre_content is None and \
self.post_content is None and \
len(self.children) == 0
def render_css_classes(self):
"""
Return a string containing the css classes for the module.
>>> mod = DashboardModule(enabled=False, draggable=True,
... collapsible=True, deletable=True)
>>> mod.render_css_classes()
'dashboard-module disabled draggable collapsible deletable'
>>> mod.css_classes.append('foo')
>>> mod.render_css_classes()
'dashboard-module disabled draggable collapsible deletable foo'
>>> mod.enabled = True
>>> mod.render_css_classes()
'dashboard-module draggable collapsible deletable foo'
"""
ret = ['dashboard-module']
if not self.enabled:
ret.append('disabled')
if self.draggable:
ret.append('draggable')
if self.collapsible:
ret.append('collapsible')
if self.deletable:
ret.append('deletable')
ret += self.css_classes
return ' '.join(ret)
def _prepare_children(self):
pass
class Group(DashboardModule):
"""
Represents a group of modules, the group can be displayed in tabs,
accordion, or just stacked (default).
As well as the :class:`~admin_tools.dashboard.modules.DashboardModule`
properties, the :class:`~admin_tools.dashboard.modules.Group`
has two extra properties:
``display``
A string determining how the group should be rendered, this can be one
of the following values: 'tabs' (default), 'accordion' or 'stacked'.
``force_show_title``
Default behaviour for Group module is to force children to always show
the title if Group has ``display`` = ``stacked``. If this flag is set
        to ``False``, children titles are shown according to their ``show_title``
        property. Note that in this case it is the children's responsibility to
        have meaningful content if no title is shown.
Here's an example of modules group::
from admin_tools.dashboard import modules, Dashboard
class MyDashboard(Dashboard):
def __init__(self, **kwargs):
Dashboard.__init__(self, **kwargs)
self.children.append(modules.Group(
title="My group",
display="tabs",
children=[
modules.AppList(
title='Administration',
models=('django.contrib.*',)
),
modules.AppList(
title='Applications',
exclude=('django.contrib.*',)
)
]
))
The screenshot of what this code produces:
.. image:: images/dashboard_module_group.png
"""
force_show_title = True
template = 'admin_tools/dashboard/modules/group.html'
display = 'tabs'
def init_with_context(self, context):
if self._initialized:
return
for module in self.children:
# to simplify the whole stuff, modules have some limitations,
# they cannot be dragged, collapsed or closed
module.collapsible = False
module.draggable = False
|
Abdoctor/behave
|
behave/formatter/plain.py
|
Python
|
bsd-2-clause
| 4,689
| 0.000213
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from behave.formatter.base import Formatter
from behave.model_describe import ModelPrinter
from behave.textutil import make_indentation
# -----------------------------------------------------------------------------
# CLASS: PlainFormatter
# -----------------------------------------------------------------------------
class PlainFormatter(Formatter):
"""
Provides a simple plain formatter without coloring/formatting.
The formatter displays now also:
* multi-line text
|
(doc-strings)
* table
* tags (maybe)
"""
name = "plain"
description = "Very basic formatter with maximum compatibility"
SHOW_MULTI_LINE = True
SHOW_TAGS = False
    SHOW_ALIGNED_KEYWORDS = False
DEFAULT_INDENT_SIZE = 2
def __init__(self, stream_opener, config, **kwargs):
super(PlainFormatter, self).__init__(stream_opener, config)
self.steps = []
self.show_timings = config.show_timings
self.show_multiline = config.show_multiline and self.SHOW_MULTI_LINE
self.show_aligned_keywords = self.SHOW_ALIGNED_KEYWORDS
self.show_tags = self.SHOW_TAGS
self.indent_size = self.DEFAULT_INDENT_SIZE
# -- ENSURE: Output stream is open.
self.stream = self.open()
self.printer = ModelPrinter(self.stream)
# -- LAZY-EVALUATE:
self._multiline_indentation = None
@property
def multiline_indentation(self):
if self._multiline_indentation is None:
offset = 0
if self.show_aligned_keywords:
offset = 2
indentation = make_indentation(3 * self.indent_size + offset)
self._multiline_indentation = indentation
return self._multiline_indentation
def reset_steps(self):
self.steps = []
def write_tags(self, tags, indent=None):
if tags and self.show_tags:
indent = indent or ""
text = " @".join(tags)
self.stream.write(u"%s@%s\n" % (indent, text))
# -- IMPLEMENT-INTERFACE FOR: Formatter
def feature(self, feature):
self.reset_steps()
self.write_tags(feature.tags)
self.stream.write(u"%s: %s\n" % (feature.keyword, feature.name))
def background(self, background):
self.reset_steps()
indent = make_indentation(self.indent_size)
text = u"%s%s: %s\n" % (indent, background.keyword, background.name)
self.stream.write(text)
def scenario(self, scenario):
self.reset_steps()
self.stream.write(u"\n")
indent = make_indentation(self.indent_size)
text = u"%s%s: %s\n" % (indent, scenario.keyword, scenario.name)
self.write_tags(scenario.tags, indent)
self.stream.write(text)
def step(self, step):
self.steps.append(step)
def result(self, result):
"""
Process the result of a step (after step execution).
:param result:
"""
step = self.steps.pop(0)
indent = make_indentation(2 * self.indent_size)
if self.show_aligned_keywords:
# -- RIGHT-ALIGN KEYWORDS (max. keyword width: 6):
text = u"%s%6s %s ... " % (indent, step.keyword, step.name)
else:
text = u"%s%s %s ... " % (indent, step.keyword, step.name)
self.stream.write(text)
status_text = result.status.name
if self.show_timings:
status_text += " in %0.3fs" % step.duration
if result.error_message:
self.stream.write(u"%s\n%s\n" % (status_text, result.error_message))
else:
self.stream.write(u"%s\n" % status_text)
if self.show_multiline:
if step.text:
self.doc_string(step.text)
if step.table:
self.table(step.table)
def eof(self):
self.stream.write("\n")
# -- MORE: Formatter helpers
def doc_string(self, doc_string):
self.printer.print_docstring(doc_string, self.multiline_indentation)
def table(self, table):
self.printer.print_table(table, self.multiline_indentation)
# -----------------------------------------------------------------------------
# CLASS: Plain0Formatter
# -----------------------------------------------------------------------------
class Plain0Formatter(PlainFormatter):
"""
Similar to old plain formatter without support for:
* multi-line text
* tables
* tags
"""
name = "plain0"
description = "Very basic formatter with maximum compatibility"
SHOW_MULTI_LINE = False
SHOW_TAGS = False
SHOW_ALIGNED_KEYWORDS = False
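
A quick sketch of the right-aligned keyword formatting that result() uses (the
'%6s' field), printed for a few hypothetical steps:

indent = " " * 4
steps = [("Given", "a user"), ("When", "they log in"), ("Then", "they see the page")]
for keyword, name in steps:
    print("%s%6s %s ... passed" % (indent, keyword, name))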
|
alxgu/ansible
|
lib/ansible/modules/network/fortios/fortios_firewall_DoS_policy.py
|
Python
|
gpl-3.0
| 14,026
| 0.001497
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_DoS_policy
short_description: Configure IPv4 DoS policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and DoS_policy category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_DoS_policy:
description:
- Configure IPv4 DoS policies.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
anomaly:
description:
- Anomaly name.
suboptions:
action:
description:
- Action taken when the threshold is reached.
choices:
- pass
- block
log:
description:
- Enable/disable anomaly logging.
choices:
- enable
- disable
name:
description:
- Anomaly name.
required: true
quarantine:
description:
- Quarantine method.
choices:
- none
- attacker
quarantine-expiry:
description:
- Duration of quarantine. (Format ###d##h##m, minimum 1m, maximum 364d23h59m, default = 5m). Requires quarantine set to attacker.
quarantine-log:
description:
- Enable/disable quarantine logging.
choices:
- disable
- enable
status:
description:
- Enable/disable this anomaly.
choices:
- disable
- enable
threshold:
description:
- Anomaly threshold. Number of detected instances per minute that triggers the anomaly action.
threshold(default):
description:
- Number of detected instances per minute which triggers action (1 - 2147483647, default = 1000). Note that each anomaly has a
different threshold value assigned to it.
comments:
            description:
- Comment.
        dstaddr:
description:
- Destination address name from available addresses.
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name.
required: true
interface:
description:
- Incoming interface name from available interfaces. Source system.zone.name system.interface.name.
policyid:
description:
- Policy ID.
required: true
service:
description:
- Service object from available options.
suboptions:
name:
description:
- Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
srcaddr:
description:
- Source address name from available addresses.
suboptions:
name:
description:
- Service name. Source firewall.address.name firewall.addrgrp.name.
required: true
status:
description:
- Enable/disable this policy.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv4 DoS policies.
fortios_firewall_DoS_policy:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_DoS_policy:
state: "present"
anomaly:
-
action: "pass"
log: "enable"
name: "default_name_6"
quarantine: "none"
quarantine-expiry: "<your_own_value>"
quarantine-log: "disable"
status: "disable"
threshold: "11"
threshold(default): "12"
comments: "<your_own_value>"
dstaddr:
-
name: "default_name_15 (source firewall.address.name firewall.addrgrp.name)"
interface: "<your_own_value> (source system.zone.name system.interface.name)"
policyid: "17"
service:
-
name: "default_name_19 (source firewall.service.custom.name firewall.service.group.name)"
srcaddr:
-
name: "default_name_21 (source firewall.address.name firewall.addrgrp.name)"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the l
|
tal-nino/edwin
|
edwinServer/common/database_all.py
|
Python
|
apache-2.0
| 267
| 0
|
# -*- coding: utf-8 -*-
'''
'''
from __future__ import absolute_import
from __future__ import with_statement
from . import database_meta
from . import database_tera
def closeAllConnections():
    database_meta.closeConnection()
database_tera.closeConnection()
|
alshedivat/tensorflow
|
tensorflow/python/ops/check_ops.py
|
Python
|
apache-2.0
| 47,658
| 0.005267
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Asserts and Boolean Checks.
See the [Asserts and
checks](https://tensorflow.org/api_guides/python/check_ops) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
NUMERIC_TYPES = frozenset(
[dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16, dtypes.int32,
dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32, dtypes.quint8,
dtypes.complex64])
__all__ = [
'assert_negative',
'assert_positive',
'assert_proper_iterable',
'assert_non_negative',
'assert_non_positive',
'assert_equal',
'assert_none_equal',
'assert_near',
'assert_integer',
'assert_less',
'assert_less_equal',
'assert_greater',
'assert_greater_equal',
'assert_rank',
'assert_rank_at_least',
'assert_rank_in',
'assert_same_float_dtype',
'assert_scalar',
'assert_type',
'is_non_decreasing',
'is_numeric_tensor',
'is_strictly_increasing',
]
def _maybe_constant_value_string(t):
if not isinstance(t, ops.Tensor):
return str(t)
const_t = tensor_util.constant_value(t)
if const_t is not None:
return str(const_t)
return t
def _assert_static(condition, data):
"""Raises a InvalidArgumentError with as much information as possible."""
if not condition:
data_static = [_maybe_constant_value_string(x) for x in data]
raise errors.InvalidArgumentError(node_def=None, op=None,
message='\n'.join(data_static))
def _shape_and_dtype_str(tensor):
"""Returns a string containing tensor's shape and dtype."""
return 'shape=%s dtype=%s' % (tensor.shape, tensor.dtype.name)
@tf_export(
'debugging.assert_proper_iterable',
v1=['debugging.assert_proper_iterable', 'assert_proper_iterable'])
@deprecation.deprecated_endpoints('assert_proper_iterable')
def assert_proper_iterable(values):
"""Static assert that values is a "proper" iterable.
`Ops` that expect iterables of `Tensor` can call this to validate input.
Useful since `Tensor`, `ndarray`, byte/text type are all iterables themselves.
Args:
values: Object to be checked.
Raises:
TypeError: If `values` is not iterable or is one of
`Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
"""
unintentional_iterables = (
(ops.Tensor, sparse_tensor.SparseTensor, np.ndarray)
+ compat.bytes_or_text_types
)
if isinstance(values, unintentional_iterables):
raise TypeError(
'Expected argument "values" to be a "proper" iterable. Found: %s' %
type(values))
if not hasattr(values, '__iter__'):
raise TypeError(
'Expected argument "values" to be iterable. Found: %s' % type(values))
@tf_export(
    'debugging.assert_negative',
v1=['debugging.assert_negative', 'assert_negative'])
@deprecation.deprecated_endpoints('assert_negative')
def assert_negative(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x < 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_negative(x)]):
output = tf.reduce_sum(x)
```
  Negative means, for every element `x[i]` of `x`, we have `x[i] < 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_negative".
Returns:
Op raising `InvalidArgumentError` unless `x` is all negative.
"""
message = message or ''
with ops.name_scope(name, 'assert_negative', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
if context.executing_eagerly():
name = _shape_and_dtype_str(x)
else:
name = x.name
data = [
message,
'Condition x < 0 did not hold element-wise:',
'x (%s) = ' % name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less(x, zero, data=data, summarize=summarize)
@tf_export(
'debugging.assert_positive',
v1=['debugging.assert_positive', 'assert_positive'])
@deprecation.deprecated_endpoints('assert_positive')
def assert_positive(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x > 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_positive(x)]):
output = tf.reduce_sum(x)
```
Positive means, for every element `x[i]` of `x`, we have `x[i] > 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_positive".
Returns:
Op raising `InvalidArgumentError` unless `x` is all positive.
"""
message = message or ''
with ops.name_scope(name, 'assert_positive', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
if context.executing_eagerly():
name = _shape_and_dtype_str(x)
else:
name = x.name
data = [
message, 'Condition x > 0 did not hold element-wise:',
'x (%s) = ' % name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less(zero, x, data=data, summarize=summarize)
@tf_export(
'debugging.assert_non_negative',
v1=['debugging.assert_non_negative', 'assert_non_negative'])
@deprecation.deprecated_endpoints('assert_non_negative')
def assert_non_negative(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x >= 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_non_negative(x)]):
output = tf.reduce_sum(x)
```
Non-negative means, for every element `x[i]` of `x`, we have `x[i] >= 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_non_negative".
Returns:
Op raising `InvalidArgumentError` unless `x` is all non-negative.
"""
message = message or ''
w
|
smurfix/parsimonious
|
parsimonious/nodes.py
|
Python
|
mit
| 13,036
| 0.00069
|
"""Nodes that make up parse trees
Parsing spits out a tree of these, which you can then tell to walk itself and
spit out a useful value. Or you can walk it yourself; the structural attributes
are public.
"""
# TODO: If this is slow, think about using cElementTree or something.
from inspect import isfunction
from sys import version_info, exc_info
from parsimonious.exceptions import VisitationError, UndefinedLabel
from parsimonious.utils import StrAndRepr
from six import reraise, python_2_unicode_compatible, with_metaclass, iteritems
@python_2_unicode_compatible
class Node(StrAndRepr):
"""A parse tree node
Consider these immutable once constructed. As a side effect of a
memory-saving strategy in the cache, multiple references to a single
``Node`` might be returned in a single parse tree. So, if you start
messing with one, you'll see surprising parallel changes pop up elsewhere.
My philosophy is that parse trees (and their nodes) should be
representation-agnostic. That is, they shouldn't get all mixed up with what
the final rendered form of a wiki page (or the intermediate representation
of a programming language, or whatever) is going to be: you should be able
to parse once and render several representations from the tree, one after
another.
"""
# I tried making this subclass list, but it got ugly. I had to construct
# invalid ones and patch them up later, and there were other problems.
__slots__ = ['expr_name', # The name of the expression that generated me
'full_text', # The full text fed to the parser
'start', # The position in the text where that expr started matching
                 'end', # The position after start where the expr first didn't
# match. [start:end] follow Python slice conventions.
'children'] # List of child parse tree nodes
def __init__(self, expr_name, full_text, start, end, children=None):
self.expr_name = expr_name
self.full_text = full_text
self.start = start
self.end = end
self.children = children or []
def __iter__(self):
"""Support looping over my children and doing tuple unpacks on me.
It can be very handy to unpack nodes in arg lists; see
:class:`PegVisitor` for an example.
"""
return iter(self.children)
@property
def text(self):
"""Return the text this node matched."""
return self.full_text[self.start:self.end]
# From here down is just stuff for testing and debugging.
def prettily(self, error=None):
"""Return a unicode, pretty-printed representation of me.
:arg error: The node to highlight because an error occurred there
"""
# TODO: If a Node appears multiple times in the tree, we'll point to
# them all. Whoops.
def indent(text):
return '\n'.join((' ' + line) for line in text.splitlines())
ret = [u'<%s%s matching "%s">%s' % (
self.__class__.__name__,
(' called "%s"' % self.expr_name) if self.expr_name else '',
self.text,
' <-- *** We were here. ***' if error is self else '')]
for n in self:
ret.append(indent(n.prettily(error=error)))
return '\n'.join(ret)
def __str__(self):
"""Return a compact, human-readable representation of me."""
return self.prettily()
def __eq__(self, other):
"""Support by-value deep comparison with other nodes for testing."""
return (other is not None and
self.expr_name == other.expr_name and
self.full_text == other.full_text and
self.start == other.start and
self.end == other.end and
self.children == other.children)
def __ne__(self, other):
return not self == other
def __repr__(self, top_level=True):
"""Return a bit of code (though not an expression) that will recreate
me."""
# repr() of unicode flattens everything out to ASCII, so we don't need
# to explicitly encode things afterward.
ret = ["s = %r" % self.full_text] if top_level else []
ret.append("%s(%r, s, %s, %s%s)" % (
self.__class__.__name__,
self.expr_name,
self.start,
self.end,
(', children=[%s]' %
', '.join([c.__repr__(top_level=False) for c in self.children]))
if self.children else ''))
return '\n'.join(ret)
class RegexNode(Node):
"""Node returned from a ``Regex`` expression
Grants access to the ``re.Match`` object, in case you want to access
capturing groups, etc.
"""
__slots__ = ['match']
class RuleDecoratorMeta(type):
def __new__(metaclass, name, bases, namespace):
def unvisit(name):
"""Remove any leading "visit_" from a method name."""
return name[6:] if name.startswith('visit_') else name
methods = [v for k, v in iteritems(namespace) if
hasattr(v, '_rule') and isfunction(v)]
if methods:
from parsimonious.grammar import Grammar # circular import dodge
methods.sort(key=(lambda x: x.func_code.co_firstlineno)
if version_info[0] < 3 else
(lambda x: x.__code__.co_firstlineno))
# Possible enhancement: once we get the Grammar extensibility story
# solidified, we can have @rules *add* to the default grammar
# rather than pave over it.
namespace['grammar'] = Grammar(
'\n'.join('{name} = {expr}'.format(name=unvisit(m.__name__),
expr=m._rule)
for m in methods))
return super(RuleDecoratorMeta,
metaclass).__new__(metaclass, name, bases, namespace)
class NodeVisitor(with_metaclass(RuleDecoratorMeta, object)):
    """A shell for writing things that turn parse trees into something useful
    Performs a depth-first traversal of an AST. Subclass this, add methods for
    each expr you care about, instantiate, and call
``visit(top_node_of_parse_tree)``. It'll return the useful stuff. This API
is very similar to that of ``ast.NodeVisitor``.
These could easily all be static methods, but that would add at least as
much weirdness at the call site as the ``()`` for instantiation. And this
    way, we support subclasses that require state: options, for example,
or a symbol table constructed from a programming language's AST.
We never transform the parse tree in place, because...
* There are likely multiple references to the same ``Node`` object in a
parse tree, and changes to one reference would surprise you elsewhere.
* It makes it impossible to report errors: you'd end up with the "error"
arrow pointing someplace in a half-transformed mishmash of nodes--and
that's assuming you're even transforming the tree into another tree.
Heaven forbid you're making it into a string or something else.
"""
#: The :term:`default grammar`: the one recommended for use with this
#: visitor. If you populate this, you will be able to call
#: :meth:`NodeVisitor.parse()` as a shortcut.
grammar = None
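    # A minimal usage sketch (illustrative only: the grammar, the rule names
    # and the visit_* signature are assumptions based on the upstream
    # parsimonious API, not part of this file):
    #
    #     from parsimonious.grammar import Grammar
    #
    #     class PairVisitor(NodeVisitor):
    #         grammar = Grammar('pair = key "=" value\n'
    #                           'key = ~"[a-z]+"\n'
    #                           'value = ~"[0-9]+"')
    #
    #         def visit_pair(self, node, visited_children):
    #             key, _, value = visited_children
    #             return (key, value)
    #
    #         def generic_visit(self, node, visited_children):
    #             return node.text
    #
    #     PairVisitor().parse('a=1')   # -> ('a', '1')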
#: Classes of exceptions you actually intend to raise during visitation
    #: and which should propagate out of the visitor. These will not be
#: wrapped in a VisitationError when they arise.
unwrapped_exceptions = ()
# TODO: If we need to optimize this, we can go back to putting subclasses
# in charge of visiting children; they know when not to bother. Or we can
# mark nodes as not descent-worthy in the grammar.
def visit(self, node):
"""Walk a parse tree, transforming it into another representation.
Recursively descend a parse tree, dispatching to the method named after
the rule in the :class:`~parsimonious.grammar.Grammar` that produced
each node. If, for e
|
capstone-rust/capstone-rs
|
capstone-sys/capstone/bindings/python/capstone/mos65xx_const.py
|
Python
|
mit
| 3,179
| 0
|
# For Capstone Engine. AUTO-GENERATED FILE, DO NOT EDIT [mos65xx_const.py]
MOS65XX_REG_INVALID = 0
MOS65XX_REG_ACC = 1
MOS65XX_REG_X = 2
MOS65XX_REG_Y = 3
MOS65XX_REG_P = 4
MOS65XX_REG_SP = 5
MOS65XX_REG_DP = 6
MOS65XX_REG_B = 7
MOS65XX_REG_K = 8
MOS65XX_REG_ENDING = 9
MOS65XX_AM_NONE = 0
MOS65XX_AM_IMP = 1
MOS65XX_AM_ACC = 2
MOS65XX_AM_IMM = 3
MOS65XX_AM_REL = 4
MOS65XX_AM_INT = 5
MOS65XX_AM_BLOCK = 6
MOS65XX_AM_ZP = 7
MOS65XX_AM_ZP_X = 8
MOS65XX_AM_ZP_Y = 9
MOS65XX_AM_ZP_REL = 10
MOS65XX_AM_ZP_IND = 11
MOS65XX_AM_ZP_X_IND = 12
MOS65XX_AM_ZP_IND_Y = 13
MOS65XX_AM_ZP_IND_LONG = 14
MOS65XX_AM_ZP_IND_LONG_Y = 15
MOS65XX_AM_ABS = 16
MOS65XX_AM_ABS_X = 17
MOS65XX_AM_ABS_Y = 18
MOS65XX_AM_ABS_IND = 19
MOS65XX_AM_ABS_X_IND = 20
MOS65XX_AM_ABS_IND_LONG = 21
MOS65XX_AM_ABS_LONG = 22
MOS65XX_AM_ABS_LONG_X = 23
MOS65XX_AM_SR = 24
MOS65XX_AM_SR_IND_Y = 25
MOS65XX_INS_INVALID = 0
MOS65XX_INS_ADC = 1
MOS65XX_INS_AND = 2
MOS65XX_INS_ASL = 3
MOS65XX_INS_BBR = 4
MOS65XX_INS_BBS = 5
MOS65XX_INS_BCC = 6
MOS65XX_INS_BCS = 7
MOS65XX_INS_BEQ = 8
MOS65XX_INS_BIT = 9
MOS65XX_INS_BMI = 10
MOS65XX_INS_BNE = 11
MOS65XX_INS_BPL = 12
MOS65XX_INS_BRA = 13
MOS65XX_INS_BRK = 14
MOS65XX_INS_BRL = 15
MOS65XX_INS_BVC = 16
MOS65XX_INS_BVS = 17
MOS65XX_INS_CLC = 18
MOS65XX_INS_CLD = 19
MOS65XX_INS_CLI = 20
MOS65XX_INS_CLV = 21
MOS65XX_INS_CMP = 22
MOS65XX_INS_COP = 23
MOS65XX_INS_CPX = 24
MOS65XX_INS_CPY = 25
MOS65XX_INS_DEC = 26
MOS65XX_INS_DEX = 27
MOS65XX_INS_DEY = 28
MOS65XX_INS_EOR = 29
MOS65XX_INS_INC = 30
MOS65XX_INS_INX = 31
MOS65XX_INS_INY = 32
MOS65XX_INS_JML = 33
MOS65XX_INS_JMP = 34
MOS65XX_INS_JSL = 35
MOS65XX_INS_JSR = 36
MOS65XX_INS_LDA = 37
MOS65XX_INS_LDX = 38
MOS65XX_INS_LDY = 39
MOS65XX_INS_LSR = 40
MOS65XX_INS_MVN = 41
MOS65XX_INS_MVP = 42
MOS65XX_INS_NOP = 43
MOS65XX_INS_ORA = 44
MOS65XX_INS_PEA = 45
MOS65XX_INS_PEI = 46
MOS65XX_INS_PER = 47
MOS65XX_INS_PHA = 48
MOS65XX_INS_PHB = 49
MOS65XX_INS_PHD = 50
MOS65XX_INS_PHK = 51
MOS65XX_INS_PHP = 52
MOS65XX_INS_PHX = 53
MOS65XX_INS_PHY = 54
MOS65XX_INS_PLA = 55
MOS65XX_INS_PLB = 56
MOS65XX_INS_PLD = 57
MOS65XX_INS_PLP = 58
MOS65XX_INS_PLX = 59
MOS65XX_INS_PLY = 60
MOS65XX_INS_REP = 61
MOS65XX_INS_RMB = 62
MOS65XX_INS_ROL = 63
MOS65XX_INS_ROR = 64
MOS65XX_INS_RTI = 65
MOS65XX_INS_RTL = 66
MOS65XX_INS_RTS = 67
MOS65XX_INS_SBC = 68
MOS65XX_INS_SEC = 69
MOS65XX_INS_SED = 70
MOS65XX_INS_SEI = 71
MOS65XX_INS_SEP = 72
MOS65XX_INS_SMB = 73
MOS65XX_INS_STA = 74
MOS65XX_INS_STP = 75
MOS65XX_INS_STX = 76
MOS65XX_INS_STY = 77
MOS65XX_INS_STZ = 78
MOS65XX_INS_TAX = 79
MOS65XX_INS_TAY = 80
MOS65XX_INS_TCD = 81
MOS65XX_INS_TCS = 82
MOS65XX_INS_TDC = 83
MOS65XX_INS_TRB = 84
MOS65XX_INS_TSB = 85
MOS65XX_INS_TSC = 86
MOS65XX_INS_TSX = 87
MOS65XX_INS_TXA = 88
MOS65XX_INS_TXS = 89
MOS65XX_INS_TXY = 90
MOS65XX_INS_TYA = 91
MOS65XX_INS_TYX = 92
MOS65XX_INS_WAI = 93
MOS65XX_INS_WDM = 94
MOS65XX_INS_XBA = 95
MOS65XX_INS_XCE = 96
MOS65XX_INS_ENDING = 97
MOS65XX_GRP_INVALID = 0
MOS65XX_GRP_JUMP = 1
MOS65XX_GRP_CALL = 2
MOS65XX_GRP_RET = 3
MOS65XX_GRP_INT = 4
MOS65XX_GRP_IRET = 5
MOS65XX_GRP_BRANCH_RELATIVE = 6
MOS65XX_GRP_ENDING = 7
MOS65XX_OP_INVALID = 0
MOS65XX_OP_REG = 1
MOS65XX_OP_IMM = 2
MOS65XX_OP_MEM = 3
|
leppa/home-assistant
|
tests/helpers/test_dispatcher.py
|
Python
|
apache-2.0
| 4,196
| 0.000238
|
"""Test dispatcher helpers."""
import asyncio
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
dispatcher_connect,
dispatcher_send,
)
from tests.common import get_test_home_assistant
class TestHelpersDispatcher:
"""Tests for discovery helper methods."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_simple_function(self):
"""Test simple function (executor)."""
calls = []
def test_funct(data):
"""Test function."""
calls.append(data)
dispatcher_connect(self.hass, "test", test_funct)
dispatcher_send(self.hass, "test", 3)
self.hass.block_till_done()
assert calls == [3]
        dispatcher_send(self.hass, "test", "bla")
self.hass.block_till_done()
assert calls == [3, "bla"]
def test_simple_function_unsub(self):
"""Test simple function (executor) and unsub."""
calls1 = []
calls2 = []
def test_funct1(data):
"""Test function."""
calls1.append(data)
        def test_funct2(data):
            """Test function."""
calls2.append(data)
dispatcher_connect(self.hass, "test1", test_funct1)
unsub = dispatcher_connect(self.hass, "test2", test_funct2)
dispatcher_send(self.hass, "test1", 3)
dispatcher_send(self.hass, "test2", 4)
self.hass.block_till_done()
assert calls1 == [3]
assert calls2 == [4]
unsub()
dispatcher_send(self.hass, "test1", 5)
dispatcher_send(self.hass, "test2", 6)
self.hass.block_till_done()
assert calls1 == [3, 5]
assert calls2 == [4]
# check don't kill the flow
unsub()
dispatcher_send(self.hass, "test1", 7)
dispatcher_send(self.hass, "test2", 8)
self.hass.block_till_done()
assert calls1 == [3, 5, 7]
assert calls2 == [4]
def test_simple_callback(self):
"""Test simple callback (async)."""
calls = []
@callback
def test_funct(data):
"""Test function."""
calls.append(data)
dispatcher_connect(self.hass, "test", test_funct)
dispatcher_send(self.hass, "test", 3)
self.hass.block_till_done()
assert calls == [3]
dispatcher_send(self.hass, "test", "bla")
self.hass.block_till_done()
assert calls == [3, "bla"]
def test_simple_coro(self):
"""Test simple coro (async)."""
calls = []
@asyncio.coroutine
def test_funct(data):
"""Test function."""
calls.append(data)
dispatcher_connect(self.hass, "test", test_funct)
dispatcher_send(self.hass, "test", 3)
self.hass.block_till_done()
assert calls == [3]
dispatcher_send(self.hass, "test", "bla")
self.hass.block_till_done()
assert calls == [3, "bla"]
def test_simple_function_multiargs(self):
"""Test simple function (executor)."""
calls = []
def test_funct(data1, data2, data3):
"""Test function."""
calls.append(data1)
calls.append(data2)
calls.append(data3)
dispatcher_connect(self.hass, "test", test_funct)
dispatcher_send(self.hass, "test", 3, 2, "bla")
self.hass.block_till_done()
assert calls == [3, 2, "bla"]
async def test_callback_exception_gets_logged(hass, caplog):
"""Test exception raised by signal handler."""
@callback
def bad_handler(*args):
"""Record calls."""
raise Exception("This is a bad message callback")
async_dispatcher_connect(hass, "test", bad_handler)
dispatcher_send(hass, "test", "bad")
await hass.async_block_till_done()
await hass.async_block_till_done()
assert "Exception in bad_handler when dispatching 'test': ('bad',)" in caplog.text
|
steveherrin/PhDThesis
|
Thesis/scripts/make_bb_spectrum_plot.py
|
Python
|
mit
| 5,583
| 0.003582
|
import ROOT
from math import pi, sqrt, pow, exp
import scipy.integrate
import numpy
from array import array
alpha = 7.2973e-3
m_e = 0.51099892
Z_Xe = 54
Q = 2.4578
def F(Z, KE):
E = KE + m_e
W = E/m_e
Z0 = Z + 2
if W <= 1:
W = 1 + 1e-4
if W > 2.2:
a = -8.46e-2 + 2.48e-2*Z0 + 2.37e-4*Z0**2
b = 1.15e-2 + 3.58e-4*Z0 - 6.17e-5*Z0**2
else:
a = -0.811 + 4.46e-2*Z0 + 1.08e-4*Z0**2
b = 0.673 - 1.82e-2*Z0 + 6.38e-5*Z0**2
x = sqrt(W-1)
p = sqrt(W**2 - 1)
if (p <= 0):
result = 1
else:
result = W/p*exp(a + b*x)
return result
def D(d, K, i):
    # Integrand for the summed-electron spectrum at total kinetic energy K
    # (energies in units of m_e). d is the kinetic-energy difference between
    # the two electrons; the factor (T0 - K)**i sets the spectral shape.
    Z = Z_Xe
    T0 = Q/m_e
    E1 = 0.5*(K+d) + 1
    E2 = 0.5*(K-d) + 1
    p1 = sqrt(E1**2 - 1)
    p2 = sqrt(E2**2 - 1)
    T1 = E1 - 1
    T2 = E2 - 1
    return p1*E1*F(Z, T1*m_e)*p2*E2*F(Z, T2*m_e)*pow(T0 - K, i)
def SumSpectrum(K, i):
if K < 0:
return 0
elif K > Q:
return 0
a = -K/m_e
b = K/m_e
x = scipy.integrate.quad(D, a, b, (K/m_e, i))[0]
if x < 0:
return 0
else:
return x
def gauss_conv(x, y, res):
N = len(x)
mu = numpy.mean(x)
s = res*mu
gauss = [1.0/(s*sqrt(2*pi))*exp(-0.5*((a-mu)/s)**2) for a in x]
convolution = numpy.convolve(y, gauss,'same')
return convolution
def normalize(y, eps, f):
return [a*f for a in y]
N = 1000
min_E = 0.0
max_E = 1.2
E_scaled = array('d', numpy.linspace(min_E, max_E, N, False))
Es = array('d', (E*Q for E in E_scaled))
eps = (max_E - min_E)/N
bb0n = [0.5/eps if abs(E-Q)<eps else 0 for E in Es]
bb2n = [SumSpectrum(E, 5) for E in Es]
bb0n_smeared = gauss_conv(Es, bb0n, 0.02)
bb2n_smeared = gauss_conv(Es, bb2n, 0.02)
bb0n_int = scipy.integrate.simps(bb0n_smeared, None, eps)
bb0n_norm = array('d', normalize(bb0n_smeared, eps, 1e-2/bb0n_int))
bb2n_int = scipy.integrate.simps(bb2n_smeared, None, eps)
bb2n_norm = array('d', normalize(bb2n_smeared, eps, 1/bb2n_int))
g_bb0n = ROOT.TGraph(N, E_scaled, bb0n_norm)
g_bb0n.SetTitle("")
g_bb0n.SetLineStyle(ROOT.kDashed)
g_bb2n = ROOT.TGraph(N, E_scaled, bb2n_norm)
g_bb2n.SetTitle("")
bb0nX = []
bb0nX.append([0.5/eps if abs(E-Q)<eps else 0 for E in Es])
for i in [1, 2, 3, 5, 7]:
bb0nX.append([SumSpectrum(E, i) for E in Es])
bb0nX_graphs = []
for bb0nXn in bb0nX:
bb0nX_int = scipy.integrate.simps(bb0nXn, None, eps)
bb0nX_norm = array('d', normalize(bb0nXn, eps, 1/bb0nX_int))
g_bb0nX = ROOT.TGraph(N, E_scaled, bb0nX_norm)
bb0nX_graphs.append(g_bb0nX)
min_E = 0.9
max_E = 1.1
E_scaled_z = array('d', numpy.linspace(min_E, max_E, N, False))
Es_z = array('d', (E*Q for E in E_scaled_z))
eps_z = (max_E - min_E)/N
bb0n_z = [0.5/eps_z if abs(E-Q)<eps_z else 0 for E in Es_z]
bb2n_z = [SumSpectrum(E, 5) for E in Es_z]
bb0n_smeared_z = gauss_conv(Es_z, bb0n_z, 0.02)
bb2n_smeared_z = gauss_conv(Es_z, bb2n_z, 0.02)
bb0n_norm_z = array('d', normalize(bb0n_smeared_z, eps, 1e-6/bb0n_int))
bb2n_norm_z = array('d', normalize(bb2n_smeared_z, eps, 1.0/bb2n_int))
g_bb0n_z = ROOT.TGraph(N, E_scaled_z, bb0n_norm_z)
g_bb0n_z.SetTitle("")
g_bb0n_z.SetLineStyle(ROOT.kDashed)
g_bb2n_z = ROOT.TGraph(N, E_scaled_z, bb2n_norm_z)
g_bb2n_z.SetTitle("")
#print("bb0n %f"%(sum((y*eps for y in bb0n_norm))))
#print("bb2n %f"%(sum((y*eps for y in bb2n_norm))))
c_both = ROOT.TCanvas("c_both","c_both")
p = ROOT.TPad("p", "p", 0, 0, 1, 1)
p.SetRightMargin(0.02)
p.SetTopMargin(0.02)
p.Draw()
p.cd()
g_bb2n.Draw("AL")
g_bb0n.Draw("L")
g_bb2n.GetYaxis().SetTitle("dN/dE")
g_bb2n.GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
c_both.cd()
p_inset = ROOT.TPad("p_inset","p_inset",0.5, 0.5, 0.995, 0.995)
p_inset.SetRightMargin(0.05)
p_inset.SetTopMargin(0.05)
p_inset.Draw()
p_inset.cd()
g_bb2n_z.Draw("AL")
g_bb0n_z.Draw("L")
g_bb2n_z.GetYaxis().SetTitle("dN/dE")
g_bb2n_z.GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
g_bb2n_z.GetYaxis().SetNoExponent(False)
# Zoom in so we can't see edge effects of the convolution
g_bb2n_z.GetXaxis().SetRangeUser(1-0.25*(1-min_E), 1+0.25*(max_E-1))
g_bb2n_z.GetYaxis().SetRangeUser(0, 0.0004)
c_z = ROOT.TCanvas("c_z","c_z")
c_z.SetRightMargin(0.05)
c_z.SetTopMargin(0.05)
g_bb2n_z.Draw("AL")
g_bb0n_z.Draw("L")
c = ROOT.TCanvas("c","c")
c.SetRightMargin(0.05)
c.SetTopMargin(0.05)
g_bb2n.Draw("AL")
g_bb0n.Draw("L")
c_majoron = ROOT.TCanvas("c_majoron")
c_majoron.SetRightMargin(0.05)
c_majoron.SetTopMargin(0.05)
colors = [ROOT.kBlack, ROOT.kRed, ROOT.kGreen, ROOT.kBlue,
ROOT.kMagenta, ROOT.kCyan]
draw_opt = "AL"
for i in xrange(len(bb0nX_graphs)):
bb0nX_graphs[-(i+1)].SetLineColor(colors[-(i+1)])
bb0nX_graphs[-(i+1)].Draw(draw_opt)
draw_opt = "L"
# Draw bb0n last so it doesn't scale others to 0
bb0nX_graphs[-1].SetTitle("")
bb0nX_graphs[-1].GetXaxis().SetRangeUser(0, 1.1)
bb0nX_graphs[-1].GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
bb0nX_graphs[-1].GetYaxis().SetTitle("dN/dE")
l_majoron = ROOT.TLegend(0.45, 0.77, 0.85, 0.94)
l_majoron.SetFillColor(ROOT.kWhite)
l_majoron.SetNColumns(2)
l_majoron.AddEntry(bb0nX_graphs[0], "0#nu#beta#beta", "l")
l_majoron.AddEntry(bb0nX_graphs[1], "0#nu#beta#beta#chi^{0} (n=1)", "l")
l_majoron.AddEntry(bb0nX_graphs[4], "2#nu#beta#beta (n=5)", "l")
l_majoron.AddEntry(bb0nX_graphs[2], "0#nu#beta#beta#chi^{0} (n=2)", "l")
l_majoron.AddEntry(None, "", "")
l_majoron.AddEntry(bb0nX_graphs[3], "0#nu#beta#beta#chi^{0}(#chi^{0}) (n=3)", "l")
l_majoron.AddEntry(None, "", "")
l_majoron.AddEntry(bb0nX_graphs[5], "0#nu#beta#beta#chi^{0}#chi^{0} (n=7)", "l")
l_majoron.Draw()
dummy = raw_input("Press Enter...")
|
YuepengGuo/sina_weibo_crawler
|
crawler/toolkit/weibo.py
|
Python
|
apache-2.0
| 11,860
| 0.002698
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '1.1.4'
__author__ = 'Liao Xuefeng (askxuefeng@gmail.com)'
'''
Python client SDK for sina weibo API using OAuth 2.
'''
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import gzip, time, json, hmac, base64, hashlib, urllib, urllib2, logging, mimetypes, collections
class APIError(StandardError):
'''
raise APIError if receiving json message indicating failure.
'''
def __init__(self, error_code, error, request):
self.error_code = error_code
self.error = error
self.request = request
StandardError.__init__(self, error)
def __str__(self):
return 'APIError: %s: %s, request: %s' % (self.error_code, self.error, self.request)
def _parse_json(s):
' parse str into JsonDict '
def _obj_hook(pairs):
' convert json object to python object '
o = JsonDict()
for k, v in pairs.iteritems():
o[str(k)] = v
return o
return json.loads(s, object_hook=_obj_hook)
class JsonDict(dict):
' general json object that allows attributes to be bound to and also behaves like a dict '
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(r"'JsonDict' object has no attribute '%s'" % attr)
def __setattr__(self, attr, value):
self[attr] = value
def _encode_params(**kw):
'''
do url-encode parameters
>>> _encode_params(a=1, b='R&D')
'a=1&b=R%26D'
>>> _encode_params(a=u'\u4e2d\u6587', b=['A', 'B', 123])
'a=%E4%B8%AD%E6%96%87&b=A&b=B&b=123'
'''
args = []
for k, v in kw.iteritems():
if isinstance(v, basestring):
qv = v.encode('utf-8') if isinstance(v, unicode) else v
args.append('%s=%s' % (k, urllib.quote(qv)))
elif isinstance(v, collections.Iterable):
for i in v:
qv = i.encode('utf-8') if isinstance(i, unicode) else str(i)
args.append('%s=%s' % (k, urllib.quote(qv)))
else:
qv = str(v)
args.append('%s=%s' % (k, urllib.quote(qv)))
return '&'.join(args)
def _encode_multipart(**kw):
' build a multipart/form-data body with randomly generated boundary '
boundary = '----------%s' % hex(int(time.time() * 1000))
data = []
for k, v in kw.iteritems():
data.append('--%s' % boundary)
if hasattr(v, 'read'):
# file-like object:
filename = getattr(v, 'name', '')
content = v.read()
data.append('Content-Disposition: form-data; name="%s"; filename="hidden"' % k)
data.append('Content-Length: %d' % len(content))
data.append('Content-Type: %s\r\n' % _guess_content_type(filename))
data.append(content)
else:
data.append('Content-Disposition: form-data; name="%s"\r\n' % k)
data.append(v.encode('utf-8') if isinstance(v, unicode) else v)
data.append('--%s--\r\n' % boundary)
return '\r\n'.join(data), boundary
def _guess_content_type(url):
n = url.rfind('.')
if n == (-1):
return 'application/octet-stream'
ext = url[n:]
return mimetypes.types_map.get(ext, 'application/octet-stream')
_HTTP_GET = 0
_HTTP_POST = 1
_HTTP_UPLOAD = 2
def _http_get(url, authorization=None, **kw):
logging.info('GET %s' % url)
return _http_call(url, _HTTP_GET, authorization, **kw)
def _http_post(url, authorization=None, **kw):
logging.info('POST %s' % url)
return _http_call(url, _HTTP_POST, authorization, **kw)
def _http_upload(url, authorization=None, **kw):
logging.info('MULTIPART POST %s' % url)
return _http_call(url, _HTTP_UPLOAD, authorization, **kw)
def _read_body(obj):
using_gzip = obj.headers.get('Content-Encoding', '') == 'gzip'
body = obj.read()
if using_gzip:
gzipper = gzip.GzipFile(fileobj=StringIO(body))
fcontent = gzipper.read()
gzipper.close()
return fcontent
return body
def _http_call(the_url, method, authorization, **kw):
'''
send an http request and return a json object if no error occurred.
'''
params = None
boundary = None
if method == _HTTP_UPLOAD:
# fix sina upload url:
the_url = the_url.replace('https://api.', 'https://upload.api.')
params, boundary = _encode_multipart(**kw)
else:
params = _encode_params(**kw)
if '/remind/' in the_url:
# fix sina remind api:
the_url = the_url.replace('https://api.', 'https://rm.api.')
http_url = '%s?%s' % (the_url, params) if method == _HTTP_GET else the_url
http_body = None if method == _HTTP_GET else params
req = urllib2.Request(http_url, data=http_body)
req.add_header('Accept-Encoding', 'gzip')
if authorization:
req.add_header('Authorization', 'OAuth2 %s' % authorization)
if boundary:
req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)
try:
resp = urllib2.urlopen(req, timeout=5)
body = _read_body(resp)
r = _parse_json(body)
if hasattr(r, 'error_code'):
raise APIError(r.error_code, r.get('error', ''), r.get('request', ''))
return r
except urllib2.HTTPError, e:
try:
            r = _parse_json(_read_body(e))
        except:
r = None
if hasattr(r, 'error_code'):
raise APIError(r.error_code, r.get('error', ''), r.get('request', ''))
raise e
class HttpObject(object):
def __init__(self, client, method):
self.client = client
self.method = method
def __getattr__(self, attr):
def wrap(**kw):
if self.client.is_expires():
raise APIError('21327', 'expired_token', attr)
return _http_call('%s%s.json' % (self.client.api_url, attr.replace('__', '/')), self.method,
self.client.access_token, **kw)
return wrap
class APIClient(object):
'''
API client using synchronized invocation.
'''
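    # Usage sketch (illustrative; the key, secret, token and endpoint name are
    # placeholders). Attribute access on .get/.post/.upload is routed through
    # HttpObject above, which turns '__' into '/', so the last call requests
    # https://api.weibo.com/2/statuses/public_timeline.json:
    #
    #     client = APIClient(app_key='key', app_secret='secret',
    #                        redirect_uri='http://example.com/callback')
    #     client.set_access_token(token, expires)
    #     timeline = client.get.statuses__public_timeline(count=20)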
def __init__(self, app_key, app_secret, redirect_uri=None, response_type='code', domain='api.weibo.com',
version='2'):
self.client_id = str(app_key)
self.client_secret = str(app_secret)
self.redirect_uri = redirect_uri
self.response_type = response_type
self.auth_url = 'https://%s/oauth2/' % domain
self.api_url = 'https://%s/%s/' % (domain, version)
self.access_token = None
self.expires = 0.0
self.get = HttpObject(self, _HTTP_GET)
self.post = HttpObject(self, _HTTP_POST)
self.upload = HttpObject(self, _HTTP_UPLOAD)
def parse_signed_request(self, signed_request):
'''
parse signed request when using in-site app.
Returns:
dict object like { 'uid': 12345, 'access_token': 'ABC123XYZ', 'expires': unix-timestamp },
or None if parse failed.
'''
def _b64_normalize(s):
appendix = '=' * (4 - len(s) % 4)
return s.replace('-', '+').replace('_', '/') + appendix
sr = str(signed_request)
logging.info('parse signed request: %s' % sr)
enc_sig, enc_payload = sr.split('.', 1)
sig = base64.b64decode(_b64_normalize(enc_sig))
data = _parse_json(base64.b64decode(_b64_normalize(enc_payload)))
if data['algorithm'] != u'HMAC-SHA256':
return None
expected_sig = hmac.new(self.client_secret, enc_payload, hashlib.sha256).digest() ;
if expected_sig == sig:
data.user_id = data.uid = data.get('user_id', None)
data.access_token = data.get('oauth_token', None)
expires = data.get('expires', None)
if expires:
data.expires = data.expires_in = time.time() + expires
return data
return None
def set_access_token(self, access_token, expires):
self.access_token = str(access_token)
self.expires = float(expires)
def get_authorize_url(
|
GroundPound/ManFuzzer
|
manparser/__init__.py
|
Python
|
apache-2.0
| 1,492
| 0.021448
|
'''
Created on Dec 23, 2012
@author: Peter
This module contains a few functions for extracting the parameters out of a man page.
'''
import subprocess
import logging
from arguments.valuedarguments import ValuedArguments
TIMEOUT = 3
logger = logging.getLogger('man-fuzzer')
def mineflags(executable):
'''Returns a set of progargs that can be used to generate arguments in a test case.'''
# Mine the flags
valuedarguments = ValuedArguments()
valuedarguments.parse(_mine_h_flags(executable,TIMEOUT))
valuedarguments.parse(_mine_H_flags(executable,TIMEOUT))
valuedarguments.parse(_mine_Help_flags(executable,TIMEOUT))
valuedarguments.parse(_mine_Man_flags(executable,TIMEOUT))
return valuedarguments
def _extract_arguments(command,timeout):
try:
child = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
child_output = child.communicate(timeout = timeout)
return repr(child_output)
except Exception as e:
logger.exception(e)
return ""
def _mine_h_flags(executable,timeout):
return _extract_arguments(str(executable) + " -h",timeout)
def _mine_H_flags(executable,timeout):
return _extract_arguments(str(executable) + " -H",timeout)
def _mine_Help_flags(executable,timeout):
    return _extract_arguments(str(executable) + " --help",timeout)
def _mine_Man_flags(executable,timeout):
return _extract_arguments("man " + str(executable),timeout)
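# Usage sketch (illustrative; the import path and the target binary are
# assumptions). mineflags() shells out to "<binary> -h", "-H", "--help" and
# "man <binary>", then lets ValuedArguments parse whatever comes back:
#
#     from manparser import mineflags
#     valued_args = mineflags('/bin/ls')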
|
willybh11/python
|
advProg/turtleStuff/helloturtleworld.py
|
Python
|
gpl-3.0
| 846
| 0.055556
|
import turtle
import time
window = turtle.Screen()
def test():
allturtles = []
for i in range(4):
t1 = turtle.Turtle()
        t2 = turtle.Turtle()
t3 = turtle.Turtle()
t4 = turtle.Turtle()
t1.speed(0)
t2.speed(0)
t3.speed(0)
t4.speed(0)
t1.penup()
t2.penup()
t3.penup()
t4.penup()
t1.setx(50*i)
t1.sety(50*i)
t2.setx(50*i)
t2.sety(-50*i)
t3.setx(-50*i)
t3.sety(50*i)
t4.setx(-50*i)
t4.sety(-50*i)
t1.pendown()
t2.pendown()
t3.pendown()
t4.pendown()
t1.ht()
t2.ht()
        t3.ht()
t4.ht()
allturtles.append([t1,t2,t3,t4])
start = time.clock()
for degrees in range(360):
for line in allturtles:
for t in line:
for repeat in range(2):
t.fd(200)
t.lt(90)
t.lt(1)
print "That took %f seconds." %(time.clock()-start)
test()
window.exitonclick()
|
Abraca/debian
|
waflib/Tools/suncc.py
|
Python
|
gpl-2.0
| 1,378
| 0.064586
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Utils
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_scc(conf):
v=conf.env
cc=None
if v['CC']:cc=v['CC']
elif'CC'in conf.environ:cc=conf.environ['CC']
if not cc:cc=conf.find_program('cc',var='CC')
if not cc:conf.fatal('Could not find a Sun C compiler')
cc=conf.cmd_to_list(cc)
try:
		conf.cmd_and_log(cc+['-flags'])
	except Exception:
		conf.fatal('%r is not a Sun compiler'%cc)
v['CC']=cc
v['CC_NAME']='sun'
@conf
def scc_common_flags(conf):
v=conf.env
v['CC_SRC_F']=[]
v['CC_TGT_F']=['-c','-o']
if not v['LINK_CC']:v['LINK_CC']=v['CC']
v['CCLNK_SRC_F']=''
v['CCLNK_TGT_F']=['-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['SONAME_ST']='-Wl,-h,%s'
v['SHLIB_MARKER']='-Bdynamic'
v['STLIB_MARKER']='-Bstatic'
v['cprogram_PATTERN']='%s'
v['CFLAGS_cshlib']=['-Kpic','-DPIC']
v['LINKFLAGS_cshlib']=['-G']
v['cshlib_PATTERN']='lib%s.so'
v['LINKFLAGS_cstlib']=['-Bstatic']
v['cstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_scc()
conf.find_ar()
conf.scc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
|
nextgis/nextgisweb
|
nextgisweb/layer/util.py
|
Python
|
gpl-3.0
| 85
| 0
|
from ..i18n import trstring_factory
COMP_ID = 'layer'
_ = trstring_factory(COMP_ID)
|
|
kulawczukmarcin/mypox
|
mininet_scripts/simple_net.py
|
Python
|
apache-2.0
| 1,695
| 0.00059
|
__author__ = 'Ehsan'
from mininet.node import CPULimitedHost
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.log import setLogLevel, info
from mininet.node import RemoteController
from mininet.cli import CLI
"""
Instructions to run the topo:
1. Go to the directory where this file is.
2. run: sudo -E python Simple_Pkt_Topo.py
The topo has 4 switches and 4 hosts. They are connected in a star shape.
"""
class SimplePktSwitch(Topo):
"""Simple topology example."""
def __init__(self, **opts):
"""Create custom topo."""
# Initialize topology
        # It uses the constructor for the Topo class
super(SimplePktSwitch, self).__init__(**opts)
# Add hosts and switches
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
h4 = self.addHost('h4')
# Adding switches
s1 = self.addSwitch('s1', dpid="0000000000000001")
s2 = self.addSwitch('s2', dpid="0000000000000002")
s3 = self.addSwitch('s3', dpid="0000000000000003")
s4 = self.addSwitch('s4', dpid="0000000000000004")
# Add links
self.addLink(h1, s1)
self.addLink(h2, s2)
self.addLink(h3, s3)
self.addLink(h4, s4)
self.addLink(s1, s2)
self.addLink(s1, s3)
self.addLink(s1, s4)
def run():
    c = RemoteController('c', '192.168.56.1', 6633)
net = Mininet(topo=SimplePktSwitch(), host=CPULimitedHost, controller=None)
net.addController(c)
net.start()
CLI(net)
net.stop()
# if the script is run directly (sudo custom/optical.py):
if __name__ == '__main__':
setLogLevel('info')
run()
|
jdber1/opendrop
|
opendrop/app/common/image_processing/plugins/define_line/component.py
|
Python
|
gpl-3.0
| 7,694
| 0.00065
|
# Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.
from typing import Any, Tuple
from gi.repository import Gtk, Gdk
from opendrop.app import keyboard
from opendrop.app.common.image_processing.image_processor import ImageProcessorPluginViewContext
from opendrop.mvp import ComponentSymbol, View, Presenter
from opendrop.utility.bindable.gextension import GObjectPropertyBindable
from opendrop.geometry import Vector2, Line2
from opendrop.widgets.canvas import LineArtist, CircleArtist
from .model import DefineLinePluginModel
define_line_plugin_cs = ComponentSymbol() # type: ComponentSymbol[None]
@define_line_plugin_cs.view(options=['view_context', 'tool_id', 'color', 'z_index'])
class DefineLinePluginView(View['DefineLinePluginPresenter', None]):
def _do_init(
self,
view_context: ImageProcessorPluginViewContext,
tool_id: Any,
color: Tuple[float, float, float],
z_index: int,
) -> None:
self._view_context = view_context
self._tool_ref = view_context.get_tool_item(tool_id)
view_context.canvas.connect(
'cursor-up',
lambda canvas, pos: self.presenter.cursor_up(pos),
)
view_context.canvas.connect(
'cursor-down',
lambda canvas, pos: self.presenter.cursor_down(pos),
)
view_context.canvas.connect(
'cursor-motion',
lambda canvas, pos: self.presenter.cursor_move(pos),
)
view_context.canvas.connect(
'key-press-event',
self._hdl_canvas_key_press_event,
)
self.bn_tool_button_is_active = self._tool_ref.bn_is_active
self._canvas = view_context.canvas
self._defined_artist = LineArtist(
stroke_color=color,
stroke_width=1,
scale_strokes=True,
)
self._canvas.add_artist(self._defined_artist, z_index=z_index)
self._dragging_artist = LineArtist(
stroke_color=color,
stroke_width=1,
scale_strokes=True,
)
self._canvas.add_artist(self._dragging_artist, z_index=z_index)
self._control_point_artist = CircleArtist(
fill_color=color,
scale_radius=True,
)
self._canvas.add_artist(self._control_point_artist, z_index=z_index)
self.bn_defined = GObjectPropertyBindable(
g_obj=self._defined_artist,
prop_name='line',
)
self.bn_dragging = GObjectPropertyBindable(
g_obj=self._dragging_artist,
prop_name='line',
)
self.presenter.view_ready()
def show_control_point(self, xc: float, yc: float) -> None:
self._control_point_artist.props.xc = xc
self._control_point_artist.props.yc = yc
self._control_point_artist.props.radius = 2.0
def hide_control_point(self) -> None:
self._control_point_artist.props.radius = 0.0
def _hdl_canvas_key_press_event(self, widget: Gtk.Widget, event: Gdk.EventKey) -> bool:
self.presenter.key_press(
keyboard.KeyEvent(
key=keyboard.Key.from_value(event.keyval),
modifier=int(event.state)
)
)
# Stop event propagation.
return True
def _do_destroy(self) -> None:
self._canvas.remove_artist(self._defined_artist)
self._canvas.remove_artist(self._dragging_artist)
@define_line_plugin_cs.presenter(options=['model'])
class DefineLinePluginPresenter(Presenter['DefineLinePluginView']):
def _do_init(self, model: DefineLinePluginModel) -> None:
self._model = model
self.__data_bindings = []
self.__event_connections = []
def view_ready(self) -> None:
self.__data_bindings.extend([
self._model.bn_line.bind(
self.view.bn_defined
),
])
self.__event_connections.extend([
self.view.bn_tool_button_is_active.on_changed.connect(
self._hdl_tool_button_is_active_changed
),
])
self._hdl_tool_button_is_active_changed()
def _hdl_tool_button_is_active_changed(self) -> None:
if self._model.is_defining and not self.view.bn_tool_button_is_active.get():
self._model.discard_define()
def cursor_down(self, pos: Vector2[float]) -> None:
if not self.view.bn_tool_button_is_active.get():
return
if self._model.is_defining:
self._model.discard_define()
self._model.begin_define(pos)
self._update_dragging_indicator(pos)
def cursor_up(self, pos: Vector2[float]) -> None:
if not self.view.bn_tool_button_is_active.get():
return
if not self._model.is_defining:
return
self._model.commit_define(pos)
self._update_dragging_indicator(pos)
def cursor_move(self, pos: Vector2[float]) -> None:
self._update_dragging_indicator(pos)
def key_press(self, event: keyboard.KeyEvent) -> None:
if not self.view.bn_tool_button_is_active.get():
return
if self._model.is_defining:
# User is currently using mouse to define
return
if event.key is keyboard.Key.Up:
self._model.nudge_up()
elif event.key is keyboard.Key.Down:
self._model.nudge_down()
elif event.key is keyboard.Key.Left:
self._model.nudgerot_anticlockwise()
elif event.key is keyboard.Key.Right:
self._model.nudgerot_clockwise()
def _update_dragging_indicator(self, current_cursor_pos: Vector2[float]) -> None:
if not self._model.is_defining:
self.view.bn_dragging.set(None)
self.view.hide_control_point()
return
pt0 = self._model.begin_define_pos
pt1 = current_cursor_pos
if pt0 == pt1:
self.view.bn_dragging.set(None)
self.view.hide_control_point()
return
self.view.bn_dragging.set(Line2(
pt0=pt0,
pt1=pt1,
))
self.view.show_control_point(*self._model.begin_define_pos)
def _do_destroy(self) -> None:
for db in self.__data_bindings:
db.unbind()
for ec in self.__event_connections:
ec.disconnect()
|
thatguyandy27/python-sandbox
|
learning-python/Ch3/dates_finished.py
|
Python
|
mit
| 1,126
| 0.039964
|
#
# Example file for working with date information
# (For Python 3.x, be sure to use the ExampleSnippets3.txt file)
from datetime import date
from datetime import time
from datetime import datetime
def main():
## DATE OBJECTS
# Get today's date from the simple today() method from the date class
today = date.today()
print "Today's date is ", today
# print out the date's individual components
print "Date Components: ", today.day, today.month, today.year
    # retrieve today's weekday (0=Monday, 6=Sunday)
print "Today's Weekday #: ", today.weekday()
## DATETIME OBJECTS
# Get today's date from the datetime class
today = datetime.now()
    print "The current date and time is ", today
# Get the current time
t = datetime.time(datetime.now())
print "The current time is ", t
# weekday returns 0 (monday) through 6 (sunday)
wd = date.weekday(today)
# Days start at 0 for Monday
days = ["monday","tuesday","wednesday","thursday","friday","saturday","sunday"]
print "Today is day number %d" % wd
print "Which is a " + days[wd]
if __name__ == "__main__":
main();
|
museumsvictoria/nodel-recipes
|
(retired)/pjlinkqueue/script.py
|
Python
|
mit
| 1,588
| 0.027708
|
# Copyright (c) 2014 Museum Victoria
# This software is released under the MIT license (see license.txt for details)
from Queue import *
import threading
import atexit
remote_action_PowerOn = RemoteAction()
remote_action_PowerOff = RemoteAction()
remote_action_SetInput = RemoteAction()
def local_action_activate(x = None):
'''{ "title": "Turn on", "desc": "Turn on." }'''
queue.put({'function': 'remote_action_PowerOn', 'delay': 120})
    queue.put({'function': 'remote_action_SetInput', 'args': {"source": "DIGITAL", "number": 1}, 'delay': 5})  # 'args' is the key the worker thread passes to func.call()
print 'Activated'
def local_action_deactivate(x = None):
'''{ "title": "Turn off", "desc": "Turn off." }'''
queue.put({'function': 'remote_action_PowerOff', 'delay': 120})
print 'Deactivated'
class TimerClass(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.event = threading.Event()
def run(self):
while not self.event.isSet():
if queue.empty() != True:
job = queue.get()
try:
print "Calling command " + job['function']
func = globals()[job['function']]
arg = job['args'] if 'args' in job else ''
                    func.call(arg)
self.event.wait(job['delay'])
queue.task_done()
except Exception, e:
print e
print "Failed to call command " + job['function']
else:
self.event.wait(1)
def stop(self):
self.event.set()
queue = Queue()
th = TimerClass()
@atexit.register
def cleanup():
print 'shutdown'
th.stop()
def main():
th.start()
print 'Nodel script started.'
|
project-fondue/python-yql
|
yql/logger.py
|
Python
|
bsd-3-clause
| 1,435
| 0
|
"""Logging for Python YQL."""
import os
import logging
import logging.handlers
LOG_DIRECTORY_DEFAULT = os.path.join(os.path.dirname(__file__), "../logs")
LOG_DIRECTORY = os.environ.get("YQL_LOG_DIR", LOG_DIRECTORY_DEFAULT)
LOG_LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
LOG_LEVEL = os.environ.get("YQL_LOGGING_LEVEL", 'debug')
LOG_FILENAME = os.path.join(LOG_DIRECTORY, "python-yql.log")
MAX_BYTES = 1024 * 1024
log_level = LOG_LEVELS.get(LOG_LEVEL)
yql_logger = logging.getLogger("python-yql")
yql_logger.setLevel(LOG_LEVELS.get(LOG_LEVEL))
class NullHandler(logging.Handler):
def emit(self, record):
pass
def get_logger():
    """Set up the logger if enabled or fall back to NullHandler."""
if os.environ.get("YQL_LOGGING", False):
if not os.path.exists(LOG_DIRECTORY):
os.mkdir(LOG_DIRECTORY)
log_handler = logging.handlers.RotatingFileHandler(
            LOG_FILENAME, maxBytes=MAX_BYTES, backupCount=5)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
log_handler.setFormatter(formatter)
else:
log_handler = NullHandler()
yql_logger.addHandler(log_handler)
return yql_logger
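# Usage sketch (illustrative): file logging is opt-in and configured through
# environment variables; set them before importing this module, since
# LOG_LEVEL and LOG_DIRECTORY are read at import time.
#
#     import os
#     os.environ['YQL_LOGGING'] = '1'            # enable the rotating file handler
#     os.environ['YQL_LOGGING_LEVEL'] = 'info'   # any key of LOG_LEVELS
#     from yql.logger import get_logger
#     get_logger().info('query executed')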
|
beepee14/scikit-learn
|
sklearn/metrics/regression.py
|
Python
|
bsd-3-clause
| 16,953
| 0.000059
|
"""Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
|
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
|
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.a
|
dddomodossola/remi
|
remi/__init__.py
|
Python
|
apache-2.0
| 698
| 0
|
from .gui import (
Widget,
Button,
TextInput,
SpinBox,
Label,
GenericDialog,
InputDialog,
ListView,
ListItem,
DropDown,
DropDownItem,
Image,
Table,
TableRow,
TableItem,
TableTitle,
Input,
Slider,
ColorPicker,
Date,
GenericObject,
FileFolderNavigator,
FileFolderItem,
    FileSelectionDialog,
Menu,
MenuItem,
FileUploader,
FileDownloader,
VideoPlayer,
)
from .server import App, Server, start
from pkg_resources import get_distribution, DistributionNotFound
try:
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
|
persandstrom/home-assistant
|
tests/components/alarm_control_panel/test_manual_mqtt.py
|
Python
|
apache-2.0
| 56,123
| 0
|
"""The tests for the manual_mqtt Alarm Control Panel component."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.setup import setup_component
from homeassistant.const import (
STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_NIGHT, STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED)
from homeassistant.components import alarm_control_panel
import homeassistant.util.dt as dt_util
from tests.common import (
fire_time_changed, get_test_home_assistant,
mock_mqtt_component, fire_mqtt_message, assert_setup_component)
CODE = 'HELLO_CODE'
class TestAlarmControlPanelManualMqtt(unittest.TestCase):
"""Test the manual_mqtt alarm module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.mock_publish = mock_mqtt_component(self.hass)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
self.hass.stop()
def test_fail_setup_without_state_topic(self):
"""Test for failing with no state topic."""
with assert_setup_component(0) as config:
assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt_alarm',
'command_topic': 'alarm/command'
}
})
assert not config[alarm_control_panel.DOMAIN]
def test_fail_setup_without_command_topic(self):
"""Test failing with no command topic."""
with assert_setup_component(0):
assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt_alarm',
'state_topic': 'alarm/state'
}
})
def test_arm_home_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_home_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
state = self.hass.states.get(entity_id)
assert state.attributes['post_pending_state'] == STATE_ALARM_ARMED_HOME
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_home_with_invalid_code(self):
"""Attempt to arm home without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_arm_away_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_home_with_template_code(self):
"""Attempt to arm with a template-based code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code_template': '{{ "abc" }}',
'pending_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.hass.start()
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, 'abc')
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_ARMED_HOME, state.state)
    def test_arm_away_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
state = self.hass.states.get(entity_id)
assert state.attributes['post_pending_state'] == STATE_ALARM_ARMED_AWAY
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqt
|
vshlapakov/kafka-python
|
kafka/consumer/multiprocess.py
|
Python
|
apache-2.0
| 10,236
| 0.001661
|
from __future__ import absolute_import
import logging
import time
from collections import namedtuple
from multiprocessing import Process, Manager as MPManager
try:
from Queue import Empty, Full
except ImportError: # python 2
from queue import Empty, Full
from .base import (
AUTO_COMMIT_MSG_COUNT, AUTO_COMMIT_INTERVAL,
NO_MESSAGES_WAIT_TIME_SECONDS,
FULL_QUEUE_WAIT_TIME_SECONDS
)
from .simple import Consumer, SimpleConsumer
Events = namedtuple("Events", ["start", "pause", "exit"])
log = logging.getLogger("kafka")
def _mp_consume(client, group, topic, queue, size, events, **consumer_options):
"""
A child process worker which consumes messages based on the
|
notifications given by the controller process
NOTE: Ideally, this should have been a method inside the Consumer
class. However, multiprocessing module has issues in windows. The
    functionality breaks unless this function is kept outside of a class
    """
# Make the child processes open separate socket connections
client.reinit()
# We will start consumers without auto-commit. Auto-commit will be
# done by the master controller process.
consumer = SimpleConsumer(client, group, topic,
auto_commit=False,
auto_commit_every_n=None,
auto_commit_every_t=None,
**consumer_options)
# Ensure that the consumer provides the partition information
consumer.provide_partition_info()
while True:
# Wait till the controller indicates us to start consumption
events.start.wait()
# If we are asked to quit, do so
if events.exit.is_set():
break
# Consume messages and add them to the queue. If the controller
# indicates a specific number of messages, follow that advice
count = 0
message = consumer.get_message()
if message:
while True:
try:
queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)
break
except Full:
if events.exit.is_set(): break
count += 1
# We have reached the required size. The controller might have
# more than what he needs. Wait for a while.
# Without this logic, it is possible that we run into a big
# loop consuming all available messages before the controller
# can reset the 'start' event
if count == size.value:
events.pause.wait()
else:
# In case we did not receive any message, give up the CPU for
# a while before we try again
time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)
consumer.stop()
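# Usage sketch for the MultiProcessConsumer defined below (illustrative;
# broker address, group and topic are placeholders, and the KafkaClient
# import path is an assumption about the package layout):
#
#     from kafka import KafkaClient
#
#     client = KafkaClient('localhost:9092')
#     consumer = MultiProcessConsumer(client, 'my-group', 'my-topic',
#                                     num_procs=2)
#     for message in consumer:
#         handle(message)
#     consumer.stop()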
class MultiProcessConsumer(Consumer):
"""
A consumer implementation that consumes partitions for a topic in
parallel using multiple processes
Arguments:
client: a connected KafkaClient
group: a name for this consumer, used for offset storage and must be unique
If you are connecting to a server that does not support offset
commit/fetch (any prior to 0.8.1.1), then you *must* set this to None
topic: the topic to consume
Keyword Arguments:
auto_commit: default True. Whether or not to auto commit the offsets
auto_commit_every_n: default 100. How many messages to consume
before a commit
auto_commit_every_t: default 5000. How much time (in milliseconds) to
wait before commit
num_procs: Number of processes to start for consuming messages.
The available partitions will be divided among these processes
partitions_per_proc: Number of partitions to be allocated per process
(overrides num_procs)
Auto commit details:
If both auto_commit_every_n and auto_commit_every_t are set, they will
reset one another when one is triggered. These triggers simply call the
commit method on this class. A manual call to commit will also reset
these triggers
"""
def __init__(self, client, group, topic, auto_commit=True,
auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
auto_commit_every_t=AUTO_COMMIT_INTERVAL,
num_procs=1, partitions_per_proc=0,
**simple_consumer_options):
# Initiate the base consumer class
super(MultiProcessConsumer, self).__init__(
client, group, topic,
partitions=None,
auto_commit=auto_commit,
auto_commit_every_n=auto_commit_every_n,
auto_commit_every_t=auto_commit_every_t)
# Variables for managing and controlling the data flow from
# consumer child process to master
manager = MPManager()
self.queue = manager.Queue(1024) # Child consumers dump messages into this
self.events = Events(
start = manager.Event(), # Indicates the consumers to start fetch
exit = manager.Event(), # Requests the consumers to shutdown
pause = manager.Event()) # Requests the consumers to pause fetch
self.size = manager.Value('i', 0) # Indicator of number of messages to fetch
# dict.keys() returns a view in py3 + it's not a thread-safe operation
# http://blog.labix.org/2008/06/27/watch-out-for-listdictkeys-in-python-3
# It's safer to copy dict as it only runs during the init.
partitions = list(self.offsets.copy().keys())
# By default, start one consumer process for all partitions
# The logic below ensures that
# * we do not cross the num_procs limit
# * we have an even distribution of partitions among processes
if partitions_per_proc:
num_procs = len(partitions) // partitions_per_proc
if num_procs * partitions_per_proc < len(partitions):
num_procs += 1
# The final set of chunks
chunks = [partitions[proc::num_procs] for proc in range(num_procs)]
self.procs = []
for chunk in chunks:
options = {'partitions': list(chunk)}
if simple_consumer_options:
simple_consumer_options.pop('partitions', None)
options.update(simple_consumer_options)
args = (client.copy(), self.group, self.topic, self.queue,
self.size, self.events)
proc = Process(target=_mp_consume, args=args, kwargs=options)
proc.daemon = True
proc.start()
self.procs.append(proc)
def __repr__(self):
return '<MultiProcessConsumer group=%s, topic=%s, consumers=%d>' % \
(self.group, self.topic, len(self.procs))
def stop(self):
# Set exit and start off all waiting consumers
self.events.exit.set()
self.events.pause.set()
self.events.start.set()
for proc in self.procs:
proc.join()
proc.terminate()
super(MultiProcessConsumer, self).stop()
def __iter__(self):
"""
Iterator to consume the messages available on this consumer
"""
# Trigger the consumer procs to start off.
# We will iterate till there are no more messages available
self.size.value = 0
self.events.pause.set()
while True:
self.events.start.set()
try:
# We will block for a small while so that the consumers get
# a chance to run and put some messages in the queue
# TODO: This is a hack and will make the consumer block for
# at least one second. Need to find a better way of doing this
partition, message = self.queue.get(block=True, timeout=1)
except Empty:
break
# Count, check and commit messages if necessary
self.offsets[partition] = message.offset + 1
self.events.start.clear()
self.count_since_commit += 1
self._auto_commit()
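# Illustrative usage sketch (comments only). It assumes a reachable Kafka
# broker and the package's KafkaClient; the broker address, group and topic
# names, and the handle() callback are hypothetical:
#
#     client = KafkaClient('localhost:9092')
#     consumer = MultiProcessConsumer(client, 'my-group', 'my-topic',
#                                     num_procs=2)
#     for partition, message in consumer:
#         handle(message)    # offsets are tracked per partition in __iter__
#     consumer.stop()        # asks the child processes to exit and joins them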
|
zqhuang/COOP
|
mapio/pyscripts/check_QU.py
|
Python
|
gpl-3.0
| 1,311
| 0.012204
|
import time
import os
import sys
cclist = ['commander', 'nilc', 'sevem', 'smica']
nulist = ['0', '1']
hlist = ['hot0', 'hot1', 'hot2']
clist = ['cold0', 'cold1', 'cold2']
nslist = ['F', 'N', 'S']
readonly = 'T'
nmaps = 1000
print "=============== F =============="
for nu in nulist:
print " ------------------------- "
print "nu = " + nu
print " * hot spots * "
for cc in cclist:
print cc
for h in hlist:
os.system(r'./SST '+ cc + r' 1024 QU ' + nu + r' ' + str(nmaps) + r' self '+ h + r' T F T ' + readonly)
print(" * cold spots * ")
for cc in cclist:
print cc
for c in clist:
os.system(r'./SST '+ cc + r' 1024 QU ' + nu + r' ' + str(nmaps) + r' self '+ c + r' T F T '+ readonly)
sys.exit()
print "=============== NS =============="
for nu in nulist:
print " ------------------------- "
print "nu = " + nu
for cc in cclist:
print cc
for ns in nslist:
print "hemisphere: " + ns
os.system(r'./SST ' + cc + ' 1024 QU ' + ' ' + nu + ' ' + str(nmaps) + ' self hot0 T ' + ns + ' T '+ readonly)
os.system(r'./SST ' + cc + ' 1024 QU ' + ' ' + nu + ' ' + str(nmaps) + ' self cold0 T ' + ns + ' T '+ readonly)
| |
jeremiah-c-leary/vhdl-style-guide
|
vsg/rules/subtype/rule_002.py
|
Python
|
gpl-3.0
| 1,387
| 0.000721
|
from vsg import parser
from vsg import token
from vsg.rules import consistent_token_case
lTokens = []
lTokens.append(token.subtype_declaration.identifier)
lIgnore = []
lIgnore.append(token.interface_signal_declaration.identifier)
lIgnore.append(token.interface_unknown_declaration.identifier)
lIgnore.append(token.interface_constant_declaration.identifier)
lIgnore.append(token.interface_variable_declaration.identifier)
lIgnore.append(token.association_element.formal_part)
lIgnore.append(parser.whitespace)
lIgnore.append(parser.carriage_return)
lIgnore.append(parser.blank_line)
class rule_002(consistent_token_case):
'''
This rule checks for consistent capitalization of subtype names.
**Violation**
.. code-block:: vhdl
subtype read_size is range 0 to 9;
subtype write_size is range 0 to 9;
signal read : READ_SIZE;
signal write : write_size;
constant read_sz : read_size := 8;
constant write_sz : WRITE_size := 1;
**Fix**
.. code-block:: vhdl
subtype read_size is range 0 to 9;
subtype write_size is range 0 to 9;
signal read : read_size;
signal write : write_size;
constant read_sz : read_size := 8;
constant write_sz : write_size := 1;
'''
def __init__(self):
consistent_token_case.__init__(self, 'subtype', '002', lTokens, lIgnore)
|
gnowxilef/plexpy
|
plexpy/webserve.py
|
Python
|
gpl-3.0
| 52,488
| 0.002705
|
# This file is part of PlexPy.
#
# PlexPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PlexPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PlexPy. If not, see <http://www.gnu.org/licenses/>.
from plexpy import logger, notifiers, plextv, pmsconnect, common, log_reader, datafactory, graphs, users
from plexpy.helpers import checked, radio
from mako.lookup import TemplateLookup
from mako import exceptions
import plexpy
import threading
import cherrypy
import hashlib
import random
import json
import os
try:
# pylint:disable=E0611
# ignore this error because we are catching the ImportError
from collections import OrderedDict
# pylint:enable=E0611
except ImportError:
# Python 2.6.x fallback, from libs
from ordereddict import OrderedDict
def serve_template(templatename, **kwargs):
interface_dir = os.path.join(str(plexpy.PROG_DIR), 'data/interfaces/')
template_dir = os.path.join(str(interface_dir), plexpy.CONFIG.INTERFACE)
_hplookup = TemplateLookup(directories=[template_dir])
try:
template = _hplookup.get_template(templatename)
return template.render(**kwargs)
except:
return exceptions.html_error_template().render()
class WebInterface(object):
def __init__(self):
self.interface_dir = os.path.join(str(plexpy.PROG_DIR), 'data/')
@cherrypy.expose
def index(self):
if plexpy.CONFIG.FIRST_RUN_COMPLETE:
raise cherrypy.HTTPRedirect("home")
else:
raise cherrypy.HTTPRedirect("welcome")
@cherrypy.expose
def home(self):
config = {
"home_stats_length": plexpy.CONFIG.HOME_STATS_LENGTH,
"home_stats_type": plexpy.CONFIG.HOME_STATS_TYPE,
"home_stats_count": plexpy.CONFIG.HOME_STATS_COUNT,
"pms_identifier": plexpy.CONFIG.PMS_IDENTIFIER,
}
return serve_template(templatename="index.html", title="Home", config=config)
@cherrypy.expose
def welcome(self, **kwargs):
config = {
"launch_browser": checked(plexpy.CONFIG.LAUNCH_BROWSER),
"refresh_users_on_startup": checked(plexpy.CONFIG.REFRESH_USERS_ON_STARTUP),
"pms_identifier": plexpy.CONFIG.PMS_IDENTIFIER,
"pms_ip": plexpy.CONFIG.PMS_IP,
"pms_is_remote": checked(plexpy.CONFIG.PMS_IS_REMOTE),
"pms_port": plexpy.CONFIG.PMS_PORT,
"pms_token": plexpy.CONFIG.PMS_TOKEN,
"pms_ssl": checked(plexpy.CONFIG.PMS_SSL),
"pms_uuid": plexpy.CONFIG.PMS_UUID,
"tv_notify_enable": checked(plexpy.CONFIG.TV_NOTIFY_ENABLE),
"movie_notify_enable": checked(plexpy.CONFIG.MOVIE_NOTIFY_ENABLE),
"music_notify_enable": checked(plexpy.CONFIG.MUSIC_NOTIFY_ENABLE),
"tv_notify_on_start": checked(plexpy.CONFIG.TV_NOTIFY_ON_START),
"movie_notify_on_start": checked(plexpy.CONFIG.MOVIE_NOTIFY_ON_START),
"music_notify_on_start": checked(plexpy.CONFIG.MUSIC_NOTIFY_ON_START),
"video_logging_enable": checked(plexpy.CONFIG.VIDEO_LOGGING_ENABLE),
"music_logging_enable": checked(plexpy.CONFIG.MUSIC_LOGGING_ENABLE),
"logging_ignore_interval": plexpy.CONFIG.LOGGING_IGNORE_INTERVAL,
"check_github": checked(plexpy.CONFIG.CHECK_GITHUB)
}
# The setup wizard just refreshes the page on submit so we must redirect to home if config set.
# Also redirecting to home if a PMS token already exists - will remove this in future.
if plexpy.CONFIG.FIRST_RUN_COMPLETE or plexpy.CONFIG.PMS_TOKEN:
raise cherrypy.HTTPRedirect("home")
else:
return serve_template(templatename="welcome.html", title="Welcome", config=config)
@cherrypy.expose
def get_date_formats(self):
if plexpy.CONFIG.DATE_FORMAT:
date_format = plexpy.CONFIG.DATE_FORMAT
else:
date_format = 'YYYY-MM-DD'
if plexpy.CONFIG.TIME_FORMAT:
time_format = plexpy.CONFIG.TIME_FORMAT
else:
time_format = 'HH:mm'
formats = {'date_format': date_format,
'time_format': time_format}
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(formats)
@cherrypy.expose
def home_stats(self, time_range='30', stat_type='0', stat_count='5', **kwargs):
data_factory = datafactory.DataFactory()
stats_data = data_factory.get_home_stats(time_range=time_range, stat_type=stat_type, stat_count=stat_count)
return serve_template(templatename="home_stats.html", title="Stats", data=stats_data)
@cherrypy.expose
def library_stats(self, **kwargs):
pms_connect = pmsconnect.PmsConnect()
stats_data = pms_connect.get_library_stats()
return serve_template(templatename="library_stats.html", title="Library Stats", data=stats_data)
@cherrypy.expose
def history(self):
return serve_template(templatename="history.html", title="History")
@cherrypy.expose
def users(self):
return serve_template(templatename="users.html", title="Users")
@cherrypy.expose
def graphs(self):
return serve_template(templatename="graphs.html", title="Graphs")
@cherrypy.expose
def sync(self):
return serve_template(templatename="sync.html", title="Synced Items")
@cherrypy.expose
def user(self, user=None, user_id=None):
user_data = users.Users()
if user_id:
try:
user_details = user_data.get_user_details(user_id=user_id)
except:
logger.warn("Unable to retrieve friendly name for user_id %s " % user_id)
elif user:
try:
user_details = user_data.get_user_details(user=user)
except:
logger.warn("Unable to retrieve friendly name for user %s " % user)
else:
logger.debug(u"User page requested but no parameters received.")
raise cherrypy.HTTPRedirect("home")
return serve_template(templatename="user.html", title="User", data=user_details)
@cherrypy.expose
def edit_user_dialog(self, user=None, user_id=None, **kwargs):
user_data = users.Users()
if user_id:
result = user_data.get_user_friendly_name(user_id=user_id)
status_message = ''
elif user:
result = user_data.get_user_friendly_name(user=user)
status_message = ''
else:
result = None
status_message = 'An error occurred.'
return serve_template(templatename="edit_user.html", title="Edit User", data=result, status_message=status_message)
@cherrypy.expose
def edit_user(self, user=None, user_id=None, friendly_name=None, **kwargs):
if 'do_notify' in kwargs:
do_notify = kwargs.get('do_notify')
else:
do_notify = 0
if 'keep_history' in kwargs:
keep_history = kwargs.get('keep_history')
else:
keep_history = 0
if 'thumb' in kwargs:
custom_avatar = kwargs['thumb']
else:
custom_avatar = ''
user_data = users.Users()
if user_id:
try:
user_data.set_user_friendly_name(user_id=user_id,
friendly_name=friendly_name,
do_notify=do_notify,
keep_history=keep_history)
user_data.set_user_profile_url(user_id=user_id,
profile_url=custom_a
|
MyRobotLab/pyrobotlab
|
home/hairygael/InMoov2.full3.byGael.Langevin.1.py
|
Python
|
apache-2.0
| 138,420
| 0.055592
|
#file : InMoov2.full3.byGael.Langevin.1.py
# this script is provided as a basic guide
# most parts can be run by uncommenting them
# InMoov now can be started in modular pieces
import random
import threading
import itertools
leftPort = "COM20"
rightPort = "COM7"
i01 = Runtime.createAndStart("i01", "InMoov")
#inmoov = Runtime.createAndStart("alice", "ProgramAB")
#inmoov.startSession()
directionServo = Runtime.start("directionServo","Servo")
forwardServo = Runtime.start("forwardServo","Servo")
right = Runtime.start("i01.right", "Arduino")
right.connect("COM7")
#cleverbot = Runtime.createAndStart("cleverbot","CleverBot")
# starts everything
##i01.startAll(leftPort, rightPort)
directionServo.attach(right, 12)
forwardServo.attach(right, 13)
#directionServo.attach("COM7", 12)
#forwardServo.attach("COM7", 13)
# starting parts
i01.startMouthControl(leftPort)
i01.startMouth()
#to tweak the default voice
i01.mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Ryan&txt=")
i01.startHead(leftPort)
##############
# tweaking default settings of jaw
i01.head.jaw.setMinMax(65,90)
#i01.head.jaw.map(0,180,10,35)
i01.mouthControl.setmouth(65,90)
i01.head.jaw.setRest(90)
# tweaking default settings of eyes
i01.head.eyeY.setMinMax(0,180)
i01.head.eyeY.map(0,180,80,100)
i01.head.eyeY.setRest(85)
i01.head.eyeX.setMinMax(0,180)
i01.head.eyeX.map(0,180,70,100)
i01.head.eyeX.setRest(85)
i01.head.neck.setMinMax(0,180)
i01.head.neck.map(0,180,15,155)
i01.head.neck.setRest(70)
i01.head.rothead.setMinMax(0,180)
i01.head.rothead.map(0,180,30,150)
i01.head.rothead.setRest(86)
###################
i01.startEyesTracking(leftPort)
i01.startHeadTracking(leftPort)
##############
i01.startEar()
##############
torso = i01.startTorso("COM20")
# tweaking default torso settings
torso.topStom.setMinMax(0,180)
torso.topStom.map(0,180,70,120)
torso.midStom.setMinMax(0,180)
torso.topStom.map(0,180,60,120)
#torso.lowStom.setMinMax(0,180)
torso.topStom.setRest(105)
#torso.midStom.setRest(90)
#torso.lowStom.setRest(90)
##############
i01.startLeftHand(leftPort)
# tweaking default settings of left hand
i01.leftHand.thumb.setMinMax(0,180)
i01.leftHand.index.setMinMax(0,180)
i01.leftHand.majeure.setMinMax(0,180)
i01.leftHand.ringFinger.setMinMax(0,180)
i01.leftHand.pinky.setMinMax(0,180)
i01.leftHand.thumb.map(0,180,45,140)
i01.leftHand.index.map(0,180,40,140)
i01.leftHand.majeure.map(0,180,30,176)
i01.leftHand.ringFinger.map(0,180,25,175)
i01.leftHand.pinky.map(0,180,15,112)
################
i01.startLeftArm(leftPort)
#tweak defaults LeftArm
#i01.leftArm.bicep.setMinMax(0,90)
#i01.leftArm.rotate.setMinMax(46,160)
#i01.leftArm.shoulder.setMinMax(30,100)
#i01.leftArm.omoplate.setMinMax(10,75)
################
i01.startRightHand(rightPort,"atmega2560")
# tweaking defaults settings of right hand
i01.rightHand.thumb.setMinMax(0,180)
i01.rightHand.index.setMinMax(0,180)
i01.rightHand.majeure.setMinMax(0,180)
i01.rightHand.ringFinger.setMinMax(0,180)
i01.rightHand.pinky.setMinMax(0,180)
i01.rightHand.thumb.map(0,180,55,135)
i01.rightHand.index.map(0,180,35,140)
i01.rightHand.majeure.map(0,180,8,120)
i01.rightHand.ringFinger.map(0,180,40,125)
i01.rightHand.pinky.map(0,180,10,110)
#################
i01.startRightArm(rightPort)
# tweak default RightArm
#i01.rightArm.bicep.setMinMax(0,90)
#i01.rightArm.rotate.setMinMax(46,160)
#i01.rightArm.shoulder.setMinMax(30,100)
#i01.rightArm.omoplate.setMinMax(10,75)
################
# starting part with a reference, with a reference
# you can interact further
#opencv = i01.startOpenCV()
#opencv.startCapture()
# or you can use i01's reference
#i01.opencv.startCapture()
#i01.headTracking.faceDetect()
#i01.eyesTracking.faceDetect()
#i01.headTracking.pyramidDown()
############################################################
#to tweak the default Pid values
i01.eyesTracking.xpid.setPID(20.0,5.0,0.1)
i01.eyesTracking.ypid.setPID(20.0,5.0,0.1)
i01.headTracking.xpid.setPID(12.0,5.0,0.1)
i01.headTracking.ypid.setPID(12.0,5.0,0.1)
############################################################
i01.startPIR("COM20",30)
#def input():
#print 'python object is ', msg_clock_pulse
#pin = msg_i01_right_publishPin.data[0]
#print 'pin data is ', pin.pin, pin.value
#if (pin.value == 1):
#i01.mouth.speak("I was dreaming")
#powerup()
#relax()
###########################
#################################
helvar = 1
weathervar = 1
# play rock paper scissors
inmoov = 0
human = 0
###############################################################
# after a start you may call detach to detach all
# currently attached servos
#i01.detach()
#i01.attach()
# auto detaches any attached servos after 120 seconds of inactivity
#i01.autoPowerDownOnInactivity(100)
#i01.speakErrors(false)
# purges any "auto" methods
#i01.purgeAllTasks()
# remote control services
# WebGUI - for more information see
# http://myrobotlab.org/service/WebGUI
# Xmpp - for more information see
# http://myrobotlab.org/service/Xmpp
# system check - called at anytime
#i01.systemCheck()
# take the current position of all attached servos <- FIXME
# and create a new method named "newGesture"
#i01.captureGesture("newGesture")
# all ear associations are done python startEar() only starts
# the peer service
# After ear.startListening(), the ear will listen for commands
#############################################################################################
# i01.systemCheck()
#i01.mouth.speakBlocking(cleverbot.chat("hi"))
#i01.mouth.speakBlocking(cleverbot.chat("how are you"))
# verbal commands
ear = i01.ear
ear.addCommand("rest", "python", "rest")
ear.addCommand("attach head", "i01.head", "attach")
ear.addCommand("disconnect head", "i01.head", "detach")
ear.addCommand("attach eyes", "i01.head.eyeY", "attach")
ear.addCommand("disconnect eyes", "i01.head.eyeY", "detach")
ear.addCommand("attach right hand", "i01.rightHand", "attach")
ear.addCommand("disconnect right hand", "i01.rightHand", "detach")
ear.addCommand("attach left hand", "i01.leftHand", "attach")
ear.addCommand("disconnect left hand", "i01.leftHand", "detach")
ear.addCommand("attach everything", "i01", "attach")
ear.addCommand("disconnect everything", "i01", "detach")
ear.addCommand("attach left arm", "i01.leftArm", "attach")
ear.addCommand("disconnect left arm", "i01.leftArm", "detach")
ear.addCommand("attach right arm", "i01.rightArm", "attach")
ear.addCommand("disconnect right arm", "i01.rightArm", "detach")
ear.addCommand("attach torso", "i01.torso", "attach")
ear.addCommand("disconnect torso", "i01.torso", "detach")
ear.addCommand("attach jaw", "i01.head.jaw", "attach")
ear.addCommand("disconnect jaw", "i01.head.jaw", "detach")
ear.addCommand("attach wheel", "directionServo","forwardServo", "attach")
ear.addCommand("disconnect wheel", "directionServo","forwardServo", "detach")
ear.addCommand("search humans", "python", "trackHumans")
ear.addCommand("quit search", "python", "stopTracking")
ear.addCommand("track", "python", "trackPoint")
ear.addCommand("freeze track", "python", "stopTracking")
ear.addCommand("open hand", "python", "handopen")
ear.addCommand("close hand", "python", "handclose")
ear.addCommand("camera on", i01.getName(), "cameraOn")
ear.addCommand("off camera", i01.getName(), "cameraOff")
ear.addCommand("capture gesture", i01.getName(), "captureGesture")
# FIXME - lk tracking setpoint
ear.addCommand("giving", i01.getName(), "giving")
ear.addCommand("fighter", i01.getName(), "fighter")
ear.addCommand("fist hips", "python", "fistHips")
ear.addCommand("look at this", i01.getName(), "lookAtThis")
ear.addCommand("victory", i01.getName(), "victory")
ear.addCommand("arms up", "python", "armsUp")
ear.addCommand("arms front", i01.getName(), "armsFront")
ear.addCommand("da vinci", i01.getName(), "daVinci")
# FIXME -
ear.addCommand("manual", ear.getName(), "lockOutAllGrammarExcept", "voice control")
ear.addCommand("voice control", ear.getName(), "clearLock")
ear.addCommand("stop listening", ear.getName(), "stopListening")
##sets the servos back to full speed, anywhere in sequence or gestures
ear.addCommand("full speed", "python", "fullspeed")
|
phapdv/project_euler
|
pe56.py
|
Python
|
mit
| 616
| 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from base import Problem
class Solution(Problem):
def solve(self, input_):
numberLargest = 0
for a in range(1, 100):
if a % 10 != 0:
for b in range(1, 100):
num_pow = a**b
number_sum = self.digits_sum(num_pow)
if number_sum > numberLargest:
numberLargest = number_sum
print('Solve problem {}'.format(self.number))
print(numberLargest)
if __name__ == '__main__':
solution = Solution(56)
solution.solve(100)
|
kotoromo/Numeric_Methods
|
Bisection/util.py
|
Python
|
gpl-3.0
| 1,437
| 0.005567
|
from decimal import *
class util:
def getInterval(self, function):
"""
function: lambda function
Method that obtains the integer intervals where the function
changes its sign from negative to positive.
If it finds the function's root within its intervals, it
returns it.
returns: Tuple containing the intervals or the function's root
"""
sup = 5
inf = -5
while(True):
for i in range(inf, sup):
if (function(i) < 0) and (function(i+1) > 0):
return (i, i+1)
if function(i) == 0:
print("Solution found!: " + str(i))
return (i, function(i))
inf-=1
sup+=1
#print(inf, sup)
def getMiddleValue(self, a, b):
"""
Returns a value in-between a and b.
returns: int
"""
return (a+b)/2.0
def computeAbsoluteError(self, x, x_prev):
"""
Returns the absolute error.
returns: float
"""
return abs(float((x_prev)-float(x)))
def verifyOppositeSign(self, a, b):
"""
Returns whether or not a has opposite signs with b.
returns: boolean
"""
# True exactly when one of a and b is negative and the other is not
return ( (a < 0) is not (b < 0))
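# Illustrative bisection loop built on the helpers above (comments only; a
# minimal sketch in which the target function f and the 1e-6 tolerance are
# assumptions, and the root-already-found return value of getInterval is not
# handled):
#
#     u = util()
#     f = lambda x: x**3 - x - 2
#     a, b = u.getInterval(f)             # integer bracket with a sign change
#     x_prev = float(a)
#     while True:
#         x = u.getMiddleValue(a, b)
#         if u.computeAbsoluteError(x, x_prev) < 1e-6:
#             break
#         if u.verifyOppositeSign(f(a), f(x)):
#             b = x                       # the root lies in [a, x]
#         else:
#             a = x                       # the root lies in [x, b]
#         x_prev = x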
|
indrz/indrz
|
indrz/poi_manager/forms.py
|
Python
|
gpl-3.0
| 769
| 0.003901
|
from django import forms
from poi_manager.models import Poi, PoiCategory
from mptt.forms import TreeNodeChoiceField
class PoiCategoryForm(forms.ModelForm):
cat_name = forms.CharField(max_length=128, help_text="Please enter the category name.")
parent = TreeNodeChoiceField(queryset=PoiCategory.objects.all(), required=False)
class Meta:
model = PoiCategory
fields = ('cat_name', 'parent',)
class PoiForm(forms.ModelForm):
name = forms.CharField(max_length=128, help_text="Please enter the title of the page.")
floor_num = forms.IntegerField(initial=0, required=False)
category = TreeNodeChoiceField(queryset=PoiCategory.objects.all())
class Meta:
model = Poi
fields = ('name', 'floor_num', 'category',)
|
ngageoint/scale
|
scale/storage/migrations/0008_auto_20170609_1443.py
|
Python
|
apache-2.0
| 1,859
| 0.001614
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-09 14:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('batch', '0002_auto_20170412_1225'),
('recipe', '0018_recipefile_recipe_input'),
('storage', '0007_auto_20170412_1225'),
]
operations = [
migrations.AddField(
model_name='scalefile',
name='batch',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='batch.Batch'),
),
migrations.AddField(
model_name='scalefile',
name='job_output',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AddField(
model_name='scalefile',
name='recipe',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='recipe.Recipe'),
),
migrations.AddField(
model_name='scalefile',
name='recipe_job',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AddField(
model_name='scalefile',
name='recipe_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='recipe.RecipeType'),
),
migrations.AddField(
model_name='scalefile',
name='source_ended',
field=models.DateTimeField(blank=True, db_index=True, null=True),
),
migrations.AddField(
model_name='scalefile',
name='source_started',
field=models.DateTimeField(blank=True, db_index=True, null=True),
),
]
|
andreaso/ansible
|
lib/ansible/modules/network/nxos/nxos_static_route.py
|
Python
|
gpl-3.0
| 10,343
| 0.002804
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_static_route
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages static route configuration
description:
- Manages static route configuration
author: Gabriele Gerbino (@GGabriele)
notes:
- If no vrf is supplied, vrf is set to default.
- If C(state=absent), the route will be removed, regardless of the
non-required parameters.
options:
prefix:
description:
- Destination prefix of static route.
required: true
next_hop:
description:
- Next hop address or interface of static route.
If interface, it must be the fully-qualified interface name.
required: true
vrf:
description:
- VRF for static route.
required: false
default: default
tag:
description:
- Route tag value (numeric).
required: false
default: null
route_name:
description:
- Name of the route. Used with the name parameter on the CLI.
required: false
default: null
pref:
description:
- Preference or administrative distance of the route (range 1-255).
required: false
default: null
state:
description:
- Manage the state of the resource.
required: true
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_static_route:
prefix: "192.168.20.64/24"
next_hop: "3.3.3.3"
route_name: testing
pref: 100
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"next_hop": "3.3.3.3", "pref": "100",
"prefix": "192.168.20.64/24", "route_name": "testing",
"vrf": "default"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"next_hop": "3.3.3.3", "pref": "100",
"prefix": "192.168.20.0/24", "route_name": "testing",
"tag": null}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["ip route 192.168.20.0/24 3.3.3.3 name testing 100"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def state_present(module, candidate, prefix):
commands = list()
invoke('set_route', module, commands, prefix)
if commands:
if module.params['vrf'] == 'default':
candidate.add(commands, parents=[])
else:
candidate.add(commands, parents=['vrf context {0}'.format(module.params['vrf'])])
def state_absent(module, candidate, prefix):
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
commands = list()
parents = 'vrf context {0}'.format(module.params['vrf'])
invoke('set_route', module, commands, prefix)
if module.params['vrf'] == 'default':
config = netcfg.get_section(commands[0])
if config:
invoke('remove_route', module, commands, config, prefix)
candidate.add(commands, parents=[])
else:
config = netcfg.get_section(parents)
splitted_config = config.split('\n')
splitted_config = map(str.strip, splitted_config)
if commands[0] in splitted_config:
invoke('remove_route', module, commands, config, prefix)
candidate.add(commands, parents=[parents])
def fix_prefix_to_regex(prefix):
prefix = prefix.replace('.', '\.').replace('/', '\/')
return prefix
def get_existing(module, prefix, warnings):
key_map = ['tag', 'pref', 'route_name', 'next_hop']
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
parents = 'vrf context {0}'.format(module.params['vrf'])
prefix_to_regex = fix_prefix_to_regex(prefix)
route_regex = ('.*ip\sroute\s{0}\s(?P<next_hop>\S+)(\sname\s(?P<route_name>\S+))?'
'(\stag\s(?P<tag>\d+))?(\s(?P<pref>\d+)).*'.format(prefix_to_regex))
if module.params['vrf'] == 'default':
config = str(netcfg)
else:
config = netcfg.get_section(parents)
if config:
try:
match_route = re.match(route_regex, config, re.DOTALL)
group_route = match_route.groupdict()
for key in key_map:
if key not in group_route:
group_route[key] = ''
group_route['prefix'] = prefix
group_route['vrf'] = module.params['vrf']
except (AttributeError, TypeError):
group_route = {}
else:
group_route = {}
msg = ("VRF {0} didn't exist.".format(module.params['vrf']))
if msg not in warnings:
warnings.append(msg)
return group_route
def remove_route(module, commands, config, prefix):
commands.append('no ip route {0} {1}'.format(prefix, module.params['next_hop']))
def set_route(module, commands, prefix):
route_cmd = 'ip route {0} {1}'.format(prefix, module.params['next_hop'])
if module.params['route_name']:
route_cmd += ' name {0}'.format(module.params['route_name'])
if module.params['tag']:
route_cmd += ' tag {0}'.format(module.params['tag'])
if module.params['pref']:
route_cmd += ' {0}'.format(module.params['pref'])
commands.append(route_cmd)
def get_dotted_mask(mask):
bits = 0
for i in xrange(32-mask,32):
bits |= (1 << i)
mask = ("%d.%d.%d.%d" % ((bits & 0xff000000) >> 24,
(bits & 0xff0000) >> 16, (bits & 0xff00) >> 8 , (bits & 0xff)))
return mask
def get_network_start(address, netmask):
address = address.split('.')
netmask = netmask.split('.')
return [str(int(address[x]) & int(netmask[x])) for x in range(0, 4)]
def network_from_string(address, mask, module):
octects = address.split('.')
if len(octects) > 4:
module.fail_json(msg='Incorrect address format.', address=address)
for octect in octects:
try:
if int(octect) < 0 or int(octect) > 255:
module.fail_json(msg='Address may contain invalid values.',
address=address)
except ValueError:
module.fail_json(msg='Address may contain non-integer values.',
address=address)
try:
if int(mask) < 0 or int(mask) > 32:
module.fail_json(msg='Incorrect mask value.', mask=mask)
except ValueError:
module.fail_json(msg='Mask may contain non-integer values.', mask=mask)
netmask = get_dotted_mask(int(mask))
return '.'.join(get_network_start(address, netmask))
def normalize_prefix(module, prefix):
spli
|
alexandrovteam/pyImagingMSpec
|
pyImagingMSpec/test/imutils_test.py
|
Python
|
apache-2.0
| 3,746
| 0.001068
|
import unittest
import numpy as np
from ..imutils import nan_to_zero, quantile_threshold, interpolate
class ImutilsTest(unittest.TestCase):
def test_nan_to_zero_with_ge_zero(self):
ids = (
np.zeros(1),
np.ones(range(1, 10)),
np.arange(1024 * 1024)
)
for id_ in ids:
before = id_.copy()
notnull = nan_to_zero(id_)
np.testing.assert_array_equal(before, id_)
np.testing.assert_array_equal(notnull, before != 0)
def test_nan_to_zero_with_negatives(self):
negs = (
np.array([-1]),
np.array([np.nan]),
- np.arange(1, 1024 * 1024 + 1).reshape((1024, 1024)),
np.linspace(0, -20, 201)
)
for neg in negs:
sh = neg.shape
expected_notnull = np.zeros(sh).astype(np.bool_)
actual_notnull = nan_to_zero(neg)
np.testing.assert_array_equal(neg, np.zeros(sh))
np.testing.assert_array_equal(actual_notnull, expected_notnull)
def test_nan_to_zero_with_mixed(self):
test_cases = (
(np.array([-1, np.nan, 1e6, -1e6]), np.array([0, 0, 1e6, 0])),
(np.arange(-2, 7).reshape((3, 3)), np.array([[0, 0, 0], np.arange(1, 4), np.arange(4, 7)])),
)
for input_, expected in test_cases:
nan_to_zero(input_)
np.testing.assert_array_equal(input_, expected)
def test_nan_to_zero_with_empty(self):
in_ = None
self.assertRaises(AttributeError, nan_to_zero, in_)
self.assertIs(in_, None)
in_ = []
self.assertRaises(TypeError, nan_to_zero, in_)
self.assertEqual(in_, [])
in_ = np.array([])
notnull = nan_to_zero(in_)
self.assertSequenceEqual(in_, [])
self.assertSequenceEqual(notnull, [])
def test_quantile_threshold_ValueError(self):
test_cases = (
(np.arange(0), np.arange(0, dtype=np.bool_), -37),
(np.arange(0), np.arange(0, dtype=np.bool_), -4.4),
(np.arange(0), np.arange(0, dtype=np.bool_), 101)
)
kws = ('im', 'notnull_mask', 'q_val',)
for args in test_cases:
kwargs = {kw: val for kw, val in zip(kws, args)}
self.assertRaises(ValueError, quantile_threshold, **kwargs)
def test_quantile_threshold_trivial(self):
test_cases = (
((np.arange(10), np.ones(10, dtype=np.bool_), 100), (np.arange(10), 9)),
(
(np.arange(101, dtype=np.float32), np.ones(101, dtype=np.bool_), 100. / 3),
(np.concatenate((np.arange(34), np.repeat(100. / 3, 67))), 100. / 3),
),
(
(np.arange(20), np.repeat([True, False], 10), 100),
(np.concatenate((np.arange(10), np.repeat(9, 10))), 9)
),
)
kws = ('im', 'notnull_mask', 'q_val',)
for args, expected in test_cases:
kwargs = {kw: val for kw, val in zip(kws, args)}
im_in = args[0]
im_expected, q_expected = expected
q_actual = quantile_threshold(**kwargs)
self.assertAlmostEqual(q_expected, q_actual, delta=1e-7)
np.testing.assert_array_almost_equal(im_in, im_expected, decimal=6)
def test_interpolate(self):
im_in = np.arange(900, dtype=np.float32).reshape((30, 30))
im_in[2, 3] = np.nan
notnull = im_in > 0
im_out = interpolate(im_in, notnull)
np.testing.assert_array_almost_equal(im_in[notnull], im_out[notnull])
self.assertAlmostEqual(im_out[0, 0], 0)
self.assertAlmostEqual(im_out[2, 3], 63)
if __name__ == '__main__':
unittest.main()
|
ProjectSky/FrackinUniverse-sChinese-Project
|
script/tools/patch_tool.py
|
Python
|
mit
| 6,836
| 0.009206
|
import json
import os
import re
from codecs import open as open_n_decode
from json_tools import field_by_path, list_field_paths, prepare
# Function copied from the web for merging JSON; supposedly fairly reliable, but it gave me no end of trouble
def add_value(dict_obj, path, value):
obj = dict_obj
for i, v in enumerate(path):
if i + 1 == len(path):
if not isinstance(obj.get(v, ''), list):
obj[v] = list()
obj[v].append(value)
continue
obj[v] = obj.get(v, '') or dict()
obj = obj[v]
return dict_obj
# Helper required for filtering text reliably; it works by extracting the index values of the entries that contain text
def op_select(jsons):
index = 0
json_text = json.loads(jsons)
op_list = list()
i_list = list()
result = list()
for i, value in enumerate(json_text):
op_result = value['op']
op_list.append(op_result)
i_list.append(i)
op_dict = list(zip(i_list, op_list))
for value in op_dict:
if list(op_dict[index])[1] == 'remove' or list(op_dict[index])[1] == 'test':
pass
else:
result.append(list(op_dict[index])[0])
index = index+1
return result
# A parsing approach written for certain patch files? More or less; it is very inefficient
def detect_patch(jsons):
string = prepare(jsons)
result = json.loads(string)
new_list=list()
for i in result:
if 'path' in i:
new_list.append(i)
else:
for v in i:
new_list.append(v)
return new_list
# A completely reliable scanning approach for ordinary patches, dropping the tedious conversion and dictionary filtering!
def trans_patch(jsons,ex=None):
json_text = detect_patch(jsons)
dict_result = list()
fin_result = list()
for i, value in enumerate(json_text):
try:
dict_result.append([value['op'], value['path'],value['value']])
except:
pass
if ex is not None:
try:
path_list = [i[1] for i in dict_result]
for v in ex:
path_list = replace_the_path(path_list, v)
for x,y in enumerate(path_list):
dict_result[x][1] = y
except:
pass
for i in dict_result:
if i[0] == 'add' or i[0] == 'replace':
path_1 = i[1]
path_2 = list_field_paths(i[2])
if path_2 == []:
path_2 = ['*']
else:
pass
for v in path_2:
if path_2 == ['*']:
value = i[2]
path = path_1.replace('/', '', 1)
fin_result.append([path,value])
else:
value = field_by_path(i[2], v)
path = (path_1+'/' + v).replace('/', '', 1)
fin_result.append([path,value])
else:
pass
return fin_result
def to_a_list(tuple, no):
re = list()
for i in tuple:
re.append(i[no])
return re
# fuck utf8 bom! (unfinished)
"""
def fuck_utf8_bom(jsons):
print(str(jsons))
u = str(jsons).decode('utf-8-sig')
s = u.encode('utf-8')
return s
"""
def trans_patch_spcial_1(jsons, ex):
json_text = detect_patch(jsons)
value_list = list()
path_list = list()
path_list_2 = list()
path_list_3 = list()
value_list_2 = list()
op_list = list()
ex_list = ex
for i, value in enumerate(json_text):
path_result = value['path']
op_result = value['op']
try:
value_result = value['value']
except:
value_result = ''
value_list.append(value_result)
path_list.append(path_result)
op_list.append(op_result)
"""
for i in ex_list:
if i[2] == 1 :
o = i[1]
for text in path_list:
if not re.search(i[0]+'/'+'-',text) == None :
wait = text.replace(i[0]+'/'+'-',i[0]+'/'+str(o))
path_list_3.append(wait)
o=o+1
else:
path_list_3.append(text)
else:
for text in path_list:
if not re.search(i[0]+'/'+'-',text) == None :
wait = text.replace(i[0]+'/-',i[0]+'/'+str(i[1]))
path_list_3.append(wait)
else:
path_list_3.append(text)
path_list = path_list_3
"""
for i in ex_list:
path_list = replace_the_path(path_list, i)
dict_result = tuple(zip(op_list, path_list, value_list))
for i in dict_result:
if i[0] == 'add' or i[0] == 'replace':
path_1 = i[1]
path_2 = list_field_paths(i[2])
if path_2 == []:
path_2 = ['*']
else:
pass
for v in path_2:
if path_2 == ['*']:
value = i[2]
path = path_1.replace('/', '', 1)
value_list_2.append(value)
path_list_2.append(path)
else:
value = field_by_path(i[2], v)
path = (path_1+'/' + v).replace('/', '', 1)
value_list_2.append(value)
path_list_2.append(path)
else:
pass
result = tuple(zip(path_list_2, value_list_2))
return result
def replace_the_path(path, rule):
path_list_3 = list()
o = rule[1]
for text in path:
if rule[2] == 1:
if not re.search(rule[0]+'/'+'-', text) == None:
wait = text.replace(rule[0]+'/'+'-', rule[0]+'/'+str(o))
path_list_3.append(wait)
o = o+1
else:
path_list_3.append(text)
else:
if not re.search(rule[0]+'/'+'-', text) == None:
wait = text.replace(rule[0]+'/-', rule[0]+'/'+str(rule[1]))
path_list_3.append(wait)
else:
path_list_3.append(text)
return path_list_3
if __name__ == "__main__":
jsons2 = open_n_decode(
'E:/FrackinUniverse/dungeon_worlds.config.patch', "r", "utf_8_sig")
jsons3 = open_n_decode(
'E:/FrackinUniverse/items/categories.config.patch', "r", "utf_8_sig")
list233 = [('generic', 70, 1),('cheerful', 31, 1),('jerk', 31, 1),('flirty', 31, 1),('anxious', 31, 1),('easilyspooked',32,1),('clumsy',31,1),('excited',31,1),('intrusive',31,1),('dumb',32,1),('emo',30,1),('fast',31,1),('nocturnal',32,1),('socialite',31,1),('ambitious',30,1)]
test = trans_patch(jsons2)
##test = detect_patch(json.loads(prepare(jsons3)))
"""
dict_old = dict()
for i in range(len(test)):
dict_old['/'+test[i][0]] = test[i][1]
print(dict_old)
"""
|
mvdbeek/tools-iuc
|
data_managers/data_manager_fetch_ncbi_taxonomy/data_manager/data_manager.py
|
Python
|
mit
| 4,462
| 0.002465
|
import argparse
import datetime
import json
import os
import shutil
import tarfile
import zipfile
from urllib.request import Request, urlopen
def url_download(url, workdir):
file_path = os.path.join(workdir, 'download.dat')
if not os.path.exists(workdir):
os.makedirs(workdir)
src = None
dst = None
try:
req = Request(url)
src = urlopen(req)
with open(file_path, 'wb') as dst:
while True:
chunk = src.read(2**10)
if chunk:
dst.write(chunk)
else:
break
finally:
if src:
src.close()
if tarfile.is_tarfile(file_path):
fh = tarfile.open(file_path, 'r:*')
elif zipfile.is_zipfile(file_path):
fh = zipfile.ZipFile(file_path, 'r')
else:
return
fh.extractall(workdir)
os.remove(file_path)
def download_name_maps(url, workdir, partial):
if partial:
map_files = [
'pdb.accession2taxid.gz',
]
else:
map_files = [
'dead_nucl.accession2taxid.gz',
'dead_prot.accession2taxid.gz',
'dead_wgs.accession2taxid.gz',
'nucl_gb.accession2taxid.gz',
'nucl_wgs.accession2taxid.gz',
'pdb.accession2taxid.gz',
'prot.accession2taxid.gz',
'prot.accession2taxid.FULL.gz'
]
if not os.path.exists(workdir):
os.makedirs(workdir)
for map in map_files:
src = "{}{}".format(url, map)
dest = os.path.join(workdir, map)
print("Downloading taxonomy accession2taxid file from {} to {}".format(src, dest))
try:
req = Request(src)
src = urlopen(req)
with open(dest, 'wb') as dst:
while True:
chunk = src.read(2**10)
if chunk:
dst.write(chunk)
else:
break
finally:
if src:
src.close()
def move_files_to_final_dir(workdir, target_directory, copy=False):
for filename in os.listdir(workdir):
if copy:
shutil.copy(os.path.join(workdir, filename), target_directory)
else:
shutil.move(os.path.join(workdir, filename), target_directory)
def main(args):
workdir = os.path.abspath(os.path.join(os.getcwd(), 'taxonomy'))
url_download(args.url, workdir)
data_manager_entry = {}
data_manager_entry['value'] = args.name.lower()
data_manager_entry['name'] = args.name
data_manager_entry['path'] = '.'
data_manager_json = dict(data_tables=dict(ncbi_taxonomy=data_manager_entry))
with open(args.output) as fh:
params = json.load(fh)
if args.name_maps:
workdir_a2t = os.path.join(os.getcwd(), 'accession2taxid')
download_name_maps("ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/accession2taxid/", workdir_a2t, args.partial)
target_directory_a2t = os.path.join(params['output_data'][0]['extra_files_path'], "accession2taxid")
os.makedirs(target_directory_a2t)
move_files_to_final_dir(workdir_a2t, target_directory_a2t)
# Also copy taxonomy data to accession2taxid dir
move_files_to_final_dir(workdir, target_directory_a2t, copy=True)
data_manager_json['data_tables']['ncbi_accession2taxid'] = data_manager_entry
target_directory_tax = os.path.join(params['output_data'][0]['extra_files_path'], "taxonomy")
os.makedirs(target_directory_tax)
move_files_to_final_dir(workdir, target_directory_tax)
with open(args.output, 'w') as fh:
json.dump(data_manager_json, fh, sort_keys=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create data manager json.')
parser.add_argument('--out', dest='output', action='store', help='JSON filename')
parser.add_argument('--name', dest='name', action='store', default=str(datetime.date.today()), help='Data table entry unique ID')
parser.add_argument('--url', dest='url', action='store', default='ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz', help='Download URL')
parser.add_argument('--name-maps', dest='name_maps', action='store_true', help='')
parser.add_argument('--partial', dest='partial', action='store_true', help='Only download a small subset of data (for testing)')
args = parser.parse_args()
main(args)
|