repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
shirlei/helios-server | helios_auth/view_utils.py | Python | apache-2.0 | 1,428 | 0.013305 | """
Utilities for all views
Ben Adida (12-30-2008)
"""
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import loader
import helios_auth
from helios_auth.security import get_user
##
## BASICS
##
SUCCESS = HttpResponse("SUCCESS")
##
## template abstraction
##
def prepare_vars(request, values):
vars_with_user = values.copy()
if request:
vars_with_user['user'] = get_user(request)
vars_with_user['csrf_token'] = request.session['csrf_token']
vars_with_user['SECURE_URL_HOST'] = settings.SECURE_URL_HOST
vars_with_user['STATIC'] = '/static/auth'
vars_with_user['MEDIA_URL'] = '/static/auth/'
vars_with_user['TEMPLATE_BASE'] = helios_auth.TEMPLATE_BASE
vars_with_user['TEMPLATE_BASENONAV'] = helios_auth.TEMPLATE_BASENONAV
vars_with_user['settings'] = settings
return vars_with_user
def render_template(request, template_name, values=None):
vars_with_user = prepare_vars(request, val | ues or {})
return render_to_response('helios_auth/templates/%s.html' % template_name, vars_with_user)
def render_template_raw(request, template_name, values=None):
t = loader.get_template(template_name + '.html')
values = values or {}
vars_with_user = prepare_vars(request, values)
return t.render(context=vars_with_user, request=request)
def render_json(json_txt):
return HttpResponse( | json_txt)
|
w568w/GitHubStar | settings.py | Python | gpl-3.0 | 220 | 0.075 | # -* | - coding:utf-8 -*-
#############settings#############
NAME = "1" #GitStar用户名
PASSWORD = "1" #GitStar密码
GITNAME = "1" #GitHub用户名
GITPASSWORD = "1" #GitHub密码
######## | #####settings#############
|
byuphamerator/phamerator-dev | phamerator/plugins/get_relatives.py | Python | gpl-2.0 | 429 | 0.006993 | #!/usr/bin/env python
from phamerator import *
from phamerator.phamerator_manage_db import *
from phamerator.db_conf import db_conf
import sys, getpass
GeneID = sys.argv[1]
password = getpass. | getpass()
db = raw_input('database: ')
c = db_conf(username='root', password=password, | server='134.126.132.72', db=db).get_cursor()
print get_relatives(c, GeneID, alignmentType='both', clustalwThreshold=0.275, blastThreshold=0.0001)
|
NeostreamTechnology/Microservices | venv/lib/python2.7/site-packages/simplejson/tests/test_item_sort_key.py | Python | mit | 1,376 | 0.00436 | from unittest import TestCase
import simplejson as json
from operator import itemgetter
class TestItemSortKey(TestCase):
def test_simple_first(self):
a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
self.assertEqual(
'{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}',
json.dumps(a, item_sort_key=json.simple_first))
|
def test_case(self):
a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
self.assertEqual(
'{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, | "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=itemgetter(0)))
self.assertEqual(
'{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=lambda kv: kv[0].lower()))
def test_item_sort_key_value(self):
# https://github.com/simplejson/simplejson/issues/173
a = {'a': 1, 'b': 0}
self.assertEqual(
'{"b": 0, "a": 1}',
json.dumps(a, item_sort_key=lambda kv: kv[1]))
|
davidam/python-examples | perceval/perceval_gerrit_me.py | Python | gpl-3.0 | 615 | 0 | #! /usr/bin/env python3
from datetime import datetime, timedelta
from pe | rceval.backends.core.gerrit import Gerrit
# hostname of the Gerrit instance
hostname = 'gerrit.opnfv.org'
# user for sshing to the Gerrit instance
user = 'd.arroyome'
# retrieve on | ly reviews changed since one day ago
from_date = datetime.now() - timedelta(days=1)
# create a Gerrit object, pointing to hostname, using user for ssh access
repo = Gerrit(hostname=hostname, user=user)
# fetch all reviews as an iterator, and iterate it printing each review id
for review in repo.fetch(from_date=from_date):
print(review['data']['number'])
|
jasondunsmore/heat | heat_integrationtests/functional/test_default_parameters.py | Python | apache-2.0 | 3,045 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from heat_integrationtests.functional import functional_base
class DefaultParametersTest(functional_base.FunctionalTestsBase):
template = '''
heat_template_version: 2013-05-23
parameters:
length:
type: string
default: 40
resources:
random1:
type: nested_random.yaml
random2:
type: OS::Heat::RandomString
properties:
length: {get_param: length}
outputs:
random1:
value: {get_attr: [random1, random1_value]}
random2:
value: {get_resource: random2}
'''
nested_template = '''
heat_template_version: 2013-05-23
parameters:
length:
type: string
default: 50
resources:
random1:
type: OS::Heat::RandomString
properties:
length: {get_param: length}
outputs:
random1 | _value:
value: {get_resource: random1}
'''
scenarios = [
('none', dict(param=None, default=None, temp_def=True,
expect1=50, expect2=40)),
('default', dict(param=None, default=12, temp_def=True,
expect1=12, expect2=12)),
('both', dict(pa | ram=15, default=12, temp_def=True,
expect1=12, expect2=15)),
('no_temp_default', dict(param=None, default=12, temp_def=False,
expect1=12, expect2=12)),
]
def setUp(self):
super(DefaultParametersTest, self).setUp()
def test_defaults(self):
env = {'parameters': {}, 'parameter_defaults': {}}
if self.param:
env['parameters'] = {'length': self.param}
if self.default:
env['parameter_defaults'] = {'length': self.default}
if not self.temp_def:
# remove the default from the parameter in the nested template.
ntempl = yaml.safe_load(self.nested_template)
del ntempl['parameters']['length']['default']
nested_template = yaml.safe_dump(ntempl)
else:
nested_template = self.nested_template
stack_identifier = self.stack_create(
template=self.template,
files={'nested_random.yaml': nested_template},
environment=env
)
stack = self.client.stacks.get(stack_identifier)
for out in stack.outputs:
if out['output_key'] == 'random1':
self.assertEqual(self.expect1, len(out['output_value']))
if out['output_key'] == 'random2':
self.assertEqual(self.expect2, len(out['output_value']))
|
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/network/cmd/ping/tasking_dsz.py | Python | unlicense | 444 | 0.009009 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 | 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: tasking_dsz.py
import mcl.framework
import mcl.tasking
class dsz:
INTERFACE = 16842801
PFAM = 4129
PROVIDER_ANY = 4129
PROVIDER = 16846881
PROVIDER_FLAV = 16912417
RP | C_INFO_SEND = mcl.tasking.RpcInfo(mcl.framework.DSZ, [INTERFACE, PROVIDER_ANY, 0]) |
cga-harvard/cga-worldmap | geonode/proxy/views.py | Python | gpl-3.0 | 15,886 | 0.008939 | import random
from django.http import HttpResponse
from httplib import HTTPConnection, HTTPSConnection
from urlparse import urlsplit
import httplib2
import urllib
from django.utils import simplejson as json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.utils.html import escape
from django.views.decorators.csrf import csrf_exempt
import logging
from urlparse import urlparse
from geonode.maps.models import LayerStats, Layer
from xml.etree.ElementTree import XML, ParseError
import re
logger = logging.getLogger("geonode.proxy.views")
HGL_URL = 'http://hgl.harvard.edu:8080/HGL'
_valid_tags = "\{http\:\/\/www\.opengis\.net\/wms\}WMS_Capabilities|\
WMT_MS_Capabilities|WMS_DescribeLayerResponse|\
\{http\:\/\/www\.opengis\.net\/gml\}FeatureCollection|msGMLOutput|\
\{http\:\/\/www.opengis\.net\/wfs\}FeatureCollection|\
rss|{http://www.w3.org/2005/Atom}feed|\
\{http\:\/\/www\.w3\.org\/2001\/XMLSchema\}schema|\
{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF"
_user, _password = settings.GEOSERVER_CREDENTIALS
h = httplib2.Http()
h.add_credentials(_user, _password)
_netloc = urlparse(settings.GEOSERVER_BASE_URL).netloc
h.authorizations.append(
httplib2.BasicAuthentication(
(_user, _password),
_netloc,
settings.GEOSERVER_BASE_URL,
{},
None,
None,
h
)
)
@csrf_exempt
def proxy(request):
if 'url' not in request.GET:
return HttpResponse(
"The proxy service requires a URL-encoded URL as a parameter.",
status=400,
content_type="text/plain"
)
url = urlsplit(request.GET['url'])
# Don't allow localhost connections unless in DEBUG mode
if not settings.DEBUG and re.search('localhost|127.0.0.1', url.hostname):
return HttpResponse(status=403)
locator = url.path
if url.query != "":
locator += '?' + url.query
if url.fragment != "":
locator += '#' + url.fragment
# Strip all headers and cookie info
headers = {}
conn = HTTPConnection(url.hostname, url.port) if url.scheme == "http" else HTTPSConnection(url.hostname, url.port)
conn.request(request.method, locator, request.raw_post_data, headers)
result = conn.getresponse()
response = HttpResponse(
valid_response(result.read()),
status=result.status,
content_type=result.getheader("Content-Type", "text/plain")
)
return response
def valid_response(responseContent):
#Proxy should only be used when expecting an XML or JSON response
#ArcGIS Server GetFeatureInfo xml response
if re.match("<FeatureInfoResponse", responseContent):
return responseContent
# ows exceptions
if "<ows:ExceptionReport" in responseContent:
return responseContent
if responseContent[0] == "<":
try:
from defusedxml.ElementTree import fromstring
et = fromstring(responseContent)
if re.match(_valid_tags, et.tag):
return responseContent
except ParseError:
return None
elif re.match('\[|\{', responseContent):
try:
json.loads(responseContent)
return responseContent
except:
return None
return None
@csrf_exempt
def geoserver_rest_proxy(request, proxy_path, downstream_path):
if not request.user.is_authenticated():
return HttpResponse(
"You must be logged in to access GeoServer",
mimetype="text/plain",
status=401)
def strip_prefix(path, prefix):
assert path.startswith(prefix)
return path[len(prefix):]
path = strip_prefix(request.get_full_path(), proxy_path)
url = "".join([settings.GEOSERVER_BASE_URL, downstream_path, path])
http = httplib2.Http()
http.add_credentials(*settings.GEOSERVER_CREDENTIALS)
headers = dict()
if request.method in ("POST", "PUT") and "CONTENT_TYPE" in request.META:
headers["Content-Type"] = request.META["CONTENT_TYPE"]
response, content = http.request(
url, request.method,
body=request.raw_post_data or None,
headers=headers)
return HttpResponse(
content=content,
status=response.status,
mimetype=response.get("content-type", "text/plain"))
def picasa(request):
url = "http://picasaweb.google.com/data/feed/base/all?thumbsize=160c&"
kind = request.GET['kind'] if request.method == 'GET' else request.POST['kind']
bbox = request.GET['bbox'] if request.method == 'GET' else request.POST['bbox']
query = request.GET['q'] if request.method == 'GET' else request.POST['q']
maxResults = request.GET['max-results'] if request.method == 'GET' else request.POST['max-results']
coords = bbox.split(",")
coords[0] = -180 if float(coords[0]) <= -180 else coords[0]
coords[2] = 180 if float(coords[2]) >= 180 else coords[2]
coords[1] = coords[1] if float(coord | s[1]) > -90 else -90
coords[3] = coords[3] if float(coords[3]) < 90 else 90
newbbox = str(coords[0]) + ',' + str(coords[1]) + ',' + str(coords[2]) + ',' + str(coords[3])
url = url + "kind=" + kind + "&max-results=" + maxResults + "&bbox=" + newbbo | x + "&q=" + urllib.quote(query.encode('utf-8')) #+ "&alt=json"
feed_response = urllib.urlopen(url).read()
return HttpResponse(feed_response, mimetype="text/xml")
def flickr(request):
url = "http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=%s" % settings.FLICKR_API_KEY
bbox = request.GET['bbox'] if request.method == 'GET' else request.POST['bbox']
query = request.GET['q'] if request.method == 'GET' else request.POST['q']
maxResults = request.GET['max-results'] if request.method == 'GET' else request.POST['max-results']
coords = bbox.split(",")
coords[0] = -180 if float(coords[0]) <= -180 else coords[0]
coords[2] = 180 if float(coords[2]) >= 180 else coords[2]
coords[1] = coords[1] if float(coords[1]) > -90 else -90
coords[3] = coords[3] if float(coords[3]) < 90 else 90
newbbox = str(coords[0]) + ',' + str(coords[1]) + ',' + str(coords[2]) + ',' + str(coords[3])
url = url + "&tags=%s&per_page=%s&has_geo=1&bbox=%s&format=json&extras=geo,url_q&accuracy=1&nojsoncallback=1" % (query,maxResults,newbbox)
feed_response = urllib.urlopen(url).read()
return HttpResponse(feed_response, mimetype="text/xml")
def hglpoints (request):
from xml.dom import minidom
import re
url = HGL_URL + "/HGLGeoRSS?GeometryType=point"
bbox = ["-180","-90","180","90"]
max_results = request.GET['max-results'] if request.method == 'GET' else request.POST['max-results']
if max_results is None:
max_results = "100"
try:
bbox = request.GET['bbox'].split(",") if request.method == 'GET' else request.POST['bbox'].split(",")
except:
pass
query = request.GET['q'] if request.method == 'GET' else request.POST['q']
url = url + "&UserQuery=" + urllib.quote(query.encode('utf-8')) #+ \
#"&BBSearchOption=1&minx=" + bbox[0] + "&miny=" + bbox[1] + "&maxx=" + bbox[2] + "&maxy=" + bbox[3]
dom = minidom.parse(urllib.urlopen(url))
iterator = 1
for node in dom.getElementsByTagName('item'):
if iterator <= int(max_results):
description = node.getElementsByTagName('description')[0]
guid = node.getElementsByTagName('guid')[0]
title = node.getElementsByTagName('title')[0]
if guid.firstChild.data != 'OWNER.TABLE_NAME':
description.firstChild.data = description.firstChild.data + '<br/><br/><p><a href=\'javascript:void(0);\' onClick=\'app.addHGL("' \
+ escape(title.firstChild.data) + '","' + re.sub("SDE\d?\.","", guid.firstChild.data) + '");\'>Add to Map</a></p>'
iterator +=1
else:
node.parentNode.removeChild(node)
return HttpResponse(dom.toxml(), mimetype="text/xml")
def hglServiceStarter (request, layer):
#Check if the layer is accessible to public, if not return 403
accessUrl = HGL_URL + "/ |
Wopple/fimbulvetr | src/client/util.py | Python | bsd-3-clause | 613 | 0.008157 | import math
def get_direction(src, targe | t):
diff = | map(lambda a, b: a - b, target, src)
mag = math.sqrt(sum(map(lambda a: a ** 2, diff)))
if mag == 0:
return [0, 0]
return map(lambda a: a / mag, diff)
def distance(pos1, pos2):
return math.sqrt(sum(map(lambda a: a ** 2, map(lambda a, b: a - b, pos1, pos2))))
def magnitude(vector):
return math.sqrt(sum(map(lambda a: a ** 2, vector)))
class Drawable(object):
def draw(self, surface, camera=(0, 0)):
coordinates = (self.rect.left - camera[0], self.rect.top - camera[1])
surface.blit(self.image, coordinates)
|
OxPython/Python-dict-str | dict_str.py | Python | epl-1.0 | 411 | 0.009756 | '''
Created on Jul 2, 2014
@author: viejoemer
I can print a dict as a string in Python?
¿Yo puedo imprimi | r un dict como un string en Python?
str(dict) Produces a printable string representation of a dictionary, but
this not work for JSON format.
'''
#Definition of a dictionary
d = {'three': 3, 'two': 2, 'one': 1}
print | (type(d))
print(d)
#Print a dict like a string
s = str(d)
print(type(s))
print(s) |
MiraHead/mlmvn | src/dataio/arffio.py | Python | gpl-2.0 | 8,420 | 0.000238 | #! /usr/bin/env python
'''
Arff loader for categorical and numerical attributes, based
on scipy.io.arff.arffloader With minor changes for this
project (eg. categorical attributes are mapped onto integers
and whole dataset is returned as numpy array of floats)
If any unsupported data types appear or if arff is malformed,
ParseArffError with info about error is raised.
@author Miroslav Hlavacek <mira.hlavackuj@gmail.com>
'''
from __future__ import division, absolute_import
from functools import partial
import numpy as np
from ..dataio.dataio_const import DataIOError
from ..dataio.dataio_const import NUMERIC_ATT
from ..dataio.dataio_const import NOMINAL_ATT
class ParseArffError(DataIOError):
""" Error while parsing arff file - either
malformed arff or unsupported arff functionality
"""
pass
def loadarff(f):
"""Read an arff file.
Retrieves name of relation, attribute names and types, possible values
of nominal attributes and data. The data is returned as a numpy array of
floats.\n
It can read files with numeric and nominal attributes. All nominal
attribute values are converted to integers (but stored as floats -
because of numpy).\n
Not implemented functionality:\n
* date type attributes\n
* string type attributes\n
* relational type attributes\n
* sparse files reading\n
* missing values handling\n
@param f : file-like or str - object to read from, or filename to open.
@returns Tuple (relation, ls_attributes, d_nominal_values, data)
where:\n
\b relation is string name of relation in arff\n
\b ls_attributes is list with all attribute names\n
\b d_nominal_values is dictionary containing lists with all
possible values for each nominal attribute. Key to this
list is integer - position of attribute in ls_attributes.
\b data is numpy array of float type, where shape is
(n_samples, n_attributes)
@throws ParseArffError This is raised if the given file is not
ARFF-formatted or some values are missing
or some values are of bad type or if some
data type is unsupported.
"""
if hasattr(f, 'read'):
ofile = f
else:
ofile = open(f, 'rt')
try:
return _loadarff(ofile)
finally:
if ofile is not f: # only close what we opened
ofile.close()
def _loadarff(in_file):
# Parse the header file
try:
relation, ls_atts, d_nom_vals = read_header(in_file)
except ValueError as e:
raise ParseArffError("Error while parsing header, error was: "
+ str(e))
#prepare convertors and parse data
convertors = []
idx = 0
for name, att_type in ls_atts:
if att_type == NUMERIC_ATT:
convertors.append(safe_float)
elif att_type == NOMINAL_ATT:
convertors.append(partial(safe_nominal, ls_values=d_nom_vals[idx]))
idx += 1
n_columns = len(convertors)
def generator(row_iter):
# skip comments and empty lines
raw = row_iter.next()
while len(raw.strip()) == 0 or raw[0] == '%':
raw = row_iter.next()
try:
# retrieve delimiter of data from first data field
delim = get_delim(raw)
rows = raw.split(delim)
if len(rows) != n_columns:
raise ParseArffError('Wrong number of attributes on line: '
+ raw.strip())
# 'compiling' the range since it does not change
elems = list(range(n_columns))
for i in elems:
yield convertors[i](rows[i])
except ValueError as e:
raise ParseArffError('Error while parsing data: "%s" on line "%s"'
% (str(e), raw.strip()))
for raw in row_iter:
rows = raw.split(delim)
while not rows or rows[0][0] == '%':
raw = row_iter.next()
rows = raw.split(delim)
if len(rows) != n_columns:
raise ParseArffError('Wrong number of attributes on line: '
+ raw)
try:
for i in elems:
yield convertors[i](rows[i])
except ValueError as e:
raise ParseArffError('Type error or missing value while '
'parsing data: "%s" on line:"%s"'
% (str(e), raw))
gen = generator(in_file)
data = np.fromiter(gen, complex)
# reshape array appropriately
data = data.reshape(data.shape[0] / n_columns, n_columns)
return relation, ls_atts, d_nom_vals, data
def read_header(in_file):
"""Read the header of the iterable in_file.
Parse all attribute names, types and store
possible values for any encountered nominal attribute.
@param in_file File opened for textual reading
@returns Tuple (relation, ls_attributes, d_nominal_values)
where:\n
\b relation is string name of relation in arff\n
\b ls_attributes is list with all attribute names\n
\b d_nominal_values is dictionary containing lists with all
possible values for each nominal attribute. Key to this
list is integer - position of attribute in ls_attributes.
"""
# Header is everything up to DATA attribute
relation = "Unknown relation"
ls_attributes = []
d_nominal_vals = {}
num_attributes = 0
keyword = ''
while keyword != '@data':
line = next(in_file)
chunks = line.rstrip('\n').split()
# ignore blank lines and commments
if not chunks or chunks[0][0] != '@':
continue
try:
keyword = chunks[0].lower()
if keyword == '@attribute':
name = chunks[1]
att_type = parse_type(chunks[2])
val_names = None
if att_type == NOMINAL_ATT:
val_names = chunks[2].strip('{}').split(',')
ls_attributes.append((name, att_type))
if not val_names is None:
d_nominal_vals[num_attributes] = val_names
num_attributes += 1
elif keyword == '@relation':
relation = chunks[1]
elif keyword != '@data':
raise ParseArffError("Error parsing line %s" % line)
except KeyError as e:
raise ParseArffError('Malformed arff attribute: %s on line %s '
% (str(e), line))
return relation, ls_attributes, d_nominal_vals
def parse_type(attrtype):
"""Given an arff attribute type description returns
whether is attribute nominal or numeric, for other
data types, ParseArffError is raised.
@param String representing value of attribute
@return String with either for given type defined in dataio...
either NUMERIC_ATT or NOMINAL_ATT
@throw ParseArffError If the type is unknown or unsupported
"""
atype = attrtype.lower().strip()
if atype[0] == '{':
return NOMINAL_ATT
elif atype[:len('real')] == 'real':
return NUMERIC_ATT
elif atype[:len('integer')] == 'integer':
return NUMERIC_ATT
elif atype[:len('numeric')] == 'numeric':
return NUMERIC_ATT
else:
raise ParseArffError("Unknown or unsupported attribute %s" % atype)
def safe_float(data):
""" float convertor """
if data.strip()[0] == '{':
raise ValueError("This looks like a sparse ARFF: not supported yet" | )
return np.float(data)
def safe_nominal(data, ls_values):
""" nominal convertor """
svalue = data.strip()
if svalue[0] == '{':
raise ValueError("This looks like a sparse ARFF: not supported yet")
if svalue in ls_values:
return ls_values.index(svalue)
else:
raise ValueError('Not defined value of nominal attribute')
def get_delim | (line):
" |
oehokie/brwry-python | device.py | Python | mit | 1,314 | 0.05175 | import RPi.GPIO as io
io.setwarnings(False)
io.setmode(io.BCM)
class Device:
def __init__(self,config):
self.config = config
for device in self.config['gpioPINs']['unavailable']:
io.setup(int(device),io.OUT)
io.output(int(device),False)
for device in self.config['gpioPINs']['available']:
io.setup(int(device),io.OUT)
io.output(int(device),False)
def deviceOn(self,pin):
for device in self.config['gpioPINs']['unavailable']:
if int(device) == int(pin):
io.output(int(pin),True)
else:
print "Does Not Match Existing Device"
def deviceOff(self,pin):
for device in self.config['gpioPINs']['unavailable']:
if int(device) == int(pin):
io.output(int(pin),False)
else:
print "Does Not Match Existing Device"
d | ef updateDevices(se | lf,config):
self.config = config
def getCurStatus(self,devType):
curStatus = {}
for device in self.config[devType]:
curStatus[str(device['deviceName'])] = io.input(int(device['gpioPIN']))
return curStatus
def deviceStatus(self,pin):
#note: checking the input of an output pin is permitted
return io.input(int(pin))
def allOff(self):
for device in self.config['gpioPINs']['unavailable']:
io.output(int(device),False)
for device in self.config['gpioPINs']['available']:
io.output(int(device),False)
|
chaen/DIRAC | docs/diracdoctools/cmd/codeReference.py | Python | gpl-3.0 | 13,727 | 0.009543 | #!/usr/bin/env python
""" create rst files for documentation of DIRAC """
import os
import shutil
import socket
import sys
import logging
import glob
from diracdoctools.Utilities import writeLinesToFile, mkdir, makeLogger
from diracdoctools.Config import Configuration, CLParser as clparser
LOG = makeLogger('CodeReference')
# global used inside the CustomizedDocs modules
CUSTOMIZED_DOCSTRINGS = {}
class CLParser(clparser):
"""Extension to CLParser to also parse buildType."""
def __init__(self):
super(CLParser, self).__init__()
self.log = LOG.getChild('CLParser')
self.clean = False
self.parser.add_argument('--buildType', action='store', default='full',
choices=['full', 'limited'],
help='Build full or limited code reference',
)
self.parser.add_argument('--clean', action='store_true',
help='Remove rst files and exit',
)
def parse(self):
super(CLParser, self).parse()
self.log.info('Parsing options')
self.buildType = self.parsed.buildType
self.clean = self.parsed.clean
def optionDict(self):
oDict = super(CLParser, self).optionDict()
oDict['buildType'] = self.buildType
oDict['clean'] = self.clean
return oDict
class CodeReference(object):
"""Module to create rst files containing autodoc for sphinx."""
def __init__(self, configFile='docs.conf'):
self.config = Configuration(configFile, sections=['Code'])
self.orgWorkingDir = os.getcwd()
def end(self):
"""Make sure we are back in the original working directory."""
LOG.info('Done with creating code reference')
os.chdir(self.orgWorkingDir)
def getCustomDocs(self):
"""Import the dynamically created docstrings from the files in CustomizedDocs.
Use 'exec' to avoid a lot of relative import, pylint errors, etc.
"""
customizedPath = os.path.join(self.config.code_customDocsPath, '*.py')
LOG.info('Looking for custom strings in %s', customizedPath)
for filename in glob.glob(customizedPath):
LOG.info('Found customization: %s', filename)
exec(open(filename).read(), globals()) # pylint: disable=exec-used
def mkPackageRst(self, filename, modulename, fullmodulename, subpackages=None, modules=None):
"""Make a rst file for module containing other modules."""
if modulename == 'scripts':
return
else:
modulefinal = modulename
lines = []
lines.append('%s' % modulefinal)
lines.append('=' * len(modulefinal))
lines.append('.. module:: %s ' % fullmodulename)
lines.append('')
if subpackages or modules:
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
subpackages = [s for s in subpackages if not s.endswith(('scripts', ))]
if subpackages:
LOG.info('Module %r with subpackages: %r', fullmodulename, ', '.join(subpackages))
lines.append('SubPackages')
lines.append('...........')
lines.append('')
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
for package in sorted(subpackages):
lines.append(' %s/%s_Module.rst' % (package, package.split('/')[-1]))
lines.append('')
# remove CLI etc. because we drop them earlier
modules = [m for m in modules if not m.endswith('CLI') and '-' not in m]
if modules:
lines.append('Modules')
lines.append('.......')
lines.append('')
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
for module in sorted(modules):
lines.append(' %s.rst' % (module.split('/')[-1],))
lines.append('')
writeLinesToFile(filename, lines)
def mkDummyRest(self, classname, _fullclassname):
"""Create a dummy rst file for files that behave badly."""
filename = classname + '.rst'
lines = []
lines.append('%s' % classname)
lines.append('=' * len(classname))
lines.append('')
lines.append(' This is an empty file, because we cannot parse this file correctly or it causes problems')
lines.append(' , please look at the source code directly')
writeLinesToFile(filename, lines)
def mkModuleRst(self, classname, fullclassname, buildtype='full'):
"""Create rst file for module."""
LOG.info('Creating rst file for %r, aka %r', classname, fullclassname)
filename = classname + '.rst'
lines = []
lines.append('%s' % classname)
lines.append('=' * len(classname))
lines.append('.. automodule:: %s' % fullclassname)
if buildtype == 'full':
lines.append(' :members:')
if classname not in self.config.code_noInherited:
lines.append(' :inherited-members:')
lines.append(' :undoc-members:')
lines.append(' :show-inheritance:')
if classname in self.config.code_privateMembers:
lines.append(' :special-members:')
lines.append(' :private-members:')
else:
lines.append(' :special-members: __init__')
if classname.startswith('_'):
lines.append(' :private-members:')
if fullclassname in CUSTOMIZED_DOCSTRINGS:
ds = CUSTOMIZED_DOCSTRINGS[fullclassname]
if ds.replace:
lines = ds.doc_string
else:
lines.append(ds.doc_string)
writeLinesToFile(filename, lines)
def getsubpackages(self, abspath, direc):
"""return list of subpackages with full path"""
packages = []
for dire in direc:
if dire.lower() == 'test' or dire.lower() == 'tests' or '/test' in dire.lower():
LOG.debug('Skipping test directory: %s/%s', abspath, dire)
continue
if dire.lower() == 'docs' or '/docs' in dire.lower():
LOG.debug('Skipping docs directory: %s/%s', abspath, dire)
continue
if os.path.exists(os.path.join(self.config.sourcePath, abspath, dire, '__init__.py')):
packages.append(os.path.join(dire))
return packages
def getmodules(self, abspath, _direc, files):
"""Return list of subpackages with full path."""
packages = []
for filename in files:
if filename.lower().startswith('test') or filename.lower().endswith('test') or \
any(f.lower() in filename.lower() for f in self.config.code_ignoreFiles):
LOG.debug('Skipping file: %s/%s', abspath, filename)
continue
if 'test' in filename.lower():
LOG.warn("File contains 'test', but is kept: %s/%s", abspath, filename)
if filename != '__init__.py':
packages.append(filename.split('.py')[0])
return packages
def cleanDoc(self):
"""Remove the code output folder."""
LOG.info('Removing existing code documentation: %r', self.config.code_targetPath)
if os.path.exists(self.config.code_targetPath):
shutil.rmtree(self.config.code_targetPath)
def createDoc(self, buildtype="full"):
"""create the rst files for all the things we want them for"""
LOG.info('self.config.sourcePath: %s', self.config.sourcePath)
LOG.info('self.config.targetPath: %s', self.config.code_targetPath)
LOG.info('Host: %s', socket.gethostname())
# we need to replace existing rst files so we can decide how much code-doc to create
if os.path.exists(self.config.code_targetPath) and os.environ.get('READTHEDOCS', 'False') == 'True':
self.cleanDoc()
mkdir(self.config.code_targetPath)
os.chdir(self.config.code_targetPath)
self.getCustomDocs()
LOG.info('Now creating rst files: starting in %r', self.config.sourcePath)
firstModule = True
for root, direc, files in os.walk(self.config.sourcePath):
configTemplate = [os.path.join(root, _) for _ in files if _ == 'ConfigTemplate.cfg']
files = [_ for _ in files if _.endswith('.py')]
if '__in | it__.py' not in files:
continue
eli | f any(f.lower() in root.lower() for f in self.config.code_ignoreFolders):
LOG.debug('Skipping folder: %s', root)
continue
modulename = root.split('/')[-1].strip('.')
codePath = root.split(self.config.sourcePath)[1].strip('/.')
docPath = codePath
if docPat |
fishstamp82/loprop | test/test_bond.py | Python | gpl-3.0 | 5,042 | 0.018247 | import unittest
import numpy as np
str_nobond = """AU
3 1 2 1
1 0.00000000 0.00000000 0.00000000 -0.66387672 0.00000000 -0.00000000 0.34509720 3.78326969 -0.00000000 -0.00000000 3.96610412 0.00000000 3.52668267 0.00000000 -0.00000000 -2.98430053 0.00000000 -0.00000000 0.00000000 -0.00000000 1.26744725 -0.00000000 2.16730601
1 1.43043000 0.00000000 1.10716000 0.33193836 -0.16057903 -0.00000000 -0.11299312 1.55235099 -0.00000000 1.15495299 0.60859677 -0.00000000 1.21104235 -4.46820475 0.00000000 -4.55909022 -0.05601735 0.00000000 -3.72029878 -0.00000000 0.46039909 -0.00000000 -2.40410436
1 -1.43043000 0.00000000 1.10716000 0.33193836 0.16057903 -0.00000000 -0.11299312 1.55235099 -0.00000000 -1.15495299 0.60859677 0.00000000 1.21104235 4.46820475 -0.00000000 -4.55909022 0.05601735 0.00000000 3.72029878 -0.00000000 0.46039909 -0.00000000 -2.40410436
Time used in Loprop : 0.45 (cpu) 0.11 (wall)
"""
str_bond ="""AU
5 1 22 1
1 0.00000000 0.00000000 0.00000000 -0.66387672 0.00000000 -0.00000000 0.41788500 1.19165567 0.00000000 0.00000000 2.74891057 0.00000000 1.33653383 0.00000000 0.00000000 4.18425484 0.00000000 -0.00000000 -0.00000000 -0.00000000 0.19037387 0.00000000 5.96033807
1 0.71521500 0.00000000 0.55358000 0.00000000 -0.06567795 -0.00000000 -0.07278780 2.59161403 -0.00000000 1.21719355 1.98015668 -0.00000000 2.19014883 -7.24839104 0.00000000 -7.16855538 0.59534043 0.00000000 -5.74640170 -0.00000000 1.07707338 -0.00000000 -3.79303206
1 1.43043000 0.00000000 1.10716000 0.33193836 -0.12774005 0.00000000 -0.07659922 0.25654398 0.00000000 0.16487465 -0.00000000 -0.00000000 0.11596794 -0.84400923 0.00000000 -0.97481253 -0.35368757 -0.00000000 -0.84709793 0.00000000 -0.07813759 0.00000000 -0.50758833
1 -0.71521500 0.00000000 0.55358000 0.00000000 0.06567795 -0.00000000 -0.07278780 2.59161403 -0.00000000 1.21719355 -1.98015668 0.00000000 2.19014883 7.24839104 -0.00000000 -7.16855538 -0.59534043 0.00000000 5.74640170 -0.00000000 1.07707338 -0.00000000 -3.79303206
1 -1.43043000 0.00000000 1.10716000 0.33193836 0.12774005 0.00000000 -0.07659922 0.25654398 -0.00000000 -0.16487465 0.00000000 0.00000000 0.11596794 0.84400923 -0.00000000 -0.97481253 0.35368757 0.00000000 0.84709793 -0.00000000 -0.07813759 -0.00000000 -0.50758833
Time used in Loprop : 0.45 (cpu) 0.11 (wall)
"""
class TestBond(unittest.TestCase):
def test_bond_nobond_properties(self):
#a0 = 0.52917721092
a0 = 1.0
#Read in string that is for no bonds output
lines = [line for line in str_bond.split('\n') if len(line.split()) > 10 ]
n_bond = np.array( | [8.0, 0.0, 1.0, 0.0, 1.0], dtype = float )
r_bond = a0 * np.array( [l.split()[1:4] for l in lines ], dtype = float)
q_bond = np.array( [l.split()[4] for l in lines], dtype = float)
d_bond = np.array( [l.split()[5:8] for l in lines], dtype = float)
a_bond = np.ar | ray( [l.split()[8:15] for l in lines], dtype = float)
b_bond = np.array( [l.split()[15:26] for l in lines], dtype = float)
#Read in string that is for bonds output -b
lines = [line for line in str_nobond.split('\n') if len(line.split()) > 10 ]
n_nobond = np.array( [8.0, 1.0, 1.0], dtype = float )
r_nobond = a0 * np.array( [l.split()[1:4] for l in lines ], dtype = float)
q_nobond = np.array( [l.split()[4] for l in lines], dtype = float)
d_nobond = np.array( [l.split()[5:8] for l in lines], dtype = float)
a_nobond = np.array( [l.split()[8:15] for l in lines], dtype = float)
b_nobond = np.array( [l.split()[15:26] for l in lines], dtype = float)
#Total dipole moment should be the same
coc_bond = np.einsum( 'ij,i', r_bond , n_bond ) / n_bond.sum()
coc_nobond = np.einsum( 'ij,i', r_nobond , n_nobond ) / n_nobond.sum()
np.testing.assert_allclose( coc_bond, coc_nobond )
a_tot_bond = np.sum(a_bond)
a_tot_nobond = np.sum(a_nobond)
np.testing.assert_allclose( a_tot_bond, a_tot_nobond )
b_tot_bond = np.sum(b_bond)
b_tot_nobond = np.sum(b_nobond)
np.testing.assert_allclose( b_tot_bond, b_tot_nobond )
dip_bond = np.einsum( 'ij,i', (r_bond - coc_bond), q_bond ) + d_bond.sum(axis=0)
dip_nobond = np.einsum( 'ij,i', (r_nobond - coc_nobond), q_nobond ) + d_nobond.sum(axis = 0 )
|
atuljain/odoo | openerp/addons/base/ir/ir_http.py | Python | agpl-3.0 | 6,283 | 0.003183 | #----------------------------------------------------------
# ir_http modular http routing
#----------------------------------------------------------
import logging
import re
import sys
import werkzeug.exceptions
import werkzeug.routing
import openerp
from openerp import http
from openerp.http import request
from openerp.osv import osv, orm
_logger = logging.getLogger(__name__)
UID_PLACEHOLDER = object()
class ModelConverter(werkzeug.routing.BaseConverter):
def __init__(self, url_map, model=False):
super(ModelConverter, self).__init__(url_map)
self.model = model
self.regex = '([0-9]+)'
def to_python(self, value):
m = re.match(self.regex, value)
return request.registry[self.model].browse(
request.cr, UID_PLACEHOLDER, int(m.group(1)), context=request.context)
def to_url(self, value):
return value.id
class ModelsConverter(werkzeug.routing.BaseConverter):
def __init__(self, url_map, model=False):
super(ModelsConve | rter, self).__init__(url_map)
self.model = model
# TODO add support for slug in the form [A-Za-z0-9-] bla-bla-89 -> id 89
self.regex = '([0-9,]+)'
def to_python(self, value):
return request.registry[self.model].browse(request.cr, UID_PLACEHOLDER, [int(i) for i in value.split( | ',')], context=request.context)
def to_url(self, value):
return ",".join(i.id for i in value)
class ir_http(osv.AbstractModel):
_name = 'ir.http'
_description = "HTTP routing"
def _get_converters(self):
return {'model': ModelConverter, 'models': ModelsConverter}
def _find_handler(self, return_rule=False):
return self.routing_map().bind_to_environ(request.httprequest.environ).match(return_rule=return_rule)
def _auth_method_user(self):
request.uid = request.session.uid
if not request.uid:
raise http.SessionExpiredException("Session expired")
def _auth_method_none(self):
request.uid = None
def _auth_method_public(self):
if not request.session.uid:
dummy, request.uid = self.pool['ir.model.data'].get_object_reference(request.cr, openerp.SUPERUSER_ID, 'base', 'public_user')
else:
request.uid = request.session.uid
def _authenticate(self, auth_method='user'):
if request.session.uid:
try:
request.session.check_security()
# what if error in security.check()
# -> res_users.check()
# -> res_users.check_credentials()
except (openerp.exceptions.AccessDenied, openerp.http.SessionExpiredException):
# All other exceptions mean undetermined status (e.g. connection pool full),
# let them bubble up
request.session.logout()
getattr(self, "_auth_method_%s" % auth_method)()
return auth_method
def _handle_exception(self, exception):
# If handle_exception returns something different than None, it will be used as a response
return request._handle_exception(exception)
def _dispatch(self):
# locate the controller method
try:
rule, arguments = self._find_handler(return_rule=True)
func = rule.endpoint
except werkzeug.exceptions.NotFound, e:
return self._handle_exception(e)
# check authentication level
try:
auth_method = self._authenticate(func.routing["auth"])
except Exception:
# force a Forbidden exception with the original traceback
return self._handle_exception(
convert_exception_to(
werkzeug.exceptions.Forbidden))
processing = self._postprocess_args(arguments, rule)
if processing:
return processing
# set and execute handler
try:
request.set_handler(func, arguments, auth_method)
result = request.dispatch()
if isinstance(result, Exception):
raise result
except Exception, e:
return self._handle_exception(e)
return result
def _postprocess_args(self, arguments, rule):
""" post process arg to set uid on browse records """
for arg in arguments.itervalues():
if isinstance(arg, orm.browse_record) and arg._uid is UID_PLACEHOLDER:
arg._uid = request.uid
try:
arg[arg._rec_name]
except KeyError:
return self._handle_exception(werkzeug.exceptions.NotFound())
def routing_map(self):
if not hasattr(self, '_routing_map'):
_logger.info("Generating routing map")
cr = request.cr
m = request.registry.get('ir.module.module')
ids = m.search(cr, openerp.SUPERUSER_ID, [('state', '=', 'installed'), ('name', '!=', 'web')], context=request.context)
installed = set(x['name'] for x in m.read(cr, 1, ids, ['name'], context=request.context))
if openerp.tools.config['test_enable']:
installed.add(openerp.modules.module.current_test)
mods = [''] + openerp.conf.server_wide_modules + sorted(installed)
self._routing_map = http.routing_map(mods, False, converters=self._get_converters())
return self._routing_map
def convert_exception_to(to_type, with_message=False):
""" Should only be called from an exception handler. Fetches the current
exception data from sys.exc_info() and creates a new exception of type
``to_type`` with the original traceback.
If ``with_message`` is ``True``, sets the new exception's message to be
the stringification of the original exception. If ``False``, does not
set the new exception's message. Otherwise, uses ``with_message`` as the
new exception's message.
:type with_message: str|bool
"""
etype, original, tb = sys.exc_info()
try:
if with_message is False:
message = None
elif with_message is True:
message = str(original)
else:
message = str(with_message)
raise to_type, message, tb
except to_type, e:
return e
# vim:et:
|
googleapis/python-bigquery-storage | google/cloud/bigquery_storage_v1/reader.py | Python | apache-2.0 | 27,503 | 0.000618 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import collections
import io
import json
import time
try:
import fastavro
except ImportError: # pragma: NO COVER
fastavro = None
import google.api_core.exceptions
import google.rpc.error_details_pb2
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
try:
import pyarrow
except ImportError: # pragma: NO COVER
pyarrow = None
try:
import pyarrow
except ImportError: # pragma: NO COVER
pyarrow = None
_STREAM_RESUMPTION_EXCEPTIONS = (
google.api_core.exceptions.ServiceUnavailable,
# Caused by transport-level error. No status code was received.
# https://github.com/googleapis/python-bigquery-storage/issues/262
google.api_core.exceptions.Unknown,
)
# The Google API endpoint can unexpectedly close long-running HTTP/2 streams.
# Unfortunately, this condition is surfaced to the caller as an internal error
# by gRPC. We don't want to resume on all internal errors, so instead we look
# for error message that we know are caused by problems that are safe to
# reconnect.
_STREAM_RESUMPTION_INTERNAL_ERROR_MESSAGES = (
# See: https://github.com/googleapis/google-cloud-python/pull/9994
"RST_STREAM",
)
_FASTAVRO_REQUIRED = (
"fastavro is required to parse ReadRowResponse messages with Avro bytes."
)
_PANDAS_REQUIRED = "pandas is required to create a DataFrame"
_PYARROW_REQUIRED = (
"pyarrow is required to parse ReadRowResponse messages with Arrow bytes."
)
class ReadRowsStream(object):
"""A stream of results from a read rows request.
This stream is an iterable of
:class:`~google.cloud.bigquery_storage_v1.types.ReadRowsResponse`.
Iterate over it to fetch all row messages.
If the fastavro library is installed, use the
:func:`~google.cloud.bigquery_storage_v1.reader.ReadRowsStream.rows()`
method to parse all messages into a stream of row dictionaries.
If the pandas and fastavro libraries are installed, use the
:func:`~google.cloud.bigquery_storage_v1.reader.ReadRowsStream.to_dataframe()`
method to parse all messages into a :class:`pandas.DataFrame`.
This object should not be created directly, but is returned by
other methods in this library.
"""
def __init__(
self, client, name, offset, read_rows_kwargs, retry_delay_callback=None
):
"""Construct a ReadRowsStream.
Args:
client ( \
~google.cloud.bigquery_storage_v1.services. \
big_query_read.BigQueryReadClient \
):
A GAPIC client used to reconnect to a ReadRows stream. This
must be the GAPIC client to avoid a circular dependency on
this class.
name (str):
Required. Stream ID from which rows are being read.
offset (int):
Required. Position in the stream to start
reading from. The offset requested must be less than the last
row read from ReadRows. Requesting a larger offset is
undefined.
read_rows_kwargs (dict):
Keyword arguments to use when reconnecting to a ReadRows
stream.
retry_delay_callback (Optional[Callable[[float], None]]):
If the client receives a retryable error that asks the client to
delay its next attempt and retry_delay_callback is not None,
ReadRowsStream will call retry_delay_callback with the delay
duration (in seconds) before it starts sleeping until the next
attempt.
Returns:
Iterable[ \
~google.cloud.bigquery_storage.types.ReadRowsResponse \
]:
A sequence of row messages.
"""
# Make a copy of the read position so that we can update it without
# mutating the original input.
self._client = client
self._name = name
self._offset = offset
self._read_rows_kwargs = read_rows_kwargs
self._retry_delay_callback = retry_delay_callback
self._wrapped = None
def __iter__(self):
"""An iterable of messages.
Returns:
Iterable[ \
~google.cloud.bigquery_storage_v1.types.ReadRowsResponse \
]:
A sequence of row messages.
"""
# Infinite loop to reconnect on reconnectable errors while processing
# the row stream.
if self._wrapped is None:
self._reconnect()
while True:
try:
for message in self._wrapped:
rowcount = message.row_count
self._offset += rowcount
yield message
return # Made it through the whole stream.
except google.api_core.exceptions.InternalServerError as exc:
resumable_error = any(
resumable_message in exc.message
for resumable_message in _STREAM_RESUMPTION_INTERNAL_ERROR_MESSAGES
)
if not resumable_error:
raise
except _STREAM_RESUMPTION_EXCEPTIONS:
# Transient error, so reconnect to the stream.
pass
except Exception as exc:
if not self._resource_exhausted_exception_is_retryable(exc):
raise
self._reconnect()
def _reconnect(self):
"""Reconnect to the ReadRows stream using the most recent offset."""
while True:
try:
self._wrapped = self._client.read_rows(
read_stream=self._name,
offset=self._offset,
**self._read_rows_kwargs
)
break
except Exception as exc:
if not self._resource_exhausted_exception_is_retryable(exc):
raise
def _resource | _exhausted_exception_is_r | etryable(self, exc):
if isinstance(exc, google.api_core.exceptions.ResourceExhausted):
# ResourceExhausted errors are only retried if a valid
# RetryInfo is provided with the error.
#
# TODO: Remove hasattr logic when we require google-api-core >= 2.2.0.
# ResourceExhausted added details/_details in google-api-core 2.2.0.
details = None
if hasattr(exc, "details"):
details = exc.details
elif hasattr(exc, "_details"):
details = exc._details
if details is not None:
for detail in details:
if isinstance(detail, google.rpc.error_details_pb2.RetryInfo):
retry_delay = detail.retry_delay
if retry_delay is not None:
delay = max(
0,
float(retry_delay.seconds)
+ (float(retry_delay.nanos) / 1e9),
)
if self._retry_delay_callback:
self._retry_delay_callback(delay)
time.sleep(delay)
return True
return False
def rows(self, read_session=None):
"""Iterate over all rows in the stream.
This method requires the fastavro library in order to parse row
messages in avro format. For arrow format messages, the pyarrow
|
lyoniionly/django-cobra | src/cobra/core/configure/user_config.py | Python | apache-2.0 | 1,284 | 0.001558 | from cobra.cor | e.loading import get_model
from cobra.core import json
class UserConfig(object):
default_config = {
'guide.task.participant': '1',
'guide.document.share': '1',
'guide.customer.share': '1',
'guide.workflow.operation': '1',
'guide.workflow.createform': '1',
'order.task.search': 'default',
'order.task.searchDirection': 'DESC',
'portal.workdyna': 'subordinates-task',
'system | .menu.display':'',
'viewState.task': 'list',
'guide.biaoge.showintro': '1',
'workreport.push.set': '1',
'agenda.push.set': '1'
}
def __init__(self, user):
self.__user_config = self.__build_user_config(user)
def __build_user_config(self, user):
UserOption = get_model('option', 'UserOption')
u_c = {}
for k, v in self.default_config.items():
u_c[k] = UserOption.objects.get_value(user, None, k, v)
return u_c
def to_python(self):
configs = []
for k, v in self.__user_config.items():
m = {
'configKey': k,
'configValue': v
}
configs.append(m)
return configs
def to_json(self):
return json.dumps(self.to_python()) |
epcoullery/epcstages | stages/models.py | Python | agpl-3.0 | 27,475 | 0.00398 | import json
from collections import OrderedDict
from contextlib import suppress
from datetime import date, timedelta
from django.conf import settings
from django.db import models
from django.db.models import Case, Count, When
from . import utils
CIVILITY_CHOICES = (
('Madame', 'Madame'),
('Monsieur', 'Monsieur'),
)
class Section(models.Model):
""" Filières """
name = models.CharField("Nom", max_length=20)
has_stages = models.BooleanField("Planifie la PP sur ce site", default=False)
class Meta:
verbose_name = "Filière"
def __str__(self):
return self.name
@property
def is_fe(self):
"""fe=formation en entreprise"""
return self.name in {'ASA', 'ASE', 'ASSC'}
@property
def is_EPC(self):
return self.name in {'ASA', 'ASE', 'ASSC', 'EDE', 'EDS'}
@property
def is_ESTER(self):
return self.name in {'MP_ASE', 'MP_ASSC'}
class Level(models.Model):
name = models.CharField(max_length=10, verbose_name='Nom')
class Meta:
verbose_name = "Niveau"
verbose_name_plural = "Niveaux"
def __str__(self):
return self.name
def delta(self, diff):
if diff == 0:
return self
try:
return Level.objects.get(name=str(int(self.name)+diff))
except Level.DoesNotExist:
return None
class ActiveKlassManager(models.Manager):
def get_queryset(self):
return super().get_queryset().annotate(
num_students=Count(Case(When(student__archived=False, then=1)))
).filter(num_students__gt=0)
class Klass(models.Model):
name = models.CharField(max_length=10, verbose_name='Nom', unique=True)
section = models.ForeignKey(Section, verbose_name='Filière', on_delete=models.PROTECT)
level = models.ForeignKey(Level, verbose_name='Niveau', on_delete=models.PROTECT)
teacher = models.ForeignKey('Teacher', blank=True, null=True,
on_delete=models.SET_NULL, verbose_name='Maître de classe')
teacher_ecg = models.ForeignKey('Teacher', blank=True, null=True,
on_delete=models.SET_NULL, verbose_name='Maître ECG', related_name='+')
teacher_eps = models.ForeignKey('Teacher', blank=True, null=True,
on_delete=models.SET_NULL, verbose_name='Maître EPS', related_name='+')
objects = models.Manager()
active = ActiveKlassManager()
class Meta:
verbose_name = "Classe"
def __str__( | self):
return self.name
def is_Ede_pe(self):
return 'EDE' in self.name and 'pe' in self.name
def is_Ede_ps(self):
return 'EDE' in self.name and 'ps' in self.name
class Teacher(models.Model):
civility = models.CharField(max_length=10, choices=CIVILITY_CHOICES, verbose_name='Civilité')
first_name = models.CharField(max_length=40, ver | bose_name='Prénom')
last_name = models.CharField(max_length=40, verbose_name='Nom')
abrev = models.CharField(max_length=10, verbose_name='Sigle')
birth_date = models.DateField(verbose_name='Date de naissance', blank=True, null=True)
email = models.EmailField(verbose_name='Courriel', blank=True)
contract = models.CharField(max_length=20, verbose_name='Contrat')
rate = models.DecimalField(default=0.0, max_digits=4, decimal_places=1, verbose_name="Taux d'activité")
ext_id = models.IntegerField(blank=True, null=True)
previous_report = models.IntegerField(default=0, verbose_name='Report précédent')
next_report = models.IntegerField(default=0, verbose_name='Report suivant')
can_examinate = models.BooleanField("Peut corriger examens candidats", default=False)
archived = models.BooleanField(default=False)
user = models.OneToOneField(
settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True,
verbose_name='Compte utilisateur'
)
class Meta:
verbose_name='Enseignant'
ordering = ('last_name', 'first_name')
def __str__(self):
return '{0} {1}'.format(self.last_name, self.first_name)
@property
def full_name(self):
return '{0} {1}'.format(self.first_name, self.last_name)
@property
def civility_full_name(self):
return '{0} {1} {2}'.format(self.civility, self.first_name, self.last_name)
@property
def role(self):
return {'Monsieur': 'enseignant-formateur', 'Madame': 'enseignante-formatrice'}.get(self.civility, '')
def calc_activity(self):
"""
Return a dictionary of calculations relative to teacher courses.
Store plus/minus periods to self.next_report.
"""
mandats = self.course_set.filter(subject__startswith='#')
ens = self.course_set.exclude(subject__startswith='#')
tot_mandats = mandats.aggregate(models.Sum('period'))['period__sum'] or 0
tot_ens = ens.aggregate(models.Sum('period'))['period__sum'] or 0
# formation periods calculated at pro-rata of total charge
tot_formation = int(round(
(tot_mandats + tot_ens) / settings.MAX_ENS_PERIODS * settings.MAX_ENS_FORMATION
))
tot_trav = self.previous_report + tot_mandats + tot_ens + tot_formation
tot_paye = tot_trav
max_periods = settings.MAX_ENS_PERIODS + settings.MAX_ENS_FORMATION
# Special situations triggering reporting (positive or negative) hours for next year:
# - full-time teacher with a total charge under 100%
# - teachers with a total charge over 100%
self.next_report = 0
if (self.rate == 100 and tot_paye < max_periods) or (tot_paye > max_periods):
tot_paye = max_periods
self.next_report = tot_trav - tot_paye
self.save()
return {
'mandats': mandats,
'tot_mandats': tot_mandats,
'tot_ens': tot_ens,
'tot_formation': tot_formation,
'tot_trav': tot_trav,
'tot_paye': tot_paye,
'report': self.next_report,
}
def calc_imputations(self, ratios):
"""
Return a tuple for accountings charges
"""
activities = self.calc_activity()
imputations = OrderedDict(
[('ASAFE', 0), ('ASSCFE', 0), ('ASEFE', 0), ('MPTS', 0), ('MPS', 0), ('EDEpe', 0), ('EDEps', 0),
('EDS', 0), ('CAS_FPP', 0)]
)
courses = self.course_set.all()
for key in imputations:
imputations[key] = courses.filter(imputation__contains=key).aggregate(models.Sum('period'))['period__sum'] or 0
# Spliting imputations for EDE, ASE and ASSC
ede = courses.filter(imputation='EDE').aggregate(models.Sum('period'))['period__sum'] or 0
if ede > 0:
pe = int(round(ede * ratios['edepe'], 0))
imputations['EDEpe'] += pe
imputations['EDEps'] += ede - pe
ase = courses.filter(imputation='ASE').aggregate(models.Sum('period'))['period__sum'] or 0
if ase > 0:
asefe = int(round(ase * ratios['asefe'], 0))
imputations['ASEFE'] += asefe
imputations['MPTS'] += ase - asefe
assc = courses.filter(imputation='ASSC').aggregate(models.Sum('period'))['period__sum'] or 0
if assc > 0:
asscfe = int(round(assc * ratios['asscfe'], 0))
imputations['ASSCFE'] += asscfe
imputations['MPS'] += assc - asscfe
# Split formation periods in proportions
tot = sum(imputations.values())
if tot > 0:
for key in imputations:
imputations[key] += round(imputations[key] / tot * activities['tot_formation'],0)
return (activities, imputations)
def total_logbook(self):
return LogBook.objects.filter(teacher=self).aggregate(models.Sum('nb_period'))['nb_period__sum']
total_logbook.short_description = 'Solde du carnet du lait'
class LogBookReason(models.Model):
name = models.CharField('Motif', max_length=50, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'Motif de carnet du lait'
verbose_name_plural = 'Motifs de carnet du lait'
class LogBook(models.Model):
|
josephbisch/the-blue-alliance | helpers/cache_clearer.py | Python | mit | 14,357 | 0.003761 | from google.appengine.ext import ndb
from controllers.api.api_district_controller import ApiDistrictListController, ApiDistrictEventsController, ApiDistrictRankingsController
from controllers.api.api_event_controller import ApiEventController, ApiEventTeamsController, \
ApiEventMatchesController, ApiEventStatsController, \
ApiEventRankingsController, ApiEventAwardsController, ApiEventListController, ApiEventDistrictPointsController
from controllers.api.api_match_controller import ApiMatchController
from controllers.api.api_team_controller import ApiTeamController, ApiTeamEventsController, ApiTeamEventAwardsController, \
ApiTeamEventMatchesController, ApiTeamMediaController, ApiTeamYearsParticipatedController, \
ApiTeamListController, ApiTeamHistoryEventsController, ApiTeamHistoryAwardsController, ApiTeamHistoryRobotsController
from database import get_affected_queries
from models.event import Event
from models.event_team import EventTeam
from models.team import Team
class CacheClearer(object):
@classmethod
def _queries_to_cache_keys_and_controllers(cls, queries):
out = []
for query in queries:
out.append((query.cache_key, type(query)))
return out
@classmethod
def get_award_cache_keys_and_controllers(cls, affected_refs):
"""
Gets cache keys and controllers that references this award
"""
event_keys = affected_refs['event']
team_keys = affected_refs['team_list']
years = affected_refs['year']
return cls._get_event_awards_cache_keys_and_controllers(event_keys) + \
cls._get_team_event_awards_cache_keys_and_controllers(team_keys, event_keys) + \
cls._queries_to_cache_keys_and_controllers(get_affected_queries.award_updated(affected_refs))
@classmethod
def get_event_cache_keys_and_controllers(cls, affected_refs):
"""
Gets cache keys and controllers that references this event
"""
event_keys = affected_refs['key']
years = affected_refs['year']
event_district_abbrevs = affected_refs['event_district_abbrev']
event_team_keys_future = EventTeam.query(EventTeam.event.IN([event_key for event_key in event_keys])).fetch_async(None, keys_only=True)
team_keys = set()
for et_key in event_team_keys_future.get_result():
team_key_name = et_key.id().split('_')[1]
team_keys.add(ndb.Key(Team, team_key_name))
return cls._get_events_cache_keys_and_controllers(event_keys) + \
cls._get_event_district_points_cache_keys_and_controllers(event_keys) + \
cls._get_eventlist_cache_keys_and_controllers(years) + \
cls._get_team_events_cache_keys_and_controllers(team_keys, years) + \
cls._get_districtlist_cache_keys_and_controllers(years) + \
cls._get_district_events_cache_keys_and_controllers(event_district_abbrevs, years) + \
cls._get_district_rankings_cache_keys_and_controllers(event_district_abbrevs, years) + \
cls._queries_to_cache_keys_and_controllers(get_affected_queries.event_updated(affected_refs))
@classmethod
def get_eventteam_cache_keys_and_controllers(cls, affected_refs):
"""
Gets cache keys and controllers that references this eventteam
"""
event_keys = affected_refs['event']
team_keys = affected_refs['team']
years = affected_refs['year']
return cls._get_eventteams_cache_keys_and_controllers(event_keys) + \
cls._get_team_events_cache_keys_and_controllers(team_keys, years) + \
cls._get_team_years_participated_cache_keys_and_controllers(team_keys) + \
cls._queries_to_cache_keys_and_controllers(get_affected_queries.eventteam_updated(affected_refs))
@classmethod
def get_districtteam_cache_keys_and_controllers(cls, affected_refs):
"""
Gets cache keys and controllers that references this eventteam
"""
return cls._queries_to_cache_keys_and_controllers(get_affected_queries.districtteam_updated(affected_refs))
@classmethod
def get_match_cache_keys_and_controllers(cls, affected_refs):
"""
Gets cache keys and controllers that references this match
"""
match_keys = affected_refs['key']
event_keys = affected_refs['event']
team_keys = affected_refs['team_keys']
years = affected_refs['year']
return cls._get_match_cache_keys_and_controllers(match_keys) + \
cls._get_matches_cache_keys_and_controllers(event_keys) + \
cls._get_team_event_matches_cache_keys_and_controllers(team_keys, event_keys) + \
cls._queries_to_cache_keys_and_controllers(get_affected_queries.match_updated(affected_refs))
@classmethod
def get_media_cache_keys_and_controllers(cls, affected_refs):
"""
Gets cache keys and controllers that ref | erence this media
"""
reference_keys = affected_refs['references']
years = affected_refs['year']
return cls._get_media_cache_keys_and_controllers(reference_keys, years) + \
cls._queries_to_cache_keys_and_controllers(get_affected_queries.media_updated(affected_refs))
@classmethod
def get_robot_cache_keys_and_controllers(cls, affected_refs):
"" | "
Gets cache keys and controllers that reference this robot
"""
team_keys = affected_refs['team']
return cls._get_robots_cache_keys_and_controllers(team_keys) + \
cls._queries_to_cache_keys_and_controllers(get_affected_queries.robot_updated(affected_refs))
@classmethod
def get_team_cache_keys_and_controllers(cls, affected_refs):
"""
Gets cache keys and controllers that references this team
"""
team_keys = affected_refs['key']
event_team_keys_future = EventTeam.query(EventTeam.team.IN([team_key for team_key in team_keys])).fetch_async(None, keys_only=True)
event_keys = set()
for et_key in event_team_keys_future.get_result():
event_key_name = et_key.id().split('_')[0]
event_keys.add(ndb.Key(Event, event_key_name))
return cls._get_teams_cache_keys_and_controllers(team_keys) + \
cls._get_eventteams_cache_keys_and_controllers(event_keys) + \
cls._get_teamlist_cache_keys_and_controllers(team_keys) + \
cls._queries_to_cache_keys_and_controllers(get_affected_queries.team_updated(affected_refs))
@classmethod
def _get_districtlist_cache_keys_and_controllers(cls, years):
cache_keys_and_controllers = []
for year in filter(None, years):
cache_keys_and_controllers.append((ApiDistrictListController.get_cache_key_from_format(year), ApiDistrictListController))
return cache_keys_and_controllers
@classmethod
def _get_district_events_cache_keys_and_controllers(cls, district_shorts, years):
cache_keys_and_controllers = []
for district_short in filter(None, district_shorts):
for year in filter(None, years):
cache_keys_and_controllers.append((ApiDistrictEventsController.get_cache_key_from_format(district_short, year), ApiDistrictEventsController))
return cache_keys_and_controllers
@classmethod
def _get_district_rankings_cache_keys_and_controllers(cls, district_shorts, years):
cache_keys_and_controllers = []
for district_short in filter(None, district_shorts):
for year in filter(None, years):
cache_keys_and_controllers.append((ApiDistrictRankingsController.get_cache_key_from_format(district_short, year), ApiDistrictRankingsController))
return cache_keys_and_controllers
@classmethod
def _get_event_awards_cache_keys_and_controllers(cls, event_keys):
cache_keys_and_controllers = []
for event_key in filter(None, event_keys):
|
apache/incubator-superset | superset/db_engine_specs/hana.py | Python | apache-2.0 | 2,364 | 0.001269 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Any, Dict, Optional
from superset.db_engine_specs.base import LimitMethod
from superset.db_engine_specs.postgres import PostgresBaseEngineSpec
from superset.utils import core as utils
class HanaEngineSpec(PostgresBaseEngineSpec):
engine = "hana"
engine_name = "SAP HANA"
limit_method = LimitMethod.WRAP_SQL
force_column_alias_quotes = True
max_column_name_length = 30
_time_grain_expressions = {
None: "{col}",
"PT1S": "TO_TIMESTAMP(SUBSTRING(TO_TIMESTAMP({col}),0,20))",
"PT1M": "TO_TIMESTAMP(SUBSTRING(TO_TIMESTAMP({col}),0,17) || '00')",
"PT1H": "TO_TIMESTAMP(SUBSTRING(TO_TIMESTAMP({col}),0,14) || '00:00')",
"P1D | ": "TO_DATE({col})",
"P1M": "TO_DATE(SUBSTRING(TO_DATE({col}),0,7)||'-01')",
"P3M": "TO_DATE(SUBSTRING( \
TO_DATE({col}), 0, 5)|| LPAD(CAST((CAST(SUBSTRING(QUARTER( \
TO_DATE({col}), 1), 7, 1) as int)-1)*3 +1 as text),2,'0') | ||'-01')",
"P1Y": "TO_DATE(YEAR({col})||'-01-01')",
}
@classmethod
def convert_dttm(
cls, target_type: str, dttm: datetime, db_extra: Optional[Dict[str, Any]] = None
) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
return f"TO_DATE('{dttm.date().isoformat()}', 'YYYY-MM-DD')"
if tt == utils.TemporalType.TIMESTAMP:
return f"""TO_TIMESTAMP('{dttm
.isoformat(timespec="microseconds")}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')"""
return None
|
callowayproject/django-tinymcewrapper | example/simpleapp/admin.py | Python | apache-2.0 | 377 | 0.002653 | from django.contrib import admin
from .models import SimpleModel, InlineModel
c | lass SimpleInline(admin.TabularInline):
model = InlineModel
class SimpleModelAdmin(admin.ModelAdmin):
list_display = ('name', )
prepopulated_fields = {'slug': ('name',)}
s | earch_fields = ('name',)
inlines = [SimpleInline]
admin.site.register(SimpleModel, SimpleModelAdmin)
|
DalenWBrauner/FloridaDataOverlay | Website/Florida_Data_Overlay/Overlay/models.py | Python | mit | 1,899 | 0.005793 | from django import forms
from django.db import models
class Births(models.Model):
year = models.IntegerField("Year")
county = models.CharField("County",max_length=20)
mothersAge = models.IntegerField("Mother's Age")
mothersEdu = models.CharField("Mother's Education",max_length=50)
source = models.URLField("Source")
isRepeat = models.BooleanField("Is a Repeat Birth")
births = models.IntegerField("Births")
def get_fields(self):
fields = []
for f in self._meta.fields:
fields.append(f.name)
return fields
def get_names(self):
names = []
for f in self._meta.fields:
names.append(self._meta.get_field(f.name).verbose_name.title())
return names
def __unicode__(self):
s = "In " + self.county + " county, " + str(self.year)
s += ", there were " + str(self.births)
if self.isRepeat: s += " repeat births to "
else: s += " first births to "
s += str(self.mothersAge) + "-year-old mothers who "
s += self.mothersEdu + ", according to " + self.source
return s
class Diseases(models.Model):
year = models.IntegerField("Year")
county = models.CharField("County",max_length=20)
topic = models.CharField("Topic",max_length=50)
# Topics:
# HIV Cases
# AIDS Cases
# HIV+AIDS Deaths
# HIV+AIDS Deaths Age-Adjusted
source = models.URLField("Source")
count = models.IntegerField("Count")
rate = models.FloatField("Rate")
def __unicode__(self):
s = "In " + self.county + " county, " + str(self.year)
s += ", there were " + str(self.count) + " "
s += self.topic + " (or " + str(self.rat | e)
s += "%), according to " + self.source
return s
|
class Upload(models.Model):
upfile = models.FileField(upload_to='Updates Go Here')
|
foarsitter/equal-gain-python | decide/data/reader.py | Python | gpl-3.0 | 6,159 | 0.001461 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import csv
from copy import copy
from typing import List, Dict
import typesystem
from .types import PartialActor, ActorIssue, PartialIssue, IssuePosition, IssueDescription, Comment
types = {
PartialActor.starts_with: PartialActor,
ActorIssue.starts_with: ActorIssue,
PartialIssue.starts_with: PartialIssue,
IssuePosition.starts_with: IssuePosition,
IssueDescription.starts_with: IssueDescription,
Comment.starts_with: Comment,
}
class InputDataFile:
def __init__(self):
self.errors = {}
self.rows = {}
self.data = {}
def add_typed_object(self, obj):
klass = obj.__class__
if klass in self.data:
self.data[klass][obj] = obj
else:
self.data[klass] = {obj: obj}
@classmethod
def open(cls, filename: str) -> "InputDataFile":
"""
Transforms a file with comma separated values to a dictionary where the key is the row number
"""
data = cls()
with open(filename, "rt", encoding="utf-8", errors="replace") as csv_file:
# guess the document format
dialect = csv.Sniffer().sniff(csv_file.read(1024))
csv_file.seek(0)
reader = csv.reader(csv_file, dialect=dialect)
InputDataFile.open_reader(reader, data)
return data
@classmethod
def open_reader(cls, reader, data=None):
if not data:
data = cls()
data.parse_rows(reader)
data.update_issues_with_positions()
if data.is_valid:
data.validate_actor_issue_positions()
return data
def parse_rows(self, items):
for index, row in enumerate(items):
# keep the original data
self.rows[index] = row
try:
type_obj = csv_row_to_type(row)
self.add_typed_object(type_obj)
except typesystem.ValidationError as e:
self.errors[index] = e # collect the error for displaying purpose
@property
def is_valid(self):
return len(self.errors) == 0
@property
def actors(self) -> Dict[str, PartialActor]:
return self.data[PartialActor]
@property
def issues(self) -> Dict[str, PartialIssue]:
return self.data[PartialIssue]
@property
def actor_issues(self) -> Dict[str, ActorIssue]:
return self.data[ActorIssue]
@property
def issue_positions(self) -> Dict[str, IssuePosition]:
return self.data[IssuePosition]
def update_issues_with_positions(self):
"""
Once the file is complete, we can update the lower an upper positions of the issue
"""
if IssuePosition in self.data:
for issue_position in self.issue_positions.values():
if issue_pos | ition.issue in self.issues:
issue = self.issues[issue_position.issue]
if issue.lower is None:
issue.lower = issue_position.position
elif issue_position.position < issue.lower:
i | ssue.lower = issue_position
if issue.upper is None:
issue.upper = issue_position.position
elif issue_position.position > issue.upper:
issue.upper = issue_position.position
self.set_default_issue_positions()
def set_default_issue_positions(self):
for issue in self.issues.values():
if issue.lower is None:
issue.lower = 0
if issue.upper is None:
issue.upper = 100
def validate_actor_issue_positions(self):
"""
Validate the positions of the actor issues against the lower & upper issue bounds
"""
# find the starting position of the actor issues, so we can show the error at the correct position
row_index_correction = 0
for type_class in types.values():
if type_class in self.data and type_class != ActorIssue:
row_index_correction += len(self.data[type_class])
for index, actor_issue in enumerate(self.actor_issues.values(), row_index_correction + 1):
if actor_issue.actor not in self.actors:
self.errors[index] = typesystem.ValidationError(
key='actor',
text='{} not found in document'.format(actor_issue.actor)
)
if actor_issue.issue in self.issues:
issue = self.issues[actor_issue.issue]
try:
actor_issue.validate_position(issue)
except typesystem.ValidationError as e:
if index in self.errors:
self.errors[index] = e
else:
self.errors[index] = e
else:
self.errors[index] = typesystem.ValidationError(
key='issue',
text='{} not found document'.format(actor_issue.issue)
)
def csv_row_to_type(row: List[str]):
"""
Translate a list of values to the corresponding object
"""
key = row[0] # the first element contains the #id field
row = row[1:] # the rest the row
if key not in types.keys():
raise Exception(f"Add key {key} to Reader.types (row row: {row}")
row_type = types[key]
field_names = row_type.fields.keys()
row = squash(len(row_type.fields), row)
obj = row_type.validate(dict(zip(field_names, row)))
return obj
def squash(fields: int, data: List[str], delimiter=" ") -> List[str]:
"""
Finds out how many fields there are and joins the overhead in to the lasted field
i.e:
The object x, y, z contains 3 field.
The row x,y,z,a,b has 5 values.
The values a & b will be squashed to z with the given delimiter
"""
if fields >= len(data):
return data
output = copy(data)
del output[-1]
output[-1] = delimiter.join(data[fields - 1:])
return output
|
kyle8998/Practice-Coding-Questions | leetcode/295-Hard-Find-Median-From-Data-Stream/answer.py | Python | unlicense | 2,682 | 0.010067 | #!/usr/bin/env python3
#-------------------------------------------------------------------------------
# Optimal 2 Heap O(1) Solution
#-------------------------------------------------------------------------------
from heapq import *
class MedianFinder:
def __init__(self):
"""
initialize your data structure here.
"""
# Create two heaps.
# One to keep track of the smaller half and one to keep track of the larger half
self.small = [] # Max Heap
self.large = [] # Min Heap
def addNum(self, num):
"""
:type num: int
:rtype: void
"""
# If heaps are equal size add appropriate element to large heap
# To determine what element to put in large heap, first we must compare and extract
# the greatest element in the small heap
#
# Note: Negative values in small heap because we want it to mimic the behavior of a max heap
if len(self.small) == len(self.large):
heappush(self.large, -heappushpop(self.small, -num))
else:
heappush(self.small, -heappushpop(self.large, num))
def findMedian(self):
"""
:rtype: float
"""
# If heaps uneven, find mean. Otherwise just remove first element in bigger heap.
if len(self.small) == len(self.large):
return float(self.large[0] - self.small[0]) / 2.0
else:
return float(self.large[0])
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
#-------------------------------------------------------------------------------
# NAIVE SORTING SOLUTION O(nlogn)
#-------------------------------------------------------------------------------
class MedianFinder:
def __init__(self):
"""
initialize your data structure here.
"""
self.nums = []
def addNum(self, num):
"""
:type num: int
:rtype: void
"""
self.nums.append(num)
def findMedian(self):
"""
:rtype: float
"""
self.nums.sort()
if len(self.nums) == 0:
return None
elif len(self.nums) % 2 == 1:
return float(self.nums[len(self.n | ums)//2])
else:
return (self.nums[len(self.nums)//2] + self.nums[(len(self.nums)//2)-1]) / 2
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
#---------------------------------------------------------- | ---------------------
|
blakev/sowing-seasons | summer/ext/logs.py | Python | mit | 362 | 0.005525 | import socket
from logging import Filter
from summer.settings import APP_CONFIG
class IPFilter(Filter):
def __init__(self, nam | e=''):
super(IPFilter, self).__init__(name)
self.ip = soc | ket.gethostbyname(socket.gethostname())
def filter(self, record):
record.ip = self.ip + ':%s' % APP_CONFIG.get('port', '??')
return True |
schlichtanders/pyparsing-2.0.3-OrderedDict | examples/excelExpr.py | Python | mit | 2,327 | 0.017619 | # excelExpr.py
#
# Copyright 2010, Paul McGuire
#
# A partial implementation of a parser of Excel formula expressions.
#
from pyparsingOD import (CaselessKeyword, Suppress, Word, alphas,
alphanums, nums, Optional, Group, oneOf, Forward, Regex,
operatorPrecedence, opAsso | c, dblQuotedString, delimitedList,
| Combine, Literal, QuotedString)
EQ,EXCL,LPAR,RPAR,COLON,COMMA = map(Suppress, '=!():,')
EXCL, DOLLAR = map(Literal,"!$")
sheetRef = Word(alphas, alphanums) | QuotedString("'",escQuote="''")
colRef = Optional(DOLLAR) + Word(alphas,max=2)
rowRef = Optional(DOLLAR) + Word(nums)
cellRef = Combine(Group(Optional(sheetRef + EXCL)("sheet") + colRef("col") +
rowRef("row")))
cellRange = (Group(cellRef("start") + COLON + cellRef("end"))("range")
| cellRef | Word(alphas,alphanums))
expr = Forward()
COMPARISON_OP = oneOf("< = > >= <= != <>")
condExpr = expr + COMPARISON_OP + expr
ifFunc = (CaselessKeyword("if") +
LPAR +
Group(condExpr)("condition") +
COMMA + expr("if_true") +
COMMA + expr("if_false") + RPAR)
statFunc = lambda name : CaselessKeyword(name) + LPAR + delimitedList(expr) + RPAR
sumFunc = statFunc("sum")
minFunc = statFunc("min")
maxFunc = statFunc("max")
aveFunc = statFunc("ave")
funcCall = ifFunc | sumFunc | minFunc | maxFunc | aveFunc
multOp = oneOf("* /")
addOp = oneOf("+ -")
numericLiteral = Regex(r"\-?\d+(\.\d+)?")
operand = numericLiteral | funcCall | cellRange | cellRef
arithExpr = operatorPrecedence(operand,
[
(multOp, 2, opAssoc.LEFT),
(addOp, 2, opAssoc.LEFT),
])
textOperand = dblQuotedString | cellRef
textExpr = operatorPrecedence(textOperand,
[
('&', 2, opAssoc.LEFT),
])
expr << (arithExpr | textExpr)
test1 = "=3*A7+5"
test2 = "=3*Sheet1!$A$7+5"
test2a ="=3*'Sheet 1'!$A$7+5"
test2b ="=3*'O''Reilly''s sheet'!$A$7+5"
test3 = "=if(Sum(A1:A25)>42,Min(B1:B25), " \
"if(Sum(C1:C25)>3.14, (Min(C1:C25)+3)*18,Max(B1:B25)))"
test3a = "=sum(a1:a25,10,min(b1,c2,d3))"
import pprint
tests = [locals()[t] for t in list(locals().keys()) if t.startswith("test")]
for test in tests:
print(test)
pprint.pprint( (EQ + expr).parseString(test,parseAll=True).asList() )
print()
|
crack-mx/habemoscurriculum | habemusCurriculum/habemusCurriculum/wsgi.py | Python | mit | 411 | 0 | """
WSGI config for habem | usCurriculum project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "habemusCurriculum.settings")
app | lication = get_wsgi_application()
|
GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx/conf/models.py | Python | apache-2.0 | 2,977 | 0 | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Python
import json
# Django
from django.db import models
# Tower
from awx.main.models.base import CreatedModifiedModel, prevent_search
from awx.main.fields import JSONField
from awx.main.utils import encrypt_field
from awx.conf import settings_registry
__all__ = ['Setting']
class Setting(CreatedModifiedModel):
key = models.CharField(
max_length=255,
)
value = JSONField(
null=True,
)
user = prevent_search(models.ForeignKey(
'auth.User',
related_name='settings',
default=None,
null=True,
editable=False,
on_delete=models.CASCADE,
))
def __str__(self):
try:
json_value = json.dumps(self.value)
except ValueError:
# In the rare case the DB value is invalid JSON.
json_value = u'<Invalid JSON>'
if self.user:
return u'{} ({}) = {}'.format(self.key, self.user, json_value)
else:
return u'{} = {}'.format(self.key, json_value)
def save(self, *args, **kwargs):
encrypted = settings_registry.is_setting_encrypted(self.key)
new_instance = not bool(self.pk)
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.
update_fields = kwargs.get('update_fields', [])
# When first saving to the database, don't store any encrypted field
# value, but instead save it until after the instance is created.
# Otherwise, store encrypted value to the database.
if encrypted:
if new_instance:
self._saved_value = self.value
self.value = ''
else:
self.value = encrypt_field(self, 'value')
if 'value' not in update_fields:
update_fields.append('value')
super(Setting, self).save(*args, **kwargs)
# After saving a new instance for the first time, set the encrypted
# field and save again.
if encrypted and new_instance:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.value = self._saved_value
self.save(update_fields=['value'])
@classmethod
def get_cache_key(self, key):
return key
@classmeth | od
def get_cache_id_key(self, key):
return '{}_ID'.format(key)
def display_value(self):
if self.key == 'LICENSE' and 'license_key' in self.value:
# don't log the license key in activity stream
value = self.value.copy()
value['license_key'] = '********'
return value
return self.value
import awx.conf.signals # noqa
from awx.main.registrar import activity_stream_registrar # noq | a
activity_stream_registrar.connect(Setting)
import awx.conf.access # noqa
|
lehmannro/translate | misc/lru.py | Python | gpl-2.0 | 4,388 | 0.000684 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Zuza Software Foundation
#
# This file is part of translate.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from collections import deque
from weakref import WeakValueDictionary
import gc
class LRUCachingDict(WeakValueDictionary):
"""Caching dictionary like object that discards the least recently
used objects when number of cached items exceeds maxsize.
cullsize is the fraction of items that will be discarded when
maxsize is reached.
"""
def __init__(self, maxsize, cullsize=2, *args, **kwargs):
self.cullsize = max(2, cullsize)
self.maxsize = max(cullsize, maxsize)
self.queue = deque()
WeakValueDictionary.__init__(self, *args, **kwargs)
def cull(self):
"""free memory by deleting old items from cache"""
# maximum cache size exceeded, cull old items
#
# note queue is the real cache but its size is boundless
# since it might have duplicate references.
#
# don't bother culling if queue is smaller than weakref,
# this means there are too many references outside the
# cache, culling won't free much memory (if any).
while len(self) >= self.maxsize <= len(self.queue):
cullsize = max(int(len(self.queue) / self.cullsize), 2)
try:
for i in range(cullsize):
self.queue.popleft()
except IndexError:
# queue is empty, bail out.
#FIXME: should we force garbage collection here too?
break
# call garbage collecter manually since objects
# with circular references take some time to get
# collected
for i in xrange(5):
gc.collect()
def __setitem__(self, key, value):
# check boundaries to minimiza duplicate references
while len(self.queue) and self.queue[0][0] == key:
# item at left end of queue pop it since it'll be appended
# to right
self.queue.popleft()
while len(self.queue) and self.queue[-1][0] == key:
# item at right end of queue pop it since it'll be
# appended again
self.queue.pop()
if len(self) >= self.maxsize:
self.cull()
self.queue.append((key, value))
WeakValueDictionary.__setitem__(self, key, value)
def __getitem__(self, key):
value = WeakValueDictionary.__getitem__(self, key)
# check boundaries to minimiza duplicate references
while len(self.queue) > 0 and self.queue[0][0] == key:
# item at left end of queue pop it since it'll be appended
# to right
self.queue.popleft()
# only append if item is not at right end of queue
| if not (len(self.queue) and self.queue[-1][0] == key):
self.queue.append((key, value))
return value
def __delitem__(self, key):
# can't efficiently find item in queue to delete, check
# boundaries. otherwise just wait til | l next cache purge
while len(self.queue) and self.queue[0][0] == key:
# item at left end of queue pop it since it'll be appended
# to right
self.queue.popleft()
while len(self.queue) and self.queue[-1][0] == key:
# item at right end of queue pop it since it'll be
# appended again
self.queue.pop()
return WeakValueDictionary.__delitem__(self, key)
def clear(self):
self.queue.clear()
return WeakValueDictionary.clear(self)
def setdefault(self, key, default):
if key not in self:
self[key] = default
return self[key]
|
william5065/fineMonkeyRunner | test.py | Python | apache-2.0 | 1,099 | 0.014138 | #!/usr/bin/env finemonkeyrunner
# -*- coding:utf8 -*-
import sys
sys.path.append(r'D:\learning\python\auto\fineMonkeyRunner')
from com.fine.android.finemonkeyrunner import fineMonkeyRunner
# 导入包路径,否则找不到 ---注意
#sys.path.append(r'C:\Users\wangxu\AppData\Local\Android\sdk\tools\testscript')
#sys.path.append(r'D:\learning\python\auto\fineMonkeyRunner')
finemonkeyrunner = fineMonkeyRunner('emulator-5554')
#finemonkeyrunner.assertfocusedwindowmame('com.mdsd.wiicare/com.mdsd.wiicare.function.LoginActivity_')
#f | inemonkeyrunner.assertcurrentactivity('com.mdsd.wiicare/com.mdsd.wiicare.function.LoginActivity_')
view = finemonkeyrunner.getviewbyID('id/etAccount')
print finemonk | eyrunner.getviewinfo_classname(view)
#print finemonkeyrunner.getelementinfo_locate('id/etAccount')
#print finemonkeyrunner.getviewinfo_visible(view)
#finemonkeyrunner.typebyid('id/etPassword','123')
#ss = finemonkeyrunner.getviewssametext('id/drawerLayout','经鼻气管插管')
#print finemonkeyrunner.viewlist
#finemonkeyrunner.getviewinfo(view)
#finemonkeyrunner.forcestopapp('com.mdsd.wiicare') |
liuyonggg/learning_python | riddle/einstein.py | Python | mit | 4,632 | 0.025907 | from pyeda.inter import *
'''
The Englishman lives in the red house.
The Swede keeps dogs.
The Dane drinks tea.
The green house is just to the left of the white one.
The owner of the green house drinks coffee.
The Pall Mall smoker keeps birds.
The owner of the yellow house smokes Dunhills.
The man in the center house drinks milk.
The Norwegian lives in the first house.
The Blend smoker has a neighbor who keeps cats.
The man who smokes Blue Masters drinks beer.
The man who keeps horses lives next to the Dunhill smoker.
The German smokes Prince.
The Norwegian lives next to the blue house.
The Blend smoker has a neighbor who drinks water.
'''
X = exprvars('x', (1,6), (1,6), (1,6))
class Solve:
def __init__(self):
X = exprvars('x', (1,6), (1,6), (1,6))
self.DIGITS = "123456789"
self.F = And (*[ And (*[ OneH | ot (*[X[r,c,v] for v in range(1,6)]) for c in range(1,6)]) for r in range(1,6)])
self.C = And (*[ And (*[ OneHot (*[X[r,c,v] for r in range(1,6)]) | for v in range(1,6)]) for c in range(1,6)])
# The Englishman lives in the red house.
self.r1 = Or (*[ And(X[r, 1, 1], X[r, 2, 1]) for r in range(1,6)])
# The Swede keeps dogs.
self.r2 = Or (*[ And(X[r, 1, 2], X[r, 3, 1]) for r in range(1,6)])
# The Dane drinks tea.
self.r3 = Or (*[ And(X[r, 1, 5], X[r, 4, 1]) for r in range(1,6)])
# The green house is just to the left of the white one.
self.r4 = Or (*[ And(X[r, 2, 2], X[r+1, 2, 3]) for r in range(1,5)])
# The owner of the green house drinks coffee.
self.r5 = Or (*[ And(X[r, 2, 2], X[r, 4, 2]) for r in range(1,6)])
# The Pall Mall smoker keeps birds.
self.r6 = Or (*[ And(X[r, 5, 1], X[r, 3, 2]) for r in range(1,6)])
# The owner of the yellow house smokes Dunhills.
self.r7 = Or (*[ And(X[r, 2, 4], X[r, 5, 2]) for r in range(1,6)])
# The man in the center house drinks milk.
self.r8 = X[3, 4, 3]
#The Norwegian lives in the first house.
self.r9 = X[1, 1, 3]
#The Blend smoker has a neighbor who keeps cats.
self.r10 = Or (Or (*[ And(X[r, 5, 3], X[r+1, 3, 2]) for r in range(1,5)]), Or (*[ And(X[r, 5, 3], X[r-1, 3, 2]) for r in range(2,6)]))
#The man who smokes Blue Masters drinks beer.
self.r11 = Or (*[ And(X[r, 5, 4], X[r, 4, 4]) for r in range(1,6)])
#The man who keeps horses lives next to the Dunhill smoker.
self.r12 = Or (Or (*[ And(X[r, 3, 4], X[r+1, 5, 2]) for r in range(1,5)]), Or (*[ And(X[r, 3, 4], X[r-1, 5, 2]) for r in range(2,6)]))
#The German smokes Prince.
self.r13 = Or (*[ And(X[r, 1, 4], X[r, 5, 5]) for r in range(1,6)])
#The Norwegian lives next to the blue house.
self.r14 = Or (Or (*[ And(X[r, 1, 3], X[r+1, 2, 5]) for r in range(1,5)]), Or (*[ And(X[r, 1, 3], X[r-1, 2, 5]) for r in range(2,6)]))
#The Blend smoker has a neighbor who drinks water.
self.r15 = Or (Or (*[ And(X[r, 5, 3], X[r+1, 4, 5]) for r in range(1,5)]), Or (*[ And(X[r, 5, 3], X[r-1, 4, 5]) for r in range(2,6)]))
def solve(self):
S = And(self.F, self.C, self.r1, self.r2, self.r3, self.r4, self.r5, self.r6, self.r7, self.r8, self.r9, self.r10, self.r11, self.r12, self.r13, self.r14, self.r15)
#answer = And(X[1,1,3], X[2,1,5], X[3,1,1], X[4,1,4], X[5,1,2], X[1,2,4], X[2,2,5], X[3,2,1], X[4,2,2], X[5,2,3], X[1,3,3],X[2,3,4], X[3,3,2], X[4,3,5], X[5,3,1], X[1,4,5],X[2,4,1],X[3,4,3],X[4,4,2],X[5,4,4],X[1,5,2],X[2,5,3],X[3,5,1],X[4,5,5],X[5,5,4])
S = S.to_cnf()
return S.satisfy_one()
def toGrid(self, gridStr):
grid = True
assert (len(gridStr) == 81)
r = 0
c = 0
for ch in gridStr:
assert (ch in self.DIGITS or ch in ".")
if ch in self.DIGITS:
grid = And (grid, X[r+1, c+1, int(ch)])
r = r + (c == 8)
c = (c + 1) % 9
return grid
def display(self, solutions):
for s in solutions:
self.display(s)
print ("\n\n")
def displayOne(self, solution):
for r in range(1,6):
line = ""
for c in range(1,6):
for v in range(1,6):
if solution[X[r, c, v]]:
line = line + ("%d " % v)
print ("%s" % line)
if __name__ == "__main__":
gridStr = (
".73...8.."
"..413..5."
".85..631."
"5...9..3."
"..8.1.5.."
".1..6...7"
".516..28."
".4..529.."
"..2...64.")
s = Solve()
print (s.solve())
#s.displayOne(s.solve(gridStr))
|
novafloss/django-anysign | demo/django_anysign_demo/models.py | Python | bsd-3-clause | 253 | 0 | from django_anysign import api as django_anysign
class SignatureTy | pe(django_anysign.SignatureType):
pass
class Signature(django_anysign.SignatureFactory(SignatureType)):
pass
|
class Signer(django_anysign.SignerFactory(Signature)):
pass
|
jorisvandenbossche/DS-python-data-analysis | notebooks/_solutions/case4_air_quality_analysis9.py | Python | bsd-3-clause | 327 | 0.009288 | fig, ax = plt.subplots()
data['2012':].mean().plot(kind='bar', ax=ax, rot=0, color='C0')
ax.set_ylabel("NO$_2$ c | oncentration (µg/m³)")
ax.axhline(y=40., color='darkorange')
ax.text(0.01, 0.48, 'Yearly limit is 40 µg/m³',
horizontal | alignment='left', fontsize=13,
transform=ax.transAxes, color='darkorange'); |
hjq300/zim-wiki | zim/errors.py | Python | gpl-2.0 | 4,949 | 0.024247 | # -*- coding: utf-8 -*-
# Copyright 2009-2013 Jaap Karssenberg <jaap.karssenberg@gmail.com>
# The Error class needed to be put in a separate file to avoid recursive
# imports.
'''This module contains the base class for all errors in zim'''
import sys
import logging
logger = logging.getLogger('zim')
use_gtk_errordialog = False
def set_use_gtk(use_gtk):
'''Set whether or not L{show_error} and L{exception_handler}
shold use the L{ErrorDialog} or not.
@param use_gtk: set C{True} for interactive gui, C{False} for
terminal mode
'''
global use_gtk_errordialog
use_gtk_errordialog = use_gtk
def get_error_msg(error):
'''Returns the message to show for an error
@param error: error object or string
@returns: 2-tuple of: message string and a boolean
whether a traceback should be shown or not
'''
if isinstance(error, Error):
# An "expected" error
return error.msg, False
elif isinstance(error, EnvironmentError):
# Normal error, e.g. OSError or IOError
msg = error.strerror
if hasattr(error, 'filename') and error.filename:
msg += ': ' + error.filename
return msg, False
else:
# An unexpected error, all other Exception's
msg = _('Looks like you found a bug') # T: generic error dialog
return msg, True
def log_error(error, debug=None):
'''Log error and traceback
@param error: error as understood by L{get_error_msg()}
@param debug: optional debug message, defaults to the error itself
'''
msg, show_trace = get_error_msg(error)
if debug is None:
debug = msg
if show_trace:
# unexpected error - will be logged with traceback
logger.exception(debug)
else:
# expected error - log trace to debug
logger.debug(debug, exc_info=1)
logger.error(msg)
def _run_error_dialog(error):
#~ try:
from zim.gui.widgets import ErrorDialog
ErrorDialog(None, error, do_logging=False).run()
| #~ except:
#~ logger.error('Failed to run error dialog')
def show_error(error):
'''Show an error by calling L{log_error()} and when running
interactive also calling L{ErrorDialog}.
@param error: the error object
'''
log_error(error)
if use_gtk_errordialog:
_run_error_dialog(error)
def exception_handler(debug):
'''Like C{show_error()} but with debug message instead of the actual
error. Intended | to be used in C{except} blocks as a catch-all for
both intended and unintended errors.
@param debug: debug message for logging
'''
# We use debug as log message, rather than the error itself
# the error itself shows up in the traceback anyway
exc_info = sys.exc_info()
error = exc_info[1]
del exc_info # recommended by manual
log_error(error, debug=debug)
if use_gtk_errordialog:
_run_error_dialog(error)
class Error(Exception):
'''Base class for all errors in zim.
This class is intended for application and usage errors, these will
be caught in the user interface and presented as error dialogs.
In contrast and Exception that does I{not} derive from this base
class will result in a "You found a bug" dialog. Do not use this
class e.g. to catch programming errors.
Subclasses should define two attributes. The first is 'msg', which is
the short description of the error. Typically this gives the specific
input / page / ... which caused the error. In there should be an attribute
'description' (either as class attribute or object attribute) with a verbose
description. This description can be less specific but should explain
the error in a user friendly way. The default behavior is to take 'msg' as
the single argument for the constructor. So a minimal subclass only needs
to define a class attribute 'description'.
For a typical error dialog in the Gtk interface the short string from 'msg'
will be shown as the title in bold letters while the longer 'description'
is shown below it in normal letters. As a guideline error classes that are
used in the gui or that can be e.g. be raised on invalid input from the
user should be translated.
'''
description = ''
msg = '<Unknown Error>'
# in case subclass does not define instance attribute
def __init__(self, msg, description=None):
self.msg = msg
if description:
self.description = description
# else use class attribute
def __str__(self):
msg = self.__unicode__()
return msg.encode('utf-8')
def __unicode__(self):
msg = u'' + self.msg.strip()
if self.description:
msg += '\n\n' + self.description.strip() + '\n'
return msg
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.msg)
# Defined here because these errors are not specific to files, but can
# occur in different storage models as well
class TrashNotSupportedError(Error):
'''Error raised when trashing is not supported and delete should
be used instead
'''
pass
class TrashCancelledError(Error):
'''Error raised when a trashign operation is cancelled. (E.g. on
windows the system will prompt the user with a confirmation
dialog which has a Cancel button.)
'''
pass
|
ulikoehler/cv_algorithms | test/TestGrassfire.py | Python | apache-2.0 | 513 | 0.009747 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
from numpy.testing import assert_approx_equal, asse | rt_allclose, assert_array_equal
from nose.tools import assert_equal, assert_true, assert_false, assert_greater, assert_less
import cv2
import cv_algorithms
import numpy as np
class TestGrassfire(object):
def test_grassfire(self):
"Test grassfire transform"
mask = np.zeros((10,10), dtype=np.uint8)
# Currently just test whether it crashes
cv_algorithms.grassfi | re(mask) |
JShadowMan/trainquery | Example.py | Python | mit | 1,687 | 0.012026 | #!/usr/bin/env python3
#
# Copyright (C) 2016-2017 ShadowMan
#
import time
import random
import asyncio
import logging
import cProfile
from trainquery import train_query, utils, train_selector, train_query_result, exceptions
logging.basicConfig(level = logging.INFO)
async def foreach_train(result):
    """Pick one random train from *result* and print its details.

    Prints the schedule line, then seat availability (best-effort: a
    ReTryExceed from the seat query is logged and skipped), then the
    check information. Non-ResultParser results are ignored.
    """
    if not isinstance(result, train_query_result.ResultParser):
        return
    chosen_code = random.choice(result.get_trains_code())
    selector = result.select(chosen_code)
    print(selector.train_code, selector.start_time, selector.arrive_time,
          selector.total_time, selector.start_station, selector.end_station)
    try:
        seat_info = await selector.seat()
        print('\t', selector.train_code, seat_info)
    except exceptions.ReTryExceed:
        logging.info('query seat retry count exceeded. ignore this train[{}]'.format(selector.train_code))
    print('\t\t', selector.train_code, await selector.check())
# Script entry: issue three identical Beijing -> Nanjing queries for
# "now plus one day" and let utils.async_startup drive the event loop.
loop = asyncio.get_event_loop()
loop.set_debug(True)
query = train_query.TrainQuery()
# NOTE(review): the date argument is a unix timestamp one day ahead --
# confirm train_query.query expects seconds-since-epoch.
task = [
    asyncio.ensure_future(query.query('北京', '南京', int(time.time()) + 3600 * 24, result_handler= foreach_train), loop = loop),
    asyncio.ensure_future(query.query('北京', '南京', int(time.time()) + 3600 * 24, result_handler = foreach_train), loop = loop),
    asyncio.ensure_future(query.query('北京', '南京', int(time.time()) + 3600 * 24, result_handler = foreach_train), loop = loop)
]
# results = loop.run_until_complete(asyncio.gather(*task))
#
# loop.run_until_complete(foreach_results(results, loop))
# loop.run_until_complete(utils.clean())
utils.async_startup(loop, *task)
# utils.async_foreach(foreach_results, *task, args = (loop,))
|
bennylope/django-simple-auth | tests/test_models.py | Python | bsd-2-clause | 302 | 0 | # encoding: utf-8
from __future__ import unicode_literals
fr | om django.test import TestCase
from simple_auth.models import Password
class ModelTests(TestCase):
    """Smoke tests for the simple_auth models."""

    def test_save_object(self):
        """A Password row can be created without raising."""
        Password.objects.create(password="ajdkjkjakdj", name="My test")
|
antechrestos/cf-python-client | main/cloudfoundry_client/v3/service_instances.py | Python | apache-2.0 | 1,550 | 0.001935 | from typing import Optional, TYPE_CHECKING, List
from cloudfoundry_client.v3.entities import Entity, EntityManager, ToOneRelationship
if TYPE_CHECKING:
from cloudfoundry_client.client import CloudFoundryClient
class ServiceInstanceManager(EntityManager):
    """Entity manager for the v3 ``/v3/service_instances`` endpoint."""

    def __init__(self, target_endpoint: str, client: "CloudFoundryClient"):
        """Bind this manager to the service-instances collection URL."""
        super(ServiceInstanceManager, self).__init__(target_endpoint, client, "/v3/service_instances")

    def create(
        self,
        name: str,
        space_guid: str,
        service_plan_guid: str,
        meta_labels: Optional[dict] = None,
        meta_annotations: Optional[dict] = None,
        parameters: Optional[dict] = None,
        tags: Optional[List[str]] = None,
    ) -> Entity:
        """Create a managed service instance named *name*.

        The instance is placed in *space_guid* and provisioned from
        *service_plan_guid*.  Parameters, tags and metadata
        (labels/annotations) are only added to the request when supplied.
        """
        request = {
            "name": name,
            "type": "managed",
            "relationships": {
                "space": ToOneRelationship(space_guid),
                "service_plan": ToOneRelationship(service_plan_guid),
            },
        }
        if parameters:
            request["parameters"] = parameters
        if tags:
            request["tags"] = tags
        metadata = {}
        if meta_labels:
            metadata["labels"] = meta_labels
        if meta_annotations:
            metadata["annotations"] = meta_annotations
        if metadata:
            request["metadata"] = metadata
        return super(ServiceInstanceManager, self)._create(request)

    def remove(self, guid: str, asynchronous: bool = True):
        """Delete service instance *guid* (asynchronously by default)."""
        super(ServiceInstanceManager, self)._remove(guid, asynchronous)
|
saikobee/pypixel | examples/rainbow_random_circles.py | Python | lgpl-2.1 | 257 | 0.023346 | #!/usr/bi | n/python2
from pypixel import *
# Draw randomly placed, randomly sized circles forever, advancing the hue
# one degree per circle so the colors cycle through the rainbow.
# NOTE(review): `random`, `show`, `circle`, `update`, WIDTH/HEIGHT come from
# pypixel's star-import above, not the stdlib random module.
show()
h = 0
while True:
    x = random(WIDTH)
    y = random(HEIGHT)
    r = random(50, 100)
    h += 1
    h %= 360
    s = 100
    v = 100
    c = hsv2rgb((h, s, v))
    circle(c, (x, y), r)
    update()
| |
StephenBarnes/qtile | libqtile/window.py | Python | mit | 42,566 | 0.000094 | # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the " | Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software | is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
import array
import pickle
import contextlib
import inspect
import traceback
from xcffib.xproto import EventMask, StackMode, SetMode
import xcffib.xproto
from . import command
from . import utils
from . import hook
# ICCM Constants
# WM_SIZE_HINTS geometry flag bits
NoValue = 0x0000
XValue = 0x0001
YValue = 0x0002
WidthValue = 0x0004
HeightValue = 0x0008
AllValues = 0x000F
XNegative = 0x0010
YNegative = 0x0020
# WM_NORMAL_HINTS flag bits
USPosition = (1 << 0)
USSize = (1 << 1)
PPosition = (1 << 2)
PSize = (1 << 3)
PMinSize = (1 << 4)
PMaxSize = (1 << 5)
PResizeInc = (1 << 6)
PAspect = (1 << 7)
PBaseSize = (1 << 8)
PWinGravity = (1 << 9)
PAllHints = (PPosition | PSize | PMinSize | PMaxSize | PResizeInc | PAspect)
# WM_HINTS flag bits
InputHint = (1 << 0)
StateHint = (1 << 1)
IconPixmapHint = (1 << 2)
IconWindowHint = (1 << 3)
IconPositionHint = (1 << 4)
IconMaskHint = (1 << 5)
WindowGroupHint = (1 << 6)
MessageHint = (1 << 7)
UrgencyHint = (1 << 8)
AllHints = (InputHint | StateHint | IconPixmapHint | IconWindowHint |
            IconPositionHint | IconMaskHint | WindowGroupHint | MessageHint |
            UrgencyHint)
# WM_STATE window states
WithdrawnState = 0
DontCareState = 0
NormalState = 1
ZoomState = 2
IconicState = 3
InactiveState = 4
# Region classification results
RectangleOut = 0
RectangleIn = 1
RectanglePart = 2
# XVisualInfo mask bits
VisualNoMask = 0x0
VisualIDMask = 0x1
VisualScreenMask = 0x2
VisualDepthMask = 0x4
VisualClassMask = 0x8
VisualRedMaskMask = 0x10
VisualGreenMaskMask = 0x20
VisualBlueMaskMask = 0x40
VisualColormapSizeMask = 0x80
VisualBitsPerRGBMask = 0x100
VisualAllMask = 0x1FF
ReleaseByFreeingColormap = 1
# Bitmap read/write status codes
BitmapSuccess = 0
BitmapOpenFailed = 1
BitmapFileInvalid = 2
BitmapNoMemory = 3
# XContext return codes
XCSUCCESS = 0
XCNOMEM = 1
XCNOENT = 2
# float states
NOT_FLOATING = 1  # not floating
FLOATING = 2
MAXIMIZED = 3
FULLSCREEN = 4
TOP = 5
MINIMIZED = 6
# EWMH _NET_WM_STATE client-message actions
_NET_WM_STATE_REMOVE = 0
_NET_WM_STATE_ADD = 1
_NET_WM_STATE_TOGGLE = 2
class _Window(command.CommandObject):
    def __init__(self, window, qtile):
        """Wrap the X11 *window* and register for its events.

        Geometry is fetched eagerly; if the window is not yet a valid
        drawable the cached values stay None and are filled lazily by the
        geometry properties.
        """
        self.window, self.qtile = window, qtile
        self.hidden = True
        self.group = None
        self.icons = {}
        # _windowMask is expected to be provided by the subclass -- it
        # selects which X events we receive for this window.
        window.set_attribute(eventmask=self._windowMask)
        try:
            g = self.window.get_geometry()
            self._x = g.x
            self._y = g.y
            self._width = g.width
            self._height = g.height
            # note that _float_info x and y are
            # really offsets, relative to screen x,y
            self._float_info = {
                'x': g.x,
                'y': g.y,
                'w': g.width,
                'h': g.height,
            }
        except xcffib.xproto.DrawableError:
            # Whoops, we were too early, so let's ignore it for now and get the
            # values on demand.
            self._x = None
            self._y = None
            self._width = None
            self._height = None
            self._float_info = None
        self.borderwidth = 0
        self.bordercolor = None
        self.name = "<no name>"
        self.strut = None
        self.state = NormalState
        self.window_type = "normal"
        self._float_state = NOT_FLOATING
        self._demands_attention = False
        # Local copy of WM_HINTS / WM_NORMAL_HINTS; refreshed by updateHints().
        self.hints = {
            'input': True,
            'icon_pixmap': None,
            'icon_window': None,
            'icon_x': 0,
            'icon_y': 0,
            'icon_mask': 0,
            'window_group': None,
            'urgent': False,
            # normal or size hints
            'width_inc': None,
            'height_inc': None,
            'base_width': 0,
            'base_height': 0,
        }
        self.updateHints()
def _geometry_getter(attr):
def get_attr(self):
if getattr(self, "_" + attr) is None:
g = self.window.get_geometry()
self.x = g.x
self.y = g.y
self.width = g.width
self.height = g.height
# note that _float_info x and y are
# really offsets, relative to screen x,y
self._float_info = {
'x': g.x, 'y': g.y,
'w': g.width, 'h': g.height
}
return getattr(self, "_" + attr)
return get_attr
def _geometry_setter(attr):
def f(self, value):
if not isinstance(value, int) and attr != "_float_info":
frame = inspect.currentframe()
stack_trace = traceback.format_stack(frame)
self.qtile.log.error("!!!! setting %s to a non-int %s; please report this!", attr, value)
self.qtile.log.error(''.join(stack_trace[:-1]))
value = int(value)
setattr(self, "_" + attr, value)
return f
x = property(fset=_geometry_setter("x"), fget=_geometry_getter("x"))
y = property(fset=_geometry_setter("y"), fget=_geometry_getter("y"))
width = property(
fset=_geometry_setter("width"),
fget=_geometry_getter("width")
)
height = property(
fset=_geometry_setter("height"),
fget=_geometry_getter("height")
)
_float_info = property(
fset=_geometry_setter("_float_info"),
fget=_geometry_getter("_float_info")
)
def updateName(self):
try:
self.name = self.window.get_name()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
hook.fire("window_name_change")
def updateHints(self):
"""
update the local copy of the window's WM_HINTS
http://tronche.com/gui/x/icccm/sec-4.html#WM_HINTS
"""
try:
h = self.window.get_wm_hints()
normh = self.window.get_wm_normal_hints()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
# FIXME
# h values
# {
# 'icon_pixmap': 4194337,
# 'icon_window': 0,
# 'icon_mask': 4194340,
# 'icon_y': 0,
# 'input': 1,
# 'icon_x': 0,
# 'window_group': 4194305
# 'initial_state': 1,
# 'flags': set(['StateHint',
# 'IconMaskHint',
# 'WindowGroupHint',
# 'InputHint',
# 'UrgencyHint',
# 'IconPixmapHint']),
# }
if normh:
normh.pop('flags')
normh['min_width'] = max(0, normh.get('min_width', 0))
normh['min_height'] = max(0, normh.get('min_height', 0))
if not normh['base_width'] and \
normh['min_width'] and \
normh['width_inc']:
# seems xcffib does ignore base width :(
normh['base_width'] = (
normh['min_width'] % normh['width_inc']
)
if not normh['base_height'] and \
normh['min_height'] and \
normh['height_inc']:
# seems xcffib does ignore base height :(
normh['base_height'] = (
|
mikaelboman/home-assistant | homeassistant/components/netatmo.py | Python | mit | 1,792 | 0 | """
Support for the Netatmo devices (Weather Station and Welcome camera).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/netatmo/
"""
import logging
from urllib.error import HTTPError
from homeassistant.const import (
CONF_API_KEY, CONF_PASSWORD, CONF_USERNAME)
from homeassistant.helpers import validate_config, discovery
# Pip requirement for the lnetatmo client library.
REQUIREMENTS = [
    'https://github.com/jabesq/netatmo-api-python/archive/'
    'v0.5.0.zip#lnetatmo==0.5.0']

_LOGGER = logging.getLogger(__name__)

# Configuration key for the Netatmo client secret.
CONF_SECRET_KEY = 'secret_key'
DOMAIN = "netatmo"

# Shared lnetatmo.ClientAuth instance; populated by setup().
NETATMO_AUTH = None
def setup(hass, config):
    """Setup the Netatmo devices.

    Validates the configuration, authenticates against the Netatmo API
    (storing the auth object in the module-level NETATMO_AUTH) and then
    loads the camera and sensor platforms.
    """
    required_keys = [CONF_API_KEY, CONF_USERNAME, CONF_PASSWORD, CONF_SECRET_KEY]
    if not validate_config(config, {DOMAIN: required_keys}, _LOGGER):
        return None

    import lnetatmo

    global NETATMO_AUTH
    conf = config[DOMAIN]
    try:
        NETATMO_AUTH = lnetatmo.ClientAuth(
            conf[CONF_API_KEY],
            conf[CONF_SECRET_KEY],
            conf[CONF_USERNAME],
            conf[CONF_PASSWORD],
            "read_station read_camera access_camera")
    except HTTPError:
        _LOGGER.error(
            "Connection error "
            "Please check your settings for NatAtmo API.")
        return False

    for platform in ('camera', 'sensor'):
        discovery.load_platform(hass, platform, DOMAIN, None, config)

    return True
|
cocoloco69/pynet | week4/w4e2.py | Python | apache-2.0 | 1,206 | 0.021559 | #!/usr/bin/env python
import paramiko
import time
from getpass import getpass
max_buffer = 65535
def send_command(rconn, cmd, idle, max_read=65535):
    """Send *cmd* over an interactive SSH channel and return the response.

    :param rconn: object exposing ``send``/``recv`` (e.g. a paramiko Channel)
    :param cmd: command string; a trailing newline is appended automatically
    :param idle: seconds to sleep before reading, letting output accumulate
    :param max_read: maximum number of bytes to read back (defaults to the
        module-level 64K buffer size, so existing callers are unaffected)
    :return: whatever ``rconn.recv`` produced
    """
    rconn.send(cmd + '\n')
    time.sleep(idle)
    return rconn.recv(max_read)
def main():
    """Connect to the lab router over SSH and enlarge its logging buffer.

    Prompts for the password, shows the log before the change, enters
    config mode to set ``logging buffered 10000``, then shows the log
    again.  Fixes the "Existing config mode" typo and uses parenthesized
    prints so the script runs under both Python 2 and 3.
    """
    ip_addr = '50.76.53.27'
    username = 'pyclass'
    password = getpass()
    port = 8022
    rconn_pre = paramiko.SSHClient()
    rconn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    rconn_pre.connect(ip_addr, username=username, password=password,
                      look_for_keys=False, allow_agent=False, port=port)
    rconn = rconn_pre.invoke_shell()
    # Disable paging so "show" output is not interrupted by --More-- prompts.
    output = send_command(rconn, "term length 0", 1)
    print(output)
    output = send_command(rconn, "show log", 2)
    print("Show log before changes\n %s" % output)
    output = send_command(rconn, "conf t", 1)
    print("Entering config mode\n %s" % output)
    output = send_command(rconn, "logging buffered 10000", 2)
    print("Making changes to the logging buffer\n %s" % output)
    output = send_command(rconn, "exit", 1)
    print("Exiting config mode.\n %s" % output)
    output = send_command(rconn, "show log", 2)
    print("Show log after the changes\n %s" % output)
# Entry point: prompts for the SSH password interactively, then runs.
if __name__ == "__main__":
    main()
|
zhenxuan00/mmdgm | conv-mmdgm/layer/LogisticRegression.py | Python | mit | 4,977 | 0.004219 | import cPickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
class LogisticRegression(object):
    """
    Multi-class Logistic Regression Class

    The logistic regression is fully described by a weight matrix :math:`W`
    and bias vector :math:`b`. Classification is done by projecting data
    points onto a set of hyperplanes, the distance to which is used to
    determine a class membership probability.
    """

    def __init__(self, input, n_in, n_out):
        """
        Initialize the parameters of the logistic regression

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie
        """
        # start-snippet-1
        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        self.W = theano.shared(
            value=numpy.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX
            ),
            name='W',
            borrow=True
        )
        # initialize the baises b as a vector of n_out 0s
        self.b = theano.shared(
            value=numpy.zeros(
                (n_out,),
                dtype=theano.config.floatX
            ),
            name='b',
            borrow=True
        )

        # symbolic expression for computing the matrix of class-membership
        # probabilities
        # Where:
        # W is a matrix where column-k represent the separation hyper plain for
        # class-k
        # x is a matrix where row-j represents input training sample-j
        # b is a vector where element-k represent the free parameter of hyper
        # plain-k
        # actually, these are functions, not variables
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # symbolic description of how to compute prediction as class whose
        # probability is maximal
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        # end-snippet-1

        # parameters of the model
        self.params = [self.W, self.b]

        # keep a reference to the symbolic input so a trained model can be
        # pickled and reused for prediction (matches the upstream Theano
        # deep-learning tutorial)
        self.input = input

    def negative_log_likelihood(self, y):
        """
        Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        .. math::

            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
                \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
            \ell (\theta=\{W,b\}, \mathcal{D})

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """
        # start-snippet-2
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of examples (call it n) in the minibatch
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        # acutually, choose the ture likelihood from a matrix m x nc
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
        # end-snippet-2

    def errors(self, y):
        """
        Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
GbalsaC/bitnamiP | openedx/core/djangoapps/user_api/preferences/tests/test_api.py | Python | agpl-3.0 | 17,881 | 0.003427 | # -*- coding: utf-8 -*-
"""
Unit tests for preference APIs.
"""
import datetime
import ddt
import unittest
from mock import patch
from pytz import UTC
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from dateutil.parser import parse as parse_datetime
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ...accounts.api import create_account
from ...errors import UserNotFound, UserNotAuthorized, PreferenceValidationError, PreferenceUpdateError
from ...models import UserProfile, UserOrgTag
from ...preferences.api import (
get_user_preference, get_user_preferences, set_user_preference, update_user_preferences, delete_user_preference,
update_email_opt_in
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Account APIs are only supported in LMS')
class TestPreferenceAPI(TestCase):
"""
These tests specifically cover the parts of the API methods that are not covered by test_views.py.
This includes the specific types of error raised, and default behavior when optional arguments
are not specified.
"""
password = "test"
    def setUp(self):
        """Create the cast of users and seed one preference for self.user."""
        super(TestPreferenceAPI, self).setUp()
        self.user = UserFactory.create(password=self.password)
        self.different_user = UserFactory.create(password=self.password)
        self.staff_user = UserFactory(is_staff=True, password=self.password)
        self.no_such_user = UserFactory.create(password=self.password)
        # Renamed in memory only (no save), so lookups by this username fail.
        self.no_such_user.username = "no_such_user"
        self.test_preference_key = "test_key"
        self.test_preference_value = "test_value"
        set_user_preference(self.user, self.test_preference_key, self.test_preference_value)
    def test_get_user_preference(self):
        """
        Verifies the basic behavior of get_user_preference.
        """
        # A user can read their own preference...
        self.assertEqual(
            get_user_preference(self.user, self.test_preference_key),
            self.test_preference_value
        )
        # ...and a staff member can read it on the user's behalf.
        self.assertEqual(
            get_user_preference(self.staff_user, self.test_preference_key, username=self.user.username),
            self.test_preference_value
        )
    def test_get_user_preference_errors(self):
        """
        Verifies that get_user_preference returns appropriate errors.
        """
        # Unknown target username.
        with self.assertRaises(UserNotFound):
            get_user_preference(self.user, self.test_preference_key, username="no_such_user")
        # Requesting user does not exist in the database.
        with self.assertRaises(UserNotFound):
            get_user_preference(self.no_such_user, self.test_preference_key)
        # A non-staff user may not read another user's preference.
        with self.assertRaises(UserNotAuthorized):
            get_user_preference(self.different_user, self.test_preference_key, username=self.user.username)
    def test_get_user_preferences(self):
        """
        Verifies the basic behavior of get_user_preferences.
        """
        expected_user_preferences = {
            self.test_preference_key: self.test_preference_value,
        }
        # Owner access and staff access on the owner's behalf both succeed.
        self.assertEqual(get_user_preferences(self.user), expected_user_preferences)
        self.assertEqual(get_user_preferences(self.staff_user, username=self.user.username), expected_user_preferences)
    def test_get_user_preferences_errors(self):
        """
        Verifies that get_user_preferences returns appropriate errors.
        """
        # Unknown target username.
        with self.assertRaises(UserNotFound):
            get_user_preferences(self.user, username="no_such_user")
        # Requesting user does not exist in the database.
        with self.assertRaises(UserNotFound):
            get_user_preferences(self.no_such_user)
        # A non-staff user may not read another user's preferences.
        with self.assertRaises(UserNotAuthorized):
            get_user_preferences(self.different_user, username=self.user.username)
    def test_set_user_preference(self):
        """
        Verifies the basic behavior of set_user_preference.
        """
        # Non-ASCII keys and values must round-trip unchanged.
        test_key = u'ⓟⓡⓔⓕⓔⓡⓔⓝⓒⓔ_ⓚⓔⓨ'
        test_value = u'ǝnןɐʌ_ǝɔuǝɹǝɟǝɹd'
        set_user_preference(self.user, test_key, test_value)
        self.assertEqual(get_user_preference(self.user, test_key), test_value)
        # Setting again with an explicit username overwrites the value.
        set_user_preference(self.user, test_key, "new_value", username=self.user.username)
        self.assertEqual(get_user_preference(self.user, test_key), "new_value")
    @patch('openedx.core.djangoapps.user_api.models.UserPreference.save')
    def test_set_user_preference_errors(self, user_preference_save):
        """
        Verifies that set_user_preference returns appropriate errors.
        """
        # Authorization failures: unknown target, unknown requester, and
        # staff/non-staff attempts to write another user's preference.
        with self.assertRaises(UserNotFound):
            set_user_preference(self.user, self.test_preference_key, "new_value", username="no_such_user")
        with self.assertRaises(UserNotFound):
            set_user_preference(self.no_such_user, self.test_preference_key, "new_value")
        with self.assertRaises(UserNotAuthorized):
            set_user_preference(self.staff_user, self.test_preference_key, "new_value", username=self.user.username)
        with self.assertRaises(UserNotAuthorized):
            set_user_preference(self.different_user, self.test_preference_key, "new_value", username=self.user.username)
        # Validation failure: key longer than the model's 255-char limit.
        too_long_key = "x" * 256
        with self.assertRaises(PreferenceValidationError) as context_manager:
            set_user_preference(self.user, too_long_key, "new_value")
        errors = context_manager.exception.preference_errors
        self.assertEqual(len(errors.keys()), 1)
        self.assertEqual(
            errors[too_long_key],
            {
                "developer_message": get_expected_validation_developer_message(too_long_key, "new_value"),
                "user_message": get_expected_key_error_user_message(too_long_key, "new_value"),
            }
        )
        # Validation failure: empty / whitespace-only values are rejected.
        for empty_value in (None, "", " "):
            with self.assertRaises(PreferenceValidationError) as context_manager:
                set_user_preference(self.user, self.test_preference_key, empty_value)
            errors = context_manager.exception.preference_errors
            self.assertEqual(len(errors.keys()), 1)
            self.assertEqual(
                errors[self.test_preference_key],
                {
                    "developer_message": get_empty_preference_message(self.test_preference_key),
                    "user_message": get_empty_preference_message(self.test_preference_key),
                }
            )
        # Storage failure: a save() exception surfaces as PreferenceUpdateError.
        user_preference_save.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            set_user_preference(self.user, u"new_key_ȻħȺɍłɇs", u"new_value_ȻħȺɍłɇs")
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Save failed for user preference 'new_key_ȻħȺɍłɇs' with value 'new_value_ȻħȺɍłɇs': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Save failed for user preference 'new_key_ȻħȺɍłɇs' with value 'new_value_ȻħȺɍłɇs'."
        )
def test_update_user_preferences(self):
"""
Verifies the basic behavior of update_user_preferences.
"""
expected_user_preferences = {
self.test_preference_key: "new_value",
}
set_user_preference(self.user, self.test_preference_key, "new_value")
self.assertEqual(
get_user_preference(self.user, self.test_preference_key),
"new_value"
)
set_user_preference(self.user, self.test_preference_key, "new_value", username=self.user.username)
self.assertEqual(
| get_user_preference(self.user, self.test_preference_key),
"new_value"
)
@patch('openedx.core.djangoapps.user_api.models.UserPreference.delete')
@patch('openedx.core.djangoapps.user_api.models.UserPreference.save')
def test_update_user_preferences_errors(sel | f, user_preference_save, user_preference_delete):
"""
Verifies that set_user_preferences returns appropriate errors.
"""
update_data = {
self.test_preference_key: "new_value |
flammified/terrabot | terrabot/packets/packet5.py | Python | mit | 340 | 0 | from . import packet
class Packet5(packet.Packet):
    """Inventory-slot packet (id 0x5).

    Writes an empty item into *slot* of *player*: stack count 0,
    prefix 0 and item id 0.  The add_* calls below define the wire
    layout, so their order must not change.
    """
    def __init__(self, player, slot):
        super(Packet5, self).__init__(0x5)
        self.add_data(player.playerID)
        self.add_data(slot)
        self.add_structured_data("<h", 0)  # Stack (little-endian int16)
        self.add_data(0)  # Prefix
        self.add_structured_data("<h", 0)  # ItemID (little-endian int16)
|
mbarsacchi/mbarsacchi.github.com | python/MarkovChain_simple.py | Python | mit | 2,443 | 0.011052 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 14 10:59:19 2017
@author: marcobarsacchi
"""
import numpy.random as random
class MarkovChain(object):
    """Simple Markov Chain Model.

    States may optionally be labelled; all state-changing methods accept
    and return labels when *states* was supplied, raw indices otherwise.
    """
    def __init__(self, n, P, T, states=None, verbose=False):
        """Initialize a simple Markov Chain.

        :param n: number of states
        :param P: initial-state probability vector of length n
        :param T: n x n row-stochastic transition matrix
        :param states: optional list of n state labels
        :param verbose: when True, print every state change
        """
        assert len(P) == n, "Probability vector should be of size %d" % n
        assert len(T) == len(T[0]) and len(T) == n, "Transition matrix should be of size %d" % n
        # Fixed: the message used "$d" with a stray % operand, which made a
        # failing assert raise TypeError instead of showing the message.
        assert states is None or len(states) == n, "States vector should be of size %d" % n
        # Number of states of the MarkovChain
        self.n = n
        self.states = states
        self.p = P
        self.T = T
        # Draw the initial state from the initial distribution P.
        self.state = random.choice(range(self.n), 1, p=self.p)[0]
        self.verbose = verbose

    def set_state(self, state):
        """Set the state for the MarkovChain to the specified state.

        Can be used for initialization.
        """
        self.state = self.states.index(state) if self.states else state
        if self.verbose:
            state = self.states[self.state] if self.states else self.state
            # Parenthesized print: works under both Python 2 and 3.
            print('State is now: %s' % (state))

    def get_state(self):
        """Get the state of the markov chain.

        Returns the state of the chain (label if *states* was given).
        """
        state = self.states[self.state] if self.states else self.state
        return state

    def move(self):
        """Move the chain of one step.

        Returns true if the state is changed.
        """
        current_state = self.state
        self.state = random.choice(range(self.n), 1, p=self.T[current_state])[0]
        state = self.states[self.state] if self.states else self.state
        if self.verbose:
            print('New State: %s' % (state))
        return self.state != current_state
# Jukes & Cantor substitution model (requires alpha < 1/3).
def JC69(nucleotide):
    """Initialize a Markov Chain for a nucleotide, using a Jukes e Cantor model.
    """
    alpha = 0.005
    states = list('ACGT')
    n = len(states)
    prob = [0.25] * n
    # Off-diagonal entries are alpha; diagonal keeps rows stochastic.
    substitution = [
        [1 - alpha * 3 if row == col else alpha for col in range(n)]
        for row in range(n)
    ]
    chain = MarkovChain(n, prob, substitution, states=states)
    chain.set_state(nucleotide)
    return chain
|
AwesomeTurtle/personfinder | app/admin_create_repo.py | Python | apache-2.0 | 2,406 | 0.000416 | #!/usr/bin/python2.7
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from const import *
from model import *
from utils import *
class Handler(BaseHandler):
    """An admin page to create a repository."""
    repo_required = False
    admin_required = True

    def get(self):
        # Show the repository-creation form.
        self.render('admin_create_repo.html')

    def post(self):
        """Create the named repo, seed default config, redirect to admin."""
        new_repo = self.params.new_repo
        Repo(key_name=new_repo).put()
        config.set_for_repo(  # Provide some defaults.
            new_repo,
            # Localization defaults (English + French).
            language_menu_options=['en', 'fr'],
            repo_titles={'en': 'Earthquake', 'fr': u'S\xe9isme'},
            keywords='person finder, people finder, person, people, ' +
                'crisis, survivor, family',
            # Record-field defaults.
            use_family_name=True,
            use_alternate_names=True,
            use_postal_code=True,
            allow_believed_dead_via_ui=False,
            min_query_word_length=2,
            show_profile_entry=False,
            profile_websites=DEFAULT_PROFILE_WEBSITES,
            # Map display defaults.
            map_default_zoom=6,
            map_default_center=[0, 0],
            map_size_pixels=[400, 280],
            # API keys are required for read/search until changed.
            read_auth_key_required=True,
            search_auth_key_required=True,
            deactivated=False,
            launched=False,
            deactivation_message_html='',
            # Per-language custom HTML snippets start empty.
            start_page_custom_htmls={'en': '', 'fr': ''},
            results_page_custom_htmls={'en': '', 'fr': ''},
            view_page_custom_htmls={'en': '', 'fr': ''},
            seek_query_form_custom_htmls={'en': '', 'fr': ''},
            footer_custom_htmls={'en': '', 'fr': ''},
            bad_words='',
            published_date=get_utcnow_timestamp(),
            updated_date=get_utcnow_timestamp(),
            test_mode=False,
            force_https=False,
            zero_rating_mode=False,
        )
        self.redirect('/admin', new_repo)
|
anderslanglands/alShaders2 | tests/tests.py | Python | bsd-3-clause | 17,604 | 0.00409 | #
#
# Copyright (c) 2014, 2015, 2016, 2017 Psyop Media Company, LLC
# See license.txt
#
#
import unittest
import subprocess
import os
import OpenImageIO as oiio
from OpenImageIO import ImageBuf, ImageSpec, ImageBufAlgo
def get_all_arnold_tests():
    """ Returns the list of arnold integration tests (Only run in Arnold)"""
    return list(get_all_cryptomatte_tests())
def get_all_cryptomatte_tests():
    """ Returns the list of cryptomatte integration tests (Only run in Arnold)"""
    # Imported lazily so merely importing this module does not require
    # the cryptomatte_tests package.
    import cryptomatte_tests
    return cryptomatte_tests.get_all_cryptomatte_tests()
#############################################
# KickAndCompare base class
#############################################
class KickAndCompareTestCase(unittest.TestCase):
ass = ""
arnold_v = 1
arnold_t = 4
@classmethod
def setUpClass(self):
assert self.ass, "No test name specified on test."
file_dir = os.path.abspath(os.path.dirname(__file__))
build_dir = os.path.normpath(os.path.join(file_dir, "..", "build")).replace("\\", "/")
print build_dir
if not os.path.exists(build_dir):
raise RuntimeError("could not find %s ", build_dir)
self.ass_file = os.path.join(file_dir, self.ass)
ass_file_name = os.path.basename(self.ass_file)
test_dir = os.path.abspath(os.path.dirname(self.ass_file))
self.result_dir = os.path.join(test_dir, "%s_result" % ass_file_name[:3]).replace("\\", "/")
self.correct_result_dir = os.path.join(test_dir, "%s_correct" % ass_file_name[:3]).replace("\\", "/")
self.result_log = os.path.join(self.result_dir, "log.txt").replace("\\", "/")
self.correct_file_names = [
x for x in os.listdir(self.correct_result_dir)
if os.path.isfile(os.path.join(self.correct_result_dir, x))
]
assert os.path.isfile(self.ass_file), "No test ass file found. %s" % (self.ass_file)
assert os.path.isdir(test_dir), "No test dir found. %s" % (test_dir)
assert os.path.isdir(self.correct_result_dir), "No correct result dir found. %s" % (
self.correct_result_dir)
# only remove previous results after it's confirmed everything else exists, to
# mitigate odds we're looking at the wrong dir or something.
if os.path.exists(self.result_dir):
assert os.path.isdir(self.result_dir), "result directory is not a directory"
else:
os.mkdir(self.result_dir)
for file_name, result_file in ((x, os.path.join(self.result_dir, x))
for x in os.listdir(self.result_dir)):
if os.path.isfile(result_file):
os.remove(os.path.join(self.result_dir, file_name))
remaining_files = [
file_name for file_name in os.listdir(self.result_dir)
if os.path.isfile(os.path.join(self.result_dir, file_name))
]
assert not remaining_files, "Files were not cleaned up: %s " % remaining_files
cmd = 'kick -v {v} -t {t} -dp -dw -sl -nostdin -logfile {log} -i {ass}'.format(
v=self.arnold_v, t=self.arnold_t, log=self.result_log, ass=ass_file_name)
cwd = test_dir.replace("\\", "/")
print cmd, cwd
env = os.environ.copy()
env["ARNOLD_PLUGIN_PATH"] = "%s;%s" % (build_dir, env.get("ARNOLD_PLUGIN_PATH", ""))
proc = subprocess.Popen(cmd, cwd=cwd, env=env, shell=True, stderr=subprocess.PIPE)
rc = proc.wait()
assert rc == 0, "Render return code indicates a failure: %s " % rc
#
# Helpers
#
def load_images(self, file_name):
allowed_exts = {".exr", ".tif", ".png", ".jpg"}
result_file = os.path.join(self.result_dir, file_name)
correct_result_file = os.path.join(self.correct_result_dir, file_name)
if os.path.splitext(result_file)[1] not in allowed_exts:
return None, None
result_image = ImageBuf(result_file)
correct_result_image = ImageBuf(correct_result_file)
return result_image, correct_result_image
def assertSameChannels(self, result_image, correct_result_image):
r_channels = set(result_image.spec().channelnames)
c_channels = set(correct_result_image.spec().channelnames)
self.assertEqual(r_channels, c_channels,
"Channels mismatch between result and correct. %s vs %s" % (r_channels,
c_channels))
def compare_image_pixels(self, result_image, correct_result_image, threshold):
"""
Given a file to find it results, compare it against the correct result.
Returns None if the input is not an image.
Returns oiio.CompareResults if results can be compared.
"""
compresults = oiio.CompareResults()
ImageBufAlgo.compare(result_image, correct_result_image, threshold, threshold, compresults)
return compresults
#
# Assertions, for use in other test cases
#
def assertAllResultFilesPresent(self):
""" Checks there are no files in result not in correct, and vice versa """
for file_name in os.listdir(self.correct_result_dir):
result_file = os.path.join(self.result_dir, file_name)
self.assertTrue(
os.path.isfile(result_file),
'Result file "%s" not found for correct answer file. %s' % (file_name,
self.result_dir))
for file_name in os.listdir(self.result_dir):
correct_result_file = os.path.join(self.correct_result_dir, file_name)
self.assertTrue(
os.path.isfile(correct_result_file),
'Correct answer "%s" not found for result file. %s' % (file_name,
self.correct_result_dir))
def assertResultImageEqual(self, file_name, threshold, msg, print_results=True):
"""
Given a file name to find it results, compare it against the correct result.
"""
result, correct_result = self.load_images(file_name)
if not result or not correct_result:
return
self.assertSameChannels(result, correct_result)
compresults = self.compare_image_ | pixels(result, correct_result, threshold)
if compresults is None:
return
if compresults.maxerror == 0.0:
result_msg = file_name + " - Perfect match."
else:
result_msg = file_name + " - meanerror: %s rms_error: %s PSNR: %s maxerror: %s " % (
compresults.meanerror, compresults.rms_error, compresults.PSNR,
compresults.maxerror)
self.assertEqual | (compresults.nfail, 0,
"%s. Did not match within threshold %s. %s" % (msg, file_name, result_msg))
# if print_results:
# print("Passed: (within tolerance) - ", result_msg)
def assertAllResultImagesEqual(self, tolerance):
""" Checks all correct images match results """
for file_name in self.correct_file_names:
self.assertResultImageEqual(file_name, tolerance, "")
#############################################
# Test cases themselves
#############################################
class CryptomatteTest01(KickAndCompareTestCase):
ass = "01_htoa_instances.ass"
def setUp(self):
self.result_images = []
for file_name in self.correct_file_names:
img, correct_img = self.load_images(file_name)
if img and correct_img:
self.result_images.append((img, correct_img))
def cryptomatte_metadata(self, ibuf):
"""Returns dictionary of key, value of cryptomatte metadata"""
return {
a.name: a.value
for a in ibuf.spec().extra_attribs if a.name.startswith("cryptomatte")
}
def sorted_cryptomatte_metadata(self, img):
"""
Gets a dictionary of the cryptomatte metadata, interleved by cryptomatte stream.
for example:
{" |
maksimbulva/sc2streamhelper_info | battlenetclient/apps.py | Python | mit | 105 | 0 | from django.ap | ps import AppConfig
class | BattlenetclientConfig(AppConfig):
name = 'battlenetclient'
|
musicbrainz/picard | test/test_bytes2human.py | Python | gpl-2.0 | 4,925 | 0.000609 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2013, 2019-2020 Laurent Monin
# Copyright (C) 2014, 2017 Sophist-UK
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018 Wieland Hoffmann
# Copyright (C) 2018-2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os.path
from test.picardtestcase import PicardTestCase
from picard.i18n import setup_gettext
from picard.util import bytes2human
class Testbytes2human(PicardTestCase):
def setUp(self):
super().setUp()
# we are using temporary locales for tests
self.tmp_path = self.mktmpdir()
self.localedir = os.path.join(self.tmp_path, 'locale')
def test_00(self):
# testing with default C locale, english
lang = 'C'
setup_gettext(self.localedir, lang)
self.run_ | test(lang)
self.assertEqual(bytes2human.binary(45682), '44.6 KiB')
self.assertEqual(bytes2human.binary(-45682), '-44.6 KiB')
self.assertEqual(bytes2human.binary(-45682, 2), '-44.61 KiB')
self.assertEqual(bytes2human.decimal(45682), '45.7 kB')
self.assertEqual(byte | s2human.decimal(45682, 2), '45.68 kB')
self.assertEqual(bytes2human.decimal(9223372036854775807), '9223.4 PB')
self.assertEqual(bytes2human.decimal(9223372036854775807, 3), '9223.372 PB')
self.assertEqual(bytes2human.decimal(123.6), '123 B')
self.assertRaises(ValueError, bytes2human.decimal, 'xxx')
self.assertRaises(ValueError, bytes2human.decimal, '123.6')
self.assertRaises(ValueError, bytes2human.binary, 'yyy')
self.assertRaises(ValueError, bytes2human.binary, '456yyy')
try:
bytes2human.decimal('123')
except Exception as e:
self.fail('Unexpected exception: %s' % e)
def test_calc_unit_raises_value_error(self):
self.assertRaises(ValueError, bytes2human.calc_unit, 1, None)
self.assertRaises(ValueError, bytes2human.calc_unit, 1, 100)
self.assertRaises(ValueError, bytes2human.calc_unit, 1, 999)
self.assertRaises(ValueError, bytes2human.calc_unit, 1, 1023)
self.assertRaises(ValueError, bytes2human.calc_unit, 1, 1025)
self.assertEqual((1.0, 'B'), bytes2human.calc_unit(1, 1024))
self.assertEqual((1.0, 'B'), bytes2human.calc_unit(1, 1000))
def run_test(self, lang='C', create_test_data=False):
"""
Compare data generated with sample files
Setting create_test_data to True will generated sample files
from code execution (developer-only, check carefully)
"""
filename = os.path.join('test', 'data', 'b2h_test_%s.dat' % lang)
testlist = self._create_testlist()
if create_test_data:
self._save_expected_to(filename, testlist)
expected = self._read_expected_from(filename)
self.assertEqual(testlist, expected)
if create_test_data:
# be sure it is disabled
self.fail('!!! UNSET create_test_data mode !!! (%s)' % filename)
@staticmethod
def _create_testlist():
values = [0, 1]
for n in [1000, 1024]:
p = 1
for e in range(0, 6):
p *= n
for x in [0.1, 0.5, 0.99, 0.9999, 1, 1.5]:
values.append(int(p * x))
list = []
for x in sorted(values):
list.append(";".join([str(x), bytes2human.decimal(x),
bytes2human.binary(x),
bytes2human.short_string(x, 1024, 2)]))
return list
@staticmethod
def _save_expected_to(path, a_list):
with open(path, 'wb') as f:
f.writelines([line + "\n" for line in a_list])
f.close()
@staticmethod
def _read_expected_from(path):
with open(path, 'r') as f:
lines = [line.rstrip("\n") for line in f.readlines()]
f.close()
return lines
def test_calc_unit(self):
self.assertEqual(bytes2human.calc_unit(12456, 1024), (12.1640625, 'KiB'))
self.assertEqual(bytes2human.calc_unit(-12456, 1000), (-12.456, 'kB'))
self.assertRaises(ValueError, bytes2human.calc_unit, 0, 1001)
|
sahildua2305/eden | modules/s3/s3profile.py | Python | mit | 20,693 | 0.001546 | # -*- coding: utf-8 -*-
""" S3 Profile
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from gluon.html import *
from gluon.http import redirect
from gluon.storage import Storage
from s3crud import S3CRUD
from s3data import S3DataList
from s3resource import S3FieldSelector
# =============================================================================
class S3Profile(S3CRUD):
"""
Interactive Method Handler for Profile Pages
Configure widgets using s3db.configure(tablename, profile_widgets=[])
@ToDo: Make more configurable:
* Currently assumes a max of 2 widgets per row
* Currently uses Bootstrap classes
* Currently uses internal widgets rather than S3Method widgets
"""
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
API entry point
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
if r.http in ("GET", "POST", "DELETE"):
if r.record:
output = self.profile(r, **attr)
else:
# Redirect to the List View
redirect(r.url(method=""))
else:
r.error(405, r.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def profile(self, r, **attr):
"""
Generate a Profile page
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
tablename = self.tablename
get_config = current.s3db.get_config
# Page Title
title = get_config(tablename, "profile_title")
if not title:
try:
title = r.record.name
except:
title = current.T("Profile Page")
# Page Header
header = get_config(tablename, "profile_header")
if not header:
header = H2(title, _class="profile_header")
output = dict(title=title,
header=header)
# Get the page widgets
widgets = get_config(tablename, "profile_widgets")
# Index the widgets by their position in the config
for index, widget in enumerate(widgets):
widget["index"] = index
if r.representation == "dl":
# Ajax-update of one datalist
get_vars = r.get_vars
index = r.get_vars.get("update", None)
if index:
try:
index = int(index)
except ValueError:
datalist = ""
else:
# @ToDo: Check permissions to the Resource & do something different if no permission
datalist = self._datalist(r, widgets[index], **attr)
output["item"] = datalist
else:
# Default page-load
rows = []
if widgets:
append = rows.append
odd = True
for widget in widgets:
w_type = widget["type"]
if odd:
row = DIV(_class="row profile")
colspan = widget.get("colspan", 1)
if w_type == "map":
row.append(self._map(r, widget, **attr))
if colspan == 2:
append(row)
elif w_type == "comments":
row.append(self._comments(r, widget, **attr))
if colspan == 2:
append(row)
elif w_type == "datalist":
row.append(self._datalist(r, widget, **attr))
if colspan == 2:
append(row)
else:
raise
if odd:
odd = False
else:
odd = True
append(row)
else:
# Method not supported for this resource
# @ToDo Some kind of 'Page not Configured'?
r.error(405, r.ERROR.BAD_METHOD)
output["rows"] = rows
current.response.view = self._view(r, "profile.html")
return output
# -------------------------------------------------------------------------
@staticmethod
def _resolve_context(context, id):
"""
Resolve a context filter
@param context: the context (as a string)
@param id: the record_id
"""
if context == "location":
# Show records linked to this Location & all it's Child Locations
s = "(location)$path"
# This vers | ion doesn't serialize_url
#m = ("%(id)s/*,*/%(id)s/*" % dict(id=id)).split(",")
#filter = (S3FieldSelector(s).like(m)) | | (S3FieldSelector(s) == id)
m = ("%(id)s,%(id)s/*,*/%(id)s/*,*/%(id)s" % dict(id=id)).split(",")
m = [f.replace("*", "%") for f in m]
filter = S3FieldSelector(s).like(m)
# @ToDo:
#elif context == "organisation":
# # Show records linked to this Organisation and all it's Branches
# s = "(%s)" % context
# filter = S3FieldSelector(s) == id
else:
# Normal: show just records linked directly to this master resource
s = "(%s)" % context
filter = S3FieldSelector(s) == id
return filter
# -------------------------------------------------------------------------
def _comments(self, r, widget, **attr):
"""
Generate a Comments widget
@param r: the S3Request instance
@param widget: the widget as a tuple: (label, type, icon)
@param attr: controller attributes for the request
@ToDo: Configurable to use either Disqus or internal Comments
"""
label = widget.get("label", "")
if label:
label = current.T(label)
icon = widget.get("icon", "")
if icon:
icon = TAG[""](I(_class=icon), " ")
# Render the widget
output = DIV(H4(icon,
label,
_class="profile-sub-header"),
DIV(_class="thumbnail"),
_class="span12")
return output
# -------------------------------------------------------------------------
def _datalist(self, r, widget, **attr):
"""
Generate a dataList
@param r: the S3Request instance
@param widget: the widget as a tuple: (label, tablename, icon, filter)
@param attr: controller attributes for the request
"""
T = current.T
s3db = current.s3db
id = r.id
context = wi |
discoproject/disco | tests/test_raw.py | Python | bsd-3-clause | 433 | 0.006928 | from disco.test import Te | stCase, TestJob
class RawJob(TestJob):
@staticmethod
def map(e, params):
yield 'raw://{0}'.format(e), ''
class RawTestCase(TestCase):
def runTest(self):
input = ['raw://eeny', 'raw://meeny', 'raw://miny', 'raw://moe']
self.job = RawJob().run(input=input)
self.assertEqual(sorted(self.results(self.job)),
sorted((i, '') for i in i | nput))
|
erikjjpost/scripts | PcapTimeline.py | Python | cc0-1.0 | 1,022 | 0.03229 | from scapy.all import *
import plotly
from datetime import datetime
import pandas as pd
#Read the packets from file
packets=rdpcap('mypcap.pcap')
#lists to hold packetinfo
pktBytes=[]
pktTimes=[]
#Read each packet and append to the lists
for pkt in packets:
if IP in pkt:
try:
pktBytes.append(pkt[IP].len)
pktTime=datetime.fromtimestamp(pkt.time)
pktTimes.append(pktTime.strftime("%Y-%m-%d %H:%M:%S.%f"))
except:
pass
# convert list to series
bytes=pd.Series(pktBytes).astype(int)
times=pd.to_datetime(pd.Series(pktTimes).astype(str), errors='coerce')
#Create the dateframe
df=pd.DataFrame({"Bytes": bytes, "Times":times})
#Set the d | ate to a timestamp
df=df.set_index('Times')
df2=df.resample('2S').sum()
print(df2)
#Create the graph
plotly.offline.plot({
"data":[plotly.graph_objs.Scatter(x=df2.index, y=df2['Bytes'])],
"layout":plotly.graph_objs.Layout(title="Bytes over Time ",
xaxis=dict(title="Time"),
yaxis | =dict(title="Bytes"))})
Output |
FABtotum/colibri-fabui | fabui/ext/py/fabtotum/fabui/constants.py | Python | gpl-2.0 | 2,175 | 0.012414 | #!/bin/env python
# -*- coding: utf-8; -*-
#
# (c) 2016 FABtotum, http://www.fabtotum.com
#
# This file is part of FABUI.
#
# FABUI is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# FABUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FABUI. If not, see <http://www.gnu.org/licenses/>.
__authors__ = "Daniel Kesler - Krios Mane"
__license__ = "GPL - https://opensource.org/licenses/GPL-3.0"
__version__ = "1.0"
############################
# HARDWARE SETTINGS #
############################
Z_MAX_OFFSET = 241.5
############### | #############
# ERROR CODES #
############################
ERROR_KILLED = 100
ERROR_STOPPED = 101
ERROR_DOOR_OPEN = 102
ERROR_MIN_TEMP = 103
ERROR_MAX_TEM | P = 104
ERROR_MAX_BED_TEMP = 105
ERROR_X_MAX_ENDSTOP = 106
ERROR_X_MIN_ENDSTOP = 107
ERROR_Y_MAX_ENDSTOP = 108
ERROR_Y_MIN_ENDSTOP = 109
ERROR_IDLE_SAFETY = 110
ERROR_WIRE_END = 111
ERROR_Y_BOTH_TRIGGERED = 120
ERROR_Z_BOTH_TRIGGERED = 121
ERROR_AMBIENT_TEMP = 122
ERROR_EXTRUDE_MINTEMP = 123
ERROR_LONG_EXTRUSION = 124
ERROR_HEAD_ABSENT = 125
ERROR_PWR_OFF = 999
############################
# GCODE #
############################
FAN_MAX_VALUE = 255
FAN_MIN_VAlUE = 0
################################
# MY.FABTOTUM.COM
################################
SERVICE_SUCCESS = 200
SERVICE_UNAUTHORIZED = 401
SERVICE_FORBIDDEN = 403
SERVICE_SERVER_ERROR = 500
SERVICE_INVALID_PARAMETER = 1001
SERVICE_ALREADY_REGISTERED = 1002
SERVICE_PRINTER_UNKNOWN = 1003
################################
# HEADS & MODULES
################################
PRISM_MODULE_ID = 8 |
thinkingserious/sendgrid-onenote | app.py | Python | mit | 2,377 | 0.008835 | from flask import Flask, request
import sendgrid
import json
import requests
import os
app = Flask(__name__)
SENDGRID_USER = os.getenv('SENDGRID_USER')
SENDGRID_PASS = os.getenv('SENDGRID_PASS')
ONENOTE_TOKEN = os.getenv('ONENOTE_TOKEN')
# Make the WSGI interface available at the top level so wfastcgi can get it.
wsgi_app = app.wsgi_app
sg = sendgrid.SendGridClient(SENDGRID_USER, SENDGRID_PASS)
# Receive a POST from the SendGrid Event Webhook
@app.route('/event', methods = ['POST'])
def event():
message = sendgrid.Mail()
message.add_to('Elmer Thomas <elmer@sendgrid.com>')
message.set_subject('Bounce Alert')
data = request.stream.read().decode("utf-8")
data = json.loads(data)
for i in range(len(data)):
# For a list of all event types see: https://sendgrid.com/docs/API_Reference/Webhooks/event.html
event = data[i]['event']
if event == "bounce":
# Create and post the OneNote message
url = "https://www.onenote.com/api/v1.0/pages"
auth = 'Bearer ' + ONENOTE_TOKEN
body = "An email from " + data[i]['email'] + " bounced. You might want to do something about that :)"
payload = "<!DOCTYPE HTML><html><head><title>Bounced Email Alert</title></head>"
payload += "<body>" + body + "</body></html>"
headers = {'Authorization':auth,'Content-type':'text/html'}
res = requests.post(url, headers=headers, data=payload)
# Send an email alert
mail = "An email sent to " + data[i]['email'] + " bounced. Return value from OneNote is: " + res.text
message.set_html(mail)
message.set_text(mail)
message.set_from('Elmer Thomas <elmer.thomas@sendgrid.com>')
status, msg = sg.send(message)
| return "HTTP/1.1 200 OK"
@app.route('/', methods = ['GET'])
def hello():
"""Renders a sample page."""
return "Hello Universe!"
@app.route('/tos', methods = ['GET'])
def tos():
return "Terms of Servi | ce Placeholder."
@app.route('/privacy', methods = ['GET'])
def privacy():
return "Privacy Policy Placeholder."
if __name__ == '__main__':
import os
HOST = os.environ.get('SERVER_HOST', 'localhost')
try:
PORT = int(os.environ.get('SERVER_PORT', '5555'))
except ValueError:
PORT = 5555
app.run(HOST, PORT)
|
jumoconnect/openjumo | server/configs/devinstance/local_settings.py | Python | mit | 786 | 0.005089 | DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'jumodj | ango',
'USER': 'jumo',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
},
}
PROXY_SERVER = ""
BROKER_HOST = ""
BROKER_PORT = 5672
BROKER_USER = ""
BROKER_PASSWORD = ""
BROKER_VHOST = "/"
#Facebook settings
FACEBOOK_APP_ID = ''
FACEBOOK_API_KEY = ''
FACEBOO | K_SECRET = ''
STATIC_URL = "http://localhost:8000"
HTTP_HOST = "localhost:8000"
ADMIN_MEDIA_PREFIX = STATIC_URL + '/static/media/admin/'
#ADMIN_MEDIA_PREFIX = 'http://static.jumo.com/static/media/admin/'
IGNORE_HTTPS = True
CELERY_ALWAYS_EAGER=True
DSTK_API_BASE = "http://DSTKSERVER"
# Make sure to fill in S3 info
AWS_ACCESS_KEY = ''
AWS_SECRET_KEY = ''
AWS_PHOTO_UPLOAD_BUCKET = ""
|
waqasbhatti/wcs2kml | python/fits/fitsparse.py | Python | bsd-3-clause | 7,156 | 0.016769 | #!/usr/bin/python
# Copyright (c) 2007-2009, Google Inc.
# Author: Ryan Scranton
# All rights reserved.
#
# Redis | tribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materi | als provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Changelist:
#
# 9/25/07 Added call to fits_simple_verify() to verify input file is FITS.
"""
Convert a FITS data table into a container with the ability to
write out a valid KML Placemark.
"""
import sys
import os
import fitslib
import pyfits
class SimplePlacemark:
"""
Just a simple container to keep track of the basic data for a single point
in the FITS file and a method for generating the associated placemark
string on demand.
"""
def __init__(self):
self.__name = ''
self.__description = ''
self.__longitude = -200.0
self.__latitude = -100.0
self.__styleUrl = ''
def Name(self):
return self.__name
def Description(self):
return self.__description
def Longitude(self):
return self.__longitude
def Latitude(self):
return self.__latitude
def StyleUrl(self):
return self.__styleUrl
def SetName(self,name):
self.__name = name
def SetDescription(self,description):
self.__description = description
def SetLongitude(self,longitude):
self.__longitude = float(longitude)
def SetLatitude(self,latitude):
self.__latitude = float(latitude)
def SetStyleUrl(self,styleUrl):
self.__styleUrl = styleUrl
def Placemark(self,indent_level=1):
tab_space = ' '*indent_level
placemark = [tab_space+'<Placemark>\n']
placemark.append(tab_space+' <name>'+self.__name+'</name>\n')
placemark.append(tab_space+' <description>'+self.__description+
'</description>\n')
placemark.append(tab_space+' <styleUrl>'+self.__styleUrl+'</styleUrl>\n')
placemark.append(tab_space+' <Point>\n')
placemark.append(tab_space+' <coordinates>'+str(self.__longitude)+','+
str(self.__latitude)+',0</coordinates>\n')
placemark.append(tab_space+' </Point>\n')
placemark.append(tab_space+'</Placemark>\n')
return ''.join(placemark)
def objIAUName(surveyName,ra,dec):
dec = float(dec)
if dec < 0.0:
sign = '-'
else:
sign = '+'
dec = abs(dec)
dec_deg = int(dec)
dec = dec - float(dec_deg)
dec = dec*60
dec_min = int(dec)
dec = dec - float(dec_min)
dec_sec = dec*60
ra = float(ra)
ra_hr = int(ra/15.0)
ra = ra/15.0 - float(ra_hr)
ra = ra*60
ra_min = int(ra)
ra = ra - float(ra_min)
ra_sec = ra*60
dec_str = "%s%02i<sup>d</sup>%02i<sup>m</sup>%02.2lf<sup>s</sup>" % (sign,dec_deg,dec_min,dec_sec)
ra_str = "%02i<sup>h</sup>%02i<sup>m</sup>%02.2lf<sup>s</sup>" % (ra_hr,ra_min,ra_sec)
iauName = "%s J%02i%02i%1.2lf%s%02i%02i%02.1lf" % (surveyName,ra_hr,ra_min,ra_sec,sign,dec_deg,dec_min,dec_sec)
return ra_str,dec_str,iauName
def parseFitsColumns(cols):
raIndex = -1
decIndex = -1
idx = 0
for colNames in cols.names:
if colNames.lower().find('ra') != -1 and raIndex == -1:
raIndex = idx
raTag = colNames
if colNames.lower().find('right_ascension') != -1 and raIndex == -1:
raIndex = idx
raTag = colNames
if colNames.lower().find('rightascension') != -1 and raIndex == -1:
raIndex = idx
raTag = colNames
if colNames.lower().find('dec') != -1 and decIndex == -1:
decIndex = idx
decTag = colNames
if colNames.lower().find('declination') != -1 and decIndex == -1:
decIndex = idx
decTag = colNames
idx = idx+1
return raTag,raIndex,decTag,decIndex
def FITSParse(fitsFile,orderbyField='',hdu=1,surveyName=''):
"""
Parse a FITS file into a list of FITSPoint instances that contain
placemark data for each object.
"""
fitslib.fits_simple_verify(fitsFile)
hduList = pyfits.open(fitsFile)
if len(surveyName) == 0:
surveyName = os.path.basename(fitsFile).split('.')[0]
cols = hduList[hdu].columns
if not cols:
return
raTag,raIndex,decTag,decIndex = parseFitsColumns(cols)
if raIndex != -1 and decIndex != -1:
print 'Found possibilities for RA and DEC in header: %s, %s' % (raTag,decTag)
else:
print "Didn't find either both RA and DEC equivalent in header."
return
tbData = hduList[1].data
if len(orderbyField) > 0:
print "Re-sorting FITS data using '"+orderbyField.upper()+"' field."
tmpCol = tbData.field(orderbyField)
idx = tmpCol.argsort()
tbData = tbData[idx]
fitsObjectList = []
objCounter = 0
for objData in tbData:
fitsObject = SimplePlacemark()
ra = objData.field(raIndex)
dec = objData.field(decIndex)
raGeo = ra - 180.0
decGeo = dec
fitsObject.SetLongitude(raGeo)
fitsObject.SetLatitude(decGeo)
(ra_str,dec_str,iauName) = objIAUName(surveyName,ra,dec)
fitsObject.SetName(iauName)
description = []
description.append("<table width='300' cellspacing='0' cellpadding='0'>")
idx = 0
for colNames in cols.names:
if idx == raIndex:
description.append("<tr><td align='center'>"+colNames+"</td><td align='center'>"+ra_str+"</td></tr>\n")
elif idx == decIndex:
description.append("<tr><td align='center'>"+colNames+"</td><td align='center'>"+dec_str+"</td></tr>\n")
else:
description.append("<tr><td align='center'>"+colNames+"</td><td align='center'>"+str(objData.field(colNames))+"</td></tr>\n")
idx = idx + 1
description.append('</table>\n')
description = ''.join(description)
fitsObject.SetDescription(description)
fitsObject.SetStyleUrl('#FitsPoint')
objCounter = objCounter + 1
if objCounter % 10000 == 0:
print '%i/%i objects...' % (objCounter,len(tbData))
fitsObjectList.append(fitsObject)
return fitsObjectList
|
biomodels/MODEL1011090002 | MODEL1011090002/model.py | Python | cc0-1.0 | 427 | 0.009368 | import os
path = os.path.dirname | (os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1011090002.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml |
sbml = libsbml.readSBMLFromString(sbmlString) |
Satariall/xvm-test | src/xpm/xvm_main/configwatchdog.py | Python | gpl-3.0 | 1,794 | 0.007246 | """ XVM (c) www.modxvm.com 2013-2015 """
__all__ = ['startConfigWatchdog', 'stopConfigWatchdog']
# PUBLIC
def startConfigWatchdog():
# debug('startConfigWatchdog')
_g_configWatchdog.stopConfigWatchdog()
_g_configWatchdog.configWatchdog()
def stopConfigWatchdog():
# debug('stopConfigWatchdog')
_g_configWatchdog.stopConfigWatchdog()
# PRIVATE
import os
import traceback
import BigWorld
from gui.shared import g_eventBus, events
from constants import *
from logger import *
class _ConfigWatchdog(object):
configWatchdogTimerId = None
lastConfigDirState = None
def configWatchdog(self):
# debug('configWatchdog(): {0}'.format(XVM.CONFIG_DIR))
self.configWatchdogTimerId = None
try:
x = [(nm, os.path.getmtime(nm)) for nm in [os.path.join(p, f)
for p, n, fn in os.walk(XVM.CONFIG_DIR)
for f in fn]]
if self.lastConfigDirState is None:
self.lastConfigDirState = x
elif self.lastConfigDirState != x:
self.lastConfigDirState = x
# debug('reload config')
g_eventBus.handleEvent(events.HasCtxEvent(XVM_EVENT.RELOAD_CONFIG, {'filename':XVM.CONFIG_FILE}))
ret | urn
except Exception, ex:
err(traceback.format_exc())
self.configWatchdogTimerId = BigWorld.callback(1, self.configWatchdog)
def stopConfigWatchdog(self):
# deb | ug('stopConfigWatchdog')
if self.configWatchdogTimerId is not None:
BigWorld.cancelCallback(self.configWatchdogTimerId)
self.configWatchdogTimerId = None
_g_configWatchdog = _ConfigWatchdog()
|
camallen/aggregation | experimental/mongo/IBCC.py | Python | apache-2.0 | 12,657 | 0.009718 | #!/usr/bin/env python
from __future__ import print_function
import csv
import pymongo
from itertools import chain, combinations
import shutil
import os
import sys
if os.path.exists("/home/ggdhines/github/pyIBCC/python"):
sys.path.append("/home/ggdhines/github/pyIBCC/python")
else:
sys.path.append("/Users/greghines/Code/pyIBCC/python")
import ibcc
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
class IBCC:
def __init__(self):
self.client = pymongo.MongoClient()
self.db = self.client['serengeti_2014-05-13']
self.species_groups = [["gazelleThomsons", "gazelleGrants"], ]
self.species_groups = [["gazelleThomsons"], ["gazelleGrants"]]
self.speciesList = ['elephant','zebra','warthog','impala','buffalo','wildebeest','gazelleThomsons','dikDik','giraffe','gazelleGrants','lionFemale','baboon','hippopotamus','ostrich','human','otherBird','hartebeest','secretaryBird','hyenaSpotted','mongoose','reedbuck','topi','guineaFowl','eland','aardvark','lionMale','porcupine','koriBustard','bushbuck','hyenaStriped','jackal','cheetah','waterbuck','leopard','reptiles','serval','aardwolf','vervetMonkey','rodents','honeyBadger','batEaredFox','rhinoceros','civet','genet','zorilla','hare','caracal','wildcat']
self.cutoff = 5
self.user_list = None
self.subject_list = None
if os.path.exists("/Users/greghines/Databases"):
self.baseDir = "/Users/greghines/Databases/serengeti/"
else:
pass
def __csv_in__(self):
#check to see if this collection already exists (for this particular cutoff) - if so, skip
db = self.client["system"]
collection = db["namespace"]
if ('merged_classifications'+str(self.cutoff)) in self.db.collection_names():
print("mongoDB collection already exists")
return
reader = csv.reader(open(self.baseDir+"goldFiltered.csv", "rb"), delimiter=",")
next(reader, None)
curr_name = None
curr_id = None
species_list = []
collection = self.db['merged_classifications'+str(self.cutoff)]
zooniverse_id_count = {}
count = 0
for row in reader:
user_name = row[1]
subject_zooniverse_id = row[2]
species = row[11]
if (user_name != curr_name) or (subject_zooniverse_id != curr_id):
if not(curr_name is None):
if curr_id in zooniverse_id_count:
zooniverse_id_count[curr_id] += 1
else:
zooniverse_id_count[curr_id] = 1
if zooniverse_id_count[curr_id] <= self.cutoff:
count += 1
document = {"user_name": curr_name, "subject_zooniverse_id": curr_id, "species_list": species_list}
collection.insert(document)
curr_name = user_name[:]
species_list = []
curr_id = subject_zooniverse_id[:]
species_list.append(species)
document = {"user_name": curr_name, "subject_zooniverse_id": curr_id, "species_list": species_list}
collection.insert(document)
def __createConfigFile(self,counter,numClasses):
f = open(self.baseDir+"ibcc/"+str(counter)+"config.py",'wb')
print("import numpy as np\nscores = np.array("+str(range(numClasses))+")", file=f)
print("nScores = len(scores)", file=f)
print("nClasses = "+str(numClasses),file=f)
print("inputFile = '"+self.baseDir+"ibcc/"+str(counter)+".in'", file=f)
print("outputFile = '"+self.baseDir+"ibcc/"+str(counter)+".out'", file=f)
print("confMatFile = '"+self.baseDir+"ibcc/"+str(counter)+".mat'", file=f)
if numClasses == 4:
print("alpha0 = np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2,2, 2]])", file=f)
print("nu0 = np.array([25.0, 25.0, 25.0, 1.0])", file=f)
elif numClasses == 2:
print("alpha0 = np.array([[2, 1], [1, 2],])", file=f)
print("nu0 = np.array([50.,50.])", file=f)
else:
assert(False)
f.close()
def __analyze_results__(self):
    """Compare the IBCC predictions against the experts' classifications.

    Reads the expert annotations once, then walks the IBCC output file of
    every species group and marks a photo as misclassified whenever the
    most probable predicted class disagrees with the class implied by the
    experts' tags.  Finally prints the number of misclassified photos.
    """
    # Collect the experts' deduplicated species tags per subject so the
    # CSV only has to be scanned a single time.
    expert_tags = [[] for _ in range(len(self.subject_list))]
    try:
        expert_file = open("NA.csv", 'rb')
    except IOError:
        expert_file = open(self.baseDir + "expert_classifications_raw.csv", "rU")
    expert_reader = csv.reader(expert_file, delimiter=',')
    next(expert_reader, None)  # skip the header row
    for record in expert_reader:
        subject_index = self.subject_list.index(record[2])
        tagged_species = record[12]
        if not (tagged_species in expert_tags[subject_index]):
            expert_tags[subject_index].append(tagged_species)
    # Assume every photo is classified correctly until proven otherwise.
    correct_classification = [1 for _ in range(len(self.subject_list))]
    group_index = -1
    for species_group in self.species_groups:
        # Each IBCC class of this group means "all species of r tagged,
        # none of the remaining species of the group tagged".
        required_l = list(powerset(species_group))
        prohibited_l = [[s for s in species_group if not (s in r)] for r in required_l]
        group_index += 1
        prediction_reader = csv.reader(open(self.baseDir + "ibcc/" + str(group_index) + ".out", "rb"), delimiter=" ")
        for record in prediction_reader:
            assert len(record) == (len(required_l) + 1)
            subject_index = int(float(record[0]))
            probabilities = [float(value) for value in record[1:]]
            predicted_class = probabilities.index(max(probabilities))
            tagged = expert_tags[subject_index]
            meet_required = [sorted(list(set(tagged).intersection(r))) == sorted(list(r)) for r in required_l]
            meet_prohibited = [tuple(set(tagged).intersection(p)) == () for p in prohibited_l]
            meet_overall = [r and p for (r, p) in zip(meet_required, meet_prohibited)]
            # Exactly one class of the group must match the expert tags.
            assert sum([1. for matched in meet_overall if matched]) == 1
            if meet_overall.index(True) != predicted_class:
                correct_classification[subject_index] = 0
    print(len(correct_classification) - sum(correct_classification))
def __find_nonempty__(self):
self.nonempty_list = []
total = 0
collection = self.db['merged_classifications'+str(self.cutoff)]
for document in collection.find():
total += 1
subject_zooniverse_id = document["subject_zooniverse_id"]
user_species_list = document["species_list"]
for speciesGroup in self.species_groups:
required_l = list(powerset(speciesGroup))
prohibited_l = [[s for s in speciesGroup if not(s in r)] for r in required_l]
meet_required = [sorted(list(set(user_species_list).intersection(r))) == sorted(list(r)) for r in required_l]
meet_prohibited = [tuple(set(user_species_list).intersection(p)) == () for p in prohibited_l]
meet_overall = [r and p for (r, p) in zip(meet_required, meet_prohibited)]
class_id = meet_overall.index(True)
if (class_id != 0) and not(subject_zooniverse_id in self.nonempty_list):
self.nonempty_list.append(subject_zooniverse_id)
print |
adamjchristiansen/CS470 | bzagents/other_pigeons/wild_pigeon.py | Python | gpl-3.0 | 2,746 | 0.02185 | #!/usr/bin/python -tt
# An incredibly simple agent. All we do is find the closest enemy tank, drive
# towards it, and shoot. Note that if friendly fire is allowed, you will very
# often kill your own tanks with this code.
#################################################################
# NOTE TO STUDENTS
# This is a starting point for you. You will need to greatly
# modify this code if you want to do anything useful. But this
# should help you to know how to interact with BZRC in order to
# get the information you need.
#
# After starting the bzrflag server, this is one way to start
# this code:
# python agent0.py [hostname] [port]
#
# Often this translates to something like the following (with the
# port name being printed out by the bzrflag server):
# python agent0.py localhost 49857
#################################################################
import sys
import math
import time
import random
import numpy
from bzrc import BZRC, Command
from numpy import linspace
class Agent(object):
    """Class handles all command and control logic for a teams tanks."""

    def __init__(self, bzrc):
        self.bzrc = bzrc
        self.constants = self.bzrc.get_constants()
        self.commands = []
        self.num_ticks = 0
        # A fresh movement order is issued only once every MAXTICKS ticks.
        self.MAXTICKS = 100

    def tick(self, time_diff):
        """Some time has passed; decide what to do next."""
        mytanks, othertanks, flags, shots = self.bzrc.get_lots_o_stuff()
        self.mytanks = mytanks
        self.othertanks = othertanks
        self.flags = [flag for flag in flags if flag.color != self.constants['team']]
        self.shots = shots
        self.enemies = [tank for tank in othertanks
                        if tank.color != self.constants['team']]
        self.obstacles = self.bzrc.get_obstacles()
        self.commands = []
        if self.num_ticks % self.MAXTICKS == 0:
            for tank in mytanks:
                # Speed drawn uniformly from [0.5, 1.0) so tanks keep moving.
                speed = 0.5 + random.random() * 0.5
                turn = 0.5
                self.commands.append(Command(tank.index, speed, 2 * turn, False))
        self.bzrc.do_commands(self.commands)
        self.num_ticks += 1
def main():
    """Entry point: parse host/port arguments, connect, and run the agent."""
    # Process CLI arguments.
    try:
        execname, host, port = sys.argv
    except ValueError:
        execname = sys.argv[0]
        print >>sys.stderr, '%s: incorrect number of arguments' % execname
        print >>sys.stderr, 'usage: %s hostname port' % sys.argv[0]
        sys.exit(-1)
    # Connect.
    #bzrc = BZRC(host, int(port), debug=True)
    bzrc = BZRC(host, int(port))
    agent = Agent(bzrc)
    prev_time = time.time()
    # Run the agent
    try:
        # NOTE(review): prev_time is never updated inside the loop, so
        # time_diff is the time since startup rather than since the
        # previous tick -- confirm that is the intended semantics.
        while True:
            time_diff = time.time() - prev_time
            agent.tick(time_diff)
    except KeyboardInterrupt:
        # Ctrl-C is the expected shutdown path; close the connection cleanly.
        print "Exiting due to keyboard interrupt."
        bzrc.close()


if __name__ == '__main__':
    main()
# vim: et sw=4 sts=4 |
30loops/libthirty | libthirty/base.py | Python | bsd-3-clause | 3,588 | 0.000279 | # Copyright (c) 2011-2012, 30loops.net
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of 30loops.net nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL 30loops.net BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base mixin classes for 30loops handlers."""
import os
import json
import requests
from requests.auth import HTTPBasicAuth
from .state import env
from .exceptions import HttpError, HttpReturnError
class HttpBaseHandler(object):
    """Base class for requests to the 30loops API. It serves as a HTTP mixin
    class for all handlers that are talking to an API endpoint."""

    def __init__(self):
        # CA bundle shipped next to this module, used to verify the API's
        # SSL certificate.
        self.cert_file = os.path.join(
            os.path.dirname(__file__), "ssl", "StartSSL_CA.pem")

    def request(self, uri, method='GET', data=None, headers=None):
        """Perform one HTTP request against the API.

        Stores the response on ``self.response``.  Raises HttpError when
        the connection fails and HttpReturnError for 4xx/5xx statuses.
        """
        # additional arguments for the http request
        kwargs = {}
        if data:
            kwargs['data'] = data
        # Only attach basic auth when both credentials are configured.
        if env.username and env.password:
            kwargs['auth'] = HTTPBasicAuth(
                username=env.username,
                password=env.password)
        if headers is None:
            headers = {}
        headers['Accept'] = "application/json"
        # BUG FIX: the headers dict was built but never handed to requests,
        # so the Accept header (and any caller-supplied headers) were
        # silently dropped.
        kwargs['headers'] = headers
        kwargs['verify'] = self.cert_file
        try:
            response = requests.request(
                method=method.lower(),
                url=uri,
                **kwargs)
        except requests.ConnectionError:
            raise HttpError("Connection error.")
        # 4xx responses carry a JSON body with an explanatory message.
        if response.status_code in (400, 401, 403, 404):
            error = json.loads(response.content)
            raise HttpReturnError(response.status_code, error['message'])
        # 5xx responses get a generic message.
        if response.status_code in (500, 501, 502, 503, 504):
            raise HttpReturnError(response.status_code,
                "There seems to be an error on 30loops.net. Chances are \
good that we are already working on it.")
        self.response = response

    def get(self):
        self.request(self.uri(), 'GET')

    def put(self, data):
        self.request(self.uri(), 'PUT', data)

    def post(self, data):
        self.request(self.uri(), 'POST', data)

    def delete(self):
        self.request(self.uri(), 'DELETE')
|
UNINETT/nav | tests/integration/models/model_test.py | Python | gpl-2.0 | 855 | 0 | """
Query DB using Django models test
Intended purpose is to catch obvious omissions in DB state or the Django models
themselves.
"""
import os
from django.db import connection
try:
# Django >= 1.8
import django.apps
get_models = django.apps.apps.get_models
del django.apps
except ImportError:
# Django < 1.9
from django.db.models im | port get_models
import pytest
import nav.models
# Ensure that all modules are loaded
for file_name in os.listdir(os.path.dirname(nav.models.__file__)):
if file_name.endswith('.py') and not file_name.startswith('__init__'):
module_name = file_name.replace('.py', '')
__import__('nav.models.%s' % module_ | name)
@pytest.mark.parametrize("model", get_models())
def test_django_model(model):
connection.close() # Ensure clean connection
list(model.objects.all()[:5])
|
Panos512/inspire-next | inspirehep/dojson/experiments/model.py | Python | gpl-2.0 | 1,040 | 0 | # -*- coding: utf-8 -*-
#
# This file | is part of INSPIRE.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in th | e hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""experiments model definition."""
from ..schema import SchemaOverdo
experiments = SchemaOverdo(schema="experiments.json")
|
stuysu/roomres.stuysu.org | utils/utils.py | Python | mit | 7,391 | 0.01353 | import smtplib
from pymongo import MongoClient
import hashlib
import re
from random import randrange
import datetime
from calendar import monthrange
#test
connection = MongoClient("localhost", 27017, connect=False)
db = connection['database']
collection = db['rooms']
"""
Returns hashed password
Args:
text - string to be hashed
Returns:
hashed string
"""
def hash(text):
return hashlib.sha256(text).hexdigest()
"""
~~-----------------------------USERS----------------------------------------~~
"""
"""
Checks whether username is allowed
Args:
username - string to be checked
Returns:
True if string is allowed
False if it is not
"""
def check_username(username):
return not re.search('[^a-zA-Z\s]', username) and len(username) > 0
"""
Registers an user with their email, name, and password.
Args:
name - club name
email - user email address
password - password for the user
Returns:
True if user does not exist
False if user already exists
"""
def register_user(name, email, pwd):
check = list(db.users.find({'email':email}))
if check == []:
t = {'name':name, 'email': email, 'pwd': hash(pwd) }
db.users.insert(t)
return True
return False
"""
Finds club name with email.
Args:
email - club user email address
Returns:
True if club exist
False if club does not exist
"""
def find_club(email):
name = list(db.users.find({'email':email}))
if name != []:
return name[0]['name']
return False
"""
Confirms a user with email and osis.
Args:
email - club user email address
pwd - password for the user
Returns:
True if user exist
False if user does not exist
"""
def confirm_user(email, pwd):
check = list(db.users.find({'email':email}))
if check != []:
if check[0]['pwd']== hash(pwd):
return True
return False
"""
Makes a calendar - dictionary
Args:
day - day of the week
date - date of the month
num - 0 = current; 1 = next moneth
Returns:
dictionary in day: [dates] format
"""
def calendardict(i):
d={}
today = str(datetime.date.today())
month = int(today.split('-')[1])
year = int(today.split('-')[0])
if i == 0:
now = list(monthrange(year, month)) # returns [weekday of first day, number of days]
if i == 1:
if month == 12:
year +=1
month = 1
else:
month += 1
now = list(monthrange(year, month))
currPos = 0
date = 1
L = []
tempL = []
if now[0] != currPos:
while currPos < 7:
if date < 2 and now[0] != currPos:
tempL += [0]
else:
tempL += [date]
date += 1
currPos += 1
L += [tempL]
tempL = []
while date < now[1] + 1:
if len(tempL) == 7:
L += [tempL]
tempL = []
else:
tempL += [date]
date +=1
L += [tempL]
print L
return L
"""
~~-----------------------------ADMIN------------------------------------~~
"""
"""
Adds rooms to room list 5 at a time
Args:
r<n>: room number
Return:
True if succeded
False if not |
"""
def add_room(l):
for room in l:
check = list(db.rooms.find({'room': room}))
today = str(datetime.date.today())
month = str(today.split('-')[1])
year = str(today.split('-')[0])
date = year + '-' + month + '-'
month2 = str((int(month)+1)%12)
if month=="12":
year2=str(int(year)+1)
date2 = year2 + '-' + month2 + '-'
d = 1
if len(room)>2 and check == []:
while d < 32:
| t = {'day': date + str(d) , 'room':room, 'club': ''}
t2 = {'day': date2 + str(d) , 'room':room, 'club': ''}
d+=1
db.rooms.insert(t)
db.rooms.insert(t2)
"""
adds club name to end of date-room-club
Args:
d = date
r = room #
e = club name
Return:
True if succeded
False if not
"""
def book_room(d, r, e):
check = list(db.rooms.find({'day': d}))
email(e, "Room Booking", "You are now booked for " + str(r) + " on " + str(d) )
if check != []:
db.rooms.update(
{
'day': d,
'room' : r
},
{'$set':
{
"club": e
}
}
)
return True
"""
*admin usage only
change room number of a club
Args:
d = date
r = room #
c = club number
r2 = new room #
Return:
True if succeded
False if not
"""
def change_room(d, r, r2, c):
check = list(db.rooms.find({'day': d}))
if check != []:
db.rooms.update(
{
'day': d,
'club': c,
'room': r
},
{'$set':
{
'room' : r2
}
}
)
book_room(d, r, c)
email(c, "Booking Changed", "Sorry for the inconvenience, but because of faculty requests, your room booking on " + d + " is now in room " + r2)
return True
"""
change password of an email
Args:
u = email
p = password
Return:
True if succeded
False if not
"""
def changepwd(u, p):
check = list(db.users.find({'email': u}))
if check != []:
db.users.update(
{
'email': u
},
{'$set':
{
'pwd' : hash(p)
}
}
)
email(u, "Password Changed", "Your password is now " + p)
return True
"""
*admin usage only
cancel a booking
Args:
d = date
r = room #
Return:
True if succeded
False if not
"""
def del_room(d, r, c):
check = list(db.rooms.find({'day': d}))
if check != []:
db.rooms.update(
{
'day': d,
'room' : r
},
{'$set':
{
"club": ''
}
}
)
email(c, "Booking Cancelled", "Your room booking on " + d + " is now cancelled")
return True
"""
*admin usage only
take a room off
Args:
r = room #
Return:
True if succeded
False if not
"""
def takeoff_room(r):
check = list(db.rooms.find({'room': r}))
if check != []:
collection.remove({'room' : r})
return True
return False
"""
Returns hashed password
Args:
name - email address to send to
Returns:
boolean if email was sent
"""
def email(name, subject, message):
send=True
TO=name
SUBJECT= subject
#randint = str(randrange(1000000000))
#TEXT="Your user name is " + name + '.' + "Your verification id is " + randint
TEXT= name + ", " + message
gmail_sender=""
gmail_passwd=""
server= smtplib.SMTP('smtp.gmail.com',587)
server.ehlo()
server.starttls()
server.ehlo
server.login(gmail_sender, gmail_passwd)
BODY='\r\n'.join([
'To: %s' % TO,
'From: %s' % gmail_sender,
'Subject: %s' % SUBJECT,
'',
TEXT
])
#if name[-9:]=='@stuy.edu':
try:
server.sendmail(gmail_sender, TO, BODY)
print 'email sent'
return True
except:
print 'Error in sending email'
#else:
#print'Please use a stuy.edu email address'
#send=False
server.quit()
return True
#return send
|
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/epmapper/epm_twr_t.py | Python | gpl-2.0 | 764 | 0.007853 | # encoding: utf-8
# module samba.dcerpc.epmapper
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/epmapper.so
# by generator 1.135
""" epmapper DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class epm_twr_t(__talloc.Object):
    # Auto-generated binding stub; the real implementation lives in the
    # compiled epmapper extension module, so the bodies here are empty.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    # Placeholder property descriptors generated by the stub tool; the
    # lambdas only satisfy the property() signature.
    tower = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    tower_length = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
roadmapper/ansible | lib/ansible/modules/cloud/vultr/vultr_ssh_key_info.py | Python | gpl-3.0 | 3,509 | 0.001426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
# (c) 2019, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_ssh_key_info
short_description: Get information about the Vultr SSH keys available.
description:
- Get infos about SSH keys available.
version_added: "2.9"
author:
- "Yanis Guenane (@Spredzy)"
- "René Moser (@resmo)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Get Vultr SSH keys infos
vultr_ssh_key_info:
register: result
- name: Print the infos
debug:
var: result.vultr_ssh_key_info
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_ssh_key_info:
description: Response from Vultr API as list
returned: success
type: complex
contains:
id:
description: ID of the ssh key
returned: success
type: str
sample: 5904bc6ed9234
name:
description: Name of the ssh key
returned: success
type: str
sample: my ssh key
date_created:
description: Date the ssh key was created
returned: success
type: str
sample: "2017-08-26 12:47:48"
ssh_key:
description: SSH public key
returned: success
type: str
sample: "ssh-rsa AA... someother@example.com"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrSSHKeyInfo(Vultr):
    """Thin wrapper around the Vultr API for listing SSH keys."""

    def __init__(self, module):
        super(AnsibleVultrSSHKeyInfo, self).__init__(module, "vultr_ssh_key_info")
        # Map raw API field names to the names returned to the playbook.
        self.returns = {
            'SSHKEYID': {'key': 'id'},
            'name': {},
            'ssh_key': {},
            'date_created': {},
        }

    def get_sshkeys(self):
        """Return the raw /v1/sshkey/list API response."""
        return self.api_query(path="/v1/sshkey/list")
def parse_keys_list(keys_list):
    """Flatten the API's {id: key_dict} mapping into a list of key dicts.

    Args:
        keys_list: mapping of SSH key id -> key data, or a falsy value
            (None / empty dict) when the account has no keys.
    Returns:
        List of the key data dicts (possibly empty).
    """
    if not keys_list:
        return []
    # The ids are not needed; .values() avoids building (id, key) pairs
    # only to discard the id (the original also shadowed the id builtin).
    return list(keys_list.values())
def main():
    """Ansible module entry point: query the SSH keys and exit with facts."""
    argument_spec = vultr_argument_spec()
    module = AnsibleModule(
        argument_spec=argument_spec,
        # Read-only module: check mode is trivially safe.
        supports_check_mode=True,
    )
    sshkey_info = AnsibleVultrSSHKeyInfo(module)
    result = sshkey_info.get_result(parse_keys_list(sshkey_info.get_sshkeys()))
    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
kagalle/darfortie | darfortie/darfortie_params.py | Python | gpl-3.0 | 4,855 | 0.012358 | # Script Name: darfortie.py
# Author: Ken Galle
# License: GPLv3
# Description: returns a dictionary of values to be used as parameters in the dar command:
# dar_path : path and name to dar executable, optional, defaults to 'dar'
# config : string, possibly None
# prune : list of string, possibly empty
# incremental : boolean
# text_sort : boolean
# source_path : string
# dest_path_and_base_name : string
import optparse
#import logging
# Keep the dar switches explicitly on the dar command line instead of buried in variables.
# so the params{} dictionary is fine, but without putting in, e.g. -R, etc.
# provides:
# config parameter string
# prune parameter string
# incremental boolean
# text_sort boolean
def parse():
    """Parse the darfortie command line.

    Returns a dict with the keys: dar_path, config, prune, incremental,
    text_sort, previous_path, source_path and dest_path_and_base_name.
    Exits with status 1 when the two positional arguments are missing
    (optparse itself exits with status 2 on option errors).
    """
    usage = ("usage: \n%prog [common-options] [backup-options] "
             "<source_path> <dest_path_and_base_name>\n")
    description = (
        "A front-end for dar that supports incremental backups based on "
        "the existing backups found in the destination folder. <source_path> is the "
        "root path to back up (dar -R). <dest_path_and_base_name> is the dar base name. This "
        "may include an optional path. This program will supply date strings to the final "
        "name and dar itself will supply slice numbers to form the complete filename.")
    epilog = ("Based on http://dar.linux.free.fr/doc/mini-howto/"
              "dar-differential-backup-mini-howto.en.html")
    parser = optparse.OptionParser(usage=usage, description=description, epilog=epilog)
    # common options
    parser.add_option("-d", "--dar", action="store", dest="dar_path",
                      metavar="dar_filespec",
                      help="filespec of dar executable; defaults to 'dar'")
    parser.add_option("-c", "--config", action="store", dest="conf",
                      metavar="config_filespec",
                      help="filespec of dar config file to use instead of .darrc or etc/darrc.")
    # backup options
    parser.add_option("-P", "--prune", action="append", dest="prune",
                      metavar="prune_path",
                      help="Specify prune paths (dar -P) to add to call to dar. Paths should be relative to "
                           "<source_path>. This option can be repeated as needed.")
    parser.add_option("-i", "--incremental", action="store_true", dest="incremental",
                      default=False,
                      help="search for previous backup to use for incremental backup (dar -A). "
                           "Finds most recent like-named backup in destination path.")
    parser.add_option("-I", "--previous_path", action="store", dest="previous_path",
                      metavar="previous_path",
                      help="alters the behavior of --incremental such that the search for a previous "
                           "backup file is done in previous_path, instead of the destination path.")
    parser.add_option("-t", "--text-sort", action="store_true", dest="text_sort",
                      default=False,
                      help="when searching for the latest previous backup to use, sort by file name instead "
                           "of sorting my file modification date. For names that have yyyymmdd, etc dates/times as "
                           "part of the names.")
    options, args = parser.parse_args()
    # Note optparse errors return exit code 2.
    if len(args) != 2:
        parser.print_usage()
        exit(1)
    return {
        'dar_path': options.dar_path,
        'config': options.conf,
        'prune': options.prune,
        'incremental': options.incremental,
        'text_sort': options.text_sort,
        'previous_path': options.previous_path,
        'source_path': args[0],
        'dest_path_and_base_name': args[1],
    }
|
PyFilesystem/pyfilesystem2 | tests/test_url_tools.py | Python | mit | 1,370 | 0.000731 | # coding: utf-8
"""Test url tools. """
from __future__ import unicode_literals
import platform
import unittest
from fs._url_tools import url_quote
class TestBase(unittest.TestCase):
    """Platform-aware checks for fs._url_tools.url_quote."""

    def test_quote(self):
        # Pairs of (input snippet, expected quoted form).
        cases = [
            ["foo/bar/egg/foofoo", "foo/bar/egg/foofoo"],
            ["foo/bar ha/barz", "foo/bar%20ha/barz"],
            ["example b.txt", "example%20b.txt"],
            ["example\u3113.txt", "example%E3%84%93.txt"],
        ]
        if platform.system() == "Windows":
            cases += [
                ["C:\\My Documents\\test.txt", "C:/My%20Documents/test.txt"],
                ["C:/My Documents/test.txt", "C:/My%20Documents/test.txt"],
                # on Windows '\' is regarded as path separator
                ["test/forward\\slash", "test/forward/slash"],
            ]
        else:
            cases += [
                # colon:tmp is bad path under Windows
                ["test/colon:tmp", "test/colon%3Atmp"],
                # Unix treat \ as %5C
                ["test/forward\\slash", "test/forward%5Cslash"],
            ]
        for snippet, expected in cases:
            self.assertEqual(url_quote(snippet), expected)
|
shadowmint/nwidget | samples/snake/views/credits_view.py | Python | apache-2.0 | 1,085 | 0.004608 | # Copyright 2013 Douglas Linder
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyglet
import cocos
import model
import nwidget
class CreditsView(cocos.layer.Layer):
    """Credits screen: clears stale UI events, shows the credits UI over
    the shared background and wires the return-to-menu event."""

    def __init__(self, assets):
        super(CreditsView, self).__init__()
        # Drop any event handlers registered by the previous view.
        nwidget.events.clear(cocos.director.director.window)
        self.is_event_handler = True
        self.ui = model.Ui("credits.py", {})
        self.add(self.ui)
        # Shared background layer.
        background = model.Background(assets)
        self.add(background.node)
        # Return to the menu when the CREDITS_MENU event fires.
        nwidget.listen("CREDITS_MENU", model.Game.menu)
|
daedric/cntouch_driver | .ycm_extra_conf.py | Python | gpl-2.0 | 6,853 | 0.024953 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-I',
'/lib/modules/3.13.0-32-generic/build/include',
'-I',
'.',
'-std=c99'
'-nostdinc',
'-isystem',
'/usr/lib/gcc/x86_64-linux-gnu/4.8/include',
'-I',
'/usr/src/linux-headers-3.13.0-32-generic/arch/x86/include',
'-I',
'arch/x86/include/generated',
'-I',
'include',
'-I',
'/usr/src/linux-headers-3.13.0-32-generic/arch/x86/include/uapi',
'-I',
'arch/x86/include/generated/uapi',
'-I',
'/usr/src/linux-headers-3.13.0-32-generic/include/uapi',
'-I',
'include/generated/uapi',
'-include',
'/usr/src/linux-headers-3.13.0-32-generic/include/linux/kconfig.h',
'-I',
'ubuntu/include',
'-D__KERNEL__',
'-Wall',
'-Wundef',
'-Wstrict-prototypes',
'-Wno-trigraphs',
'-fno-strict-aliasing',
'-fno-common',
'-Werror-implicit-function-declaration',
'-Wno-format-security',
'-fno-delete-null-pointer-checks',
'-O2',
'-m64',
'-mno-mmx',
'-mno-sse',
'-mpreferred-stack-boundary=3',
'-mtune=generic',
'-mno-red-zone',
'-mcmodel=kernel',
'-funit-at-a-time',
'-maccumulate-outgoing-args',
'-fstack-protector',
'-DCONFIG_X86_X32_ABI',
'-DCONFIG_AS_CFI=1',
'-DCONFIG_AS_CFI_SIGNAL_FRAME=1',
'-DCONFIG_AS_CFI_SECTIONS=1',
'-DCONFIG_AS_FXSAVEQ=1',
'-DCONFIG_AS_AVX=1',
'-DCONFIG_AS_AVX2=1',
'-pipe',
'-Wno-sign-compare',
'-fno-asynchronous-unwind-tables',
'-mno-sse',
'-mno-mmx',
'-mno-sse2',
'-mno-3dnow',
'-mno-avx',
'-Wframe-larger-than=1024',
'-Wno-unused-but-set-variable',
'-fno-omit-frame-pointer',
'-fno-optimize-sibling-calls',
'-fno-var-tracking-assignments',
'-pg',
'-mfentry',
'-DCC_USING_FENTRY',
'-Wdeclaration-after-statement',
'-Wno-pointer-sign',
'-fno-strict-overflow',
'-fconserve-stack',
'-Werror=implicit-int',
'-Werror=strict-prototypes',
'-DCC_HAVE_ASM_GOTO',
'-O2',
'-I/home/daedric/usb-driver/../include',
'-DMODULE',
'-D"KBUILD_STR(s)=#s"',
'-D"KBUILD_BASENAME=KBUILD_STR(cntouch)"',
'-D"KBUILD_MODNAME=KBUILD_STR(cntouch)"',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
def DirectoryOfThisScript():
    """Absolute path of the directory containing this configuration file."""
    script_path = os.path.abspath(__file__)
    return os.path.dirname(script_path)
# Use the compilation database in build/ when one exists; otherwise fall
# back to the hard-coded kernel flags defined above.
compilation_database_folder = os.path.join(DirectoryOfThisScript(), 'build/')

if os.path.exists(compilation_database_folder):
    database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
    database = None

# File extensions treated as C/C++ translation units by the helpers below.
CPP_EXTS = ['cpp', 'c', 'cc', 'cxx', 'C']
def IsHeader(path):
name, ext = os.path.splitext(path);
return ext[1:] in ['h', 'hpp', 'hh', 'hxx', 'H']
def IsSource(path):
name, ext = os.path.splitext(path);
return ext[1:] in CPP_EXTS
def GetSource(path):
  """Best-effort mapping from the header at *path* to a source file.

  First tries the sibling convention (.../include/foo.h -> .../src/foo.<ext>);
  failing that, walks the current directory tree for any file sharing the
  header's basename with a C/C++ source extension. Returns the first match,
  or None when nothing plausible is found.
  """
  stem = os.path.splitext(path)[0]
  if 'include' in stem:
    # Conventional project layout: headers under include/, sources under src/.
    stem = stem.replace('include', 'src')
  for cpp_ext in CPP_EXTS:
    candidate = stem + '.' + cpp_ext
    if os.path.exists(candidate):
      return candidate
  # Fall back to scanning the working tree for a same-named source file.
  base = os.path.splitext(os.path.basename(path))[0]
  possibilities = set(base + '.' + cpp_ext for cpp_ext in CPP_EXTS)
  for root, dirs, files in os.walk('.'):
    for match in set(files) & possibilities:
      candidate = os.path.join(root, match)
      if os.path.exists(candidate):
        return candidate
  return None
def BestEffort(filename):
  """Walk *filename*'s directory and return any plausible source file.

  Headers found during the walk are resolved through GetSource(); a source
  file is returned directly. Implicitly returns None when nothing matches.
  """
  file_dir = os.path.dirname(filename)
  for root, dirs, files in os.walk(file_dir):
    for f in files:
      f = os.path.join(root, f)
      if IsHeader(f):
        source = GetSource(f)
        if source:
          return source
      elif IsSource(f):
        return f
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
  """Return a copy of *flags* with relative include paths made absolute."""
  if not working_directory:
    return list(flags)
  path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
  absolute_flags = []
  expect_path = False
  for flag in flags:
    rewritten = flag
    if expect_path:
      expect_path = False
      # Bare paths (not absolute, not explicitly relative) are resolved
      # against the kernel headers tree.
      if not (flag.startswith('/') or flag.startswith('.')):
        rewritten = os.path.join('/usr/src/linux-headers-3.13.0-32-generic', flag)
    for prefix in path_flags:
      if flag == prefix:
        # The path comes as the next, separate flag.
        expect_path = True
        break
      if flag.startswith(prefix):
        rewritten = prefix + os.path.join(working_directory, flag[len(prefix):])
        break
    if rewritten:
      absolute_flags.append(rewritten)
  return absolute_flags
def FlagsForFile( filename ):
  """ycmd entry point: return the compile flags to use for *filename*.

  Headers are first mapped to a matching source file, since the compilation
  database only contains entries for translation units.
  """
  initial_filename = filename
  # NOTE(review): initial_filename is never used below.
  final_flags = None
  if database:
    if IsHeader(filename):
      source = GetSource(filename) or BestEffort(filename)
      if source:
        filename = os.path.abspath(source)
    compilation_info = database.GetCompilationInfoForFile( filename )
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
  if not final_flags:
    # No database hit: fall back to the hard-coded flags, resolved
    # relative to this script's directory.
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
  return {
    'flags': final_flags,
    'do_cache': True
  }
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Antonio Hernández <ahernandez@emergya.com>"
__copyright__ = "Copyright (C) 2011, Junta de Andalucía <devmaster@guadalinex.org>"
__license__ = "GPL-2"
'''Enhances builder connections, provides object to access glade objects'''
from gi.repository import GObject # pylint: disable=E0611
from gi.repository import Gtk
import inspect
import functools
import logging
logger = logging.getLogger('gecosws-confifg-assistant_lib')
from xml.etree.cElementTree import ElementTree
# this module is big so uses some conventional prefixes and postfixes
# *s list, except self.widgets is a dictionary
# *_dict dictionary
# *name string
# ele_* element in a ElementTree
# pylint: disable=R0904
# the many public methods is a feature of Gtk.Builder
class Builder(Gtk.Builder):
    ''' extra features
    connects glade defined handler to default_handler if necessary
    auto connects widget to handler with matching name or alias
    auto connects several widgets to a handler via multiple aliases
    allow handlers to lookup widget name
    logs every connection made, and any on_* not made
    '''
    def __init__(self):
        GObject.GObject.__init__(self)
        # id -> widget and widget -> id lookup tables, filled by add_from_file()
        self.widgets = {}
        # handler name -> callable (None until connect_signals() resolves it)
        self.glade_handler_dict = {}
        # (widget id, signal name, handler name) triples declared in glade
        self.connections = []
        self._reverse_widget_dict = {}
    # pylint: disable=R0201
    # this is a method so that a subclass of Builder can redefine it
    def default_handler(self,
        handler_name, filename, *args, **kwargs):
        '''helps the apprentice guru
        glade defined handlers that do not exist come here instead.
        An apprentice guru might wonder which signal does what he wants,
        now he can define any likely candidates in glade and notice which
        ones get triggered when he plays with the project.
        this method does not appear in Gtk.Builder'''
        logger.debug('''tried to call non-existent function:%s()
        expected in %s
        args:%s
        kwargs:%s''', handler_name, filename, args, kwargs)
    # pylint: enable=R0201
    def get_name(self, widget):
        ''' allows a handler to get the name (id) of a widget
        this method does not appear in Gtk.Builder'''
        return self._reverse_widget_dict.get(widget)
    def add_from_file(self, filename):
        '''parses xml file and stores wanted details'''
        Gtk.Builder.add_from_file(self, filename)
        # extract data for the extra interfaces
        tree = ElementTree()
        tree.parse(filename)
        # NOTE(review): getiterator() is deprecated in favour of iter() on
        # modern Pythons — confirm the supported interpreter versions.
        ele_widgets = tree.getiterator("object")
        for ele_widget in ele_widgets:
            name = ele_widget.attrib['id']
            widget = self.get_object(name)
            # populate indexes - a dictionary of widgets
            self.widgets[name] = widget
            # populate a reversed dictionary
            self._reverse_widget_dict[widget] = name
            # populate connections list
            ele_signals = ele_widget.findall("signal")
            connections = [
                (name,
                ele_signal.attrib['name'],
                ele_signal.attrib['handler']) for ele_signal in ele_signals]
            if connections:
                self.connections.extend(connections)
        ele_signals = tree.getiterator("signal")
        for ele_signal in ele_signals:
            # record every handler name mentioned in glade; resolved later
            self.glade_handler_dict.update(
                {ele_signal.attrib["handler"]: None})
    def connect_signals(self, callback_obj):
        '''connect the handlers defined in glade
        reports successful and failed connections
        and logs call to missing handlers'''
        filename = inspect.getfile(callback_obj.__class__)
        callback_handler_dict = dict_from_callback_obj(callback_obj)
        connection_dict = {}
        connection_dict.update(self.glade_handler_dict)
        connection_dict.update(callback_handler_dict)
        for item in connection_dict.items():
            if item[1] is None:
                # the handler is missing so reroute to default_handler
                handler = functools.partial(
                    self.default_handler, item[0], filename)
                connection_dict[item[0]] = handler
                # replace the run time warning
                logger.warn("expected handler '%s' in %s",
                    item[0], filename)
        # connect glade define handlers
        Gtk.Builder.connect_signals(self, connection_dict)
        # let's tell the user how we applied the glade design
        for connection in self.connections:
            widget_name, signal_name, handler_name = connection
            logger.debug("connect builder by design '%s', '%s', '%s'",
                widget_name, signal_name, handler_name)
    def get_ui(self, callback_obj=None, by_name=True):
        '''Creates the ui object with widgets as attributes
        connects signals by 2 methods
        this method does not appear in Gtk.Builder'''
        result = UiFactory(self.widgets)
        # Hook up any signals the user defined in glade
        if callback_obj is not None:
            # connect glade define handlers
            self.connect_signals(callback_obj)
            if by_name:
                auto_connect_by_name(callback_obj, self)
        return result
# pylint: disable=R0903
# this class deliberately does not provide any public interfaces
# apart from the glade widgets
class UiFactory():
    '''Wraps a widget dictionary, exposing each glade widget as an attribute.'''
    def __init__(self, widget_dict):
        self._widget_dict = widget_dict
        for name, widget in widget_dict.items():
            setattr(self, name, widget)
        # Design names that are not usable identifiers (spaces, dashes...)
        # additionally get a mangled, pythonic alias.
        cannot_message = """cannot bind ui.%s, name already exists
        consider using a pythonic name instead of design name '%s'"""
        consider_message = """consider using a pythonic name instead of design name '%s'"""
        for name, widget in widget_dict.items():
            alias = make_pyname(name)
            if alias == name:
                continue
            if hasattr(self, alias):
                logger.debug(cannot_message, alias, name)
            else:
                logger.debug(consider_message, name)
                setattr(self, alias, widget)
        def iterator():
            '''Support 'for o in self' '''
            return iter(widget_dict.values())
        setattr(self, '__iter__', iterator)
    def __getitem__(self, name):
        'access as dictionary where name might be non-pythonic'
        return self._widget_dict[name]
# pylint: enable=R0903
def make_pyname(name):
    '''Return *name* with every character that would be illegal in a Python
    identifier (including a leading digit) replaced by an underscore.'''
    chars = []
    for character in name:
        if (character.isalpha() or character == '_' or
                (chars and character.isdigit())):
            chars.append(character)
        else:
            chars.append('_')
    # join once instead of quadratic string concatenation
    return ''.join(chars)
def getmembers | (object, predicate=None):
"""Return all members of an object as (name, | value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
results = []
for key in dir(object):
try:
value = getattr(object, key)
except AttributeError, RuntimeError:
continue
except Exception:
continue
if not pr |
asposeslides/Aspose_Slides_Java | Plugins/Aspose-Slides-Java-for-Python/tests/WorkingWithText/ReplaceText/ReplaceText.py | Python | mit | 393 | 0.007634 | __author__ = 'fahadadeel'
import jpype
import os.path
from WorkingWithText import Repl | aceText
asposeapispath = os.path.join(os.path.abspath("../../../"), "lib")
print "You need to put your Aspose.Slides for Java APIs .jars in this folder:\n"+asposeapispath
jpype | .startJVM(jpype.getDefaultJVMPath(), "-Djava.ext.dirs=%s" % asposeapispath)
testObject = ReplaceText('data/')
testObject.main() |
# -*- coding: utf-8 -*-
import django.db.models.deletion
from django.db import models, migrations
import akvo.rsr.fields
class Migration(migrations.Migration):
    # Adds the IatiCheck model: one status/description record per project.
    dependencies = [
        ('rsr', '0067_auto_20160412_1858'),
    ]
    operations = [
        migrations.CreateModel(
            name='IatiCheck',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('status', models.PositiveSmallIntegerField(verbose_name='status')),
                ('description', akvo.rsr.fields.ValidXMLTextField(verbose_name='description')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='iati_checks', verbose_name='project', to='rsr.Project')),
            ],
            options={
                'verbose_name': 'IATI check',
                'verbose_name_plural': 'IATI checks',
            },
            bases=(models.Model,),
        ),
    ]
|
import mypickle
import util
# manages location-information for a single screenplay. a "location" is a
# single place that can be referred to using multiple scene names, e.g.
# INT. MOTEL ROOM - DAY
# INT. MOTEL ROOM - DAY - 2 HOURS LATER
# INT. MOTEL ROOM - NIGHT
class Locations:
    """Location information for a single screenplay (see module comment)."""
    # shared mypickle variable descriptors, created lazily on first init
    cvars = None
    def __init__(self):
        if not self.__class__.cvars:
            v = self.__class__.cvars = mypickle.Vars()
            v.addList("locations", [], "Locations",
                      mypickle.ListVar("", [], "",
                                       mypickle.StrLatin1Var("", "", "")))
            v.makeDicts()
        self.__class__.cvars.setDefaults(self)
        # self.locations is a list of lists of strings, where the inner
        # lists list scene names to combine into one location. e.g.
        # [
        #  [
        #    "INT. ROOM 413 - DAY",
        #    "INT. ROOM 413 - NIGHT"
        #  ]
        # ]
    # load from string 's'. does not throw any exceptions and silently
    # ignores any errors.
    def load(self, s):
        self.cvars.load(self.cvars.makeVals(s), "", self)
    # save to a string and return that.
    def save(self):
        return self.cvars.save("", self)
    # refresh location list against the given scene names (in the format
    # returned by Screenplay.getSceneNames()). removes unknown and
    # duplicate scenes from locations, and if that results in a location
    # with 0 scenes, removes that location completely. also upper-cases
    # all the scene names, sorts the lists, first each location list's
    # scenes, and then the locations based on the first scene of the
    # location.
    def refresh(self, sceneNames):
        locs = []
        added = {}
        for sceneList in self.locations:
            scenes = []
            for scene in sceneList:
                name = util.upper(scene)
                if (name in sceneNames) and (name not in added):
                    scenes.append(name)
                    added[name] = None
            if scenes:
                scenes.sort()
                locs.append(scenes)
        locs.sort()
        self.locations = locs
|
import logging
import os
import urllib2
import json
import time
import datetime
from base_controller import CacheableHandler, LoggedInHandler
from consts.client_type import ClientType
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
from helpers.model_to_dict import ModelToDict
from helpers.mytba_helper import MyTBAHelper
from models.account import Account
from models.api_auth_access import ApiAuthAccess
from models.event import Event
from models.favorite import Favorite
from models.mobile_client import MobileClient
from models.sitevar import Sitevar
from models.typeahead_entry import TypeaheadEntry
class AccountInfoHandler(LoggedInHandler):
    """
    For getting account info.
    Only provides logged in status for now.
    """
    def get(self):
        # JSON response: {'logged_in': bool, 'user_id': str or None}
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'
        user = self.user_bundle.user
        self.response.out.write(json.dumps({
            'logged_in': True if user else False,
            'user_id': user.user_id() if user else None
        }))
class AccountRegisterFCMToken(LoggedInHandler):
    """
    For adding/updating an FCM token
    """
    def post(self):
        # Reject unauthenticated requests.
        if not self.user_bundle.user:
            self.response.set_status(401)
            return
        user_id = self.user_bundle.user.user_id()
        fcm_token = self.request.get('fcm_token')
        uuid = self.request.get('uuid')
        display_name = self.request.get('display_name')
        client_type = ClientType.WEB
        # At most one MobileClient per (user, device uuid, client type).
        query = MobileClient.query(
            MobileClient.user_id == user_id,
            MobileClient.device_uuid == uuid,
            MobileClient.client_type == client_type)
        if query.count() == 0:
            # Record doesn't exist yet, so add it
            MobileClient(
                parent=ndb.Key(Account, user_id),
                user_id=user_id,
                messaging_id=fcm_token,
                client_type=client_type,
                device_uuid=uuid,
                display_name=display_name).put()
        else:
            # Record already exists, update it
            client = query.fetch(1)[0]
            client.messaging_id = fcm_token
            client.display_name = display_name
            client.put()
class AccountFavoritesHandler(LoggedInHandler):
    """
    Serves the signed-in user's favorites of a given model type as JSON.
    """
    def get(self, model_type):
        user = self.user_bundle.user
        if not user:
            self.response.set_status(401)
            return
        ancestor_key = ndb.Key(Account, user.user_id())
        favorites = Favorite.query(
            Favorite.model_type == int(model_type),
            ancestor=ancestor_key).fetch()
        converted = [ModelToDict.favoriteConverter(fav) for fav in favorites]
        self.response.out.write(json.dumps(converted))
class AccountFavoritesAddHandler(LoggedInHandler):
    """
    Records a new favorite for the signed-in user.
    """
    def post(self):
        user = self.user_bundle.user
        if not user:
            self.response.set_status(401)
            return
        user_id = user.user_id()
        favorite = Favorite(
            parent=ndb.Key(Account, user_id),
            user_id=user_id,
            model_key=self.request.get("model_key"),
            model_type=int(self.request.get("model_type")))
        MyTBAHelper.add_favorite(favorite)
class AccountFavoritesDeleteHandler(LoggedInHandler):
    """
    Removes one of the signed-in user's favorites.
    """
    def post(self):
        user = self.user_bundle.user
        if not user:
            self.response.set_status(401)
            return
        MyTBAHelper.remove_favorite(
            user.user_id(),
            self.request.get("model_key"),
            int(self.request.get("model_type")))
class LiveEventHandler(CacheableHandler):
    """
    Returns the necessary details to render live components
    Uses timestamp for aggressive caching
    """
    CACHE_VERSION = 1
    CACHE_KEY_FORMAT = "live-event:{}:{}"  # (event_key, timestamp)
    CACHE_HEADER_LENGTH = 60 * 10
    def __init__(self, *args, **kw):
        super(LiveEventHandler, self).__init__(*args, **kw)
        self._cache_expiration = self.CACHE_HEADER_LENGTH
    def get(self, event_key, timestamp):
        # Future timestamps would poison the cache; reject them.
        if int(timestamp) > time.time():
            self.abort(404)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key, timestamp)
        super(LiveEventHandler, self).get(event_key, timestamp)
    def _render(self, event_key, timestamp):
        # Returns a JSON document with the event's match list.
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'
        event = Event.get_by_id(event_key)
        matches = []
        for match in event.matches:
            matches.append({
                'name': match.short_name,
                'alliances': match.alliances,
                'order': match.play_order,
                'time_str': match.time_string,
            })
        event_dict = {
            # 'rankings': event.rankings,
            # 'matchstats': event.matchstats,
            'matches': matches,
        }
        return json.dumps(event_dict)
class TypeaheadHandler(CacheableHandler):
    """
    Currently just returns a list of all teams and events
    Needs to be optimized at some point.
    Tried a trie but the datastructure was too big to
    fit into memcache efficiently
    """
    CACHE_VERSION = 2
    CACHE_KEY_FORMAT = "typeahead_entries:{}"  # (search_key)
    CACHE_HEADER_LENGTH = 60 * 60 * 24
    def __init__(self, *args, **kw):
        super(TypeaheadHandler, self).__init__(*args, **kw)
        self._cache_expiration = self.CACHE_HEADER_LENGTH
    def get(self, search_key):
        # search_key arrives URL-encoded in the path.
        search_key = urllib2.unquote(search_key)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(search_key)
        super(TypeaheadHandler, self).get(search_key)
    def _render(self, search_key):
        # Serves the precomputed JSON blob stored in TypeaheadEntry, or an
        # empty JSON list when there is no entry for this key.
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'
        entry = TypeaheadEntry.get_by_id(search_key)
        if entry is None:
            return '[]'
        else:
            self._last_modified = entry.updated
            return entry.data_json
class WebcastHandler(CacheableHandler):
"""
Returns the HTML necessary to generate the webcast embed for a given event
"""
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "webcast_{}_{}" # (event_key)
CACHE_HEADER_LENGTH = 60 * 5
def __init__(self, *args, **kw):
super(WebcastHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def get(self, event_key, webcast_number):
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key, webcast_number)
super(WebcastHandler, self).get(event_key, webcast_number)
def _render(self, event_key, webcast_number):
self.response.headers.add_header('content-type', 'application/json', charset='utf-8')
output = {}
if not webcast_number.isdigit():
return json.dumps(output)
webcast_number = int(webcast_number) - 1
event = Event.get_by_id(event_key)
if event and event.webcast:
webcast = event.webcast[webcast_number]
if 'type' in webcast and 'channel' in webcast:
output['player'] = self._renderPlayer(webcast)
else:
special_webcasts_future = Sitevar.get_by_id_async('gameday.special_webcasts')
special_webcasts = special_webcasts_future.get_result()
if special_webcasts:
special_webcasts = special_webcasts.contents['webcasts']
else:
special_webcasts = []
special_webcasts_dict = {}
for webcast in special_webcasts:
special_webcasts_dict[webcast['key_name']] = webcast
if event_key in special_webcasts_dict:
webcast = special_webcasts_dict[event_key]
if 'type' in webcast and 'channel' in webcast:
out |
# -*- encoding: utf8 -*-
"""Tests for distutils.command.check."""
import unittest
from test.test_support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
class CheckTestCase(support.LoggingSilencer,
                    support.TempdirManager,
                    unittest.TestCase):
    """Tests for the distutils 'check' command (metadata and reST checks)."""
    def _run(self, metadata=None, **options):
        # Build a dist with *metadata*, configure a check command with
        # *options*, run it, and return the command for inspection.
        if metadata is None:
            metadata = {}
        pkg_info, dist = self.create_dist(**metadata)
        cmd = check(dist)
        cmd.initialize_options()
        for name, value in options.items():
            setattr(cmd, name, value)
        cmd.ensure_finalized()
        cmd.run()
        return cmd
    def test_check_metadata(self):
        # let's run the command with no metadata at all
        # by default, check is checking the metadata
        # should have some warnings
        cmd = self._run()
        self.assertEqual(cmd._warnings, 2)
        # now let's add the required fields
        # and run it again, to make sure we don't get
        # any warning anymore
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)
        # now with the strict mode, we should
        # get an error if there are missing metadata
        self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
        # and of course, no error when all metadata are present
        cmd = self._run(metadata, strict=1)
        self.assertEqual(cmd._warnings, 0)
        # now a test with Unicode entries
        metadata = {'url': u'xxx', 'author': u'\u00c9ric',
                    'author_email': u'xxx', u'name': 'xxx',
                    'version': u'xxx',
                    'description': u'Something about esszet \u00df',
                    'long_description': u'More things about esszet \u00df'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)
    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_document(self):
        pkg_info, dist = self.create_dist()
        cmd = check(dist)
        # let's see if it detects broken rest
        broken_rest = 'title\n===\n\ntest'
        msgs = cmd._check_rst_data(broken_rest)
        self.assertEqual(len(msgs), 1)
        # and non-broken rest
        rest = 'title\n=====\n\ntest'
        msgs = cmd._check_rst_data(rest)
        self.assertEqual(len(msgs), 0)
    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_restructuredtext(self):
        # let's see if it detects broken rest in long_description
        broken_rest = 'title\n===\n\ntest'
        pkg_info, dist = self.create_dist(long_description=broken_rest)
        cmd = check(dist)
        cmd.check_restructuredtext()
        self.assertEqual(cmd._warnings, 1)
        # let's see if we have an error with strict=1
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx',
                    'long_description': broken_rest}
        self.assertRaises(DistutilsSetupError, self._run, metadata,
                          **{'strict': 1, 'restructuredtext': 1})
        # and non-broken rest, including a non-ASCII character to test #12114
        metadata['long_description'] = u'title\n=====\n\ntest \u00df'
        cmd = self._run(metadata, strict=1, restructuredtext=1)
        self.assertEqual(cmd._warnings, 0)
    def test_check_all(self):
        metadata = {'url': 'xxx', 'author': 'xxx'}
        self.assertRaises(DistutilsSetupError, self._run,
                          {}, **{'strict': 1,
                                 'restructuredtext': 1})
def test_suite():
    """Return all tests in this module as a suite for the distutils runner."""
    # unittest.makeSuite is deprecated (removed in Python 3.13); the
    # TestLoader equivalent builds the same suite.
    return unittest.TestLoader().loadTestsFromTestCase(CheckTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
# Copyright 2012 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the GCI readonly template classes.
"""
from soc.views import readonly_template
class GCIModelReadOnlyTemplate(readonly_template.ModelReadOnlyTemplate):
  """Class to render readonly templates for GCI models.

  Only the template path is overridden; rendering behavior is inherited
  from soc.views.readonly_template.ModelReadOnlyTemplate.
  """
  # Template used for all GCI read-only model views.
  template_path = 'modules/gci/_readonly_template.html'
|
# encoding: utf-8
"""Tests for io.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io as stdlib_io
import os.path
import stat
import sys
from io import StringIO
from subprocess import Popen, PIPE
import unittest
import nose.tools as nt
from IPython.testing.decorators import skipif, skip_win32
from IPython.utils.io import IOStream, Tee, capture_output
from IPython.utils.py3compat import doctest_refactor_print
from IPython.utils.tempdir import TemporaryDirectory
def test_tee_simple():
    "Very simple check with stdout only"
    message = 'Hello'
    buffer = StringIO()
    tee = Tee(buffer, channel='stdout')
    print(message, file=buffer)
    nt.assert_equal(buffer.getvalue(), message + "\n")
class TeeTestCase(unittest.TestCase):
    def tchan(self, channel, check='close'):
        """Exercise Tee on *channel*, finalizing it via close() or del."""
        trap = StringIO()
        chan = StringIO()
        text = 'Hello'
        # Temporarily replace sys.<channel> so Tee writes into 'trap'.
        std_ori = getattr(sys, channel)
        setattr(sys, channel, trap)
        tee = Tee(chan, channel=channel)
        print(text, end='', file=chan)
        setattr(sys, channel, std_ori)
        # NOTE(review): trap_val is captured but never asserted on.
        trap_val = trap.getvalue()
        nt.assert_equal(chan.getvalue(), text)
        if check == 'close':
            tee.close()
        else:
            del tee
    def test(self):
        # Both channels, finalized both ways.
        for chan in ['stdout', 'stderr']:
            for check in ['close', 'del']:
                self.tchan(chan, check)
def test_io_init():
    """Test that io.stdin/out/err exist at startup"""
    for name in ('stdin', 'stdout', 'stderr'):
        cmd = doctest_refactor_print("from IPython.utils import io;print io.%s.__class__"%name)
        # Run in a fresh interpreter so we see the state at startup.
        p = Popen([sys.executable, '-c', cmd],
                  stdout=PIPE)
        p.wait()
        classname = p.stdout.read().strip().decode('ascii')
        # __class__ is a reference to the class object in Python 3, so we can't
        # just test for string equality.
        assert 'IPython.utils.io.IOStream' in classname, classname
def test_IOStream_init():
    """IOStream initializes from a file-like object missing attributes. """
    # Cause a failure from getattr and dir(). (Issue #6386)
    class BadStringIO(StringIO):
        # dir() advertises a 'name' attribute that getattr() cannot
        # actually return, which used to crash IOStream's attribute probing.
        def __dir__(self):
            attrs = super(StringIO, self).__dir__()
            attrs.append('name')
            return attrs
    iostream = IOStream(BadStringIO())
    iostream.write('hi, bad iostream\n')
    # IOStream must skip the bogus attribute rather than propagate it.
    assert not hasattr(iostream, 'name')
def test_capture_output():
    """capture_output() context works"""
    with capture_output() as captured:
        print('hi, stdout')
        print('hi, stderr', file=sys.stderr)
    nt.assert_equal(captured.stdout, 'hi, stdout\n')
    nt.assert_equal(captured.stderr, 'hi, stderr\n')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-04 09:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames unit description fields from desc* to description* (all
    # language variants, long and short forms).
    dependencies = [
        ("services", "0054_rename_last_modified_time"),
    ]
    operations = [
        migrations.RenameField(
            model_name="unit",
            old_name="desc",
            new_name="description",
        ),
        migrations.RenameField(
            model_name="unit",
            old_name="desc_en",
            new_name="description_en",
        ),
        migrations.RenameField(
            model_name="unit",
            old_name="desc_fi",
            new_name="description_fi",
        ),
        migrations.RenameField(
            model_name="unit",
            old_name="desc_sv",
            new_name="description_sv",
        ),
        migrations.RenameField(
            model_name="unit",
            old_name="short_desc",
            new_name="short_description",
        ),
        migrations.RenameField(
            model_name="unit",
            old_name="short_desc_en",
            new_name="short_description_en",
        ),
        migrations.RenameField(
            model_name="unit",
            old_name="short_desc_fi",
            new_name="short_description_fi",
        ),
        migrations.RenameField(
            model_name="unit",
            old_name="short_desc_sv",
            new_name="short_description_sv",
        ),
    ]
|
import re
################################################################################
# Human Command Protocol
################################################################################
class HumanCommandProtocol:
    """Maps free-form human text onto command handlers via regex patterns."""
    def __init__(self):
        # (pattern, handler) pairs, tried in order by parse().
        self.commands = (('^help', self.help),)
    def help(self, *args):
        """
        Returns help text, list of lines..
        """
        return ['describe controls']
    def parse(self, text):
        """
        Parse text and execute appropriate command.

        Returns the matched handler's result, or a one-tuple with an
        'Unknown command' message when nothing matches.
        """
        for (pattern, handler) in self.commands:
            match = re.search(pattern, text)
            if match:
                return handler(text)
        return ('Unknown command',)
|
from django.db import models
from django.shortcuts import resolve_url as r
from eventex.subscriptions.validators import validate_cpf
class Subscription(models.Model):
    """An event subscription (verbose names are in Brazilian Portuguese)."""
    name = models.CharField('nome', max_length=100)
    cpf = models.CharField('CPF', max_length=11, validators=[validate_cpf])
    email = models.EmailField('e-mail', blank=True)
    phone = models.CharField('telefone', max_length=20, blank=True)
    paid = models.BooleanField('pago', default=False)
    created_at = models.DateTimeField('criado em', auto_now_add=True)
    class Meta:
        # Newest subscriptions first.
        ordering = ('-created_at',)
        verbose_name = 'inscrição'
        verbose_name_plural = 'inscrições'
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return r('subscriptions:detail', self.pk)
|
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2015 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""Stacked chart related tests"""
from pygal import StackedLine
def test_stacked_line():
    """Test stacked line"""
    stacked = StackedLine()
    stacked.add('one_two', [1, 2])
    stacked.add('ten_twelve', [10, 12])
    q = stacked.render_pyquery()
    # Values are cumulative: the second series renders as 1+10 and 2+12.
    assert set(q("desc.value").text().split(' ')) == set(
        ('1', '2', '11', '14'))
def test_stacked_line_reverse():
    """Test stack from top stacked line"""
    chart = StackedLine(stack_from_top=True)
    chart.add('one_two', [1, 2])
    chart.add('ten_twelve', [10, 12])
    rendered = chart.render_pyquery()
    assert set(rendered("desc.value").text().split(' ')) == set(
        ('11', '14', '10', '12'))
def test_stacked_line_log():
    """Test logarithmic stacked line"""
    chart = StackedLine(logarithmic=True)
    chart.add('one_two', [1, 2])
    chart.add('ten_twelve', [10, 12])
    rendered = chart.render_pyquery()
    assert set(rendered("desc.value").text().split(' ')) == set(
        ('1', '2', '11', '14'))
"""Test interpolated stacked line"""
stacked = StackedLine(interpolate='cubic')
stacked.add('one_two', [1, 2])
stacked.add('ten_twelve', [10, 12])
q = stacked.render_pyquery()
assert set(q("desc.value").text().split(' ')) == set(
('1', '2', '11', '14'))
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class ProductSingleton(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        ProductSingleton - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'content': 'ProductRest'
        }
        self.attribute_map = {
            'content': 'content'
        }
        self._content = None
    @property
    def content(self):
        """
        Gets the content of this ProductSingleton.

        :return: The content of this ProductSingleton.
        :rtype: ProductRest
        """
        return self._content
    @content.setter
    def content(self, content):
        """
        Sets the content of this ProductSingleton.

        :param content: The content of this ProductSingleton.
        :type: ProductRest
        """
        self._content = content
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict.items() iterates identically on Python 2 and 3; no need for
        # six.iteritems here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, datetime):
                # Dates are serialized as their ISO date string.
                result[attr] = str(value.date())
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
|
agaldona/odoo-addons | procurement_purchase_by_sale_contract/tests/test_procurement_purchase_by_sale_contract.py | Python | agpl-3.0 | 2,609 | 0 | # -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3. | 0.html
import openerp.tests.common as common
class TestProcurementPurchaseBySaleContract(common.TransactionCase):
    """Check that a procurement generated by a confirmed sale order carries
    the sale's analytic account onto the resulting purchase line when the
    product category groups procured purchases by sale contract."""

    def setUp(self):
        super(TestProcurementPurchaseBySaleContract, self).setUp()
        self.procurement_model = self.env['procurement.order']
        self.sale_model = self.env['sale.order']
        # Configure the demo product: group purchases per sale contract and
        # route it Buy + Make-To-Order so a purchase is procured on sale.
        product = self.env.ref('product.product_product_36')
        product.categ_id.procured_purchase_grouping = 'sale_contract'
        product.route_ids = [(6, 0,
                              [self.ref('purchase.route_warehouse0_buy'),
                               self.ref('stock.route_warehouse0_mto')])]
        # Analytic account acting as the sale "contract".
        account_vals = {'name': 'purchase_order_line_with_sale_account',
                        'date_start': '2016-01-15',
                        'date': '2016-02-28'}
        self.account = self.env['account.analytic.account'].create(
            account_vals)
        # One-line sale order for 7 units, linked to the analytic account.
        sale_vals = {
            'partner_id': self.ref('base.res_partner_1'),
            'partner_shipping_id': self.ref('base.res_partner_1'),
            'partner_invoice_id': self.ref('base.res_partner_1'),
            'pricelist_id': self.env.ref('product.list0').id,
            'project_id': self.account.id}
        sale_line_vals = {
            'product_id': product.id,
            'name': product.name,
            'product_uom_qty': 7,
            'product_uos_qty': 7,
            'product_uom': product.uom_id.id,
            'price_unit': product.list_price}
        sale_vals['order_line'] = [(0, 0, sale_line_vals)]
        self.sale_order = self.sale_model.create(sale_vals)

    def test_procurement_purchase_by_sale_contract(self):
        """Confirm the sale, run its chained procurement, and assert the
        purchase line inherits an analytic account."""
        self.sale_order.action_button_confirm()
        # Procurement created directly from the sale line.
        cond = [('sale_line_id', '=', self.sale_order.order_line[0].id)]
        proc = self.procurement_model.search(cond, limit=1)
        # Find the follow-up (buy) procurement: same product/quantities and
        # group, created after the first one.
        cond = [('id', '>', proc.id),
                ('product_uom', '=', proc.product_uom.id),
                ('product_uos_qty', '=', proc.product_uos_qty),
                ('product_qty', '=', proc.product_qty),
                ('product_uos', '=', proc.product_uos.id),
                ('product_id', '=', proc.product_id.id),
                ('group_id', '=', proc.group_id.id)]
        proc = self.procurement_model.search(cond, limit=1)
        if proc.state == 'confirmed':
            proc.run()
        self.assertNotEqual(
            proc.purchase_line_id.account_analytic_id, False,
            'Purchase line without analytic account')
|
manahl/mockextras | mockextras/__init__.py | Python | bsd-2-clause | 69 | 0 | from ._stub | import *
from . | _fluent import *
from ._matchers import *
|
tomato42/fsresck | tests/nbd/test_request.py | Python | gpl-2.0 | 6,139 | 0.000163 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Description: File system resilience testing application
# Author: Hubert Kario <hubert@kario.pl>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright (c) 2015 Hubert Kario. All rights reserved.
#
# This copyrighted material is made available to anyone wishing
# to use, modify, copy, or redistribute it subject to the terms
# and c | onditions of the GNU General Public License version 2.
#
# This program i | s distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# compatibility with Python 2.6, for that we need unittest2 package,
# which is not available on 3.3 or 3.4
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import mock
from mock import call
except ImportError:
import unittest.mock as mock
from unittest.mock import call
from fsresck.nbd.request import NBDRequestSocket, recvexactly, Error, \
NBDRequest
from fsresck.compat import compat_str
class TestError(unittest.TestCase):
    """Tests for the request.Error exception type."""

    def test___repr__(self):
        # The repr of a raised Error must embed the module-qualified class
        # name together with the original message.
        with self.assertRaises(Error) as ctx:
            raise Error('test')
        representation = repr(ctx.exception)
        self.assertIn("request.Error('test'", representation)
class TestNBDRequest(unittest.TestCase):
    """Tests for the NBDRequest value object."""

    def test___init__(self):
        # Placeholder fields are enough to construct a request.
        self.assertIsNotNone(NBDRequest(None, None, None, None))

    def test___ne__(self):
        # Requests built from identical fields must not compare unequal.
        first = NBDRequest(1, 2, 3, 4)
        second = NBDRequest(1, 2, 3, 4)
        self.assertFalse(first != second)
class TestRecvexactly(unittest.TestCase):
    """Tests for the recvexactly() socket helper."""

    def test_zero_read(self):
        # A zero-length read never touches the socket, so None is fine.
        self.assertEqual(bytearray(0), recvexactly(None, 0))

    def test_full_read(self):
        fake_sock = mock.MagicMock()
        fake_sock.recv_into.return_value = 10
        buf = recvexactly(fake_sock, 10)
        self.assertEqual(bytearray(10), buf)
        fake_sock.recv_into.assert_called_once_with(buf, 10, 0)

    def test_partial_reads(self):
        # Two short reads (4 then 6 bytes) must be stitched together.
        fake_sock = mock.MagicMock()
        fake_sock.recv_into.side_effect = (4, 6)
        buf = recvexactly(fake_sock, 10)
        self.assertEqual(bytearray(10), buf)
        calls = fake_sock.recv_into.call_args_list
        self.assertEqual(len(calls), 2)
        # First call asks for all 10 bytes, the retry for the remaining 6.
        self.assertEqual(calls[0][0][1:], (10, 0))
        self.assertEqual(calls[1][0][1:], (6, 0))

    def test_broken_read(self):
        # recv_into returning 0 signals a closed peer -> Error.
        fake_sock = mock.MagicMock()
        fake_sock.recv_into.side_effect = (4, 0)
        with self.assertRaises(Error):
            recvexactly(fake_sock, 10)
class TestNBDRequestSocket(unittest.TestCase):
    """Tests for NBDRequestSocket wire (de)serialization.

    NBD request header layout: 4-byte magic, 4-byte command type,
    8-byte handle, 8-byte offset, 4-byte length; write commands are
    followed by a payload of ``length`` bytes.
    """

    def test___init__(self):
        # Construction alone must not touch the underlying socket.
        sock = NBDRequestSocket(None)
        self.assertIsNotNone(sock)

    @mock.patch('fsresck.nbd.request.recvexactly')
    def test_recv(self, mock_mthd):
        # A READ command: header only, no payload read afterwards.
        mock_mthd.return_value = bytearray(
            b'\x25\x60\x95\x13'  # magic value
            b'\x00\x00\x00\x00'  # command type - read
            b'\x50\xe4\x93\x01\x00\x88\xff\xff'  # handle
            b'\x00\x00\x00\x00\x00\x00\x00\x00'  # offset
            b'\x00\x00\x40\x00'  # length
            )
        obj = NBDRequestSocket(None).recv()
        self.assertEqual(NBDRequest(0, 0x50e493010088ffff, 0, 0x4000), obj)

    @mock.patch('fsresck.nbd.request.recvexactly')
    def test_recv_write(self, mock_mthd):
        # A WRITE command: first call returns the header, second the payload.
        mock_mthd.side_effect = (bytearray(
            b'\x25\x60\x95\x13'  # magic value
            b'\x00\x00\x00\x01'  # command type - write
            b'\x50\xe4\x93\x01\x00\x88\xff\xff'  # handle
            b'\x00\x00\x00\x00\x00\x00\x00\x00'  # offset
            b'\x00\x00\x00\x04'),  # length
            bytearray(
            b'\xff\xff\xff\xff'  # payload
            ))
        obj = NBDRequestSocket(None).recv()
        self.assertEqual(bytearray(b'\xff'*4), obj.data)
        self.assertEqual(NBDRequest(1, 0x50e493010088ffff, 0, 0x04,
                                    bytearray(b'\xff'*4)), obj)

    @mock.patch('fsresck.nbd.request.recvexactly')
    def test_recv_bad_write(self, mock_mthd):
        # A corrupted magic value must be rejected with Error.
        mock_mthd.return_value = bytearray(
            b'\x25\x60\x95\x14'  # bad magic value
            b'\x00\x00\x00\x00'  # command type - read
            b'\x50\xe4\x93\x01\x00\x88\xff\xff'  # handle
            b'\x00\x00\x00\x00\x00\x00\x00\x00'  # offset
            b'\x00\x00\x40\x00'  # length
            )
        sock = NBDRequestSocket(None)
        with self.assertRaises(Error):
            sock.recv()

    def test_send_read(self):
        # Sending a READ serializes the 28-byte header in one sendall().
        raw_sock = mock.MagicMock()
        raw_sock.sendall.return_value = None
        cmd = NBDRequest(0, 0x134, 0, 0x4000)
        sock = NBDRequestSocket(raw_sock)
        sock.send(cmd)
        raw_sock.sendall.assert_called_once_with(compat_str(bytearray(
            b'\x25\x60\x95\x13'
            b'\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x014'
            b'\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00@\x00')))

    def test_send_write(self):
        # Sending a WRITE appends the payload right after the header.
        raw_sock = mock.MagicMock()
        raw_sock.sendall.return_value = None
        cmd = NBDRequest(1, 0x134, 0, 0x04, bytearray(b'\xff'*4))
        sock = NBDRequestSocket(raw_sock)
        sock.send(cmd)
        raw_sock.sendall.assert_called_once_with(compat_str(bytearray(
            b'\x25\x60\x95\x13'
            b'\x00\x00\x00\x01'
            b'\x00\x00\x00\x00\x00\x00\x014'
            b'\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\x04'
            b'\xff\xff\xff\xff')))
|
sam81/pysoundanalyser | pysoundanalyser/dialog_apply_filter.py | Python | gpl-3.0 | 11,571 | 0.00458 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2017 Samuele Carcagno <sam.carcagno@gmail.com>
# This file is part of pysoundanalyser
# pysoundanalyser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pysoundanalyser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pysoundanalyser. If not, see <http://www.gnu.org/licenses/>.
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
from .pyqtver import*
if pyqtversion == 4:
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import QComboBox, QDialog, QDialogButtonBox, QDoubleValidator, QGridLayout, QIntValidator, QLabel, QLineEdit, QVBoxLayout
elif pyqtversion == -4:
from PySide import QtGui, QtCore
from PySide.QtGui import QComboBox, QDialog, QDialogButtonBox, QDoubleValidator, QGridLayout, QIntValidator, QLabel, QLineEdit, QVBoxLayout
elif pyqtversion == 5:
from PyQt5 import QtGui, QtCore
from PyQt5.QtGui import QDoubleValidator, QIntValidator
from PyQt5.QtWidgets import QComboBox, QDialog, QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QVBoxLayout
class applyFIR2PresetsDialog(QDialog):
    def __init__(self, parent):
        """Build the FIR2 filter dialog: filter type chooser, filter order
        field, and the cutoff widgets for the default 'lowpass' type.

        ``parent`` is the main window; its ``prm`` dict supplies the
        current locale used to format numeric defaults.
        """
        QDialog.__init__(self, parent)
        self.prm = parent.prm
        # Locale used for number formatting/parsing in the line edits.
        self.currLocale = self.parent().prm['data']['currentLocale']
        self.currLocale.setNumberOptions(self.currLocale.OmitGroupSeparator | self.currLocale.RejectGroupSeparator)
        vbl = QVBoxLayout()
        self.grid = QGridLayout()
        # Filter type selector; changing it swaps the cutoff widgets below.
        filterTypeLabel = QLabel(self.tr('Filter Type: '))
        self.filterChooser = QComboBox()
        self.filterChooser.addItems([self.tr('lowpass'), self.tr('highpass'), self.tr('bandpass'), self.tr('bandstop')])
        self.filterChooser.setCurrentIndex(0)
        self.grid.addWidget(self.filterChooser, 0, 1)
        self.filterChooser.currentIndexChanged[int].connect(self.onChangeFilterType)
        # FIR filter order (number of taps), integer only.
        self.filterOrderLabel = QLabel(self.tr('Filter Order: '))
        self.filterOrderWidget = QLineEdit('256')
        self.filterOrderWidget.setValidator(QIntValidator(self))
        self.grid.addWidget(self.filterOrderLabel, 0, 2)
        self.grid.addWidget(self.filterOrderWidget, 0, 3)
        # Start in 'lowpass' mode: single cutoff plus an end-of-transition
        # band expressed as a multiple of the cutoff frequency.
        self.currFilterType = self.tr('lowpass')
        self.cutoffLabel = QLabel(self.tr('Cutoff: '))
        self.endCutoffLabel = QLabel(self.tr('End Transition Band = Cutoff *'))
        self.cutoffWidget = QLineEdit('')
        self.cutoffWidget.setValidator(QDoubleValidator(self))
        endCutoff = 1.2
        self.endCutoffWidget = QLineEdit(self.currLocale.toString(endCutoff))
        self.endCutoffWidget.setValidator(QDoubleValidator(self))
        self.grid.addWidget(self.cutoffLabel, 2, 1)
        self.grid.addWidget(self.cutoffWidget, 2, 2)
        self.grid.addWidget(self.endCutoffLabel, 2, 3)
        self.grid.addWidget(self.endCutoffWidget, 2, 4)
        # Standard OK/Cancel row wired to the dialog accept/reject slots.
        buttonBox = QDialogButtonBox(QDialogButtonBox.Ok|
                                     QDialogButtonBox.Cancel)
        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)
        vbl.addLayout(self.grid)
        vbl.addWidget(buttonBox)
        self.setLayout(vbl)
        self.setWindowTitle(self.tr("Apply Filter"))
def onChangeFilterType(self):
prevFilterType = self.currFilterType
self.currFilterType = str(self.filterChooser.currentText())
if self.currFilterType != prevFilterType:
if prevFilterType == self.tr('lowpass'):
self.grid.removeWidget(self.cutoffLabel)
#self.cutoffLabel.setParent(None)
self.cutoffLabel.deleteLater()
self.grid.removeWidget(self.endCutoffLabel)
#self.endCutoffLabel.setParent(None)
self.endCutoffLabel.deleteLater()
self.grid.removeWidget(self.cutoffWidget)
#self.cutoffWidget.setParent(None)
self.cutoffWidget.deleteLater()
self.grid.rem | oveWidget(self.endCutoffWidget)
#self.endCutoffWidget.setParent(None)
self.endCutoffWidget.deleteLater()
elif prevFilterType == self.tr('highpass'):
self.grid.removeWidget(self.cutoffLabel)
#self.cutoffLabel.setParent(None)
self.cutoffLabel.deleteLater()
self.grid.removeWidget(self.startCutoffLabel)
| #self.startCutoffLabel.setParent(None)
self.startCutoffLabel.deleteLater()
self.grid.removeWidget(self.cutoffWidget)
#self.cutoffWidget.setParent(None)
self.cutoffWidget.deleteLater()
self.grid.removeWidget(self.startCutoffWidget)
#self.startCutoffWidget.setParent(None)
self.startCutoffWidget.deleteLater()
elif prevFilterType == self.tr('bandpass') or prevFilterType == self.tr('bandstop'):
self.grid.removeWidget(self.lowerCutoffLabel)
#self.lowerCutoffLabel.setParent(None)
self.lowerCutoffLabel.deleteLater()
self.grid.removeWidget(self.startCutoffLabel)
#self.startCutoffLabel.setParent(None)
self.startCutoffLabel.deleteLater()
self.grid.removeWidget(self.lowerCutoffWidget)
#self.lowerCutoffWidget.setParent(None)
self.lowerCutoffWidget.deleteLater()
self.grid.removeWidget(self.startCutoffWidget)
#self.startCutoffWidget.setParent(None)
self.startCutoffWidget.deleteLater()
self.grid.removeWidget(self.higherCutoffLabel)
#self.higherCutoffLabel.setParent(None)
self.higherCutoffLabel.deleteLater()
self.grid.removeWidget(self.endCutoffLabel)
#self.endCutoffLabel.setParent(None)
self.endCutoffLabel.deleteLater()
self.grid.removeWidget(self.higherCutoffWidget)
#self.higherCutoffWidget.setParent(None)
self.higherCutoffWidget.deleteLater()
self.grid.removeWidget(self.endCutoffWidget)
#self.endCutoffWidget.setParent(None)
self.endCutoffWidget.deleteLater()
if self.currFilterType == self.tr('lowpass'):
self.cutoffLabel = QLabel(self.tr('Cutoff: '))
self.endCutoffLabel = QLabel(self.tr('End Transition Band = Cutoff *'))
self.cutoffWidget = QLineEdit('')
self.cutoffWidget.setValidator(QDoubleValidator(self))
endCutoff = 1.2
self.endCutoffWidget = QLineEdit(self.currLocale.toString(endCutoff))
self.endCutoffWidget.setValidator(QDoubleValidator(self))
self.grid.addWidget(self.cutoffLabel, 2, 1)
self.grid.addWidget(self.cutoffWidget, 2, 2)
self.grid.addWidget(self.endCutoffLabel, 2, 3)
self.grid.addWidget(self.endCutoffWidget, 2, 4)
elif self.currFilterType == self.tr('highpass'):
self.cutoffLabel = QLabel(self.tr('Cutoff: '))
self.startCutoffLabel = QLabel(self.tr('Start Transition Band = Cutoff *'))
self.cutoffWidget = QLineEdit('')
self.cutoffWidget.setValidator(QDoubleValidator(self))
startCutoff = 0.8
self.startCutoffWidget = QLineEdit(self.currLocale.toString(startCutoff))
self.startCutoffWidget.setValidator(QDoubleValidator(self))
self.grid.addWidget(s |
hb9kns/PyBitmessage | src/helper_random.py | Python | mit | 172 | 0.005814 | import | os
from pyelliptic.openssl import OpenSSL
def randomBytes(n):
    """Return *n* cryptographically secure random bytes.

    Prefers the operating system's CSPRNG via ``os.urandom``; on platforms
    where that is not implemented, falls back to OpenSSL's RAND_bytes.
    """
    try:
        return os.urandom(n)
    except NotImplementedError:
        return OpenSSL.rand(n)
|
cschulee/ee542-code | find_leds.py | Python | mit | 2,091 | 0.020086 | # find_leds.py Find LEDs
# 2014-10-30
# The purpose of this script is to find the
# coordinates of Light Emitting Diodes in
# camera frame
# Currently it only looks for green and stores
# an intermediate and an altered picture back
# to the current working directory, to show
# things are working
# Import necessary packages
import picamera
import cv2
import numpy as np
# Connect to the Pi camera and capture a single frame; hflip/vflip
# compensate for how the camera module is physically mounted.
camera = picamera.PiCamera()
camera.hflip = True
camera.vflip = True
IMAGE = '/home/pi/ee542-code/images/align.jpg'
IMAGE_THRESH = '/home/pi/ee542-code/images/align_threshed.jpg'
IMAGE_MARKED = '/home/pi/ee542-code/images/align_marked.jpg'
camera.capture(IMAGE)
img = cv2.imread(IMAGE)
camera.close()
# HSV color ranges; only the blue range is currently in use.
#GREEN_MIN = np.array([20,100,200],np.uint8)
#GREEN_MAX = np.array([80,220,255],np.uint8)
#YELLOW_MIN = np.array([20,100,100],np.uint8)
#YELLOW_MAX = np.array([40,255,255],np.uint8)
BLUE_MIN= np.array([80,100,150],np.uint8)
BLUE_MAX= np.array([120,255,255],np.uint8)
# Convert img to Hue, Saturation, Value format (OpenCV loads as BGR)
hsv_img = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
# Threshold the image - results in b&w graphic where
# in-threshold pixels are white and out-of-threshold
# pixels are black
img_threshed = cv2.inRange(hsv_img, BLUE_MIN, BLUE_MAX)
# Find circular blobs (the LEDs) in the thresholded image
# (legacy cv2.cv API -- this requires OpenCV 2.x).
circles = cv2.HoughCircles(img_threshed,cv2.cv.CV_HOUGH_GRADIENT,10,5,param1=200,param2=5,minRadius=0,maxRadius=25)
# Mark each detected circle outline (green) and center (red).
for i in circles[0,:]:
    cv2.circle(img,(i[0],i[1]),i[2],(0,255,0),2)
    cv2.circle(img,(i[0],i[1]),2,(0,0,255),3)
# Temporary - save the files back to disk to view later.
# NOTE(review): these write into the current working directory, while the
# unused IMAGE_THRESH / IMAGE_MARKED constants above look like the intended
# destinations -- confirm before changing.
cv2.imwrite('img_threshed.jpg',img_threshed)
cv2.imwrite('marked_image.jpg',img)
# Average x of all detected circles, and vertical distance between the
# first two circles -- used to estimate lateral and forward offsets.
# The pixels-to-inches factors (/10, /2.4) appear empirically calibrated.
avg_x = np.average([x[0] for x in circles[0]])
h = abs(circles[0][0][1] - circles[0][1][1])
print 'Average apparant horizontal coordinate was ' + str(avg_x) + ' pixels'
print 'Actual lateral offset is ' + str((avg_x - (640/2))/10) + ' inches'
print 'Average appparant height between LEDs was ' + str(h) + ' pixels'
print 'Actual forward offset is ' + str(h/2.4) + ' inches'
|
plotly/plotly.py | packages/python/plotly/plotly/validators/treemap/_ids.py | Python | mit | 430 | 0 | import _plot | ly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``ids`` data array of ``treemap`` traces."""

    def __init__(self, plotly_name="ids", parent_name="treemap", **kwargs):
        # Pop the overridable defaults out of kwargs first so that values
        # supplied by the caller win over the hard-coded ones.
        anim = kwargs.pop("anim", True)
        edit_type = kwargs.pop("edit_type", "calc")
        super(IdsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            anim=anim,
            edit_type=edit_type,
            **kwargs
        )
|
i5o/openshot-sugar | openshot/windows/TransitionProperties.py | Python | gpl-3.0 | 7,648 | 0.037003 | # OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
import os
import gtk
from classes import messagebox, profiles, project, video
from windows.SimpleGtkBuilderApp import SimpleGtkBuilderApp
# init the foreign language
from language import Language_Init
class frmTransitionProperties(SimpleGtkBuilderApp):
    """GTK dialog for editing a timeline transition: its type
    (transition/mask), direction, position, length, softness and mask
    threshold."""

    def __init__(self, path="TransitionProperties.ui", root="frmTransitionProperties", domain="OpenShot", form=None, project=None, current_transition=None, **kwargs):
        SimpleGtkBuilderApp.__init__(self, os.path.join(project.UI_DIR, path), root, domain, **kwargs)
        # Add language support
        _ = Language_Init.Translator(project).lang.gettext
        self._ = _
        # populate the type combo box
        options = [_("Transition"), _("Mask")]
        for option in options:
            self.cboType.append_text(option)
        # populate the direction combo box
        options = [_("Up"), _("Down")]
        for option in options:
            self.cboDirection.append_text(option)
        self.form = form
        self.project = project
        self.current_transition = current_transition
        self.frmTransitionProperties.show_all()
        # init the widgets from the transition being edited
        self.lblName.set_text(self.current_transition.name)
        self.spinbtnStart.set_value(round(self.current_transition.position_on_track, 2))
        self.spinbtnLength.set_value(round(self.current_transition.length , 2))
        # softness is stored as 0..1 but displayed as a 0..100 slider
        self.hsSoftness.set_value(self.current_transition.softness * 100.0)
        self.hsThreshold.set_value(self.current_transition.mask_value)
        # select the current type/direction in the dropdown boxes
        self.set_type_dropdown()
        self.set_direction_dropdown()

    def set_type_dropdown(self):
        """Select the combo entry matching the transition's type and toggle
        the mask-threshold slider accordingly."""
        # get correct gettext method
        _ = self._
        # get the model and iterator of the type dropdown box
        model = self.cboType.get_model()
        iter = model.get_iter_first()
        while True:
            # get the value of each item in the dropdown
            value = model.get_value(iter, 0)
            # check for the matching type (compared against the translation)
            if self.current_transition.type == "mask" and value.lower() == _("Mask").lower():
                # set the item as active
                self.cboType.set_active_iter(iter)
                break
            if self.current_transition.type == "transition" and value.lower() == _("Transition").lower():
                # set the item as active
                self.cboType.set_active_iter(iter)
                break
            # get the next item in the list
            iter = model.iter_next(iter)
            # break loop when no more dropdown items are found
            if iter is None:
                break
        # the threshold slider only applies to masks
        if self.current_transition.type == "transition":
            self.hsThreshold.set_sensitive(False)
        else:
            self.hsThreshold.set_sensitive(True)

    def set_direction_dropdown(self):
        """Select the combo entry matching the transition's direction and
        disable the direction combo for masks."""
        # get correct gettext method
        _ = self._
        # get the model and iterator of the direction dropdown box
        model = self.cboDirection.get_model()
        iter = model.get_iter_first()
        while True:
            # get the value of each item in the dropdown
            value = model.get_value(iter, 0).lower()
            # reverse == False means "Up", reverse == True means "Down"
            if self.current_transition.reverse == False and value == _("Up").lower():
                # set the item as active
                self.cboDirection.set_active_iter(iter)
            if self.current_transition.reverse == True and value == _("Down").lower():
                # set the item as active
                self.cboDirection.set_active_iter(iter)
            # get the next item in the list
            iter = model.iter_next(iter)
            # break loop when no more dropdown items are found
            if iter is None:
                break
        # NOTE(review): this compares the stored type ("mask"/"transition")
        # with a lowercased *translated* string -- presumably only correct
        # for English locales; confirm.
        if self.current_transition.type == _("Mask").lower():
            self.cboDirection.set_sensitive(False)
        else:
            self.cboDirection.set_sensitive(True)

    def on_cboType_changed(self, widget, *args):
        """Enable/disable direction and threshold widgets when the type
        combo changes (masks have no direction; transitions no threshold)."""
        print "on_cboType_changed"
        # get correct gettext method
        _ = self._
        # get new type
        localType = self.cboType.get_active_text()
        # masks have no direction
        if localType.lower() == _("Mask").lower():
            self.cboDirection.set_sensitive(False)
        else:
            self.cboDirection.set_sensitive(True)
        # transitions have no mask threshold
        if localType.lower() == _("Transition").lower():
            self.hsThreshold.set_sensitive(False)
        else:
            self.hsThreshold.set_sensitive(True)

    def on_spinbtnStart_value_changed(self, widget, *args):
        """Prevents any part of the transition to go outside the timeline.
        A transition outside the timeline is a worthless transition."""
        start = self.spinbtnStart.get_value()
        length = self.spinbtnLength.get_value()
        end = start + length
        timeline_length = self.project.sequences[0].length
        if end > timeline_length:
            # clamp the start so start + length stays inside the timeline
            self.spinbtnStart.set_value(timeline_length - length)

    def on_spinbtnLength_value_changed(self, widget, *args):
        """Prevents any part of the transition to go outside the timeline.
        A transition outside the timeline is a worthless transition."""
        start = self.spinbtnStart.get_value()
        length = self.spinbtnLength.get_value()
        end = start + length
        timeline_length = self.project.sequences[0].length
        if end > timeline_length:
            # clamp the length so start + length stays inside the timeline
            self.spinbtnLength.set_value(timeline_length - start)

    def on_btnCancel_clicked(self, widget, *args):
        """Discard all edits and close the dialog."""
        print "on_btnCancel_clicked"
        self.frmTransitionProperties.destroy()

    def on_btnApply_clicked(self, widget, *args):
        """Write the widget values back to the transition, mark the project
        modified, regenerate the MLT XML and close the dialog."""
        print "on_btnApply_clicked"
        # get correct gettext method
        _ = self._
        # Get settings
        localStart = self.spinbtnStart.get_value()
        localLength = self.spinbtnLength.get_value()
        localcboType = self.cboType.get_active_text()
        localcboDirection = self.cboDirection.get_active_text().lower()
        localhsSoftness = self.hsSoftness.get_value()
        localhsThreshold = self.hsThreshold.get_value()
        # update position and length
        self.current_transition.position_on_track = localStart
        self.current_transition.length = localLength
        # update transition object (store untranslated type keys)
        if localcboType.lower() == _("Mask").lower():
            self.current_transition.type = "mask"
        else:
            self.current_transition.type = "transition"
        if localcboDirection == _("Up").lower():
            self.current_transition.reverse = False
        else:
            self.current_transition.reverse = True
        # slider shows 0..100, model stores 0..1
        self.current_transition.softness = float(localhsSoftness) / 100.0
        self.current_transition.mask_value = localhsThreshold
        # mark project as modified
        self.project.set_project_modified(is_modified=True, refresh_xml=True, type = self._("Modified transition properties"))
        # Refresh the MLT XML file
        self.project.RefreshXML()
        # Refresh form
        self.project.form.refresh()
        # close window
        self.frmTransitionProperties.destroy()
self.frmTransitionProperties.destroy()
def main():
    """Standalone entry point for manually exercising the dialog.

    Bug fix: the original assigned the new instance to a local variable
    named exactly like the class (``frmTransitionProperties = ...``), which
    shadows the class inside the function and raises UnboundLocalError
    before the constructor can even run.
    """
    # NOTE(review): the constructor dereferences ``project.UI_DIR`` and so
    # still cannot run with the default project=None -- presumably this
    # entry point was only ever a scaffold; confirm before relying on it.
    dialog = frmTransitionProperties()
    dialog.run()

if __name__ == "__main__":
    main()
|
Alshain-Oy/Cloudsnake-Application-Server | code_examples/class_test_01.py | Python | apache-2.0 | 546 | 0.062271 | #!/usr/bin/env python
# Cloudsnake Application server
# Licensed under | Apache License, see license.txt
# Author: Markus Gronholm <markus@alshain.fi> Alshain Oy
class Luokka(object):
    """Trivial value holder: remembers one value and returns it on demand."""

    def __init__(self, N):
        # Store the constructor argument as the wrapped value ("luku").
        self.luku = N

    def test(self):
        """Return the stored value."""
        return self.luku
def test_001( data ):
	"""Demo handler: instantiate Luokka(7) through the cloudSnake RPC layer
	and print the stored value to the server output stream.
	"""
	# NOTE(review): ``cloudSnake`` is not defined in this file -- presumably
	# it is injected as a global by the application server at run time.
	#print >> cloudSnake.output, "Moi kaikki"
	#print >> cloudSnake.output, cloudSnake.call( 'mean', [ [1,2,3,4] ] )
	print >> cloudSnake.output, "Luokkakoe nro 1"
	otus = cloudSnake.call( 'Luokka', [7] )
	print >> cloudSnake.output, otus.test()
|
zstackio/zstack-woodpecker | integrationtest/vm/mini/image_replication/test_replicate_image_with_bs_almost_full.py | Python | apache-2.0 | 2,743 | 0.002552 | '''
New Integration test for image replication.
Test Image Replicating while ImageStore Backup storage almost full
@author: Legion
'''
import os
import time
import random
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
from zstacklib.utils import ssh
# Timestamped image name so repeated runs never collide.
image_name = 'image-replication-test-' + time.strftime('%y%m%d%H%M%S', time.localtime())
test_stub = test_lib.lib_get_test_stub()
img_repl = test_stub.ImageReplication()
# Shell command that removes the fallocate()d space-filler on a storage host.
remove_file_cmd = 'rm -rf big_size_file'
# The two ImageStore backup storages under test; populated in test().
bs = None
bs2 = None
def test():
    """Exercise image replication while both ImageStore backup storages are
    almost full.

    Fills each storage with a fallocate()d file (leaving only ~9.2e9 and
    ~1e10 bytes free respectively), then adds images, boots a VM from a
    replicated image and creates a root-volume template, waiting for
    replication at each step.
    """
    global bs
    global bs2
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = os.getenv('zstackHaVip')
    img_repl.clean_on_expunge()
    # Pick two distinct backup storages at random.
    bs_list = img_repl.get_bs_list()
    bs = random.choice(bs_list)
    bs_list.remove(bs)
    bs2 = bs_list[0]
    # Size the filler files so only a small amount of capacity remains.
    fallocate_size = int(bs.availableCapacity) - 9169934592
    fallocate_size2 = int(bs2.availableCapacity) - 9989934592
    fallocate_cmd = 'fallocate -l %s big_size_file' % fallocate_size
    fallocate_cmd2 = 'fallocate -l %s big_size_file' % fallocate_size2
    ssh.execute(fallocate_cmd, bs.hostname, 'root', 'password', False)
    ssh.execute(fallocate_cmd2, bs2.hostname, 'root', 'password', False)
    # Large (windows) image: add, wait for replication, then drop it.
    img_repl.add_image(image_name, bs_uuid=bs.uuid, url=os.getenv('imageUrl_windows'))
    img_repl.wait_for_image_replicated(image_name)
    img_repl.delete_image()
    img_repl.expunge_image()
    time.sleep(3)
    # Raw image: add it and boot a VM from it.
    img_repl.add_image(image_name, bs_uuid=bs.uuid, url=os.getenv('imageUrl_raw'))
    img_repl.create_vm(image_name, 'image-replication-test-vm')
    img_repl.delete_image()
    img_repl.expunge_image()
    time.sleep(3)
    # Create a root-volume template from the VM and wait for replication.
    img_repl.crt_vm_image('image-replication-test-root-template')
    img_repl.wait_for_image_replicated('image-replication-test-root-template')
    # Remove the space fillers before declaring success.
    ssh.execute(remove_file_cmd, bs.hostname, 'root', 'password', False)
    ssh.execute(remove_file_cmd, bs2.hostname, 'root', 'password', False)
    test_util.test_pass('Batch Create VM during Image Replicating Test Success')
def env_recover():
    """Normal-path teardown: reclaim image-store space, remove the
    space-filler file from both storages and best-effort destroy the VM."""
    global bs, bs2
    img_repl.reclaim_space_from_bs()
    for storage in (bs, bs2):
        ssh.execute(remove_file_cmd, storage.hostname, 'root', 'password', False)
    try:
        img_repl.vm.destroy()
    except:
        pass
pass
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup, invoked only when test() raised part-way."""
    global bs, bs2
    for storage in (bs, bs2):
        ssh.execute(remove_file_cmd, storage.hostname, 'root', 'password', False)
    try:
        img_repl.delete_image()
        img_repl.expunge_image()
        img_repl.reclaim_space_from_bs()
        img_repl.vm.destroy()
    except:
        pass
|
jkolbe/INF1340-Fall14-A1 | exercise2.py | Python | mit | 1,039 | 0.002887 | #!/usr/bin/env python3
"""
Perform a checksum on a UPC
Assignment 1, Exercise 2, INF1340 Fall 2014
"""
__autho | r__ = 'Joanna Kolbe, Tania Misquitta'
__email__ = "joannakolbe@gmail.com"
__copyright__ = "2014 JK, TM"
__status__ = "Prototype"
# imports one per line
def checksum(upc):
    """
    Checks if the digits in a UPC is consistent with checksum

    :param upc: a 12-digit universal product code
    :return:
        Boolean: True, checksum is correct
                 False, otherwise
    :raises:
        TypeError if input is not a string
        ValueError if string is the wrong length (with error string stating
        how many digits are over or under)
    """
    # check type of input, raise TypeError if not string
    if not isinstance(upc, str):
        raise TypeError("input must be a string")
    # check length of string, raise ValueError if not 12,
    # stating how many digits are over or under
    difference = len(upc) - 12
    if difference > 0:
        raise ValueError("%d digits over" % difference)
    if difference < 0:
        raise ValueError("%d digits under" % -difference)
    # convert string to an array of single integer digits
    # hint: use the list function
    digits = [int(character) for character in list(upc)]
    # generate checksum using the first 11 digits provided:
    # UPC-A weighs digits in odd positions (1st, 3rd, ..., 11th) by 3
    # and digits in even positions by 1; the check digit brings the
    # weighted total up to the next multiple of 10.
    total = 3 * sum(digits[0:11:2]) + sum(digits[1:11:2])
    expected = (10 - total % 10) % 10
    # check against the twelfth digit
    return expected == digits[11]
|
iglpdc/nipype | nipype/interfaces/dipy/tests/test_auto_TensorMode.py | Python | bsd-3-clause | 882 | 0.010204 | # AUTO-GENERATE | D by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..tensors import TensorMode
def test_TensorMode_inputs():
    """Verify the trait metadata declared on TensorMode's input spec."""
    expected = {
        'bvals': {'mandatory': True},
        'bvecs': {'mandatory': True},
        'in_file': {'mandatory': True},
        'mask_file': {},
        'out_filename': {'genfile': True},
    }
    spec = TensorMode.input_spec()
    # Yield one assert per (trait, metadata key) pair, nose-style.
    for trait_name, metadata in list(expected.items()):
        for meta_key, meta_value in list(metadata.items()):
            yield assert_equal, getattr(spec.traits()[trait_name], meta_key), meta_value
def test_TensorMode_outputs():
    """Verify the trait metadata declared on TensorMode's output spec."""
    expected = {
        'out_file': {},
    }
    spec = TensorMode.output_spec()
    # Yield one assert per (trait, metadata key) pair, nose-style.
    for trait_name, metadata in list(expected.items()):
        for meta_key, meta_value in list(metadata.items()):
            yield assert_equal, getattr(spec.traits()[trait_name], meta_key), meta_value
|
jackrzhang/zulip | zerver/templatetags/app_filters.py | Python | apache-2.0 | 6,764 | 0.001774 | import os
from html import unescape
from typing import Any, Dict, List, Optional
import markdown
import markdown.extensions.admonition
import markdown.extensions.codehilite
import markdown.extensions.extra
import markdown.extensions.toc
import markdown_include.include
from django.conf import settings
from django.template import Library, engines, loader
from django.utils.safestring import mark_safe
from jinja2.exceptions import TemplateNotFound
import zerver.lib.bugdown.fenced_code
import zerver.lib.bugdown.api_arguments_table_generator
import zerver.lib.bugdown.api_code_examples
import zerver.lib.bugdown.nested_code_blocks
import zerver.lib.bugdown.tabbed_sections
import zerver.lib.bugdown.help_settings_links
import zerver.lib.bugdown.help_relative_links
import zerver.lib.bugdown.help_emoticon_translations_table
from zerver.context_processors import zulip_default_context
from zerver.lib.cache import ignore_unhashable_lru_cache
register = Library()
def and_n_others(values: List[str], limit: int) -> str:
    # Helper for the commonly appended "and N other(s)" suffix, with the
    # appropriate pluralization: exactly one hidden value -> "other".
    hidden = len(values) - limit
    plural = "" if hidden == 1 else "s"
    return " and %d other%s" % (hidden, plural)
@register.filter(name='display_list', is_safe=True)
def display_list(values: List[str], display_limit: int) -> str:
    """
    Given a list of values, return a string nicely formatting those values,
    summarizing when you have more than `display_limit`. Eg, for a
    `display_limit` of 3 we get the following possible cases:

    Jessica
    Jessica and Waseem
    Jessica, Waseem and Tim
    Jessica, Waseem, Tim, and 1 other
    Jessica, Waseem, Tim, and 2 others
    """
    if len(values) == 1:
        # One value: no separators needed at all.
        return "%s" % (values[0],)
    if len(values) <= display_limit:
        # Short enough to show everything: comma-join all but the last
        # value, then attach the final one with "and".
        head = ", ".join("%s" % (value,) for value in values[:-1])
        return head + " and %s" % (values[-1],)
    # Too many values: show the first display_limit, summarize the rest.
    shown = ", ".join("%s" % (value,) for value in values[:display_limit])
    return shown + and_n_others(values, display_limit)
# Module-level caches for the markdown extension lists; populated lazily
# on the first call to render_markdown_path and reused for every
# subsequent render.
md_extensions = None  # type: Optional[List[Any]]
md_macro_extension = None  # type: Optional[Any]
# Prevent the automatic substitution of macros in these docs. If
# they contain a macro, it is always used literally for documenting
# the macro system.
docs_without_macros = [
    "incoming-webhooks-walkthrough.md",
]
# Much of the time, render_markdown_path is called with hashable
# arguments, so this decorator is effective even though it only caches
# the results when called if none of the arguments are unhashable.
@ignore_unhashable_lru_cache(512)
@register.filter(name='render_markdown_path', is_safe=True)
def render_markdown_path(markdown_file_path: str,
                         context: Optional[Dict[Any, Any]]=None,
                         pure_markdown: Optional[bool]=False) -> str:
    """Given a path to a markdown file, return the rendered html.

    Note that this assumes that any HTML in the markdown file is
    trusted; it is intended to be used for documentation, not user
    data.

    The file is first loaded as a Jinja2 template (so templates can use
    both Jinja2 context variables and markdown includes), then converted
    to HTML, and finally rendered with `context`.  If `pure_markdown` is
    True and the path is not a known template, the file is read straight
    from disk instead.
    """
    if context is None:
        context = {}
    # We set this global hackishly; the bugdown link extensions read it
    # while rendering below.
    from zerver.lib.bugdown.help_settings_links import set_relative_settings_links
    set_relative_settings_links(bool(context.get('html_settings_links')))
    from zerver.lib.bugdown.help_relative_links import set_relative_help_links
    set_relative_help_links(bool(context.get('html_settings_links')))
    global md_extensions
    global md_macro_extension
    # Build the (module-level cached) extension lists on first use.
    if md_extensions is None:
        md_extensions = [
            markdown.extensions.extra.makeExtension(),
            markdown.extensions.toc.makeExtension(),
            markdown.extensions.admonition.makeExtension(),
            markdown.extensions.codehilite.makeExtension(
                linenums=False,
                guess_lang=False
            ),
            zerver.lib.bugdown.fenced_code.makeExtension(),
            zerver.lib.bugdown.api_arguments_table_generator.makeExtension(
                base_path='templates/zerver/api/'),
            zerver.lib.bugdown.api_code_examples.makeExtension(),
            zerver.lib.bugdown.nested_code_blocks.makeExtension(),
            zerver.lib.bugdown.tabbed_sections.makeExtension(),
            zerver.lib.bugdown.help_settings_links.makeExtension(),
            zerver.lib.bugdown.help_relative_links.makeExtension(),
            zerver.lib.bugdown.help_emoticon_translations_table.makeExtension(),
        ]
    if md_macro_extension is None:
        md_macro_extension = markdown_include.include.makeExtension(
            base_path='templates/zerver/help/include/')
    # Docs listed in docs_without_macros must not have macros expanded, so
    # they get an engine without the include extension.
    if any(doc in markdown_file_path for doc in docs_without_macros):
        md_engine = markdown.Markdown(extensions=md_extensions)
    else:
        md_engine = markdown.Markdown(extensions=md_extensions + [md_macro_extension])
    md_engine.reset()
    jinja = engines['Jinja2']
    try:
        # By default, we do both Jinja2 templating and markdown
        # processing on the file, to make it easy to use both Jinja2
        # context variables and markdown includes in the file.
        markdown_string = jinja.env.loader.get_source(jinja.env, markdown_file_path)[0]
    except TemplateNotFound as e:
        if pure_markdown:
            # For files such as /etc/zulip/terms.md where we don't intend
            # to use Jinja2 template variables, we still try to load the
            # template using Jinja2 (in case the file path isn't absolute
            # and does happen to be in Jinja's recognized template
            # directories), and if that fails, we try to load it directly
            # from disk.
            with open(markdown_file_path) as fp:
                markdown_string = fp.read()
        else:
            raise e
    html = md_engine.convert(markdown_string)
    rendered_html = jinja.from_string(html).render(context)
    if context.get('unescape_rendered_html', False):
        # In some exceptional cases (such as our Freshdesk webhook docs),
        # code blocks in some of our Markdown templates have characters such
        # as '{' encoded as '&#123;' to prevent clashes with Jinja2 syntax,
        # but the encoded form never gets decoded because the text ends up
        # inside a <pre> tag. So here, we explicitly "unescape" such characters
        # if 'unescape_rendered_html' is True.
        rendered_html = unescape(rendered_html)
    return mark_safe(rendered_html)
|
soscpd/bee | root/tests/zguide/examples/Python/fileio3.py | Python | mit | 2,555 | 0.00274 | # File Transfer model #3
#
# In which the client requests each chunk individually, using
# command pipelining to give us a credit-based flow control.
import os
from threading import Thread
import zmq
from zhelpers import socket_set_hwm, zpipe
CHUNK_SIZE = 250000  # bytes requested per fetch; a shorter final chunk signals end-of-file
def client_thread(ctx, pipe):
    """Fetch the test file chunk by chunk over a DEALER socket.

    Each iteration requests the next CHUNK_SIZE bytes starting at the
    current offset; a chunk shorter than CHUNK_SIZE marks end-of-file.
    Signals completion by sending b"OK" on `pipe`.
    """
    dealer = ctx.socket(zmq.DEALER)
    socket_set_hwm(dealer, 1)
    dealer.connect("tcp://127.0.0.1:6000")

    total = 0       # Total bytes received
    chunks = 0      # Total chunks received

    while True:
        # ask for next chunk
        dealer.send_multipart([
            b"fetch",
            b"%i" % total,
            b"%i" % CHUNK_SIZE
        ])

        try:
            chunk = dealer.recv()
        except zmq.ZMQError as e:
            if e.errno == zmq.ETERM:
                return   # shutting down, quit
            else:
                raise

        chunks += 1
        size = len(chunk)
        total += size
        if size < CHUNK_SIZE:
            break   # Last chunk received; exit

    print ("%i chunks received, %i bytes" % (chunks, total))
    pipe.send(b"OK")
# .split File server thread
# The server thread waits for a chunk request from a client,
# reads that chunk and sends it back to the client:
def server_thread(ctx):
    """Serve byte ranges of the "testdata" file over a ROUTER socket.

    Waits for ``[identity, b"fetch", offset, chunk_size]`` requests and
    replies with the requested slice of the file.  Returns (closing the
    file) when the context is terminated.
    """
    router = ctx.socket(zmq.ROUTER)
    router.bind("tcp://*:6000")

    # Open in binary mode so byte offsets/sizes are exact on every
    # platform, and close the file on shutdown.
    with open("testdata", "rb") as file:
        while True:
            # First frame in each message is the sender identity
            # Second frame is "fetch" command
            try:
                msg = router.recv_multipart()
            except zmq.ZMQError as e:
                if e.errno == zmq.ETERM:
                    return   # shutting down, quit
                else:
                    raise

            identity, command, offset_str, chunksz_str = msg
            assert command == b"fetch"
            offset = int(offset_str)
            chunksz = int(chunksz_str)

            # Read chunk of data from file (an empty read past EOF simply
            # produces a short/empty reply, which ends the client's loop).
            file.seek(offset, os.SEEK_SET)
            data = file.read(chunksz)

            # Send resulting chunk to client
            router.send_multipart([identity, data])
# The main task is just the same as in the first model.
# .skip
def main():
    """Start the server and client threads, then wait for the client's
    completion message on the in-process pipe before terminating."""
    # Start child threads
    ctx = zmq.Context()
    a, b = zpipe(ctx)

    client = Thread(target=client_thread, args=(ctx, b))
    server = Thread(target=server_thread, args=(ctx,))
    client.start()
    server.start()

    # Loop until client tells us it's done.  Use print() call syntax so
    # this runs under both Python 2 and Python 3 (the original
    # `print a.recv()` statement is a SyntaxError on Python 3).
    try:
        print(a.recv())
    except KeyboardInterrupt:
        pass

    # Dropping the pipe sockets lets ctx.term() return, which in turn
    # unblocks both threads via ETERM.
    del a, b
    ctx.term()
if __name__ == '__main__':
main()
|
logicalhacking/ExtensionCrawler | ExtensionCrawler/request_manager.py | Python | gpl-3.0 | 1,347 | 0.001485 | import time
import random
from contextlib import contextmanager
from multiprocessing import Lock, BoundedSemaphore, Value
class RequestManager:
    """Throttle requests shared between up to `max_workers` processes.

    Normal requests may run concurrently (bounded by a semaphore);
    restricted requests drain the whole semaphore so they run alone.
    Both wait so that at least ~0.6s (plus random jitter) has elapsed
    since the relevant previous request.  Timestamps are stored in
    multiprocessing.Value objects so they are shared across processes.
    """

    def __init__(self, max_workers):
        self.max_workers = max_workers
        self.lock = Lock()
        self.sem = BoundedSemaphore(max_workers)
        # time.time() of the most recent (restricted) request, shared
        # across worker processes.
        self.last_request = Value('d', 0.0)
        self.last_restricted_request = Value('d', 0.0)

    @contextmanager
    def normal_request(self):
        with self.lock:
            self.sem.acquire()
            # Wait until ~0.6-0.75s after the last *restricted* request.
            time.sleep(max(0.0, self.last_restricted_request.value + 0.6 +
                           (random.random() * 0.15) - time.time()))
        # try/finally suffices here; the original's
        # `except Exception as e: raise e` only re-raised unchanged.
        try:
            yield
        finally:
            self.last_request.value = time.time()
            self.sem.release()

    @contextmanager
    def restricted_request(self):
        with self.lock:
            # Drain the semaphore so no normal request runs concurrently.
            for _ in range(self.max_workers):
                self.sem.acquire()
            # Wait until ~0.6-0.75s after the last request of any kind.
            time.sleep(max(0.0, self.last_request.value + 0.6 +
                           (random.random() * 0.15) - time.time()))
        try:
            yield
        finally:
            self.last_request.value = time.time()
            self.last_restricted_request.value = time.time()
            for _ in range(self.max_workers):
                self.sem.release()
|
mweb/python | exercises/simple-cipher/example.py | Python | mit | 935 | 0 | from string import ascii_lowercase
from time import time
import random
class Cipher(object):
    """Vigenere-style substitution cipher over lowercase ASCII letters.

    With no key, a random 100-letter lowercase key is generated; an
    explicit key must consist only of lowercase letters, otherwise
    ValueError is raised.
    """

    def __init__(self, key=None):
        if not key:
            random.seed(time())
            key = ''.join(random.choice(ascii_lowercase) for i in range(100))
        elif not key.isalpha() or not key.islower():
            raise ValueError('Wrong key parameter!')
        self.key = key

    def encode(self, text):
        """Strip non-letters, lowercase, and shift each letter by the
        corresponding key letter."""
        text = ''.join(c for c in text if c.isalpha()).lower()
        # Repeat the key so it is at least as long as the text.
        key = self.key * (len(text) // len(self.key) + 1)
        # 194 == 2 * ord('a'): (ord(c) - 97) + (ord(k) - 97), mod 26.
        return ''.join(chr((ord(c) - 194 + ord(k)) % 26 + 97)
                       for c, k in zip(text, key))

    def decode(self, text):
        """Reverse encode(): shift each letter back by the key letter.
        Assumes `text` is already lowercase letters (encode's output)."""
        key = self.key * (len(text) // len(self.key) + 1)
        return ''.join(chr((ord(c) - ord(k) + 26) % 26 + 97)
                       for c, k in zip(text, key))
class Caesar(Cipher):
    """Classic Caesar cipher: a Vigenere cipher with the fixed
    single-letter key 'd' (i.e. a constant shift of 3)."""

    def __init__(self):
        Cipher.__init__(self, 'd')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.