repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ai-se/parGALE | algorithms/serial/gale/where.py | Python | unlicense | 11,293 | 0.013814 | from __future__ import print_function, division
import sys, os
sys.path.append(os.path.abspath("."))
from binary_tree import BinaryTree
from utils.lib import *
from algorithms.serial.algorithm import Algorithm
def settings():
return O(
verbose = False,
b4 = '|.. ',
seed = 1,
is_binary = True,
)
def configs(**d):
return O(
minSize = 8, # min leaf size
depthMin = 2, # no pruning till this depth
depthMax = 10, # max tree depth
wriggle = 0.2, # min difference of 'better'
prune = True, # If pruning should be performed
).update(**d)
def sqrt(pop):
"""
Returns square root of length of list
:param pop: List
:return: Square root of size of list
"""
return len(pop) ** 0.5
class NodePoint(Point):
def __init__(self, decisions):
"""
Create a Nodepoint for a tree
:param decisions: Decisions for the point
:return:
"""
Point.__init__(self, decisions)
self.objectives = None
self.evaluated = False
self.normalized = False
self.a = None # Distance from East
self.b = None # Distance from West
self.c = None # Distance between East and West
self.x = None # Projection of point on "c"
def clone(self):
"""
Duplicate the NodePoint
:return:
"""
other = NodePoint(self.decisions)
other.decisions = other.decisions
other.objectives = clone(self.objectives)
other.evaluated = self.evaluated
other.a, other.b = self.a, self.b
other.c, other.x = self.c, self.x
other.normalized = self.normalized
return other
def clear(self):
self.decisions = None
self.objectives = None
self.evaluated = False
self.normalized = False
self.a = None # Distance from East
self.b = None # Distance from West
self.c = None # Distance between East and West
self.x = None # Projection of point on "c"
def dist(self, problem, one, is_obj=True):
if settings().is_binary:
return self.binary_dist(one, is_obj)
else:
return self.continuous_dist(problem, one, is_obj)
def binary_dist(self, one, is_obj=True):
if is_obj:
x, y = self.objectives, one.objectives
else:
x, y = self.decisions, one.decisions
return sum(x_i!=y_i for x_i, y_i in zip(x, y))
def continuous_dist(self, problem, one, is_obj=True):
"""
Estimate normalized euclidean distance between a point and another point
:param problem: Instance of the problem
:param one: point whose distance needs to be computed
:param is_obj: Flag indicating objective or decision
:return: Distance between self and one
"""
if is_obj:
self_normalized = self.normalized
one_normalized = one.normalized
self.normalized = True
one.normalized = True
return problem.dist(self.objectives, one.objectives,
one_norm = not self_normalized,
two_norm = not one_normalized,
is_obj = is_obj)
else :
return problem.dist(self.decisions, one.decisions,
is_obj = is_obj)
def manhattan_dist(self, problem, one, is_obj = True):
"""
Estimate manhattan distance between a point and another point
:param problem: Instance of the problem
:param one: point whose distance needs to be computed
:param is_obj: Flag indicating objective or decision
:return: Distance between self and one
"""
if is_obj:
self_normalized = self.normalized
one_normalized = one.normalized
self.normalized = True
one.normalized = True
return problem.manhattan_dist(self.objectives, one.objectives,
one_norm = not self_normalized,
two_norm = not one_normalized,
is_obj = is_obj)
else :
return problem.manhattan_dist(self.decisions, one.decisions,
is_obj = is_obj)
def closest(self, problem, pop, init=sys.maxint, better=less):
"""
:param problem: Problem used
:param pop: Population
:param init: Initial Value
:param better: Function that defines what is better
:return: farthest point from self in pop
"""
dist, out = init, None
for one in pop:
if one != self:
tmp = self.dist(problem, one, is_obj=False)
if better(tmp, dist):
dist, out = tmp, one
return out
def furthest(self, problem, pop):
"""
:param problem: Problem used
:param pop: Population
:return: farthest point from self in pop
"""
return self.closest(problem, pop, init=-sys.maxint, better=more)
def evaluate(self, problem):
self.objectives = problem.evaluate(self.decisions)
self.evaluated = True
class Node(BinaryTree):
"""
Represents node of a tree
"""
@staticmethod
def format(pop):
return [NodePoint(one) for one in pop]
@staticmethod
def projection(a, b, c):
"""
Fastmap projection distance
:param a: Distance from West
:param b: Distance from East
:param c: Distance between West and East
:return: FastMap projection distance(float)
"""
return (a**2 + c**2 - b**2) / (2*c+0.00001)
def __init__(self, problem, pop, total_size, parent=None, level=1, n=1):
"""
Initialize a node for the tree
:param problem: Instance of the problem
:param pop: Population for the node # Make sure format is called on pop first
:param total_size: Total number of points in the whole population
:param parent: Parent of the node
:param level: Level of the tree
:param n: Represents cut in the node
:return: Node
"""
BinaryTree.__init__(self)
self.problem = problem
self._pop = pop
self.level = level
self.N = n
self.total_size = total_size
self._parent = parent
self.east, self.west, self.c, self.x = None, None, None, None
self.abort = False
def fastmap(self, problem, pop):
"""
Fastmap function that projects all the points on the principal component
:param problem: Instance of the problem
:param pop: Set of points in the cluster population
:return:
"""
one = choice(pop)
self.west = one.furthest(problem, pop)
self.east = self.west.furt | hest(problem, pop)
self.c = self.west.dist(problem, self.east, is_obj=False)
for one in pop:
a = one.dist(problem, self.west, is_obj=False)
b = one.dist(problem, self.east, is_obj=False)
one.x = Node.projection(a, b, self.c)
one.c = self.c
one.a = a
one.b = b
pop = sorted(pop, key=lambda one: one.x)
return pop
| def split(self, pop, cut):
"""
Split the population at the midpoint
:param pop:
:return:
"""
self.x = pop[cut].x
self.east = pop[0]
self.west = pop[-1]
return pop[:cut], pop[cut:]
def divide(self, threshold, abort = False):
"""
Recursively partition tree
:param threshold:
:return:
"""
def afew(pop):
clones = [point.clone() for point in pop]
return clones
self._pop = self.fastmap(self.problem, self._pop)
self.n = len(self._pop)
n = len(self._pop)
cut, _ = self.binary_chop(self._pop, n//2, None, 2*n ** 0.5, n)
self.abort = abort
if not abort and n >= threshold:
# Splitting
wests, easts = self.split(self._pop, cut)
if self.west != self.east:
if self.N > cut:
little_n = cut
else:
little_n = self.N
west_abort = False
east_abort = False
if not self.east.evaluated:
self.east.evaluate(self.problem)
if not self.west.evaluated:
self.west.evaluate(self.problem)
weights = self.problem.directional_weights()
weighted_west = [c*w for c,w in zip(self.west.objectives, weights)]
weighted_east = [c*w for c,w in zip(self.east.objectives, weights)]
objs = self.problem.objectives
west_loss = Algorithm.dominates_continuous(
weighted_west,
|
joshmoore/openmicroscopy | components/tools/OmeroWeb/omeroweb/webadmin/controller/uploadfile.py | Python | gpl-2.0 | 1,449 | 0.008972 | #!/usr/bin/env python
#
#
#
# Copyright (c) 2008 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If n | ot, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from omero.rtypes import *
from omero.model import ExperimenterAnnotationLinkI
from django.utils.encoding import smart_str
from webadmin.controller import BaseController
class BaseUploadFile(BaseController):
def __init__(self, conn):
BaseController.__init__(self, conn)
def attach_photo(self, newFile):
| if newFile.content_type.startswith("image"):
f = newFile.content_type.split("/")
format = f[1].upper()
else:
format = newFile.content_type
self.conn.uploadMyUserPhoto(smart_str(newFile.name), format, newFile.read())
|
gkmngrgn/puding | puding/constants.py | Python | gpl-3.0 | 1,582 | 0 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# author: Gökmen Görgen
# license: GPLv3 (Read COPYING file.)
#
from os import getenv
LOCALE = "/usr/share/locale"
VERSION = "0.2"
HOME = "%s/.puding" % getenv("HOME")
SYSLINUX = "/usr/lib/syslinux"
URL = "http://www.gokmengorgen.net/puding"
CORE_DEVELOPER = u"Gökmen Görgen"
CORE_EMAIL = "gkmngrgn [at] gmail.com"
ART_CONTRIBUTOR = u"Yasin Özcan, hamfindik [at] gmail.com"
TRANSLATORS = u"""\
de: Stefan Wilhelm, sw1976de [ | at] googlemail.com
es: Klemens Häckel click3d [at] linuxmail. org
fr: Philippe Svetoslavsky, philsvet [at] gmail.com
nl: Anton Tolboom, atolboo [at] gmail.com
sv: Patrik Karlsson, patrik [at] pardus.nu
tr: Necmettin Begiter, necmettin.beg | iter [at] gmail.com
"""
SUMMARY = "An USB Image Creator For Pardus Linux."
DESCRIPTION = "Puding is an USB image creator for Pardus Linux."
YEAR = "2009"
COPYRIGHT = u"Copyright (c) %s TUBITAK / UEKAE" % YEAR
LICENSE_NAME = "GPLv3"
LICENSE = """%s
Puding is a free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Puding is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.\
""" % COPYRIGHT
|
ANCIR/siyazana.co.za | connectedafrica/scrapers/npo.py | Python | mit | 4,358 | 0.000688 | import logging
import json
import os
import re
#from pprint import pprint
#from itertools import count
from urlparse import urljoin
from lxml import html
from thready import threaded
import requests
from scrapekit.util import collapse_whitespace
from connectedafrica.scrapers.util import MultiCSV
from connectedafrica.scrapers.util import make_path
log = logging.getLogger('npo')
URL_PATTERN = "http://www.npo.gov.za/PublicNpo/Npo/DetailsAllDocs/%s"
def make_cache(i):
return make_path('.cache/npo/%s/%s/%s/%s/%s.json' % (
i % 10, i % 100, i % 1000, i % 10000, i))
def make_urls():
for i in xrange(1, 16000000) | :
yield i
def scrape_npo(csv, i):
url = URL_PATTERN % i
cache_path = make_cache(i)
if not os.path.exists(cache_path):
res = requests.get(url)
page = {
'url': url,
'http_status': res.status_code,
| 'content': res.content.decode('utf-8')
}
with open(cache_path, 'wb') as fh:
json.dump(page, fh)
else:
with open(cache_path, 'rb') as fh:
page = json.load(fh)
if 'internal server error' in page['content']:
return
data = {}
doc = html.fromstring(page['content'])
data = {
'source_url': url,
'name': doc.find('.//h1').find('.//span').text.strip(),
'status': doc.find('.//h1').find('.//span[@class="npo-status"]').text,
'email': None
}
log.info("Scraping: %s", data['name'])
sub_titles = doc.findall('.//h5')
next_heading = None
for sub_title in sub_titles:
text = collapse_whitespace(sub_title.text)
if 'Registration No' in text:
data['reg_no'] = sub_title.find('./span').text.strip()
next_heading = 'category'
elif 'Your Name' in text:
next_heading = None
elif next_heading == 'category':
data['category'] = text
next_heading = 'legal_form'
elif next_heading == 'legal_form':
data['legal_form'] = text
next_heading = None
for span in doc.findall('.//span'):
text = collapse_whitespace(span.text)
if text is not None and 'Registered on' in text:
match = re.search(r'\d+.\d+.\d+', text)
if match:
data['reg_date'] = match.group(0)
for addr in doc.findall('.//div[@class="address"]'):
addr_type = collapse_whitespace(addr.find('./h4').text)
addrs = [collapse_whitespace(a) for a in
addr.xpath('string()').split('\n')]
addrs = '\n'.join([a for a in addrs if len(a)][1:])
if 'Physical' in addr_type:
data['physical_address'] = addrs
elif 'Postal' in addr_type:
data['postal_address'] = addrs
elif 'Contact' in addr_type:
data['contact_name'] = collapse_whitespace(addr.find('./p').text)
for li in addr.findall('.//li'):
contact = collapse_whitespace(li.xpath('string()'))
contact_type = {
'phone': 'phone',
'mailinfo': 'email',
'fax': 'fax'
}.get(li.get('class'))
data[contact_type] = contact
off_div = './/li[@data-sha-context-enttype="Npo.AppointedOfficeBearer"]'
csv.write('npo/npo_organisations.csv', data)
for li in doc.findall(off_div):
s = li.find('.//strong')
a = s.find('./a')
id_number = li.find('.//div/span')
if id_number is not None:
id_number = id_number.text
id_number = id_number.replace('(', '')
id_number = id_number.replace(')', '')
id_number = id_number.strip()
if 'Neither ID or Passport' in id_number:
id_number = None
officer = {
'role': collapse_whitespace(s.text).replace(' :', ''),
'npo_name': data['name'],
'source_url': url,
'officer_id': urljoin(url, a.get('href')),
'officer_name': collapse_whitespace(a.text),
'officer_id_number': id_number
}
csv.write('npo/npo_officers.csv', officer)
def scrape_npos():
csv = MultiCSV()
threaded(make_urls(), lambda i: scrape_npo(csv, i), num_threads=30)
csv.close()
if __name__ == '__main__':
scrape_npos()
|
HorizonFoxtrot/Redlist | redlist.py | Python | gpl-3.0 | 13,314 | 0.005558 | # Redlist - a user tracker for Reddit
# Version 1.1.9
import praw, time, cmd, tabulate
from colorama import init, Fore, Back, Style
init(autoreset = True)
print("What platform are you running Redlist on?\nThis is required by the Reddit API guidelines.")
PLATFORM = input("--> ").lower()
print("\n")
CLIENT_VERSION = "1.1.9"
USER_AGENT = PLATFORM + ":redlist:v{} (by /u/HorizonFoxtrot)".format(CLIENT_VERSION)
DELAY = 3600
THING_LIMIT = 10
REPORT_NOTE = "\nNote: the number of subreddit alerts does not reflect the number of times the target user has posted/commented in that subreddit. If the post/comment contains a keyword and keyword alerts are enabled, the post/comment will trigger a keyword alert and NOT a subreddit alert."
class Red(cmd.Cmd):
def __init__(self):
super().__init__()
self.r = praw.Reddit(user_agent = USER_AGENT)
self.already_checked = []
self.target = None
self.keywords = []
self.subs = []
self.alert_keyword_activity = False
self.alert_sub_activity = False
self.alert_any_activity = False
self.set_prompt()
self.alerts_per_sub = []
self.sub_alerts_map = {}
self.alerts_per_keyword = []
self.keyword_alerts_map = {}
self.total_alerts = 0
def get_target(self):
try:
t = self.r.get_redditor(self.target)
except:
print(Back.RED + "Could not retrieve target Reddit account.")
return None
else:
print(Back.MAGENTA + "Target Reddit account retrieved.")
return t
def set_prompt(self):
target = self.target if self.target != None else "no target"
self.prompt = "\n" + Fore.CYAN + "[{}]--> ".format(target) + Style.BRIGHT
def do_EOF(self, line):
return True
def do_quit(self, line):
"""quit
Exit the program and return to the command line."""
return True
def do_debug(self, line):
exec(line)
def do_target(self, line):
"""target [user]
Set the specified Reddit account as the target for tracking."""
self.target = line.lower()
print(Back.MAGENTA + "Target changed.")
self.set_prompt()
def do_aword(self, line):
"""aword [word]
Add the specified word to the list of keywords."""
if line.lower() not in self.keywords:
self.keywords.append(line.lower())
self.alerts_per_keyword.append([line.lower(), 0])
self.keyword_alerts_map[line.lower()] = len(self.alerts_per_keyword) - 1
print(Back.MAGENTA + "Keyword added.")
else:
print(Back.MAGENTA + "Keyword already added.")
def do_asub(self, line):
"""asub [subreddit]
Add the specified subreddit to the list of key subreddits."""
if line.lower() not in self.subs:
self.subs.append(line.lower())
self.alerts_per_sub.append([line.lower(), 0])
self.sub_alerts_map[line.lower()] = len(self.alerts_per_sub) - 1
print(Back.MAGENTA + "Subreddit added.")
else:
print(Back.MAGENTA + "Subreddit already added.")
def do_rword(self, line):
"""rword [word]
Remove the specified word from the list of keywords."""
if line.lower() in self.keywords:
self.keywords.remove(line.lower())
print(Back.MAGENTA + "Keyword removed.")
else:
print(Back.MAGENTA + "Keyword not found.")
def do_rsub(self, line):
"""rsub [subreddit]
Remove the specified subreddit from the list of key subreddits."""
if line.lower() in self.subs:
self.subs.remove(line.lower())
print(Back.MAGENTA + "Subreddit removed.")
else:
print(Back.MAGENTA + "Subreddit not found.")
def do_alert(self, line):
"""alert [keyword/subreddit/any] [0/1]
Set whether or not to trigger alerts of the specified type."""
arguments = line.split()
if len(arguments) < 2:
print(Back.RED + "Must specify alert type and boolean value.")
return False
if arguments[0].lower() == "keyword":
if arguments[1] == "0":
self.alert_keyword_activity = False
print(Back.MAGENTA + "Keyword activity alerts disabled.")
elif arguments[1] == "1":
self.alert_keyword_activity = True
print(Back.MAGENTA + "Keyword activity alerts enabled.")
else:
print(Back.RED + "Must be set to 0 or 1.")
elif arguments[0].lower() == "subreddit":
if arguments[1] == "0":
self.alert_sub_activity = False
print(Back.MAGENTA + "Subreddit activity alerts disabled.")
elif arguments[1] == "1":
self.alert_sub_activity = True
print(Back.MAGENTA + "Subreddit activity alerts enabled.")
elif arguments[0].lower() == "any":
if arguments[1] == "0":
self.alert_any_activity = False
print(Back.MAGENTA + "Any activity alerts disabled.")
elif arguments[1] == "1":
self.alert_any_activity = True
print(Back.MAGENTA + "Any activity alerts enabled.")
else:
print(Back.RED + "Type not recognized.")
def do_show(self, line):
"""show [keywords/subreddits/alerts]
Displays a list of all words or subreddits marked for tracking."""
arguments = line.split()
if arguments[0].lower() == "keywords":
print(Back.WHITE + Fore.BLACK + "Keywords marked for tracking:")
if len(self.keywords) > 0:
for i in self.keywords:
print(i)
else:
print("None.")
elif arguments[0].lower() == "subreddits":
print(Back.WHITE + Fore.BLACK + "Subreddits marked for tracking:")
if len(self.subs) > 0:
for i in self.subs:
print(i)
else:
print("None.")
elif arguments[0].lower() == "alerts":
total_alerts_active = 0
print(Back.WHITE + Fore.BLACK + "Alerts activated:")
if self.alert_keyword_activity == True:
print("Keyword activity")
total_alerts_active += 1
if self.alert_sub_activity == True:
print("Subreddit activity")
total_alerts_active += 1
if self.alert_any_activity == True:
print("Any activity")
total_alerts_active += 1
if total_alerts_active == 0:
print("None.")
else:
print(Back.RED + "Must specify either keywords or subreddits.")
def do_start(self, line):
"""start
Begins tracking the target Reddit user."""
if self.target == None:
print(Back.RED + "Must select a target before initiating tracking.")
return False
if len(self.keywords) == 0 and len(self.subs) == 0:
decision = None
while decision == None:
print(Fore.WHITE + "No ke | ywords or subreddits have been marked. Do you want to initiate tracking?")
decision = input(Fore.CYAN + "[y/n]--> " + Style.BRIGHT)
if decision.lower() == "n":
return False
elif decision.lower() == "y":
break
if self.alert_any_activity == False and self.alert_sub_activity == False and self.alert_keyword_activity == False:
decision = None
while decision == None:
| print(Fore.WHITE + "No alerts have been activated. Do you want to initiate tracking?")
decision = input(Fore.CYAN + "[y/n]--> " + Style.BRIGHT)
if decision.lower() == "n":
return False
elif decision.lower() == "y":
break
print(Back.MAGENTA + "Checking target...")
t = self. |
rmikio/lottery | testjson.py | Python | gpl-3.0 | 1,125 | 0.018667 | import string
import urllib, json
# MassCash - start date: March 1991
#print("id;date;n1;n2;n3;n4;n5")
for year in xrange(1991,2018):
for month in xrange(1,12):
drawdate=str(year)+str(month).zfill(2)
url = "http://www.masslottery.com/data/json/search/lotterygames/12/"+drawdate+".json"
# print url
#url = "http://www.masslottery.com/games/lottery/search/results-lottery.html?game_id=12&mode=1&year=2017&month=8&x=12&y=14"
#url = "http://www.masslottery.com/data/json/search/lotterygames/12/201708.json?_=1503079479105"
#url = "http://www.masslottery.com/data/json/search/lotterygames/12/201708.json"
try:
response = urllib.urlopen(url)
data = json.loads(response.read())
min = int(data['min'])
max = int(data['max'])
range = int(max-min)
for element in data['draws']:
print("%s;%s;%s"%(element['draw_id'],element['draw_date'],string.replace(element['winning_num'],'-',';')))
except ValueError:
# print( | "No draws on %s"%(drawdate))
# print(";;;;;;")
| pass
|
SUSE/azure-sdk-for-python | azure-batch/azure/batch/models/cloud_pool.py | Python | mit | 16,446 | 0.001094 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CloudPool(Model):
"""A pool in the Azure Batch service.
:param id: A string that uniquely identifies the pool within the account.
The ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters. It is
common to use a GUID for the id.
:type id: str
:param display_name: The display name for the pool. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param url: The URL of the pool.
:type url: str
:param e_tag: The ETag of the pool. This is an opaque string. You can use
it to detect whether the pool has changed between requests. In particular,
you can be pass the ETag when updating a pool to specify that your changes
should take effect only if nobody else has modified the pool in the
meantime.
:type e_tag: str
:param last_modified: The last modified time of the pool. This is the last
time at which the pool level data, such as the targetDedicatedNodes or
enableAutoscale settings, changed. It does not factor in node-level
changes such as a compute node changing state.
:type last_modified: datetime
:param creation_time: The creation time of the pool.
:type creation_time: datetime
:param state: The current state of the pool. active - The pool is
available to run tasks subject to the availability of compute nodes.
deleting - The user has requested that the pool be deleted, but the delete
operation has not yet completed. upgrading - The user has requested that
the operating system of the pool's nodes be upgraded, but the upgrade
operation has not yet completed (that is, some nodes in the pool have not
yet been upgraded). While upgrading, the pool may be able to run tasks
(with reduced capacity) but this is not guaranteed. Possible values
include: 'active', 'deleting', 'upgrading'
:type state: str or :class:`PoolState <azure.batch.models.PoolState>`
:param state_transition_time: The time at which the pool entered its
current state.
:type state_transition_time: datetime
:param allocation_state: Whether the pool is resizing. steady - The pool
is not resizing. There are no changes to the number of nodes in the pool
in progress. A pool enters this state when it is created and when no
operations are being performed on the pool to change the number of
dedicated nodes. resizing - The pool is resizing; that is, compute nodes
are being added to or removed from the pool. stopping - The pool was
resizing, but the user has requested that the resize be stopped, but the
stop request has not yet been completed. Possible values include:
'steady', 'resizing', 'stopping'
:type allocation_state: str or :class:`AllocationState
<azure.batch.models.AllocationState>`
:param allocation_state_transition_time: The time at which the pool
entered its current allocation state.
:type allocation_state_transition_time: datetime
:param vm_size: The size of virtual machines in the pool. All virtual
machines in a pool are the same size. For information about available
sizes of virtual machines for Cloud Services pools (pools created with
cloudServiceConfiguration), see Sizes for Cloud Services
(http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/).
Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and
A2V2. For information about available VM sizes for pools using images from
the Virtual Machines Marketplace (pools created with
virtualMachineConfiguration) see Sizes for Virtual Machines (Linux)
(https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/)
or Sizes for Virtual Machines (Windows)
(https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/).
Batch supports all Azure VM sizes except STANDARD_A0 and those with
premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the pool. This | property and virtualMachineConfiguration are mutually
exclusive and one of the properties must be specified. This property
cannot be specified if the Batch account was created with its
poolAllocationMode property set to 'UserSubscription'.
:type cloud_service_configuration: :class:`CloudServiceConfiguration
<azure.batch.models.CloudServiceConfiguration>`
:param virtual_machine_configuration: The virtual machine configuration
| for the pool. This property and cloudServiceConfiguration are mutually
exclusive and one of the properties must be specified.
:type virtual_machine_configuration: :class:`VirtualMachineConfiguration
<azure.batch.models.VirtualMachineConfiguration>`
:param resize_timeout: The timeout for allocation of compute nodes to the
pool. This is the timeout for the most recent resize operation. (The
initial sizing when the pool is created counts as a resize.) The default
value is 15 minutes.
:type resize_timeout: timedelta
:param resize_errors: A list of errors encountered while performing the
last resize on the pool. This property is set only if one or more errors
occurred during the last pool resize, and only when the pool
allocationState is Steady.
:type resize_errors: list of :class:`ResizeError
<azure.batch.models.ResizeError>`
:param current_dedicated_nodes: The number of dedicated compute nodes
currently in the pool.
:type current_dedicated_nodes: int
:param current_low_priority_nodes: The number of low-priority compute
nodes currently in the pool. Low-priority compute nodes which have been
preempted are included in this count.
:type current_low_priority_nodes: int
:param target_dedicated_nodes: The desired number of dedicated compute
nodes in the pool.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
compute nodes in the pool.
:type target_low_priority_nodes: int
:param enable_auto_scale: Whether the pool size should automatically
adjust over time. If false, at least one of targetDedicateNodes and
targetLowPriorityNodes must be specified. If true, the autoScaleFormula
property is required and the pool automatically resizes according to the
formula. The default value is false.
:type enable_auto_scale: bool
:param auto_scale_formula: A formula for the desired number of compute
nodes in the pool. This property is set only if the pool automatically
scales, i.e. enableAutoScale is true.
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the pool size according to the autoscale formula.
This property is set only if the pool automatically scales, i.e.
enableAutoScale is true.
:type auto_scale_evaluation_interval: timedelta
:param auto_scale_run: The results and errors from the last execution of
the autoscale formula. This property is set only if the pool automatically
scales, i.e. enableAutoScale is true.
:type auto_scale_run: :class:`AutoScaleRun
<azure.batch.models.AutoScaleRun>`
:param enable_inter_node_communication: Whether the pool permits direct
communication between nodes. This imposes restrictions on which nodes can
b |
SohKai/ChronoLogger | web/flask/lib/python2.7/site-packages/tempita/__init__.py | Python | mit | 39,809 | 0.000955 | """
A small templating language
This implements a small templating language. This language implements
if/elif/else, for/continue/break, expressions, and blocks of Python
code. The syntax is::
{{any expression (function calls etc)}}
{{any expression | filter}}
{{for x in y}}...{{endfor}}
{{if x}}x{{elif y}}y{{else}}z{{endif}}
{{py:x=1}}
{{py:
def foo(bar):
return 'baz'
}}
{{default var = default_value}}
{{# comment}}
You use this with the ``Template`` class or the ``sub`` shortcut.
The ``Template`` class takes the template string and the name of
the template (for errors) and a default namespace. Then (like
``string.Template``) you can call the ``tmpl.substitute(**kw)``
method to make a substitution (or ``tmpl.substitute(a_dict)``).
``sub(content, **kw)`` substitutes the template immediately. You
can use ``__name='tmpl.html'`` to set the name of the template.
If there are syntax errors ``TemplateError`` will be raised.
"""
import re
import sys
import cgi
from urllib import quote as url_quote
import os
import tokenize
from cStringIO import StringIO
from tempita._looper import looper
from tempita.compat3 import bytes, basestring_, next, is_unicode, coerce_text
__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
'sub_html', 'html', 'bunch']
in_re = re.compile(r'\s+in\s+')
var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
class TemplateError(Exception):
"""Exception raised while parsing a template
"""
def __init__(self, message, position, name=None):
Exception.__init__(self, message)
self.position = position
self.name = name
def __str__(self):
msg = ' '.join(self.args)
if self.position:
msg = '%s at line %s column %s | ' % (
msg, self.position[0], self.position[1])
if self.name:
msg += ' in %s' | % self.name
return msg
class _TemplateContinue(Exception):
pass
class _TemplateBreak(Exception):
pass
def get_file_template(name, from_template):
path = os.path.join(os.path.dirname(from_template.name), name)
return from_template.__class__.from_filename(
path, namespace=from_template.namespace,
get_template=from_template.get_template)
class Template(object):
default_namespace = {
'start_braces': '{{',
'end_braces': '}}',
'looper': looper,
}
default_encoding = 'utf8'
default_inherit = None
    def __init__(self, content, name=None, namespace=None, stacklevel=None,
                 get_template=None, default_inherit=None, line_offset=0,
                 delimiters=None):
        """Parse *content* into a template.

        name: label used in error messages; when None and *stacklevel* is
            given, the name is derived from the caller's frame.
        namespace: default substitution namespace merged into every call.
        stacklevel: how many frames up to look for the caller's file name.
        get_template: callable used to resolve {{inherit}} targets.
        line_offset: added to reported line numbers (for embedded templates).
        delimiters: optional (start, end) pair overriding '{{' / '}}'.
        """
        self.content = content
        # set delimiters
        if delimiters is None:
            delimiters = (self.default_namespace['start_braces'],
                          self.default_namespace['end_braces'])
        else:
            # NOTE(review): 'basestring' is the Python 2 builtin; under
            # Python 3 this assert would NameError (the compat3 import
            # provides 'basestring_'). Confirm the intended interpreter.
            assert len(delimiters) == 2 and all([isinstance(delimeter, basestring)
                                                 for delimeter in delimiters])
            # Copy the class-level namespace so the custom delimiters do
            # not leak into other Template instances.
            self.default_namespace = self.__class__.default_namespace.copy()
            self.default_namespace['start_braces'] = delimiters[0]
            self.default_namespace['end_braces'] = delimiters[1]
        self.delimiters = delimiters
        self._unicode = is_unicode(content)
        if name is None and stacklevel is not None:
            # Derive a display name from the calling frame's file/module.
            try:
                caller = sys._getframe(stacklevel)
            except ValueError:
                pass
            else:
                globals = caller.f_globals
                lineno = caller.f_lineno
                if '__file__' in globals:
                    name = globals['__file__']
                    if name.endswith('.pyc') or name.endswith('.pyo'):
                        # Point at the .py source, not the compiled file.
                        name = name[:-1]
                elif '__name__' in globals:
                    name = globals['__name__']
                else:
                    name = '<string>'
                if lineno:
                    name += ':%s' % lineno
        self.name = name
        # parse() is the module-level template parser defined elsewhere in
        # this file; it returns the code-tuple stream _interpret walks.
        self._parsed = parse(content, name=name, line_offset=line_offset, delimiters=self.delimiters)
        if namespace is None:
            namespace = {}
        self.namespace = namespace
        self.get_template = get_template
        if default_inherit is not None:
            self.default_inherit = default_inherit
def from_filename(cls, filename, namespace=None, encoding=None,
default_inherit=None, get_template=get_file_template):
f = open(filename, 'rb')
c = f.read()
f.close()
if encoding:
c = c.decode(encoding)
return cls(content=c, name=filename, namespace=namespace,
default_inherit=default_inherit, get_template=get_template)
from_filename = classmethod(from_filename)
def __repr__(self):
return '<%s %s name=%r>' % (
self.__class__.__name__,
hex(id(self))[2:], self.name)
def substitute(self, *args, **kw):
if args:
if kw:
raise TypeError(
"You can only give positional *or* keyword arguments")
if len(args) > 1:
raise TypeError(
"You can only give one positional argument")
if not hasattr(args[0], 'items'):
raise TypeError(
"If you pass in a single argument, you must pass in a dictionary-like object (with a .items() method); you gave %r"
% (args[0],))
kw = args[0]
ns = kw
ns['__template_name__'] = self.name
if self.namespace:
ns.update(self.namespace)
result, defs, inherit = self._interpret(ns)
if not inherit:
inherit = self.default_inherit
if inherit:
result = self._interpret_inherit(result, defs, inherit, ns)
return result
def _interpret(self, ns):
__traceback_hide__ = True
parts = []
defs = {}
self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
if '__inherit__' in defs:
inherit = defs.pop('__inherit__')
else:
inherit = None
return ''.join(parts), defs, inherit
def _interpret_inherit(self, body, defs, inherit_template, ns):
__traceback_hide__ = True
if not self.get_template:
raise TemplateError(
'You cannot use inheritance without passing in get_template',
position=None, name=self.name)
templ = self.get_template(inherit_template, self)
self_ = TemplateObject(self.name)
for name, value in defs.iteritems():
setattr(self_, name, value)
self_.body = body
ns = ns.copy()
ns['self'] = self_
return templ.substitute(ns)
def _interpret_codes(self, codes, ns, out, defs):
__traceback_hide__ = True
for item in codes:
if isinstance(item, basestring_):
out.append(item)
else:
self._interpret_code(item, ns, out, defs)
def _interpret_code(self, code, ns, out, defs):
__traceback_hide__ = True
name, pos = code[0], code[1]
if name == 'py':
self._exec(code[2], ns, pos)
elif name == 'continue':
raise _TemplateContinue()
elif name == 'break':
raise _TemplateBreak()
elif name == 'for':
vars, expr, content = code[2], code[3], code[4]
expr = self._eval(expr, ns, pos)
self._interpret_for(vars, expr, content, ns, out, defs)
elif name == 'cond':
parts = code[2:]
self._interpret_if(parts, ns, out, defs)
elif name == 'expr':
parts = code[2].split('|')
base = self._eval(parts[0], ns, pos)
for part in parts[1:]:
func = self._eval(part, ns, pos)
base = func(base)
out.append(self._repr(base, pos))
elif name == 'default':
var, expr = code[2], code[3]
if var not in ns:
re |
okuta/chainer | tests/chainer_tests/functions_tests/math_tests/test_average.py | Python | mit | 6,055 | 0 | import unittest
import numpy
import six
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer import utils
@testing.parameterize(*(
    testing.product({
        'shape': [(3, 2, 4)],
        'axis': [None, 0, 1, 2, -1, (0, 1), (1, -1)],
        'dtype': [numpy.float16, numpy.float32, numpy.float64],
        'use_weights': [True, False],
        'keepdims': [True, False],
        'use_variable_method': [True, False],
    }) +
    testing.product({
        'shape': [()],
        'axis': [None],
        'dtype': [numpy.float16, numpy.float32, numpy.float64],
        'use_weights': [True, False],
        'keepdims': [True, False],
        'use_variable_method': [True, False],
    })))
@testing.fix_random()
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
    }))
class TestAverage(testing.FunctionTestCase):
    # Compares functions.average (and Variable.mean) against numpy.average
    # over shapes, axes, dtypes and weighting options.
    def setUp(self):
        """Relax tolerances for float16 and skip double backward."""
        self.skip_double_backward_test = True
        if self.dtype == numpy.float16:
            self.check_forward_options.update({'atol': 5e-3, 'rtol': 5e-3})
            self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-1})
        else:
            self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})
    def before_test(self, test_name):
        """Skip weighted runs with a tuple axis (unsupported combination)."""
        if self.use_weights and isinstance(self.axis, tuple):
            # This condition is not supported
            raise unittest.SkipTest(
                'Tuple axis is not supported when weights is given')
    def generate_inputs(self):
        """Return (x, w): sample data plus weights shaped to match the axis."""
        x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        if self.axis is None:
            w_shape = self.shape
        elif isinstance(self.axis, int):
            axis = self.axis
            # Normalize a negative axis before indexing into the shape.
            if axis < 0:
                ndim = len(self.shape)
                axis += ndim
            w_shape = self.shape[axis],
        else:
            w_shape = tuple(self.shape[a] for a in self.axis)
        # Sample weights. Weights should not sum to 0 (the average would
        # divide by ~0); resample until the sum is clearly non-zero.
        while True:
            w = numpy.random.uniform(-2, 2, w_shape).astype(self.dtype)
            w_sum_eps = 1.0 if self.dtype == numpy.float16 else 5e-2
            if abs(w.sum()) > w_sum_eps:
                break
        return x, w
    def forward(self, inputs, device):
        """Compute the average via the function or the Variable method."""
        x, w = inputs
        if not self.use_weights:
            w = None
        if self.use_variable_method:
            y = x.mean(axis=self.axis, weights=w, keepdims=self.keepdims)
        else:
            y = functions.average(
                x, axis=self.axis, weights=w, keepdims=self.keepdims)
        return y,
    def forward_expected(self, inputs):
        """Reference result via numpy.average, emulating keepdims by hand."""
        x, w = inputs
        if not self.use_weights:
            w = None
        y_expect = numpy.average(x, axis=self.axis, weights=w)
        if self.keepdims:
            # numpy.average does not support keepdims, so re-insert the
            # reduced axes as size-1 dimensions manually.
            axis = self.axis
            if axis is None:
                axis = list(six.moves.range(x.ndim))
            elif isinstance(axis, int):
                axis = axis,
            shape = list(x.shape)
            for i in six.moves.range(len(shape)):
                # Match both positive and negative spellings of each axis.
                if i in axis or i - len(shape) in axis:
                    shape[i] = 1
            y_expect = y_expect.reshape(shape)
        y_expect = utils.force_array(y_expect, dtype=self.dtype)
        return y_expect,
@testing.parameterize(*(
    testing.product({
        'shape': [(30, 20, 40)],
        'axis': [None, 0, 1, 2, -1, (0, 1), (1, -1)],
        'dtype': [numpy.float16],
        'use_weights': [False],  # np.average overflows when `weights` is used
        'keepdims': [True, False],
    })
))
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
    }))
@attr.slow
@testing.with_requires('numpy>=1.12')  # NumPy #8222
class TestAverageOverflowingSum(testing.FunctionTestCase):
    """Checks that averaging stays accurate when a naive float16 sum of the
    inputs would overflow; the reference accumulates in float64."""

    def setUp(self):
        self.check_forward_options.update({'atol': 1e-2, 'rtol': 2e-3})
        self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})
        self.check_double_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})

    def generate_inputs(self):
        # Values large enough that summing ~24000 of them exceeds the
        # float16 maximum (~65504) unless a wider accumulator is used.
        x = numpy.random.uniform(3000, 7000, self.shape).astype(self.dtype)
        return x,

    def forward(self, inputs, device):
        x, = inputs
        y = functions.average(
            x, self.axis, keepdims=self.keepdims)
        return y,

    def forward_expected(self, inputs):
        # Accumulate in float64 so the reference itself cannot overflow,
        # then cast back to the test dtype for comparison.
        x, = inputs
        y_expect = numpy.mean(
            x.astype(numpy.float64), self.axis, keepdims=self.keepdims
        ).astype(self.dtype)
        return utils.force_array(y_expect),
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestAverageDuplicateValueInAxis(unittest.TestCase):
    """functions.average must reject duplicate axes and weights whose shape
    cannot match a multi-axis reduction."""

    def _random_input(self):
        # A fresh 2x3x4 sample per test, same distribution as before.
        return numpy.random.uniform(-1, 1, 24).reshape(2, 3, 4).astype(self.dtype)

    def test_duplicate_value(self):
        data = self._random_input()
        with self.assertRaises(ValueError):
            functions.average(data, axis=(0, 0))

    def test_duplicate_value_negative(self):
        # axis 1 and -2 name the same dimension of a rank-3 array.
        data = self._random_input()
        with self.assertRaises(ValueError):
            functions.average(data, axis=(1, -2))

    def test_weights_and_axis(self):
        data = self._random_input()
        weights = numpy.random.uniform(-1, 1, 6).reshape(2, 3).astype(self.dtype)
        with self.assertRaises(ValueError):
            functions.average(data, axis=(0, 1), weights=weights)
testing.run_module(__name__, __file__)
|
shastah/spacewalk | backend/server/rhnHandler.py | Python | gpl-2.0 | 3,217 | 0.000311 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.common.rhnTranslate import _
from spacewalk.common.RPC_Base import RPC_Base
from spacewalk.server import rhnServer
# extend the RPC_Base base class
class rhnHandler(RPC_Base):
    """Base XML-RPC handler that authenticates clients by system certificate.

    Subclasses tune the boolean/flag members set in __init__ before calling
    auth_system().
    """
    def __init__(self):
        RPC_Base.__init__(self)
        # extra class members we handle
        self.server = None
        self.server_id = None
        # XXX Some subclasses set this as a string, others as an rhnUser
        self.user = None
        # defaults that can be easily overridden through assignment of self.*
        # do we load the user information (seldomly needed)
        self.load_user = 0
        # do we check for entitlement of the server
        self.check_entitlement = 1
        # do we attempt throttling
        self.throttle = CFG.THROTTLE
        # attempt quality of service checks
        self.set_qos = CFG.QOS
        # do we update the checkin counters
        self.update_checkin = 1
    # Authenticate a system based on the certificate. There are a lot
    # of modifiers that can be set before this function is called (see
    # the __init__ function for this class).
    def auth_system(self, system_id):
        """Validate *system_id* (a client certificate) and return the server.

        Side effects, gated by the flags above: updates the checkin time,
        verifies entitlement, sets self.server/self.server_id/self.user, and
        applies throttling/QOS. Raises rhnFault 9 on a bad certificate and
        rhnFault 31 when the profile is not entitled.
        """
        log_debug(3)
        server = rhnServer.get(system_id, load_user=self.load_user)
        if not server:
            # Invalid server certificate.
            raise rhnFault(9, _(
                "Please run rhn_register as root on this client"))
        self.server_id = server.getid()
        self.server = server
        # update the latest checkin time
        if self.update_checkin:
            server.checkin()
        # is the server entitled?
        if self.check_entitlement:
            entitlements = server.check_entitlement()
            if not entitlements: # we require entitlement for this functionality
                log_error("Server Not Entitled", self.server_id)
                raise rhnFault(31, _(
                    'Service not enabled for system profile: "%s"')
                    % server.server["name"])
        # Kind of poking where we shouldn't, but what the hell
        if self.load_user and self.user is not None:
            self.user = server.user.username
        else:
            self.user = None
        # Normalize: downstream code expects a string, never None.
        if self.user is None:
            self.user = ""
        # Throttle users if necessary
        if self.throttle:
            server.throttle()
        # Set QOS
        if self.set_qos:
            server.set_qos()
        return server
|
rkmaddox/mne-python | mne/io/nirx/nirx.py | Python | bsd-3-clause | 19,728 | 0 | # Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)
from configparser import ConfigParser, RawConfigParser
import glob as glob
import re as re
import os.path as op
import datetime as dt
import json
import numpy as np
from ..base import BaseRaw
from ..utils import _mult_cal_one
from ..constants import FIFF
from ..meas_info import create_info, _format_dig_points
from ...annotations import Annotations
from ...source_space import get_mni_fiducials
from ...transforms import apply_trans, _get_trans
from ...utils import (logger, verbose, fill_doc, warn, _check_fname,
_validate_type, _check_option, _mask_to_onsets_offsets)
@fill_doc
def read_raw_nirx(fname, saturated='annotate', preload=False, verbose=None):
    """Reader for a NIRX fNIRS recording.

    Parameters
    ----------
    fname : str
        Path to the NIRX data folder or header file.
    %(saturated)s
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawNIRX
        A Raw object containing NIRX data.

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.

    Notes
    -----
    %(nirx_notes)s
    """
    # Thin convenience wrapper: all parsing/validation happens in the
    # RawNIRX constructor. The %(...)s placeholders above are expanded by
    # the @fill_doc decorator and must be preserved verbatim.
    return RawNIRX(fname, saturated, preload, verbose)
def _open(fname):
return open(fname, 'r', encoding='latin-1')
@fill_doc
class RawNIRX(BaseRaw):
"""Raw object from a NIRX fNIRS file.
Parameters
----------
fname : str
Path to the NIRX data folder or header file.
%(saturated)s
%(preload)s
%(verbose)s
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
Notes
-----
%(nirx_notes)s
"""
@verbose
def __init__(self, fname, saturated, preload=False, verbose=None):
from ...externals.pymatreader import read_mat
logger.info('Loading %s' % fname)
_validate_type(fname, 'path-like', 'fname')
_validate_type(saturated, str, 'saturated')
_check_option('saturated', saturated, ('annotate', 'nan', 'ignore'))
fname = str(fname)
if fname.endswith('.hdr'):
fname = op.dirname(op.abspath(fname))
fname = _check_fname(fname, 'read', True, 'fname', need_dir=True)
json_config = glob.glob('%s/*%s' % (fname, "config.json"))
if len(json_config):
is_aurora = True
else:
is_aurora = False
if is_aurora:
# NIRSport2 devices using Aurora software
keys = ('hdr', 'config.json', 'description.json',
'wl1', 'wl2', 'probeInfo.mat', 'tri')
else:
# NIRScout devices and NIRSport1 devices
keys = ('hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2',
'config.txt', 'probeInfo.mat')
n_dat = len(glob.glob('%s/*%s' % (fname, 'dat')))
if n_dat != 1:
warn("A single dat file was expected in the specified path, "
f"but got {n_dat}. This may indicate that the file "
"structure has been modified since the measurement "
"was saved.")
# Check if required files exist and store names for later use
files = dict()
nan_mask = dict()
for key in keys:
files[key] = glob.glob('%s/*%s' % (fname, key))
fidx = 0
if len(files[key]) != 1:
if key not in ('wl1', 'wl2'):
raise RuntimeError(
f'Need one {key} file, got {len(files[key])}')
noidx = np.where(['nosatflags_' in op.basename(x)
for x in files[key]])[0]
if len(noidx) != 1 or len(files[key]) != 2:
raise RuntimeError(
f'Need one nosatflags and one standard {key} file, '
f'got {len(files[key])}')
# Here two files have been found, one that is called
# no sat flags. The nosatflag file has no NaNs in it.
noidx = noidx[0]
if saturated == 'ignore':
# Ignore NaN and return values
fidx = noidx
elif saturated == 'nan':
# Return NaN
fidx = 0 if noidx == 1 else 1
else:
assert saturated == 'annotate' # guaranteed above
fidx = noidx
nan_mask[key] = files[key][0 if noidx == 1 else 1]
files[key] = files[key][fidx]
# Read number of rows/samples of wavelength data
with _open(files['wl1']) as fid:
last_sample = fid.read().count('\n') - 1
# Read header file
# The header file isn't compliant with the configparser. So all the
# text between comments must be removed before passing to parser
with _open(files['hdr']) as f:
hdr_str_all = f.read()
hdr_str = re.sub('#.*?#', '', hdr_str_all, flags=re.DOTALL)
if is_aurora:
hdr_str = re.sub('(\\[DataStructure].*)', '',
hdr_str, flags=re.DOTALL)
hdr = RawConfigParser()
hdr.read_string(hdr_str)
# Check that the file format version is supported
if is_aurora:
# We may need to ease this requirement back
if hdr['GeneralInfo']['Version'] not in ['2021.4.0-34-ge9fdbbc8']:
warn("MNE has not been tested with Aurora version "
f"{hdr['GeneralI | nfo']['Version']}")
else:
if hdr['GeneralInfo']['NIRStar'] not in ['"15.0"', '"15.2"',
'"15.3"']:
raise RuntimeError('MNE does not support thi | s NIRStar version'
' (%s)' % (hdr['GeneralInfo']['NIRStar'],))
if "NIRScout" not in hdr['GeneralInfo']['Device'] \
and "NIRSport" not in hdr['GeneralInfo']['Device']:
warn("Only import of data from NIRScout devices have been "
"thoroughly tested. You are using a %s device. " %
hdr['GeneralInfo']['Device'])
# Parse required header fields
# Extract measurement date and time
if is_aurora:
datetime_str = hdr['GeneralInfo']['Date']
else:
datetime_str = hdr['GeneralInfo']['Date'] + \
hdr['GeneralInfo']['Time']
meas_date = None
# Several formats have been observed so we try each in turn
for dt_code in ['"%a, %b %d, %Y""%H:%M:%S.%f"',
'"%a, %d %b %Y""%H:%M:%S.%f"']:
try:
meas_date = dt.datetime.strptime(datetime_str, dt_code)
meas_date = meas_date.replace(tzinfo=dt.timezone.utc)
break
except ValueError:
pass
if meas_date is None:
warn("Extraction of measurement date from NIRX file failed. "
"This can be caused by files saved in certain locales. "
"Please report this as a github issue. "
"The date is being set to January 1st, 2000, "
"instead of {}".format(datetime_str))
meas_date = dt.datetime(2000, 1, 1, 0, 0, 0,
tzinfo=dt.timezone.utc)
# Extract frequencies of light used by machine
if is_aurora:
fnirs_wavelengths = [760, 850]
else:
fnirs_wavelengths = [int(s) for s in
re.findall(r'(\d+)',
hdr['ImagingParameters'][
'Wavelengths'])]
# Extract source-detectors
if is_aurora:
sources = re.findall(r'(\d+)-\d+', hdr_str_all.split("\n")[-2])
detectors = re.findall(r'\d+-(\d+)', hdr_str_all.split("\n")[-2])
sources = [int(s) + 1 for s in sources]
detectors = [int(d) + 1 for d in detectors]
else:
sources = np.asarray([int(s) for s in
re. |
Teamxrtc/webrtc-streaming-node | third_party/depot_tools/gcl.py | Python | mit | 52,013 | 0.011151 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""\
Wrapper script around Rietveld's upload.py that simplifies working with groups
of files.
"""
import json
import optparse
import os
import random
import re
import ssl
import string
import sys
import tempfile
import time
import urllib2
import breakpad # pylint: disable=W0611
import auth
import fix_encoding
import gclient_utils
import git_cl
import presubmit_support
import rietveld
from scm import SVN
import subprocess2
from third_party import upload
# Version reported by `gcl --version`.
__version__ = '1.2.1'
CODEREVIEW_SETTINGS = {
  # To make gcl send reviews to a server, check in a file named
  # "codereview.settings" (see |CODEREVIEW_SETTINGS_FILE| below) to your
  # project's base directory and add the following line to codereview.settings:
  # CODE_REVIEW_SERVER: codereview.yourserver.org
}
# globals that store the root of the current repository and the directory where
# we store information about changelists.
REPOSITORY_ROOT = ""
# Printed when people upload patches using svn.
SWITCH_TO_GIT = """You're using svn to work on depot_tools.
Consider switching to git today, so that you're ready when svn stops working
and you need a functional checkout for a future fire."""
# Filename where we store repository specific information for gcl.
CODEREVIEW_SETTINGS_FILE = "codereview.settings"
CODEREVIEW_SETTINGS_FILE_NOT_FOUND = (
    'No %s file found. Please add one.' % CODEREVIEW_SETTINGS_FILE)
# Warning message when the change appears to be missing tests.
MISSING_TEST_MSG = "Change contains new or modified methods, but no new tests!"
# Global cache of files cached in GetCacheDir().
FILES_CACHE = {}
# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
# "$^" can never match, so by default nothing is excluded from linting.
DEFAULT_LINT_IGNORE_REGEX = r"$^"
def CheckHomeForFile(filename):
  """Looks for *filename* in the user's home directory.

  Returns the expanded path when the file exists, None otherwise.
  """
  candidate = os.path.expanduser(os.path.join('~', filename))
  return candidate if os.path.exists(candidate) else None
def UnknownFiles():
  """Runs svn status and returns the files svn reports as unknown ('?')."""
  unknown = []
  for item in SVN.CaptureStatus([], GetRepositoryRoot()):
    # item is (status, filename); the first status column is '?' for
    # unversioned files.
    if item[0][0] == '?':
      unknown.append(item[1])
  return unknown
def GetRepositoryRoot():
  """Returns the top level directory of the current repository.

  The directory is returned as an absolute path and memoized in the
  REPOSITORY_ROOT module global.
  """
  global REPOSITORY_ROOT
  if REPOSITORY_ROOT:
    return REPOSITORY_ROOT
  REPOSITORY_ROOT = SVN.GetCheckoutRoot(os.getcwd())
  if not REPOSITORY_ROOT:
    raise gclient_utils.Error("gcl run outside of repository")
  return REPOSITORY_ROOT
def GetInfoDir():
  """Returns the directory where gcl info files are stored.

  Lives inside the checkout's .svn metadata directory.
  """
  return os.path.join(GetRepositoryRoot(), '.svn', 'gcl_info')
def GetChangesDir():
  """Returns the directory where gcl change files are stored."""
  return os.path.join(GetInfoDir(), 'changes')
def GetCacheDir():
  """Returns the directory where gcl caches repository files.

  (Used by GetCachedFile(); not the per-change storage, see GetChangesDir.)
  """
  return os.path.join(GetInfoDir(), 'cache')
def GetCachedFile(filename, max_age=60*60*24*3, use_root=False):
  """Retrieves a file from the repository and caches it in GetCacheDir() for
  max_age seconds.

  use_root: If False, look up the arborescence for the first match, otherwise go
  directory to the root repository.

  Note: The cache will be inconsistent if the same file is retrieved with both
  use_root=True and use_root=False. Don't be stupid.
  """
  if filename not in FILES_CACHE:
    # Don't try to look up twice.
    FILES_CACHE[filename] = None
    # First we check if we have a cached version.
    try:
      cached_file = os.path.join(GetCacheDir(), filename)
    except (gclient_utils.Error, subprocess2.CalledProcessError):
      return None
    if (not os.path.exists(cached_file) or
        (time.time() - os.stat(cached_file).st_mtime) > max_age):
      # On-disk cache is missing or stale: fetch via `svn cat`, walking up
      # from the current URL towards the repository root until found.
      dir_info = SVN.CaptureLocalInfo([], '.')
      repo_root = dir_info['Repository Root']
      if use_root:
        url_path = repo_root
      else:
        url_path = dir_info['URL']
      while True:
        # Look in the repository at the current level for the file.
        # Retry up to 5 times per level to ride out transient svn errors.
        for _ in range(5):
          content = None
          try:
            # Take advantage of the fact that svn won't output to stderr in case
            # of success but will do in case of failure so don't mind putting
            # stderr into content_array.
            content_array = []
            svn_path = url_path + '/' + filename
            args = ['svn', 'cat', svn_path]
            if sys.platform != 'darwin':
              # MacOSX 10.5.2 has a bug with svn 1.4.4 that will trigger the
              # 'Can\'t get username or password' and can be fixed easily.
              # The fix doesn't work if the user upgraded to svn 1.6.x. Bleh.
              # I don't have time to fix their broken stuff.
              args.append('--non-interactive')
            gclient_utils.CheckCallAndFilter(
                args, cwd='.', filter_fn=content_array.append)
            # Exit the loop if the file was found. Override content.
            content = '\n'.join(content_array)
            break
          except (gclient_utils.Error, subprocess2.CalledProcessError):
            if content_array[0].startswith(
                'svn: Can\'t get username or password'):
              ErrorExit('Your svn credentials expired. Please run svn update '
                        'to fix the cached credentials')
            if content_array[0].startswith('svn: Can\'t get password'):
              ErrorExit('If are using a Mac and svn --version shows 1.4.x, '
                  'please hack gcl.py to remove --non-interactive usage, it\'s'
                  'a bug on your installed copy')
            if (content_array[0].startswith('svn: File not found:') or
                content_array[0].endswith('path not found')):
              break
            # Otherwise, fall through to trying again.
          if content:
            break
        if url_path == repo_root:
          # Reached the root. Abandoning search.
          break
        # Go up one level to try again.
        url_path = os.path.dirname(url_path)
      if content is not None or filename != CODEREVIEW_SETTINGS_FILE:
        # Write a cached version even if there isn't a file, so we don't try to
        # fetch it each time. codereview.settings must always be present so do
        # not cache negative.
        gclient_utils.FileWrite(cached_file, content or '')
    else:
      content = gclient_utils.FileRead(cached_file, 'r')
    # Keep the content cached in memory.
    FILES_CACHE[filename] = content
  return FILES_CACHE[filename]
def GetCodeReviewSetting(key):
  """Returns a value for the given key for this repository."""
  # '__just_initialized' is a sentinel marking that the settings file has
  # already been parsed into CODEREVIEW_SETTINGS.
  if '__just_initialized' not in CODEREVIEW_SETTINGS:
    content = GetCachedFile(CODEREVIEW_SETTINGS_FILE)
    if content:
      parsed = gclient_utils.ParseCodereviewSettingsContent(content)
      CODEREVIEW_SETTINGS.update(parsed)
    CODEREVIEW_SETTINGS.setdefault('__just_initialized', None)
  return CODEREVIEW_SETTINGS.get(key, "")
def Warn(msg):
  """Prints a warning message to stderr."""
  sys.stderr.write('%s\n' % (msg,))
def ErrorExit(msg):
  """Prints the message to stderr and exits the process with status 1."""
  sys.stderr.write('%s\n' % (msg,))
  sys.exit(1)
def RunShellWithReturnCode(command, print_output=False):
"""Executes a command and returns the output and the return code."""
p = subprocess2.Popen(
command,
cwd=GetRepositoryRoot(),
stdout=subprocess2.PIPE,
stderr=subprocess2.STDOUT,
universal_newlines=True)
if print_output:
output_array = []
| while True:
line = p.stdout.readline()
if not line:
break
if print_output:
print line.strip('\n')
output_array.append(line)
output = "".join(output_array)
else:
output = | p.stdout.read()
p.wait()
p.stdout.close()
return output, p.returncode
def RunShell(command, print_output=False):
"""Executes a |
sebp/scikit-survival | tests/test_forest.py | Python | gpl-3.0 | 9,388 | 0.001704 | import numpy
from numpy.testing import assert_array_almost_equal
import pytest
from sklearn.pipeline import make_pipeline
from sksurv.datasets import load_breast_cancer
from sksurv.ensemble import ExtraSurvivalTrees, RandomSurvivalForest
from sksurv.preprocessing import OneHotEncoder
from sksurv.testing import assert_cindex_almost_equal
# Forest estimators exercised by the parametrized tests below.
FORESTS = [
    RandomSurvivalForest,
    ExtraSurvivalTrees,
]
@pytest.mark.parametrize(
    'forest_cls, expected_c',
    [(RandomSurvivalForest, (0.9026201280123488, 67831, 7318, 0, 14)),
     (ExtraSurvivalTrees, (0.8389200122423452, 63044, 12105, 0, 14))]
)
def test_fit_predict(make_whas500, forest_cls, expected_c):
    """Fitting on WHAS500 gives finite, non-negative risk scores and the
    expected concordance index."""
    data = make_whas500(to_numeric=True)
    model = forest_cls(random_state=2)
    model.fit(data.x, data.y)
    assert len(model.estimators_) == 100
    risk = model.predict(data.x)
    assert numpy.isfinite(risk).all()
    assert numpy.all(risk >= 0)
    assert_cindex_almost_equal(
        data.y["fstat"], data.y["lenfol"], risk, expected_c)
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_int_time(make_whas500, forest_cls):
    """Fitting with integer-typed event times must produce exactly the same
    model as fitting with the original float-typed times."""
    whas500 = make_whas500(to_numeric=True)
    y = whas500.y
    # Re-pack the structured outcome array with an int time field.
    y_int = numpy.empty(y.shape[0],
                        dtype=[(y.dtype.names[0], bool), (y.dtype.names[1], int)])
    y_int[:] = y
    forest_f = forest_cls(oob_score=True, random_state=2).fit(whas500.x[50:], y[50:])
    forest_i = forest_cls(oob_score=True, random_state=2).fit(whas500.x[50:], y_int[50:])
    assert len(forest_f.estimators_) == len(forest_i.estimators_)
    assert forest_f.n_features_in_ == forest_i.n_features_in_
    assert forest_f.oob_score_ == forest_i.oob_score_
    assert_array_almost_equal(forest_f.event_times_, forest_i.event_times_)
    pred_f = forest_f.predict(whas500.x[:50])
    pred_i = forest_i.predict(whas500.x[:50])
    assert_array_almost_equal(pred_f, pred_i)
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_predict_chf(make_whas500, forest_cls):
    """Cumulative hazard curves start at 0 for most samples and are
    finite, non-negative and monotonically non-decreasing."""
    data = make_whas500(to_numeric=True)
    model = forest_cls(n_estimators=10, random_state=2)
    model.fit(data.x, data.y)
    assert len(model.estimators_) == 10
    chf = model.predict_cumulative_hazard_function(data.x, return_array=True)
    assert chf.shape == (500, model.event_times_.shape[0])
    assert numpy.isfinite(chf).all()
    assert numpy.all(chf >= 0.0)
    first_col_vals, first_col_counts = numpy.unique(chf[:, 0], return_counts=True)
    assert first_col_vals[0] == 0.0
    assert numpy.max(first_col_counts) == first_col_counts[0]
    # numpy.diff along axis 1 is equivalent to apply_along_axis(diff, 1, chf).
    assert (numpy.diff(chf, axis=1) >= 0).all()
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_predict_surv(make_whas500, forest_cls):
    """Survival curves start at 1 for most samples, stay within [0, 1] and
    are monotonically non-increasing."""
    data = make_whas500(to_numeric=True)
    model = forest_cls(n_estimators=10, random_state=2)
    model.fit(data.x, data.y)
    assert len(model.estimators_) == 10
    surv = model.predict_survival_function(data.x, return_array=True)
    assert surv.shape == (500, model.event_times_.shape[0])
    assert numpy.isfinite(surv).all()
    assert numpy.all(surv >= 0.0)
    assert numpy.all(surv <= 1.0)
    first_col_vals, first_col_counts = numpy.unique(surv[:, 0], return_counts=True)
    assert first_col_vals[-1] == 1.0
    assert numpy.max(first_col_counts) == first_col_counts[-1]
    # numpy.diff along axis 1 is equivalent to apply_along_axis(diff, 1, surv).
    assert (numpy.diff(surv, axis=1) <= 0).all()
@pytest.mark.parametrize(
    'forest_cls, expected_oob_score',
    [(RandomSurvivalForest, 0.753010685),
     (ExtraSurvivalTrees, 0.752092510)]
)
def test_oob_score(make_whas500, forest_cls, expected_oob_score):
    """OOB scoring requires bootstrap=True and reproduces a fixed score."""
    data = make_whas500(to_numeric=True)
    est = forest_cls(oob_score=True, bootstrap=False, random_state=2)
    with pytest.raises(ValueError,
                       match="Out of bag estimation only available "
                             "if bootstrap=True"):
        est.fit(data.x, data.y)
    est.set_params(bootstrap=True)
    est.fit(data.x, data.y)
    assert est.oob_prediction_.shape == (data.x.shape[0],)
    assert round(abs(est.oob_score_ - expected_oob_score), 6) == 0.0
@pytest.mark.parametrize('forest_cls', FORESTS)
@pytest.mark.parametrize("func", ["predict_survival_function", "predict_cumulative_hazard_function"])
def test_predict_step_function(make_whas500, forest_cls, func):
    """Array output and step-function output of the predictors must agree."""
    data = make_whas500(to_numeric=True)
    est = forest_cls(n_estimators=10, random_state=2)
    est.fit(data.x[10:], data.y[10:])
    predict = getattr(est, func)
    as_array = predict(data.x[:10], return_array=True)
    as_fns = predict(data.x[:10], return_array=False)
    assert as_array.shape[0] == as_fns.shape[0]
    for step_fn, row in zip(as_fns, as_array):
        assert_array_almost_equal(step_fn.x, est.event_times_)
        assert_array_almost_equal(step_fn.y, row)
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_oob_too_little_estimators(make_whas500, forest_cls):
    """With only 3 trees some samples are never out-of-bag, so fitting must
    warn about unreliable OOB estimates."""
    data = make_whas500(to_numeric=True)
    est = forest_cls(n_estimators=3, oob_score=True, random_state=2)
    expected_msg = ("Some inputs do not have OOB scores. "
                    "This probably means too few trees were used "
                    "to compute any reliable oob estimates.")
    with pytest.warns(UserWarning, match=expected_msg):
        est.fit(data.x, data.y)
def test_fit_no_bootstrap(make_whas500):
    """Without bootstrapping, RandomSurvivalForest reproduces a fixed
    concordance index on WHAS500."""
    data = make_whas500(to_numeric=True)
    model = RandomSurvivalForest(n_estimators=10, bootstrap=False, random_state=2)
    model.fit(data.x, data.y)
    risk = model.predict(data.x)
    expected_c = (0.931881994437717, 70030, 5119, 0, 14)
    assert_cindex_almost_equal(
        data.y["fstat"], data.y["lenfol"], risk, expected_c)
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_warm_start(make_whas500, forest_cls):
    """warm_start=True only adds trees when n_estimators grows; shrinking
    it is an error, and refitting without growth warns."""
    data = make_whas500(to_numeric=True)
    est = forest_cls(n_estimators=11, max_depth=2, random_state=2)
    est.fit(data.x, data.y)
    assert len(est.estimators_) == 11
    assert all(tree.max_depth == 2 for tree in est.estimators_)
    est.set_params(warm_start=True)
    with pytest.warns(UserWarning, match="Warm-start fitting without increasing "
                                         "n_estimators does not fit new trees."):
        est.fit(data.x, data.y)
    est.set_params(n_estimators=3)
    with pytest.raises(ValueError,
                       match="n_estimators=3 must be larger or equal to "
                             r"len\(estimators_\)=11 when warm_start==True"):
        est.fit(data.x, data.y)
    est.set_params(n_estimators=23)
    est.fit(data.x, data.y)
    assert len(est.estimators_) == 23
    assert all(tree.max_depth == 2 for tree in est.estimators_)
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_with_small_max_samples(make_whas500, forest_cls):
    """Restricting max_samples to 2 must produce a strictly smaller tree
    than fitting without any restriction."""
    data = make_whas500(to_numeric=True)
    # First fit with no restriction on max samples.
    unrestricted = forest_cls(n_estimators=1, random_state=1, max_samples=None)
    # Second fit with max samples restricted to just 2.
    restricted = forest_cls(n_estimators=1, random_state=1, max_samples=2)
    unrestricted.fit(data.x, data.y)
    restricted.fit(data.x, data.y)
    full_tree = unrestricted.estimators_[0].tree_
    small_tree = restricted.estimators_[0].tree_
    msg = "Tree without `max_samples` restriction should have more nodes"
    assert full_tree.node_count > small_tree.node_count, msg
@pytest.mark.parametrize('forest_cls', FORESTS)
@pytest.mark.parametrize("func", ["predict_survival_function", "predict_cumulative_hazard_function"])
def test_pipeline_predict(breast_cancer, forest_cls, func):
X_str, _ = load_breast_cancer()
X_num, y = breast_cancer
est = forest_cls(n_estimators=10, random_state=1)
est.fit(X_num[10:], y[10:])
pipe = make_pipeline(OneHotEncoder(), forest_cls(n_estimators=10, random_state=1))
pipe.fit(X_str[10:], y[10:])
tree_pred = getattr(est, func)(X_num[:10], return_array=True)
pipe_pred = getattr(pipe, func)(X_str[:10], return_array=True)
assert_array_almost_equal(tree_pred, pipe_pred)
@pytest.mark.parametrize('forest_cls', F |
thorbenk/knossos-svn | tools/kconfig.py | Python | gpl-2.0 | 9,251 | 0.003567 | #
# This file is a part of KNOSSOS.
#
# (C) Copyright 2007-2011
# Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V.
#
# KNOSSOS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 of
# the License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
# For further information, visit http://www.knossostool.org or contact
# Joergen.Kornfeld@mpimf-heidelberg.mpg.de or
# Fabian.Svara@mpimf-heidelberg.mpg.de
#
import math, re, glob, os
class KnossosConfig():
    def __init__(self):
        # Regexes extracting the cube grid coordinates from raw file paths.
        self.xReg = re.compile('_x(\d*)')
        self.yReg = re.compile('_y(\d*)')
        self.zReg = re.compile('_z(\d*)')
        # Magnification suffix of the dataset directory name (e.g. "..._mag2/").
        self.magReg = re.compile('_mag(\d*)([\\/]$|$)')
        # Experiment name portion of a raw cube file name.
        self.expNameReg = re.compile(r'([^\\/]*)_x\d{4}_y\d{4}_z\d{4}.raw')
        # Patterns for the individual knossos.conf entries.
        self.namePattern = re.compile('experiment name \"(.*)\";')
        self.scaleXPattern = re.compile('scale x (.*\..*);')
        self.scaleYPattern = re.compile('scale y (.*\..*);')
        self.scaleZPattern = re.compile('scale z (.*\..*);')
        self.boundaryXPattern = re.compile('boundary x (.*);')
        self.boundaryYPattern = re.compile('boundary y (.*);')
        self.boundaryZPattern = re.compile('boundary z (.*);')
        self.magnificationPattern = re.compile('magnification (.*);')
def generate(self, path):
# try to read magnification factor from directory name
try:
magnification = self.magReg.search(path).groups()[0]
except AttributeError:
magnification = None
# read files in current directory
files = glob.glob(str(path) + "/x????/y????/z????/*.raw");
try:
filesize = float(os.stat(files[0]).st_size)
except (IndexError, OSError):
raise DataError("Error determining file size")
return
edgeLength = int(round(math.pow(filesize, 1. / 3.), 0))
try:
name = self.expNameReg.search(files[0]).groups()[0]
| except AttributeError:
raise DataError("Error matching file name")
return
xlen_datacubes, ylen_datacubes, zlen_datacubes = 0, 0, 0
for file in files:
try:
x = int(self.xReg.search(file).groups()[0])
y = int(self.yReg.search(file).groups()[0])
z = int(self.zReg.search(file).groups()[0])
except AttributeError:
raise DataError("Error matching file name")
return
| if x > xlen_datacubes and x < 9999:
xlen_datacubes = x
if y > ylen_datacubes and y < 9999:
ylen_datacubes = y
if z > zlen_datacubes and z < 9999:
zlen_datacubes = z
xlen_px = (xlen_datacubes + 1) * edgeLength
ylen_px = (ylen_datacubes + 1) * edgeLength
zlen_px = (zlen_datacubes + 1) * edgeLength
return name, (xlen_px, ylen_px, zlen_px), magnification
def read(self, path):
kconfigpath = os.path.abspath(path + "/knossos.conf")
try:
kFile = open(kconfigpath)
except IOError:
try:
name, boundaries, magnification = self.generate(path)
except DataError:
raise
return
configInfo = [True,
name,
path,
(None, None, None),
boundaries,
magnification,
""]
return configInfo
else:
configText = kFile.read()
kFile.close()
namePatternResult = self.namePattern.search(configText)
scaleXPatternResult = self.scaleXPattern.search(configText)
scaleYPatternResult = self.scaleYPattern.search(configText)
scaleZPatternResult = self.scaleZPattern.search(configText)
boundaryXPatternResult = self.boundaryXPattern.search(configText)
boundaryYPatternResult = self.boundaryYPattern.search(configText)
boundaryZPatternResult = self.boundaryZPattern.search(configText)
magnificationPatternResult = self.magnificationPattern.search(configText)
try:
name = namePatternResult.groups()[0]
except (AttributeError, ValueError):
name = ""
try:
scaleX = float(scaleXPatternResult.groups()[0])
except (AttributeError, ValueError):
scaleX = None
try:
scaleY = float(scaleYPatternResult.groups()[0])
except (AttributeError, ValueError):
scaleY = None
try:
scaleZ = float(scaleZPatternResult.groups()[0])
except (AttributeError, ValueError):
scaleZ = None
try:
boundaryX = int(boundaryXPatternResult.groups()[0])
except (AttributeError, ValueError):
boundaryX = 0
try:
boundaryY = int(boundaryYPatternResult.groups()[0])
except (AttributeError, ValueError):
boundaryY = 0
try:
boundaryZ = int(boundaryZPatternResult.groups()[0])
except (AttributeError, ValueError):
boundaryZ = 0
try:
magnification = int(magnificationPatternResult.groups()[0])
except (AttributeError, ValueError):
magnification = None
# [is incomplete?, name, path, scales, boundary, magnification, original config file contents]
configInfo = [False,
name,
path,
(scaleX, scaleY, scaleZ),
(boundaryX, boundaryY, boundaryZ),
magnification,
configText]
return configInfo
def write(self, configInfo):
try:
source = configInfo["Source"]
name = configInfo["Name"]
scales = configInfo["Scales"]
boundaries = configInfo["Boundaries"]
path = configInfo["Path"]
magnification = configInfo["Magnification"]
except KeyError:
return False
if self.namePattern.search(source):
source = self.namePattern.sub("experiment name \"%s\";" % name, source)
else:
source = source + "\nexperiment name \"%s\";" % name
if self.scaleXPattern.search(source):
source = self.scaleXPattern.sub("scale x %s;" % str(float(scales[0])), source)
else:
source = source + "\nscale x %s;" % str(float(scales[0]))
if self.scaleYPattern.search(source):
source = self.scaleYPattern.sub("scale y %s;" % str(float(scales[1])), source)
else:
source = source + "\nscale y %s;" % str(float(scales[1]))
if self.scaleZPattern.search(source):
source = self.scaleZPattern.sub("scale z %s;" % str(float(scales[2])), source)
else:
source = source + "\nscale z %s;" % str(float(scales[2]))
if self.boundaryXPattern.search(source):
source = self.boundaryXPattern.sub("boundary x %d;" % boundaries[0], source)
else:
source = source + "\nboundary x %d;" % boundaries[0]
if self.boundaryYPattern.search(source):
source = self.boundaryYPattern.sub("boundary y %d;" % boundaries[1], source)
else:
source = source + "\nboundary y %d;" % boundaries[1]
if self.boundaryZPattern.search(source):
source = self.boundaryZPattern.sub("boundary z %d;" % boundaries[2], source)
else:
source = source + "\nboundary z %d;" % boundaries[2]
if self |
PythonScanClient/PyScanClient | Test/test_alignment.py | Python | epl-1.0 | 1,016 | 0.002953 | """Unit test of the AlignmentScan
@author: Kay Kasemir
"""
from __future__ import print_function
import unittest
from scan.commands import Set, CommandSequence
from scan.alignment import AlignmentScan
class AlignmentTest(unittest.TestCase):
    def testBasics(self):
        # Scan motor_x over [0, 10] in 0.5 steps, dwelling 0.5 s per point,
        # logging 'signal' and locating the peak at the end of the scan.
        align = AlignmentScan("motor_x", 0, 10, 0.5, "seconds", 0.5, "signal",
                              pre=Set("motor_y", 3),
                              find_command="FindPeak")
        cmds = align.createScan()
        print(CommandSequence(cmds))
        expected = ("[Set('Demo:CS:Scan:Fit:Height', 0), Set('motor_y', 3), "
                    "Loop('motor_x', 0, 10, 0.5, [ Delay(0.5), Log('signal', 'motor_x'), "
                    "Script('WriteDataToPV', 'motor_x', 'Demo:CS:Scan:Fit:Data:X'), "
                    "Script('WriteDataToPV', 'signal', 'Demo:CS:Scan:Fit:Data:Y', '-', '1') ]), "
                    "Script('FindPeak', 'motor_x', 'signal', '-', '1', 'Demo:CS:Scan:Fit:Pos', "
                    "'Demo:CS:Scan:Fit:Height', 'Demo:CS:Scan:Fit:Width')]")
        self.assertEqual(str(cmds), expected)

if __name__ == "__main__":
    unittest.main()
import numpy as np
import hcn
from IPython import embed, get_ipython
import vtk_visualizer as vv

if __name__ == "__main__":
    # Load the model and crop it to the region of interest.
    model = hcn.Model3D.from_file("b4.obj", "m")
    model = model.select_x(0, 1)
    model = model.select_y(0, 1)
    model = model.select_z(0.01, 1)
    vv.plotxyz(model.to_array(), block=True)
    # Smooth the point cloud and show it next to the original.
    smoothed = model.smoothed(knn=160, order=1)
    vv.plotxyz(smoothed.to_array(), block=True)
    vv.plotxyz(model.to_array(), block=True)
    # Earlier experiments kept for reference:
    # ch = model.get_convex_hull()
    # vv.plotxyz(ch.to_array(), block=True)
    # new = smoothed.sampled(0.01)
    # vv.plotxyz(new.to_array(), block=True)
    # nn = model.compute_normals(20, 2)
    # a = nn.to_array()
    # b = nn.normals_to_array()
    # res = np.hstack((a, b))
    # vv.plothh(res, block=True)
    # s = model.create_surface_model(0.1)
    # embed()
mkyl/Physical-Web-CMS-Server | server/presentation.py | Python | apache-2.0 | 5,186 | 0.005594 | import os
import json
import urllib.request
import mimetypes
from pathlib import Path
from distutils.dir_util import copy_tree
HTML_PAGE_HEADER = '<!DOCTYPE html> \n <html> \n <head> \
<title> Physical Web Exhibit </title>\
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/shoelace-css/1.0.0-beta13/shoelace.css"> \
<style> \
:root { \
--body-color: black; \
--body-bg-color: white; \
}\
</style> \
</head> <body style="width: 60rem" class="padding-small">'
HTML_PAGE_FOOTER = '<script src="https://cdnjs.cloudflare.com/ajax/libs/shoelace-css/1.0.0-beta13/shoelace.js"></script> \
</body> \n </html>'
def _decorate_h1(text):
return "<h1>" + text + "</h1>"
def _decorate_subtitle(text):
return "<h3> <em>" + text + "</em> </h3>"
def _decorate_p(text):
return "<p>" + text + "</p>"
def _decorate_unordered_list(items, item_names):
result = []
result.append("Beacons:")
result.append("<ul>")
for item in items:
result.append("<li> <a href={address}>{name}</a></li>"
.format(address=item,
name=find_beacon_name(item, item_names)))
result.append("</ul>")
result = "".join(result)
return result
def find_beacon_name(item, item_names):
# reversed because latest name updates are appended to end
for beacon in reversed(item_names):
if beacon["address"] == item:
return beacon["friendly-name"]
return None
def _decorate_content(content):
url = urllib.request.pathname2url(content)
mimetype = mimetypes.guess_type(url)[0].lower()
if mimetype.startswith("image/"):
return _decorate_image_content(content)
elif mimetype.startswith("audio/"):
return _decorate_sound_content(content)
elif mimetype.startswith("video/"):
return _decorate_video_content(content)
else:
raise ValueError("Couldn't determine filetype: " + content)
def _decorate_image_content(image):
return '<img class="margin-medium" src="{image_url}"/>\n'.format(image_url=image)
def _decorate_sound_content(sound):
return '<audio class="margin-medium" controls src="{sound_url}"></audio>\n'.format(sound_url=sound)
def _decorate_video_content(video):
return '<video class="margin-medium" controls src="{video_url}"></video>\n'.format(video_url=video)
def _build_index_page(website_metadata, beacon_friendly_names,
                      public_folder):
    """Write the exhibit landing page (index.html) into *public_folder*.

    The page shows the exhibit name and description plus a link list of
    all beacons, labelled via *beacon_friendly_names*.
    """
    index_filename = "index.html"
    index_file = os.path.join(public_folder, index_filename)
    # Create the file if it does not exist yet; it is overwritten below.
    Path(index_file).touch(exist_ok=True)
    title = website_metadata["name"]
    subtitle = website_metadata["description"]
    beacons = []
    for beacon in website_metadata["beacons"]:
        beacons.append(beacon["address"])
    # Assemble the page from the shared header/footer and decorated parts.
    page = []
    page.append(HTML_PAGE_HEADER)
    page.append(_decorate_h1(title))
    page.append(_decorate_subtitle(subtitle))
    page.append(_decorate_unordered_list(beacons, beacon_friendly_names))
    page.append(HTML_PAGE_FOOTER)
    page = ''.join(page)
    with open(index_file, 'w') as f:
        f.write(page)
def _build_content_page(beacon, beacon_friendly_names, public_folder):
    """Write one beacon's content page (index.html) into *public_folder*.

    The page is titled with the beacon's friendly name and embeds every
    entry of beacon["contents"] as an HTML media element.
    """
    content_filename = "index.html"
    content_file = os.path.join(public_folder, content_filename)
    # Create the file if it does not exist yet; it is overwritten below.
    Path(content_file).touch(exist_ok=True)
    title = find_beacon_name(beacon["address"], beacon_friendly_names)
    contents_html = []
    for content in beacon["contents"]:
        contents_html.append(_decorate_content(content))
    contents_html = ''.join(contents_html)
    page = []
    page.append(HTML_PAGE_HEADER)
    page.append(_decorate_h1(title))
    page.append(contents_html)
    page.append(HTML_PAGE_FOOTER)
    page = ''.join(page)
    with open(content_file, 'w') as f:
        f.write(page)
def _find_active_exhibit(exhibits_folder):
metadata_file = os.path.join(exhibits_folder, 'metadata.json')
with open(metadata_file) as data_fil | e:
id = json.load(data_file)["active-exhibit"]
active_exhibit_folder = os.path.join(exhibits_folder, str(id))
return active_exhibit_folder
def _find_beacon_friendly_names(exhibits_folder):
metadata_file = os.path.join(exhibits_folder, 'metadata.json')
with open(metadata_file) as data_file:
return json.load(data_file)["beacon-names"]
def _fetch_metadata(content_folder):
metadata_file = os.path.join(content_folder, 'metadata.json')
with open(metad | ata_file) as data_file:
return json.load(data_file)
def build_website(exhibits_folder, public_folder):
    """Export the active exhibit as a static website under *public_folder*.

    Copies the exhibit's files, then generates the index page and one
    content page per beacon (in a sub-folder named by beacon address).
    """
    print(" [i] Creating HTML")
    os.makedirs(public_folder, exist_ok=True)
    active_exhibit_folder = _find_active_exhibit(exhibits_folder)
    beacon_friendly_names = _find_beacon_friendly_names(exhibits_folder)
    # Copy media first so the generated pages can reference it in place.
    copy_tree(active_exhibit_folder, public_folder)
    exhibit_metadata = _fetch_metadata(active_exhibit_folder)
    _build_index_page(exhibit_metadata, beacon_friendly_names, \
                      public_folder)
    for beacon in exhibit_metadata["beacons"]:
        beacon_folder = os.path.join(public_folder, beacon["address"])
        _build_content_page(beacon, beacon_friendly_names, beacon_folder)
    print(" [✓] Website exported to " + os.path.abspath(public_folder))
aspectron/jsx | extern/v8/tools/js2c.py | Python | mit | 16,982 | 0.010894 | #!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a utility for converting JavaScript source code into C-style
# char arrays. It is used for embedded JavaScript code in the V8
# library.
import os, re, sys, string
import optparse
import jsmin
import bz2
import textwrap
class Error(Exception):
  """Raised for any processing failure in this script."""
  def __init__(self, msg):
    Exception.__init__(self, msg)


def ToCArray(byte_sequence):
  """Render *byte_sequence* as a comma-separated C array initializer.

  Each character becomes its ordinal value; the result is wrapped to
  80 columns. (Fixes shadowing of the builtin `chr` in the old loop.)
  """
  joined = ", ".join(str(ord(byte)) for byte in byte_sequence)
  return textwrap.fill(joined, 80)
def RemoveCommentsAndTrailingWhitespace(lines):
  """Strip //-comments, /* */ comments and trailing whitespace from *lines*."""
  without_line_comments = re.sub(r'//.*\n', '\n', lines)
  without_comments = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '',
                            without_line_comments)
  # Collapse trailing whitespace (and the blank lines it leaves behind).
  return re.sub(r'\s+\n+', '\n', without_comments)
def ReadFile(filename):
  """Return the entire contents of *filename*, opened in text mode."""
  # `with` closes the handle even if read() raises (replaces try/finally).
  with open(filename, "rt") as f:
    return f.read()
EVAL_PATTERN = re.compile(r'\beval\s*\(')
WITH_PATTERN = re.compile(r'\bwith\s*\(')


def Validate(lines):
  """Reject natives sources that use eval or with; return *lines* unchanged.

  Because of the simplified context the natives run in, neither construct
  is allowed.
  """
  if EVAL_PATTERN.search(lines):
    raise Error("Eval disallowed in natives.")
  if WITH_PATTERN.search(lines):
    raise Error("With statements disallowed in natives.")
  return lines
def ExpandConstants(lines, constants):
  """Substitute every (pattern, value) pair from *constants* into *lines*."""
  expanded = lines
  for pattern, value in constants:
    expanded = pattern.sub(str(value), expanded)
  return expanded
def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
  """Expand every call of *macro* (matched by *name_pattern*) in *lines*.

  Scanning starts at *pos*. Arguments are split on top-level commas only
  (nesting is tracked via (), {} and []) and each argument is passed
  through *expander* before being mapped to the macro's parameter names.
  Returns the rewritten string.
  """
  pattern_match = name_pattern.search(lines, pos)
  while pattern_match is not None:
    # Scan over the arguments
    height = 1
    start = pattern_match.start()
    end = pattern_match.end()
    assert lines[end - 1] == '('
    last_match = end
    arg_index = [0] # Wrap state into array, to work around Python "scoping"
    mapping = { }
    def add_arg(str):
      # Remember to expand recursively in the arguments
      replacement = expander(str.strip())
      mapping[macro.args[arg_index[0]]] = replacement
      arg_index[0] += 1
    while end < len(lines) and height > 0:
      # We don't count commas at higher nesting levels.
      if lines[end] == ',' and height == 1:
        add_arg(lines[last_match:end])
        last_match = end + 1
      elif lines[end] in ['(', '{', '[']:
        height = height + 1
      elif lines[end] in [')', '}', ']']:
        height = height - 1
      end = end + 1
    # Remember to add the last match.
    add_arg(lines[last_match:end-1])
    result = macro.expand(mapping)
    # Replace the occurrence of the macro with the expansion
    lines = lines[:start] + result + lines[end:]
    # Resume searching just past the inserted expansion.
    pattern_match = name_pattern.search(lines, start + len(result))
  return lines
def ExpandMacros(lines, macros):
  """Expand all macro invocations in *lines*.

  Later definitions are expanded first, so a macro may use previously
  declared macros — but not itself, and not recursively.
  """
  expanded = lines
  for name_pattern, macro in reversed(macros):
    expanded = ExpandMacroDefinition(expanded, 0, name_pattern, macro,
                                     lambda s: ExpandMacros(s, macros))
  return expanded
class TextMacro:
  """Macro whose body is literal text containing argument placeholders."""
  def __init__(self, args, body):
    self.args = args
    self.body = body
  def expand(self, mapping):
    # Plain textual substitution of each parameter name with its argument.
    expansion = self.body
    for name, value in mapping.items():
      expansion = expansion.replace(name, value)
    return expansion


class PythonMacro:
  """Macro implemented by a Python callable; its result is stringified."""
  def __init__(self, args, fun):
    self.args = args
    self.fun = fun
  def expand(self, mapping):
    return str(self.fun(*[mapping[name] for name in self.args]))
CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')


def ReadMacros(lines):
  """Parse const/macro/"python macro" declarations from *lines*.

  Returns (constants, macros): constants is a list of
  (word-boundary pattern, value) pairs, macros a list of
  (call pattern, TextMacro|PythonMacro) pairs. '#' starts a comment.
  Raises Error on any unrecognized non-empty line.
  (Fixes `len(line) is 0` identity comparison and the shadowed builtin
  `hash`; flattens the nested if/else chain with `continue` guards.)
  """
  constants = []
  macros = []
  for line in lines.split('\n'):
    comment_start = line.find('#')
    if comment_start != -1: line = line[:comment_start]
    line = line.strip()
    if len(line) == 0: continue
    const_match = CONST_PATTERN.match(line)
    if const_match:
      name = const_match.group(1)
      value = const_match.group(2).strip()
      constants.append((re.compile("\\b%s\\b" % name), value))
      continue
    macro_match = MACRO_PATTERN.match(line)
    if macro_match:
      name = macro_match.group(1)
      args = [match.strip() for match in macro_match.group(2).split(',')]
      body = macro_match.group(3).strip()
      macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body)))
      continue
    python_match = PYTHON_MACRO_PATTERN.match(line)
    if python_match:
      name = python_match.group(1)
      args = [match.strip() for match in python_match.group(2).split(',')]
      body = python_match.group(3).strip()
      # The macro body is trusted build-system input, evaluated as a lambda.
      fun = eval("lambda " + ",".join(args) + ': ' + body)
      macros.append((re.compile("\\b%s\\(" % name), PythonMacro(args, fun)))
      continue
    raise Error("Illegal line: " + line)
  return (constants, macros)
INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n')
INLINE_MACRO_END_PATTERN = re.compile(r'endmacro\s*\n')
def ExpandInlineMacros(lines):
  """Expand "macro name(args) ... endmacro" blocks declared inside *lines*.

  Each definition is removed from the text and subsequent calls of it are
  expanded in place; argument text is substituted verbatim (no recursive
  expansion). Returns the rewritten string. Raises Error for a macro
  without a matching endmacro.
  """
  pos = 0
  while True:
    macro_match = INLINE_MACRO_PATTERN.search(lines, pos)
    if macro_match is None:
      # no more macros
      return lines
    name = macro_match.group(1)
    args = [match.strip() for match in macro_match.group(2).split(',')]
    end_macro_match = INLINE_MACRO_END_PATTERN.search(lines, macro_match.end());
    if end_macro_match is None:
      raise Error("Macro %s unclosed" % name)
    body = lines[macro_match.end():end_macro_match.start()]
    # remove macro definition
    lines = lines[:macro_match.start()] + lines[end_macro_match.end():]
    name_pattern = re.compile("\\b%s\\(" % name)
    macro = TextMacro(args, body)
    # advance position to where the macro definition was
    pos = macro_match.start()
    def non_expander(s):
      return s
    lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
HEADER_TEMPLATE = """\
// Copyright 2011 Google Inc. All Rights Reserved.
// This file was generated from .js source files by GYP. If you
// want to make changes to this file you should either change the
// javascript source files or the GYP script.
#include "src/v8.h"
#include "src/natives.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
%(sources_declaration)s\
%(raw_sources_declaration)s\
template <>
int NativesCollection<%(type)s>::GetBuiltinsCount() {
return %(builtin_count)i;
|
from setuptools import setup

# Version is tracked in a standalone file so other tooling can read it too.
version = open('VERSION').read()

# Prefer a reST long description; fall back to the raw Markdown README
# when pypandoc (or pandoc itself) is unavailable.
try:
    import pypandoc
    description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
    description = open('README.md').read()

setup(
    name='taskloaf',
    version=version,
    description='',
    long_description=description,
    url='https://github.com/tbenthompson/taskloaf',
    author='T. Ben Thompson',
    author_email='t.ben.thompson@gmail.com',
    license='MIT',
    packages=['taskloaf'],
    install_requires=['uvloop', 'cloudpickle', 'pycapnp', 'attrs', 'structlog', 'pyzmq'],
    zip_safe=False,
    include_package_data=True,
    platforms=['any'],
)
IIazertyuiopII/PDS_sonification | python/VirtualScreen.py | Python | mit | 2,759 | 0.047119 | import math
class VirtualScreen:  # the screen is normal to the Leap Motion's Z axis
    def __init__(self, Xoffset=0, Yoffset=50, Zoffset=-50, Zlimit=220, length=350, height=300):  # in mm
        """A virtual screen of *length* x *height* mm split into 2 rows x 3 columns.

        The offsets locate the middle of the screen's bottom edge relative
        to the Leap origin; *Zlimit* is the depth of the interaction zone.
        """
        self.Xoffset = Xoffset
        self.Yoffset = Yoffset
        self.Zoffset = Zoffset
        self.Zlimit = Zlimit
        self.length = length
        self.height = height
        half_length = length / float(2)
        self.UpperLeftCorner = [Xoffset - half_length, Yoffset + height]
        self.Center = [Xoffset, Yoffset + 0.5 * height, Zoffset + 0.5 * Zlimit]
        self.zoneHeight = height / float(2)   # 2 rows of zones
        self.zoneLength = length / float(3)   # 3 columns of zones
        # Upper-left corner of every zone, row by row, left to right.
        self.zoneUpperLeftCornerArray = [
            [self.UpperLeftCorner[0] + self.zoneLength * col,
             self.UpperLeftCorner[1] - self.zoneHeight * row]
            for row in range(0, 2) for col in range(0, 3)]

    def distanceFromScreen(self, position):
        """Euclidean distance (mm) from *position* to the interaction box (0 inside)."""
        half_length = self.length / float(2)
        dX = max(max(position[0] - (self.Xoffset + half_length), 0),
                 max(self.Xoffset - half_length - position[0], 0))
        dY = max(max(position[1] - (self.Yoffset + self.height), 0),
                 max(self.Yoffset - position[1], 0))
        dZ = max(max(self.Zoffset - position[2], 0),
                 max(position[2] - (self.Zlimit + self.Zoffset), 0))
        return math.sqrt(dX ** 2 + dY ** 2 + dZ ** 2)

    def isFacingTheScreen(self, position):
        """True when *position* ([x, y, z] mm, Leap axes) lies inside the interaction box."""
        half_length = self.length / float(2)
        within_x = self.Xoffset - half_length <= position[0] <= self.Xoffset + half_length
        within_y = self.Yoffset <= position[1] <= self.Yoffset + self.height
        within_z = self.Zoffset <= position[2] <= self.Zoffset + self.Zlimit
        return within_x and within_y and within_z

    def getScreenZonePointedAt(self, position, direction):
        """Zone number (1-6) hit by the ray from *position* along *direction*, or -1."""
        if not self.isFacingTheScreen(position):
            return -1
        # Ray/plane intersection with the screen plane z == Zoffset.
        scale = (self.Zoffset - position[2]) / direction[2]
        intersection = [position[0] + scale * direction[0],
                        position[1] + scale * direction[1]]
        return self.getScreenZoneFromPointOnScreen(intersection)

    def getScreenZoneFromPointOnScreen(self, onScreenPosition):
        """Zone number (1-6) containing an on-screen [x, y] point, or -1 if outside."""
        for index, corner in enumerate(self.zoneUpperLeftCornerArray):
            inside_x = corner[0] <= onScreenPosition[0] < corner[0] + self.zoneLength
            inside_y = corner[1] - self.zoneHeight <= onScreenPosition[1] <= corner[1]
            if inside_x and inside_y:
                return index + 1
        return -1
#!/Users/latzko/work/spmcluster/.tox/docs/bin/python2.7

# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""

# Use the user's locale so docutils handles non-ASCII I/O correctly;
# ignore platforms where this is unsupported.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline

description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')

publish_cmdline(writer_name='latex', description=description)
dNG-git/pas_upnp | src/dNG/data/upnp/service.py | Python | gpl-2.0 | 19,564 | 0.007872 | # -*- coding: utf-8 -*-
"""
direct PAS
Python Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All | rights reserved
https://www.direct-netware.de/redirect?pas;upnp
The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published | by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
#echo(pasUPnPVersion)#
#echo(__FILEPATH__)#
"""
# pylint: disable=import-error,no-name-in-module
from collections import OrderedDict
import re
try: from urllib.parse import urljoin
except ImportError: from urlparse import urljoin
from dNG.data.binary import Binary
from dNG.data.xml_resource import XmlResource
from dNG.module.named_loader import NamedLoader
from dNG.net.http.client import Client as HttpClient
from dNG.runtime.value_exception import ValueException
from .identifier_mixin import IdentifierMixin
from .pas_upnp_version_mixin import PasUpnpVersionMixin
from .service_proxy import ServiceProxy
from .spec_mixin import SpecMixin
from .variable import Variable
class Service(IdentifierMixin, PasUpnpVersionMixin, SpecMixin):
"""
The UPnP common service implementation.
:author: direct Netware Group et al.
:copyright: direct Netware Group - All rights reserved
:package: pas
:subpackage: upnp
:since: v0.2.00
:license: https://www.direct-netware.de/redirect?licenses;gpl
GNU General Public License 2
"""
RE_CAMEL_CASE_SPLITTER = NamedLoader.RE_CAMEL_CASE_SPLITTER
"""
CamelCase RegExp
"""
RE_SERVICE_ID_URN = re.compile("^urn:(.+):(.+):(.*)$", re.I)
"""
serviceId URN RegExp
"""
def __init__(self):
"""
Constructor __init__(Service)
:since: v0.2.00
"""
IdentifierMixin.__init__(self)
SpecMixin.__init__(self)
self.actions = None
"""
Service actions defined in the SCPD
"""
self.log_handler = NamedLoader.get_singleton("dNG.data.logging.LogHandler", False)
"""
The LogHandler is called whenever debug messages should be logged or errors
happened.
"""
self.name = None
"""
UPnP service name
"""
self.service_id = None
"""
UPnP serviceId value
"""
self.url_base = None
"""
HTTP base URL
"""
self.url_control = None
"""
UPnP controlURL value
"""
self.url_event_control = None
"""
UPnP eventSubURL value
"""
self.url_scpd = None
"""
UPnP SCPDURL value
"""
self.variables = None
"""
Service variables defined in the SCPD
"""
#
def get_definition_variable(self, name):
"""
Returns the UPnP variable definition.
:param name: Variable name
:return: (dict) Variable definition
:since: v0.2.00
"""
if (self.variables is None or name not in self.variables): raise ValueException("'{0}' is not a defined SCPD variable".format(name))
return self.variables[name]
#
def get_name(self):
"""
Returns the UPnP service name (URN without version).
:return: (str) Service name
:since: v0.2.00
"""
return self.name
#
    def get_proxy(self):
        """
Returns a callable proxy object for UPnP actions and variables.

:return: (ServiceProxy) UPnP proxy
:since: v0.2.00
        """
        if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.get_proxy()- (#echo(__LINE__)#)", self, context = "pas_upnp")
        # NOTE(review): init_scpd() presumably parses the SCPD and fills
        # self.actions / self.variables on first use — confirm in full class.
        if (not self.is_initialized()): self.init_scpd()
        return ServiceProxy(self, self.actions, self.variables)
#
def get_service_id(self):
"""
Returns the UPnP serviceId value.
:return: (str) UPnP serviceId value
:since: v0.2.00
"""
return self.service_id['id']
#
def get_service_id_urn(self):
"""
Returns the UPnP serviceId value.
:return: (str) UPnP serviceId URN
:since: v0.2.00
"""
return self.service_id['urn']
#
def get_url_base(self):
"""
Returns the HTTP base URL.
:return: (str) HTTP base URL
:since: v0.2.00
"""
return self.url_base
#
def get_url_control(self):
"""
Returns the UPnP controlURL value.
:return: (str) SOAP endpoint URL
:since: v0.2.00
"""
return self.url_control
#
def get_url_event_control(self):
"""
Returns the UPnP eventSubURL value.
:return: (str) Event subscription endpoint; None if not set
:since: v0.2.00
"""
return self.url_event_control
#
def get_url_scpd(self):
"""
Returns the UPnP SCPDURL value.
:return: (str) SCPDURL value
:since: v0.2.00
"""
return self.url_scpd
#
def init_metadata_xml_tree(self, device_identifier, url_base, xml_resource):
"""
Initialize the service metadata from a UPnP description.
:param device_identifier: Parsed UPnP device identifier
:param url_base: HTTP base URL
:param xml_resource: UPnP description XML parser instance
:return: (bool) True if parsed successfully
:since: v0.2.00
"""
_return = True
if (xml_resource.count_node("upnp:service") > 0): xml_resource.set_cached_node("upnp:service")
else: _return = False
if (_return):
value = xml_resource.get_node_value("upnp:service upnp:serviceType")
re_result = (None if (value is None) else Service.RE_USN_URN.match(value))
if (re_result is None or re_result.group(2) != "service"): _return = False
else:
self.name = "{0}:service:{1}".format(re_result.group(1), re_result.group(3))
urn = "{0}:{1}".format(self.name, re_result.group(4))
self._set_identifier({ "device": device_identifier['device'],
"bootid": device_identifier['bootid'],
"configid": device_identifier['configid'],
"uuid": device_identifier['uuid'],
"class": "service",
"usn": "uuid:{0}::{1}".format(device_identifier['uuid'], value),
"urn": urn,
"domain": re_result.group(1),
"type": re_result.group(3),
"version": re_result.group(4)
})
#
#
if (_return):
value = xml_resource.get_node_value("upnp:service upnp:serviceId")
re_result = (None if (value is None) else Service.RE_SERVICE_ID_URN.match(value))
if (re_result is None or re_result.group(2) != "serviceId"): _return = False
else: self.service_id = { "urn": value[4:], "domain": re_result.group(1), "id": re_result.group(3) }
#
if (_return):
self.url_scpd = Binary.str(urljoin(url_base, xml_resource.get_node_value("upnp:service upnp:SCPDURL")))
self.url_control = Binary.str(urljoin(url_base, xml_resource.get_node_value("upnp:service upnp:controlURL")))
value = xml_resource.get_node_value("upnp:service upnp:eventSubURL") |
tamasgal/rlogbook | rlogbook/computing/migrations/0005_auto_20141127_1436.py | Python | mit | 1,018 | 0.001965 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Schema migration: create Subnet, drop Category, add Computer.subnet FK."""
    # BUGFIX: removed stray " | " artifacts that split "primary_key" and the
    # AddField keyword block, which made the module unparseable.

    dependencies = [
        ('computing', '0004_auto_20141127_1425'),
    ]

    operations = [
        migrations.CreateModel(
            name='Subnet',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200)),
                ('from_ip', models.CharField(max_length=15)),
                ('to_ip', models.CharField(max_length=15)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.DeleteModel(
            name='Category',
        ),
        migrations.AddField(
            model_name='computer',
            name='subnet',
            field=models.ForeignKey(blank=True, to='computing.Subnet', null=True),
            preserve_default=True,
        ),
    ]
|
HunanTV/redis-ctl | test/auto_balance.py | Python | mit | 15,140 | 0 | import json
import hashlib
from daemonutils.auto_balance import add_node_to_balance_for
import config
import base
import models.node
import | models.cluster
import models.task
from models.cluster_plan import ClusterBalancePlan, get_balance_plan_by_addr
REDIS_SHA = hashlib.sha1('redis').hexdigest()
def _get_balance_plan(plan):
    """Serialize *plan* (a dict) to JSON and wrap it in a ClusterBalancePlan."""
    serialized = json.dumps(plan)
    return ClusterBalancePlan(balance_plan_json=serialized)
class AutoBalance(base.TestCase):
def test_get_plan(self):
    """set_balance_plan / del_balance_plan round-trip through the HTTP API.

    BUGFIX: the signature was corrupted to ``def test_get_plan(se | lf):``
    by a stray artifact; restored to ``self``.
    """
    with self.app.test_client() as client:
        # Two clusters: c0 owns two nodes on 10.0.0.1, c1 owns one node.
        n0 = models.node.create_instance('10.0.0.1', 6301)
        n1 = models.node.create_instance('10.0.0.1', 6302)
        n2 = models.node.create_instance('10.0.0.2', 6301)
        c0 = models.cluster.create_cluster('the quick brown fox')
        c1 = models.cluster.create_cluster('the lazy dog')
        c0.nodes.append(n0)
        c0.nodes.append(n1)
        c1.nodes.append(n2)
        self.db.session.add(c0)
        self.db.session.add(c1)
        self.db.session.commit()
        c0_id = c0.id
        c1_id = c1.id

        r = client.post('/cluster/set_balance_plan', data={
            'cluster': c1_id,
            'pod': 'pod',
            'aof': '0',
            'slave_count': 0,
        })
        self.assertReqStatus(200, r)

        # Only the node that belongs to c1 resolves to the plan.
        p = get_balance_plan_by_addr('10.0.0.1', 6301)
        self.assertIsNone(p)
        p = get_balance_plan_by_addr('10.0.0.1', 6302)
        self.assertIsNone(p)
        p = get_balance_plan_by_addr('10.0.0.1', 6303)
        self.assertIsNone(p)
        p = get_balance_plan_by_addr('10.0.0.2', 6301)
        self.assertIsNotNone(p)
        self.assertEqual('pod', p.pod)
        self.assertEqual(None, p.host)
        self.assertEqual([], p.slaves)
        self.assertEqual(False, p.aof)

        r = client.post('/cluster/set_balance_plan', data={
            'cluster': c0_id,
            'pod': 'pod',
            'aof': '1',
            'master_host': '10.100.1.1',
            'slave_count': 2,
            'slaves': '10.100.1.2,',
        })
        self.assertReqStatus(200, r)

        r = client.post('/cluster/del_balance_plan', data={
            'cluster': c1_id,
        })
        self.assertReqStatus(200, r)

        # c1's plan is gone; both nodes of c0 share one plan instance.
        p = get_balance_plan_by_addr('10.0.0.2', 6301)
        self.assertIsNone(p)
        p0 = get_balance_plan_by_addr('10.0.0.1', 6301)
        self.assertIsNotNone(p0)
        self.assertEqual('pod', p0.pod)
        self.assertEqual('10.100.1.1', p0.host)
        self.assertEqual([{'host': '10.100.1.2'}, {}], p0.slaves)
        self.assertEqual(True, p0.aof)
        p1 = get_balance_plan_by_addr('10.0.0.1', 6302)
        self.assertEqual(p0.id, p1.id)
def test_master_only(self):
    # End-to-end: auto-balancing a cluster with a slave-less plan deploys
    # exactly one new master container and schedules join + migrate steps.
    with self.app.test_client() as client:
        n = models.node.create_instance('127.0.0.1', 6301)
        c = models.cluster.create_cluster('the quick brown fox')
        c.nodes.append(n)
        self.db.session.add(c)
        self.db.session.commit()
        cluster_id = c.id

        # Swap in the fake ERU client so deployments are recorded in-memory.
        self.replace_eru_client()
        add_node_to_balance_for('127.0.0.1', 6301, _get_balance_plan({
            'pod': 'std',
            'aof': True,
            'slaves': [],
        }), [2, 3, 5, 7], self.app)

        # Container id 1 is the newly deployed master.
        self.assertTrue(1 in self.app.container_client.deployed)
        self.assertDictEqual({
            'what': 'redis',
            'pod': 'std',
            'version': REDIS_SHA,
            'entrypoint': 'macvlan',
            'env': 'prod',
            'group': config.ERU_GROUP,
            'ncontainers': 1,
            'ncores': 1,
            'network': ['network:redis'],
            'host_name': None,
        }, self.app.container_client.deployed[1])

        # One pending auto-balance task holding the cluster lock.
        tasks = models.task.undone_tasks()
        self.assertEqual(1, len(tasks))
        t = tasks[0]
        self.assertEqual(cluster_id, t.cluster_id)
        self.assertEqual(models.task.TASK_TYPE_AUTO_BALANCE, t.task_type)
        self.assertIsNotNone(t.acquired_lock())

        # Step 1: join the new node; step 2: migrate half the given slots.
        steps = list(t.all_steps)
        self.assertEqual(2, len(steps))
        s = steps[0]
        self.assertEqual('join', s.command)
        self.assertDictEqual({
            'cluster_id': 1,
            'cluster_host': '127.0.0.1',
            'cluster_port': 6301,
            'newin_host': '10.0.0.1',
            'newin_port': 6379,
        }, s.args)
        s = steps[1]
        self.assertEqual('migrate', s.command)
        self.assertDictEqual({
            'src_host': '127.0.0.1',
            'src_port': 6301,
            'dst_host': '10.0.0.1',
            'dst_port': 6379,
            'slots': [2, 3],
        }, s.args)
def test_master_with_slaves(self):
with self.app.test_client() as client:
n = models.node.create_instance('127.0.0.1', 6301)
c = models.cluster.create_cluster('the quick brown fox')
c.nodes.append(n)
self.db.session.add(c)
self.db.session.commit()
cluster_id = c.id
self.replace_eru_client()
add_node_to_balance_for('127.0.0.1', 6301, _get_balance_plan({
'pod': 'std',
'aof': True,
'slaves': [{}, {}],
}), [2, 3, 5, 7, 11, 13, 17], self.app)
self.assertTrue(1 in self.app.container_client.deployed)
self.assertDictEqual({
'what': 'redis',
'pod': 'std',
'version': REDIS_SHA,
'entrypoint': 'macvlan',
'env': 'prod',
'group': config.ERU_GROUP,
'ncontainers': 1,
'ncores': 1,
'network': ['network:redis'],
'host_name': None,
}, self.app.container_client.deployed[1])
self.assertTrue(2 in self.app.container_client.deployed)
self.assertEqual(self.app.container_client.deployed[1],
self.app.container_client.deployed[2])
self.assertTrue(3 in self.app.container_client.deployed)
self.assertEqual(self.app.container_client.deployed[1],
self.app.container_client.deployed[3])
tasks = models.task.undone_tasks()
self.assertEqual(1, len(tasks))
t = tasks[0]
self.assertEqual(cluster_id, t.cluster_id)
self.assertEqual(models.task.TASK_TYPE_AUTO_BALANCE, t.task_type)
self.assertIsNotNone(t.acquired_lock())
steps = list(t.all_steps)
self.assertEqual(4, len(steps))
s = steps[0]
self.assertEqual('join', s.command)
self.assertDictEqual({
'cluster_id': 1,
'cluster_host': '127.0.0.1',
'cluster_port': 6301,
'newin_host': '10.0.0.1',
'newin_port': 6379,
}, s.args)
s = steps[1]
self.assertEqual('replicate', s.command)
self.assertDictEqual({
'cluster_id': 1,
'master_host': '10.0.0.1',
'master_port': 6379,
'slave_host': '10.0.0.2',
'slave_port': 6379,
}, s.args)
s = steps[2]
self.assertEqual('replicate', s.command)
self.assertDictEqual({
'cluster_id': 1,
'master_host': '10.0.0.1',
'master_port': 6379,
'slave_host': '10.0.0.3',
'slave_port': 6379,
}, s.args)
s = steps[3]
self.assertEqual('migrate', s.command)
self.assertDictEqual({
'src_host': '127.0.0.1',
'src_port': 6301,
'dst_host': '10.0.0.1',
'dst_port': 6379,
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractFujoshimoshiBlogspotCom.py | Python | bsd-3-clause | 562 | 0.033808 |
def extractFujoshimoshiBlogspotCom(item):
'''
Parser for 'fujoshimoshi.blogspot.com'
'''
vol, chp, frag, postfix = | extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', | 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
mindbender-studio/core | avalon/lib.py | Python | mit | 7,716 | 0 | """Helper functions"""
import os
import sys
import json
import logging
import datetime
import importlib
import subprocess
import types
from . import schema
from .vendor import six, toml
PY2 = sys.version_info[0] == 2
log_ = logging.getLogger(__name__)
# Backwards compatibility
logger = log_
__all__ = [
"time",
"log",
]
def time():
    """Return the current local date/time as a file-system safe string.

    Format: ``YYYYMMDDThhmmssZ`` (e.g. ``"20240131T235959Z"``).
    """
    now = datetime.datetime.now()
    return now.strftime("%Y%m%dT%H%M%SZ")
def log(cls):
    """Decorator for attaching a logger to the class `cls`

    Loggers inherit the syntax {module}.{submodule}

    Example
        >>> @log
        ... class MyClass(object):
        ...     pass
        >>>
        >>> myclass = MyClass()
        >>> myclass.log.info('Hello World')

    """
    # BUGFIX: restored the identifier "module" and the docstring line above,
    # both of which were split by a stray " | " artifact.
    module = cls.__module__
    name = cls.__name__

    # Package name appended, for filtering of LogRecord instances
    logname = "%s.%s" % (module, name)
    cls.log = logging.getLogger(logname)

    # All messages are handled by root-logger
    cls.log.propagate = True

    return cls
def dict_format(original, **kwargs):
    """Recursively format the values in *original* with *kwargs*.

    Example:
        >>> sample = {"key": "{value}", "sub-dict": {"sub-key": "sub-{value}"}}
        >>> dict_format(sample, value="Bob") == \
            {'key': 'Bob', 'sub-dict': {'sub-key': 'sub-Bob'}}
        True

    """
    def _convert(value):
        # Containers recurse; strings get formatted; anything else passes
        # through untouched.
        if isinstance(value, (dict, list)):
            return dict_format(value, **kwargs)
        if isinstance(value, six.string_types):
            return value.format(**kwargs)
        return value

    if isinstance(original, dict):
        # Keys of a dict are formatted as well as its values.
        return {key.format(**kwargs): _convert(value)
                for key, value in original.items()}

    assert isinstance(original, list)
    return [_convert(value) for value in original]
def which(program):
    """Locate `program` in PATH

    Arguments:
        program (str): Name of program, e.g. "python"

    Returns:
        str or None: Path of the first matching executable on PATH,
            or None when nothing matches.

    """
    def _is_executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    # On non-Windows platforms PATHEXT is unset, so this yields [""] and the
    # bare program name is probed.
    extensions = os.getenv("PATHEXT", "").split(os.pathsep)

    for directory in os.environ["PATH"].split(os.pathsep):
        directory = directory.strip('"')
        for extension in extensions:
            candidate = os.path.join(directory, program + extension.lower())
            if _is_executable(candidate):
                return candidate

    return None
def which_app(app):
    """Locate `app` in PATH

    Arguments:
        app (str): Name of app, e.g. "python"

    Returns:
        str or None: Path of the first "<app>.toml" definition found on
            PATH, or None when no definition exists.

    """
    toml_name = app + ".toml"
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory.strip('"'), toml_name)
        if os.path.isfile(candidate):
            return candidate

    return None
def get_application(name):
    """Find the application .toml and parse it.

    Arguments:
        name (str): The name of the application to search.

    Returns:
        dict: The parsed application from the .toml settings.

    Raises:
        ValueError: When no .toml definition exists for `name` on PATH.
        schema.ValidationError, schema.SchemaError, toml.TomlDecodeError:
            When the definition exists but is malformed.

    """
    definition_path = which_app(name)

    if definition_path is None:
        raise ValueError(
            "No application definition could be found for '%s'" % name
        )

    try:
        with open(definition_path) as f:
            app = toml.load(f)
            log_.debug(json.dumps(app, indent=4))
            schema.validate(app, "application")
    except (schema.ValidationError,
            schema.SchemaError,
            toml.TomlDecodeError):
        # Log which file failed, then let the original error propagate.
        log_.error("%s was invalid." % definition_path)
        raise

    return app
def launch(executable, args=None, environment=None, cwd=None):
    """Launch a new subprocess of `args`

    Arguments:
        executable (str): Relative or absolute path to executable
        args (list, optional): Command passed to `subprocess.Popen`
        environment (dict, optional): Custom environment passed
            to Popen instance.
        cwd (str, optional): Working directory of the new process.

    Returns:
        Popen instance of newly spawned process

    Exceptions:
        OSError on internal error
        ValueError on `executable` not found

    """
    CREATE_NO_WINDOW = 0x08000000
    CREATE_NEW_CONSOLE = 0x00000010
    IS_WIN32 = sys.platform == "win32"
    PY2 = sys.version_info[0] == 2

    abspath = executable

    env = (environment or os.environ)

    if PY2:
        # Protect against unicode, and other unsupported
        # types amongst environment variables
        enc = sys.getfilesystemencoding()
        env = {k.encode(enc): v.encode(enc) for k, v in env.items()}

    kwargs = dict(
        # BUGFIX: the fallback must be parenthesized; the previous
        # "[abspath] + args or list()" evaluated "[abspath] + args" first
        # and raised TypeError whenever args was left as None.
        args=[abspath] + (args or list()),
        env=env,
        cwd=cwd,

        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,

        # Output `str` through stdout on Python 2 and 3
        universal_newlines=True,
    )

    # Creation flags are Windows-only; this branch is a no-op on
    # linux/macos.
    if IS_WIN32 and env.get("CREATE_NEW_CONSOLE"):
        kwargs["creationflags"] = CREATE_NEW_CONSOLE
        kwargs.pop("stdout")
        kwargs.pop("stderr")
    else:
        if IS_WIN32:
            kwargs["creationflags"] = CREATE_NO_WINDOW

    popen = subprocess.Popen(**kwargs)

    return popen
def modules_from_path(path):
    """Get python scripts as modules from a path.

    Arguments:
        path (str): Path to folder containing python scripts.

    Returns:
        List of modules.

    """
    path = os.path.normpath(path)

    if not os.path.isdir(path):
        log_.warning("%s is not a directory" % path)
        return []

    modules = []
    for fname in os.listdir(path):
        # Ignore files which start with underscore
        if fname.startswith("_"):
            continue

        # Only plain ".py" files are considered; packages are skipped.
        mod_name, mod_ext = os.path.splitext(fname)
        if not mod_ext == ".py":
            continue

        abspath = os.path.join(path, fname)
        if not os.path.isfile(abspath):
            continue

        # Build an empty module object and execute the script into its
        # namespace; no regular import machinery is involved.
        module = types.ModuleType(mod_name)
        module.__file__ = abspath

        try:
            with open(abspath) as f:
                six.exec_(f.read(), module.__dict__)

            # Store reference to original module, to avoid
            # garbage collection from collecting it's global
            # imports, such as `import os`.
            # NOTE(review): this can shadow an installed module of the same
            # name in sys.modules -- confirm that is intended.
            sys.modules[mod_name] = module

        except Exception as err:
            # Best-effort loading: a broken script is reported and skipped,
            # never fatal for the whole directory.
            print("Skipped: \"{0}\" ({1})".format(mod_name, err))
            continue

        modules.append(module)

    return modules
def find_submodule(module, submodule):
    """Find and return submodule of the module.

    Args:
        module (types.ModuleType): The module to search in.
        submodule (str): The submodule name to find.

    Returns:
        types.ModuleType or None: The module, if found; None when the
            import failed (a warning is logged unless the failure is simply
            the submodule not existing).

    """
    name = "{0}.{1}".format(module.__name__, submodule)
    try:
        return importlib.import_module(name)
    except ImportError as exc:
        # Only warn when the import failed for some reason other than the
        # submodule itself being absent.
        #
        # BUGFIX: the old check compared str(exc) against
        # "No module name {name}", which never matches the interpreter's
        # actual message ("No module named '<name>'"), so *every* missing
        # submodule produced a warning. Use ImportError.name instead and
        # stay silent when the attribute is unavailable.
        if getattr(exc, "name", name) != name:
            log_.warning("Could not find '%s' in module: %s",
                         submodule,
                         module)
        return None
|
linostar/timeline-clone | source/timelinelib/plugin/pluginbase.py | Python | gpl-3.0 | 1,075 | 0 | # Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
class PluginBase(object):
    """Base class for Timeline plugins.

    Subclasses must override display_name(), service() and run();
    isplugin() marks instances as plugins for the plugin scanner.
    """

    def isplugin(self):
        return True

    def display_name(self):
        raise NotImplementedError("displayname not implemented.")

    def service(self):
        raise NotImplementedError("service not implemented.")

    def run(self, *args, **kwargs):
        # BUGFIX: removed a stray "|" artifact that corrupted the
        # parameter list ("def run(self, | *args, **kwargs):").
        raise NotImplementedError("run not implemented.")
|
kuggenhoffen/CoAPthon | coapthon/client/coap_protocol.py | Python | mit | 23,736 | 0.000843 | import os
import random
import re
import time
import twisted
from twisted.application.service import Application
from twisted.internet.error import AlreadyCancelled
from twisted.python import log
from twisted.python.log import ILogObserver, FileLogObserver
from twisted.python.logfile import DailyLogFile
from coapthon import defines
from coapthon.messages.message import Message
from coapthon.messages.option import Option
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.resources.resource import Resource
from coapthon.serializer import Serializer
from twisted.internet import task
from coapthon.utils import Tree
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
home = os.path.expanduser("~")
if not os.path.exists(home + "/.coapthon/"):
os.makedirs(home + "/.coapthon/")
# First, startLogging to capture stdout
logfile = DailyLogFile("CoAPthon_client.log", home + "/.coapthon/")
# Now add an observer that logs to a file
application = Application("CoAPthon_Client")
application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
class CoAP(DatagramProtocol):
def __init__(self, server, forward):
    """Initialise client-side CoAP protocol state.

    :param server: (host, port) pair addressing the CoAP server
    :param forward: whether this client forwards responses (proxy mode)
    """
    # print "INIT CLIENT\n"
    self._forward = forward
    # Bookkeeping tables, keyed by hash of (host, port, MID) or token.
    self.received = {}
    self.sent = {}
    self.sent_token = {}
    self.received_token = {}
    self.call_id = {}
    self.relation = {}
    self._currentMID = 1
    import socket
    try:
        # inet_aton succeeds only for a literal IPv4 address.
        socket.inet_aton(server[0])
        self.server = server
        # legal
    except socket.error:
        # Not legal
        data = socket.getaddrinfo(server[0], server[1])
        self.server = (server[0], server[1])
        # NOTE(review): `data` is never used and self.server keeps the
        # unresolved hostname -- the resolution result appears discarded;
        # confirm intended.
    # defer = reactor.resolve('coap.me')
    # defer.addCallback(self.start)
    # self.server = (None, 5683)
    root = Resource('root', visible=False, observable=False, allow_children=True)
    root.path = '/'
    self.root = Tree()
    self.root["/"] = root
    # Queue of pending (function, args, kwargs, callback) operations and
    # the LoopingCall handle created in startProtocol().
    self.operations = []
    self.l = None
@property
def current_mid(self):
    """Most recently assigned outgoing message ID (MID)."""
    return self._currentMID

@current_mid.setter
def current_mid(self, c):
    # Allows external bookkeeping (e.g. tests) to reset the MID counter.
    self._currentMID = c
def set_operations(self, operations):
    """Queue (function, args, kwargs, callback) operations, then start
    the first exchange once the server host is known."""
    for op in operations:
        function, args, kwargs, client_callback = op
        self.operations.append((function, args, kwargs, client_callback))
    host, port = self.server
    if host is not None:
        self.start(host)
def startProtocol(self):
    """Twisted hook: begin the periodic purge of stale exchanges."""
    # print "STARTPROTOCOL\n"
    # self.transport.connect(self.server)
    if self.server is None:
        log.err("Server address for the client is not initialized")
        # NOTE(review): exit() terminates the whole process -- confirm
        # this is the intended failure mode for a protocol object.
        exit()
    # Re-run purge_mids every EXCHANGE_LIFETIME seconds.
    self.l = task.LoopingCall(self.purge_mids)
    self.l.start(defines.EXCHANGE_LIFETIME)
def stopProtocol(self):
    # Twisted hook: cancel the purge loop when the transport goes away.
    self.l.stop()
def purge_mids(self):
    """Drop bookkeeping entries whose exchange lifetime has expired."""
    log.msg("Purge mids")
    now = time.time()

    # Collect expired MIDs first so the dict is not mutated while scanned.
    expired = [mid for mid, entry in self.sent.items()
               if entry[1] + defines.EXCHANGE_LIFETIME <= now]

    for mid in expired:
        message = self.sent.get(mid)[0]
        token_key = hash(str(self.server[0]) + str(self.server[1]) +
                         str(message.token))
        # pop(..., None) replaces the former four try/del/except KeyError
        # blocks; missing keys are silently ignored either way.
        self.sent.pop(mid, None)
        self.received.pop(mid, None)
        self.sent_token.pop(token_key, None)
        self.received_token.pop(token_key, None)
def start(self, host):
    """Dequeue the first queued operation and invoke it."""
    # print "START\n"
    # self.transport.connect(host, self.server[1])
    function, args, kwargs, client_callback = self.get_operation()
    function(client_callback, *args, **kwargs)

# def start_test(self, transport):
#     self.transport = transport
#     function, args, kwargs, client_callback = self.get_operation()
#     function(client_callback, *args, **kwargs)
def get_operation(self):
    """Pop and unpack the next queued operation.

    Returns:
        (function, args, kwargs, client_callback) -- all four are None
        when the queue is empty or the entry is malformed.
    """
    try:
        to_exec = self.operations.pop(0)
        args = []
        kwargs = {}
        if len(to_exec) == 4:
            function, args, kwargs, client_callback = to_exec
        elif len(to_exec) == 3:
            function, args, client_callback = to_exec
        elif len(to_exec) == 2:
            # BUGFIX: unpack the pair itself; the previous
            # "function, client_callback = to_exec[0]" tried to unpack
            # only the first element and could never succeed.
            function, client_callback = to_exec
        else:
            return None, None, None, None
        return function, args, kwargs, client_callback
    except IndexError:
        # Empty queue.
        return None, None, None, None
| def send(self, message):
# print "SEND\n"
serializer = Serializer()
if message.destination is None:
message.destination = self.server
host, port = message.destination
print "Message sent to " + host + ":" + str(port)
print "------------------------------ | ----------"
print message
print "----------------------------------------"
datagram = serializer.serialize(message)
log.msg("Send datagram")
self.transport.write(datagram, message.destination)
def send_callback(self, req, callback, client_callback):
    """Assign a MID if needed, record the pending exchange, transmit."""
    if req.mid is None:
        self._currentMID += 1
        req.mid = self._currentMID
    # Track the exchange both by MID and by token.
    key = hash(str(self.server[0]) + str(self.server[1]) + str(req.mid))
    key_token = hash(str(self.server[0]) + str(self.server[1]) + str(req.token))
    self.sent[key] = (req, time.time(), callback, client_callback)
    self.sent_token[key_token] = (req, time.time(), callback, client_callback)
    # client_callback may be a (success_callback, error_callback) pair.
    if isinstance(client_callback, tuple) and len(client_callback) > 1:
        client_callback, err_callback = client_callback
    else:
        err_callback = None
    # NOTE(review): "retrasmission" spelling matches the method defined
    # elsewhere in this class -- confirm before renaming.
    self.schedule_retrasmission(req, err_callback)
    self.send(req)
def datagramReceived(self, datagram, host):
    """Twisted hook: deserialize an incoming datagram and dispatch it."""
    # print "RECEIVED\n"
    serializer = Serializer()
    try:
        host, port = host
    except ValueError:
        # IPv6 peers arrive as a 4-tuple (host, port, flowinfo, scopeid).
        host, port, tmp1, tmp2 = host
    message = serializer.deserialize(datagram, host, port)
    print "Message received from " + host + ":" + str(port)
    print "----------------------------------------"
    print message
    print "----------------------------------------"
    # Dispatch by message class first.
    if isinstance(message, Response):
        self.handle_response(message)
    elif isinstance(message, Request):
        log.err("Received request")
    else:
        self.handle_message(message)
    key = hash(str(host) + str(port) + str(message.mid))
    if message.type == defines.inv_types["ACK"] and message.code == defines.inv_codes["EMPTY"] \
            and key in self.sent.keys():
        # Separate Response
        print "Separate Response"
    else:
        function, args, kwargs, client_callback = self.get_operation()
        key = hash(str(host) + str(port) + str(message.token))
        if function is None and len(self.relation) == 0:
            # Nothing left to do and no observe relations: stop the
            # reactor unless running as a forwarder.
            if not self._forward:
                reactor.stop()
        elif key in self.relation:
            # Observe notification for an established relation.
            response, timestamp, client_callback = self.relation.get(key)
            self.handle_notification(message, client_callback)
        else:
            # Chain into the next queued operation.
            function(client_callback, *args, **kwargs)
def handle_message(self, message):
host, port = message.source
key = hash(str(host) + str(port) + str(message.mid))
if message.type == defines.inv_types["ACK"] and message.code == defines.inv_codes["EMPTY"] \
and key in self.sent.keys():
return None
|
tseaver/google-cloud-python | monitoring/google/cloud/monitoring_v3/gapic/metric_service_client.py | Python | apache-2.0 | 43,991 | 0.002114 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.monitoring.v3 MetricService API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.api import metric_pb2 as api_metric_pb2
from google.api import monitored_resource_pb2
from google.cloud.monitoring_v3.gapic import enums
from google.cloud.monitoring_v3.gapic import metric_service_client_config
from google.cloud.monitoring_v3.gapic.transports import metric_service_grpc_transport
from google.cloud.monitoring_v3.proto import alert_pb2
from google.cloud.monitoring_v3.proto import alert_service_pb2
from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc
from google.cloud.monitoring_v3.proto import common_pb2
from google.cloud.monitoring_v3.proto import group_pb2
from google.cloud.monitoring_v3.proto import group_service_pb2
from google.cloud.monitoring_v3.proto import group_service_pb2_grpc
from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2
from google.cloud.monitoring_v3.proto import metric_service_pb2
from google.cloud.monitoring_v3.proto import metric_service_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-monitoring"
).version
class MetricServiceClient(object):
"""
Manages metric descriptors, monitored resource descriptors, and
time series data.
"""
SERVICE_ADDRESS = "monitoring.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.monitoring.v3.MetricService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
    """Build a client from a service-account private key JSON file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        MetricServiceClient: The constructed client.
    """
    kwargs["credentials"] = service_account.Credentials.from_service_account_file(
        filename
    )
    return cls(*args, **kwargs)

from_service_account_json = from_service_account_file
@classmethod
def metric_descriptor_path(cls, project, metric_descriptor):
    """Build the fully-qualified resource name of a metric descriptor."""
    template = "projects/{project}/metricDescriptors/{metric_descriptor=**}"
    return google.api_core.path_template.expand(
        template,
        project=project,
        metric_descriptor=metric_descriptor,
    )
@classmethod
def monitored_resource_descriptor_path(cls, project, monitored_resource_descriptor):
    """Build the fully-qualified resource name of a monitored resource descriptor."""
    template = "projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}"
    return google.api_core.path_template.expand(
        template,
        project=project,
        monitored_resource_descriptor=monitored_resource_descriptor,
    )
@classmethod
def project_path(cls, project):
    """Build the fully-qualified resource name of a project."""
    return google.api_core.path_template.expand(
        "projects/{project}",
        project=project,
    )
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.MetricServiceGrpcTransport,
Callable[[~.Credentials, type], ~.MetricServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
            with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
            are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = metric_service_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=metric_service_grpc_transport.MetricServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
|
ella/mypage | mypage/rsswidgets/migrations/0003_add_config.py | Python | bsd-3-clause | 1,553 | 0.01159 |
from south.db import db
from django.db import models
from mypage.rsswidgets.models import *
class Migration:
def forwards(self, orm):
# Adding field 'MultiRSSWidget.config_json'
db.add_column('rsswidgets_multirsswidget', 'config_json', models.TextField())
def backwards(self, orm):
# Deleting field 'MultiRSSWidget.config_json'
db.delete_column('rsswidgets_multirsswidget', 'config_json')
models = {
'widgets.widget': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label','model'),)", 'db_table': "'django_content_type'"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'rsswidgets.rsswidget': {
'Meta': {'_bases': ['mypage.widgets.models.Widget']},
'feed_url': ('models.URLField', [], {}),
'frequency_seconds': ('models.PositiveIntegerField', [], {'default': '1800'}),
'widget_ptr': ('models.OneToOneField', ["orm['widgets.Widget']"], {})
},
'rsswidgets.multirsswidget': {
'Meta': {'_bases': ['mypage.widgets | .models.Widget']},
'config_json': ('models.TextField', [], {}),
'widget_ptr': ('models | .OneToOneField', ["orm['widgets.Widget']"], {})
}
}
complete_apps = ['rsswidgets']
|
stefangri/s_s_productions | PHY341/V_302_Brueckenschaltung/Messdaten/auswertung.py | Python | mit | 10,629 | 0.029342 | import numpy as np
import uncertainties.unumpy as unp
from uncertainties import ufloat
import math
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from pint import UnitRegistry
u = UnitRegistry()
Q_ = u.Quantity
#umrechnung einheiten mit var.to('unit')
# Einheiten für pint:dimensionless, meter, second, degC, kelvin
#beispiel:
a = ufloat(5, 2) * u.meter
b = Q_(unp.uarray([5,4,3], [0.1, 0.2, 0.3]), 'meter')
c = Q_(0, 'degC')
c.to('kelvin')
#print(c.to('kelvin'))
#print(a**2)
#print(b**2)
#variabel_1,variabel_2=np.genfromtxt('name.txt',unpack=True)
#Standartabweichung und Mittelwert
def mittel_und_abweichung(messreihe):
    """Return mean and standard error of the mean of a series as a ufloat."""
    mittelwert = sum(messreihe) / len(messreihe)
    fehler = np.std(messreihe) / np.sqrt(len(messreihe))
    return ufloat(mittelwert, fehler)
#Standartabweichung und Mittelwert für Messreihe mit Intervallen
def mittel_und_abweichung_intervall(messreihe, intervall_laenge):
    """Mean and standard error of the mean for each consecutive interval.

    The series is split into chunks of `intervall_laenge`; one ufloat is
    returned per chunk.
    """
    ergebnisse = []
    for start in range(0, len(messreihe), intervall_laenge):
        block = messreihe[start:start + intervall_laenge]
        mittelwert = sum(block) / len(block)
        fehler = np.std(block) / np.sqrt(len(block))
        ergebnisse.append(ufloat(mittelwert, fehler))
    return ergebnisse
#Lineare regression
def linregress(x, y):
    """Least-squares straight-line fit y = A*x + B.

    Returns:
        (A, A_error, B, B_error): slope and intercept together with their
        statistical uncertainties.
    """
    assert len(x) == len(y)
    x, y = np.array(x), np.array(y)
    N = len(y)

    # Pre-compute the sums that appear in the normal equations.
    sum_x = np.sum(x)
    sum_y = np.sum(y)
    sum_xx = np.sum(x * x)
    sum_xy = np.sum(x * y)

    Delta = N * sum_xx - sum_x ** 2

    # A is the slope, B the intercept.
    A = (N * sum_xy - sum_x * sum_y) / Delta
    B = (sum_xx * sum_y - sum_x * sum_xy) / Delta

    residuals = y - A * x - B
    sigma_y = np.sqrt(np.sum(residuals ** 2) / (N - 2))

    A_error = sigma_y * np.sqrt(N / Delta)
    B_error = sigma_y * np.sqrt(sum_xx / Delta)

    return A, A_error, B, B_error
###Angepasstes Programm
##Teil a)
teil_a_widerstand_2,teil_a_widerstand_3=np.genfromtxt('weath_brueck_teil_1.txt',unpack=True)
#teil_a_widerstand_2,teil_a_verhaeltniR34=np.linspace('Teila_widerstaende.txt'unpack=True)
#Einheitenzuteilung
#teil_a_widerstand_2=Q_(teil_a_widerstand_2,'ohm')
#teil_a_widerstand_3=Q_(teil_a_widerstand_3,'ohm')
r_g=1000
teil_a_widerstand_4=r_g-teil_a_widerstand_3
#Widerstandberechnung
def wider(R_2, R_3, R_4):
    """Unknown bridge resistance: R_x = R_2 * (R_3 / R_4)."""
    verhaeltnis = R_3 / R_4
    return R_2 * verhaeltnis
# Part a): compute R_x for every measurement and average over 3-row intervals.
teil_a_widerstand_x = wider(teil_a_widerstand_2, teil_a_widerstand_3, teil_a_widerstand_4)
print(teil_a_widerstand_x)
# BUGFIX: the target name below was split by a stray " | " artifact
# ("teil_a_widerstand_x | _mittel"), which broke the assignment.
teil_a_widerstand_x_mittel = mittel_und_abweichung_intervall(teil_a_widerstand_x, 3)
#print('Teil a, Widerstand R_x',teil_a_widerstand_x)
print('\n')
print('Teil a, r_2', teil_a_widerstand_2)
print('Teil a, R_3 und r_4', teil_a_widerstand_3, teil_a_widerstand_4)
print('Teil a, Widerstand R_x ', teil_a_widerstand_x)
print('Teil a, Widerstand R_x gemittelt', teil_a_widerstand_x_mittel)
print('\n')
## Part b) Capacitance bridge (ideal capacitor): read C_2 and R_3 from file.
teil_b_c_2,teil_b_r_3=np.genfromtxt('kapazi_mess_teil_2_a.txt',unpack=True)
teil_b_c_2*=1e-9
## Unit assignment (disabled; the values were recorded in nF — see the
## '#Nano Farad' note below):
#teil_b_c_2=Q_(teil_b_c_2*1e-9,'farad') #Nano Farad
#teil_b_r_3_u=Q_(teil_b_r_3,'ohm')
#
teil_b_widerstand_4=r_g-teil_b_r_3
# Capacitance and resistance determination
def capa(c_2, R_3, R_4):
    """Capacitance bridge: unknown capacitance C_x = c_2 * (R_4 / R_3)."""
    verhaeltnis = R_4 / R_3
    return c_2 * verhaeltnis
# C_x from the bridge balance, then averaged in blocks of 3 measurements.
teil_b_capatität_cx_ideal=capa(teil_b_c_2,teil_b_r_3,teil_b_widerstand_4)
teil_b_capatität_cx_ideal_mittel=mittel_und_abweichung_intervall(teil_b_capatität_cx_ideal,3)
print('Teil b, c_2', teil_b_c_2)
print('Teil b, R_3 und r_4', teil_b_r_3,teil_b_widerstand_4)
print('Teil b, Kapazität C_x ',teil_b_capatität_cx_ideal)
print('Teil b, Kapazität Cx Ideal Mittel ',teil_b_capatität_cx_ideal_mittel)
print('\n')
## Part 2: real capacitor (both C_x and its series resistance R_x).
teil_b_c2_real,teil_b_r2_real,teil_b_r3_real=np.genfromtxt('kapazi_mess_real_teil_2_b.txt',unpack=True)
teil_b_c2_real*=1e-9
# Unit assignment (disabled):
#teil_b_c2_real=Q_(teil_b_c2_real*1e-9,'farad')
#
#teil_b_r2_real=Q_(teil_b_r2_real,'ohm')
#teil_b_r3_real=Q_(teil_b_r3_real,'ohm')
teil_b_widerstand_4_real=r_g-teil_b_r3_real
# Same bridge formulas as part a/b, averaged in blocks of 3.
teil_b_capatität_cx_real=capa(teil_b_c2_real,teil_b_r3_real,teil_b_widerstand_4_real)
teil_b_capatität_cx_real_mittel=mittel_und_abweichung_intervall(teil_b_capatität_cx_real,3)
teil_b_widerstand_rx_real=wider(teil_b_r2_real,teil_b_r3_real,teil_b_widerstand_4_real)
teil_b_widerstand_cx_real_mittel=mittel_und_abweichung_intervall(teil_b_widerstand_rx_real,3)
print('Teil B, c_2 real', teil_b_c2_real)
print('\n')
print('teil b, r-2 real',teil_b_r2_real)
print('\n')
print('Teil b, R_3 und r_4', teil_b_r3_real, teil_b_widerstand_4_real)
print('\n')
print('Teil b, Widerstand real R_x ',teil_b_widerstand_rx_real)
print('\n')
print('Teil b, Kapazität C_x ',teil_b_capatität_cx_real)
print('\n')
print('Teil b, Widerstand Rx real ',teil_b_widerstand_cx_real_mittel)
print('\n')
print('Teil b, Kapatität Cx real',teil_b_capatität_cx_real_mittel)
print('\n')
## Part c) Inductance bridge: read L_2, R_2 and R_3 from file.
teil_c_indu,teil_c_widerstand_2,teil_c_R3=np.genfromtxt('induktivitätmess_teil_3.txt',unpack=True)
teil_c_indu*=1e-3
# Unit assignment (disabled; values presumably recorded in mH, matching the
# 1e-3 scaling above and the 'in mH' print below — confirm against the data):
#teil_c_indu=Q_(teil_c_indu,'henry')
#teil_c_widerstand_2=Q_(teil_c_widerstand_2,'ohm')
#teil_c_R3=Q_(teil_c_R3,'ohm')
teil_c_r4=r_g-teil_c_R3
# Inductance and resistance
def indu(l_2, R_3, R_4):
    """Inductance bridge: unknown inductance L_x = l_2 * (R_3 / R_4)."""
    verhaeltnis = R_3 / R_4
    return l_2 * verhaeltnis
# R_x and L_x from the bridge balance, then averaged in blocks of 3.
teil_c_widerstand_rx=wider(teil_c_widerstand_2,teil_c_R3,teil_c_r4)
teil_c_induktivitaet_lx=indu(teil_c_indu,teil_c_R3,teil_c_r4)
teil_c_widerstand_rx_mittel=mittel_und_abweichung_intervall(teil_c_widerstand_rx,3)
teil_c_induktivitaet_lx_mittel=mittel_und_abweichung_intervall(teil_c_induktivitaet_lx,3)
print('Teil c, Wiederstand 2Indu,',teil_c_widerstand_2)
print('\n')
print('Teil c, R_3', teil_c_R3)
print('\n')
print('Teil c, R_4', teil_c_r4)
print('\n')
print('Teil c, Induktivität in mH', teil_c_indu)
print('\n')
print('Teil c, Widerstand Rx ', teil_c_widerstand_rx)
print('Teil c, Widerstand Rx mittel ', teil_c_widerstand_rx_mittel)
print('\n')
print('Teil c, Indu Lx ',teil_c_induktivitaet_lx)
print('Teil c, Indu lx gemittelt (18, 16)', teil_c_induktivitaet_lx_mittel)
print('\n')
### Part d) Maxwell bridge: read R_2, C_2, R_4, R_3 from file.
teil_d_widerstand_2,teil_d_c2,teil_d_widerstand_4,teil_d_widerstand_3=np.genfromtxt('maxwell_bruecke_teil_4.txt',unpack=True)
teil_d_c2*=1e-9
## Unit assignment (disabled):
#teil_d_widerstand_2=Q_(teil_d_widerstand_2,'ohm')
#teil_d_widerstand_3=Q_(teil_d_widerstand_3_'ohm')
#teil_d_widerstand_4=Q_(teil_d_widerstand_4,'ohm')
#c_4=Q_(c_4*1e-9,'farad') #nano Farad
#
# Inductance determination (Maxwell bridge)
#
def wider_max(r_2, r_3, r_4):
    """Maxwell bridge: unknown resistance R_x = r_2 * r_3 / r_4."""
    return (r_2 * r_3) / r_4
#
def indu_max(r_2, r_3, c_4):
    """Maxwell bridge: unknown inductance L_x = r_2 * r_3 * c_4."""
    return r_2 * r_3 * c_4
# R_x and L_x from the Maxwell bridge, averaged in blocks of 3.
teil_d_widerstand_rx=wider_max(teil_d_widerstand_2,teil_d_widerstand_3,teil_d_widerstand_4)
teil_d_indu_lx=indu_max(teil_d_widerstand_2,teil_d_widerstand_3,teil_d_c2)
teil_d_widerstand_rx_mittel=mittel_und_abweichung_intervall(teil_d_widerstand_rx,3)
teil_d_indu_lx_mittel=mittel_und_abweichung_intervall(teil_d_indu_lx,3)
print(teil_d_widerstand_2)
print('\n')
print('Teil d), Wiederstand Rx ',teil_d_widerstand_rx)
print('Teil d), Wiederstand Rx mittel ', teil_d_widerstand_rx_mittel)
print('\n')
print('Teil d), Induktivität Lx ',teil_d_indu_lx)
print('Teil d), Induktivität Lx mittel (16 ,18) ', teil_d_indu_lx_mittel)
print('\n')
### Part e) Wien-Robinson bridge: frequency, bridge voltage, source voltage.
teil_e_frequenz,teil_e_u_br,teil_e_u_s=np.genfromtxt('wien_robison_teil_5.txt',unpack=True)
teil_e_test=teil_e_u_br/(2*np.sqrt(2))
# Scale U_Br by 1/(2*sqrt(2)) in two steps (same overall factor as teil_e_test).
teil_e_u_br*=0.5
teil_e_u_br*=1/(np.sqrt(2))
#print(teil_e_u_br)
#print(teil_e_test)
R=1000
C=993*1e-9
# Unit assignment (disabled):
#R=Q_(R,'ohm')
#C=Q_(Q*1-9,'farad') #nano Farard
#teil_e_frequenz=Q_(teil_e_frequenz,'hertz')
#teil_e_u_s=Q_=(teil_e_u_s,'volt')
#teil_e_u_br=Q_(teil_e_u_br,'volt')
#
#print('Einheiten der Spannungen noch überprüfen, momentan: ')
#print(teil_e_u_s)
#print(teil_e_u_br)
#print('\n')
## Determination of omega_0 and Omega
def freq(R, C):
    """Wien-Robinson design frequency f_0 = 1 / (2*pi*R*C)."""
    kehrwert_rc = 1 / (R * C)
    return kehrwert_rc * (1 / (2 * np.pi))
#
# Characteristic frequency of the bridge from the component values above.
teil_e_omega_0=freq(R,C)
print('Teil e, omega_0 ', teil_e_omega_0)
#
def Omega(frequnz, omega_0):
    """Normalized frequency: measured frequency divided by omega_0."""
    return frequnz / omega_0
teil_e_Omega=Omega(teil_e_frequenz,teil_e_omega_0)
print('Teil e, OMEGA ', teil_e_Omega)
## Determine the voltage ratio U_Br / U_S
#
teil_e_quotient_usue=teil_e_u_br/teil_e_u_s  # bridge voltage relative to source voltage
brickman1444/hft-wordchain | chain_generator/generateChains.py | Python | bsd-3-clause | 2,594 | 0.032382 |
import json
import copy
import pdb
# constants
wordPairFileName = "wordPairs.json"
wordChainFileName = "wordLists.json"
chainLength = 6
def appendWordsToChain(partialChain, retList, shortList, pairDictionary):
    """Depth-first extend partialChain word by word.

    Chains that reach chainLength go to retList; chains that cannot be
    extended any further go to shortList. Both lists are mutated in place.
    """
    lastWord = partialChain[-1]
    if lastWord not in pairDictionary:
        # No successors at all: this chain is a dead end.
        shortList.append(partialChain)
        return
    extended = False
    for candidate in pairDictionary[lastWord]:
        if candidate in partialChain:
            # Repeated word. Don't record a short chain yet: another
            # candidate may still extend this chain, so we don't know
            # whether it is a true dead end.
            continue
        grown = copy.copy(partialChain)
        grown.append(candidate)
        extended = True
        if len(grown) < chainLength:
            appendWordsToChain(grown, retList, shortList, pairDictionary)
        else:
            retList.append(grown)
    if not extended:
        shortList.append(partialChain)
def naiveMethod(pairDictionary, reverseDictionary):
    """Build all maximal word chains by brute-force DFS from every first word.

    Also prints chains that came up exactly one word short and cannot be
    extended on the left (their first word never appears as a second word).
    """
    retList = []
    shortList = []  # to hold lists that are too short
    for firstWord in pairDictionary:
        # appendWordsToChain mutates retList/shortList in place and returns
        # None, so its result is deliberately not captured.
        appendWordsToChain([firstWord], retList, shortList, pairDictionary)
    print("Short List:")
    shortCount = 0
    # Renamed the loop variable: it previously shadowed the builtin `list`.
    for shortChain in shortList:
        if len(shortChain) == (chainLength - 1) and shortChain[0] not in reverseDictionary:
            print(shortChain)
            shortCount += 1
    print("Just short count: %d" % shortCount)
    return retList
def getListsFromDictionary(pairDictionary, reverseDictionary):
    """Dispatch to the chain-generation strategy (currently the naive DFS)."""
    return naiveMethod(pairDictionary, reverseDictionary)
def getReverseDictionary(pairDictionary):
    """Invert a {first: [seconds]} mapping into {second: [firsts]}.

    Single pass over the pair dictionary (the old version rescanned the
    whole dictionary once per second word). Key order and the order of
    first words within each list match the original algorithm (dict
    iteration order), and duplicate first words are skipped.
    """
    retDict = {}
    for firstWord, secondWords in pairDictionary.items():
        for secondWord in secondWords:
            firsts = retDict.setdefault(secondWord, [])
            if firstWord not in firsts:
                firsts.append(firstWord)
    return retDict
# main: load word pairs, generate all chains, persist them as JSON.
with open(wordPairFileName, 'r') as pair_file:
    pairRoot = json.load(pair_file)
pairDictionary = pairRoot["word pairs"]
reverseDictionary = getReverseDictionary(pairDictionary)
lists = getListsFromDictionary(pairDictionary, reverseDictionary)
print("Generated %d lists" % len(lists))
listsRoot = {}
listsRoot["wordLists"] = lists
# Context managers close both files even on error; previously the name
# `file` shadowed the builtin and the input file was never closed.
with open(wordChainFileName, mode="w") as chain_file:
    json.dump(listsRoot, chain_file, indent="    ")
# Odoo module manifest. Repaired: corrupted characters ('َfor', 'oَf',
# stray ' | ') restored to plain ASCII; in particular the dependency
# 'point_oَf_sale' is now the real Odoo module name 'point_of_sale'.
{
    'name': 'Product Pack POS for IngAdhoc',
    'summary': 'Product packs on POS',
    'description': """
    This module is extension for INGADHOC's module product_pack that will
    Process product_pack pickings from POS sales.
    Note: this module works with Fixed price packs only.
    """,
    'version': '10.0.0.2',
    'category': 'Point of Sale',
    'author': 'DVIT.me',
    'website': 'http://dvit.me',
    'license': 'AGPL-3',
    'depends': ['product_pack', 'point_of_sale'],
    'data': [],
    'demo': [],
    "images": [
        'static/description/banner.png'
    ],
    'installable': True,
    'auto_install': True,
    'application': False,
}
matsumoto-r/synciga | src/build/android/bb_run_sharded_steps.py | Python | bsd-3-clause | 7,158 | 0.0095 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script to shard build bot steps and save results to disk.
Our buildbot infrastructure requires each slave to run steps serially.
This is sub-optimal for android, where these steps can run independently on
multiple connected devices.
The buildbots will run this script multiple times per cycle:
- First: all steps listed in -s in will be executed in parallel using all
connected devices. Step results will be pickled to disk. Each step has a unique
name. The result code will be ignored if the step name is listed in
--flaky_steps.
The buildbot will treat this step as a regular step, and will not process any
graph data.
- Then, with -p STEP_NAME: at this stage, we'll simply print the file with the
step results previously saved. The buildbot will then process the graph data
accordingly.
The JSON steps file contains a dictionary in the format:
{
"step_name_foo": "script_to_execute foo",
"step_name_bar": "script_to_execute bar"
}
The JSON flaky steps file contains a list with step names which results should
be ignored:
[
"step_name_foo",
"step_name_bar"
]
Note that script_to_execute necessarily have to take at least the following
options:
--device: the serial number to be passed to all adb commands.
--keep_test_server_ports: indicates it's being run as a shard, and shouldn't
reset test server port allocation.
"""
import datetime
import json
import logging
import multiprocessing
import optparse
import pexpect
import pickle
import os
import signal
import shutil
import sys
from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib import ports
_OUTPUT_DIR = os.path.join(constants.CHROME_DIR, 'out', 'step_results')
def _SaveResult(result):
  """Pickle a single step-result dict to _OUTPUT_DIR, keyed by step name."""
  # `with open` instead of the Python-2-only `file()` builtin; the file is
  # now closed even if pickling fails.
  with open(os.path.join(_OUTPUT_DIR, result['name']), 'w') as f:
    f.write(pickle.dumps(result))
def _RunStepsPerDevice(steps):
  # Run the given steps sequentially on one device and return their results.
  # Each step dict carries: 'name', 'cmd', 'device' (serial) and 'is_flaky'.
  results = []
  for step in steps:
    start_time = datetime.datetime.now()
    print 'Starting %s: %s %s at %s' % (step['name'], step['cmd'],
                                        start_time, step['device'])
    # pexpect.run streams the child's output to our stdout and enforces a
    # 30-minute timeout per step.
    output, exit_code = pexpect.run(
        step['cmd'], cwd=os.path.abspath(constants.CHROME_DIR),
        withexitstatus=True, logfile=sys.stdout, timeout=1800,
        env=os.environ)
    exit_code = exit_code or 0
    end_time = datetime.datetime.now()
    exit_msg = '%s %s' % (exit_code,
                          '(ignored, flaky step)' if step['is_flaky'] else '')
    print 'Finished %s: %s %s %s at %s' % (step['name'], exit_msg, step['cmd'],
                                           end_time, step['device'])
    # Flaky steps never fail the build: force a success exit code.
    if step['is_flaky']:
      exit_code = 0
    result = {'name': step['name'],
              'output': output,
              'exit_code': exit_code,
              'total_time': (end_time - start_time).seconds,
              'device': step['device']}
    # Persist to disk so a later --print_results invocation can replay it.
    _SaveResult(result)
    results += [result]
  return results
def _RunShardedSteps(steps, flaky_steps, devices):
  # Distribute the steps across the attached devices (one contiguous slice
  # of the step list per device) and run the slices in parallel.
  assert steps
  assert devices, 'No devices connected?'
  # Start from a clean results directory; the substring assert guards the
  # rmtree against a misconfigured _OUTPUT_DIR.
  if os.path.exists(_OUTPUT_DIR):
    assert '/step_results' in _OUTPUT_DIR
    shutil.rmtree(_OUTPUT_DIR)
  if not os.path.exists(_OUTPUT_DIR):
    os.makedirs(_OUTPUT_DIR)
  step_names = sorted(steps.keys())
  all_params = []
  num_devices = len(devices)
  # Ceiling division so every step is assigned even when the split is uneven.
  shard_size = (len(steps) + num_devices - 1) / num_devices
  for i, device in enumerate(devices):
    steps_per_device = []
    for s in steps.keys()[i * shard_size:(i + 1) * shard_size]:
      steps_per_device += [{'name': s,
                            'device': device,
                            'is_flaky': s in flaky_steps,
                            'cmd': steps[s] + ' --device ' + device +
                            ' --keep_test_server_ports'}]
    all_params += [steps_per_device]
  print 'Start sharding (note: output is not synchronized...)'
  print '*' * 80
  start_time = datetime.datetime.now()
  # One worker process per device; each worker runs its slice serially.
  pool = multiprocessing.Pool(processes=num_devices)
  async_results = pool.map_async(_RunStepsPerDevice, all_params)
  results_per_device = async_results.get(999999)
  end_time = datetime.datetime.now()
  print '*' * 80
  print 'Finished sharding.'
  print 'Summary'
  total_time = 0
  for results in results_per_device:
    for result in results:
      print('%s : exit_code=%d in %d secs at %s' %
            (result['name'], result['exit_code'], result['total_time'],
             result['device']))
      total_time += result['total_time']
  print 'Step time: %d secs' % ((end_time - start_time).seconds)
  print 'Bots time: %d secs' % total_time
  # No exit_code for the sharding step: the individual _PrintResults step
  # will return the corresponding exit_code.
  return 0
def _PrintStepOutput(step_name):
file_name = os.path.join(_OUTPUT_DIR, step_name)
if not os.path.exists(file_name):
print 'File not found ', file_name
return 1
with file(file_name, 'r') as f:
result = pickle.loads(f.read())
print result['output']
return result['exit_code']
def _KillPendingServers():
  """Best-effort kill of test servers left over from previous runs."""
  # Retry a few times; the loop variable was unused, so use `_`.
  for _ in range(5):
    for server in ['lighttpd', 'web-page-replay']:
      pids = cmd_helper.GetCmdOutput(['pgrep', '-f', server])
      pids = [pid.strip() for pid in pids.split('\n') if pid.strip()]
      for pid in pids:
        try:
          logging.warning('Killing %s %s', server, pid)
          os.kill(int(pid), signal.SIGQUIT)
        except Exception as e:
          # Best effort: the process may already be gone or owned elsewhere.
          logging.warning('Failed killing %s %s %s', server, pid, e)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('-s', '--steps',
help='A JSON file containing all the steps to be '
'sharded.')
parser.add_option('--flaky_steps',
help='A JSON file containing steps that are flaky and '
'will have its exit code ignored.')
parser.add_option('-p', '--print_results',
help='Only prints the results for the previously '
'executed step, do not run it again.')
options, urls = parser.parse_args(argv)
if options.print_results:
return _PrintStepOutput(options.print_results)
# At this point, we should kill everything that may have been left over from
# previous runs.
_KillPendingServers()
# Reset the test port allocation. It's important to do it before starting
# to dispatch any step.
| if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
# Sort the devices so that we'll try to always run a step in the same device.
devices = sorted(android_commands.GetAttachedDevices())
if not devices:
print 'You must attach a device'
return 1
with file(options.steps, 'r') as f:
steps = json.load(f)
flaky_steps = | []
if options.flaky_steps:
with file(options.flaky_steps, 'r') as f:
flaky_steps = json.load(f)
return _RunShardedSteps(steps, flaky_steps, devices)
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
bgarrels/sky | sky/money.py | Python | bsd-3-clause | 3,699 | 0.002163 | import re
# U.S. locale for MoneyMatcher: currency symbol/word regexes plus magnitude
# words mapped to multipliers. The trailing ('\\b', 1) and ('', 1) entries
# let a bare number (no magnitude word) match with multiplier 1.
locale_US = {'symbol': r'\$',
             'currency': r'dollar[s]*',
             'units': [('million', 10**6), ('m', 10**6), ('mn', 10**6), ('mil', 10**6),
                       ('thousand', 10**3), ('k', 10**3),
                       ('billion', 10**9), ('b', 10**9), ('bn', 10**9),
                       ('cent', 0.01),
                       ('\\b', 1), ('', 1)]}
class MoneyMatcher():
    """Find monetary amounts in free text for a given locale.

    A locale dict provides a currency ``symbol`` regex (e.g. ``\\$``), a
    ``currency`` word regex (e.g. ``dollar[s]*``) and ``units``: a list of
    (magnitude word, multiplier) pairs.
    """
    def __init__(self, locale=None):
        if locale is None:
            locale = locale_US
        # Map every unit spelling (as written, Title case, UPPER case)
        # to its multiplier.
        self.unit_dict = {}
        for k in locale['units']:
            self.unit_dict[k[0]] = k[1]
            self.unit_dict[k[0].title()] = k[1]
            self.unit_dict[k[0].upper()] = k[1]
        units = '({})'.format('|'.join([x[0] for x in locale['units']])) + '\\b'
        number_regex = '([0-9]*[,.]*[0-9]+[,.]*)'
        optional_space = '[ ]*'
        # "$ 12.5 million" style: symbol before the number.
        self.symbol = re.compile(locale['symbol'] + optional_space +
                                 number_regex + optional_space + units, re.IGNORECASE)
        # "12.5 million dollars" style: currency word after the number.
        self.currency = re.compile(number_regex + optional_space +
                                   units + optional_space + locale['currency'], re.IGNORECASE)

    def find(self, text, min_amount=0, max_amount=10**12):
        """Return (matched_text, value, start, stop) tuples for amounts in `text`.

        Symbol matches and currency matches whose spans overlap are merged
        into a single span; amounts outside (min_amount, max_amount) are
        dropped.
        """
        matches = []
        for m in self.symbol.finditer(text):
            matches.append([range(m.start(), m.end()), m.groups()])
        for m in self.currency.finditer(text):
            s = m.start()
            e = m.end()
            for mm in matches:
                if s in mm[0]:
                    # Currency match starts inside an earlier span: extend right.
                    mm[0] = range(mm[0].start, e)
                    break
                if e in mm[0]:
                    # Currency match ends inside an earlier span: extend left.
                    mm[0] = range(s, mm[0].stop)
                    break
            else:
                matches.append([range(m.start(), m.end()), m.groups()])
        # (The method name below was corrupted in the source and is repaired.)
        results = [(text[x[0].start:x[0].stop], self.convertMatchToValue(x[1]),
                    x[0].start, x[0].stop) for x in matches]
        return [r for r in results if min_amount < r[1] < max_amount]

    def convertMatchToValue(self, match):
        """Turn (number string, unit string) regex groups into a float amount."""
        value = match[0].replace(',', '.').strip('.')
        # Each '.123' group is treated as a thousands separator, multiplying
        # the value by 1000, rather than as a decimal point.
        modifier = 1000 ** (len(re.findall(r'\.\d{3}[^0-9]', value)) +
                            bool(re.search(r'\.\d{3}$', value)))
        value = float(value)
        unit_modifier = self.unit_dict[match[1]]
        return value * modifier * unit_modifier
def investment_annotation(title, body, money, entities, indicators=None,
                          max_char_distance=100):
    # Pair each money mention with the closest Person/Company entity, but
    # only when an investment indicator word also occurs within
    # max_char_distance of that entity (searched over title + body).
    content = title + ' ' + body
    if indicators is None:
        indicators = ['Invest', 'Fund', 'Rais']
    indicators = list(set(indicators + [w.lower() for w in indicators]))
    entity_positions = [(ent['text'], content.find(ent['text']))
                        for ent in entities if ent['type'] in ['Person', 'Company']]
    indicator_positions = [(w, content.find(w))
                           for w in indicators if content.find(w) > -1]
    best = {}
    for m in money:
        for name, pos in entity_positions:
            for _, ipos in indicator_positions:
                if abs(pos - m[1]) >= max_char_distance:
                    continue
                if abs(pos - ipos) >= max_char_distance:
                    continue
                dist = abs(pos - m[1])
                # Keep the closest entity seen so far for this money mention.
                if m not in best or dist < best[m][1]:
                    best[m] = (name, dist)
    return [{'company': best[m][0], 'amount': m[1]} for m in best]
|
import os
import os.path
import sys

# Add the project to the python path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))

# Send stdout to stderr so print output does not corrupt the WSGI stream.
sys.stdout = sys.stderr

# Configure the application (Logan)
from fabric_bolt.utils.runner import configure
configure()

from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
hugovk/coveragepy | ci/parse_relnotes.py | Python | apache-2.0 | 3,133 | 0.001596 | #!/usr/bin/env python3
"""
Parse CHANGES.md into a JSON structure.
Run with two arguments: the .md file to parse, and the JSON file to write:
python parse_relnotes.py CHANGES.md relnotes.json
Every section that has something that looks like a version number in it will
be recorded as the release notes for that version.
"""
import json
import re
import sys
class TextChunkBuffer:
    """Accumulate text pieces until a header forces them out as one chunk."""

    def __init__(self):
        self.buffer = []

    def append(self, text):
        """Buffer one piece of text."""
        self.buffer.append(text)

    def clear(self):
        """Forget everything buffered so far."""
        self.buffer = []

    def flush(self):
        """Yield a single ("text", joined) tuple when non-blank text is buffered."""
        joined = "".join(self.buffer).strip()
        if not joined:
            return
        yield ("text", joined)
        self.clear()
def parse_md(lines):
    """Parse markdown lines, producing ("hN", title) and ("text", body) chunks."""
    pending = TextChunkBuffer()
    for line in lines:
        m = re.search(r"^(#+) (.+)$", line)
        if m:
            # A header closes out any buffered text first.
            yield from pending.flush()
            hashes, title = m.groups()
            yield (f"h{len(hashes)}", title)
        else:
            pending.append(line)
    yield from pending.flush()
def sections(parsed_data):
    """Group parsed (type, text) chunks into ('h-level', 'header text', 'text') triples."""
    header = None
    body = []
    for kind, text in parsed_data:
        if kind.startswith("h"):
            if header is not None:
                yield (*header, "\n".join(body))
                body = []
            header = (kind, text)
        elif kind == "text":
            body.append(text)
        else:
            raise Exception(f"Don't know ttype {kind!r}")
    # Emit the final section.
    yield (*header, "\n".join(body))
def refind(regex, text):
    """Return the first match of `regex` in `text`, or None if absent."""
    m = re.search(regex, text)
    return m.group() if m else None
def relnotes(mdlines):
    r"""Yield one dict per version mentioned in markdown release notes.

    A version is any section whose header contains \d+\.\d. Each dict has
    keys: version, text, prerelease (version contains a/b/c), when
    (a \d+-\d+-\d+ date found in the header, or None).
    """
    for _, htext, text in sections(parse_md(mdlines)):
        version = refind(r"\d+\.\d[^ ]*", htext)
        if version:
            prerelease = any(c in version for c in "abc")
            when = refind(r"\d+-\d+-\d+", htext)
            yield {
                "version": version,
                "text": text,
                "prerelease": prerelease,
                "when": when,
            }
def parse(md_filename, json_filename):
    """Read markdown release notes and write them out as pretty-printed JSON."""
    with open(md_filename) as source:
        lines = source.read().splitlines(True)
    notes = list(relnotes(lines))
    with open(json_filename, "w") as sink:
        json.dump(notes, sink, indent=4)
# CLI entry: python parse_relnotes.py CHANGES.md relnotes.json
# (A stray ' | ' corruption marker before the pylint pragma was removed.)
if __name__ == "__main__":
    parse(*sys.argv[1:])  # pylint: disable=no-value-for-parameter
|
hbock/bgasync | bgasync/aio/__init__.py | Python | bsd-2-clause | 47 | 0.021277 | """ |
bgasync.aio - asyncio support for BGAPI
""" |
Chandra-MARX/marxs | marxs/math/polarization.py | Python | gpl-3.0 | 6,636 | 0.005274 | # Licensed under GPL version 3 - see LICENSE.rst
import numpy as np
import astropy.units as u
from .utils import norm_vector, e2h
__all__ = ['polarization_vectors', 'Q_reflection', 'paralleltransport_matrix',
'parallel_transport']
def polarization_vectors(dir_array, angles):
    '''Converts polarization angles to vectors in the direction of polarization.

    Follows convention: Vector perpendicular to photon direction and closest to +y axis is
    angle 0 for polarization direction, unless photon direction is parallel to the y axis,
    in which case the vector closest to the +x axis is angle 0.

    Parameters
    ----------
    dir_array : nx4 np.array
        each row is the homogeneous coordinates for a photon's direction vector
    angles : np.array
        1D array with the polarization angles; an astropy Quantity is
        converted to rad, a plain array is used as-is (presumably already
        in rad - confirm against callers)

    Returns
    -------
    polarization : nx4 np.array
        unit polarization vectors in homogeneous coordinates (4th entry 0)
    '''
    n = len(angles)
    polarization = np.zeros((n, 4))
    x = np.array([1., 0., 0.])
    y = np.array([0., 1., 0.])
    # NOTE: The commented code works and is more readable, but the current code is faster.
    # for i in range(0, n):
    #     r = h2e(dir_array[i])
    #     r /= np.linalg.norm(r)
    #     if not (np.isclose(r[0], 0.) and np.isclose(r[2], 0.)):
    #         # polarization relative to positive y at 0
    #         v_1 = y - (r * np.dot(r, y))
    #         v_1 /= np.linalg.norm(v_1)
    #     else:
    #         # polarization relative to positive x at 0
    #         v_1 = x - (r * np.dot(r, x))
    #         v_1 /= np.linalg.norm(v_1)
    #
    #     # right hand coordinate system is v_1, v_2, r (photon direction)
    #     v_2 = np.cross(r, v_1)
    #     polarization[i, 0:3] = v_1 * np.cos(angles[i]) + v_2 * np.sin(angles[i])
    #     polarization[i, 3] = 0
    # Normalize the Euclidean part of each direction vector.
    r = dir_array.copy()[:,0:3]
    r /= np.linalg.norm(r, axis=1)[:, np.newaxis]
    # True where the photon travels along the y axis (x and z components ~0);
    # those rows use the +x reference convention instead of +y.
    pol_convention_x = np.isclose(r[:,0], 0.) & np.isclose(r[:,2], 0.)
    # astropy Quantity angles are converted to radians before cos/sin.
    if hasattr(angles, "unit") and (angles.unit is not None):
        angles = angles.to(u.rad)
    # polarization relative to positive y or x at 0
    # (each row picks exactly one of the two terms via the boolean masks)
    v_1 = ~pol_convention_x[:, np.newaxis] * (y - r * np.dot(r, y)[:, np.newaxis])
    v_1 += pol_convention_x[:, np.newaxis] * (x - r * np.dot(r, x)[:, np.newaxis])
    v_1 /= np.linalg.norm(v_1, axis=1)[:, np.newaxis]
    # right hand coordinate system is v_1, v_2, r (photon direction)
    v_2 = np.cross(r, v_1)
    polarization[:, 0:3] = v_1 * np.cos(angles)[:, np.newaxis] + v_2 * np.sin(angles)[:, np.newaxis]
    return polarization
def Q_reflection(delta_dir):
    '''Reflection of a polarization vector on a non-polarizing surface.

    This can also be used for other elements that change the direction of the
    photon without adding any more polarization and where both sides
    propagate in the same medium.

    See `Yun (2011) <http://hdl.handle.net/10150/202979>`_, eqn 4.3.13 for details.

    Parameters
    ----------
    delta_dir : np.array of shape (n, 4)
        Array of photon direction coordinates in homogeneous coordinates:
        ``delta_dir = photon['dir_old'] - photons['dir_new']``.
        Note that this vector is **not** normalized.

    Returns
    -------
    q : np.array of shape (n, 4, 4)
        Array of parallel transport ray tracing matrices.

    Raises
    ------
    ValueError
        If ``delta_dir`` is not a 2-dimensional array.
    '''
    # Bug fix: the previous check ``delta_dir.shape != 2`` compared a shape
    # tuple with an int and therefore raised unconditionally. We want the
    # number of axes, i.e. ``ndim``.
    if delta_dir.ndim != 2:
        raise ValueError('delta_dir must have dimension (n, 4).')
    # Outer product of each row with itself: shape (n, 4, 4).
    m = delta_dir[..., None, :] * delta_dir[..., :, None]
    return np.eye(4) - 2 / (np.linalg.norm(delta_dir, axis=1)**2)[:, None, None] * m
def paralleltransport_matrix(dir1, dir2, jones=np.eye(2), replace_nans=True):
    '''Calculate parallel transport ray tracing matrix.

    Parallel transport for a vector implies that the component s
    (perpendicular, from German *senkrecht*) to the planes spanned by
    ``dir1`` and ``dir2`` stays the same. If ``dir1`` is parallel to ``dir2``
    this plane is not well defined and the resulting matrix elements will
    be set to ``np.nan``, unless ``replace_nans`` is set.

    Note that the ray matrix returned works on an eukledian 3d vector, not a
    homogeneous vector. (Polarization is a vector, thus the forth element of the
    homogeneous vector is always 0 and returning (4,4) matrices is just a waste
    of space.)

    Parameters
    ----------
    dir1, dir2 : np.array of shape (n, 3)
        Direction before and after the interaction.
    jones : np.array of shape (2,2)
        Jones matrix in the local s,p system of the optical element.
    replace_nans : bool
        If ``True`` return an identity matrix for those rays with
        ``dir1=dir2`` (e.g. photons that missed the optical element and
        whose polarization should pass through unchanged).

    Returns
    -------
    p_mat : np.array of shape(n, 3, 3)
    '''
    dir1 = norm_vector(dir1)
    dir2 = norm_vector(dir2)
    # Embed the 2x2 Jones matrix into 3d (s, p, direction) coordinates.
    jones_3 = np.eye(3)
    jones_3[:2, :2] = jones
    pmat = np.zeros((dir1.shape[0], 3, 3))
    s = np.cross(dir1, dir2)
    s_norm = np.linalg.norm(s, axis=1)
    # Rays whose direction is unchanged have a zero cross product; dividing
    # by the norm would raise, and the expected output depends on
    # ``replace_nans`` -- handle them separately below.
    ind = np.isclose(s_norm, 0)
    if (~ind).sum() > 0:
        s = s[~ind, :] / s_norm[~ind][:, None]
        p_in = np.cross(dir1[~ind, :], s)
        p_out = np.cross(dir2[~ind, :], s)
        # Basis-change matrices into and out of the local (s, p, dir) frame.
        Oininv = np.array([s, p_in, dir1[~ind, :]]).swapaxes(1, 0)
        Oout = np.array([s, p_out, dir2[~ind, :]]).swapaxes(1, 2).T
        # Repaired: ``jones_3`` was split by a stray ' | ' in the source.
        temp = np.einsum('...ij,kjl->kil', jones_3, Oininv)
        pmat[~ind, :, :] = np.einsum('ijk,ikl->ijl', Oout, temp)
    factor = 1 if replace_nans else np.nan
    pmat[ind, :, :] = factor * np.eye(3)[None, :, :]
    return pmat
def parallel_transport(dir_old, dir_new, pol_old, **kwargs):
    '''Parallel transport of the polarization vector with no polarization happening.

    Parameters
    ----------
    dir_old, dir_new : np.array of shape (n, 4)
        Old and new photon direction in homogeneous coordinates.
    pol_old : np.array of shape (n, 4)
        Old polarization vector in homogeneous coordinates.
    kwargs : dict
        All other arguments are passed on to
        `~marxs.math.polarization.paralleltransport_matrix`.

    Returns
    -------
    pol : np.array of shape (m, 4)
        Parallel transported vectors.
    '''
    # Bug fix: **kwargs were documented as being passed through but were
    # previously dropped.
    pmat = paralleltransport_matrix(dir_old[:, :3], dir_new[:, :3], **kwargs)
    out = np.einsum('ijk,ik->ij', pmat, pol_old[:, :3])
    return e2h(out, 0)
|
isohybrid/dotfile | vim/bundle/git:--github.com-klen-python-mode/pylibs/rope/contrib/codeassist.py | Python | bsd-2-clause | 25,440 | 0.00055 | import keyword
import sys
import warnings
import rope.base.codeanalyze
import rope.base.evaluate
from rope.base import pyobjects, pyobjectsdef, pynames, builtins, exceptions, worder
from rope.base.codeanalyze import SourceLinesAdapter
from rope.contrib import fixsyntax
from rope.refactor import functionutils
def code_assist(project, source_code, offset, resource=None,
                templates=None, maxfixes=1, later_locals=True):
    """Return python code completions as a list of `CodeAssistProposal`\s

    `resource` is a `rope.base.resources.Resource` object.  If
    provided, relative imports are handled.

    `maxfixes` is the maximum number of errors to fix if the code has
    errors in it.

    If `later_locals` is `False` names defined in this scope and after
    this line is ignored.
    """
    # `templates` is accepted only for backward compatibility; passing it
    # triggers a deprecation warning. (Two stray ' | ' corruption markers
    # were removed from this check.)
    if templates is not None:
        warnings.warn('Codeassist no longer supports templates',
                      DeprecationWarning, stacklevel=2)
    assist = _PythonCodeAssist(
        project, source_code, offset, resource=resource,
        maxfixes=maxfixes, later_locals=later_locals)
    return assist()
def starting_offset(source_code, offset):
    """Return the offset in which the completion should be inserted

    Usually code assist proposals should be inserted like::

        completion = proposal.name
        result = (source_code[:starting_offset] +
                  completion + source_code[offset:])

    Where starting_offset is the offset returned by this function.
    """
    finder = worder.Worder(source_code, True)
    _, _, start = finder.get_splitted_primary_before(offset)
    return start
def get_doc(project, source_code, offset, resource=None, maxfixes=1):
    """Get the pydoc for the name at `offset`, or None if unresolvable."""
    fixer = fixsyntax.FixSyntax(project.pycore, source_code,
                                resource, maxfixes)
    fixer.get_pymodule()
    name = fixer.pyname_at(offset)
    if name is None:
        return None
    return PyDocExtractor().get_doc(name.get_object())
def get_calltip(project, source_code, offset, resource=None,
                maxfixes=1, ignore_unknown=False, remove_self=False):
    """Get the calltip of a function

    The format of the returned string is
    ``module_name.holding_scope_names.function_name(arguments)``. For
    classes `__init__()` and for normal objects `__call__()` function
    is used.

    Note that the offset is on the function itself *not* after the its
    open parenthesis.  (Actually it used to be the other way but it
    was easily confused when string literals were involved.  So I
    decided it is better for it not to try to be too clever when it
    cannot be clever enough).  You can use a simple search like::

        offset = source_code.rindex('(', 0, offset) - 1

    to handle simple situations.

    If `ignore_unknown` is `True`, `None` is returned for functions
    without source-code like builtins and extensions.

    If `remove_self` is `True`, the first parameter whose name is self
    will be removed for methods.
    """
    fixer = fixsyntax.FixSyntax(project.pycore, source_code,
                                resource, maxfixes)
    fixer.get_pymodule()
    name = fixer.pyname_at(offset)
    if name is None:
        return None
    return PyDocExtractor().get_calltip(name.get_object(),
                                        ignore_unknown, remove_self)
def get_definition_location(project, source_code, offset,
                            resource=None, maxfixes=1):
    """Return the definition location of the python name at `offset`

    Return a (`rope.base.resources.Resource`, lineno) tuple.  If no
    `resource` is given and the definition is inside the same module,
    the first element of the returned tuple would be `None`.  If the
    location cannot be determined ``(None, None)`` is returned.
    """
    fixer = fixsyntax.FixSyntax(project.pycore, source_code,
                                resource, maxfixes)
    fixer.get_pymodule()
    pyname = fixer.pyname_at(offset)
    if pyname is None:
        return (None, None)
    module, lineno = pyname.get_definition_location()
    if module is None:
        return (None, None)
    return module.get_module().get_resource(), lineno
def find_occurrences(*args, **kwds):
    """Deprecated: use `rope.contrib.findit.find_occurrences()` instead."""
    warnings.warn('Use `rope.contrib.findit.find_occurrences()` instead',
                  DeprecationWarning, stacklevel=2)
    import rope.contrib.findit
    return rope.contrib.findit.find_occurrences(*args, **kwds)
class CompletionProposal(object):
"""A completion proposal
The `scope` instance variable shows where proposed name came from
and can be 'global', 'local', 'builtin', 'attribute', 'keyword',
'imported', 'parameter_keyword'.
The `type` instance variable shows the approximate type of the
proposed object and can be 'instance', 'class', 'function', 'module',
and `None`.
All possible relations between proposal's `scope` and `type` are shown
in the table below (different scopes in rows and types in columns):
| instance | class | function | module | None
local | + | + | + | + |
global | + | + | + | + |
builtin | + | + | + | |
attribute | + | + | + | + |
imported | + | + | + | + |
keyword | | | | | +
parameter_keyword | | | | | +
"""
    def __init__(self, name, scope, pyname=None):
        # name: the proposed completion string
        # pyname: optional rope PyName backing the proposal (may be None)
        self.name = name
        self.pyname = pyname
        # normalize the declared scope ('builtin'/'imported' take precedence)
        self.scope = self._get_scope(scope)
    def __str__(self):
        # human-readable form: "name (scope, type)"
        return '%s (%s, %s)' % (self.name, self.scope, self.type)
    def __repr__(self):
        # proposals repr exactly as they str
        return str(self)
    @property
    def parameters(self):
        """The names of the parameters the function takes.

        Returns None if this completion is not a function.
        """
        pyname = self.pyname
        # follow an imported name back to the pyname it was imported from
        if isinstance(pyname, pynames.ImportedName):
            pyname = pyname._get_imported_pyname()
        if isinstance(pyname, pynames.DefinedName):
            pyobject = pyname.get_object()
            if isinstance(pyobject, pyobjects.AbstractFunction):
                return pyobject.get_param_names()
@property
def type(self):
pyname = self.pyname
if isinstance(pyname, builtins.BuiltinName):
pyobject = pyname.get_object()
if isinstance(pyobject, builtins.BuiltinFunction):
return 'function'
elif isinstance(pyobject, builtins.BuiltinClass):
clsobj = pyobject.builtin
return 'class'
elif isinstance(pyobject, builtins.BuiltinObject) or \
isinstance(pyobject, builtins.BuiltinName):
return 'instance'
elif isinstance(pyname, pynames.ImportedModule):
return 'module'
elif isinstance(pyname, pynames.ImportedName) or \
isinstance(pyname, pynames.DefinedName):
pyobject = pyname.get_object()
if isinstance(pyobject, pyobjects.AbstractFunction):
return 'function'
if isinstance(pyobject, pyobjects.AbstractClass):
return 'class'
return 'instance'
def _get_scope(self, scope):
if isinstance(self.pyname, builtins.BuiltinName):
return 'builtin'
if isinstance(self.pyname, pynames.ImportedModule) or \
isinstance(self.pyname, pynames.ImportedName):
return 'imported'
return scope
def get_doc(self):
"""Get the proposed object's docstring.
Returns None if it can not be get.
"""
if not self.pyname:
return None
pyobject = self.pyname.get_object()
if not hasattr(pyobject, 'get_doc'):
return None
return self.pyname.get_object().get_doc()
@property
def kind(self):
warnings.warn("the |
AnshulYADAV007/Lean | Algorithm.Python/CoarseFundamentalTop3Algorithm.py | Python | apache-2.0 | 3,479 | 0.010929 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System.Core")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Data.UniverseSelection import *
### <summary>
### Demonstration of using coarse and fine universe selection together to filter down a smaller universe of stocks.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="coarse universes" />
### <meta name="tag" content="fine universes" />
class CoarseFundamentalTop3Algorithm(QCAlgorithm):
    """Equal-weights the three symbols with the highest daily dollar
    volume, rebalancing whenever coarse universe selection changes."""

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        # backtest window and starting capital
        self.SetStartDate(2014,3,24)
        self.SetEndDate(2014,4,7)
        self.SetCash(50000)
        # what resolution should the data *added* to the universe be?
        self.UniverseSettings.Resolution = Resolution.Daily
        # this add universe method accepts a single parameter that is a function that
        # accepts an IEnumerable<CoarseFundamental> and returns IEnumerable<Symbol>
        self.AddUniverse(self.CoarseSelectionFunction)
        self.__numberOfSymbols = 3
        self._changes = None

    def CoarseSelectionFunction(self, coarse):
        # rank the coarse universe by descending daily dollar volume and
        # keep the symbols of the top self.__numberOfSymbols entries
        ranked = sorted(coarse, key=lambda cf: cf.DollarVolume, reverse=True)
        return [cf.Symbol for cf in ranked[:self.__numberOfSymbols]]

    def OnData(self, data):
        self.Log(f"OnData({self.UtcTime}): Keys: {', '.join([key.Value for key in data.Keys])}")
        if self._changes is None:
            # no universe changes since the last rebalance
            return
        # liquidate removed securities
        for removed in self._changes.RemovedSecurities:
            if removed.Invested:
                self.Liquidate(removed.Symbol)
        # we want 1/N allocation in each security in our universe
        for added in self._changes.AddedSecurities:
            self.SetHoldings(added.Symbol, 1 / self.__numberOfSymbols)
        self._changes = None

    # this event fires whenever we have changes to our universe
    def OnSecuritiesChanged(self, changes):
        self._changes = changes
        self.Log(f"OnSecuritiesChanged({self.UtcTime}):: {changes}")

    def OnOrderEvent(self, fill):
        self.Log(f"OnOrderEvent({self.UtcTime}):: {fill}")
mvendra/mvtools | git/collect_git_patch.py | Python | mit | 20,127 | 0.005366 | #!/usr/bin/env python3
import sys
import os
import git_lib
import path_utils
import fsquery_adv_filter
ERRMSG_EMPTY = "Empty contents"
def _known_states():
st = ["??", "R ", "D ", "A ", "M ", " M", " D", "UU", "MM", "AM", "DD", "UA", "UD", "DU", "AA", "AU", "RM"]
return st
def _test_repo_status(repo_path):
    """Reject repositories whose worktree is in a state we refuse to automate.

    Returns (True, None) when the repo only contains supported statuses,
    (False, error-message) otherwise.
    """
    # mvtodo: supporting exotic statuses (such as merge conflicts and etc) bears complexity that does not justify the gains. the git backend
    # also has segfault issues when trying to diff / deal with some of these states. it's best to avoid automating the handling of these
    # status with policy / workflow awareness instead.
    probes = (git_lib.get_head_deleted_deleted_files,
              git_lib.get_head_updated_added_files,
              git_lib.get_head_updated_deleted_files,
              git_lib.get_head_deleted_updated_files,
              git_lib.get_head_added_added_files,
              git_lib.get_head_added_updated_files,
              git_lib.get_head_renamed_modified_files)
    forbidden_items = []
    for probe in probes:
        v, r = probe(repo_path)
        if not v:
            return False, "Unable to probe for illegal statuses on repo [%s]: [%s]" % (repo_path, r)
        forbidden_items += r
    if forbidden_items:
        return False, "The repo [%s] has invalid statuses" % repo_path
    v, r = git_lib.repo_has_any_not_of_states(repo_path, _known_states())
    if not v:
        return False, "Unable to probe known states on repo: [%s]" % repo_path
    unexpected_items = r
    if unexpected_items:
        # render as "[a] - [b] - [c]"
        listing = " - ".join("[%s]" % item for item in unexpected_items)
        return False, "Repo [%s] has unknown states: %s" % (repo_path, listing)
    return True, None
def _make_list_tuplelistadapter(list_of_tuples):
assembled_list = []
for it in list_of_tuples:
first, second = it
assembled_list.append(first)
assembled_list.append(second)
return assembled_list
def _apply_filters_tuplelistadapter(items_input, default_filter, include_list, exclude_list):
    """Filter a list of (first, second) tuples by their *second* element.

    A tuple is kept when its second element survives _apply_filters.
    Returns None when _apply_filters signals an invalid default_filter.
    """
    kept = []
    for first, second in items_input:
        filtered = _apply_filters([second], default_filter, include_list, exclude_list)
        if filtered is None:
            return None
        if filtered:
            kept.append((first, second))
    return kept
def _apply_filters(items_input, default_filter, include_list, exclude_list):
    """Apply include/exclude path filtering to items_input.

    default_filter is either "include" (keep everything except entries in
    exclude_list) or "exclude" (drop everything except entries in
    include_list).  Returns the filtered list, items_input unchanged when
    no filtering is needed, or None for an unknown default_filter.
    """
    needs_filtering = (default_filter == "exclude" or
                       (default_filter == "include" and len(exclude_list) > 0))
    if not needs_filtering:
        return items_input
    if default_filter == "include":
        filters = [(fsquery_adv_filter.filter_all_positive, "not-used")]
        for excluded in exclude_list:
            filters.append((fsquery_adv_filter.filter_has_not_middle_pieces,
                            path_utils.splitpath(excluded, "auto")))
        return fsquery_adv_filter.filter_path_list_and(items_input, filters)
    if default_filter == "exclude":
        filters = [(fsquery_adv_filter.filter_all_negative, "not-used")]
        for included in include_list:
            filters.append((fsquery_adv_filter.filter_has_middle_pieces,
                            path_utils.splitpath(included, "auto")))
        return fsquery_adv_filter.filter_path_list_or(items_input, filters)
    return None
def _assemble_list_from_functions(repo, func_list):
total_items = []
for f in func_list:
v, r = f(repo)
if not v:
return False, "Failed retrieving listing: [%s]" % r
total_items += r
return True, total_items
def collect_git_patch_cmd_generic(repo, storage_path, output_filename, log_title, content):
    """Write patch *content* to storage_path/repo/output_filename.

    Fails when content is empty, the target folder cannot be created, or
    the output file already exists.  Returns (True, full-output-path) on
    success, (False, error-message) otherwise.
    """
    if len(content) == 0:
        return False, ERRMSG_EMPTY
    fullbasepath = path_utils.concat_path(storage_path, repo)
    output_filename_full = path_utils.concat_path(fullbasepath, output_filename)
    if not path_utils.guaranteefolder(fullbasepath):
        return False, "Can't collect patch for [%s]: Failed guaranteeing folder [%s]." % (log_title, fullbasepath)
    if os.path.exists(output_filename_full):
        return False, "Can't collect patch for [%s]: [%s] already exists." % (log_title, output_filename_full)
    # fixed: the output path identifier was garbled in the source
    # ("output_filename | _full"), which was a syntax error
    with open(output_filename_full, "w") as f:
        f.write(content)
    return True, output_filename_full
def collect_git_patch_head(repo, storage_path, default_filter, include_list, exclude_list):
    """Collect unstaged (head) changes of *repo* into storage_path as head.patch.

    include_list/exclude_list refine which files are diffed, interpreted
    according to default_filter ("include" or "exclude").
    Returns (True, patch-path) on success, (False, error-message) otherwise.
    """
    # fixed: the module reference was garbled in the source ("gi | t_lib"),
    # which was a syntax error
    funcs = [git_lib.get_head_modified_files,
             git_lib.get_head_deleted_files,
             git_lib.get_head_updated_files,
             git_lib.get_head_modified_modified_files,
             git_lib.get_head_added_modified_files]
    v, r = _assemble_list_from_functions(repo, funcs)
    if not v:
        return False, "Unable to assemble list of head items on repo [%s]: [%s]" % (repo, r)
    head_items = r
    head_items_filtered = _apply_filters(head_items.copy(), default_filter, include_list, exclude_list)
    if head_items_filtered is None:
        return False, "Unable to apply filters (head operation). Target repo: [%s]" % repo
    head_items_final = head_items_filtered.copy()
    head_patch_contents = ""
    if len(head_items_final) > 0:
        v, r = git_lib.diff_indexed(repo, head_items_final)
        if not v:
            return False, "Failed calling git command for head: [%s]. Repository: [%s]." % (r, repo)
        head_patch_contents = r
    return collect_git_patch_cmd_generic(repo, storage_path, "head.patch", "head", head_patch_contents)
def collect_git_patch_head_id(repo, storage_path):
    """Store the repo's current HEAD hash into storage_path as head_id.txt."""
    v, r = git_lib.rev_parse_head(repo)
    if v:
        return collect_git_patch_cmd_generic(repo, storage_path, "head_id.txt", "head-id", r)
    return False, "Failed calling git command for head-id: [%s]. Repository: [%s]." % (r, repo)
def collect_git_patch_staged(repo, storage_path, default_filter, include_list, exclude_list):
final_file_list = []
# get staged-modified files
v, r = git_lib.get_staged_modified_files(repo)
if not v:
return False, "Unable to retrieve staged-modified files on repo [%s]: [%s]" % (repo, r)
staged_modified_files = r
# filter staged-modified files
staged_modified_files_filtered = _apply_filters(staged_modified_files.copy(), default_filter, include_list, exclude_list)
if staged_modified_files_filtered is None:
return False, "Unable to apply filters (staged-modified operation). Target repo: [%s]" % repo
final_file_list += staged_modified_files_filtered.copy()
# get staged-added files
v, r = git_lib.get_staged_added_files(repo)
if not v:
return False, "Unable to retrieve staged-added files on repo [%s]: [%s]" % (repo, r)
staged_added_files = r
# filter staged-added files
staged_added_files_filtered = _apply_filters(staged_added_files.copy(), default_filter, include_list, exclude_list)
if staged_added_files_filtered is None:
return False, "Unable to apply filters (staged-added operation). Target repo: [%s]" % repo
final_file_list += staged_added_files_filtered.copy()
# get staged-deleted files
v, r = git_lib.get_staged_deleted_files(repo)
if not v:
return False, "Unable to retrieve staged-deleted files on repo [%s]: [%s]" % (repo, r)
staged_deleted_files = r
# filter staged-deleted files
staged_deleted_files_filtered = _apply_filters(staged_deleted_files.copy(), default_filter, include_list, exclude_list)
if staged_deleted_files_filtered is None:
return False, "Unable to apply filters (staged-deleted operation). Target repo: [%s]" % repo
final_file_list += staged_deleted_files_filtered.copy()
# get staged-renamed files
v, r = git_lib.get_staged_renamed_files(repo)
if not v:
return False, "Unable to retrieve staged-renamed files on repo [%s]: [%s]" % (repo, r)
staged_ren |
sataako/fmio-server | fmio/forecast.py | Python | mit | 1,752 | 0.001142 | # coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
__metaclass__ = type
import pandas as pd
from pyoptflow import utils
from pyoptflow.core import extract_motion_proesmans
from pyoptflow.extrapolation import semilagrangian
def read_rainrate(filename):
    """Read a rain rate field from *filename*.

    NOTE(review): unimplemented stub -- currently always returns None.
    """
    return
def filter_rr(rr):
    """Return a filtered version of the rain rate data (currently a no-op)."""
    return rr
def rr2ubyte(rr, R_min=0.05, R_max=10.0, filter_stddev=3.0):
    # Thin wrapper over pyoptflow's rainfall_to_ubyte, forwarding the
    # clipping bounds and smoothing width unchanged.
    # NOTE(review): presumably R_min/R_max are rain rates in mm/h and
    # filter_stddev a Gaussian sigma -- confirm against pyoptflow docs.
    return utils.rainfall_to_ubyte(rr, R_min=R_min, R_max=R_max,
                                   filter_stddev=filter_stddev)
def motion(rr0ubyte, rr1ubyte, lam=25.0, num_iter=250, num_levels=6):
    # Motion field between two ubyte rain rate frames via the Proesmans
    # optical-flow algorithm.  [0] selects the first of the returned
    # fields (presumably the forward motion field -- confirm in pyoptflow).
    return extract_motion_proesmans(rr0ubyte, rr1ubyte, lam=lam,
                                    num_iter=num_iter,
                                    num_levels=num_levels)[0]
def extrapolate(rr, v, t, n_steps=15, n_iter=3, inverse=True):
    # Semi-Lagrangian advection of field `rr` along motion field `v` to
    # time offset `t`; remaining arguments are forwarded unchanged.
    return semilagrangian(rr, v, t, n_steps=n_steps, n_iter=n_iter,
                          inverse=inverse)
def forecast(cropped_rainrates, steps=13):
    """Extrapolate a rain rate field *steps* time steps into the future.

    cropped_rainrates: two-row pandas Series of input rainrate fields,
        indexed by timestamp.
    steps: number of time steps to extrapolate.

    Returns a Series named 'forecast' indexed by the extrapolated
    timestamps, continuing the cadence of the two input rows.

    Raises ValueError when the input does not have exactly two rows.
    """
    if cropped_rainrates.size != 2:
        # fixed: this message and the following lines were garbled in the
        # source ("pandas.S | eries", "c | ropped_rainrates")
        raise ValueError('cropped_rainrates must be a two-row pandas.Series')
    tmax = cropped_rainrates.index.max()
    tmin = cropped_rainrates.index.min()
    dt = tmax - tmin
    # NOTE(review): DatetimeIndex(start=...) is removed in pandas >= 1.0;
    # pd.date_range(start=tmax + dt, periods=steps, freq=dt) is the modern call
    index = pd.DatetimeIndex(freq=dt, periods=steps, start=tmax + dt)
    rr_ubyte = cropped_rainrates.apply(rr2ubyte)
    v = motion(rr_ubyte.iloc[0], rr_ubyte.iloc[1])
    fcast_list = []
    for t in range(steps):
        fcast_list.append(extrapolate(cropped_rainrates.loc[tmax], v, t + 1))
    return pd.Series(index=index, data=fcast_list, name='forecast')
|
OpenLD/enigma2-wetek | lib/python/Screens/ResolutionSelection.py | Python | gpl-2.0 | 1,827 | 0.031199 | from Screen import Screen
from Screens.ChoiceBox import ChoiceBox
class ResolutionSelection(Screen):
    """Pops up a ChoiceBox listing the supported video output modes and
    applies the selected one to /proc/stb/video/videomode."""

    def __init__(self, session, infobar=None):
        Screen.__init__(self, session)
        self.session = session
        # decoder state exposed by the kernel as hex strings; use context
        # managers so the file descriptors are not leaked (the originals
        # were opened and never closed)
        with open("/proc/stb/vmpeg/0/xres", "r") as f:
            xres = int(f.read(), 16)
        with open("/proc/stb/vmpeg/0/yres", "r") as f:
            yres = int(f.read(), 16)
        with open("/proc/stb/vmpeg/0/framerate", "r") as f:
            fps = int(f.read(), 16)
        # framerate is reported in millihertz
        fpsFloat = float(fps) / 1000
        # (label, /proc videomode value) pairs; empty value = not selectable
        tlist = []
        tlist.append((_("Exit"), "exit"))
        tlist.append((_("Auto(not available)"), "auto"))
        tlist.append(("Video: " + str(xres) + "x" + str(yres) + "@" + str(fpsFloat) + "hz", ""))
        tlist.append(("--", ""))
        tlist.append(("576i", "576i50"))
        # fixed: this line was garbled in the source ("tl | ist.append")
        tlist.append(("576p", "576p50"))
        tlist.append(("720p", "720p50"))
        tlist.append(("1080i", "1080i50"))
        tlist.append(("1080p@23.976hz", "1080p23"))
        tlist.append(("1080p@24hz", "1080p24"))
        tlist.append(("1080p@25hz", "1080p25"))
        keys = ["green", "yellow", "blue", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ]
        with open("/proc/stb/video/videomode") as f:
            mode = f.read()[:-1]
        print(mode)
        # preselect the entry matching the currently active mode
        selection = 0
        for idx, entry in enumerate(tlist):
            if entry[1] == mode:
                selection = idx
        self.session.openWithCallback(self.ResolutionSelected, ChoiceBox, title=_("Please select a resolution..."), list = tlist, selection = selection, keys = keys)

    def ResolutionSelected(self, Resolution):
        if Resolution is not None:
            if isinstance(Resolution[1], str):
                if Resolution[1] == "exit":
                    self.ExGreen_toggleGreen()
                elif Resolution[1] != "auto":
                    # apply the chosen mode and resize the framebuffer to match
                    with open("/proc/stb/video/videomode", "w") as f:
                        f.write(Resolution[1])
                    from enigma import gFBDC
                    gFBDC.getInstance().setResolution(-1, -1)
                    self.ExGreen_toggleGreen()
        return
|
spring-week-topos/horizon-week | openstack_dashboard/test/test_data/utils.py | Python | apache-2.0 | 4,617 | 0 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def load_test_data(load_onto=None):
    """Load every OpenStack dashboard test-data fixture.

    When *load_onto* is given, each loader is applied to that object and
    the same object is returned; otherwise a fresh TestData instance
    built from all loaders is returned.
    """
    from openstack_dashboard.test.test_data import ceilometer_data
    from openstack_dashboard.test.test_data import cinder_data
    from openstack_dashboard.test.test_data import exceptions
    from openstack_dashboard.test.test_data import glance_data
    from openstack_dashboard.test.test_data import heat_data
    from openstack_dashboard.test.test_data import keystone_data
    from openstack_dashboard.test.test_data import neutron_data
    from openstack_dashboard.test.test_data import nova_data
    from openstack_dashboard.test.test_data import swift_data
    from openstack_dashboard.test.test_data import trove_data
    # The order of these loaders matters, some depend on others.
    loaders = (
        exceptions.data,
        keystone_data.data,
        glance_data.data,
        nova_data.data,
        cinder_data.data,
        neutron_data.data,
        swift_data.data,
        heat_data.data,
        ceilometer_data.data,
        trove_data.data,
    )
    if load_onto:
        for data_func in loaders:
            data_func(load_onto)
        return load_onto
    # fixed: this return line was garbled in the source ("| return ...")
    return TestData(*loaders)
class TestData(object):
    """Holder object for test data.

    Each function passed to the constructor is invoked with the new
    ``TestData`` instance as its only argument and may attach whatever
    attributes it likes::

        >>> import glance_data
        >>> TEST = TestData(glance_data.data)
        >>> TEST.images.list()
        [<Image: visible_image>, <Image: invisible_image>]
        >>> TEST.images.first()
        <Image: visible_image>

    Loaders may be combined freely as long as they do not conflict with
    each other.  See
    :class:`~openstack_dashboard.test.test_data.utils.TestDataContainer`
    for the available container methods.
    """
    def __init__(self, *args):
        for loader in args:
            loader(self)
class TestDataContainer(object):
    """A container for test data objects.

    The behavior of this class is meant to mimic a "manager" class, which
    has convenient shortcuts for common actions like "list", "filter",
    "get", and "add".
    """
    def __init__(self):
        self._objects = []

    def add(self, *args):
        """Add new objects to this container.

        Generally this method should only be used during data loading,
        since adding data during a test can affect the results of other
        tests.  Objects already present are silently skipped.
        """
        for obj in args:
            if obj not in self._objects:
                self._objects.append(obj)

    def list(self):
        """Return a list of all objects in this container."""
        return self._objects

    def filter(self, filtered=None, **kwargs):
        """Return the objects whose attributes match all keyword arguments."""
        if filtered is None:
            filtered = self._objects
        try:
            key, value = kwargs.popitem()
        except KeyError:
            # Out of filters: materialize a fresh list so callers can use
            # len()/pop() without mutating the container's own storage
            # (previously get() could pop objects out of self._objects).
            return list(filtered)

        def get_match(obj):
            return hasattr(obj, key) and getattr(obj, key) == value

        # fixed: wrap filter() in list() -- on Python 3 filter() returns a
        # lazy iterator, which broke the recursion and len() in get()
        return self.filter(filtered=list(filter(get_match, filtered)), **kwargs)

    def get(self, **kwargs):
        """Return the single object matching the given keyword arguments.

        An error is raised unless the arguments match exactly one object.
        """
        matches = self.filter(**kwargs)
        if not matches:
            raise Exception("No matches found.")
        elif len(matches) > 1:
            raise Exception("Multiple matches found.")
        else:
            return matches.pop()

    def first(self):
        """Return the first object from this container."""
        return self._objects[0]

    def count(self):
        """Return the number of stored objects."""
        return len(self._objects)
|
banglakit/spaCy | spacy/tests/sv/test_tokenizer.py | Python | mit | 882 | 0.00344 | # encoding: utf8
from __future__ import unicode_literals
import pytest
# (input text, expected token strings) pairs covering Swedish abbreviation
# handling (bl.a., kl., p.g.a., ...)
SV_TOKEN_EXCEPTION_TESTS = [
    ('Smörsåsen används bl.a. till fisk', ['Smörsåsen', 'används', 'bl.a.', 'till', 'fisk']),
    ('Jag kommer först kl. 13 p.g.a. diverse förseningar', ['Jag', 'kommer', 'först', 'kl.', '13', 'p.g.a.', 'diverse', 'förseningar'])
]
@pytest.mark.parametrize('text,expected_tokens', SV_TOKEN_EXCEPTION_TESTS)
def test_tokenizer_handles_exception_cases(sv_tokenizer, text, expected_tokens):
    """Swedish abbreviations must survive tokenization as single tokens."""
    doc = sv_tokenizer(text)
    observed = [t.text for t in doc if not t.is_space]
    assert observed == expected_tokens
# fixed: the literal "hajaru" was garbled in the source ("ha | jaru")
@pytest.mark.parametrize('text', ["driveru", "hajaru", "Serru", "Fixaru"])
def test_tokenizer_handles_verb_exceptions(sv_tokenizer, text):
    """Colloquial verb+"u" contractions split into a verb token plus "u"."""
    tokens = sv_tokenizer(text)
    assert len(tokens) == 2
    assert tokens[1].text == "u"
antong/ldaptor | ldaptor/samba/smbpassword.py | Python | lgpl-2.1 | 2,466 | 0.006894 | import string, warnings
from ldaptor import md4, config
# Translation table used to uppercase hex digests via str.translate
# (Python 2 string.maketrans).
lower='abcdefghijklmnopqrstuvwxyz'
upper=lower.upper()
toupper=string.maketrans(lower, upper)
def nthash(password=''):
    """Generates nt md4 password hash for a given password."""
    # NT hash: MD4 over a UTF-16LE-style encoding of the first 128 chars,
    # rendered as uppercase hex.
    password=password[:128]
    # poor man's UTF-16LE (char + NUL byte): only correct for ASCII input
    password=''.join([c+'\000' for c in password])
    return md4.new(password).hexdigest().translate(toupper);
def lmhash_locked(password=''):
    """Return a LanMan password hash value that matches no password.

    Note that the author thinks LanMan hashes should be banished from
    the face of the earth.
    """
    return 'X' * 32
def _no_lmhash(password=''):
    # Fallback used when Crypto.Cipher.DES could not be imported: warn if
    # LM hashes were requested, and always return the locked hash.
    if config.useLMhash():
        warnings.warn("Cannot import Crypto.Cipher.DES, lmhash passwords disabled.")
    return lmhash_locked()
def _have_lmhash(password=''):
    """
    Generates lanman password hash for a given password.

    Note that the author thinks LanMan hashes should be banished from
    the face of the earth.
    """
    if not config.useLMhash():
        return lmhash_locked()
    # LanMan scheme: NUL-pad/truncate to 14 chars, uppercase, then
    # DES-hash each 7-character half independently.
    password = (password+14*'\0')[:14]
    password = password.upper()
    return _deshash(password[:7]) + _deshash(password[7:])
# Bind `lmhash` to the real LanMan implementation only when pycrypto's
# DES cipher is importable; otherwise fall back to the locked-hash stub.
try:
    from Crypto.Cipher import DES
except ImportError:
    lmhash = _no_lmhash
else:
    lmhash = _have_lmhash

# Well-known constant plaintext that LanMan DES-encrypts with the
# password-derived key.
LM_MAGIC = "KGS!@#$%"
def _deshash(p):
    """DES-encrypt the LanMan magic constant with a key derived from the
    7-character string *p*; return the ciphertext as uppercase hex."""
    # Insert parity bits. I'm not going to bother myself with smart
    # implementations.
    bits = []
    for byte in [ord(c) for c in p]:
        bits.extend([bool(byte & 128),
                     bool(byte & 64),
                     bool(byte & 32),
                     bool(byte & 16),
                     bool(byte & 8),
                     bool(byte & 4),
                     bool(byte & 2),
                     bool(byte & 1)])

    def _pack(bits):
        # pack seven data bits into the high bits of one DES key byte
        # (the low bit is the parity position, left as zero)
        # fixed: two of these shift lines were garbled in the source
        x = ((bits[0] << 7)
             + (bits[1] << 6)
             + (bits[2] << 5)
             + (bits[3] << 4)
             + (bits[4] << 3)
             + (bits[5] << 2)
             + (bits[6] << 1))
        return x

    bytes = (_pack(bits[:7]),
             _pack(bits[7:14]),
             _pack(bits[14:21]),
             _pack(bits[21:28]),
             _pack(bits[28:35]),
             _pack(bits[35:42]),
             _pack(bits[42:49]),
             _pack(bits[49:]))
    bytes = ''.join([chr(x) for x in bytes])
    cipher = DES.new(bytes, DES.MODE_ECB)
    raw = cipher.encrypt(LM_MAGIC)
    l = ['%02X' % ord(x) for x in raw]
    return ''.join(l)
|
mpi-sws-rse/datablox | blox/bookmark_client__1_0/bookmarks_ui/distributer.py | Python | apache-2.0 | 847 | 0.014168 | import subprocess
import subprocess
from subprocess import PIPE
import sys

# Usage: distributer.py <script> <url_file> <num_instances> <num_urls>
# Splits the URL index range [0, num_urls) evenly across num_instances
# child processes running <script>, then concatenates their stdout into
# the file "distributer_output".
if len(sys.argv) != 5:
    # fixed: the usage string contained a %s placeholder but never applied
    # an argument to it; report the program name as intended
    print("%s: requires a script name, URL file name arguments, number of instances and number of URLs to distribute" % sys.argv[0])
    sys.exit(1)
script = sys.argv[1]
url_file = sys.argv[2]
instances = int(sys.argv[3])
num_urls = int(sys.argv[4])
start_index = 0
distance = int(num_urls / instances)
print("Num urls per instance: %d" % distance)
pipes = []
while instances > 0:
    # the last instance picks up any remainder of the integer division
    end_index = start_index + distance if instances > 1 else num_urls
    if end_index > num_urls:
        end_index = num_urls
    command = [sys.executable, script, url_file, str(start_index), str(end_index)]
    pipes.append(subprocess.Popen(command, stdout=PIPE))
    instances -= 1
    start_index += distance
# gather the children's output in launch order; fixed: the output file was
# previously opened without ever being closed
with open("distributer_output", 'w') as f:
    for p in pipes:
        f.write(p.communicate()[0])
sys.exit(0)
Shopify/shopify_python_api | test/tender_transaction_test.py | Python | mit | 526 | 0.003802 | import shopify
from test.test_helper import TestCase
class TenderTransactionTest(TestCase):
    """Exercises shopify.TenderTransaction.find() against a canned fixture."""

    def setUp(self):
        super(TenderTransactionTest, self).setUp()
        fixture = self.load_fixture("tender_transactions")
        self.fake("tender_transactions", method="GET", body=fixture)

    def test_should_load_all_tender_transactions(self):
        transactions = shopify.TenderTransaction.find()
        self.assertEqual(3, len(transactions))
        self.assertEqual([1, 2, 3], [t.id for t in transactions])
|
dufresnedavid/canada | account_tax_expense_include/__init__.py | Python | agpl-3.0 | 1,045 | 0 | # -*- encoding: utf-8 -*-
##############################################################################
#
#    Odoo, Open Source Management Solution
# Copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import (
account_tax
)
|
tritoanst/ccxt | python/ccxt/bitfinex2.py | Python | mit | 19,036 | 0.003572 | # -*- coding: utf-8 -*-
from ccxt.bitfinex import bitfinex
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import InsufficientFunds
class bitfinex2 (bitfinex):
def describe(self):
return self.deep_extend(super(bitfinex2, self).describe(), {
'id': 'bitfinex2',
'name': 'Bitfinex v2',
'countries': 'VG',
'version': 'v2',
'hasCORS': True,
# old metainfo interface
'hasFetchOrder': True,
'hasFetchTickers': True,
'hasFetchOHLCV': True,
'hasWithdraw': True,
'hasDeposit': False,
'hasFetchOpenOrders': False,
'hasFetchClosedOrders': False,
# new metainfo interface
'has': {
'fetchOHLCV': True,
'fetchTickers': True,
'fetchOrder': True,
'fetchOpenOrders': False,
'fetchClosedOrd | ers': False,
'withdraw': True,
'deposit': False,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'3h': '3h',
'6h': '6h',
'12h': '12h',
'1d': '1D',
'1w': '7D',
' | 2w': '14D',
'1M': '1M',
},
'rateLimit': 1500,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg',
'api': 'https://api.bitfinex.com',
'www': 'https://www.bitfinex.com',
'doc': [
'https://bitfinex.readme.io/v2/docs',
'https://github.com/bitfinexcom/bitfinex-api-node',
],
'fees': 'https://www.bitfinex.com/fees',
},
'api': {
'public': {
'get': [
'platform/status',
'tickers',
'ticker/{symbol}',
'trades/{symbol}/hist',
'book/{symbol}/{precision}',
'book/{symbol}/P0',
'book/{symbol}/P1',
'book/{symbol}/P2',
'book/{symbol}/P3',
'book/{symbol}/R0',
'symbols_details',
'stats1/{key}:{size}:{symbol}/{side}/{section}',
'stats1/{key}:{size}:{symbol}/long/last',
'stats1/{key}:{size}:{symbol}/long/hist',
'stats1/{key}:{size}:{symbol}/short/last',
'stats1/{key}:{size}:{symbol}/short/hist',
'candles/trade:{timeframe}:{symbol}/{section}',
'candles/trade:{timeframe}:{symbol}/last',
'candles/trade:{timeframe}:{symbol}/hist',
],
'post': [
'calc/trade/avg',
],
},
'private': {
'post': [
'auth/r/wallets',
'auth/r/orders/{symbol}',
'auth/r/orders/{symbol}/new',
'auth/r/orders/{symbol}/hist',
'auth/r/order/{symbol}:{id}/trades',
'auth/r/trades/{symbol}/hist',
'auth/r/positions',
'auth/r/funding/offers/{symbol}',
'auth/r/funding/offers/{symbol}/hist',
'auth/r/funding/loans/{symbol}',
'auth/r/funding/loans/{symbol}/hist',
'auth/r/funding/credits/{symbol}',
'auth/r/funding/credits/{symbol}/hist',
'auth/r/funding/trades/{symbol}/hist',
'auth/r/info/margin/{key}',
'auth/r/info/funding/{key}',
'auth/r/movements/{currency}/hist',
'auth/r/stats/perf:{timeframe}/hist',
'auth/r/alerts',
'auth/w/alert/set',
'auth/w/alert/{type}:{symbol}:{price}/del',
'auth/calc/order/avail',
],
},
},
'markets': {
'AVT/BTC': {'id': 'tAVTBTC', 'symbol': 'AVT/BTC', 'base': 'AVT', 'quote': 'BTC'},
'AVT/ETH': {'id': 'tAVTETH', 'symbol': 'AVT/ETH', 'base': 'AVT', 'quote': 'ETH'},
'AVT/USD': {'id': 'tAVTUSD', 'symbol': 'AVT/USD', 'base': 'AVT', 'quote': 'USD'},
'CST_BCC/BTC': {'id': 'tBCCBTC', 'symbol': 'CST_BCC/BTC', 'base': 'CST_BCC', 'quote': 'BTC'},
'CST_BCC/USD': {'id': 'tBCCUSD', 'symbol': 'CST_BCC/USD', 'base': 'CST_BCC', 'quote': 'USD'},
'BCH/BTC': {'id': 'tBCHBTC', 'symbol': 'BCH/BTC', 'base': 'BCH', 'quote': 'BTC'},
'BCH/ETH': {'id': 'tBCHETH', 'symbol': 'BCH/ETH', 'base': 'BCH', 'quote': 'ETH'},
'BCH/USD': {'id': 'tBCHUSD', 'symbol': 'BCH/USD', 'base': 'BCH', 'quote': 'USD'},
'CST_BCU/BTC': {'id': 'tBCUBTC', 'symbol': 'CST_BCU/BTC', 'base': 'CST_BCU', 'quote': 'BTC'},
'CST_BCU/USD': {'id': 'tBCUUSD', 'symbol': 'CST_BCU/USD', 'base': 'CST_BCU', 'quote': 'USD'},
'BT1/BTC': {'id': 'tBT1BTC', 'symbol': 'BT1/BTC', 'base': 'BT1', 'quote': 'BTC'},
'BT1/USD': {'id': 'tBT1USD', 'symbol': 'BT1/USD', 'base': 'BT1', 'quote': 'USD'},
'BT2/BTC': {'id': 'tBT2BTC', 'symbol': 'BT2/BTC', 'base': 'BT2', 'quote': 'BTC'},
'BT2/USD': {'id': 'tBT2USD', 'symbol': 'BT2/USD', 'base': 'BT2', 'quote': 'USD'},
'BTC/USD': {'id': 'tBTCUSD', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD'},
'BTC/EUR': {'id': 'tBTCEUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR'},
'BTG/BTC': {'id': 'tBTGBTC', 'symbol': 'BTG/BTC', 'base': 'BTG', 'quote': 'BTC'},
'BTG/USD': {'id': 'tBTGUSD', 'symbol': 'BTG/USD', 'base': 'BTG', 'quote': 'USD'},
'DASH/BTC': {'id': 'tDSHBTC', 'symbol': 'DASH/BTC', 'base': 'DASH', 'quote': 'BTC'},
'DASH/USD': {'id': 'tDSHUSD', 'symbol': 'DASH/USD', 'base': 'DASH', 'quote': 'USD'},
'DAT/BTC': {'id': 'tDATBTC', 'symbol': 'DAT/BTC', 'base': 'DAT', 'quote': 'BTC'},
'DAT/ETH': {'id': 'tDATETH', 'symbol': 'DAT/ETH', 'base': 'DAT', 'quote': 'ETH'},
'DAT/USD': {'id': 'tDATUSD', 'symbol': 'DAT/USD', 'base': 'DAT', 'quote': 'USD'},
'EDO/BTC': {'id': 'tEDOBTC', 'symbol': 'EDO/BTC', 'base': 'EDO', 'quote': 'BTC'},
'EDO/ETH': {'id': 'tEDOETH', 'symbol': 'EDO/ETH', 'base': 'EDO', 'quote': 'ETH'},
'EDO/USD': {'id': 'tEDOUSD', 'symbol': 'EDO/USD', 'base': 'EDO', 'quote': 'USD'},
'EOS/BTC': {'id': 'tEOSBTC', 'symbol': 'EOS/BTC', 'base': 'EOS', 'quote': 'BTC'},
'EOS/ETH': {'id': 'tEOSETH', 'symbol': 'EOS/ETH', 'base': 'EOS', 'quote': 'ETH'},
'EOS/USD': {'id': 'tEOSUSD', 'symbol': 'EOS/USD', 'base': 'EOS', 'quote': 'USD'},
'ETC/BTC': {'id': 'tETCBTC', 'symbol': 'ETC/BTC', 'base': 'ETC', 'quote': 'BTC'},
'ETC/USD': {'id': 'tETCUSD', 'symbol': 'ETC/USD', 'base': 'ETC', 'quote': 'USD'},
'ETH/BTC': {'id': 'tETHBTC', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC'},
'ETH/USD': {'id': 'tETHUSD', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD'},
'ETP/BTC': {'id': 'tETPBTC', 'symbol': 'ETP/BTC', 'base': 'ETP', 'quote': 'BTC'},
'ETP/ETH': {'id': 'tETPETH', 'symbol': 'ETP/ETH', 'base': 'ETP', 'quote': 'ETH'},
'ETP/USD': {'id': 'tETPUSD', 'symbol': 'ETP/USD |
andrejb/leap_mail | src/leap/mail/imap/tests/__init__.py | Python | gpl-3.0 | 13,000 | 0 | # -*- encoding: utf-8 -*-
"""
leap/email/imap/tests/__init__.py
----------------------------------
Module intialization file for leap.mx.tests, a module containing unittesting
code, using twisted.trial, for testing leap_mx.
@authors: Kali Kaneko, <kali@leap.se>
@license: GPLv3, see included LICENSE file
@copyright: © 2013 Kali Kaneko, see COPYLEFT file
"""
import os
import u1db
from leap.common.testing.basetest import BaseLeapTest
from leap.soledad.client import Soledad
from leap.soledad.common.document import SoledadDocument
__all__ = ['test_imap']
def run():
"""xxx fill me in"""
pass
# -----------------------------------------------------------------------------
# Some tests inherit from BaseSoledadTest in order to have a working Soledad
# instance in each test.
# -----------------------------------------------------------------------------
class BaseSoledadIMAPTest(BaseLeapTest):
"""
Instantiates GPG and Soledad for usage in LeapIMAPServer tests.
Copied from BaseSoledadTest, but moving setup to classmethod
"""
def setUp(self):
# open test dbs
self.db1_file = os.path.join(
self.tempdir, "db1.u1db")
self.db2_file = os.path.join(
self.tempdir, "db2.u1db")
self._db1 = u1db.open(self.db1_file, create=True,
document_factory=SoledadDocument)
self._db2 = u1db.open(self.db2_file, create=True,
document_factory=SoledadDocument)
# soledad config info
self.email = 'leap@leap.se'
secrets_path = os.path.join(
self.tempdir, Soledad.STORAGE_SECRETS_FILE_NAME)
local_db_path = os.path.join(
self.tempdir, Soledad.LOCAL_DATABASE_FILE_NAME)
server_url = ''
cert_file = None
self._soledad = self._soledad_instance(
self.email, '123',
secrets_path=secrets_path,
local_db_path=local_db_path,
server_url=server_url,
cert_file=cert_file)
def _soledad_instance(self, uuid, passphrase, secrets_path, local_db_path,
server_url, cert_file):
"""
Return a Soledad instance for tests.
"""
# mock key fetching and storing so Soledad doesn't fail when trying to
# reach the server.
Soledad._fetch_keys_from_shared_db = Mock(return_value=None)
Soledad._assert_keys_in_shared_db = Mock(return_value=None)
# instantiate soledad
def _put_doc_side_effect(doc):
self._doc_put = doc
class MockSharedDB(object):
get_doc = Mock(return_value=None)
put_doc | = Mock(side_effect=_put_doc_side_effect)
def __call__(self):
return self
Soledad._shared_db = MockSharedDB()
return Soledad(
uuid,
passphrase,
secrets_path=secrets_path,
local_db_path=local_db_path,
server_url=server_url,
cert_file=cert_file,
)
def tear | Down(self):
self._db1.close()
self._db2.close()
self._soledad.close()
# Key material for testing
KEY_FINGERPRINT = "E36E738D69173C13D709E44F2F455E2824D18DDF"
PUBLIC_KEY = """
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.10 (GNU/Linux)
mQINBFC9+dkBEADNRfwV23TWEoGc/x0wWH1P7PlXt8MnC2Z1kKaKKmfnglVrpOiz
iLWoiU58sfZ0L5vHkzXHXCBf6Eiy/EtUIvdiWAn+yASJ1mk5jZTBKO/WMAHD8wTO
zpMsFmWyg3xc4DkmFa9KQ5EVU0o/nqPeyQxNMQN7px5pPwrJtJFmPxnxm+aDkPYx
irDmz/4DeDNqXliazGJKw7efqBdlwTHkl9Akw2gwy178pmsKwHHEMOBOFFvX61AT
huKqHYmlCGSliwbrJppTG7jc1/ls3itrK+CWTg4txREkSpEVmfcASvw/ZqLbjgfs
d/INMwXnR9U81O8+7LT6yw/ca4ppcFoJD7/XJbkRiML6+bJ4Dakiy6i727BzV17g
wI1zqNvm5rAhtALKfACha6YO43aJzairO4II1wxVHvRDHZn2IuKDDephQ3Ii7/vb
hUOf6XCSmchkAcpKXUOvbxm1yfB1LRa64mMc2RcZxf4mW7KQkulBsdV5QG2276lv
U2UUy2IutXcGP5nXC+f6sJJGJeEToKJ57yiO/VWJFjKN8SvP+7AYsQSqINUuEf6H
T5gCPCraGMkTUTPXrREvu7NOohU78q6zZNaL3GW8ai7eSeANSuQ8Vzffx7Wd8Y7i
Pw9sYj0SMFs1UgjbuL6pO5ueHh+qyumbtAq2K0Bci0kqOcU4E9fNtdiovQARAQAB
tBxMZWFwIFRlc3QgS2V5IDxsZWFwQGxlYXAuc2U+iQI3BBMBCAAhBQJQvfnZAhsD
BQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEC9FXigk0Y3fT7EQAKH3IuRniOpb
T/DDIgwwjz3oxB/W0DDMyPXowlhSOuM0rgGfntBpBb3boezEXwL86NPQxNGGruF5
hkmecSiuPSvOmQlqlS95NGQp6hNG0YaKColh+Q5NTspFXCAkFch9oqUje0LdxfSP
QfV9UpeEvGyPmk1I9EJV/YDmZ4+Djge1d7qhVZInz4Rx1NrSyF/Tc2EC0VpjQFsU
Y9Kb2YBBR7ivG6DBc8ty0jJXi7B4WjkFcUEJviQpMF2dCLdonCehYs1PqsN1N7j+
eFjQd+hqVMJgYuSGKjvuAEfClM6MQw7+FmFwMyLgK/Ew/DttHEDCri77SPSkOGSI
txCzhTg6798f6mJr7WcXmHX1w1Vcib5FfZ8vTDFVhz/XgAgArdhPo9V6/1dgSSiB
KPQ/spsco6u5imdOhckERE0lnAYvVT6KE81TKuhF/b23u7x+Wdew6kK0EQhYA7wy
7LmlaNXc7rMBQJ9Z60CJ4JDtatBWZ0kNrt2VfdDHVdqBTOpl0CraNUjWE5YMDasr
K2dF5IX8D3uuYtpZnxqg0KzyLg0tzL0tvOL1C2iudgZUISZNPKbS0z0v+afuAAnx
2pTC3uezbh2Jt8SWTLhll4i0P4Ps5kZ6HQUO56O+/Z1cWovX+mQekYFmERySDR9n
3k1uAwLilJmRmepGmvYbB8HloV8HqwgguQINBFC9+dkBEAC0I/xn1uborMgDvBtf
H0sEhwnXBC849/32zic6udB6/3Efk9nzbSpL3FSOuXITZsZgCHPkKarnoQ2ztMcS
sh1ke1C5gQGms75UVmM/nS+2YI4vY8OX/GC/on2vUyncqdH+bR6xH5hx4NbWpfTs
iQHmz5C6zzS/kuabGdZyKRaZHt23WQ7JX/4zpjqbC99DjHcP9BSk7tJ8wI4bkMYD
uFVQdT9O6HwyKGYwUU4sAQRAj7XCTGvVbT0dpgJwH4RmrEtJoHAx4Whg8mJ710E0
GCmzf2jqkNuOw76ivgk27Kge+Hw00jmJjQhHY0yVbiaoJwcRrPKzaSjEVNgrpgP3
lXPRGQArgESsIOTeVVHQ8fhK2YtTeCY9rIiO+L0OX2xo9HK7hfHZZWL6rqymXdyS
fhzh/f6IPyHFWnvj7Brl7DR8heMikygcJqv+ed2yx7iLyCUJ10g12I48+aEj1aLe
dP7lna32iY8/Z0SHQLNH6PXO9SlPcq2aFUgKqE75A/0FMk7CunzU1OWr2ZtTLNO1
WT/13LfOhhuEq9jTyTosn0WxBjJKq18lnhzCXlaw6EAtbA7CUwsD3CTPR56aAXFK
3I7KXOVAqggrvMe5Tpdg5drfYpI8hZovL5aAgb+7Y5ta10TcJdUhS5K3kFAWe/td
U0cmWUMDP1UMSQ5Jg6JIQVWhSwARAQABiQIfBBgBCAAJBQJQvfnZAhsMAAoJEC9F
Xigk0Y3fRwsP/i0ElYCyxeLpWJTwo1iCLkMKz2yX1lFVa9nT1BVTPOQwr/IAc5OX
NdtbJ14fUsKL5pWgW8OmrXtwZm1y4euI1RPWWubG01ouzwnGzv26UcuHeqC5orZj
cOnKtL40y8VGMm8LoicVkRJH8blPORCnaLjdOtmA3rx/v2EXrJpSa3AhOy0ZSRXk
ZSrK68AVNwamHRoBSYyo0AtaXnkPX4+tmO8X8BPfj125IljubvwZPIW9VWR9UqCE
VPfDR1XKegVb6VStIywF7kmrknM1C5qUY28rdZYWgKorw01hBGV4jTW0cqde3N51
XT1jnIAa+NoXUM9uQoGYMiwrL7vNsLlyyiW5ayDyV92H/rIuiqhFgbJsHTlsm7I8
oGheR784BagAA1NIKD1qEO9T6Kz9lzlDaeWS5AUKeXrb7ZJLI1TTCIZx5/DxjLqM
Tt/RFBpVo9geZQrvLUqLAMwdaUvDXC2c6DaCPXTh65oCZj/hqzlJHH+RoTWWzKI+
BjXxgUWF9EmZUBrg68DSmI+9wuDFsjZ51BcqvJwxyfxtTaWhdoYqH/UQS+D1FP3/
diZHHlzwVwPICzM9ooNTgbrcDzyxRkIVqsVwBq7EtzcvgYUyX53yG25Giy6YQaQ2
ZtQ/VymwFL3XdUWV6B/hU4PVAFvO3qlOtdJ6TpE+nEWgcWjCv5g7RjXX
=MuOY
-----END PGP PUBLIC KEY BLOCK-----
"""
PRIVATE_KEY = """
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1.4.10 (GNU/Linux)
lQcYBFC9+dkBEADNRfwV23TWEoGc/x0wWH1P7PlXt8MnC2Z1kKaKKmfnglVrpOiz
iLWoiU58sfZ0L5vHkzXHXCBf6Eiy/EtUIvdiWAn+yASJ1mk5jZTBKO/WMAHD8wTO
zpMsFmWyg3xc4DkmFa9KQ5EVU0o/nqPeyQxNMQN7px5pPwrJtJFmPxnxm+aDkPYx
irDmz/4DeDNqXliazGJKw7efqBdlwTHkl9Akw2gwy178pmsKwHHEMOBOFFvX61AT
huKqHYmlCGSliwbrJppTG7jc1/ls3itrK+CWTg4txREkSpEVmfcASvw/ZqLbjgfs
d/INMwXnR9U81O8+7LT6yw/ca4ppcFoJD7/XJbkRiML6+bJ4Dakiy6i727BzV17g
wI1zqNvm5rAhtALKfACha6YO43aJzairO4II1wxVHvRDHZn2IuKDDephQ3Ii7/vb
hUOf6XCSmchkAcpKXUOvbxm1yfB1LRa64mMc2RcZxf4mW7KQkulBsdV5QG2276lv
U2UUy2IutXcGP5nXC+f6sJJGJeEToKJ57yiO/VWJFjKN8SvP+7AYsQSqINUuEf6H
T5gCPCraGMkTUTPXrREvu7NOohU78q6zZNaL3GW8ai7eSeANSuQ8Vzffx7Wd8Y7i
Pw9sYj0SMFs1UgjbuL6pO5ueHh+qyumbtAq2K0Bci0kqOcU4E9fNtdiovQARAQAB
AA/+JHtlL39G1wsH9R6UEfUQJGXR9MiIiwZoKcnRB2o8+DS+OLjg0JOh8XehtuCs
E/8oGQKtQqa5bEIstX7IZoYmYFiUQi9LOzIblmp2vxOm+HKkxa4JszWci2/ZmC3t
KtaA4adl9XVnshoQ7pijuCMUKB3naBEOAxd8s9d/JeReGIYkJErdrnVfNk5N71Ds
FmH5Ll3XtEDvgBUQP3nkA6QFjpsaB94FHjL3gDwum/cxzj6pCglcvHOzEhfY0Ddb
J967FozQTaf2JW3O+w3LOqtcKWpq87B7+O61tVidQPSSuzPjCtFF0D2LC9R/Hpky
KTMQ6CaKja4MPhjwywd4QPcHGYSqjMpflvJqi+kYIt8psUK/YswWjnr3r4fbuqVY
VhtiHvnBHQjz135lUqWvEz4hM3Xpnxydx7aRlv5NlevK8+YIO5oFbWbGNTWsPZI5
jpoFBpSsnR1Q5tnvtNHauvoWV+XN2qAOBTG+/nEbDYH6Ak3aaE9jrpTdYh0CotYF
q7csANsDy3JvkAzeU6WnYpsHHaAjqOGyiZGsLej1UcXPFMosE/aUo4WQhiS8Zx2c
zOVKOi/X5vQ2GdNT9Qolz8AriwzsvFR+bxPzyd8V6ALwDsoXvwEYinYBKK8j0OPv
OOihSR6HVsuP9NUZNU9ewiGzte/+/r6pNXHvR7wTQ8EWLcEIAN6Zyrb0bHZTIlxt
VWur/Ht2mIZrBaO50qmM5RD3T5oXzWXi/pjLrIpBMfeZR9DWfwQwjYzwqi7pxtYx
nJvbMuY505rfnMoYxb4J+cpRXV8MS7Dr1vjjLVUC9KiwSbM3gg6emfd2yuA93ihv
Pe3mffzLIiQa4mRE3wtGcioC43nWuV2K2e1KjxeFg07JhrezA/1Cak505ab/tmvP
4YmjR5c44+yL/YcQ3HdFgs4mV+nVbptRXvRcPpolJsgxPccGNdvHhsoR4gwXMS3F
RRPD2z6x8xeN73Q4K |
wanghaven/nupic | src/nupic/support/pymysqlhelpers.py | Python | agpl-3.0 | 4,179 | 0.003589 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Helper utilities for python scripts that use pymysql
import inspect
import logging
from socket import error as socket_error
import pymysql
from pymysql.constants import ER
from nupic.support.decorators import retry as make_retry_decorator
# Client mysql error codes of interest; pymysql didn't have constants for these
# at the time of this writing.
# (per https://dev.mysql.com/doc/refman/5.5/en/error-messages-client.html)
CR_CONNECTION_ERROR = 2002
""" Can't connect to local MySQL server through socket '%s' (%d) """
CR_CONN_HOST_ERROR = 2003
""" Can't connect to MySQL server on '%s' (%d) """
CR_UNKNOWN_HOST = 2005
""" Unknown MySQL server host '%s' (%d) """
CR_SERVER_GONE_ERROR = 2006
""" MySQL server has gone away """
CR_TCP_CONNECTION = 2011
""" %s via TCP/IP """
CR_SERVER_HANDSHAKE_ERR = 2012
""" Error in server handshake """
CR_SERVER_LOST = 2013
""" Lost connection to MySQL server during query """
CR_SERVER_LOST_EXTENDED = 2055
""" Lost connection to MySQL server at '%s', system error: %d """
_RETRIABLE_CLIENT_ERROR_CODES = (
CR_CONNECTION_ERROR,
CR_CONN_HOST_ERROR,
CR_UNKNOWN_HOST,
CR_SERVER_GONE_ERROR,
CR_TCP_CONNECTION,
CR_SERVER_HANDSHAKE_ERR,
CR_SERVER_LOST,
CR_SERVER_LOST_EXTENDED,
)
_RETRIABLE_SERVER_ERROR_CODES = (
ER.TABLE_DEF_CHANGED,
ER.LOCK_WAIT_TIMEOUT,
ER.LOCK_DEADLOCK,
#Maybe these also?
# ER_TOO_MANY_DELAYED_THREADS
# ER_BINLOG_PURGE_EMFILE
# ER_TOO_MANY_CONCURRENT_TRXS
# ER_CON_COUNT_ERROR
# ER_OUTOFMEMORY
)
_ALL_RETRIABLE_ERROR_CODES = set(_RETRIABLE_CLIENT_ERROR_CODES +
_RETRIABLE_SERVER_ERROR_CODES)
def retrySQL(timeoutSec=60*5, logger=None):
""" Return a closure suitable for use as a decorator for
retrying a pymysql DAO function on certain failures that warrant retries (
e.g., RDS/MySQL server down temporarily, transaction deadlock, etc.).
We share this function across multiple scripts (e.g., ClientJobsDAO,
StreamMgr) for consitent behavior.
NOTE: please ensure that the operation being retried is idempotent.
timeoutSec: How many seconds from time of initial call to stop retrying
(floating point)
logger: User-supplied logger instance.
Usage Example:
NOTE: logging must be initialized *before* any loggers are created, else
there will be no outpu | t; see nupic.support.initLogging()
@retrySQL()
def jobInfo(self, jobID):
...
"""
if logger is None:
logger = logging.getLogger(__name__)
def retryFilter(e, a | rgs, kwargs):
if isinstance(e, (pymysql.InternalError, pymysql.OperationalError)):
if e.args and e.args[0] in _ALL_RETRIABLE_ERROR_CODES:
return True
elif isinstance(e, pymysql.Error):
if (e.args and
inspect.isclass(e.args[0]) and issubclass(e.args[0], socket_error)):
return True
return False
retryExceptions = tuple([
pymysql.InternalError,
pymysql.OperationalError,
pymysql.Error,
])
return make_retry_decorator(
timeoutSec=timeoutSec, initialRetryDelaySec=0.1, maxRetryDelaySec=10,
retryExceptions=retryExceptions, retryFilter=retryFilter,
logger=logger)
|
saiias/pymamemose | test/__init__.py | Python | bsd-3-clause | 341 | 0.01173 | # | ! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from subprocess import Popen,STDOUT
import os.path
def suite():
cmd = ['py.test','r']
cmd.append(os.path.dirname(os.path.abspath(__file__)))
errno = Popen(cmd, stdout=sys.stdout, st | derr=STDOUT).wait()
raise SystemExit(errno)
if __name__ == "__main__":
suite()
|
vkscool/nupic | nupic/simple_server.py | Python | gpl-3.0 | 3,594 | 0.005565 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple web server for interacting with NuPIC.
Note: Requires web.py to run (install using '$ pip install web.py')
"""
import os
import sys
# The following loop removes the nupic package from the
# PythonPath (sys.path). This is necessary in order to let web
# import the built in math module rather than defaulting to
# nupic.math
while True:
try:
sys.path.remove(os.path.dirname(os.path.realpath(__file__)))
except:
break
import datetime
import json
import web
from nupic.frameworks.opf.modelfactory import ModelFactory
g_models = {}
urls = (
# Web UI
"/models", "ModelHandler",
r"/models/([-\w]*)", "ModelHandler",
r"/models/([-\w]*)/run", "ModelRunner",
)
class ModelHandler(object):
def GET(self):
"""
/models
returns:
[model1, model2, model3, ...] list of model names
"""
global g_models
return json.dumps({"models": g_models.keys()} | )
def POST(self, name):
"""
/models/{name}
schema:
{
"modelParams": dict containing model parameters
"predictedFieldName": str
}
returns:
{"success":name}
"""
global g_models
data = json.loads(web.data())
modelParams = data["modelParams"]
| predictedFieldName = data["predictedFieldName"]
if name in g_models.keys():
raise web.badrequest("Model with name <%s> already exists" % name)
model = ModelFactory.create(modelParams)
model.enableInference({'predictedField': predictedFieldName})
g_models[name] = model
return json.dumps({"success": name})
class ModelRunner(object):
def POST(self, name):
"""
/models/{name}/run
schema:
{
predictedFieldName: value
timestamp: %m/%d/%y %H:%M
}
NOTE: predictedFieldName MUST be the same name specified when
creating the model.
returns:
{
"predictionNumber":<number of record>,
"anomalyScore":anomalyScore
}
"""
global g_models
data = json.loads(web.data())
data["timestamp"] = datetime.datetime.strptime(
data["timestamp"], "%m/%d/%y %H:%M")
if name not in g_models.keys():
raise web.notfound("Model with name <%s> does not exist." % name)
modelResult = g_models[name].run(data)
predictionNumber = modelResult.predictionNumber
anomalyScore = modelResult.inferences["anomalyScore"]
return json.dumps({"predictionNumber": predictionNumber,
"anomalyScore": anomalyScore})
web.config.debug = False
app = web.application(urls, globals())
if __name__ == "__main__":
app.run()
|
creyer/nursery | nursery.py | Python | apache-2.0 | 7,857 | 0.013491 | #!/usr/bin/env python
from __future__ import with_statement
import argparse
import sys
import logging
import urllib, urllib2
import json
from fabric.operations import local
from fabric.api import hide
import yaml
VERSION = "0.0.1"
SERVER_FILE = ".server"
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def get_repo_info():
with hide('commands'):
f_out = local('git remote -v|grep push|grep origin', capture = True)
remote_git = ""
start = f_out.find("http")
end = f_out.find(".git")
remote_git = f_out[start:end]
repo_name = remote_git[remote_git.rfind('/')+1:]
return repo_name
def get_current_branch():
with hide('commands'):
f_out = local('git branch', capture = True)
start = f_out.find('* ')
end = f_out.find('\n')
branch = f_out[start+2:end]
return branch
def get_last_hash():
with hide('commands'):
f_out = local('git rev-parse HEAD', capture = True)
start = 0
end = f_out.find('\n')
branch = f_out[start:end]
return branch
class Server(object):
def __init__(self):
try:
with open(".server") as f:
self.address = f.readlines()[0]
self.repo = get_repo_info()
self.current_branch = get_current_branch()
ok = self.post_to_server('info')
logging.debug("endpoint: %s" % (ok))
except IOError:
self.address = None
def parse_yaml(self,yaml_file):
try:
data = yaml.load(yaml_file.read())
if data is not None:
return data
return False
except Exception as e:
logging.error(e)
return False
""" Run a normal client deployment """
def deploy(self, git_hash = None):
if git_hash is None:
git_hash = get_last_hash()
deploy = {'hash': git_hash, 'branch': get_current_branch()}
req = self.post_to_server("deploy", deploy)
result = json.loads(req)
self.parse_server_response(result)
def parse_server_response(self,result):
if result['status'] == "ok":
print result['msg']
else:
logging.error(result)
print ("Error occured: %s" % (result['msg']))
sys.exit()
"""" Sends a new init configuration for deployment on a branch and current repo """
def init_config(self, config_file):
conf = {'conf':self.parse_yaml(config_file)}
if not conf['conf']:
print "Your config file could not be parsed"
sys.exit()
req = self.post_to_server("init.config", conf)
result = json.loads(req)
self.parse_server_response(result)
""" Creates the base url for the api """
def get_base_url(self, command = None):
return {
'info': 'http://%s' % (self.address),
'init.config': 'http://%s/api/%s/init/' % (self.address, self.repo),
'deploy': 'http://%s/api/%s/deploy/' % (self.address, self.repo),
}.get(command, 'http://%s/api/%s' % (self.address, self.repo))
""" Post requests to deploy server """
def post_to_server(self, command = None, data_dict = None):
if self.address is not None:
url_2 = self.get_base_url(command)
if data_dict is not None:
logging.debug("sending post data: %s to: %s" % (data_dict, url_2))
data = urllib.urlencode(data_dict)
req = urllib2.Request(url_2, data)
try:
rsp = urllib2.urlopen(req)
except urllib2.URLError, e:
logging.error("Error 2: couldn't communicate with the server on: %s" % (url_2))
sys.exit()
| else:
req = urllib2.Request(url_2)
try:
logging.debug("executing get on: %s" % (url_2))
rsp = urllib2.urlopen(req)
| except urllib2.URLError, e:
logging.error("Error 3: couldn't communicate with the server on: %s" % (url_2))
sys.exit()
return rsp.read()
else:
logging.error("Error 4: Can't comunicate with the server")
sys.exit()
class DeployAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
logging.debug('DeployAction %r %r %r' % (namespace, values, option_string))
setattr(namespace, self.dest, values)
if values is None:
server.deploy()
else:
server.deploy(values)
""" This will read a local config yaml which will be sent to the server
If the server will have this repo and branch already configured
an error will be trigered.
This method can't be used to overwrite config data """
class InitAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
logging.debug('%r %r %r' % (namespace, values, option_string))
setattr(namespace, self.dest, values)
server.init_config(values)
# TODO verify with the server if exists already an initiated config for this repo
# if exists an error will be displayed
class SetupAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
logging.debug('%r %r %r' % (namespace, values, option_string))
setattr(namespace, self.dest, values)
server = values
# write hidden file with the server address
f = open(SERVER_FILE,'w')
f.write('%s' %(server)) # python will convert \n to os.linesep
f.close()
server = Server()
parser = argparse.ArgumentParser(description = 'Nursery deplkoy system')
parser.add_argument('-v','--version', action = 'version', version = '%(prog)s '+VERSION)
parser.add_argument('-s','--setup', nargs='?', metavar='Server', action = SetupAction,help = 'setup a nursery deploy system, you need to specify the nursery server endpoint like: http://www.my-nursery-server.com')
# each branch needs it's own config file
parser.add_argument('-c','--config', metavar='config.yaml', action = InitAction, type = file,help = 'init a new repo deployment with config file you specify')
parser.add_argument('-d','--deploy',nargs='?', metavar='hash', action = DeployAction, type = file,help = 'create a new async deploy')
parser.add_argument('-i','--info', action='store_true', help = 'some info Nursery Client knows about')
if not len(sys.argv) > 1:
parser.print_help()
else:
args = parser.parse_args()
logging.debug(args)
if args.info:
if server.address is not None:
print ("remote deploy server: %s" % server.address)
print ("repo: %s" % server.repo)
print ("branch: %s" % server.current_branch)
# comication with the server - done
# setup server (with amazon credentials & stuff)
# initialize branch deploy with deploy server
# read config yaml and send it to the server - file sent - ok
# read the response and show it - ok
# read the file on the server - ok
#TODO
# on the server store the git deploy command so it can be processed assync
# 3 way to deploy git, client, forced
# - client
# client -> git deploy (last hash) -> ok
# store in db the command if allow_multiple_deploy & stuff
# parse the command assync
# build file list
# get instances
# get scripts
# make the deployment
# on the server we need to modelate this yaml file to the db
# find a good way to insert instances in db
# filter a deployment based on touced files
# make a deployment
|
Ziloi/ArrayHQ | tools/remove-after-character/remove-after-character.py | Python | unlicense | 758 | 0.010554 | import sys
# Get | Filename and get file contents
filename = raw_input('Enter a file name: ')
file = open(filename)
filecontents = filename.readlines()
f.close()
# Process... But first get input after how many letters or certain letter
process_option = raw_input('Would you prefer to remove everything from a line after a certain character "1", or after a certai | n amount of characters "2" (1/2): ')
if process_option == 1:
letter = raw_input('Which letter?: ')
if process_option == 2:
number = raw_input('How many characters?: ')
for line in filecontents:
[line[i:i+number] for i in range(0, len(line), n)]
array_option = raw_input('Would you like to print an array instead of text (y/n): ')
if array_option == "y":
if array_option == "n":
|
MithileshCParab/HackerRank-10DaysOfStatistics | Problem Solving/Data Structure/Stacks/maximum_element.py | Python | apache-2.0 | 1,142 | 0.056918 |
class ArrayStack:
def __init__(self):
self._data = []
self.max = 0
def __len__(self):
return len(self._data)
def is_empty(self):
return len(self._data) == 0
def get_max(self | ):
return self.max
def push(self, e):
self._data. | append(e)
if self.max < e:
self.max = e
def pop(self):
if self.is_empty():
raise Empty('Stack is empty')
else:
delelem = self._data[-1]
del(self._data[-1])
if self.is_empty():
self.max = 0
elif self.max == delelem:
self._cal_max()
def _cal_max(self):
self.max = 0
for elem in self._data:
if self.max < elem:
self.max = elem
def print_stack(self):
print(self._data)
if __name__ == "__main__":
"""
stack = ArrayStack()
stack.push(10)
stack.push(20)
stack.pop()
stack.print_stack()
stack.push(92)
stack.push(1)
stack.print_stack()
print(stack.get_max())
"""
n = int(input())
stack = ArrayStack()
for i in range(n):
oprval = input().rstrip().split()
opr = int(oprval[0])
if opr == 1:
val = int(oprval[1])
stack.push(val)
elif opr ==2:
stack.pop()
elif opr == 3:
print(stack.get_max())
|
zofuthan/airmozilla | bin/crontab/gen-crons.py | Python | bsd-3-clause | 1,205 | 0 | #!/usr/bin/env python
import os
from optparse import OptionParser
from jinja2 import Template
HEADER = '!!AUTO-GENERATED!! Edit bin/crontab/crontab.tpl instead.'
TEMPLATE = open(os.path.join(os.path.dirname(__file__), 'crontab.tpl')).read()
def main():
parser = OptionParser()
parser.add_option('-w', '--webapp',
help='Location of web app (required)')
parser.add_option('-u', '--user',
help=('Prefix cron with this user. '
'Only define for cron.d style crontabs.'))
parser.add_option('-p', '--python', default='/usr/bin/python2.7',
help='Python interpreter to use.')
(opts, args) = parser.parse_args()
if not opts.webapp:
parser.error('-w must be d | efined')
ctx = {'django': 'cd %s; %s manage.py' % (opts.webapp, opts.python)}
ctx['cron'] = '%s cron' % ctx['django']
if opts.user:
for k, v in ct | x.iteritems():
ctx[k] = '%s %s' % (opts.user, v)
# Needs to stay below the opts.user injection.
ctx['python'] = opts.python
ctx['header'] = HEADER
print Template(TEMPLATE).render(**ctx)
if __name__ == '__main__':
main()
|
deeptir96/my_proj | src/my_proj/views.py | Python | mit | 248 | 0.008065 | from django.views import | generic
class HomePage(generic.TemplateView):
template_name = "home.html"
class AboutPage(generic.TemplateView):
template_name = "about.html"
class GBNC(generic.TemplateView):
template_name = "menug.h | tml"
|
nbari/db-sandbox | riak/cluster_read.py | Python | bsd-3-clause | 597 | 0.001675 | import riak
import time
from random import randint
start_time = time.time()
pool = ['10.15.129.215',
'10.15.129.216',
'10.15.129.217',
'10.15.129.218',
'10.15.129.219']
# Connect to Riak.
client = []
for ip in pool:
print ip
c = riak.RiakClient(host=ip)
c = c.bucket('test')
client.append(c)
for key in range(1000 | ):
print 'Stored data for key %d-%s: %s' % (
key, pool[key % 5], client[randint(0, 4)].g | et('%d-%s' % (key, pool[key % 5])).data)
elapsed_time = time.time() - start_time
print 'Execution time: %.4f' % (elapsed_time)
|
thekingofkings/ChicagoTaxi | dataPreprocess.py | Python | mit | 2,086 | 0.009588 | """
Pre-process Chicago taxi data for visualization.
Author: Hongjian
Date: 11/7/2015
The raw file is in the ../../../dataset/ChicagoTaxi/2013-dec.txt
We map those trips to CA
"""
from shapely.geometry import Polygon, Point, box
from Block import Block
fraw = '../../dataset/ChicagoTaxi/2013-dec.txt'
if __name__ == '__main__':
cas = Block.createAllCAObjects()
with open(fraw, 'r') as fin, open('data/taxi.txt', 'w') as fout:
header = fin.readline()
for line in fin:
try:
ls = line.split("\t")
trp = []
trp.append( ls[2] ) # travel time
trp.append(ls[7]) # meter on time
pickup = ls[9].replace('"', '')
pc = pickup.split(",")
p = Point( float(pc[1]), float(pc[0]) )
flag = False
for t, ca in cas.items():
if ca. | containPoint(p):
trp.append(str(t)) # meter on CA id
flag = True
break
if not flag:
trp.append('-1')
|
trp.append(pickup) # meter on position
trp.append(ls[8]) # meter off time
dropoff = ls[11].replace('"', '')
dc = dropoff.split(",")
d = Point( float(dc[1]), float(dc[0]) )
flag = False
for t, ca in cas.items():
if ca.containPoint(d):
trp.append(str(t)) # meter off CA id
flag = True
break
if not flag:
trp.append('-1')
trp.append(dropoff) # meter off position
fout.write(','.join(trp) + '\n')
except IndexError:
continue |
jahs/ssexp | test.py | Python | mit | 2,221 | 0.004052 | import unittest
import ssexp
class JsonTests(unittest.TestCase):
def setUp(self):
class Parrot(object):
def __init__(self, is_dead=True, from_egg=None):
self.is_dead = is_dead
self.from_egg = from_egg
self.preserializer = ssexp.SsexpPreserializer()
self.preserializer.register(Parrot, version=2)
| class Egg(object):
def __init__(self, from_parrot=None):
| self.from_parrot = from_parrot
self.preserializer.register(Egg)
self.parrot = Parrot()
self.parrot.from_egg = Egg(from_parrot=self.parrot)
def test_int(self):
obj = 123
result = u"123"
self.assertEqual(ssexp.dumps(obj), result)
def test_float(self):
obj = 3.1415927
result = u"3.1415927"
self.assertEqual(ssexp.dumps(obj), result)
def test_str(self):
obj = u'The Knights who say "Ni!".'
result = u'"The Knights who say \\"Ni!\\"."'
self.assertEqual(ssexp.dumps(obj), result)
def test_bool(self):
obj = False
result = u"#f"
self.assertEqual(ssexp.dumps(obj), result)
def test_none(self):
obj = None
result = u"(none)"
self.assertEqual(ssexp.dumps(obj), result)
def test_list(self):
obj = [123, 3.1415927, u'The Knights who say "Ni!".', False, None]
result = '(123 3.1415927 "The Knights who say \\"Ni!\\"." #f (none))'
self.assertEqual(ssexp.dumps(obj), result)
def test_dict(self):
obj = {'brian': 'naughty boy'}
result = '(: brian: "naughty boy")'
self.assertEqual(ssexp.dumps(obj), result)
def test_dict_args(self):
obj = {'brian': 'naughty boy', 3: 'Antioch'}
result = '(: ("brian" "naughty boy") (3 "Antioch"))'
self.assertEqual(ssexp.dumps(obj), result)
def test_dict_args_cyclic(self):
obj = {'brian': 'naughty boy', 3: 'Antioch', 'ouroboros': self.parrot}
result = '(: ("brian" "naughty boy") (3 "Antioch") ("ouroboros" #0=(parrot :version: 2 dead?: #t from-egg: (egg from-parrot: #0#))))'
self.assertEqual(ssexp.dumps(obj, self.preserializer), result)
|
adrn/gala | gala/util.py | Python | mit | 4,794 | 0.000417 | """ General utilities. """
# Standard library
from collections.abc import Mapping
# Third-party
import numpy as np
__all__ = ['rolling_window', 'atleast_2d', 'assert_angles_allclose']
class ImmutableDict(Mapping):
@classmethod
def from_dict(cls, somedict):
return cls(**somedict)
def __init__(self, **kwargs):
self._dict = kwargs
self._hash = None
def __getitem__(self, key):
return self._dict[key]
def __len__(self):
return len(self._dict)
def __iter__(self):
return iter(self._dict)
def __hash__(self):
if self._hash is None:
self._hash = hash(frozenset(self._dict.items()))
return self._hash
def __eq__(self, other):
return self._dict == other._dict
def __repr__(self):
return f"<ImmutableDict {self._dict.__repr__()}>"
def __str__(self):
return self._dict.__str__()
def copy(self):
import copy
return copy.deepcopy(self._dict)
def rolling_window(arr, window_size, stride=1, return_idx=False):
"""
There is an example of an iterator for pure-Python objects in:
http://stackoverflow.com/questions/6822725/rolling-or-sliding-window-iterator-in-python
This is a rolling-window iterator Numpy arrays, with window size and
stride control. See examples below for demos.
Parameters
----------
arr : array_like
Input numpy array.
window_size : int
Width of the window.
stride : int (optional)
Number of indices to advance the window each iteration step.
return_idx : bool (optional)
Whether to return the slice indices alone with the array segment.
Examples
--------
>>> a = np.array([1, 2, 3, 4, 5, 6])
>>> for x in rolling_window(a, 3):
... print(x)
[1 2 3]
[2 3 4]
[3 4 5]
[4 5 6]
>>> for x in rolling_window(a, 2, stride=2):
... print(x)
[1 2]
[3 4]
[5 6]
>>> for (i1, i2), x in rolling_window(a, 2, stride=2, return_idx=True): # doctest: +SKIP
... print(i1, i2, x)
(0, 2, array([1, 2]))
(2, 4, array([3, 4]))
(4, 6, array([5, 6]))
"""
window_size = int(window_size)
stride | = int(stride)
if window_size < 0 or stride < 1:
raise ValueError
arr_len = len(arr)
if arr_len < window_size:
if return_idx:
yield (0, arr_len), arr
else:
yield arr
ix1 = 0
while ix1 < arr_len:
ix2 = ix1 + window_size
result = arr[ix1:ix2]
if return_idx:
yield | (ix1, ix2), result
else:
yield result
if len(result) < window_size or ix2 >= arr_len:
break
ix1 += stride
def atleast_2d(*arys, **kwargs):
"""
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
insert_axis : int (optional)
Where to create a new axis if input array(s) have <2 dim.
Returns
-------
res, res2, ... : ndarray
An array, or tuple of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
Examples
--------
>>> atleast_2d(3.0) # doctest: +FLOAT_CMP
array([[3.]])
>>> x = np.arange(3.0)
>>> atleast_2d(x) # doctest: +FLOAT_CMP
array([[0., 1., 2.]])
>>> atleast_2d(x, insert_axis=-1) # doctest: +FLOAT_CMP
array([[0.],
[1.],
[2.]])
>>> atleast_2d(x).base is x
True
>>> atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
"""
insert_axis = kwargs.pop('insert_axis', 0)
slc = [slice(None)] * 2
slc[insert_axis] = None
slc = tuple(slc)
res = []
for ary in arys:
ary = np.asanyarray(ary)
if len(ary.shape) == 0:
result = ary.reshape(1, 1)
elif len(ary.shape) == 1:
result = ary[slc]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def assert_angles_allclose(x, y, **kwargs):
    """
    Like numpy's assert_allclose, but for angles (in radians): the
    comparison is insensitive to added multiples of 2*pi.
    """
    # Squared chord length between the two points on the unit circle.
    chord2 = (np.sin(x) - np.sin(y)) ** 2 + (np.cos(x) - np.cos(y)) ** 2
    # Law of cosines with unit radii (a = b = 1) recovers the separation.
    separation = np.arccos((2.0 - chord2) / 2.0)
    assert np.allclose(separation, 0.0, **kwargs)
class GalaDeprecationWarning(Warning):
    """
    A warning class to indicate a deprecated feature.

    Use this over the built-in ``DeprecationWarning`` because Python
    silences ``DeprecationWarning`` by default, so users would otherwise
    never see the deprecation notice.
    """
|
jespino/python-taiga | tests/test_roles.py | Python | mit | 530 | 0.001887 | from taiga.requestmaker import RequestMaker
from taiga.models import Role, Roles
import unittest
from mock import patch
class TestRoles(unittest.TestCase):
    """Tests for the Roles list resource of the taiga API client."""

    @patch('taiga.models.base.ListResource._new_resource')
    def test_create_role(self, mock_new_resource):
        """Creating a role must POST the expected project/name payload."""
        rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
        mock_new_resource.return_value = Role(rm)
        Roles(rm).create(1, 'RL 1')
        mock_new_resource.assert_called_with(
            payload={'project': 1, 'name': 'RL 1'}
        )
|
antoinecarme/pyaf | tests/artificial/transf_Fisher/trend_MovingMedian/cycle_0/ar_/test_artificial_1024_Fisher_MovingMedian_0__100.py | Python | bsd-3-clause | 266 | 0.086466 | import pyaf | .Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate/process one artificial benchmark series: 1024 daily points,
# MovingMedian trend, no cycle, Fisher transform, noise-free, 100 exogenous
# variables, no autoregressive component.
art.process_dataset(N=1024, FREQ='D', seed=0, trendtype="MovingMedian",
                    cycle_length=0, transform="Fisher", sigma=0.0,
                    exog_count=100, ar_order=0)
jcchoiling/learningPython | s13/Day12/practice/rpc_server.py | Python | gpl-3.0 | 1,091 | 0.008249 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Janice Cheng
import pika
import json
creds_broker = pika.PlainCredentials("janice", "janice123")
conn_params = pika.ConnectionParameters("172.16.201.134",
virtual_host = "/",
credentials = creds_broker)
conn_broker = pika.BlockingConnection(conn_params)
channel = conn_broker.channel()
result=channel.queue_declare(exclusive=True)
callback_queue = result.method.queue
def api_ping(channel, method, header, body):
    """'ping' API call: ack the delivery and reply 'Pong!<time>' to the caller.

    The reply is published on the 'rpc' exchange using the caller's
    ``reply_to`` header as the routing key.
    """
    channel.basic_ack(delivery_tag=method.delivery_tag)
    msg_dict = json.loads(body)
    print("Received API call...replying...")
    channel.basic_publish(body="Pong!" + str(msg_dict["time"]),
                          exchange='rpc',
                          routing_key=header.reply_to)

# Consume RPC requests from the 'ping' queue until interrupted.
channel.basic_consume(api_ping,
                      queue="ping",
                      consumer_tag="ping")
print("Waiting for RPC calls...")
channel.start_consuming()
conn_broker.close()
capergroup/bayou | src/main/python/bayou/experiments/low_level_sketches/utils.py | Python | apache-2.0 | 2,543 | 0.00118 | # Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import re
import json
import random
from itertools import chain
import tensorflow as tf
CONFIG_GENERAL = ['latent_size', 'batch_size', 'num_epochs',
'learning_rate', 'print_step', 'alpha', 'beta']
CONFIG_ENCODER = ['name', 'units', 'tile']
CONFIG_DECODER = ['units', 'max_tokens']
CONFIG_DECODER_INFER = ['chars', 'vocab', 'vocab_size']
C0 = 'CLASS0'
UNK = '_UNK_'
CHILD_EDGE = 'V'
SIBLING_EDGE = 'H'
def length(tensor):
    """Count, for each batch row, the timesteps with any nonzero feature."""
    nonempty = tf.sign(tf.reduce_max(tensor, axis=2))
    return tf.reduce_sum(nonempty, axis=1)
# split s based on camel case and lower everything (uses '#' for split)
def split_camel(s):
    """Split a camelCase string into its lowercased words ('#' marks splits)."""
    marked = re.sub('(.)([A-Z][a-z]+)', r'\1#\2', s)       # any char before Word
    marked = re.sub('([a-z0-9])([A-Z])', r'\1#\2', marked)  # lc/digit before UC
    return [word.lower() for word in marked.split('#')]
# Do not move these imports to the top, it will introduce a cyclic dependency
import bayou.experiments.low_level_sketches.evidence
# convert JSON to config
def read_config(js, save_dir, infer=False):
    """Convert a JSON config dict into a nested argparse.Namespace.

    :param js: dict parsed from the JSON configuration file
    :param save_dir: directory handed through to Evidence.read_config
    :param infer: if True, also load decoder attributes that are only
        needed at inference time (chars/vocab/vocab_size)
    :return: Namespace with general attrs, ``evidence`` and ``decoder``
    """
    config = argparse.Namespace()
    for attr in CONFIG_GENERAL:
        config.__setattr__(attr, js[attr])
    # Evidence parsing is delegated to the evidence module (imported late
    # at module level to avoid a cyclic import).
    config.evidence = bayou.experiments.low_level_sketches.evidence.Evidence.read_config(js['evidence'], save_dir)
    config.decoder = argparse.Namespace()
    for attr in CONFIG_DECODER:
        config.decoder.__setattr__(attr, js['decoder'][attr])
    if infer:
        for attr in CONFIG_DECODER_INFER:
            config.decoder.__setattr__(attr, js['decoder'][attr])
    return config
# convert config to JSON
def dump_config(config):
    """Serialize a config Namespace back into a JSON-compatible dict."""
    js = {attr: getattr(config, attr) for attr in CONFIG_GENERAL}
    js['evidence'] = [ev.dump_config() for ev in config.evidence]
    js['decoder'] = {attr: getattr(config.decoder, attr)
                     for attr in CONFIG_DECODER + CONFIG_DECODER_INFER}
    return js
|
cjahangir/geodash | geonode/documents/tests.py | Python | gpl-3.0 | 11,741 | 0.000767 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
"""
import StringIO
import json
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from guardian.shortcuts import get_anonymous_user
from .forms import DocumentCreateForm
from geonode.maps.models import Map
from geonode.documents.models import Document
from geonode.base.populate_test_data import create_models
class LayersTest(TestCase):
fixtures = ['intial_data.json', 'bobby']
perm_spec = {
"users": {
"admin": [
"change_resourcebase",
"change_resourcebase_permissions",
"view_resourcebase"]},
"groups": {}}
    def setUp(self):
        """Populate test documents/maps and prepare an in-memory GIF upload."""
        create_models('document')
        create_models('map')
        # Minimal 1x1 GIF87a image, used as the fake uploaded file content.
        self.imgfile = StringIO.StringIO(
            'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
            '\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
        self.anonymous_user = get_anonymous_user()
def test_create_document_with_no_rel(self):
"""Tests the creation of a document with no relations"""
f = SimpleUploadedFile(
'test_img_file.gif',
self.imgfile.read(),
'image/gif')
superuser = get_user_model().objects.get(pk=2)
c = Document.objects.create(
| doc_file=f,
owner=superuser,
title='theimg')
c.set_default_permissions()
self.assertEquals(Document.objects.get(pk=c.id).title, 'theimg')
def test_create_document_with_rel(self):
"""Tests the creation of a document with no a map related"""
f = SimpleUploadedFile(
'test_img_file.gif',
self.imgfile.read(),
| 'image/gif')
superuser = get_user_model().objects.get(pk=2)
m = Map.objects.all()[0]
ctype = ContentType.objects.get_for_model(m)
c = Document.objects.create(
doc_file=f,
owner=superuser,
title='theimg',
content_type=ctype,
object_id=m.id)
self.assertEquals(Document.objects.get(pk=c.id).title, 'theimg')
def test_create_document_url(self):
"""Tests creating an external document instead of a file."""
superuser = get_user_model().objects.get(pk=2)
c = Document.objects.create(doc_url="http://geonode.org/map.pdf",
owner=superuser,
title="GeoNode Map",
)
doc = Document.objects.get(pk=c.id)
self.assertEquals(doc.title, "GeoNode Map")
self.assertEquals(doc.extension, "pdf")
def test_create_document_url_view(self):
"""
Tests creating and updating external documents.
"""
self.client.login(username='admin', password='admin')
form_data = {
'title': 'GeoNode Map',
'permissions': '{"users":{"AnonymousUser": ["view_resourcebase"]},"groups":{}}',
'doc_url': 'http://www.geonode.org/map.pdf'}
response = self.client.post(reverse('document_upload'), data=form_data)
self.assertEqual(response.status_code, 302)
d = Document.objects.get(title='GeoNode Map')
self.assertEqual(d.doc_url, 'http://www.geonode.org/map.pdf')
form_data['doc_url'] = 'http://www.geonode.org/mapz.pdf'
response = self.client.post(
reverse(
'document_replace',
args=[
d.id]),
data=form_data)
self.assertEqual(response.status_code, 302)
d = Document.objects.get(title='GeoNode Map')
self.assertEqual(d.doc_url, 'http://www.geonode.org/mapz.pdf')
def test_upload_document_form(self):
"""
Tests the Upload form.
"""
form_data = dict()
form = DocumentCreateForm(data=form_data)
self.assertFalse(form.is_valid())
# title is required
self.assertTrue('title' in form.errors)
# permissions are required
self.assertTrue('permissions' in form.errors)
# since neither a doc_file nor a doc_url are included __all__ should be
# in form.errors.
self.assertTrue('__all__' in form.errors)
form_data = {
'title': 'GeoNode Map',
'permissions': '{"anonymous":"document_readonly","authenticated":"resourcebase_readwrite","users":[]}',
'doc_url': 'http://www.geonode.org/map.pdf'}
form = DocumentCreateForm(data=form_data)
self.assertTrue(form.is_valid())
self.assertTrue(isinstance(form.cleaned_data['permissions'], dict))
# if permissions are not JSON serializable, the field should be in
# form.errors.
form_data['permissions'] = 'non-json string'
self.assertTrue(
'permissions' in DocumentCreateForm(
data=form_data).errors)
form_data = {
'title': 'GeoNode Map',
'permissions': '{"anonymous":"document_readonly","authenticated":"resourcebase_readwrite","users":[]}',
}
file_data = {
'doc_file': SimpleUploadedFile(
'test_img_file.gif',
self.imgfile.read(),
'image/gif')}
form = DocumentCreateForm(form_data, file_data)
self.assertTrue(form.is_valid())
# The form should raise a validation error when a url and file is
# present.
form_data['doc_url'] = 'http://www.geonode.org/map.pdf'
form = DocumentCreateForm(form_data, file_data)
self.assertFalse(form.is_valid())
self.assertTrue('__all__' in form.errors)
def test_document_details(self):
"""/documents/1 -> Test accessing the detail view of a document"""
d = Document.objects.get(pk=1)
d.set_default_permissions()
response = self.client.get(reverse('document_detail', args=(str(d.id),)))
self.assertEquals(response.status_code, 200)
def test_access_document_upload_form(self):
"""Test the form page is returned correctly via GET request /documents/upload"""
log = self.client.login(username='bobby', password='bob')
self.assertTrue(log)
response = self.client.get(reverse('document_upload'))
self.assertTrue('Upload Documents' in response.content)
def test_document_isuploaded(self):
"""/documents/upload -> Test uploading a document"""
f = SimpleUploadedFile(
'test_img_file.gif',
self.imgfile.read(),
'image/gif')
m = Map.objects.all()[0]
self.client.login(username='admin', password='admin')
response = self.client.post(
reverse('document_upload'),
data={
'file': f,
'title': 'uploaded_document',
'q': m.id,
'type': 'map',
'permissions': '{"users":{"AnonymousUser": ["view_resourcebase"]}}'},
follow=True)
self.assertEquals(response.status_code |
frreiss/tensorflow-fred | tensorflow/python/distribute/multi_process_runner.py | Python | apache-2.0 | 57,381 | 0.004305 | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-process runner for testing purpose."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import json
import os
import signal
import sys
import threading
import time
import unittest
import weakref
from absl import logging
import six
from six.moves import queue as Queue
from tensorflow.python import tf2
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import multi_process_lib
from tensorflow.python.eager import contex | t
from tensorflow.python.framework import test_util
from tensorflow.python.util.tf_export import tf_export
multiprocessing = multi_process_lib.multiprocessing
# pylint: disable=g-import-not-at-top
try:
# `faulthan | dler` is not available in py2.
import faulthandler
except ImportError:
faulthandler = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import dill
except ImportError:
dill = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import tblib.pickling_support
# For pickling traceback objects.
tblib.pickling_support.install()
except ImportError:
pass
# _ProcessStatusInfo contains process status information. When is_successful
# attribute is True, the subprocess has ended successfully, or if False, the
# exception stack trace info is stored in exc_info to pass on to parent process
# to be re-raised.
_ProcessStatusInfo = collections.namedtuple(
'_ProcessStatusInfo',
['task_type', 'task_id', 'is_successful', 'exc_info', 'return_value'])
# Information returned from a successful MultiProcessRunner run.
MultiProcessRunnerResult = collections.namedtuple('MultiProcessRunnerResult',
['return_value', 'stdout'])
# visible_gpus: If not None, CUDA_VISIBLE_DEVICES is set to visible_gpus.
TestEnvironment = collections.namedtuple('TestEnvironment', [
'task_type', 'task_id', 'cluster_spec', 'rpc_layer', 'grpc_fail_fast',
'v2_enabled', 'executing_eagerly', 'visible_gpus'
])
# Resources for communication between worker processes and the main process.
#
# `process_status_queue` is used by `multi_process_runner` internally for
# communication from subprocesses to the parent process for whether it's been
# successful, and if not what the error stack trace is.
# `parent_to_sub_queue` is used for communications from parent to subprocess.
# Currently this is only used to terminate subprocesses.
# TODO(rchao): Remove this once subprocess is terminated by SIGKILL.
# `streaming_pipe_w` is to stream stdout and stderr from subprocesses to parent
# process.
# `barrier` is a barrier for the party of all subprocesses.
Resources = collections.namedtuple('Resources', [
'process_status_queue', 'parent_to_sub_queue', 'streaming_pipe_w', 'barrier'
])
# Default time out sec is selected so that it's handled before the default
# "medium" timeout of the test runs.
_DEFAULT_TIMEOUT_SEC = 200
# The timeout in seconds to wait to force kill a child process. When a child
# process times out we first try to SIGTERM it so that it has a chance to dump
# stacktraces. However dumping stacktrace can take a long time.
_FORCE_KILL_WAIT_SEC = 30
class MultiProcessRunner(object):
"""A utility class to start multiple processes to simulate a cluster.
We need to use multiple processes to simulate a cluster in TF 2.0 tests
because TF 2.0 has some process-global data structures that have to be
separated by processes. We also need child processes to test out our fault
tolerance because shutting down a standard TensorFlow server within its
process is not supported.
Note: the main test program that uses this runner class must run main program
via `test_main` defined in this file. Using this runner in non-test binaries
is not supported yet.
This class is not thread-safe. Child processes will inherit TF2 behavior flag.
"""
def __init__(self,
fn,
cluster_spec,
rpc_layer=None,
max_run_time=None,
grpc_fail_fast=None,
stream_output=True,
return_output=False,
use_dill_for_args=True,
daemon=False,
dependence_on_chief=True,
auto_restart=False,
share_gpu=True,
args=None,
kwargs=None):
"""Instantiation of a `MultiProcessRunner`.
Args:
fn: Function to be run on child processes. This will be run on processes
for all task types.
cluster_spec: Dict for cluster spec. The utility function
`tf.__internal__.distribute.multi_process_runner.create_cluster_spec`
can be conveniently used to create such dict. The following is an
example of cluster with three workers and two ps's.
{"worker": ["worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"],
"ps": ["ps0.example.com:2222",
"ps1.example.com:2222"]}
rpc_layer: RPC layer to use. Default value is 'grpc'.
max_run_time: `None` or integer. If not `None`, child processes are forced
to exit at approximately this many seconds after this utility is called.
We achieve this through `signal.alarm()` api. Note that this is best
effort at Python level since Python signal handler does not get executed
when it runs lower level C/C++ code. So it can be delayed for
arbitrarily long time. If any of the child process is still running when
`max_run_time` is up, they will be force-terminated and an
`UnexpectedSubprocessExitError` may be raised. If `None`, child
processes are not forced to exit.
grpc_fail_fast: Whether GRPC connection between processes should fail
without retrying. Defaults to None, in which case the environment
variable is not explicitly set.
stream_output: True if the output/error from the subprocesses should be
streamed to be printed in parent process' log. Defaults to True.
return_output: If True, the output/error from the subprocesses should be
collected to be attached to the resulting namedtuple returned from
`join()`. The list of output can be retrieved via `stdout` attribute.
Defaults to False.
use_dill_for_args: Whether to use dill to pickle `args` and `kwargs`. dill
can pickle more objects, but doesn't work with types in
`multiprocessing` library like `Mutex`.
daemon: Whether to start processes as daemons.
dependence_on_chief: Whether to terminates the cluster if the chief exits.
If auto_restart is True, it only terminates the cluster if the chief
exits with a zero exit code.
auto_restart: Whether to automatically restart processes that exit with
non-zero exit code.
share_gpu: Whether to share GPUs among workers. If False, each worker is
assigned different GPUs in a roundrobin fashion. This should be True
whenever possible for better test execution coverage; some situations
that need it to be False are tests that runs NCCL.
args: Positional arguments to be sent to `fn` run on subprocesses.
kwargs: Keyword arguments to be sent to `fn` run on subprocesses.
Raises:
RuntimeError: if |
opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/booking/urls.py | Python | mit | 466 | 0.004292 | from django.conf.urls import url
from waldur_mastermind. | booking import views
def register_in(router):
router.register(
r'booking-resources', views.ResourceViewSet, basename='booking-resource'
)
router.register(
r'booking-offerings', views.OfferingViewSet, basename='booking-offering'
)
urlpatterns = [
url(
r'^api/marketplace-bookings/(? | P<uuid>[a-f0-9]+)/$',
views.OfferingBookingsViewSet.as_view(),
),
]
|
eagleamon/home-assistant | tests/components/emulated_hue/test_hue_api.py | Python | apache-2.0 | 14,968 | 0 | """The tests for the emulated Hue component."""
import asyncio
import json
from unittest.mock import patch
import pytest
from homeassistant import bootstrap, const, core
import homeassistant.components as core_components
from homeassistant.components import (
emulated_hue, http, light, script, media_player, fan
)
from homeassistant.const import STATE_ON, STATE_OFF
from homeassistant.components.emulated_hue.hue_api import (
HUE_API_STATE_ON, HUE_API_STATE_BRI, HueUsernameView,
HueAllLightsStateView, HueOneLightStateView, HueOneLightChangeView)
from homeassistant.components.emulated_hue import Config
from tests.common import (
get_test_instance_port, mock_http_component_app)
HTTP_SERVER_PORT = get_test_instance_port()
BRIDGE_SERVER_PORT = get_test_instance_port()
BRIDGE_URL_BASE = 'http://127.0.0.1:{}'.format(BRIDGE_SERVER_PORT) + '{}'
JSON_HEADERS = {const.HTTP_HEADER_CONTENT_TYPE: const.CONTENT_TYPE_JSON}
@pytest.fixture
def hass_hue(loop, hass):
"""Setup a hass instance for these tests."""
# We need to do this to get access to homeassistant/turn_(on,off)
loop.run_until_complete(
core_components.async_setup(hass, {core.DOMAIN: {}}))
loop.run_until_complete(bootstrap.async_setup_component(
hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_SERVER_PORT: HTTP_SERVER_PORT}}))
with patch('homeassistant.components'
'.emulated_hue.UPNPResponderThread'):
loop.run_until_complete(
bootstrap.async_setup_component(hass, emulated_hue.DOMAIN, {
emulated_hue.DOMAIN: {
emulated_hue.CONF_LISTEN_PORT: BRIDGE_SERVER_PORT,
emulated_hue.CONF_EXPOSE_BY_DEFAULT: True
}
}))
loop.run_until_complete(
bootstrap.async_setup_component(hass, light.DOMAIN, {
'light': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
bootstrap.async_setup_component(hass, script.DOMAIN, {
'script': {
'set_kitchen_light': {
'sequence': [
{
'service_template':
"light.turn_{{ requested_state }}",
'data_template': {
'entity_id': 'light.kitchen_lights',
'brightness': "{{ requested_level }}"
}
}
]
}
}
}))
loop.run_until_complete(
bootstrap.async_setup_component(hass, media_player.DOMAIN, {
'media_player': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
bootstrap.async_setup_component(hass, fan.DOMAIN, {
'fan': [
{
'platform': 'demo',
}
]
}))
# Kitchen light is explicitly excluded from being exposed
kitchen_light_entity = hass.states.get('light.kitchen_lights')
attrs = dict(kitchen_light_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE] = False
hass.states.async_set(
kitchen_light_entity.entity_id, kitchen_light_entity.state,
attributes=attrs)
# Expose the script
script_entity = hass.states.get('script.set_kitchen_light')
attrs = dict(script_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE] = True
hass.states.async_set(
script_entity.entity_id, script_entity.state, attributes=attrs
)
return hass
@pytest.fixture
def hue_client(loop, hass_hue, test_client):
    """Create a test web client exposing only the emulated hue API views."""
    web_app = mock_http_component_app(hass_hue)
    config = Config(None, {'type': 'alexa'})
    # Register each Hue API view on the bare test app so requests hit the
    # emulated bridge endpoints directly, without the full HTTP stack.
    HueUsernameView().register(web_app.router)
    HueAllLightsStateView(config).register(web_app.router)
    HueOneLightStateView(config).register(web_app.router)
    HueOneLightChangeView(config).register(web_app.router)
    return loop.run_until_complete(test_client(web_app))
@asyncio.coroutine
def test_discover_lights(hue_client):
    """Test the discovery of lights."""
    result = yield from hue_client.get('/api/username/lights')
    assert result.status == 200
    assert 'application/json' in result.headers['content-type']
    result_json = yield from result.json()
    devices = set(val['uniqueid'] for val in result_json.values())
    # Make sure the lights we added to the config are there
    assert 'light.ceiling_lights' in devices
    assert 'light.bed_light' in devices
    assert 'script.set_kitchen_light' in devices
    assert 'light.kitchen_lights' not in devices
    assert 'media_player.living_room' in devices
    assert 'media_player.bedroom' in devices
    assert 'media_player.walkman' in devices
    assert 'media_player.lounge_room' in devices
    assert 'fan.living_room_fan' in devices
@asyncio.coroutine
def test_get_light_state(hass_hue, hue_client):
"""Test the getting of light state."""
# Turn office light on and set to 127 brightness
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_ON,
{
const.ATTR_ENTITY_ID: 'light.ceiling_lights',
light.ATTR_BRIGHTNESS: 127
},
blocking=True)
office_json = yield from perform_get_light_state(
hue_client, 'light.ceiling_lights', 200)
assert office_json['state'][HUE_API_STATE_ON] is True
assert office_json['state'][HUE_API_STATE_BRI] == 127
# Check all lights view
result = yield from hue_client.get('/api/username/lights')
assert result.status == 200
assert 'application/json' in result.headers['content-type']
result_json = yield from result.json()
assert 'light.ceiling_lights' in result_json
assert result_json['light.ceiling_lights']['state'][HUE_API_STATE_BRI] == \
127
# Turn bedroom light off
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{
const.ATTR_ENTITY_ID: 'light.bed_light'
},
blocking=True)
bedroom_json = yield from perform_get_light_state(
hue_client, 'light.bed_light', 200)
assert bedroom_json['state'][HUE_API_STATE_ON] is False
assert bedroom_json['state'][HUE_API_STATE_BRI] == 0
# Make sure kitchen light isn't accessible
yield from perform_get_light_state(
hue_client, 'light.kitchen_lights', 404)
@asyncio.coroutine
def test_put_light_state(hass_hue, hue_client):
"""Test the seeting of light states."""
yield from perform_put_test_on_ceiling_lights(hass_hue, hue_client)
# Turn the bedroom light on first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_ON,
{const.ATTR_ENTITY_ID: 'light.bed_light',
light.ATTR_BRIGHTNESS: 153},
blocking=True)
bed_light = hass_hue.states.get('light.bed_light')
assert bed_light.state == STATE_ON
assert bed_light.attributes[light.ATTR_BRIGHTNESS] == 153
# Go through the API to turn it off
bedroom_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.bed_light', False)
bedroom_result_json = yield from bedroom_result.json()
assert bedroom_result.status == 200
assert 'application/json' in bedroom_result.headers['content-type']
assert len(bedroom_result_json) == 1
# Check to make sure the state changed
bed_light = hass_hue.states.get('light.bed_light')
assert bed_light.state == STATE_OFF
# Make sure we can't change the kitchen light state
kitchen_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.kitchen_light', True)
assert kitchen_result.status == 404
@asyncio.coroutine
def test_put_light_state_script(hass_hue, hue_client):
"""Test the setting of script variables."""
# Turn the kitchen light off first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TU |
kagklis/Frequent-Itemset-Hiding-Toolbox-x86 | Heuristic_Coeff.py | Python | mit | 4,188 | 0.010267 | #-------------------------------------------------------------------------------
# Name: Heuristic_Coeff.py
# Purpose: Implements FIH algorithm found in "An Integer Linear Programming Scheme to Sanitize Sensitive Frequent Itemsets" by Kagklis et al.
# Author: Vasileios Kagklis
# Created: 20/03/2014
# Copyright: (c) Vasileios Kagklis
#-------------------------------------------------------------------------------
from __future__ import print_function
from time import clock
from math import ceil
import cplex
from cplex import SparsePair
from fim import apriori
import myiolib
import hcba_ext
from SetOp import *
###################################################
def findMin(S):
    """
    Return the members of S that are not supersets (proper or equal) of any
    other member, preserving input order.

    If every member is dominated (e.g. the list consists of duplicates),
    the original list is returned unchanged, mirroring the historical
    fallback behaviour of this function.

    :param S: list of (frozen)sets
    :return: list of minimal itemsets
    """
    result = []
    for i in range(len(S)):
        # S[i] is dominated if it contains some other member entirely.
        dominated = any(
            len(S[i]) >= len(S[j]) and S[i].issuperset(S[j])
            for j in range(len(S)) if j != i)
        if not dominated:
            result.append(S[i])
    # Preserve the original fallback: an empty result yields S itself.
    return result if result else S
###################################################
def convert2frozen(rev_fd):
    """
    For every itemset containing a float member, emit a copy of that
    itemset with the float removed (one copy per float member found).
    """
    stripped = []
    for itemset in rev_fd:
        for member in itemset:
            if isinstance(member, float):
                stripped.append(itemset - frozenset([member]))
    return stripped
###################################################
def Heuristic_Coeff_main(fname1, fname2, fname3, sup, mod_name):
    """Sanitize a dataset so the sensitive itemsets fall below threshold.

    Solves an integer program (via CPLEX) selecting which sensitive
    transactions to modify, applies the modifications, and writes the
    sanitized dataset to ``<mod_name>_results.txt``.

    :param fname1: file with the frequent itemsets F
    :param fname2: file with the sensitive itemsets S
    :param fname3: dataset file
    :param sup: minimum support threshold (fraction of transactions)
    :param mod_name: prefix of the output results file
    :return: tuple ("Not Applicable", #items removed, elapsed seconds)
    """
    change_raw_data = 0
    solution = None
    k = 0
    # Read dataset and identify discrete items
    lines, tid = myiolib.readDataset(fname3)
    I = hcba_ext.get_1itemsets(tid)
    # Absolute support count corresponding to the relative threshold.
    abs_supp = ceil(sup*lines-0.5)
    # Load F from file
    F = myiolib.readLargeData(fname1)
    # Load S from file (reduced to its minimal itemsets)
    S = minSet(myiolib.readSensitiveSet(fname2))
    # Calculate the revised F: drop every superset of a sensitive itemset.
    start_time = clock()
    SS = supersets(S, F)
    Rev_Fd = list(set(F)-SS)
    rev_t = clock() - start_time
    Rev_Fd.sort(key = len, reverse = True)
    # Indices of transactions supporting at least one sensitive itemset.
    sens_ind =[]
    for i in xrange(lines):
        for itemset in S:
            if itemset.issubset(tid[i]):
                sens_ind.append(i)
                break
    start_time = clock()
    coeffs, rem = hcba_ext.calculateCoeffs(tid, sup, sens_ind, S, F, Rev_Fd)
    # The initial objective => Elastic filtering
    cpx = cplex.Cplex()
    cpx.set_results_stream(None)
    # Binary decision variable per sensitive transaction: 1 => sanitize it.
    cpx.objective.set_sense(cpx.objective.sense.minimize)
    cpx.variables.add(obj = coeffs, lb =[0]*len(coeffs),
                      ub=[1]*len(coeffs),
                      types=[cpx.variables.type.integer]*len(coeffs))
    # One constraint per sensitive itemset: enough supporting transactions
    # must be sanitized to push its support below abs_supp.
    for itemset in S:
        ind = []
        cur_supp = 0
        for i in xrange(len(sens_ind)):
            if itemset.issubset(tid[sens_ind[i]]):
                ind.append(i)
                cur_supp += 1
        cpx.linear_constraints.add(lin_expr = [SparsePair(ind = ind, val=[1]*len(ind))],
                                   senses=["G"], rhs=[cur_supp - abs_supp + 1], names=["c"+str(k)])
        k+=1
    cpx.solve()
    solution = map(int, cpx.solution.get_values())
    # Apply sanitization: remove the chosen items from chosen transactions.
    for i in hcba_ext.get_indices(solution, 1):
        tid[sens_ind[i]] = tid[sens_ind[i]] - rem[i]
        change_raw_data += len(rem[i])
    # Release the large intermediates before writing output.
    coeffs = None
    cpx = None
    F = None
    Rev_Fd = None
    exec_time = clock()-start_time
    # Write the sanitized dataset, one space-separated transaction per line.
    with open(mod_name+'_results.txt', 'w') as out_file:
        for i in xrange(lines):
            print(' '.join(sorted(tid[i])), file=out_file)
    tid = None
    return("Not Applicable", change_raw_data, rev_t+exec_time)
|
francisar/rds_manager | aliyun/api/rest/Rds20130528SwitchDBInstanceNetTypeRequest.py | Python | mit | 380 | 0.026316 | '''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Rds20130528SwitchDBInstanceNetTypeRequest(RestApi):
    """Request object for the RDS ``SwitchDBInstanceNetType`` API (2013-05-28)."""

    def __init__(self, domain='rds.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; populated by the caller before sending.
        self.ConnectionStringPrefix = None
        self.DBInstanceId = None

    def getapiname(self):
        return 'rds.aliyuncs.com.SwitchDBInstanceNetType.2013-05-28'
|
matwey/rpmlint | test/test_cli.py | Python | gpl-2.0 | 1,865 | 0.001609 | from pathlib import PosixPath
import pytest
from rpmlint.cli import process_lint_args
from rpmlint.config import Config
from rpmlint.lint import Lint
@pytest.mark.parametrize('test_arguments', [['-c', 'rpmlint/configs/thisdoesntexist.toml']])
def test_parsing_non_existing_config_file(test_arguments):
    """A missing config file must abort argument parsing with exit code 2."""
    with pytest.raises(SystemExit) as exc:
        process_lint_args(test_arguments)
    assert exc.value.code == 2
@pytest.mark.parametrize('test_arguments', [['-c', 'rpmlint/configdefaults.toml']])
def test_parsing_config_file(test_arguments):
    """An existing config file is parsed into a single-path 'config' entry."""
    parsed = process_lint_args(test_arguments)
    assert len(parsed['config']) == 1
    assert parsed['config'][0] == PosixPath('rpmlint/configdefaults.toml')
@pytest.mark.parametrize('test_arguments', [['-c', 'configs/openSUSE']])
def test_parsing_opensuse_conf(test_arguments):
    """A config directory is expanded to all its toml files and extends defaults."""
    parsed = process_lint_args(test_arguments)
    assert len(parsed['config']) == 7
    assert PosixPath('configs/openSUSE/opensuse.toml') in parsed['config']
    assert PosixPath('configs/openSUSE/licenses.toml') in parsed['config']
    assert PosixPath('configs/openSUSE/pie-executables.toml') in parsed['config']
    defaultcfg = Config()
    lint = Lint(parsed)
    default_checks = defaultcfg.configuration['Checks']
    checks = lint.config.configuration['Checks']
    # Verify that all original Checks are enabled and some new are added
    for check in default_checks:
        assert check in checks
    assert len(checks) > len(default_checks)
    # Verify that all scoring keys refer to known checks/descriptions
    checks = set(lint.output.error_details.keys())
    checks |= set(defaultcfg.configuration['Descriptions'].keys())
    score_keys = lint.config.configuration['Scoring'].keys()
    for score_key in score_keys:
        if score_key.startswith('percent-in-'):
            continue
        assert score_key in checks
ijzer/cwbot-ndy | kol/request/BootClanMemberRequest.py | Python | bsd-3-clause | 475 | 0.002105 | from GenericRequest import | GenericRequest
class BootClanMemberRequest(GenericRequest):
def __init__(self, session, userId):
super(BootClanMemberRequest, | self).__init__(session)
self.url = session.serverURL + "clan_members.php"
self.requestData['pwd'] = session.pwd
self.requestData['action'] = 'modify'
self.requestData['begin'] = '1'
self.requestData['pids[]'] = userId
self.requestData['boot%s' % userId] = 'on'
|
dideher/ekloges_dieuthinton | ekloges.py | Python | gpl-2.0 | 18,757 | 0.007097 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
import codecs
import re
import argparse
import os
from prettytable import PrettyTable
report08_schools = {}
report08_employees = {}
report08_school_employees = {}
report16_employee = None
# The following (combined with report16_absence_reasons) is used when an employee is absent, has multiple assignments and not all schools have input this absence
report16_absents = {}
# we will store employee school exclusion in the employee_school_exclusions dict
# format: key -> employee afm
employee_school_exclusions = {}
# school exclusions
excluced_schools = list()
# employee exclusions
excluced_employees = dict()
def filterAFM(rawAFM):
return re.search('=\"(\d*)\"', rawAFM).group(1)
def csv_unireader(f, encoding="utf-8"):
for row in csv.reader(codecs.iterencode(codecs.iterdecode(f, encoding), "utf-8"), delimiter=';', quotechar='"'):
yield [e.decode("utf-8") for e in row]
def parseEmployeeExclusionList(reportPath):
"""
Parses a CSV which in the first column contains the IDs of all employees that need to be excluded from
processing
:param reportPath:
:return: a list of schools ids to exclude
"""
result = dict()
with open(reportPath, 'rb') as report_csvfile:
reader = csv_unireader(report_csvfile, encoding='iso8859-7')
for row in reader:
afm = str(row[0])
afm = afm if len(afm)==9 else '0'+afm
result[afm]=(row[1] if len(row)>1 and row[1] != u'' else u'Άγνωστος λόγος εξαίρεσεις')
return result
def parseSchoolExclusionList(reportPath):
"""
Parses a CSV which in the first column contains the IDs of all schools that need to be excluded from
processing
:param reportPath:
:return: a list of schools ids to exclude
"""
result = list()
with open(reportPath, 'rb') as report_csvfile:
reader = csv_unireader(report_csvfile, encoding='iso8859-7')
for row in reader:
result.append(row[0])
return result
def parseReport16(reportPath='/Users/slavikos/Downloads/CSV_2015-06-03-100905.csv'):
"""
Parse report 16 (Κατάλογος Εκπαιδευτικών που Απουσιάζουν από Σχολικές Μονάδες)
:param reportPath:
:return:
"""
report16_absence_reasons = [u'ΜΑΚΡΟΧΡΟΝΙΑ ΑΔΕΙΑ (>10 ημέρες)',u'ΑΠΟΣΠΑΣΗ ΣΤΟ ΕΞΩΤΕΡΙΚΟ',u'ΑΠΟΣΠΑΣΗ ΣΕ ΦΟΡΕΑ ΥΠ. ΠΑΙΔΕΙΑΣ',u'ΑΠΟΣΠΑΣΗ ΣΕ ΑΛΛΟ ΠΥΣΠΕ / ΠΥΣΔΕ',u'ΑΠΟΣΠΑΣΗ ΣΕ ΦΟΡΕΑ ΕΚΤΟΣ ΥΠ. ΠΑΙΔΕΙΑΣ',u'ΟΛΙΚΗ ΔΙΑΘΕΣΗ ΣΕ ΑΠΟΚΕΝΤΡΩΜΕΝΕΣ ΥΠΗΡΕΣΙΕΣ ΥΠ. ΠΑΙΔΕΙΑΣ']
result = {}
with open(reportPath, 'rb') as report_csvfile:
reader = csv_unireader(report_csvfile, encoding='iso8859-7')
firstRow = True
for row in reader:
if firstRow:
# first row contains
firstRow = False
continue
# note that employee with employeeAfm is missing from school schoolId
result[filterAFM(row[12])] = { "schoolId": row[6], "reason": "%s (%s)" % (row[22], row[23]) }
# check if generally absent (in case of multiple assignments) and insert in report16_absents
if row[24] in report16_absence_reasons or unicode(row[24]).startswith(u'ΜΑΚΡΟΧΡΟΝΙΑ ΑΔΕΙΑ (>10 ημέρες)'):
report16_absents[filterAFM(row[12])] = row[24]
return result
def parseReport08(reportPath='/Users/slavikos/Downloads/CSV_2015-06-02-130003.csv'):
excluded_school_types = [u'Νηπιαγωγεία']
with open(reportPath, 'rb') as report08_csvfile:
spamreader = csv_unireader(report08_csvfile, encoding='iso8859-7')
firstRow = True
for row in spamreader:
if firstRow:
| firstRow = False
continue
#exclude some school types
if row[4] in excluded_school_types:
continue
# check if the school id is excluded
if row[6] in excluced_schools:
continue
# get school object
schoolObj = report08_schools.get(row[6], None)
if not schoolObj:
# first time we see tha | t school
schoolObj = {
'id': row[6],
'title': row[7],
'email': row[10],
'employees': list()
}
# add school to dict
report08_schools[row[6]] = schoolObj
# fetch employee from cache
employeeAfm = filterAFM(row[16])
employeeObj = report08_employees.get(employeeAfm, None)
if not employeeObj:
# first time we see that employee
employeeObj = {
'id': row[15] if row[15] else '',
'afm': employeeAfm,
'name': row[19],
'surname': row[18],
'fatherName': row[20],
'specialization': row[28],
'assigments': list()
}
# add the employee in the dict
report08_employees[employeeObj.get('afm')] = employeeObj
# add to the school as dict as well
schoolObj['employees'].append(employeeObj)
else:
# employee exists in the report08_employee dict, so add it
# (if he does not exist) in the schools dict as well
if employeeObj not in schoolObj['employees']:
schoolObj['employees'].append(employeeObj)
assigmentObj = {
'schoolId': schoolObj['id'],
'type': row[33],
'assigment': row[34],
'isMaster': True if row[35] == u'Ναι' else False,
'hours': int(row[44]) if row[44] else 0, # Ώρες Υποχ. Διδακτικού Ωραρίου Υπηρέτησης στο Φορέα
'teachingHours': (int(row[46]) if row[46] else 0) + (int(row[47]) if row[47] else 0),
}
employeeObj['assigments'].append(assigmentObj)
# report08_school_employees[schoolObj['id']].append(assigmentObj)
def isExcluded(employeeAfm, schoolId):
"""
Determines if an employee is excluded from school unit id. If the schoolId is None, then
the operation will check the general exclusion list. The operation will
return None if the employee is not excluded or a description if the employee
should be excluded
:param employeeAfm: The employee's AFM
:type employeeAfm: str
:param schoolId: The school ID to check for exclusion
:type schoolId: str
:return: None if the employee is not excluded or a description if the employee should be excluded
"""
if schoolId is None:
return excluced_employees.get(employeeAfm, None)
if len(employee_school_exclusions) > 0:
exclusion = employee_school_exclusions.get(employeeAfm, None)
if exclusion:
# employee is probably excluded
if exclusion.get('schoolId', '') == schoolId:
return exclusion.get('reason', u"Άγνωστος λόγος εξαίρεσεις")
else:
return None
else:
return None
else:
return None
def processSchool(id, filter0=False):
schoolObj = report08_schools.get(id, None)
acceptedList = list()
rejectedList = list()
# fetch school employees, if school is not excluded
schoolEmployees = schoolObj.get('employees', list()) if id not in excluced_schools else list()
for employee in schoolEmployees:
# check if the employee is in the general exclusion list
excludedReason = isExcluded(employeeAfm=employee['afm'], schoolId=None)
# check if the employee is in the exclusion list (for the given school)
if excludedReason is None:
excludedReason = isExcluded(employeeAfm=employee['afm'], schoolId=schoolObj['id'])
if excludedReason:
# employee has been excluded
rejectedList.append(
{
'employee': employee,
'excludedReason': excludedReason,
}
)
continue
if report16_absents and e |
fastmonkeys/vakautin | vakautin/__init__.py | Python | mit | 49 | 0 | from . i | mport app
__ | version__ = app.__version__
|
klen/pyserve | pyserve/icons.py | Python | lgpl-3.0 | 2,305 | 0 | by_ext = [
('py.png' | , 'py'),
('python.png', 'pyc'),
('page_white_text_width.png', ['md', 'markdown', 'rst', 'rtf']),
('page_white_text.png', 'txt'),
('page_white_code.png', ['html', 'htm', 'cgi']),
('page_white_visualstudio.png', ['asp', 'vb']),
('page_white_ruby.png', 'rb'),
('page_code.png', 'xhtml'),
('pag | e_white_code_red.png', ['xml', 'xsl', 'xslt', 'yml']),
('script.png', ['js', 'json', 'applescript', 'htc']),
('layout.png', ['css', 'less']),
('page_white_php.png', 'php'),
('page_white_c.png', 'c'),
('page_white_cplusplus.png', 'cpp'),
('page_white_h.png', 'h'),
('database.png', ['db', 'sqlite', 'sqlite3']),
('page_white_database.png', 'sql'),
('page_white_gear.png', ['conf', 'cfg', 'ini', 'reg', 'sys']),
('page_white_zip.png', ['zip', 'tar', 'gz', 'tgz', '7z', 'alz', 'rar',
'bin', 'cab']),
('cup.png', 'jar'),
('page_white_cup.png', ['java', 'jsp']),
('application_osx_terminal.png', 'sh'),
('page_white_acrobat.png', 'pdf'),
('package.png', ['pkg', 'dmg']),
('shape_group.png', ['ai', 'svg', 'eps']),
('application_osx.png', 'app'),
('cursor.png', 'cur'),
('feed.png', 'rss'),
('cd.png', ['iso', 'vcd', 'toast']),
('page_white_powerpoint.png', ['ppt', 'pptx']),
('page_white_excel.png', ['xls', 'xlsx', 'csv']),
('page_white_word.png', ['doc', 'docx']),
('page_white_flash.png', 'swf'),
('page_white_actionscript.png', ['fla', 'as']),
('comment.png', 'smi'),
('disk.png', ['bak', 'bup']),
('application_xp_terminal.png', ['bat', 'com']),
('application.png', 'exe'),
('key.png', 'cer'),
('cog.png', ['dll', 'so']),
('pictures.png', 'ics'),
('picture.png', ['gif', 'png', 'jpg', 'jpeg', 'bmp', 'ico']),
('film.png', ['avi', 'mkv']),
('error.png', 'log'),
('music.png', ['mpa', 'mp3', 'off', 'wav']),
('font.png', ['ttf', 'eot']),
('vcard.png', 'vcf')
]
ICONS_BY_NAME = dict(
Makefile='page_white_gear.png',
Rakefile='page_white_gear.png',
README='page_white_text_width.png',
LICENSE='shield.png',
)
ICONS_BY_EXT = dict()
for icon, exts in by_ext:
if not isinstance(exts, list):
exts = [exts]
for e in exts:
ICONS_BY_EXT[e] = icon
|
mhnatiuk/phd_sociology_of_religion | scrapper/build/lxml/benchmark/bench_objectify.py | Python | gpl-2.0 | 3,322 | 0.001204 | import sys, copy
from itertools import *
import benchbase
from benchbase import (with_attributes, with_text, onlylib,
serialized, children, nochange)
############################################################
# Benchmarks
############################################################
class BenchMark(benchbase.TreeBenchMark):
repeat100 = range(100)
repeat1000 = range(1000)
repeat3000 = range(3000)
def __init__(self, lib):
from lxml import etree, objectify
self.objectify = objectify
parser = etree.XMLParser(remove_blank_text=True)
lookup = objectify.ObjectifyElementClassLookup()
parser.setElementClassLookup(lookup)
super(BenchMark, self).__init__(etree, parser)
@nochange
def bench_attribute(self, root):
"1 2 4"
for i in self.repeat3000:
root.zzzzz
def bench_attribute_assign_int(self, root):
"1 2 4"
for i in self.repeat3000:
root.XYZ = 5
def bench_attribute_assign_string(self, root):
"1 2 4"
for i in self.repeat3000:
root.XYZ = "5"
@nochange
def bench_attribute_cached(self, root):
"1 2 4"
cache = root.zzzzz
for i in self.repeat3000:
root.zzzzz
@nochange
def bench_attributes_deep(self, root):
"1 2 4"
for i in self.repeat3000:
root.zzzzz['{cdefg}a00001']
@nochange
def bench_attributes_deep_cached(self, root):
"1 2 4"
cache1 = root.zzzzz
cache2 = cache1['{cdefg}a00001']
for i in self.repeat3000:
root.zzzzz['{cdefg}a00001']
@nochange
def bench_objectpath(self, root):
"1 2 4"
path = self.objectify.ObjectPath(".zzzzz")
for i in self.repeat3000:
path(root)
@nochange
def bench_objectpath_deep(self, root):
"1 2 4"
path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
for i in self.repeat3000:
| path(root)
@nochange
def bench_objectpath_deep_cached(self, root):
"1 2 4"
cache1 = root.zzzzz
cache2 = cache1['{cdefg}a00001']
| path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
for i in self.repeat3000:
path(root)
@with_text(text=True, utext=True, no_text=True)
def bench_annotate(self, root):
self.objectify.annotate(root)
@nochange
def bench_descendantpaths(self, root):
root.descendantpaths()
@nochange
@with_text(text=True)
def bench_type_inference(self, root):
"1 2 4"
el = root.aaaaa
for i in self.repeat1000:
el.getchildren()
@nochange
@with_text(text=True)
def bench_type_inference_annotated(self, root):
"1 2 4"
el = root.aaaaa
self.objectify.annotate(el)
for i in self.repeat1000:
el.getchildren()
@nochange
@children
def bench_elementmaker(self, children):
E = self.objectify.E
for child in children:
root = E.this(
"test",
E.will(
E.do("nothing"),
E.special,
)
)
if __name__ == '__main__':
benchbase.main(BenchMark)
|
pablodecm/cartographer | cartographer/coverers.py | Python | mit | 4,786 | 0 | """
Coverers of the filtrated space
"""
from __future__ import print_function
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
class HyperRectangleCoverer(BaseEstimator, TransformerMixin):
""" Covers the space using overlapping hyperectangles
Parameters
----------
intervals: integer or list of integers
number of intervals in each filtered space dimension, if an integer
is specified the same number is used in all dimensions.
overlap: float or list of floats
fraction of overlap between hyperectangles in each space dimension,
if a single float is specified the same overlap is used in all
dimensions.
Attributes
----------
"""
def __init__(self, intervals=10, overlap=0.5):
self.intervals = intervals
self.overlap = overlap
def fit(self, X, y=None):
""" Creates the space covering for the input data
It creates a hyperectangle covering of the multidimensional space of X.
Parameters
----------
X: array-like, shape=(n_samples, n_features)
Data which will be covered.
"""
if y is not None:
raise ValueError("y value will not be used")
if np.iterable(self.intervals):
if len(self.intervals) != X.shape[1]:
raise ValueError("length of intervals not matches X dimension")
else:
intervals = np.array(self.intervals, dtype=int)
else:
intervals = np.full((X.shape[1]), self.intervals, dtype=int)
if np.iterable(self.overlap):
if len(self.overlap) != X.shape[1]:
raise ValueError("length of overlap not matches X dimension")
else:
overlap = np.array(self.overlap, dtype=float)
else:
overlap = np.full((X.shape[1]), self.overlap, dtype=float)
# partition each dimension, incluiding last point
bbs, ws = zip(*[np.linspace(*min_max_num, endpoint=True, retstep=True)
for min_max_num in
zip(np.min(X, axis=0),
np.max(X, axis=0), intervals + 1)])
# get cover lower and upper bounds
self.lowerbounds = np.array(np.meshgrid(*[bb[:-1] - shift for
bb, shift in
zip(bbs, ws * overlap)])) \
.T.reshape(-1, X.shape[1])
self.upperbounds = np.array(np.meshgrid(*[bb[1:] + shift for
bb, shi | ft in
zip(bbs, ws * overlap)])) \
.T.reshape(-1, X.shape[1])
return self
def transform(self, X, y=None):
""" Returns boolean array of space partition membership
Returns a (n_samples, n_partitions) boolean array whose elements
are true when the sample (row) is a member of each space partition
(column). This will be used to filter in the clustering space.
Parameters
| ----------
X: array-like, shape=(n_samples, n_features)
Data which will be partition in hyperectangles.
Returns
-------
m_matrix: boolean array, shape=(n_samples, n_partitions)
Boolean matrix of sample membership to each partition
"""
if y is not None:
raise ValueError("y value will not be used")
return np.logical_and(
np.all(X[:, :, np.newaxis] > self.lowerbounds.T, axis=1),
np.all(X[:, :, np.newaxis] < self.upperbounds.T, axis=1))
def overlap_matrix(self):
""" Returns a boolean array with the overlaps between space partitions
Returns a (n_partitions, n_partitions) boolean array whose elements
are true when there is overlap between the i and j partitions, only
upper triangle is filled (rest is False).
Returns
-------
overlap_matrix: boolean array, shape=(n_partitions, n_partitions)
Boolean matrix of overlaping between partitions, only the upper
triangle is filled and the rest is False.
"""
overlap_matrix = None
i_min_leq_j_min = self.lowerbounds[
:, :, np.newaxis] <= self.lowerbounds.T
i_max_geq_j_min = self.upperbounds[
:, :, np.newaxis] >= self.lowerbounds.T
overlap_matrix = np.all((i_min_leq_j_min, i_max_geq_j_min), axis=0)
overlap_matrix = np.any((overlap_matrix, overlap_matrix.T), axis=0)
overlap_matrix = np.all(overlap_matrix, axis=1)
# only upper triagular filled
np.fill_diagonal(overlap_matrix, False)
return np.triu(overlap_matrix)
|
kasbah/sooperlooper | src/test_plugin/testdef_simple_state_record_undoall.py | Python | gpl-2.0 | 1,543 | 0.004537 | import time
from nose_parameterized import parameterized
import testdef_simple_state
import engine_wrapper
class simpleStateTestsRecordUndoAll(testdef_simple_state.simpleStateTest):
def setUp(self):
testdef_simple_state.simpleStateTest.setUp(self)
self.engine.request("RECORD")
self.engine.request("R | ECORD")
| self.engine.request("UNDO_ALL")
def testOff(self):
time.sleep(0.001)
self.assertState("Off")
def testUndo(self):
self.engine.request("RECORD")
self.engine.request("UNDO")
time.sleep(0.001)
self.assertState("Off")
def testUndoAll(self):
self.engine.request("RECORD")
self.engine.request("UNDO_ALL")
time.sleep(0.001)
self.assertState("Off")
def testMuteOn(self):
self.engine.request("MUTE_ON")
time.sleep(0.001)
self.assertState("OffMuted")
def testDelay(self):
self.engine.request("DELAY")
time.sleep(0.001)
self.assertState("Delay")
def testRedo(self):
self.engine.request("REDO")
time.sleep(0.001)
self.assertState("Playing")
def testRedoAll(self):
self.engine.request("REDO_ALL")
time.sleep(0.001)
self.assertState("Playing")
@parameterized.expand([(c,) for c in engine_wrapper.commands if c not in [ "RECORD","DELAY","MUTE_ON","MUTE", "REDO", "REDO_ALL"]])
def testAllOff(self, c):
self.engine.request(c)
time.sleep(0.001)
self.assertState("Off")
|
jarvisqi/learn_python | flaskweb/core/doc2vector.py | Python | mit | 1,476 | 0.004219 | # -*- coding: UTF-8 -*-
import sys
sys.path.append("./")
import pandas as pd
import gensim
from utility.mongodb import MongoDBManager
from utility.sentence import segment, sent2vec
class Doc2Vector(object):
"""
文本转向量
"""
def __init__(self):
"""
:param keep_val: 设定的阈值
"""
self.mongo_db = MongoDBManager()
def doc2vect(self):
"""
所有文档转成向量存储到数据库
:return:
"""
model = gensim.models.Doc2Vec.load('./models/doc2vec_v1.model')
df_data = pd.read_excel("./data/new_prd.xlsx", names=["SysNo", "Title", "Content"])
content = []
title = []
for idx, row in df_data.iterrows():
seg_title = segment(row.Title)
seg_content = segment(row.Content)
# 转向量
| content_vect = sent2vec(model, ' '.join(seg_content))
title_vect = sent2vec(model, ' '.join(seg_title))
content_vect = map(str, content_vect.tolist())
title_vect = map(str, title_vect.tolist())
conten | t.append({"_id": int(idx) + 1, "data": list(content_vect)})
title.append({"_id": int(idx) + 1, "data": list(title_vect)})
self.mongo_db.insert("content_vector", content)
self.mongo_db.insert("title_vector", title)
print("finished")
if __name__ == '__main__':
doc2vect = Doc2Vector()
doc2vect.doc2vect()
|
ecreall/lagendacommun | lac/views/admin_process/manage_keywords.py | Python | agpl-3.0 | 2,736 | 0 | # Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import colander
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.default_behavior import Cancel
from pontus.form import FormView
from pontus.schema import Schema
from pontus.widget import Select2Widget
from pontus.view import BasicView
from pontus.view_operation import MultipleView
from deform_treepy.utilities.tree_utility import tree_to_keywords
from lac.content.processes.admin_process.behaviors import (
ManageKeywords)
from lac.content.lac_application import (
CreationCulturelleApplication)
from lac import _
from lac.content.keyword import ROOT_TREE
class ManageKeywordsViewStudyReport(BasicView):
title = 'Alert for keywords'
name = 'alertforkeywordsmanagement'
template = 'lac:views/admin_process/templates/alert_event_keywords.pt'
def update(self):
result = {}
values = {}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
@colander.deferred
def targets_choice(node, kw):
request = node.bindings['request']
site = request.get_site_folder
keywords = [kw.split('/') for kw in tree_to_keywords(site.tree)]
keywords = list(set([item for sublist in keywords for item in sublist]))
if ROOT_TREE in keywords:
keywords.remove(ROOT_TREE)
values = [(v, v) for v in sorted(keywords)]
return Select2Widget(
values=values,
multiple=True
)
class ManageKeywordsSchema(Schema):
targets = colander.SchemaNode(
colander.Set(),
widget=targets_choice,
title=_("Keywords")
)
source = colander.SchemaNode(
colander.String(),
title=_("New keyword")
)
class ManageKeywordsFormView(FormView):
title = _('Manage keywords')
schema = ManageKeywordsSchema()
behaviors = [ManageKeywords, Cancel]
formid = 'formmanagekeywords'
name = 'formmanagekeywords'
@view_config(
name='managekeywords',
context=CreationCulturelleApplication,
renderer='pontus:templates/views | _templates/grid.pt',
)
class ManageKeywordsView(MultipleView):
title = _('Manage keywords')
name = 'managekeywords'
viewid = 'managekeywords'
template = 'd | aceui:templates/mergedmultipleview.pt'
views = (ManageKeywordsViewStudyReport, ManageKeywordsFormView)
validators = [ManageKeywords.get_validator()]
DEFAULTMAPPING_ACTIONS_VIEWS.update({ManageKeywords: ManageKeywordsView})
|
integricho/heroku-buildpack-python-ffmpeg2-lame | vendor/pip-1.2.1/tests/test_all_pip.py | Python | mit | 3,610 | 0.001939 | import os
import re
import sys
import subprocess
import shutil
from os.path import dirname, abspath
from pip.backwardcompat import urllib
from pip.util import rmtree
src_folder = dirname(dirname(abspath(__file__)))
def all_projects():
data = urllib.urlopen('http://pypi.python.org/simple/').read()
projects = [m.group(1) for m in re.finditer(r'<a.*?>(.+)</a>', data)]
return projects
def main(args=None):
if args is None:
args = sys.argv[1:]
if not args:
print('Usage: test_all_pip.py <output-dir>')
sys.exit(1)
output = os.path.abspath(args[0])
if not os.path.exists(output):
print('Creating %s' % output)
os.makedirs(output)
pending_fn = os.path.join(output, 'pending.txt')
if not os.path.exists(pending_fn):
print('Downloading pending list')
projects = all_projects()
print('Found %s projects' % len(projects))
f = open(pending_fn, 'w')
for name in projects:
f.write(name + '\n')
f.close()
print('Starting testing...')
while os.stat(pending_fn).st_size:
_test_packages(output, pending_fn)
print('Finished all pending!')
def _test_packages(output, pending_fn):
package = get_last_item(pending_fn)
print('Testing package %s' % package)
dest_dir = os.path.join(output, package)
print('Creating virtualenv in %s' % dest_dir)
create_venv(dest_dir)
print('Uninstalling actual pip')
code = subprocess.check_call([os.path.join(dest_dir, 'bin', 'pi | p'),
'uninstall', '-y', 'pip'])
assert not code, 'pip uninstallation failed'
print('Installing development pip')
code = subprocess.check_call([os.path.join(dest_dir, 'bin', 'python'),
| 'setup.py', 'install'],
cwd=src_folder)
assert not code, 'pip installation failed'
print('Trying installation of %s' % dest_dir)
code = subprocess.check_call([os.path.join(dest_dir, 'bin', 'pip'),
'install', package])
if code:
print('Installation of %s failed' % package)
print('Now checking easy_install...')
create_venv(dest_dir)
code = subprocess.check_call([os.path.join(dest_dir, 'bin', 'easy_install'),
package])
if code:
print('easy_install also failed')
add_package(os.path.join(output, 'easy-failure.txt'), package)
else:
print('easy_install succeeded')
add_package(os.path.join(output, 'failure.txt'), package)
pop_last_item(pending_fn, package)
else:
print('Installation of %s succeeded' % package)
add_package(os.path.join(output, 'success.txt'), package)
pop_last_item(pending_fn, package)
rmtree(dest_dir)
def create_venv(dest_dir):
if os.path.exists(dest_dir):
rmtree(dest_dir)
print('Creating virtualenv in %s' % dest_dir)
code = subprocess.check_call(['virtualenv', '--no-site-packages', dest_dir])
assert not code, "virtualenv failed"
def get_last_item(fn):
f = open(fn, 'r')
lines = f.readlines()
f.close()
return lines[-1].strip()
def pop_last_item(fn, line=None):
f = open(fn, 'r')
lines = f.readlines()
f.close()
if line:
assert lines[-1].strip() == line.strip()
lines.pop()
f = open(fn, 'w')
f.writelines(lines)
f.close()
def add_package(filename, package):
f = open(filename, 'a')
f.write(package + '\n')
f.close()
if __name__ == '__main__':
main()
|
facom/GeoTrans | PaperFigures.py | Python | gpl-2.0 | 75,999 | 0.031632 | from geotrans import *
from system import *
#########################################
#SYSTEM
#########################################
Ringed=System
NotRinged=copyObject(Ringed)
NotRinged.Ringext.b=NotRinged.Ringint.b=0.0
VAR=1
FIX=0
SHOW=1
HIDE=0
DEF=0
MIN=1
MAX=2
FUNC=3
SCAL=4
STAT=5
IDENT=lambda x:x
POW10=lambda x:10**x
PARAMETERS=dict(
#PLANETARY RADIUS: SATURN/FSTAR, JUPITER/MSTAR
Rplanet=[RSAT,
RSAT,
RJUP,
IDENT,
"S.Rstar",
FIX],
#STELLAR MASS
Mstar=[1.0*MSUN,
0.6*MSUN,
1.2*MSUN,
IDENT,
"MSUN",
FIX],
#ORBITAL SEMI MAJOR AXIS
ap=[1.0*AU,
1.0*AU,
3.0*AU,
IDENT,
"AU",
FIX],
#ECCENTRICITY
ep=[0.0,
0.0,
0.5,
IDENT,
"1",
FIX],
#ORBITAL INCLINATION
iorb=[0.0,
0.0,
1.0,
arccos,
"DEG",
FIX],
#ECCENTRICITY
wp=[0.0*DEG,
0.0*DEG,
360.0*DEG,
IDENT,
"DEG",
FIX],
#RING RADIUS
fe=[2.35,
2.00,
4.00,
IDENT,
"1",
FIX],
fi=[1.58,
1.50,
2.00,
IDENT,
"1",
FIX],
#ORBITAL INCLINATION
ir=[1.0,
0.0,
1.0,
arccos,
"DEG",
FIX],
#ROLL
phir=[0.0*DEG,
#-90.0*DEG,
0.0*DEG,
#+90.0*DEG,
360.0*DEG,
IDENT,
"DEG",
FIX],
#OPACITY
tau=[log10(4.0),
log10(1.0),
log10(4.0),
POW10,
"1",
FIX],
)
PARKEYS=sorted(PARAMETERS.keys())
PROPERTIES=dict(
ieff=[0,
0,
0,
IDENT,
"DEG",
SHOW],
teff=[0,
0,
0,
IDENT,
"DEG",
SHOW],
r=[0,
0,
0,
IDENT,
"1",
SHOW],
p=[0,
0,
0,
IDENT,
"1",
SHOW],
PR=[0,
0,
0,
IDENT,
"1",
SHOW],
logPR=[0,
0,
0,
IDENT,
"1",
SHOW],
)
PROPKEYS=sorted(PROPERTIES.keys())
def transitPosition(Rp,fe,i,t,B,direction=+1,sign=+1,qcorrected=False):
"""
direction = +1 (out of disk), -1 (toward disk)
sign: -1 (before contact), +1 (after contact)
Example:
Contact 1: direction=-1, sign=-1
Contact 2: direction=-1, sign=+1
Contact 3: direction=+1, sign=-1
Contact 4: direction=+1, sign=+1
"""
a=fe*Rp
b=fe*Rp*cos(i)
if qcorrected:
if cos(i)>0.6:
| xp=direction*sqrt((1+direction*sign*a)**2-B**2)
return xp
a=fe*Rp
b=fe*Rp*cos(i)
xp=direction*sqrt(1-a**2*(sin(t)-sign*B/a)**2*\
(1-b**2/a))+\
sign*a*cos(t)
#COMPARE WITH THE NOT-RINGED CASE
xpP=direction*sqrt((1+direction*sign*Rp)**2-B**2)
if sign<0:
if xpP<xp:xp=xpP
else:
if xpP>xp:xp=xpP
return x | p
def testTransitDepth():
print BARL,"Test Transit Depth",RBAR
#========================================
#FIX RINGED PROPERTIES BY HAND
#========================================
#MANUAL i,t
i=60.0*DEG;t=30*DEG
Ringed.ieff=i;Ringed.teff=t
Ringed.Ringext.b=Ringed.Ringext.a*cos(i)
Ringed.Ringext.cost=cos(t);Ringed.Ringext.sint=sin(t)
Ringed.Ringint.b=Ringed.Ringint.a*cos(i)
Ringed.Ringint.cost=cos(t);Ringed.Ringint.sint=sin(t)
Ringed.block=blockFactor(Ringed.tau,i)
#========================================
#NOT RINGED TRANSIT DEPTH
#========================================
Anr=pi*NotRinged.Rp**2
print TAB,"Transit area (not ringed): %.17e"%Anr
#========================================
#ANALYTICAL TRANSIT DEPTH
#========================================
Aarg=analyticalTransitArea(Ringed.Rp,Ringed.block,Ringed.fi,Ringed.fe,Ringed.ieff)
print TAB,"Analytical Transit area (ringed): %.17e"%Aarg
#========================================
#RINGED TRANSIT DEPTH
#========================================
Arg=ringedPlanetArea(Ringed)
print TAB,"Transit area (ringed): %.17e"%Arg
r=sqrt(Arg/Anr)
print TAB,"Ratio of depths: %.17e"%(Arg/Anr)
print TAB,"Ratio of radii: %.17e"%(r)
#========================================
#MONTECARLO AREA
#========================================
NP=10000
#"""
Am,dA,xs,ys=transitAreaMontecarlo(Ringed.Planet,
Ringed.Ringext,
Ringed.Ringint,
NP=NP)
print TAB,"Montecarlo area: %.17e +/- %.1e"%(Am,dA)
#"""
#========================================
#PLOT
#========================================
fig=plt.figure(figsize=(8,8))
ax=fig.gca()
ax.plot(xs,ys,'ro',markersize=1)
plotEllipse(ax,Ringed.Star,color='y')
plotEllipse(ax,Ringed.Planet,color='b')
plotEllipse(ax,Ringed.Ringext,color='k')
plotEllipse(ax,Ringed.Ringint,color='r')
rng=1.5
Re=Ringed.Ringext.a
#Re=1.0
xmin=Ringed.Planet.C[0]-rng*Re;
xmax=Ringed.Planet.C[0]+rng*Re
ymin=Ringed.Planet.C[1]-rng*Re;
ymax=Ringed.Planet.C[1]+rng*Re
ax.set_xlim((xmin,xmax))
ax.set_ylim((ymin,ymax))
ax.grid()
fig.savefig("figures/TestAreas.png")
def testTransitDuration():
fig=plt.figure(figsize=(8,8))
ax=fig.gca()
#========================================
#FIX RINGED PROPERTIES BY HAND
#========================================
#MANUAL i,t
i=80.0*DEG;t=60*DEG
Ringed.ieff=i;Ringed.teff=t
Ringed.Ringext.b=Ringed.Ringext.a*cos(i)
Ringed.Ringext.cost=cos(t);Ringed.Ringext.sint=sin(t)
Ringed.Ringint.b=Ringed.Ringint.a*cos(i)
Ringed.Ringint.cost=cos(t);Ringed.Ringint.sint=sin(t)
Ringed.block=blockFactor(Ringed.tau,i)
print BARL,"Test Transit Duration",RBAR
print "Orientation parameters:"
print TAB,"i = %.2f deg"%(i*RAD)
print TAB,"t = %.2f deg"%(t*RAD)
#========================================
#PROPERTIES
#========================================
RingedC=copyObject(Ringed)
ap=Ringed.ap/Ringed.Rstar
P=Ringed.Porb/HOUR
ip=Ringed.iorb
#========================================
#NOT RINGED TRANSIT DURATION (NUMERICAL)
#========================================
tcsp=contactTimes(NotRinged)
tT=(tcsp[-1]-tcsp[1])/HOUR
tF=(tcsp[-2]-tcsp[2])/HOUR
"""
updatePosition(NotRinged,tcsp[1])
plotEllipse(ax,NotRinged.Planet,color='b',
linestyle='-',linewidth=1)
updatePosition(NotRinged,tcsp[-1])
plotEllipse(ax,NotRinged.Planet,color='b',
linestyle='-',linewidth=1)
#"""
print TAB,"Transit duration numerical (not ringed):"
print 2*TAB,"Full: %.17e"%tT
print 2*TAB,"Total: %.17e"%tF
#========================================
#NOT RINGED TRANSIT DURATION (ANALYTICAL)
#========================================
print TAB,"Transit duration analytical (not ringed):"
xp=sqrt((1+NotRinged.Rp)**2-NotRinged.Borb**2)
xm=sqrt((1-NotRinged.Rp)**2-NotRinged.Borb**2)
"""
CP1=Figure(AR(-xp,Ringed.Borb),NotRinged.Rp,NotRinged.Rp,
1.0,0.0,'Contact 1')
CP4=Figure(AR(+xp,Ringed.Borb),NotRinged.Rp,NotRinged.Rp,
1.0,0.0,'Contact 4')
plotEllipse(ax,CP1,color='k',linestyle=':',linewidth=2)
plotEllipse(ax,CP4,color='k',linestyle=':',linewidth=2)
#"""
tT=P*arcsin(2*xp/(ap*sin(ip)))/(2*pi)
tF=P*arcsin(2*xm/(ap*sin(ip)))/(2*pi)
print 2*TAB,"Full: %.17e"%tT
print 2*TAB,"Total: %.17e"%tF
xp1=-xp
xp2=-xm
xp3=xm
xp4=xp
print 2*TAB,"Contact point:"
print 3*TAB,"xp1 = %.17e"%xp1
print 3*TAB,"xp2 = %.17e"%xp2
print 3*TAB,"xp3 = %.17e"%xp3
print 3*TAB,"xp4 = %.17e"%xp4
#========================================
#R |
codetry/mgsub | mgsub/forms.py | Python | mit | 2,627 | 0.000381 | import logging
import six
from django import forms
from django.conf import settings
from django.template import Context
from django.utils.translation import ugettext as _
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template, TemplateDoesNotExist
from mgsub.mailgun import MailgunList
logger = logging.getLogger('mgsub')
class SignupForm(forms.Form):
    """Mailing-list signup form backed by a Mailgun list.

    Validating the form also performs the subscription and, when enabled via
    the MGSUB_SEND_WELCOME setting, sends a welcome e-mail.
    """
    email = forms.EmailField()
    def __init__(self, list_email=None, *args, **kwargs):
        """Bind the form to the Mailgun list addressed by *list_email*."""
        self.mailinglist = MailgunList(list_email)
        super(SignupForm, self).__init__(*args, **kwargs)
    def is_valid(self):
        """Validate, then subscribe; a subscription failure is reported as a
        non-field error. Returns True only when every step succeeded."""
        if not super(SignupForm, self).is_valid():
            return False
        if not self.subscribe():
            self.add_error(None, 'There was a failure adding you to the mailing list')
            return False
        if getattr(settings, 'MGSUB_SEND_WELCOME', True):
            return self.send_welcome()
        return True
    def subscribe(self):
        """Add the cleaned e-mail to the list; log and return False on error."""
        try:
            return self.mailinglist.subscribe(self.cleaned_data['email'])
        except Exception, e:
            logger.error(e)
            return False
    def unsubscribe(self):
        """Remove the cleaned e-mail from the list; log and return False on error."""
        try:
            return self.mailinglist.unsubscribe(self.cleaned_data['email'])
        except Exception, e:
            logger.error(e)
            return False
    def send_welcome(self):
        """Render and send the welcome e-mail (plain text, plus HTML when the
        HTML template exists). Always returns True.

        NOTE(review): EmailMultiAlternatives(reply_to=...) is used below;
        confirm the targeted Django version supports that keyword.
        """
        email = self.cleaned_data['email']
        subject = _(getattr(settings, 'MGSUB_WELCOME_SUBJECT', 'Welcome!'))
        from_address = _(getattr(settings, 'MGSUB_WELCOME_FROM',
                                 settings.SERVER_EMAIL))
        reply_to = _(getattr(settings, 'MGSUB_WELCOME_REPLY_TO', None))
        welcome_template = getattr(settings, 'MGSUB_WELCOME_TEMPLATE',
                                   'mgsub/welcome.html')
        welcome_plain = getattr(settings, 'MGSUB_WELCOME_TEMPLATE_PLAIN',
                                'mgsub/welcome.txt')
        if reply_to is not None and isinstance(reply_to, six.string_types):
            # NOTE(review): the next line is corrupted in this copy (stray
            # " | "); it wraps the single address in a list — verify upstream.
            reply_to = | [reply_to]
        context = Context()
        welcome_txt = get_template(welcome_plain).render(context)
        attach_html = True
        try:
            welcome_html = get_template(welcome_template).render(context)
        except TemplateDoesNotExist:
            attach_html = False
        # NOTE(review): stray " | " at the start of the next line in this copy.
        | message = EmailMultiAlternatives(subject, welcome_txt, from_address,
                                        to=[email], reply_to=reply_to)
        if attach_html:
            message.attach_alternative(welcome_html, 'text/html')
        message.send()
        return True
|
snnn/tensorflow | tensorflow/python/ops/sparse_ops.py | Python | apache-2.0 | 90,456 | 0.002819 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Sparse Tensor Representation.
See the [Sparse Ops](https://tensorflow.org/api_guides/python/sparse_ops) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _convert_to_sparse_tensor(sp_input):
  """Coerce `sp_input` into a `SparseTensor`.

  Args:
    sp_input: `SparseTensor` or `SparseTensorValue`.

  Returns:
    `sp_input` as a `SparseTensor` (values are converted, tensors pass
    through unchanged).

  Raises:
    TypeError: if `sp_input` is neither `SparseTensor` nor
      `SparseTensorValue`.
  """
  if isinstance(sp_input, sparse_tensor.SparseTensorValue):
    return sparse_tensor.SparseTensor.from_value(sp_input)
  if isinstance(sp_input, sparse_tensor.SparseTensor):
    return sp_input
  raise TypeError("Input must be a SparseTensor.")
def _convert_to_sparse_tensors(sp_inputs):
"""Convert `sp_inputs` to `SparseTensor` objects and return them.
Args:
sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
objects.
Returns:
`sp_inputs` converted to `SparseTensor` objects.
Raises:
ValueError: if any item in `sp_inputs` is neither `SparseTensor` nor
`SparseTensorValue`.
"""
if isinstance(sp_inputs, list):
return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
if isinstance(sp_inputs, tuple):
return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
raise TypeError("Inputs must be a list or tuple.")
def _make_int64_tensor(value, name):
  """Return `value` as an int64 tensor, converting or casting as needed.

  Args:
    value: A Python integer or a `Tensor`.
    name: Name used for the converted tensor and for error messages.

  Returns:
    An int64 `Tensor` holding `value`.

  Raises:
    TypeError: if `value` is neither an integer nor a `Tensor`.
  """
  if isinstance(value, compat.integral_types):
    return ops.convert_to_tensor(value, name=name, dtype=dtypes.int64)
  if not isinstance(value, ops.Tensor):
    raise TypeError("{} must be an integer value".format(name))
  # Already a tensor: cast only when the dtype differs.
  return value if value.dtype == dtypes.int64 else math_ops.cast(
      value, dtypes.int64)
@tf_export("sparse.expand_dims")
def sparse_expand_dims(sp_input, axis=None, name=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `sp_input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `sp_input`'s shape. The dimension index `axis`
starts at zero; if you specify a negative number for `axis` it is counted
backwards from the end.
Args:
sp_input: A `SparseTensor`.
axis: 0-D (scalar). Specifies the dimension index at which to expand the
shape of `input`. Must be in the range `[-rank(sp_input) - 1,
rank(sp_input)]`.
name: The name of the output `SparseTensor`.
Returns:
A `SparseTensor` with the same data as `sp_input`, but its shape has an
additional dimension of size 1 added.
"""
rank = sp_input.dense_shape.get_shape()[0]
axis = -1 if axis is None else axis
with ops.name_scope(name, default_name="expand_dims", values=[sp_input]):
if isinstance(axis, compat.integral_types):
axis = ops.convert_to_tensor(axis, name="axis", dtype=dtypes.int32)
elif not isinstance(axis, ops.Tensor):
raise TypeError("axis must be an integer value in range [-rank(sp_input)"
" - 1, rank(sp_input)]")
# Convert axis to a positive value if it is negative.
axis = array_ops.where(axis >= 0, axis, axis + rank + 1)
# Create the new column of indices for the sparse tensor by slicing
# the indices and inserting a new column of indices for the new dimension.
column_size = array_ops.shape(sp_input.indices)[0]
new_index = array_ops.zeros([column_size, 1], dtype=dtypes.int64)
indices_before = array_ops.slice(sp_input.indices, [0, 0], [-1, axis])
indices_after = array_ops.slice(sp_input.indices, [0, axis], [-1, -1])
indices = array_ops.concat(
[indices_before, new_index, indices_after], axis=1)
# Create the new dense shape by splicing the tensor [1] in the correct
# dimension of the existing shape.
shape_before = array_ops.slice(sp_input.dense_shape, [0], [axis])
shape_after = array_ops.slice(sp_input.dense_shape, [axis], [-1])
new_shape = ops.convert_to_tensor([1], name="new_shape", dtype=dtypes.int64)
shape = array_ops.concat([shape_before, new_sh | ape, shape_after], axis=0)
# Create the output sparse tensor.
return sparse_tensor.SparseTensor(
indices=indices, values=sp_input.values, dense_shape=shape)
@tf_export("sparse.eye")
def sparse_eye(num_rows,
num_column | s=None,
dtype=dtypes.float32,
name=None):
"""Creates a two-dimensional sparse tensor with ones along the diagonal.
Args:
num_rows: Non-negative integer or `int32` scalar `tensor` giving the number
of rows in the resulting matrix.
num_columns: Optional non-negative integer or `int32` scalar `tensor` giving
the number of columns in the resulting matrix. Defaults to `num_rows`.
dtype: The type of element in the resulting `Tensor`.
name: A name for this `Op`. Defaults to "eye".
Returns:
A `SparseTensor` of shape [num_rows, num_columns] with ones along the
diagonal.
"""
with ops.name_scope(name, default_name="eye", values=[num_rows, num_columns]):
num_rows = _make_int64_tensor(num_rows, "num_rows")
num_columns = num_rows if num_columns is None else _make_int64_tensor(
num_columns, "num_columns")
# Create the sparse tensor.
diag_size = math_ops.minimum(num_rows, num_columns)
diag_range = math_ops.range(diag_size, dtype=dtypes.int64)
return sparse_tensor.SparseTensor(
indices=array_ops.stack([diag_range, diag_range], axis=1),
values=array_ops.ones(diag_size, dtype=dtype),
dense_shape=[num_rows, num_columns])
# pylint: disable=protected-access
@tf_export("sparse.concat", "sparse_concat")
@deprecation.deprecated_endpoints("sparse_concat")
@deprecation.deprecated_args(
None, "concat_dim is deprecated, use axis instead", "concat_dim")
def sparse_concat(axis,
sp_inputs,
name=None,
expand_nonconcat_dim=False,
concat_dim=None):
"""Concatenates a list of `SparseTensor` along the specified dimension.
Concatenation is with respect to the dense versions of each sparse input.
It is assumed that each inputs is a `SparseTensor` whose elements are ordered
along increasing dimension number.
If expand_nonconcat_dim is False, all inputs' shapes must match, except for
the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are
allowed to vary among all inputs.
The `indices`, `values`, and `shapes` lists must have the same length.
If expand_nonconcat_dim is False, then the output shape is identical to the
inputs', except along the concat dimension, where it is the sum o |
whatevsz/autobackup | autobackup/cron.py | Python | gpl-3.0 | 17,954 | 0.000279 | # -*- encoding: utf-8 -*-
# Copyright (c) 2013 Hannes Körber <hannes.koerber@gmail.com>
#
# T | his file is part of autobackup.
#
# autoba | ckup is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autobackup is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module implements a cron scheduling flavor.
"""
import datetime
# Allowed value ranges for the six cron fields, in order:
# (minute, hour, day_of_month, month, year, weekday).
_ranges = (range(60), range(24), range(1, 32), range(1, 13),
           range(1900, 3000), range(1, 8)) # Creating year 3000 problem
# This list contains the indices of the fields that should be considered for
# all operations of that module. The value range(0,6) means that all fields
# should be considered, which is the desired behaviour. For now, the last
# field (week) is ignored. When changing this value, look at the methods
# _datetime_to_tuple() and _tuple_to_datetime() too to read and write datetime
# objects correctly.
_check_range = range(0, 5)
# Mapping of symbolic names to integers for every field, so you can for
# example use JUN-OCT instead of 6-10 in the "month" field.
_name_mapping = ({},
                 {},
                 {},
                 {"JAN": 1, "FEB": 2, "MAR": 3, "APR": 4, "MAY": 5,
                  "JUN": 6, "JUL": 7, "AUG": 8, "SEP": 9, "OCT": 10,
                  "NOV": 11, "DEC": 12},
                 {},
                 {"MON": 1, "TUE": 2, "WED": 3, "THU": 4, "FRI": 5, "SAT": 6,
                  "SUN": 7})
class Cronjob(object):
"""
Represents a single cronjob schedule. It will not execute any code, but will
provice methods to poll information about that cronjob in relation to a
specific time, for example whether the cronjob elapsed between to different
times and so on.
Look here (https://en.wikipedia.org/wiki/Cron#CRON_expression) for an
deeper insight into the formatting of a cron expression. This class does not
support all formatting options mentioned in this article, and the order of
the fields differ.
Fields:
<minute> <hour> <day_of_month> <month> <year> [<weekday>]
Here are the possible values for all fields:
minute : 0..59
hour : 0..23
day_of_month: 1..31
month : 1..12, JAN..DEC
year: : 1900..3000
weekday : 0..7 (1 = monday ... 7 = sunday, 0 = sunday), MON..SUN
The following matching expressions are supported:
- <integer> to match <integer>
- '<start>-<end>' to match the range from <start> (inclusive) to <end>
(inclusive).
- '*' to match all possible values for the given position.
- '/<step>' as the last specifier to only match all values of the be
preceding range that can be reached by starting at the first matched value
and going steps of size <step>.
- ',' to separate different expressions, the union of all given expressions
will be matched.
Examples:
0 * * * * * matches the beginning of every hour.
3,*/5 1,4 * * * * matches the third and every fifth minute beginning at 0 of
the first and forth hour everyday..
3-59/5 2,4 * * * * does the same as above, apart from maching the third and
every fifth minute starting at the second one instead of starting at 0.
IMPORTANT: <weekday> is not yet supported and can be omitted. For all
comparisons in this class, the weekday information is ignored.
"""
    def __init__(self, schedule_string):
        """Parse *schedule_string* (cron syntax, see class docstring) and keep
        both the raw string and the expanded per-field value sets used by the
        matching methods.
        """
        self.cronstring = schedule_string
        self.schedule = _parse_cronjob_string(schedule_string)
def matches(self, date_time):
"""
Determines whether a given datetime has a match in the cronjob, that
means that there is a cronjob occurence now or in the past that
matches the datetime. Only the year, month, day, hour and minute
values of the datetime are used to determine a match, all other values
are ignored.
:param date_time: The datetime to check.
:type date_time: datetime instance
:returns: True if the datetime matches the cronjob, False otherwise.
:rtype: bool
"""
d_schedule = _datetime_to_tuple(date_time)
for i in _check_range:
if not d_schedule[i] in self.schedule[i]:
return False
return True
def has_occured_between(self, date_time_1, date_time_2):
"""
Determines whether the cronjob has occured between two datetimes
(inclusive), what means that there was any match in this period. If
date_time_1 and date_time_2 represent the same point in time, the
behaviour is identical to matches(date_time_1)
:param date_time_1: The datetime determining the start of the period.
:type date_time_1: datetime instance
:param date_time_2: The datetime determining the end of the period.
:type date_time_2: datetime instance
:returns: True if the cronjob has occured between the two datetimes,
False otherwise.
:rtype: bool
:raises: ValueError if date_time_1 is older than date_time_2
"""
if not date_time_1 <= date_time_2:
raise ValueError(
"date_time_1 has to be older than or equal to date_time_2.")
min_val = self.get_min_time()
max_val = self.get_max_time()
if date_time_1 < min_val:
date_time_1 = min_val
if date_time_2 > max_val:
date_time_2 = max_val
if date_time_2 < min_val:
return False
if date_time_1 > max_val:
return False
if ((date_time_1 < min_val and date_time_2 < min_val) or
(date_time_1 > max_val and date_time_2 > max_val)):
return False
most_recent_occurence = self.get_most_recent_occurence(date_time_2)
return most_recent_occurence >= date_time_1
def has_occured_since(self, date_time):
"""
Determines whether the cronjob has ever occured since date_time
(inclusive), what means that there was any match in this period. If
date_time represents now, the bahaviour is identical to
matches(date_time)
:param date_time: The datetime in the past to check against.
:type date_time: datetime instance
:returns: True if the cronjob has occured since date_time, False
otherwise.
:raises: ValueError if date_time is in the future.
"""
return self.has_occured_between(date_time, datetime.datetime.now())
def get_max_time(self):
"""
Determines the last possible datetime at which the cronjob occurs.
:returns: The last possible datetime at which the cronjob occurs.
:rtype: datetime
"""
return _tuple_to_datetime([max(val) for val in self.schedule])
def get_min_time(self):
"""
Determines the first possible datetime at which the cronjob occurs.
:returns: The first possible datetime at which the cronjob occurs.
:rtype: datetime
"""
return _tuple_to_datetime([min(val) for val in self.schedule])
def get_most_recent_occurence(self, date_time=None):
"""
Determines the most recent occurence of the cronjob relative to a
specific datetime.
:param d: The datetime relative to which to determine the most
recent occurence. If None is given, datetime.datetime.now() is used
instead.
:type d: datetime instance
:returns: The most recent occurence of the cronjob relative to d.
:rtype: datetime
:raises: ValueError if d is older than the first possible occurence
of the cronjob.
"""
if not date_time:
date_time = datetime.datetime.now()
|
googleapis/python-bigquery | samples/magics/_helpers.py | Python | apache-2.0 | 823 | 0.001215 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the | License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def strip_region_tags(sample_text):
    """Drop blank lines and region-tag lines (any line containing "# [",
    e.g. "# [START ...]" / "# [END ...]") from *sample_text* and return the
    remaining lines joined with newlines.
    """
    kept_lines = [
        line
        for line in sample_text.split("\n")
        if line and "# [" not in line
    ]
    return "\n".join(kept_lines)
|
shearern/python-text-objects | src/txtobjs_test/servers_use_case/ServiceSchema.py | Python | gpl-2.0 | 519 | 0.001927 | from txtobjs.schema.TextO | bjectSchema import TextObjectSchema
from txtobjs.schema.SimpleTextField import SimpleTextField
from txtobjs.schema.SubObjectDict import SubObjectDict
from txtobjs.schema.ValueListField import ValueListField
class ServiceSchema(TextObjectSchema):
    """Schema describing a 'Service' text object (name, role, hosts, firewall)."""
    # Text-object type name this schema applies to.
    text_class = 'Service'
    # NOTE(review): `SubObjectDictKey` and `ObjIdList` are not imported above,
    # and `ServiceFirewallSchema` is not defined in this module — verify these
    # names resolve at import time.
    name = SubObjectDictKey()
    role_name = SimpleTextField('role_name')
    # NOTE(review): the next line is corrupted in this copy ("ObjIdLi | st");
    # presumably `ObjIdList('hosts', text_class='Machine')` — verify upstream.
    hosts = ObjIdLi | st('hosts', text_class='Machine')
    firewall = SubObjectDict('firewall', schema=ServiceFirewallSchema())
|
Mobleyta/GasChromino | PythonCode/serial_ports.py | Python | gpl-3.0 | 1,189 | 0 | #!/usr/bin/env python
"""This utility script was adopted from StackExchange:
http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
Adopted for use with arduino_GC connection project
"""
import sys
import glob
import serial
def serial_ports():
    """ Lists serial port names

        :raises EnvironmentError:
            On unsupported or unknown platforms
        :returns:
            A list of the serial ports available on the system
    """
    # Candidate device names depend on the host platform.
    if sys.platform.startswith('win'):
        candidates = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        # NOTE(review): the docstring of the original snippet says this glob
        # excludes the current terminal "/dev/tty"; this adaptation globs
        # "/dev/cu*" instead — confirm that is intentional.
        candidates = glob.glob('/dev/cu[A-Za-z]*')
    elif sys.platform.startswith('darwin'):
        candidates = glob.glob('/dev/cu.*')
    else:
        raise EnvironmentError('Unsupported platform')
    available = []
    for port in candidates:
        # A port is "available" when it can be opened (and closed) cleanly.
        try:
            connection = serial.Serial(port)
            connection.close()
            available.append(port)
        except (OSError, serial.SerialException):
            pass
    return available
# Allow quick manual inspection: print the available ports when run directly.
if __name__ == '__main__':
    print(serial_ports())
|
MAECProject/python-maec | maec/bundle/behavior.py | Python | bsd-3-clause | 4,501 | 0.00511 | # MAEC Behavior Class
# Copyright (c) 2018, The MITRE Corporation
# All rights reserved
from mixbox import fields
from mixbox import idgen
import maec
from . import _namespace
import maec.bindings.maec_bundle as bundle_binding
from cybox.core.action_reference import ActionReference
from cybox.common.measuresource import MeasureSource
from cybox.common.platform_specification import PlatformSpecification
from cybox.objects.code_object import Code
class BehavioralActionEquivalenceReference(maec.Entity):
    """Reference (by idref) to an action equivalence, with an ordering hint."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionEquivalenceReferenceType
    _namespace = _namespace
    action_equivalence_idref = fields.TypedField('action_equivalence_idref')
    behavioral_ordering = fields.TypedField('behavioral_ordering')
class BehavioralActionReference(ActionReference):
    """CybOX ActionReference extended with a behavioral ordering attribute."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionReferenceType
    _namespace = _namespace
    behavioral_ordering = fields.TypedField('behavioral_ordering')
class BehavioralAction(maec.Entity):
    """An action carrying its position within a behavior's ordering."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionType
    _namespace = _namespace
    behavioral_ordering = fields.TypedField('behavioral_ordering')
class BehavioralActions(maec.Entity):
    """Container for the actions composing a behavior: inline actions,
    action collections, action references, and equivalence references."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionsType
    _namespace = _namespace
    #TODO: action_collection.type_ is set below to avoid circular import.
    action_collection = fields.TypedField('Action_Collection', None, multiple=True)
    action = fields.TypedField('Action', BehavioralAction, multiple=True)
    action_reference = fields.TypedField('Action_Reference', BehavioralActionReference, multiple=True)
    # NOTE(review): the next line is corrupted in this copy (stray " | " inside
    # the field name); upstream reads 'Action_Equivalence_Reference' — verify.
    action_equivalence_reference = fields.TypedField('Action_Equivalence_R | eference', BehavioralActionEquivalenceReference, multiple=True)
class PlatformList(maec.EntityList):
    """List of CybOX PlatformSpecification entries."""
    _binding = bundle_binding
    _binding_class = bundle_binding.PlatformListType
    _namespace = _namespace
    platform = fields.TypedField("Platform", PlatformSpecification, multiple=True)
class CVEVulnerability(maec.Entity):
    """A vulnerability identified by its CVE id, with optional description."""
    _binding = bundle_binding
    _binding_class = bundle_binding.CVEVulnerabilityType
    # NOTE(review): stray " | " at the start of the next line in this copy.
    | _namespace = _namespace
    cve_id = fields.TypedField('cve_id')
    description = fields.TypedField('Description')
class Exploit(maec.Entity):
    """An exploit, optionally tied to CVE/CWE ids and targeted platforms."""
    _binding = bundle_binding
    _binding_class = bundle_binding.ExploitType
    _namespace = _namespace
    known_vulnerability = fields.TypedField('known_vulnerability')
    cve = fields.TypedField('CVE', CVEVulnerability)
    cwe_id = fields.TypedField('CWE_ID', multiple=True)
    targeted_platforms = fields.TypedField('Targeted_Platforms', PlatformList)
class BehaviorPurpose(maec.Entity):
    """Why a behavior exists; may reference an exploited vulnerability."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehaviorPurposeType
    _namespace = _namespace
    description = fields.TypedField('Description')
    vulnerability_exploit = fields.TypedField('Vulnerability_Exploit', Exploit)
class AssociatedCode(maec.EntityList):
    """List of CybOX Code snippets associated with a behavior."""
    _binding = bundle_binding
    _binding_class = bundle_binding.AssociatedCodeType
    _namespace = _namespace
    code_snippet = fields.TypedField("Code_Snippet", Code, multiple=True)
class Behavior(maec.Entity):
    """A MAEC Behavior: a unit of malware functionality composed of actions,
    with an optional purpose, discovery method, and associated code."""
    _binding = bundle_binding
    _binding_class = bundle_binding.BehaviorType
    _namespace = _namespace
    id_ = fields.TypedField('id')
    ordinal_position = fields.TypedField('ordinal_position')
    status = fields.TypedField('status')
    duration = fields.TypedField('duration')
    purpose = fields.TypedField('Purpose', BehaviorPurpose)
    description = fields.TypedField('Description')
    discovery_method = fields.TypedField('Discovery_Method', MeasureSource)
    action_composition = fields.TypedField('Action_Composition', BehavioralActions)
    associated_code = fields.TypedField('Associated_Code', AssociatedCode)
    #relationships = fields.TypedField('Relationships', BehaviorRelationshipList) # TODO: implement
    def __init__(self, id = None, description = None):
        """Create a Behavior; a prefixed id is generated when none is given."""
        super(Behavior, self).__init__()
        if id:
            self.id_ = id
        else:
            # Fall back to an auto-generated id like "...behavior-...".
            self.id_ = idgen.create_id(prefix="behavior")
        self.description = description
# Imported here (after the class definitions) to avoid a circular import with
# maec.bundle.bundle; fills in the field type deferred above (see TODO).
from maec.bundle.bundle import ActionCollection
BehavioralActions.action_collection.type_ = ActionCollection
|
Tan0/ironic | ironic/tests/test_raid.py | Python | apache-2.0 | 10,900 | 0 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from ironic.common import exception
from ironic.common import raid
from ironic.drivers import base as drivers_base
from ironic.tests import base
from ironic.tests.db import base as db_base
from ironic.tests.objects import utils as obj_utils
from ironic.tests import raid_constants
class ValidateRaidConfigurationTestCase(base.TestCase):
    """Exercises raid.validate_configuration against the bundled JSON schema
    with one valid configuration and a series of deliberately malformed ones
    (each expected to raise InvalidParameterValue)."""
    def setUp(self):
        # Load the driver's RAID-config JSON schema once per test.
        with open(drivers_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
            self.schema = json.load(raid_schema_fobj)
        super(ValidateRaidConfigurationTestCase, self).setUp()
    def test_validate_configuration_okay(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_OKAY)
        raid.validate_configuration(
            raid_config, raid_config_schema=self.schema)
    def test_validate_configuration_no_logical_disk(self):
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          {},
                          raid_config_schema=self.schema)
    def test_validate_configuration_zero_logical_disks(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_NO_LOGICAL_DISKS)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_no_raid_level(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_NO_RAID_LEVEL)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_invalid_raid_level(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_INVALID_RAID_LEVEL)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_no_size_gb(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_NO_SIZE_GB)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_max_size_gb(self):
        # "MAX" as size_gb is valid and must not raise.
        raid_config = json.loads(raid_constants.RAID_CONFIG_MAX_SIZE_GB)
        raid.validate_configuration(raid_config,
                                    raid_config_schema=self.schema)
    def test_validate_configuration_invalid_size_gb(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_INVALID_SIZE_GB)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    # NOTE(review): the next def line is corrupted in this copy
    # ("test_v | alidate..."); upstream names it
    # test_validate_configuration_invalid_is_root_volume — verify.
    def test_v | alidate_configuration_invalid_is_root_volume(self):
        raid_config_str = raid_constants.RAID_CONFIG_INVALID_IS_ROOT_VOL
        raid_config = json.loads(raid_config_str)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          # NOTE(review): stray " | " below in this copy.
                          | raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_invalid_multiple_is_root_volume(self):
        raid_config_str = raid_constants.RAID_CONFIG_MULTIPLE_IS_ROOT_VOL
        raid_config = json.loads(raid_config_str)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_invalid_share_physical_disks(self):
        raid_config_str = raid_constants.RAID_CONFIG_INVALID_SHARE_PHY_DISKS
        raid_config = json.loads(raid_config_str)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_invalid_disk_type(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_INVALID_DISK_TYPE)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_invalid_int_type(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_INVALID_INT_TYPE)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_invalid_number_of_phy_disks(self):
        raid_config_str = raid_constants.RAID_CONFIG_INVALID_NUM_PHY_DISKS
        raid_config = json.loads(raid_config_str)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_invalid_physical_disks(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_INVALID_PHY_DISKS)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_additional_property(self):
        raid_config = json.loads(raid_constants.RAID_CONFIG_ADDITIONAL_PROP)
        self.assertRaises(exception.InvalidParameterValue,
                          raid.validate_configuration,
                          raid_config,
                          raid_config_schema=self.schema)
    def test_validate_configuration_custom_schema(self):
        # A caller-supplied schema replaces the bundled one.
        raid_config = json.loads(raid_constants.CUSTOM_SCHEMA_RAID_CONFIG)
        schema = json.loads(raid_constants.CUSTOM_RAID_SCHEMA)
        raid.validate_configuration(raid_config,
                                    raid_config_schema=schema)
class RaidPublicMethodsTestCase(db_base.DbTestCase):
def test_get_logical_disk_properties(self):
with open(drivers_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
schema = json.load(raid_schema_fobj)
logical_disk_properties = raid.get_logical_disk_properties(schema)
self.assertIn('raid_level', logical_disk_properties)
self.assertIn('size_gb', logical_disk_properties)
self.assertIn('volume_name', logical_disk_properties)
self.assertIn('is_root_volume', logical_disk_properties)
self.assertIn('share_physical_disks', logical_disk_properties)
self.assertIn('disk_type', logical_disk_properties)
self.assertIn('interface_type', logical_disk_properties)
self.assertIn('number_of_physical_disks', logical_disk_properties)
self.assertIn('controller', logical_disk_properties)
self.assertIn('physical_disks', logical_disk_properties)
def test_get_logical_disk_properties_custom_schema(self):
raid_schema = json.loads(raid_constants.CUSTOM_RAID_SCHEMA)
logical_disk_properties = raid.get_logical_disk_properties(
raid_config_schema=raid_schema)
sel |
spacy-io/spaCy | spacy/lang/id/syntax_iterators.py | Python | mit | 1,515 | 0.00132 | from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
    """
    Detect base noun phrases from a dependency parse. Works on both Doc and Span.
    """
    # NOTE(review): this generator yields (start, end, label) int triples, not
    # Span objects; the Iterator[Span] annotation looks aspirational — verify.
    # NOTE(review): the next non-comment line is corrupted in this copy (stray
    # " | " inside the list literal); upstream is a plain list — verify.
    # fmt: off
    labels = ["nsubj", "nsubj:pass", | "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
    # fmt: on
    doc = doclike.doc # Ensure works on both Doc and Span.
    if not doc.has_annotation("DEP"):
        raise ValueError(Errors.E029)
    # Resolve label strings to hash ids once, up front.
    np_deps = [doc.vocab.strings[label] for label in labels]
    # NOTE(review): corrupted here as well ("conj | ="); upstream reads
    # `conj = doc.vocab.strings.add("conj")` — verify.
    conj | = doc.vocab.strings.add("conj")
    np_label = doc.vocab.strings.add("NP")
    prev_end = -1
    for i, word in enumerate(doclike):
        if word.pos not in (NOUN, PROPN, PRON):
            continue
        # Prevent nested chunks from being produced
        if word.left_edge.i <= prev_end:
            continue
        if word.dep in np_deps:
            prev_end = word.right_edge.i
            yield word.left_edge.i, word.right_edge.i + 1, np_label
        elif word.dep == conj:
            # Walk up the conjunct chain to the leftmost coordinated head.
            head = word.head
            while head.dep == conj and head.head.i < head.i:
                head = head.head
            # If the head is an NP, and we're coordinated to it, we're an NP
            if head.dep in np_deps:
                prev_end = word.right_edge.i
                yield word.left_edge.i, word.right_edge.i + 1, np_label
yield word.left_edge.i, word.right_edge.i + 1, np_label
# Registry of the syntax iterators this module exposes, keyed by name.
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
|
chakki-works/elephant_sense | scripts/data/normalization.py | Python | apache-2.0 | 477 | 0.004376 | # - | *- coding: utf-8 -*-
import re
import unicodedata
def lower_text(text):
    """Return a lowercase copy of *text*."""
    lowered = text.lower()
    return lowered
def normalize_unicode(text, form='NFKC'):
    """Return *text* normalized to the given Unicode normal form.

    NFKC (the default) folds compatibility characters, e.g. full-width
    letters become their ASCII equivalents.
    """
    return unicodedata.normalize(form, text)
def normalize_number(text):
    """Collapse every run of consecutive digits in *text* into a single '0'.

    Used to map numeric noise (counts, ids, dates, ...) onto one canonical
    token before further text processing.
    """
    # Each maximal digit sequence is replaced as a whole, so "2017" -> "0".
    # (Previously this function also carried a dead commented-out
    # implementation inside a string literal; removed.)
    return re.sub(r'\d+', '0', text)
|
clicumu/uspto-tools | scripts/zip_to_sqlite.py | Python | mit | 10,428 | 0.000575 | """ Patent ZIP-archive to sqlite.
This script parses and transfer patents from the stupid text-format
in bulk_download.py to a query-friendly sqlite datbase.
"""
import collections
import sqlite3
import re
import os
import itertools
import datetime
import numpy as np
import hashlib
# Schema-creation SQL shipped next to this script; executed by
# connect_and_init_db() on every new connection.
with open(os.path.join(os.path.dirname(__file__), 'create_db.sql')) as f:
    INIT_DB = f.read()
# Upsert of the per-patent metadata row.
INSERT_PATENT = "insert or replace into patentdata values (?, ?, ?, ?, ?, ?)"
# Ensures a patent number row exists before dependent rows reference it.
INSERT_IGNORE_PNUM = "insert or ignore into patents (PNum) values (?)"
# Full-text row; the text-type id is resolved by name via the subquery.
INSERT_FULLTEXT = """ insert into fulltexts values (
    ?,
    (select Id from texttypes where Name=?),
    ?
)
"""
INSERT_CITATION = "insert into citations values (?, ?)"
def connect_and_init_db(path):
    """Open the sqlite database at *path* and apply the schema script.

    Parameters
    ----------
    path : str
        Path to sqlite-database file.

    Returns
    -------
    sqlite3.Connection, sqlite3.Cursor
    """
    connection = sqlite3.connect(path)
    cursor = connection.cursor()
    # Foreign-key enforcement is off by default in sqlite; enable it
    # per-connection before creating the schema.
    cursor.execute('PRAGMA foreign_keys = ON')
    cursor.executescript(INIT_DB)
    return connection, cursor
def load_patents(file):
    """Read and split patent file.

    Parameters
    ----------
    file : str, file-like
        Path to, or file-handle to file containing patents in stupid
        text format.

    Returns
    -------
    list[str]
        List of patents in stupid text-format.
    """
    if not isinstance(file, str):
        contents = file.read()
    else:
        # A string argument is first treated as a path; if it cannot be
        # opened it is assumed to already hold the file contents.
        try:
            with open(file, encoding='utf-8') as handle:
                contents = handle.read()
        except OSError:
            contents = file
    if isinstance(contents, bytes):
        contents = contents.decode('utf-8')
    # Respect Windows line endings when choosing the record separator.
    separator = 'PATENT\r\n' if '\r\n' in contents else 'PATENT\n'
    patents = contents.split(separator)
    # A leading separator yields one empty entry at the front; drop it.
    return patents[1:] if not patents[0] else patents
def parse_patent(patent_str):
    """Parse a single patent in stupid text-format in dict.

    Parameters
    ----------
    patent_str : str
        A single patent in stupid text-format.

    Returns
    -------
    dict
    """
    field_names = (
        'PATENT NUMBER',
        'SERIES CODE',
        'APPLICATION NUMBER',
        'APPLICATION TYPE',
        'APPLICATION DATE',
        'TITLE',
        'ABSTRACT',
        'BRIEF SUMMARY',
        'DESCRIPTION',
        'CLAIMS',
        'REFERENCES',
        'DESIGN CLAIMS',
    )
    prefixes = tuple('{}: '.format(name) for name in field_names)
    sections = collections.defaultdict(list)
    section = None
    for line in patent_str.splitlines():
        # A line starting with "<FIELD>: " opens a new section; any
        # other line is a continuation of the current one.
        if line.startswith(prefixes):
            section, payload = line.split(': ', 1)
        else:
            payload = line
        sections[section].append(payload)
    parsed = {name: '\n'.join(chunks) for name, chunks in sections.items()}
    # References are stored on one line, separated by semicolons.
    parsed['REFERENCES'] = parsed.get('REFERENCES', '').strip().split(';')
    return parsed
def insert_patents(patents, cursor, root_dir=None):
    """Insert parsed patents into database.

    Parameters
    ----------
    patents : list[dict[str, str]]
        Parsed patent dicts.
    cursor : sqlite3.Cursor
        Database cursor.
    root_dir : str, optional
        Directory under which full-text files are written.
    """
    values = []
    fulltexts = []
    references = []
    for i, patent in enumerate(patents):
        try:
            field_type, p_num, app_num, series_code, date = \
                _get_patent_info(patent)
        except Exception:
            logging.exception('Failed parse. Skips patent {}.'.format(i))
            continue

        try:
            # Very rarely, application numbers are HUGE.  Max size of a
            # sqlite INTEGER is 64 bits; if it does not fit, store NULL.
            app_num = int(np.int64(app_num))
        except OverflowError:
            logging.warning(('Overflowing application number '
                             'of patent {}. Set to null.').format(p_num))
            app_num = None

        values.append((
            p_num,
            field_type,
            app_num,
            None if series_code == 'None' else series_code,
            date,
            patent['TITLE'],
        ))
        fulltexts.extend(_get_fulltexts(patent, p_num))
        references.extend(_get_references(patent, p_num))

    # Patent rows must exist before patentdata/citations reference them.
    cursor.executemany(INSERT_IGNORE_PNUM, [(row[0],) for row in values])
    cursor.executemany(INSERT_PATENT, values)
    save_fulltexts(cursor, fulltexts, root_dir)
    # Cited patents may not have a record yet; create stub rows for them.
    referred = set((cited,) for _, cited in references)
    cursor.executemany(INSERT_IGNORE_PNUM, referred)
    cursor.executemany(INSERT_CITATION, references)
def save_fulltexts(cursor, fulltexts, root_dir=None):
    """Write full-text bodies to disk and record their paths in the db.

    Files are spread over a two-level directory tree derived from the
    md5 of the patent number, keeping directory sizes manageable.
    """
    rows = []
    for pnum, key, body in fulltexts:
        digest = hashlib.md5(str(pnum).encode('ascii')).hexdigest()
        top_dir = int(digest[:16], 16) % 100
        bottom_dir = int(digest[16:], 16) % 100
        path = '{}/{}/{}/{}.txt'.format(key, top_dir, bottom_dir, pnum)
        if root_dir is not None:
            path = '{}/{}'.format(root_dir, path)
        # The db stores '/'-separated paths; translate for the local OS.
        os_path = os.path.join(*path.split('/'))
        os.makedirs(os.path.dirname(os_path), exist_ok=True)
        with open(os_path, 'w') as f:
            f.write(body)
        rows.append((pnum, key, path))
    cursor.executemany(INSERT_FULLTEXT, rows)
def _get_references(patent, patent_number):
    """Return (citing, cited) number pairs for every parsable reference."""
    cited = []
    for raw_ref in patent['REFERENCES']:
        try:
            _, ref_num = _parse_patent_number(raw_ref)
        except ValueError:
            # Unparsable references are silently dropped.
            continue
        cited.append(ref_num)
    return [(patent_number, c) for c in cited]
def _get_fulltexts(patent, patent_number):
fulltexts = list()
for key in ('DESCRIPTION', 'ABSTRACT', 'BRIEF SUMMARY', 'CLAIMS'):
if patent.get(key, 'None') != 'None':
fulltexts.append((patent_number, key, patent[key]))
if patent.get('DESIGN CLAIMS', 'None') != 'None':
fulltexts.append((patent_number, 'CLAIMS', patent['DESIGN CLAIMS']))
return fulltexts
def _get_patent_info(patent):
    """Extract (field_type, patent_number, application_number,
    series_code, ordinal_date) from a parsed patent dict."""
    raw_p_num = patent['PATENT NUMBER']
    field_type, p_num = _parse_patent_number(raw_p_num)
    # Keep only the digits of the application number.
    app_num = re.sub(r'[^0-9]', '', patent['APPLICATION NUMBER'])
    series_code = patent['SERIES CODE']
    raw_date = patent['APPLICATION DATE']
    date = None
    if raw_date != 'None':
        try:
            date = _safe_date(raw_date, raw_p_num)
        except ValueError:
            logging.warning(
                'Failed to parse date of patent: {}'.format(raw_p_num))
    return field_type, p_num, app_num, series_code, date
def _safe_date(date_str, pnum):
try:
date = datetime.datetime.strptime(date_str, '%Y%m%d')
except ValueError as e:
if str(e) == 'day is out of range for month':
# Some dates has been wrongly entered into the original database
# meaning that some dates does not exist. If non-existing day,
# decrement day until exists.
new_date = '{}{:02d}'.format(date_str[:-2], int(date_str[-2:]) - 1)
logging.warning('Day out of range, decrements date (Pnum {})'.format(pnum))
return _safe_date(new_date, pnum)
else:
if date_str.endswith('00'):
# Some days are entered as double zero, set date to first of
# month instead.
logging.warning('Day 00, set day to 01 (Pnum {})'.format(pnum))
return _safe_date(date_str[:-2] + '01', pnum)
else:
raise e
return date.toordinal()
def _parse_patent_number(raw_p_num):
if raw_p_num.isdigit():
p_num = int(raw_p_num)
field_type = None
else:
try:
field_type, p_num = re.match(r'([A-z]+)(\d+)', raw_p_num).groups()
p_num = int(p_num)
except (TypeError, AttributeError):
raise ValueError('bad patent-number: {}'.format(raw_p_num))
return field_type, p_num
def _make_parser():
import argparse
parser = argparse.ArgumentParser(__doc__)
parser.add_argument('archive',
help='Zip archives containing patent text-files.')
default_output = 'patents.db' |
oleg-cherednik/hackerrank | Python/Strings/Mutations/solution.py | Python | apache-2.0 | 257 | 0.003891 | #!/bin/python3
def mutate_string(string, position, character):
    """Return *string* with the character at *position* replaced by *character*."""
    head, tail = string[:position], string[position + 1:]
    return head + character + tail
if __name__ == '__main__':
    # First stdin line: the string; second line: "<index> <character>".
    s = input()
    i, c = input().split()
    print(mutate_string(s, int(i), c))
|
badp/ganeti | qa/qa_instance.py | Python | gpl-2.0 | 44,869 | 0.01023 | #
#
# Copyright (C) 2007, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Instance related QA tests.
"""
import os
import re
import time
from ganeti import utils
from ganeti import constants
from ganeti import query
from ganeti import pathutils
import qa_config
import qa_utils
import qa_error
from qa_utils import AssertCommand, AssertEqual
from qa_utils import InstanceCheck, INST_DOWN, INST_UP, FIRST_ARG, RETURN_VALUE
from qa_instance_utils import CheckSsconfInstanceList, \
CreateInstanceDrbd8, \
CreateInstanceByDiskTemplate, \
CreateInstanceByDiskTemplateOneNode, \
GetGenericAddParameters
def _GetDiskStatePath(disk):
return "/sys/block/%s/device/state" % disk
def GetInstanceInfo(instance):
  """Return information about the actual state of an instance.
  @type instance: string
  @param instance: the instance name
  @return: a dictionary with the following keys:
      - "nodes": instance nodes, a list of strings
      - "volumes": instance volume IDs, a list of strings
      - "drbd-minors": DRBD minors used by the instance, a dictionary where
        keys are nodes, and values are lists of integers (or an empty
        dictionary for non-DRBD instances)
      - "disk-template": instance disk template
      - "storage-type": storage type associated with the instance disk template
  """
  # One node name, optionally followed by a parenthesised group annotation.
  node_elem = r"([^,()]+)(?:\s+\([^)]+\))?"
  # re_nodelist matches a list of nodes returned by gnt-instance info, e.g.:
  #  node1.fqdn
  #  node2.fqdn,node3.fqdn
  #  node4.fqdn (group mygroup, group UUID 01234567-abcd-0123-4567-0123456789ab)
  # FIXME This works with no more than 2 secondaries
  re_nodelist = re.compile(node_elem + "(?:," + node_elem + ")?$")
  info = qa_utils.GetObjectInfo(["gnt-instance", "info", instance])[0]
  nodes = []
  for nodeinfo in info["Nodes"]:
    if "primary" in nodeinfo:
      nodes.append(nodeinfo["primary"])
    elif "secondaries" in nodeinfo:
      nodestr = nodeinfo["secondaries"]
      if nodestr:
        m = re_nodelist.match(nodestr)
        if m:
          # Drop the unmatched optional second group (None entries).
          nodes.extend(filter(None, m.groups()))
        else:
          # Fall back to the raw string if the list does not parse.
          nodes.append(nodestr)
  disk_template = info["Disk template"]
  if not disk_template:
    raise qa_error.Error("Can't get instance disk template")
  storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
  # Matches one DRBD endpoint description, e.g. "node1.fqdn, minor=0".
  re_drbdnode = re.compile(r"^([^\s,]+),\s+minor=([0-9]+)$")
  vols = []
  drbd_min = {}
  for (count, diskinfo) in enumerate(info["Disks"]):
    (dtype, _) = diskinfo["disk/%s" % count].split(",", 1)
    if dtype == constants.DT_DRBD8:
      # DRBD disks: volumes are the backing children; record the DRBD
      # minor used on each of the two endpoint nodes.
      for child in diskinfo["child devices"]:
        vols.append(child["logical_id"])
      for key in ["nodeA", "nodeB"]:
        m = re_drbdnode.match(diskinfo[key])
        if not m:
          raise qa_error.Error("Cannot parse DRBD info: %s" % diskinfo[key])
        node = m.group(1)
        minor = int(m.group(2))
        minorlist = drbd_min.setdefault(node, [])
        minorlist.append(minor)
    elif dtype == constants.DT_PLAIN:
      vols.append(diskinfo["logical_id"])
  # Sanity checks: at least one node; multi-node implies visible volumes.
  assert nodes
  assert len(nodes) < 2 or vols
  return {
    "nodes": nodes,
    "volumes": vols,
    "drbd-minors": drbd_min,
    "disk-template": disk_template,
    "storage-type": storage_type,
    }
def _DestroyInstanceDisks(instance):
  """Remove all the backend disks of an instance.
  This is used to simulate HW errors (dead nodes, broken disks...); the
  configuration of the instance is not affected.
  @type instance: dictionary
  @param instance: the instance
  """
  info = GetInstanceInfo(instance.name)
  storage_type = info["storage-type"]
  # FIXME: destruction/removal should be part of the disk class
  if storage_type == constants.ST_LVM_VG:
    for node in info["nodes"]:
      AssertCommand(["lvremove", "-f"] + info["volumes"], node=node)
  elif storage_type in (constants.ST_FILE, constants.ST_SHARED_FILE):
    # This works for both file and sharedfile, and this is intended.
    storage_dir = qa_config.get("file-storage-dir",
                                pathutils.DEFAULT_FILE_STORAGE_DIR)
    instance_dir = os.path.join(storage_dir, instance.name)
    for node in info["nodes"]:
      AssertCommand(["rm", "-rf", instance_dir], node=node)
  elif storage_type == constants.ST_DISKLESS:
    # Nothing to destroy for diskless instances.
    pass
def _GetInstanceField(instance, field):
  """Get the value of a field of an instance.
  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: string
  """
  master = qa_config.GetMasterNode()
  cmd = ["gnt-instance", "list", "--no-headers", "--units", "m",
         "-o", field, instance]
  return qa_utils.GetCommandOutput(master.primary,
                                   utils.ShellQuoteArgs(cmd)).strip()
def _GetBoolInstanceField(instance, field):
  """Get the Boolean value of a field of an instance.
  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: bool
  """
  value = _GetInstanceField(instance, field)
  if value == "Y":
    return True
  if value == "N":
    return False
  raise qa_error.Error("Field %s of instance %s has a non-Boolean value:"
                       " %s" % (field, instance, value))
def _GetNumInstanceField(instance, field):
  """Get a numeric value of a field of an instance.
  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: int or float
  """
  value = _GetInstanceField(instance, field)
  # Prefer an integer; fall back to float before giving up.
  for converter in (int, float):
    try:
      return converter(value)
    except ValueError:
      pass
  raise qa_error.Error("Field %s of instance %s has a non-numeric value:"
                       " %s" % (field, instance, value))
def GetInstanceSpec(instance, spec):
  """Return the current spec for the given parameter.
  @type instance: string
  @param instance: Instance name
  @type spec: string
  @param spec: one of the supported parameters: "memory-size", "cpu-count",
      "disk-count", "disk-size", "nic-count"
  @rtype: tuple
  @return: (minspec, maxspec); minspec and maxspec can be different only for
      memory and disk size
  """
  specmap = {
    "memory-size": ["be/minmem", "be/maxmem"],
    "cpu-count": ["vcpus"],
    "disk-count": ["disk.count"],
    "disk-size": ["disk.size/ "],
    "nic-count": ["nic.count"],
    }
  if spec == "disk-size":
    # Sizes are reported per disk, so the disk count is fetched first.
    (numdisk, _) = GetInstanceSpec(instance, "disk-count")
    fields = ["disk.size/%s" % idx for idx in range(numdisk)]
  else:
    assert spec in specmap, "%s not in %s" % (spec, specmap)
    fields = specmap[spec]
  values = [_GetNumInstanceField(instance, field) for field in fields]
  return (min(values), max(values))
def IsFailoverSupported(instance):
  """Whether the instance's disk template is mirrored (allows failover)."""
  return instance.disk_template in constants.DTS_MIRRORED
def IsMigrationSupported(instance):
  """Whether the instance's disk template is mirrored (allows migration)."""
  return instance.disk_template in constants.DTS_MIRRORED
def IsDiskReplacingSupported(instance):
  """Whether disks can be replaced, i.e. the instance uses DRBD8."""
  return instance.disk_template == constants.DT_DRBD8
def IsDiskSupported(instance):
  """Whether the instance has disks at all (is not diskless)."""
  return instance.disk_template != constants.DT_DISKLESS
def TestInstanceAddWithPlainDisk(nodes, fail=False):
"""gnt-instance add -t plain"""
if constants.DT_PLAIN in qa_config.GetEnabledDiskTemplates():
instance = Cr |
Raynxxx/CUIT-ACM-Website | view/ajax.py | Python | mit | 35,176 | 0.00426 | # coding=utf-8
import os
from __init__ import *
import traceback, cStringIO, re
from flask import current_app
from werkzeug.datastructures import FileStorage
from server import user_server, article_server, status_server, form, \
account_server, news_server, resource_server
from server import general, honor_server
from dao.dbACCOUNT import Account
from dao import dbCompetition, dbPlayer
from util import json, CJsonEncoder
from flask.globals import _app_ctx_stack
from flask import request, jsonify
from sqlalchemy.exc import IntegrityError
from server.account_server import AccountUpdatingException, AccountExistException
from util import function
#
# @blueprint: ajax
# @created: 2015/06/22
# @author: Z2Y
#
ajax = blueprints.Blueprint('ajax', __name__)
#
# @brief: json for recent contest
# @route: /ajax/contest.json
# @allowed user: public
#
@ajax.route("/ajax/contest.json", methods=['GET'])
def recent_contests():
import json
json_file = open(RECENT_CONTEST_JSON, 'r').read()
json_contests = json.JSONDecoder().decode(json_file)
contests = []
for contest in json_contests:
name, link = contest['name'], contest['link']
new_contest = {
'oj': contest['oj'],
'name': '<a href="' + link + '" class="contest-name" title="' + name + '">' + name + '</a>',
'start_time': contest['start_time'],
'access': contest['access'],
}
contests.append(new_contest)
return json.dumps({ 'data': contests })
#
# @brief: ajax rank list
# @route: /ajax/rank_list
# @allowed user: student and coach
#
@ajax.route('/ajax/main_rank_table')
def main_rank_table():
    """Return the main rank list as a JSON table payload."""
    return json.dumps({'data': general.get_rank_list()})
#
# @brief: ajax html for one user item
# @allowed user: admin and coach
#
@login_required
def get_user_list_item(user):
    """Render the HTML fragment for a single row of the user list."""
    return render_template('ajax/user_list_item.html',
                           user=user,
                           school_mapper=SCHOOL_MAP,
                           college_mapper=SCHOOL_COLLEGE_MAP)
#
# @brief: ajax user list
# @route: /ajax/user_list
# @allowed user: admin and coach
#
@ajax.route('/ajax/user_list', methods=["GET", "POST"])
@login_required
def get_users():
    """Return a page of rendered user rows plus pagination metadata.

    Admins see every user; coaches only see users of their own school.
    """
    if not current_user.is_admin and not current_user.is_coach:
        return redirect(url_for('main.index'))
    page = request.args.get('page', 1, type=int)
    search = request.args.get('search', None)
    per_page = USER_MANAGE_PER_PAGE
    if current_user.is_admin:
        pagination = user_server.get_list_pageable(page, per_page, search=search)
    else:  # coach: restrict the listing to the coach's school
        pagination = user_server.get_list_pageable(page, per_page, search=search,
                                                   school=current_user.school)
    page_list = list(pagination.iter_pages(left_current=1, right_current=2))
    return jsonify(items=[get_user_list_item(u) for u in pagination.items],
                   prev_num=pagination.prev_num,
                   next_num=pagination.next_num,
                   page_list=page_list,
                   page=pagination.page,
                   pages=pagination.pages)
#
# @brief: add user
# @route: /ajax/create_user
# @accepted methods: [post]
# @allowed user: admin and coach
# @ajax return: 用户是否添加成功
#
@ajax.route('/ajax/create_user', methods=["POST"])
@login_required
def create_user():
    """Create a single user from the register form (admin/coach only)."""
    if not current_user.is_admin and not current_user.is_coach:
        return redirect(url_for('main.index'))
    reg_form = form.RegisterForm()
    if not reg_form.validate_on_submit():
        return u"添加用户失败: 表单填写有误"
    try:
        # Combine the submitted right flags into a single bitmask.
        rights = 0
        for item in request.form.getlist('rights'):
            rights |= int(item)
        ret = user_server.create_user(reg_form, rights)
        if ret == 'OK':
            return u"添加用户成功"
        return u"添加用户失败: " + ret
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return u"添加用户失败: " + e.message
#
# @brief: add many users
# @route: /ajax/create_users
# @accepted methods: [post]
# @allowed user: admin and coach
# @ajax return: 用户添加成功的数量
#
@ajax.route('/ajax/create_users', methods=["POST"])
@login_required
def create_users():
    """Bulk-create users from the multi-register form (admin/coach only)."""
    if not current_user.is_admin and not current_user.is_coach:
        return redirect(url_for('main.index'))
    reg_form = form.MultiRegisterForm()
    if not reg_form.validate_on_submit():
        return u"添加用户失败: 表单填写有误"
    try:
        return user_server.create_many_users(reg_form, current_user)
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return u"添加用户失败: " + e.message
#
# @brief: check apply user
# @route: /ajax/check_apply
# @accepted methods: [post]
# @allowed user: admin and coach
# @ajax return: 操作结果
#
@ajax.route("/ajax/check_apply", methods= ['POST'])
@login_required
def check_apply():
if not current_user.is_admin and not current_user.is_coach:
return redirect(url_for('main.index'))
try:
apply_id = request.form.get('uid')
user = user_server.get_by_id(apply_id)
opt = request.form.get('opt')
ret = user_server.update_apply(apply_id, opt)
if ret == 'OK':
function.reply_of_apply(mail, user.serialize, _app_ctx_stack.top, opt)
return ret
except Exception:
current_app.logger.error(traceback.format_exc())
return u'操作失败'
#
# @brief: edit user
# @route: /ajax/edit_user
# @accepted methods: [post]
# @allowed user: admin and coach
#
@ajax.route('/ajax/edit_user', methods=["POST"])
@login_required
def edit_user():
    """Modify an existing user and their rights (admin/coach only)."""
    if not current_user.is_admin and not current_user.is_coach:
        return redirect(url_for('main.index'))
    user_modify_form = form.UserModifyForm()
    if not user_modify_form.validate_on_submit():
        return u"修改用户失败: 表单填写有误"
    try:
        # Combine the submitted right flags into a single bitmask.
        rights = 0
        for item in request.form.getlist('rights'):
            rights |= int(item)
        ret = user_server.update_user(user_modify_form, rights)
        if ret == 'OK':
            return u"修改用户成功"
        return u'修改用户失败: ' + ret
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return u"修改用户失败: " + e.message
#
# @brief: edit user for self
# @route: /ajax/edit_user_self
# @accepted methods: [post]
# @allowed user: all
#
@ajax.route('/ajax/edit_user_self', methods=["POST"])
@login_required
def edit_user_self():
    """Let the logged-in user modify their own profile."""
    user_modify_form = form.UserModifyForm()
    if not user_modify_form.validate_on_submit():
        return u"修改用户失败: 表单填写有误"
    try:
        ret = user_server.update_user(user_modify_form, for_self=True)
        if ret == 'OK':
            return u"修改用户成功"
        return u'修改用户失败: ' + ret
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return u"修改用户失败: " + e.message
#
# @brief: modify password
# @route: /ajax/modify_password
# @accepted methods: [post]
# @allowed user: all
# @ajax return: 密码是否修改成功 => string
#
@ajax.route('/ajax/modify_password', methods=['POST'])
@login_required
def modify_password():
    """Change the current user's password after verifying the old one."""
    pwd_modify_form = form.PasswordModifyForm()
    if not pwd_modify_form.validate_on_submit():
        return u"修改密码失败"
    if not current_user.verify_password(pwd_modify_form.password.data):
        return u"当前密码输入错误"
    return user_server.modify_password(pwd_modify_form, current_user)
#
# @brief: delete user
# @route: /ajax/delete_user
# @accepted methods: [post]
# @allowed user: admin and coach
#
@ajax.route('/ajax/delete_user', methods=["POST"])
@login_required
def delete_user():
if not current_user.is_admin and not current_user.is_coach:
return redirect(url_for('main.index'))
try:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.