| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
import pandas as pd
from bokeh.sampledata.glucose import data
from bokeh.plotting import *
output_file("glucose.html", title="glucose.py example")
hold()
dates = data.index.to_series()
figure(x_axis_type="datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave")
line(dates, data['glucose'], color='red', legend='glucose')
line(dates, data['isig'], color='blue', legend='isig')
curplot().title = "Glucose Measurements"
xax, yax = axis()
xax.axis_label = 'Date'
yax.axis_label = 'Value'
day = data.ix['2010-10-06']
highs = day[day['glucose'] > 180]
lows = day[day['glucose'] < 80]
figure(x_axis_type="datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave")
line(day.index.to_series(), day['glucose'],
line_color="gray", line_dash="4 4", line_width=1, legend="glucose")
scatter(highs.index.to_series(), highs['glucose'], size=6, color='tomato', legend="high")
scatter(lows.index.to_series(), lows['glucose'], size=6, color='navy', legend="low")
curplot().title = "Glucose Range"
xgrid()[0].grid_line_color = None
ygrid()[0].grid_line_alpha = 0.5
xax, yax = axis()
xax.axis_label = 'Time'
yax.axis_label = 'Value'
data['inrange'] = (data['glucose'] < 180) & (data['glucose'] > 80)
window = int(30.5 * 288) # 288 samples per day * ~30.5 days = one month of samples
inrange = pd.rolling_sum(data.inrange, window)
inrange = inrange.dropna()
inrange = inrange/float(window)
figure(x_axis_type="datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave")
line(inrange.index.to_series(), inrange, line_color="navy")
curplot().title = "Glucose In-Range Rolling Sum"
xax, yax = axis()
xax.axis_label = 'Date'
yax.axis_label = 'Proportion In-Range'
# open a browser
show()
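The script above uses Bokeh's old implicit-state plotting API (hold(), curplot(), axis(), previewsave), which has since been removed. A minimal sketch of the first plot in the current figure-object API, assuming Bokeh >= 1.4 (for legend_label) and the sample data downloaded:
from bokeh.plotting import figure, output_file, show
from bokeh.sampledata.glucose import data

output_file("glucose.html", title="glucose.py example")
p = figure(x_axis_type="datetime", tools="pan,wheel_zoom,box_zoom,reset,save",
           title="Glucose Measurements")
dates = data.index.to_series()
p.line(dates, data['glucose'], color='red', legend_label='glucose')
p.line(dates, data['isig'], color='blue', legend_label='isig')
p.xaxis.axis_label = 'Date'
p.yaxis.axis_label = 'Value'
show(p)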
|
jakevdp/bokeh
|
examples/plotting/file/glucose.py
|
Python
|
bsd-3-clause
| 1,674
|
# -*- coding: utf-8 -*-
"""This module provides an implementation of full matrix adagrad."""
from __future__ import division
from base import Minimizer
from mathadapt import sqrt, ones_like, clip, zero_like
from scipy.linalg import pinv as scipy_pinv, polar
import numpy as np
from fjlt.SubsampledRandomizedFourrierTransform import SubsampledRandomizedFourrierTransform
from scipy.linalg import sqrtm
class AdagradFull(Minimizer):
"""Full Matrix AdaGrad optimizer.
AdaGrad [duchi2011]_ adapts the step rate to each direction in parameter
space by accumulating the outer products of past gradients.
Let :math:`f'(\\theta_t)` be the derivative of the loss with respect to the
parameters at time step :math:`t`. Given a step rate :math:`\\eta` and a
diagonal offset :math:`\\delta`, the full matrix variant performs the
following updates:
.. math::
G_t &=& G_{t-1} + f'(\\theta_t)~f'(\\theta_t)^T, \\\\
\\theta_{t+1} &=& \\theta_t - \\eta~(\\delta I + G_t)^{-1/2}~f'(\\theta_t),
where :math:`G_0 = 0` and :math:`(\\delta I + G_t)^{-1/2}` denotes the
inverse of the matrix square root of :math:`\\delta I + G_t`.
.. [duchi2011] Duchi, Hazan, Singer: "Adaptive Subgradient Methods for Online
Learning and Stochastic Optimization", JMLR 2011.
"""
state_fields = 'n_iter g_avg Gt eta lamb delta'.split()
def __init__(self, wrt, fprime, eta, lamb, delta, n_classes=None, args=None):
"""Create a AadaGradFull object.
Parameters
----------
wrt : array_like
Array that represents the solution. Will be operated upon in
place. ``fprime`` should accept this array as a first argument.
fprime : callable
Callable that given a solution vector as first parameter and *args
and **kwargs drawn from the iterations ``args`` returns a
search direction, such as a gradient.
eta : scalar
Step rate; scales the preconditioned gradient before it is applied
to the parameter vector.
lamb : scalar
Regularization constant; kept as optimizer state but not used by the
update itself.
delta : scalar
Offset added to the diagonal of the accumulated outer products for
numerical stability.
n_classes : int, optional
Number of output classes; when given, the preconditioned gradient is
reordered class-wise before the update.
args : iterable
Iterator over arguments which ``fprime`` will be called with.
"""
super(AdagradFull, self).__init__(wrt, args=args)
self.n_classes = n_classes
self.fprime = fprime
self.g_avg = zero_like(wrt)
self.Gt = np.zeros((self.g_avg.shape[0], self.g_avg.shape[0])) # TODO: make eye
self.eta = eta
self.lamb = lamb
self.delta = delta
self.I_delta = np.diag(np.ones(self.Gt.shape[0]) * delta)
self.eye_Gt = np.eye(*self.Gt.shape)
if self.n_classes is not None:
self.n_param = wrt.shape[0]
self.n_features = (self.n_param - self.n_classes) // self.n_classes # integer division: used as a slice index below
def _iterate(self):
for args, kwargs in self.args:
gradient = self.fprime(self.wrt, *args, **kwargs)
self.Gt += np.outer(gradient, gradient)
St = self._my_sqrtm(self.I_delta + self.Gt)
Ht_inv = np.linalg.inv(St)
if self.n_classes is None:
uppro = np.dot(Ht_inv, gradient)
else:
vec = np.dot(Ht_inv, gradient)
uppro = np.r_[np.array([vec[self.n_features * i:self.n_features * (i + 1)]
for i in range(self.n_classes)]).flatten(),
vec[self.n_param - self.n_classes:]]
self.wrt -= self.eta * uppro
self.n_iter += 1
yield {
'n_iter': self.n_iter,
'gradient': gradient,
'args': args,
'kwargs': kwargs,
}
def _my_sqrtm(self, X):
# sqrtm can return a complex matrix with negligible imaginary parts due
# to rounding; keep only the real part.
# tol = 1e-7
return np.real(sqrtm(X)) # + self.eye_Gt * tol)
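A standalone numeric sketch of the update implemented above (plain NumPy, hypothetical values; not part of the original module):
import numpy as np
from scipy.linalg import sqrtm

eta, delta = 0.1, 1e-4
wrt = np.zeros(2)
Gt = np.zeros((2, 2))
for gradient in [np.array([1.0, 0.5]), np.array([0.5, -1.0])]:
    Gt += np.outer(gradient, gradient)  # accumulate gradient outer products
    St = np.real(sqrtm(delta * np.eye(2) + Gt))  # matrix square root
    wrt -= eta * np.linalg.inv(St).dot(gradient)  # preconditioned step
print(wrt)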
|
gabobert/climin
|
climin/adagrad_full.py
|
Python
|
bsd-3-clause
| 4,355
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility library to support interaction with the Tool Results service."""
import time
import urllib
import urlparse
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.third_party.py27 import py27_collections as collections
_STATUS_INTERVAL_SECS = 3
class BadMatrixException(exceptions.ToolException):
"""BadMatrixException is for test matrices that fail prematurely."""
class ToolResultsIds(
collections.namedtuple('ToolResultsIds', ['history_id', 'execution_id'])):
"""A tuple to hold the history & execution IDs returned from Tool Results.
Fields:
history_id: a string with the Tool Results history ID to publish to.
execution_id: a string with the ID of the Tool Results execution.
"""
def CreateToolResultsUiUrl(project_id, tool_results_ids):
"""Create a URL to the Tool Results UI for a test.
Args:
project_id: string containing the user's GCE project ID.
tool_results_ids: a ToolResultsIds object holding history & execution IDs.
Returns:
A url to the Tool Results UI.
"""
url_base = properties.VALUES.test.results_base_url.Get()
if not url_base:
url_base = 'https://console.developers.google.com'
url_end = (
'project/{p}/testlab/mobile/histories/{h}/executions/{e}'.format(
p=urllib.quote(project_id),
h=urllib.quote(tool_results_ids.history_id),
e=urllib.quote(tool_results_ids.execution_id)))
return urlparse.urljoin(url_base, url_end)
def GetToolResultsIds(matrix, matrix_monitor,
status_interval=_STATUS_INTERVAL_SECS):
"""Gets the Tool Results history ID and execution ID for a test matrix.
Sometimes the IDs are available immediately after a test matrix is created.
If not, we keep checking the matrix until the Testing and Tool Results
services have had enough time to create/assign the IDs, giving the user
continuous feedback using gcloud core's ProgressTracker class.
Args:
matrix: a TestMatrix which was just created by the Testing service.
matrix_monitor: a MatrixMonitor object.
status_interval: float, number of seconds to sleep between status checks.
Returns:
A ToolResultsIds tuple containing the history ID and execution ID, which
are shared by all TestExecutions in the TestMatrix.
Raises:
BadMatrixException: if the matrix finishes without both ToolResults IDs.
"""
history_id = None
execution_id = None
msg = 'Creating individual test executions'
with console_io.ProgressTracker(msg, autotick=True):
while True:
if matrix.resultStorage.toolResultsExecution:
history_id = matrix.resultStorage.toolResultsExecution.historyId
execution_id = matrix.resultStorage.toolResultsExecution.executionId
if history_id and execution_id:
break
if matrix.state in matrix_monitor.completed_matrix_states:
raise BadMatrixException(
'\nMatrix [{m}] unexpectedly reached final status {s} without '
'returning a URL to any test results in the Developers Console. '
'Please re-check the validity of your APK file(s) and test '
'parameters and try again.'
.format(m=matrix.testMatrixId, s=matrix.state))
time.sleep(status_interval)
matrix = matrix_monitor.GetTestMatrixStatus()
return ToolResultsIds(history_id=history_id, execution_id=execution_id)
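A minimal usage sketch (hypothetical project and IDs; assumes the Cloud SDK modules imported above are on sys.path and configured):
ids = ToolResultsIds(history_id='bh.1234abcd', execution_id='5678')
print CreateToolResultsUiUrl('my-project', ids)
# -> https://console.developers.google.com/project/my-project/testlab/mobile/histories/bh.1234abcd/executions/5678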
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/googlecloudsdk/api_lib/test/tool_results.py
|
Python
|
bsd-3-clause
| 4,080
|
"""
Tests for images support code.
"""
__authors__ = "Nicu Tofan"
__copyright__ = "Copyright 2015, Nicu Tofan"
__credits__ = ["Nicu Tofan"]
__license__ = "3-clause BSD"
__maintainer__ = "Nicu Tofan"
__email__ = "nicu.tofan@gmail.com"
import unittest
if __name__ == '__main__':
unittest.main()
|
TNick/pyl2extra
|
pyl2extra/testing/tests/test_images.py
|
Python
|
bsd-3-clause
| 286
|
"""Conuntries Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
class Countries(APIClassTemplate):
"""The Countries Object in the FMC."""
VALID_JSON_DATA = ["id", "name", "iso2", "iso3"]
VALID_FOR_KWARGS = VALID_JSON_DATA + []
URL_SUFFIX = "/object/countries"
VALID_CHARACTERS_FOR_NAME = r"""[.\w\d_\- ]"""
def __init__(self, fmc, **kwargs):
"""
Initialize Countries object.
Set self.type to "Country" and parse the kwargs.
:param fmc: (object) FMC object
:param kwargs: Any other values passed during instantiation.
:return: None
"""
super().__init__(fmc, **kwargs)
logging.debug("In __init__() for Countries class.")
self.parse_kwargs(**kwargs)
self.type = "Country"
def post(self):
"""POST method for API for Countries not supported."""
logging.info("POST method for API for Countries not supported.")
pass
def put(self):
"""PUT method for API for Countries not supported."""
logging.info("PUT method for API for Countries not supported.")
pass
def delete(self):
"""DELETE method for API for Countries not supported."""
logging.info("DELETE method for API for Countries not supported.")
pass
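A hedged usage sketch (hypothetical host and credentials; assumes an authenticated fmcapi.FMC session, which supplies the fmc argument):
import fmcapi

with fmcapi.FMC(host='10.0.0.1', username='admin', password='hunter2') as fmc:
    germany = Countries(fmc=fmc, name='Germany')
    germany.get()  # GET works; post/put/delete above are logged no-ops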
|
daxm/fmcapi
|
fmcapi/api_objects/object_services/countries.py
|
Python
|
bsd-3-clause
| 1,340
|
"""
owtf.protocols.smtp
~~~~~~~~~~~~~~~~~~~
Description:
This is the OWTF SMTP handler, to simplify sending emails.
"""
from email.mime import base, multipart, text as mimetext
from email import encoders
import logging
import os
import smtplib
from owtf.utils.file import FileOperations, get_file_as_list
__all__ = ["smtp"]
class SMTP(object):
def __init__(self):
self.msg_prefix = "OWTF SMTP Client - "
def pprint(self, message):
logging.info(self.msg_prefix + message)
def create_connection_with_mail_server(self, options):
return smtplib.SMTP(options["SMTP_HOST"], int(options["SMTP_PORT"]))
def connect(self, options):
try:
mail_server = self.create_connection_with_mail_server(options)
mail_server.ehlo()
except Exception:
self.pprint("Error connecting to {!s} on port {!s}".format(options["SMTP_HOST"], options["SMTP_PORT"]))
return None
try:
mail_server.starttls() # Give start TLS a shot
except Exception as e:
self.pprint("{} - Assuming TLS unsupported and trying to continue..".format(str(e)))
try:
mail_server.login(options["SMTP_LOGIN"], options["SMTP_PASS"])
except Exception as e:
self.pprint("ERROR: {} - Assuming open-relay and trying to continue..".format(str(e)))
return mail_server
def is_file(self, target):
return os.path.isfile(target)
def get_file_content_as_list(self, options):
return get_file_as_list(options["EMAIL_TARGET"])
def build_target_list(self, options):
"""Build a list of targets for simplification purposes."""
if self.is_file(options["EMAIL_TARGET"]):
target_list = self.get_file_content_as_list(options)
else:
target_list = [options["EMAIL_TARGET"]]
return target_list
def send(self, options):
num_errors = 0
for target in self.build_target_list(options):
target = target.strip()
if not target:
continue # Skip blank lines!
self.pprint("Sending email for target: {!s}".format(target))
try:
message = self.build_message(options, target)
mail_server = self.connect(options)
if mail_server is None:
raise Exception("Error connecting to {}".format(str(target)))
mail_server.sendmail(options["SMTP_LOGIN"], target, message.as_string())
self.pprint("Email relay successful!")
except Exception as e:
logging.error("Error delivering email: %s", str(e))
num_errors += 1
return num_errors == 0
def build_message(self, options, target):
message = multipart.MIMEMultipart()
for name, value in list(options.items()):
if name == "EMAIL_BODY":
self.add_body(message, value)
elif name == "EMAIL_ATTACHMENT":
self.add_attachment(message, value)
else: # From, To, Subject, etc.
self.set_option(message, name, value, target)
return message
def set_option(self, message, option, value, target):
if option == "EMAIL_FROM":
message["From"] = value
elif option == "EMAIL_TARGET":
message["To"] = target
elif option == "EMAIL_PRIORITY":
if value == "yes":
message["X-Priority"] = " 1 (Highest)"
message["X-MSMail-Priority"] = " High"
elif option == "EMAIL_SUBJECT":
message["Subject"] = value
def add_body(self, message, text):
# If a file has been specified as Body, then set Body to file contents.
if os.path.isfile(text):
body = FileOperations.open(text).read().strip()
else:
body = text
message.attach(mimetext.MIMEText(body, "plain")) # MIMEText takes a subtype string, not the message object
def add_attachment(self, message, attachment):
if not attachment:
return False
binary_blob = base.MIMEBase("application", "octet-stream")
binary_blob.set_payload(FileOperations.open(attachment, "rb").read())
encoders.encode_base64(binary_blob) # base64 encode the Binary Blob.
# Binary Blob headers.
binary_blob.add_header("Content-Disposition", 'attachment; filename="{}"'.format(os.path.basename(attachment)))
message.attach(binary_blob)
return True
smtp = SMTP()
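A usage sketch of the options dictionary consumed above; the key names come from the handlers in this class, the values are placeholders:
options = {
    "SMTP_HOST": "smtp.example.com",
    "SMTP_PORT": 587,
    "SMTP_LOGIN": "tester@example.com",
    "SMTP_PASS": "secret",
    "EMAIL_FROM": "tester@example.com",
    "EMAIL_TARGET": "target@example.com",  # or a path to a file with one address per line
    "EMAIL_SUBJECT": "Test",
    "EMAIL_BODY": "Hello",  # or a path to a file holding the body
    "EMAIL_PRIORITY": "no",
}
smtp.send(options)  # returns True when every target was relayed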
|
owtf/owtf
|
owtf/protocols/smtp.py
|
Python
|
bsd-3-clause
| 4,525
|
import guava
class IndexController(guava.controller.Controller):
def index(self):
self.write("Hello World!")
|
StarfruitStack/guava
|
benchmark/python/guava/index.py
|
Python
|
bsd-3-clause
| 123
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christian Boos <cboos@edgewall.org>
import re
from genshi.core import Markup
from genshi.builder import tag
from trac.config import IntOption, ListOption
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.resource import ResourceNotFound
from trac.util import Ranges
from trac.util.text import to_unicode, wrap
from trac.util.translation import _
from trac.versioncontrol.api import (Changeset, NoSuchChangeset,
RepositoryManager)
from trac.versioncontrol.web_ui.changeset import ChangesetModule
from trac.versioncontrol.web_ui.util import *
from trac.web import IRequestHandler
from trac.web.chrome import (Chrome, INavigationContributor, add_ctxtnav,
add_link, add_script, add_script_data,
add_stylesheet, auth_link, web_context)
from trac.wiki import IWikiSyntaxProvider, WikiParser
class LogModule(Component):
implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
IWikiSyntaxProvider)
default_log_limit = IntOption('revisionlog', 'default_log_limit', 100,
"""Default value for the limit argument in the TracRevisionLog.
(''since 0.11'')""")
graph_colors = ListOption('revisionlog', 'graph_colors',
['#cc0', '#0c0', '#0cc', '#00c', '#c0c', '#c00'],
doc="""Comma-separated list of colors to use for the TracRevisionLog
graph display. (''since 1.0'')""")
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'browser'
def get_navigation_items(self, req):
return []
# IPermissionRequestor methods
def get_permission_actions(self):
return ['LOG_VIEW']
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/log(/.*)?$', req.path_info)
if match:
req.args['path'] = match.group(1) or '/'
return True
def process_request(self, req):
req.perm.require('LOG_VIEW')
mode = req.args.get('mode', 'stop_on_copy')
path = req.args.get('path', '/')
rev = req.args.get('rev')
stop_rev = req.args.get('stop_rev')
revs = req.args.get('revs')
format = req.args.get('format')
verbose = req.args.get('verbose')
limit = int(req.args.get('limit') or self.default_log_limit)
rm = RepositoryManager(self.env)
reponame, repos, path = rm.get_repository_by_path(path)
if not repos:
if path == '/':
raise TracError(_("No repository specified and no default"
" repository configured."))
else:
raise ResourceNotFound(_("Repository '%(repo)s' not found",
repo=reponame or path.strip('/')))
if reponame != repos.reponame: # Redirect alias
qs = req.query_string
req.redirect(req.href.log(repos.reponame or None, path)
+ ('?' + qs if qs else ''))
normpath = repos.normalize_path(path)
# if `revs` parameter is given, then we're restricted to the
# corresponding revision ranges.
# If not, then we're considering all revisions since `rev`,
# on that path, in which case `revranges` will be None.
if revs:
revranges = RevRanges(repos, revs, resolve=True)
rev = revranges.b
else:
revranges = None
rev = repos.normalize_rev(rev)
# The `history()` method depends on the mode:
# * for ''stop on copy'' and ''follow copies'', it's `Node.history()`
# unless explicit ranges have been specified
# * for ''show only add, delete'' we're using
# `Repository.get_path_history()`
cset_resource = repos.resource.child('changeset')
show_graph = False
curr_revrange = []
if mode == 'path_history':
def history():
for h in repos.get_path_history(path, rev):
if 'CHANGESET_VIEW' in req.perm(cset_resource(id=h[1])):
yield h
elif revranges:
show_graph = path == '/' and not verbose \
and not repos.has_linear_changesets \
and len(revranges) == 1
def history():
separator = False
for a, b in reversed(revranges.pairs):
curr_revrange[:] = (a, b)
node = get_existing_node(req, repos, path, b)
for p, rev, chg in node.get_history():
if repos.rev_older_than(rev, a):
break
if 'CHANGESET_VIEW' in req.perm(cset_resource(id=rev)):
separator = True
yield p, rev, chg
else:
separator = False
if separator:
yield p, rev, None
else:
show_graph = path == '/' and not verbose \
and not repos.has_linear_changesets
def history():
node = get_existing_node(req, repos, path, rev)
for h in node.get_history():
if 'CHANGESET_VIEW' in req.perm(cset_resource(id=h[1])):
yield h
# -- retrieve history, asking for limit+1 results
info = []
depth = 1
previous_path = normpath
count = 0
history_remaining = True
for old_path, old_rev, old_chg in history():
if stop_rev and repos.rev_older_than(old_rev, stop_rev):
break
old_path = repos.normalize_path(old_path)
item = {
'path': old_path, 'rev': old_rev, 'existing_rev': old_rev,
'change': old_chg, 'depth': depth,
}
if old_chg == Changeset.DELETE:
item['existing_rev'] = repos.previous_rev(old_rev, old_path)
if not (mode == 'path_history' and old_chg == Changeset.EDIT):
info.append(item)
if old_path and old_path != previous_path and \
not (mode == 'path_history' and old_path == normpath):
depth += 1
item['depth'] = depth
item['copyfrom_path'] = old_path
if mode == 'stop_on_copy':
break
elif mode == 'path_history':
depth -= 1
if old_chg is None: # separator entry
stop_limit = limit
else:
count += 1
stop_limit = limit + 1
if count >= stop_limit:
break
previous_path = old_path
else:
history_remaining = False
if not info:
node = get_existing_node(req, repos, path, rev)
if repos.rev_older_than(stop_rev, node.created_rev):
# FIXME: we should send a 404 error here
raise TracError(_("The file or directory '%(path)s' doesn't "
"exist at revision %(rev)s or at any "
"previous revision.", path=path,
rev=repos.display_rev(rev)),
_('Nonexistent path'))
# Generate graph data
graph = {}
if show_graph:
threads, vertices, columns = \
make_log_graph(repos, (item['rev'] for item in info))
graph.update(threads=threads, vertices=vertices, columns=columns,
colors=self.graph_colors,
line_width=0.04, dot_radius=0.1)
add_script(req, 'common/js/excanvas.js', ie_if='IE')
add_script(req, 'common/js/log_graph.js')
add_script_data(req, graph=graph)
def make_log_href(path, **args):
link_rev = rev
if rev == str(repos.youngest_rev):
link_rev = None
params = {'rev': link_rev, 'mode': mode, 'limit': limit}
params.update(args)
if verbose:
params['verbose'] = verbose
return req.href.log(repos.reponame or None, path, **params)
if format in ('rss', 'changelog'):
info = [i for i in info if i['change']] # drop separators
if info and count > limit:
del info[-1]
elif info and history_remaining and count >= limit:
# stop_limit reached, there _might_ be some more
next_rev = info[-1]['rev']
next_path = info[-1]['path']
next_revranges = None
if curr_revrange:
new_revrange = (curr_revrange[0], next_rev) \
if info[-1]['change'] else None
next_revranges = revranges.truncate(curr_revrange,
new_revrange)
next_revranges = unicode(next_revranges) or None
if next_revranges or not revranges:
older_revisions_href = make_log_href(
next_path, rev=next_rev, revs=next_revranges)
add_link(req, 'next', older_revisions_href,
_('Revision Log (restarting at %(path)s, rev. '
'%(rev)s)', path=next_path,
rev=repos.display_rev(next_rev)))
# show only `limit` full results; use `change = None` as a marker
info[-1]['change'] = None
revisions = [i['rev'] for i in info]
changes = get_changes(repos, revisions, self.log)
extra_changes = {}
if format == 'changelog':
for rev in revisions:
changeset = changes[rev]
cs = {}
cs['message'] = wrap(changeset.message, 70,
initial_indent='\t',
subsequent_indent='\t')
files = []
actions = []
for cpath, kind, chg, bpath, brev in changeset.get_changes():
files.append(bpath if chg == Changeset.DELETE else cpath)
actions.append(chg)
cs['files'] = files
cs['actions'] = actions
extra_changes[rev] = cs
data = {
'context': web_context(req, 'source', path, parent=repos.resource),
'reponame': repos.reponame or None, 'repos': repos,
'path': path, 'rev': rev, 'stop_rev': stop_rev,
'display_rev': repos.display_rev, 'revranges': revranges,
'mode': mode, 'verbose': verbose, 'limit': limit,
'items': info, 'changes': changes, 'extra_changes': extra_changes,
'graph': graph,
'wiki_format_messages': self.config['changeset']
.getbool('wiki_format_messages')
}
if format == 'changelog':
return 'revisionlog.txt', data, 'text/plain'
elif format == 'rss':
data['email_map'] = Chrome(self.env).get_email_map()
data['context'] = web_context(req, 'source',
path, parent=repos.resource,
absurls=True)
return 'revisionlog.rss', data, 'application/rss+xml'
item_ranges = []
range = []
for item in info:
if item['change'] is None: # separator
if range: # start new range
range.append(item)
item_ranges.append(range)
range = []
else:
range.append(item)
if range:
item_ranges.append(range)
data['item_ranges'] = item_ranges
add_stylesheet(req, 'common/css/diff.css')
add_stylesheet(req, 'common/css/browser.css')
path_links = get_path_links(req.href, repos.reponame, path, rev)
if path_links:
data['path_links'] = path_links
if path != '/':
add_link(req, 'up', path_links[-2]['href'], _('Parent directory'))
rss_href = make_log_href(path, format='rss', revs=revs,
stop_rev=stop_rev)
add_link(req, 'alternate', auth_link(req, rss_href), _('RSS Feed'),
'application/rss+xml', 'rss')
changelog_href = make_log_href(path, format='changelog', revs=revs,
stop_rev=stop_rev)
add_link(req, 'alternate', changelog_href, _('ChangeLog'),
'text/plain')
add_ctxtnav(req, _('View Latest Revision'),
href=req.href.browser(repos.reponame or None, path))
if 'next' in req.chrome['links']:
next = req.chrome['links']['next'][0]
add_ctxtnav(req, tag.span(tag.a(_('Older Revisions'),
href=next['href']),
Markup(' →')))
return 'revisionlog.html', data, None
# IWikiSyntaxProvider methods
# int rev ranges or any kind of rev range
REV_RANGE = r"(?:%(int)s|%(cset)s(?:[:-]%(cset)s)?)" % \
{'int': Ranges.RE_STR, 'cset': ChangesetModule.CHANGESET_ID}
def get_wiki_syntax(self):
yield (
# [...] form, starts with optional intertrac: [T... or [trac ...
r"!?\[(?P<it_log>%s\s*)" % WikiParser.INTERTRAC_SCHEME +
# <from>:<to> + optional path restriction
r"(?P<log_revs>%s)(?P<log_path>[/?][^\]]*)?\]" % self.REV_RANGE,
lambda x, y, z: self._format_link(x, 'log1', y[1:-1], y, z))
yield (
# r<from>:<to> form + optional path restriction (no intertrac)
r"(?:\b|!)r%s\b(?:/[a-zA-Z0-9_/+-]+)?" % Ranges.RE_STR,
lambda x, y, z: self._format_link(x, 'log2', '@' + y[1:], y))
def get_link_resolvers(self):
yield ('log', self._format_link)
LOG_LINK_RE = re.compile(r"([^@:]*)[@:]%s?" % REV_RANGE)
def _format_link(self, formatter, ns, match, label, fullmatch=None):
if ns == 'log1':
groups = fullmatch.groupdict()
it_log = groups.get('it_log')
revs = groups.get('log_revs')
path = groups.get('log_path') or '/'
target = '%s%s@%s' % (it_log, path, revs)
# prepending it_log is needed, as the helper expects it there
intertrac = formatter.shorthand_intertrac_helper(
'log', target, label, fullmatch)
if intertrac:
return intertrac
path, query, fragment = formatter.split_link(path)
else:
assert ns in ('log', 'log2')
if ns == 'log':
match, query, fragment = formatter.split_link(match)
else:
query = fragment = ''
match = ''.join(reversed(match.split('/', 1)))
path = match
revs = ''
if self.LOG_LINK_RE.match(match):
indexes = [sep in match and match.index(sep) for sep in ':@']
idx = min([i for i in indexes if i is not False])
path, revs = match[:idx], match[idx+1:]
rm = RepositoryManager(self.env)
try:
reponame, repos, path = rm.get_repository_by_path(path)
if not reponame:
reponame = rm.get_default_repository(formatter.context)
if reponame is not None:
repos = rm.get_repository(reponame)
if repos:
if 'LOG_VIEW' in formatter.perm:
reponame = repos.reponame or None
path = path or '/'
revranges = RevRanges(repos, revs)
if revranges.has_ranges():
href = formatter.href.log(reponame, path,
revs=unicode(revranges))
else:
# try to resolve if single rev
repos.normalize_rev(revs)
href = formatter.href.log(reponame, path,
rev=revs or None)
if query and '?' in href:
query = '&' + query[1:]
return tag.a(label, class_='source',
href=href + query + fragment)
errmsg = _("No permission to view change log")
elif reponame:
errmsg = _("Repository '%(repo)s' not found", repo=reponame)
else:
errmsg = _("No default repository defined")
except TracError, e:
errmsg = to_unicode(e)
return tag.a(label, class_='missing source', title=errmsg)
class RevRanges(object):
def __init__(self, repos, revs=None, resolve=False):
self.repos = repos
self.resolve = resolve
self.pairs = []
self.a = self.b = None
if revs:
self._append(revs)
def has_ranges(self):
n = len(self.pairs)
return n > 1 or n == 1 and self.a != self.b
def truncate(self, curr_pair, new_pair=None):
curr_pair = tuple(curr_pair)
if new_pair:
new_pair = tuple(new_pair)
revranges = RevRanges(self.repos, resolve=self.resolve)
pairs = revranges.pairs
for pair in self.pairs:
if pair == curr_pair:
if new_pair:
pairs.append(new_pair)
break
pairs.append(pair)
if pairs:
revranges.a = pairs[0][0]
revranges.b = pairs[-1][1]
revranges._reduce()
return revranges
def _normrev(self, rev):
if not rev:
raise NoSuchChangeset(rev)
if self.resolve:
return self.repos.normalize_rev(rev)
elif self.repos.has_linear_changesets:
try:
return int(rev)
except (ValueError, TypeError):
return rev
else:
return rev
_cset_range_re = re.compile(r"""(?:
%(cset)s[:-]%(cset)s | # int or hexa revs
[0-9]+[:-][A-Za-z_0-9]+ | # e.g. 42-head
[A-Za-z_0-9]+[:-][0-9]+ | # e.g. head-42
[^:]+:[^:]+ # e.g. master:dev-42
)\Z
""" % {'cset': ChangesetModule.CHANGESET_ID}, re.VERBOSE)
def _append(self, revs):
if not revs:
return
pairs = []
for rev in re.split(u',\u200b?', revs):
a = b = None
if self._cset_range_re.match(rev):
for sep in ':-':
if sep in rev:
a, b = rev.split(sep)
break
if a is None:
a = b = self._normrev(rev)
elif a == b:
a = b = self._normrev(a)
else:
a = self._normrev(a)
b = self._normrev(b)
pairs.append((a, b))
self.pairs.extend(pairs)
self._reduce()
def _reduce(self):
if all(isinstance(pair[0], (int, long)) and
isinstance(pair[1], (int, long))
for pair in self.pairs):
try:
ranges = Ranges(unicode(self), reorder=True)
except:
pass
else:
self.pairs[:] = ranges.pairs
else:
seen = set()
pairs = self.pairs[:]
for idx, pair in enumerate(pairs):
if pair in seen:
pairs[idx] = None
else:
seen.add(pair)
if len(pairs) != len(seen):
self.pairs[:] = filter(None, pairs)
if self.pairs:
self.a = self.pairs[0][0]
self.b = self.pairs[-1][1]
else:
self.a = self.b = None
def __len__(self):
return len(self.pairs)
def __unicode__(self):
sep = '-' if self.repos.has_linear_changesets else ':'
return ','.join(sep.join(map(unicode, pair)) if pair[0] != pair[1]
else unicode(pair[0])
for pair in self.pairs)
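A hedged illustration of RevRanges parsing and merging (stub repository object; assumes this module and trac.util are importable):
class _LinearRepos(object):
    has_linear_changesets = True  # revisions are plain integers

rr = RevRanges(_LinearRepos(), u'1:3,2-5,7')
print unicode(rr)  # '1-5,7' -- overlapping pairs merged through trac.util.Ranges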
|
exocad/exotrac
|
trac/versioncontrol/web_ui/log.py
|
Python
|
bsd-3-clause
| 21,201
|
"""Auto-generated file, do not edit by hand. CC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_CC = PhoneMetadata(id='CC', country_code=61, international_prefix='(?:14(?:1[14]|34|4[17]|[56]6|7[47]|88))?001[14-689]',
general_desc=PhoneNumberDesc(national_number_pattern='[1458]\\d{5,9}', possible_number_pattern='\\d{6,10}'),
fixed_line=PhoneNumberDesc(national_number_pattern='89162\\d{4}', possible_number_pattern='\\d{8,9}', example_number='891621234'),
mobile=PhoneNumberDesc(national_number_pattern='4(?:[0-2]\\d|3[0-57-9]|4[47-9]|5[0-37-9]|6[6-9]|7[07-9]|8[7-9])\\d{6}', possible_number_pattern='\\d{9}', example_number='412345678'),
toll_free=PhoneNumberDesc(national_number_pattern='1(?:80(?:0\\d{2})?|3(?:00\\d{2})?)\\d{4}', possible_number_pattern='\\d{6,10}', example_number='1800123456'),
premium_rate=PhoneNumberDesc(national_number_pattern='190[0126]\\d{6}', possible_number_pattern='\\d{10}', example_number='1900123456'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='500\\d{6}', possible_number_pattern='\\d{9}', example_number='500123456'),
voip=PhoneNumberDesc(national_number_pattern='550\\d{6}', possible_number_pattern='\\d{9}', example_number='550123456'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
preferred_international_prefix='0011',
national_prefix='0',
national_prefix_for_parsing='0')
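A hedged usage sketch (assumes the phonenumbers package is installed; the digits are the fixed-line example_number defined above):
import phonenumbers
n = phonenumbers.parse('891621234', 'CC')
print(phonenumbers.is_valid_number(n))  # True
print(phonenumbers.format_number(n, phonenumbers.PhoneNumberFormat.E164))  # +61891621234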
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/phonenumbers/data/region_CC.py
|
Python
|
bsd-3-clause
| 1,840
|
from __future__ import division, print_function
import os
import sys
import pickle
import copy
import sysconfig
import warnings
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from numpy._build_utils.apple_accelerate import (
uses_accelerate_framework, get_sgemv_fix
)
from numpy.compat import npy_load_module
from setup_common import *
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a
# bogus value for affected strides in order to help smoke out bad stride usage
# when relaxed stride checking is enabled.
NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")
NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING
# XXX: ugly, we use a class to avoid calling some expensive functions in
# config.h/numpyconfig.h generation twice. I don't see a better way because
# distutils forces config.h generation inside an Extension class, and as such
# sharing configuration information between extensions is not easy.
# Using a pickled-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
self._check_complex = None
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
self._check_types = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
self._check_ieee_macros = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
self._check_complex = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
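# Hedged illustration of the memoization pattern above (hypothetical call
# sites): the first call runs the expensive check and caches a pickled copy;
# later calls return a fresh deep copy so callers cannot mutate the cache.
#
#   cocache = CallOnceOnly()
#   out1 = cocache.check_types(config_cmd, ext, build_dir)  # runs the check
#   out2 = cocache.check_types(config_cmd, ext, build_dir)  # unpickled copy
#   assert out1 == out2 and out1 is not out2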
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
return os.path.join(sys.prefix, "libs")
else:
return get_config_var('LIBDIR')
def is_npy_no_signal():
"""Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
header."""
return sys.platform == 'win32'
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
# Perhaps a fancier check is in order here.
# so that threads are only enabled if there
# are actually multiple CPUS? -- but
# threaded code can be nice even on a single
# CPU so that long-calculating code doesn't
# block.
return 'NPY_NOSMP' in os.environ
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
# Distutils hack on AMD64 on windows
print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
# On win32, force long double format string to be 'g', not
# 'Lg', since the MS runtime does not support long double whose
# size is > sizeof(double)
if a == "Intel" or a == "AMD64":
deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
def check_func(func_name):
return config.check_func(func_name, libraries=mathlibs,
decl=True, call=True)
def check_funcs_once(funcs_name):
decl = dict([(f, True) for f in funcs_name])
st = config.check_funcs_once(funcs_name, libraries=mathlibs,
decl=decl, call=decl)
if st:
moredefs.extend([(fname2def(f), 1) for f in funcs_name])
return st
def check_funcs(funcs_name):
# Use check_funcs_once first, and if it does not work, test func per
# func. Return success only if all the functions are available
if not check_funcs_once(funcs_name):
# Global check failed, check func per func
for f in funcs_name:
if check_func(f):
moredefs.append((fname2def(f), 1))
return 0
else:
return 1
#use_msvc = config.check_decl("_MSC_VER")
if not check_funcs_once(MANDATORY_FUNCS):
raise SystemError("One of the functions required to build numpy is not"
" available (the list is %s)." % str(MANDATORY_FUNCS))
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
# XXX: hack to circumvent cpp pollution from python: python puts its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own tests are correct
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
OPTIONAL_STDFUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
moredefs.append((fname2def(h).replace(".", "_"), 1))
for tup in OPTIONAL_INTRINSICS:
headers = None
if len(tup) == 2:
f, args, m = tup[0], tup[1], fname2def(tup[0])
elif len(tup) == 3:
f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])
else:
f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])
if config.check_func(f, decl=False, call=True, call_args=args,
headers=headers):
moredefs.append((m, 1))
for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
if config.check_gcc_function_attribute(dec, fn):
moredefs.append((fname2def(fn), 1))
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_gcc_variable_attribute(fn):
m = fn.replace("(", "_").replace(")", "_")
moredefs.append((fname2def(m), 1))
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
def check_complex(config, mathlibs):
priv = []
pub = []
try:
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365", stacklevel=2)
return priv, pub
except Exception:
# os.uname not available on all platforms. blanket except ugly but safe
pass
# Check for complex support
st = config.check_header('complex.h')
if st:
priv.append(('HAVE_COMPLEX_H', 1))
pub.append(('NPY_USE_C99_COMPLEX', 1))
for t in C99_COMPLEX_TYPES:
st = config.check_type(t, headers=["complex.h"])
if st:
pub.append(('NPY_HAVE_%s' % type2def(t), 1))
def check_prec(prec):
flist = [f + prec for f in C99_COMPLEX_FUNCS]
decl = dict([(f, True) for f in flist])
if not config.check_funcs_once(flist, call=decl, decl=decl,
libraries=mathlibs):
for f in flist:
if config.check_func(f, call=True, decl=True,
libraries=mathlibs):
priv.append((fname2def(f), 1))
else:
priv.extend([(fname2def(f), 1) for f in flist])
check_prec('')
check_prec('f')
check_prec('l')
return priv, pub
def check_ieee_macros(config):
priv = []
pub = []
macros = []
def _add_decl(f):
priv.append(fname2def("decl_%s" % f))
pub.append('NPY_%s' % fname2def("decl_%s" % f))
# XXX: hack to circumvent cpp pollution from python: python puts its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own tests are correct
_macros = ["isnan", "isinf", "signbit", "isfinite"]
for f in _macros:
py_symbol = fname2def("decl_%s" % f)
already_declared = config.check_decl(py_symbol,
headers=["Python.h", "math.h"])
if already_declared:
if config.check_macro_true(py_symbol,
headers=["Python.h", "math.h"]):
pub.append('NPY_%s' % fname2def("decl_%s" % f))
else:
macros.append(f)
# Normally, isnan and isinf are macro (C99), but some platforms only have
# func, or both func and macro version. Check for macro only, and define
# replacement ones if not found.
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
st = config.check_decl(f, headers=["Python.h", "math.h"])
if st:
_add_decl(f)
return priv, pub
def check_types(config_cmd, ext, build_dir):
private_defines = []
public_defines = []
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
expected = {'short': [2], 'int': [4], 'long': [8, 4],
'float': [4], 'double': [8], 'long double': [16, 12, 8],
'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],
'off_t': [8, 4]}
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
if not result:
python = 'python'
if '__pypy__' in sys.builtin_module_names:
python = 'pypy'
raise SystemError(
"Cannot compile 'Python.h'. Perhaps you need to "
"install {0}-dev|{0}-devel.".format(python))
res = config_cmd.check_header("endian.h")
if res:
private_defines.append(('HAVE_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
res = config_cmd.check_header("sys/endian.h")
if res:
private_defines.append(('HAVE_SYS_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))
# Check basic types sizes
for type in ('short', 'int', 'long'):
res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
headers=["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
if not already_declared and not type == 'long double':
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# Compute size of corresponding complex type: used to check that our
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
complex_def = "struct {%s __x; %s __y;}" % (type, type)
res = config_cmd.check_type_size(complex_def,
expected=[2 * x for x in expected[type]])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
expected=expected[type])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# We check declaration AND type because that's how distutils does it.
if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
library_dirs=[pythonlib_dir()],
expected=expected['PY_LONG_LONG'])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
res = config_cmd.check_type_size('long long',
expected=expected['long long'])
if res >= 0:
#private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'long long')
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
"Config wo CHAR_BIT is not supported"
", please contact the maintainers")
return private_defines, public_defines
def check_mathlib(config_cmd):
# Testing the C math library
mathlibs = []
mathlibs_choices = [[], ['m'], ['cpml']]
mathlib = os.environ.get('MATHLIB')
if mathlib:
mathlibs_choices.insert(0, mathlib.split(','))
for libs in mathlibs_choices:
if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
mathlibs = libs
break
else:
raise EnvironmentError("math library missing; rerun "
"setup.py after setting the "
"MATHLIB env variable")
return mathlibs
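# Hedged usage note: MATHLIB is a comma-separated list of library names tried
# before the built-in choices, e.g. a hypothetical invocation:
#
#   MATHLIB=m python setup.py build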
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
if config.check_compiler_gcc4():
return '__attribute__((visibility("hidden")))'
else:
return ''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
from numpy.distutils.system_info import get_info
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
codegen_dir = join(local_dir, 'code_generators')
if is_released(config):
warnings.simplefilter('error', MismatchCAPIWarning)
# Check whether we have a mismatch between the set C API VERSION and the
# actual C API VERSION
check_api_version(C_API_VERSION, codegen_dir)
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = npy_load_module('_'.join(n.split('.')),
generate_umath_py, ('.py', 'U', 1))
header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
def generate_config_h(ext, build_dir):
target = join(build_dir, header_dir, 'config.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
# Check math library and C99 math funcs availability
mathlibs = check_mathlib(config_cmd)
moredefs.append(('MATHLIB', ','.join(mathlibs)))
check_math_capabilities(config_cmd, moredefs, mathlibs)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
# Signal check
if is_npy_no_signal():
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
if sys.platform == 'win32' or os.name == 'nt':
win32_checks(moredefs)
# C99 restrict keyword
moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))
# Inline check
inline = config_cmd.check_inline()
# Use relaxed stride checking
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Use bogus stride debug aid when relaxed strides are enabled
if NPY_RELAXED_STRIDES_DEBUG:
moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
# Get long double representation
if sys.platform != 'darwin':
rep = check_long_double_representation(config_cmd)
if rep in ['INTEL_EXTENDED_12_BYTES_LE',
'INTEL_EXTENDED_16_BYTES_LE',
'MOTOROLA_EXTENDED_12_BYTES_BE',
'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
else:
raise ValueError("Unrecognized long double format: %s" % rep)
# Py3K check
if sys.version_info[0] == 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# define inline to our keyword, or nothing
target_f.write('#ifndef __cplusplus\n')
if inline == 'inline':
target_f.write('/* #undef inline */\n')
else:
target_f.write('#define inline %s\n' % inline)
target_f.write('#endif\n')
# add the guard to make sure config.h is never included directly,
# but always through npy_config.h
target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")
target_f.close()
print('File:', target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
else:
mathlibs = []
target_f = open(target)
for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
target_f.close()
# Ugly: this can be called within a library and not an extension,
# in which case there is no `libraries` attribute (and none is
# needed).
if hasattr(ext, 'libraries'):
ext.libraries.extend(mathlibs)
incl_dir = os.path.dirname(target)
if incl_dir not in config.numpy_include_dirs:
config.numpy_include_dirs.append(incl_dir)
return target
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# put private include directory in build_dir on search path
# allows using code generation in headers
config.add_include_dirs(join(build_dir, "src", "private"))
config.add_include_dirs(join(build_dir, "src", "npymath"))
target = join(build_dir, header_dir, '_numpyconfig.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
if is_npy_no_signal():
moredefs.append(('NPY_NO_SIGNAL', 1))
if is_npy_no_smp():
moredefs.append(('NPY_NO_SMP', 1))
else:
moredefs.append(('NPY_NO_SMP', 0))
mathlibs = check_mathlib(config_cmd)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
if NPY_RELAXED_STRIDES_DEBUG:
moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
# Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
hidden_visibility = visibility_define(config_cmd)
moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
# Add the C API/ABI versions
moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# Define __STDC_FORMAT_MACROS
target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
target_f.close()
# Dump the numpyconfig.h header to stdout
print('File: %s' % target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
config.add_data_files((header_dir, target))
return target
def generate_api_func(module_name):
def generate_api(ext, build_dir):
script = join(codegen_dir, module_name + '.py')
sys.path.insert(0, codegen_dir)
try:
m = __import__(module_name)
log.info('executing %s', script)
h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
finally:
del sys.path[0]
config.add_data_files((header_dir, h_file),
(header_dir, doc_file))
return (h_file,)
return generate_api
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
config.add_include_dirs(join(local_dir, "src", "private"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
config.add_data_files('include/numpy/*.h')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this macro indicates that Numpy build is in process
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
if sys.platform[:3] == "aix":
config.add_define_macros([("_LARGE_FILES", None)])
else:
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
join(codegen_dir, 'genapi.py'),
]
#######################################################################
# dummy module #
#######################################################################
# npymath needs the config.h and numpyconfig.h files to be generated, but
# build_clib cannot handle generate_config_h and generate_numpyconfig_h
# (don't ask). Because clib are generated before extensions, we have to
# explicitly add an extension which has generate_config_h and
# generate_numpyconfig_h as sources *before* adding npymath.
config.add_extension('_dummy',
sources=[join('src', 'dummymodule.c'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api]
)
#######################################################################
# npymath library #
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
# update the substitution dictionary during the npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
# (avoid late errors with MATHLIB which are confusing if the
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
subst_dict["posix_mathlib"] = posix_mlib
subst_dict["msvc_mathlib"] = msvc_mlib
npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),
join('src', 'npymath', 'npy_math.c'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')
]
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib',
build_info={'include_dirs' : []}) # empty list required for creating npy_math_internal.h
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
subst_dict)
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
subst_dict)
#######################################################################
# npysort library #
#######################################################################
# This library is created for the build but it is not installed
npysort_sources = [join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
join('src', 'private', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'private', 'npy_binsearch.h.src'),
join('src', 'npysort', 'binsearch.c.src'),
]
config.add_library('npysort',
sources=npysort_sources,
include_dirs=[])
#######################################################################
# multiarray module #
#######################################################################
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'array_assign.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'cblasfuncs.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
join('src', 'multiarray', 'conversion_utils.h'),
join('src', 'multiarray', 'ctors.h'),
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
join('src', 'multiarray', 'mapping.h'),
join('src', 'multiarray', 'methods.h'),
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'multiarray', 'numpyos.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'strfuncs.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
join('src', 'private', 'npy_config.h'),
join('src', 'private', 'templ_common.h.src'),
join('src', 'private', 'lowlevel_strided_loops.h'),
join('src', 'private', 'mem_overlap.h'),
join('src', 'private', 'ufunc_override.h'),
join('src', 'private', 'binop_override.h'),
join('src', 'private', 'npy_extint128.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
join('include', 'numpy', 'arrayscalars.h'),
join('include', 'numpy', 'noprefix.h'),
join('include', 'numpy', 'npy_interrupt.h'),
join('include', 'numpy', 'npy_3kcompat.h'),
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('include', 'numpy', 'npy_common.h'),
join('include', 'numpy', 'npy_os.h'),
join('include', 'numpy', 'utils.h'),
join('include', 'numpy', 'ndarrayobject.h'),
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
            # add library sources as distutils does not consider library
            # dependencies
] + npysort_sources + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'compiled_base.c'),
join('src', 'multiarray', 'common.c'),
join('src', 'multiarray', 'convert.c'),
join('src', 'multiarray', 'convert_datatype.c'),
join('src', 'multiarray', 'conversion_utils.c'),
join('src', 'multiarray', 'ctors.c'),
join('src', 'multiarray', 'datetime.c'),
join('src', 'multiarray', 'datetime_strings.c'),
join('src', 'multiarray', 'datetime_busday.c'),
join('src', 'multiarray', 'datetime_busdaycal.c'),
join('src', 'multiarray', 'descriptor.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
join('src', 'multiarray', 'item_selection.c'),
join('src', 'multiarray', 'iterators.c'),
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
join('src', 'multiarray', 'mapping.c'),
join('src', 'multiarray', 'methods.c'),
join('src', 'multiarray', 'multiarraymodule.c'),
join('src', 'multiarray', 'nditer_templ.c.src'),
join('src', 'multiarray', 'nditer_api.c'),
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
join('src', 'multiarray', 'numpyos.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'strfuncs.c'),
join('src', 'multiarray', 'temp_elide.c'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c'),
join('src', 'multiarray', 'vdot.c'),
join('src', 'private', 'templ_common.h.src'),
join('src', 'private', 'mem_overlap.c'),
join('src', 'private', 'ufunc_override.c'),
]
blas_info = get_info('blas_opt', 0)
if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
extra_info = blas_info
# These files are also in MANIFEST.in so that they are always in
# the source distribution independently of HAVE_CBLAS.
multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
join('src', 'multiarray', 'python_xerbla.c'),
])
if uses_accelerate_framework(blas_info):
multiarray_src.extend(get_sgemv_fix())
else:
extra_info = {}
config.add_extension('multiarray',
sources=multiarray_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py')],
depends=deps + multiarray_deps,
libraries=['npymath', 'npysort'],
extra_info=extra_info)
#######################################################################
# umath module #
#######################################################################
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
f = open(target, 'w')
f.write(generate_umath.make_code(generate_umath.defdict,
generate_umath.__file__))
f.close()
return []
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c'),
join('src', 'umath', 'override.c'),
join('src', 'private', 'mem_overlap.c'),
join('src', 'private', 'ufunc_override.c')]
umath_deps = [
generate_umath_py,
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'private', 'templ_common.h.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'override.h'),
join(codegen_dir, 'generate_ufunc_api.py'),
join('src', 'private', 'lowlevel_strided_loops.h'),
join('src', 'private', 'mem_overlap.h'),
join('src', 'private', 'ufunc_override.h'),
join('src', 'private', 'binop_override.h')] + npymath_sources
config.add_extension('umath',
sources=umath_src +
[generate_config_h,
generate_numpyconfig_h,
generate_umath_c,
generate_ufunc_api],
depends=deps + umath_deps,
libraries=['npymath'],
)
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('umath_tests',
sources=[join('src', 'umath', 'umath_tests.c.src')])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('test_rational',
sources=[join('src', 'umath', 'test_rational.c.src')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('struct_ufunc_test',
sources=[join('src', 'umath', 'struct_ufunc_test.c.src')])
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('multiarray_tests',
sources=[join('src', 'multiarray', 'multiarray_tests.c.src'),
join('src', 'private', 'mem_overlap.c')],
depends=[join('src', 'private', 'mem_overlap.h'),
join('src', 'private', 'npy_extint128.h')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('operand_flag_tests',
sources=[join('src', 'umath', 'operand_flag_tests.c.src')])
config.add_data_dir('tests')
config.add_data_dir('tests/data')
config.make_svn_version_py()
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
bringingheavendown/numpy
|
numpy/core/setup.py
|
Python
|
bsd-3-clause
| 40,820
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
CategoricalDtype,
DataFrame,
NaT,
Series,
Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
class TestUpdate:
def test_update(self):
s = Series([1.5, np.nan, 3.0, 4.0, np.nan])
s2 = Series([np.nan, 3.5, np.nan, 5.0])
s.update(s2)
expected = Series([1.5, 3.5, 3.0, 5.0, np.nan])
tm.assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame(
[[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"other, dtype, expected",
[
# other is int
([61, 63], "int32", Series([10, 61, 12], dtype="int32")),
([61, 63], "int64", Series([10, 61, 12])),
([61, 63], float, Series([10.0, 61.0, 12.0])),
([61, 63], object, Series([10, 61, 12], dtype=object)),
# other is float, but can be cast to int
([61.0, 63.0], "int32", Series([10, 61, 12], dtype="int32")),
([61.0, 63.0], "int64", Series([10, 61, 12])),
([61.0, 63.0], float, Series([10.0, 61.0, 12.0])),
([61.0, 63.0], object, Series([10, 61.0, 12], dtype=object)),
            # other is float, cannot be cast to int
([61.1, 63.1], "int32", Series([10.0, 61.1, 12.0])),
([61.1, 63.1], "int64", Series([10.0, 61.1, 12.0])),
([61.1, 63.1], float, Series([10.0, 61.1, 12.0])),
([61.1, 63.1], object, Series([10, 61.1, 12], dtype=object)),
# other is object, cannot be cast
([(61,), (63,)], "int32", Series([10, (61,), 12])),
([(61,), (63,)], "int64", Series([10, (61,), 12])),
([(61,), (63,)], float, Series([10.0, (61,), 12.0])),
([(61,), (63,)], object, Series([10, (61,), 12])),
],
)
def test_update_dtypes(self, other, dtype, expected):
ser = Series([10, 11, 12], dtype=dtype)
other = Series(other, index=[1, 3])
ser.update(other)
tm.assert_series_equal(ser, expected)
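    # Hedged summary of the parametrized cases above: values from `other` are
    # cast to the calling Series' dtype when the cast is lossless; otherwise
    # the Series itself is upcast (to float, or to object for uncastable
    # values) to hold the updated entries.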
@pytest.mark.parametrize(
"series, other, expected",
[
# update by key
(
Series({"a": 1, "b": 2, "c": 3, "d": 4}),
{"b": 5, "c": np.nan},
Series({"a": 1, "b": 5, "c": 3, "d": 4}),
),
# update by position
(Series([1, 2, 3, 4]), [np.nan, 5, 1], Series([1, 5, 1, 4])),
],
)
def test_update_from_non_series(self, series, other, expected):
# GH 33215
series.update(other)
tm.assert_series_equal(series, expected)
@pytest.mark.parametrize(
"data, other, expected, dtype",
[
(["a", None], [None, "b"], ["a", "b"], "string"),
pytest.param(
["a", None],
[None, "b"],
["a", "b"],
"arrow_string",
marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
),
([1, None], [None, 2], [1, 2], "Int64"),
([True, None], [None, False], [True, False], "boolean"),
(
["a", None],
[None, "b"],
["a", "b"],
CategoricalDtype(categories=["a", "b"]),
),
(
[Timestamp(year=2020, month=1, day=1, tz="Europe/London"), NaT],
[NaT, Timestamp(year=2020, month=1, day=1, tz="Europe/London")],
[Timestamp(year=2020, month=1, day=1, tz="Europe/London")] * 2,
"datetime64[ns, Europe/London]",
),
],
)
def test_update_extension_array_series(self, data, other, expected, dtype):
result = Series(data, dtype=dtype)
other = Series(other, dtype=dtype)
expected = Series(expected, dtype=dtype)
result.update(other)
tm.assert_series_equal(result, expected)
def test_update_with_categorical_type(self):
# GH 25744
dtype = CategoricalDtype(["a", "b", "c", "d"])
s1 = Series(["a", "b", "c"], index=[1, 2, 3], dtype=dtype)
s2 = Series(["b", "a"], index=[1, 2], dtype=dtype)
s1.update(s2)
result = s1
expected = Series(["b", "a", "c"], index=[1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
|
datapythonista/pandas
|
pandas/tests/series/methods/test_update.py
|
Python
|
bsd-3-clause
| 4,683
|
from test.support import run_unittest
from test.support.import_helper import unload, CleanImport
from test.support.warnings_helper import check_warnings
import unittest
import sys
import importlib
from importlib.util import spec_from_file_location
import pkgutil
import os
import os.path
import tempfile
import shutil
import zipfile
# Note: pkgutil.walk_packages is currently tested in test_runpy. This is
# a hack to get a major issue resolved for 3.3b2. Longer term, it should
# be moved back here, perhaps by factoring out the helper code for
# creating interesting package layouts to a separate module.
# Issue #15348 declares this is indeed a dodgy hack ;)
class PkgutilTests(unittest.TestCase):
def setUp(self):
self.dirname = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.dirname)
sys.path.insert(0, self.dirname)
def tearDown(self):
del sys.path[0]
def test_getdata_filesys(self):
pkg = 'test_getdata_filesys'
# Include a LF and a CRLF, to test that binary data is read back
RESOURCE_DATA = b'Hello, world!\nSecond line\r\nThird line'
# Make a package with some resources
package_dir = os.path.join(self.dirname, pkg)
os.mkdir(package_dir)
        # Empty __init__.py
f = open(os.path.join(package_dir, '__init__.py'), "wb")
f.close()
# Resource files, res.txt, sub/res.txt
f = open(os.path.join(package_dir, 'res.txt'), "wb")
f.write(RESOURCE_DATA)
f.close()
os.mkdir(os.path.join(package_dir, 'sub'))
f = open(os.path.join(package_dir, 'sub', 'res.txt'), "wb")
f.write(RESOURCE_DATA)
f.close()
# Check we can read the resources
res1 = pkgutil.get_data(pkg, 'res.txt')
self.assertEqual(res1, RESOURCE_DATA)
res2 = pkgutil.get_data(pkg, 'sub/res.txt')
self.assertEqual(res2, RESOURCE_DATA)
del sys.modules[pkg]
def test_getdata_zipfile(self):
zip = 'test_getdata_zipfile.zip'
pkg = 'test_getdata_zipfile'
# Include a LF and a CRLF, to test that binary data is read back
RESOURCE_DATA = b'Hello, world!\nSecond line\r\nThird line'
# Make a package with some resources
zip_file = os.path.join(self.dirname, zip)
z = zipfile.ZipFile(zip_file, 'w')
        # Empty __init__.py
z.writestr(pkg + '/__init__.py', "")
# Resource files, res.txt, sub/res.txt
z.writestr(pkg + '/res.txt', RESOURCE_DATA)
z.writestr(pkg + '/sub/res.txt', RESOURCE_DATA)
z.close()
# Check we can read the resources
sys.path.insert(0, zip_file)
res1 = pkgutil.get_data(pkg, 'res.txt')
self.assertEqual(res1, RESOURCE_DATA)
res2 = pkgutil.get_data(pkg, 'sub/res.txt')
self.assertEqual(res2, RESOURCE_DATA)
names = []
for moduleinfo in pkgutil.iter_modules([zip_file]):
self.assertIsInstance(moduleinfo, pkgutil.ModuleInfo)
names.append(moduleinfo.name)
self.assertEqual(names, ['test_getdata_zipfile'])
del sys.path[0]
del sys.modules[pkg]
def test_unreadable_dir_on_syspath(self):
# issue7367 - walk_packages failed if unreadable dir on sys.path
package_name = "unreadable_package"
d = os.path.join(self.dirname, package_name)
# this does not appear to create an unreadable dir on Windows
# but the test should not fail anyway
os.mkdir(d, 0)
self.addCleanup(os.rmdir, d)
for t in pkgutil.walk_packages(path=[self.dirname]):
self.fail("unexpected package found")
def test_walkpackages_filesys(self):
pkg1 = 'test_walkpackages_filesys'
pkg1_dir = os.path.join(self.dirname, pkg1)
os.mkdir(pkg1_dir)
f = open(os.path.join(pkg1_dir, '__init__.py'), "wb")
f.close()
os.mkdir(os.path.join(pkg1_dir, 'sub'))
f = open(os.path.join(pkg1_dir, 'sub', '__init__.py'), "wb")
f.close()
f = open(os.path.join(pkg1_dir, 'sub', 'mod.py'), "wb")
f.close()
# Now, to juice it up, let's add the opposite packages, too.
pkg2 = 'sub'
pkg2_dir = os.path.join(self.dirname, pkg2)
os.mkdir(pkg2_dir)
f = open(os.path.join(pkg2_dir, '__init__.py'), "wb")
f.close()
os.mkdir(os.path.join(pkg2_dir, 'test_walkpackages_filesys'))
f = open(os.path.join(pkg2_dir, 'test_walkpackages_filesys', '__init__.py'), "wb")
f.close()
f = open(os.path.join(pkg2_dir, 'test_walkpackages_filesys', 'mod.py'), "wb")
f.close()
expected = [
'sub',
'sub.test_walkpackages_filesys',
'sub.test_walkpackages_filesys.mod',
'test_walkpackages_filesys',
'test_walkpackages_filesys.sub',
'test_walkpackages_filesys.sub.mod',
]
        actual = [e[1] for e in pkgutil.walk_packages([self.dirname])]
self.assertEqual(actual, expected)
for pkg in expected:
if pkg.endswith('mod'):
continue
del sys.modules[pkg]
def test_walkpackages_zipfile(self):
"""Tests the same as test_walkpackages_filesys, only with a zip file."""
zip = 'test_walkpackages_zipfile.zip'
pkg1 = 'test_walkpackages_zipfile'
pkg2 = 'sub'
zip_file = os.path.join(self.dirname, zip)
z = zipfile.ZipFile(zip_file, 'w')
z.writestr(pkg2 + '/__init__.py', "")
z.writestr(pkg2 + '/' + pkg1 + '/__init__.py', "")
z.writestr(pkg2 + '/' + pkg1 + '/mod.py', "")
z.writestr(pkg1 + '/__init__.py', "")
z.writestr(pkg1 + '/' + pkg2 + '/__init__.py', "")
z.writestr(pkg1 + '/' + pkg2 + '/mod.py', "")
z.close()
sys.path.insert(0, zip_file)
expected = [
'sub',
'sub.test_walkpackages_zipfile',
'sub.test_walkpackages_zipfile.mod',
'test_walkpackages_zipfile',
'test_walkpackages_zipfile.sub',
'test_walkpackages_zipfile.sub.mod',
]
        actual = [e[1] for e in pkgutil.walk_packages([zip_file])]
self.assertEqual(actual, expected)
del sys.path[0]
for pkg in expected:
if pkg.endswith('mod'):
continue
del sys.modules[pkg]
def test_walk_packages_raises_on_string_or_bytes_input(self):
str_input = 'test_dir'
with self.assertRaises((TypeError, ValueError)):
list(pkgutil.walk_packages(str_input))
bytes_input = b'test_dir'
with self.assertRaises((TypeError, ValueError)):
list(pkgutil.walk_packages(bytes_input))
def test_name_resolution(self):
import logging
import logging.handlers
success_cases = (
('os', os),
('os.path', os.path),
('os.path:pathsep', os.path.pathsep),
('logging', logging),
('logging:', logging),
('logging.handlers', logging.handlers),
('logging.handlers:', logging.handlers),
('logging.handlers:SysLogHandler', logging.handlers.SysLogHandler),
('logging.handlers.SysLogHandler', logging.handlers.SysLogHandler),
('logging.handlers:SysLogHandler.LOG_ALERT',
logging.handlers.SysLogHandler.LOG_ALERT),
('logging.handlers.SysLogHandler.LOG_ALERT',
logging.handlers.SysLogHandler.LOG_ALERT),
('builtins.int', int),
('builtins:int', int),
('builtins.int.from_bytes', int.from_bytes),
('builtins:int.from_bytes', int.from_bytes),
('builtins.ZeroDivisionError', ZeroDivisionError),
('builtins:ZeroDivisionError', ZeroDivisionError),
('os:path', os.path),
)
failure_cases = (
(None, TypeError),
(1, TypeError),
(2.0, TypeError),
(True, TypeError),
('', ValueError),
('?abc', ValueError),
('abc/foo', ValueError),
('foo', ImportError),
('os.foo', AttributeError),
('os.foo:', ImportError),
('os.pth:pathsep', ImportError),
('logging.handlers:NoSuchHandler', AttributeError),
('logging.handlers:SysLogHandler.NO_SUCH_VALUE', AttributeError),
('logging.handlers.SysLogHandler.NO_SUCH_VALUE', AttributeError),
('ZeroDivisionError', ImportError),
('os.path.9abc', ValueError),
('9abc', ValueError),
)
# add some Unicode package names to the mix.
unicode_words = ('\u0935\u092e\u0938',
'\xe9', '\xc8',
'\uc548\ub155\ud558\uc138\uc694',
'\u3055\u3088\u306a\u3089',
'\u3042\u308a\u304c\u3068\u3046',
'\u0425\u043e\u0440\u043e\u0448\u043e',
'\u0441\u043f\u0430\u0441\u0438\u0431\u043e',
'\u73b0\u4ee3\u6c49\u8bed\u5e38\u7528\u5b57\u8868')
for uw in unicode_words:
d = os.path.join(self.dirname, uw)
try:
os.makedirs(d, exist_ok=True)
except UnicodeEncodeError:
# When filesystem encoding cannot encode uw: skip this test
continue
# make an empty __init__.py file
f = os.path.join(d, '__init__.py')
with open(f, 'w') as f:
f.write('')
f.flush()
# now import the package we just created; clearing the caches is
# needed, otherwise the newly created package isn't found
importlib.invalidate_caches()
mod = importlib.import_module(uw)
success_cases += (uw, mod),
if len(uw) > 1:
failure_cases += (uw[:-1], ImportError),
# add an example with a Unicode digit at the start
failure_cases += ('\u0966\u0935\u092e\u0938', ValueError),
for s, expected in success_cases:
with self.subTest(s=s):
o = pkgutil.resolve_name(s)
self.assertEqual(o, expected)
for s, exc in failure_cases:
with self.subTest(s=s):
with self.assertRaises(exc):
pkgutil.resolve_name(s)
class PkgutilPEP302Tests(unittest.TestCase):
class MyTestLoader(object):
def create_module(self, spec):
return None
def exec_module(self, mod):
# Count how many times the module is reloaded
mod.__dict__['loads'] = mod.__dict__.get('loads', 0) + 1
def get_data(self, path):
return "Hello, world!"
class MyTestImporter(object):
def find_spec(self, fullname, path=None, target=None):
loader = PkgutilPEP302Tests.MyTestLoader()
return spec_from_file_location(fullname,
'<%s>' % loader.__class__.__name__,
loader=loader,
submodule_search_locations=[])
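    # Note (added for clarity): importlib treats a spec whose
    # submodule_search_locations is not None as a package, so the empty list
    # above makes the dummy modules importable as packages.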
def setUp(self):
sys.meta_path.insert(0, self.MyTestImporter())
def tearDown(self):
del sys.meta_path[0]
def test_getdata_pep302(self):
# Use a dummy finder/loader
self.assertEqual(pkgutil.get_data('foo', 'dummy'), "Hello, world!")
del sys.modules['foo']
def test_alreadyloaded(self):
# Ensure that get_data works without reloading - the "loads" module
# variable in the example loader should count how many times a reload
# occurs.
import foo
self.assertEqual(foo.loads, 1)
self.assertEqual(pkgutil.get_data('foo', 'dummy'), "Hello, world!")
self.assertEqual(foo.loads, 1)
del sys.modules['foo']
# These tests, especially the setup and cleanup, are hideous. They
# need to be cleaned up once issue 14715 is addressed.
class ExtendPathTests(unittest.TestCase):
def create_init(self, pkgname):
dirname = tempfile.mkdtemp()
sys.path.insert(0, dirname)
pkgdir = os.path.join(dirname, pkgname)
os.mkdir(pkgdir)
with open(os.path.join(pkgdir, '__init__.py'), 'w') as fl:
fl.write('from pkgutil import extend_path\n__path__ = extend_path(__path__, __name__)\n')
return dirname
def create_submodule(self, dirname, pkgname, submodule_name, value):
module_name = os.path.join(dirname, pkgname, submodule_name + '.py')
with open(module_name, 'w') as fl:
print('value={}'.format(value), file=fl)
def test_simple(self):
pkgname = 'foo'
dirname_0 = self.create_init(pkgname)
dirname_1 = self.create_init(pkgname)
self.create_submodule(dirname_0, pkgname, 'bar', 0)
self.create_submodule(dirname_1, pkgname, 'baz', 1)
import foo.bar
import foo.baz
# Ensure we read the expected values
self.assertEqual(foo.bar.value, 0)
self.assertEqual(foo.baz.value, 1)
# Ensure the path is set up correctly
self.assertEqual(sorted(foo.__path__),
sorted([os.path.join(dirname_0, pkgname),
os.path.join(dirname_1, pkgname)]))
# Cleanup
shutil.rmtree(dirname_0)
shutil.rmtree(dirname_1)
del sys.path[0]
del sys.path[0]
del sys.modules['foo']
del sys.modules['foo.bar']
del sys.modules['foo.baz']
# Another awful testing hack to be cleaned up once the test_runpy
# helpers are factored out to a common location
def test_iter_importers(self):
iter_importers = pkgutil.iter_importers
get_importer = pkgutil.get_importer
pkgname = 'spam'
modname = 'eggs'
dirname = self.create_init(pkgname)
pathitem = os.path.join(dirname, pkgname)
fullname = '{}.{}'.format(pkgname, modname)
sys.modules.pop(fullname, None)
sys.modules.pop(pkgname, None)
try:
self.create_submodule(dirname, pkgname, modname, 0)
importlib.import_module(fullname)
importers = list(iter_importers(fullname))
expected_importer = get_importer(pathitem)
for finder in importers:
spec = pkgutil._get_spec(finder, fullname)
loader = spec.loader
try:
loader = loader.loader
except AttributeError:
# For now we still allow raw loaders from
# find_module().
pass
self.assertIsInstance(finder, importlib.machinery.FileFinder)
self.assertEqual(finder, expected_importer)
self.assertIsInstance(loader,
importlib.machinery.SourceFileLoader)
self.assertIsNone(pkgutil._get_spec(finder, pkgname))
with self.assertRaises(ImportError):
list(iter_importers('invalid.module'))
with self.assertRaises(ImportError):
list(iter_importers('.spam'))
finally:
shutil.rmtree(dirname)
del sys.path[0]
try:
del sys.modules['spam']
del sys.modules['spam.eggs']
except KeyError:
pass
def test_mixed_namespace(self):
pkgname = 'foo'
dirname_0 = self.create_init(pkgname)
dirname_1 = self.create_init(pkgname)
self.create_submodule(dirname_0, pkgname, 'bar', 0)
# Turn this into a PEP 420 namespace package
os.unlink(os.path.join(dirname_0, pkgname, '__init__.py'))
self.create_submodule(dirname_1, pkgname, 'baz', 1)
import foo.bar
import foo.baz
# Ensure we read the expected values
self.assertEqual(foo.bar.value, 0)
self.assertEqual(foo.baz.value, 1)
# Ensure the path is set up correctly
self.assertEqual(sorted(foo.__path__),
sorted([os.path.join(dirname_0, pkgname),
os.path.join(dirname_1, pkgname)]))
# Cleanup
shutil.rmtree(dirname_0)
shutil.rmtree(dirname_1)
del sys.path[0]
del sys.path[0]
del sys.modules['foo']
del sys.modules['foo.bar']
del sys.modules['foo.baz']
# XXX: test .pkg files
class NestedNamespacePackageTest(unittest.TestCase):
def setUp(self):
self.basedir = tempfile.mkdtemp()
self.old_path = sys.path[:]
def tearDown(self):
sys.path[:] = self.old_path
shutil.rmtree(self.basedir)
def create_module(self, name, contents):
base, final = name.rsplit('.', 1)
base_path = os.path.join(self.basedir, base.replace('.', os.path.sep))
os.makedirs(base_path, exist_ok=True)
with open(os.path.join(base_path, final + ".py"), 'w') as f:
f.write(contents)
def test_nested(self):
pkgutil_boilerplate = (
'import pkgutil; '
'__path__ = pkgutil.extend_path(__path__, __name__)')
self.create_module('a.pkg.__init__', pkgutil_boilerplate)
self.create_module('b.pkg.__init__', pkgutil_boilerplate)
self.create_module('a.pkg.subpkg.__init__', pkgutil_boilerplate)
self.create_module('b.pkg.subpkg.__init__', pkgutil_boilerplate)
self.create_module('a.pkg.subpkg.c', 'c = 1')
self.create_module('b.pkg.subpkg.d', 'd = 2')
sys.path.insert(0, os.path.join(self.basedir, 'a'))
sys.path.insert(0, os.path.join(self.basedir, 'b'))
import pkg
self.addCleanup(unload, 'pkg')
self.assertEqual(len(pkg.__path__), 2)
import pkg.subpkg
self.addCleanup(unload, 'pkg.subpkg')
self.assertEqual(len(pkg.subpkg.__path__), 2)
from pkg.subpkg.c import c
from pkg.subpkg.d import d
self.assertEqual(c, 1)
self.assertEqual(d, 2)
class ImportlibMigrationTests(unittest.TestCase):
# With full PEP 302 support in the standard import machinery, the
# PEP 302 emulation in this module is in the process of being
# deprecated in favour of importlib proper
def check_deprecated(self):
return check_warnings(
("This emulation is deprecated and slated for removal in "
"Python 3.12; use 'importlib' instead",
DeprecationWarning))
def test_importer_deprecated(self):
with self.check_deprecated():
pkgutil.ImpImporter("")
def test_loader_deprecated(self):
with self.check_deprecated():
pkgutil.ImpLoader("", "", "", "")
def test_get_loader_avoids_emulation(self):
with check_warnings() as w:
self.assertIsNotNone(pkgutil.get_loader("sys"))
self.assertIsNotNone(pkgutil.get_loader("os"))
self.assertIsNotNone(pkgutil.get_loader("test.support"))
self.assertEqual(len(w.warnings), 0)
@unittest.skipIf(__name__ == '__main__', 'not compatible with __main__')
def test_get_loader_handles_missing_loader_attribute(self):
global __loader__
this_loader = __loader__
del __loader__
try:
with check_warnings() as w:
self.assertIsNotNone(pkgutil.get_loader(__name__))
self.assertEqual(len(w.warnings), 0)
finally:
__loader__ = this_loader
def test_get_loader_handles_missing_spec_attribute(self):
name = 'spam'
mod = type(sys)(name)
del mod.__spec__
with CleanImport(name):
sys.modules[name] = mod
loader = pkgutil.get_loader(name)
self.assertIsNone(loader)
def test_get_loader_handles_spec_attribute_none(self):
name = 'spam'
mod = type(sys)(name)
mod.__spec__ = None
with CleanImport(name):
sys.modules[name] = mod
loader = pkgutil.get_loader(name)
self.assertIsNone(loader)
def test_get_loader_None_in_sys_modules(self):
name = 'totally bogus'
sys.modules[name] = None
try:
loader = pkgutil.get_loader(name)
finally:
del sys.modules[name]
self.assertIsNone(loader)
def test_find_loader_missing_module(self):
name = 'totally bogus'
loader = pkgutil.find_loader(name)
self.assertIsNone(loader)
def test_find_loader_avoids_emulation(self):
with check_warnings() as w:
self.assertIsNotNone(pkgutil.find_loader("sys"))
self.assertIsNotNone(pkgutil.find_loader("os"))
self.assertIsNotNone(pkgutil.find_loader("test.support"))
self.assertEqual(len(w.warnings), 0)
def test_get_importer_avoids_emulation(self):
# We use an illegal path so *none* of the path hooks should fire
with check_warnings() as w:
self.assertIsNone(pkgutil.get_importer("*??"))
self.assertEqual(len(w.warnings), 0)
def test_iter_importers_avoids_emulation(self):
with check_warnings() as w:
for importer in pkgutil.iter_importers(): pass
self.assertEqual(len(w.warnings), 0)
def test_main():
run_unittest(PkgutilTests, PkgutilPEP302Tests, ExtendPathTests,
NestedNamespacePackageTest, ImportlibMigrationTests)
# this is necessary if test is run repeated (like when finding leaks)
import zipimport
import importlib
zipimport._zip_directory_cache.clear()
importlib.invalidate_caches()
if __name__ == '__main__':
test_main()
|
brython-dev/brython
|
www/src/Lib/test/test_pkgutil.py
|
Python
|
bsd-3-clause
| 21,886
|
from setuptools import setup, find_packages
setup(
name='django-citeit',
version='3.0.0',
packages=find_packages(exclude=['tests*']),
description='A Django app for the creation of an annotated bibliography.',
long_description=('Visit https://github.com/unt-libraries/django-citeit '
'for the latest documentation.'),
include_package_data=True,
url='https://github.com/unt-libraries/django-citeit',
author='University of North Texas Libraries',
author_email='mark.phillips@unt.edu',
license='BSD',
keywords=['django', 'annotated', 'bibliography'],
classifiers=[
'Natural Language :: English',
'Environment :: Web Environment',
'Framework :: Django :: 2.2',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
]
)
|
unt-libraries/django-citeit
|
setup.py
|
Python
|
bsd-3-clause
| 992
|
import sys
from setuptools import setup, find_packages
setup(
name='bluebird',
version='0.1.0',
author='Josh Bohde',
author_email='josh@joshbohde.com',
    description='bluebird is a client for Kestrel queues',
license='BSD',
packages=['bluebird', 'bluebird.thrift_kestrel'],
install_requires=[
'thrift >= 0.9.0',
],
classifiers=[
"License :: OSI Approved :: BSD License",
],
)
|
joshbohde/bluebird
|
setup.py
|
Python
|
bsd-3-clause
| 433
|
from datetime import date, datetime
from unittest import TestCase
from ccy import period, date2juldate, juldate2date, todate
from ccy import date2yyyymmdd, yyyymmdd2date
class PeriodTests(TestCase):
def testPeriod(self):
a = period('5Y')
self.assertEqual(a.years, 5)
b = period('1y3m')
self.assertEqual(b.years, 1)
self.assertEqual(b.months, 3)
c = period('-3m')
self.assertEqual(c.years, 0)
self.assertEqual(c.months, -3)
def testAdd(self):
a = period('4Y')
b = period('1Y3M')
c = a + b
self.assertEqual(c.years, 5)
self.assertEqual(c.months, 3)
def testAddString(self):
a = period('4y')
self.assertEqual(a+'3m', period('4y3m'))
self.assertEqual('3m'+a, period('4y3m'))
def testSubtract(self):
a = period('4Y')
b = period('1Y')
c = a - b
self.assertEqual(c.years, 3)
self.assertEqual(c.months, 0)
c = period('3Y') - period('1Y3M')
self.assertEqual(c.years, 1)
self.assertEqual(c.months, 9)
self.assertEqual(str(c), '1Y9M')
def testSubtractString(self):
a = period('4y')
self.assertEqual(a-'3m', period('3y9m'))
self.assertEqual('5y'-a, period('1y'))
self.assertEqual('3m'-a, period('-3y9m'))
def testCompare(self):
a = period('4Y')
b = period('4Y')
c = period('1Y2M')
self.assertTrue(a == b)
self.assertTrue(a >= b)
self.assertTrue(a <= b)
self.assertTrue(c <= a)
self.assertTrue(c < a)
self.assertFalse(c == a)
self.assertFalse(c >= b)
self.assertTrue(c > a-b)
def testWeek(self):
p = period('7d')
self.assertEqual(p.weeks, 1)
self.assertEqual(str(p), '1W')
p.add_weeks(3)
self.assertEqual(p.weeks, 4)
self.assertEqual(str(p), '4W')
self.assertFalse(p.isempty())
p = period('3w2d')
self.assertFalse(p.isempty())
self.assertEqual(p.weeks, 3)
self.assertEqual(str(p), '3W2D')
def testEmpty(self):
self.assertFalse(period('3y').isempty())
self.assertFalse(period('1m').isempty())
self.assertFalse(period('3d').isempty())
self.assertTrue(period().isempty())
def testAddperiod(self):
p = period('3m')
a = period('6m')
self.assertEqual(a.add_tenure(p), a)
self.assertEqual(str(a), '9M')
def testError(self):
self.assertRaises(ValueError, period, '5y6g')
def testSimple(self):
self.assertEqual(period('3m2y').simple(), '27M')
self.assertEqual(period('-3m2y').simple(), '-27M')
self.assertEqual(period('3d2m').simple(), '63D')
self.assertEqual(period('2y').simple(), '2Y')
class DateConverterTest(TestCase):
def setUp(self):
self.dates = [(date(2010, 6, 11), 40340, 20100611, 1276210800),
(date(2009, 4, 2), 39905, 20090402, 1238626800),
(date(1996, 2, 29), 35124, 19960229, 825552000),
(date(1970, 1, 1), 25569, 19700101, 0),
(date(1900, 1, 1), 1, 19000101, None)]
def testdate2JulDate(self):
for d, jd, y, ts in self.dates:
self.assertEqual(jd, date2juldate(d))
def testJulDate2Date(self):
for d, jd, y, ts in self.dates:
self.assertEqual(d, juldate2date(jd))
def testDate2YyyyMmDd(self):
for d, jd, y, ts in self.dates:
self.assertEqual(y, date2yyyymmdd(d))
def testYyyyMmDd2Date(self):
for d, jd, y, ts in self.dates:
self.assertEqual(d, yyyymmdd2date(y))
def test_datetime2Juldate(self):
jd = date2juldate(datetime(2013, 3, 8, 11, 20, 45))
self.assertAlmostEqual(jd, 41341.47274305556)
def test_Juldate2datetime(self):
dt = juldate2date(41341.47274305556)
dt2 = datetime(2013, 3, 8, 11, 20, 45)
self.assertEqual(dt, dt2)
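    # Worked check of the fractional day used above (illustrative note):
    # 11:20:45 -> (11*3600 + 20*60 + 45) / 86400 = 40845 / 86400 ~= 0.47274305556,
    # i.e. exactly the fraction appended to Julian day 41341.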
def test_string(self):
target = date(2014, 1, 5)
self.assertEqual(todate('2014 Jan 05'), target)
# def testDate2Timestamp(self):
# for d,jd,y,ts in self.dates:
# if ts is not None:
# self.assertEqual(ts,date2timestamp(d))
# def testTimestamp2Date(self):
# for d,jd,y,ts in self.dates:
# if ts is not None:
# self.assertEqual(d,timestamp2date(ts))
|
artisavotins/ccy
|
tests/datetests.py
|
Python
|
bsd-3-clause
| 4,493
|
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import authenticate
from utils import create_login_ticket
class LoginForm(forms.Form):
username = forms.CharField(max_length=30)
password = forms.CharField(widget=forms.PasswordInput)
#warn = forms.BooleanField(required=False) # TODO: Implement
lt = forms.CharField(widget=forms.HiddenInput, initial=create_login_ticket)
def __init__(self, service=None, renew=None, gateway=None, request=None, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.request = request
if service is not None:
self.fields['service'] = forms.CharField(widget=forms.HiddenInput, initial=service)
|
Nitron/django-cas-provider
|
cas_provider/forms.py
|
Python
|
bsd-3-clause
| 757
|
from __future__ import unicode_literals
import array
import fcntl
import signal
import six
import termios
import tty
def get_size(fileno):
# Thanks to fabric (fabfile.org), and
# http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
"""
Get the size of this pseudo terminal.
:param fileno: stdout.fileno()
:returns: A (rows, cols) tuple.
"""
# assert stdout.isatty()
# Buffer for the C call
buf = array.array(u'h' if six.PY3 else b'h', [0, 0, 0, 0])
# Do TIOCGWINSZ (Get)
fcntl.ioctl(fileno, termios.TIOCGWINSZ, buf, True)
# fcntl.ioctl(0, termios.TIOCGWINSZ, buf, True)
# Return rows, cols
return buf[0], buf[1]
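# Illustrative usage (a sketch, not part of the original module): on a POSIX
# terminal one would typically call
#
#     import sys
#     rows, cols = get_size(sys.stdout.fileno())
#
# which fills the buffer via the TIOCGWINSZ ioctl above and returns the
# current window dimensions.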
class raw_mode(object):
"""
::
with raw_mode(stdin):
''' the pseudo-terminal stdin is now used in raw mode '''
"""
def __init__(self, fileno):
self.fileno = fileno
self.attrs_before = termios.tcgetattr(fileno)
def __enter__(self):
        # NOTE: On OS X systems, using pty.setraw() fails. Therefore we are using this:
newattr = termios.tcgetattr(self.fileno)
newattr[tty.LFLAG] = self._patch(newattr[tty.LFLAG])
termios.tcsetattr(self.fileno, termios.TCSANOW, newattr)
def _patch(self, attrs):
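        # Clear ECHO (input echo), ICANON (canonical/line-buffered mode),
        # IEXTEN (extended input processing) and ISIG (signal characters,
        # e.g. Ctrl-C) to obtain raw, unbuffered input.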
return attrs & ~(termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
def __exit__(self, *a, **kw):
termios.tcsetattr(self.fileno, termios.TCSANOW, self.attrs_before)
class cooked_mode(raw_mode):
"""
    The opposite of ``raw_mode``::
with cooked_mode(stdin):
''' the pseudo-terminal stdin is now used in cooked mode. '''
"""
def _patch(self, attrs):
return attrs | (termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
class call_on_sigwinch(object):
"""
    Context manager which installs a SIGWINCH callback.
(This signal occurs when the terminal size changes.)
"""
def __init__(self, callback):
self.callback = callback
def __enter__(self):
self.previous_callback = signal.signal(signal.SIGWINCH, lambda *a: self.callback())
def __exit__(self, *a, **kw):
signal.signal(signal.SIGWINCH, self.previous_callback)
class EventHook(object):
"""
Event hook::
e = EventHook()
e += handler_function # Add event handler.
e.fire() # Fire event.
Thanks to Michael Foord:
http://www.voidspace.org.uk/python/weblog/arch_d7_2007_02_03.shtml#e616
"""
def __init__(self):
self.__handlers = []
def __iadd__(self, handler):
self.__handlers.append(handler)
return self
def __isub__(self, handler):
self.__handlers.remove(handler)
return self
def fire(self, *args, **keywargs):
for handler in self.__handlers:
handler(*args, **keywargs)
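# A minimal, hedged demo of EventHook (added for illustration, not part of the
# original module); run this file directly to see handlers fire in order:
if __name__ == '__main__':
    def _print_size(rows, cols):
        print('terminal is %d rows x %d cols' % (rows, cols))
    hook = EventHook()
    hook += _print_size      # register a handler
    hook.fire(24, 80)        # prints: terminal is 24 rows x 80 cols
    hook -= _print_size      # unregister it again
    hook.fire(24, 80)        # no handlers registered; prints nothing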
|
Carreau/python-prompt-toolkit
|
prompt_toolkit/utils.py
|
Python
|
bsd-3-clause
| 2,862
|
from .robotcontroller import RobotControllerBlock,RobotControllerIO
from klampt.model import trajectory
from klampt.io import loader
class TrajectoryPositionController(RobotControllerBlock):
"""A (robot) controller that takes in a trajectory and outputs the position
along the trajectory. If type is a 2-tuple, this will also output the
derivative of the trajectory"""
def __init__(self,traj,type=('qcmd','dqcmd')):
self.traj = traj
self.outputType = type
self.startTime = None
RobotControllerBlock.__init__(self)
for t in type:
self._outputs.addChannel(t)
def advance(self,**inputs):
t = inputs['t']
        if self.startTime is None:
self.startTime = t
t = t - self.startTime
if isinstance(self.outputType,(tuple,list)):
assert len(self.outputType)==2
return {self.outputType[0]:self.traj.eval(t),
self.outputType[1]:self.traj.deriv(t)}
else:
return {self.outputType:self.traj.eval(t)}
def __getstate__(self):
return {'startTime':self.startTime,'traj':loader.toJson(self.traj)}
def __setstate__(self,state):
self.startTime = state['startTime']
self.traj = loader.fromJson(state['traj'],'Trajectory')
def signal(self,type,*inputs):
if type=='reset':
self.startTime = None
class TrajectoryWithFeedforwardTorqueController(RobotControllerBlock):
"""A controller that takes in a joint trajectory and a feedforward torque
trajectory."""
def __init__(self,traj,torquetraj):
self.traj = traj
self.torquetraj = torquetraj
self.startTime = None
RobotControllerBlock.__init__(self)
def advance(self,**inputs):
api = RobotControllerIO(inputs)
t = api.time()
        if self.startTime is None:
self.startTime = t
t = t - self.startTime
return api.makeFeedforwardPIDCommand(self.traj.eval(t),self.traj.deriv(t),self.torquetraj.eval(t))
def __getstate__(self):
        return {'startTime':self.startTime,'traj':loader.toJson(self.traj),'torquetraj':loader.toJson(self.torquetraj)}
def __setstate__(self,state):
self.startTime = state['startTime']
self.traj = loader.fromJson(state['traj'],'Trajectory')
self.torquetraj = loader.fromJson(state['torquetraj'],'Trajectory')
def signal(self,type,**inputs):
if type=='reset':
self.startTime = None
def make(robot,file="mypath.path",ff_torque_file=None):
    if robot is None:
l = trajectory.Trajectory()
else:
l = trajectory.RobotTrajectory(robot)
l.load(file)
if ff_torque_file is not None:
tcmd = trajectory.Trajectory()
tcmd.load(ff_torque_file)
        return TrajectoryWithFeedforwardTorqueController(l,tcmd)
return TrajectoryPositionController(l)
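# Hedged usage sketch (illustrative only; assumes a trajectory file exists at
# the given path -- "mypath.path" is just the default placeholder above):
#
#     controller = make(None, "mypath.path")
#     out = controller.advance(t=0.5)   # e.g. {'qcmd': [...], 'dqcmd': [...]}
#
# Passing ff_torque_file additionally loads a torque trajectory and returns a
# TrajectoryWithFeedforwardTorqueController instead.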
|
krishauser/Klampt
|
Python/klampt/control/blocks/trajectory_tracking.py
|
Python
|
bsd-3-clause
| 2,931
|
# Standard imports
import jsonpickle as jpickle
import logging
# Our imports
import emission.storage.timeseries.abstract_timeseries as esta
import emission.analysis.modelling.tour_model.similarity as similarity
import emission.analysis.modelling.tour_model.data_preprocessing as preprocess
RADIUS=500
def loadModelStage(filename):
import jsonpickle.ext.numpy as jsonpickle_numpy
jsonpickle_numpy.register_handlers()
model = loadModel(filename)
return model
def loadModel(filename):
fd = open(filename, "r")
all_model = fd.read()
all_model = jpickle.loads(all_model)
fd.close()
return all_model
def in_bin(bin_location_features,new_trip_location_feat,radius):
start_b_lon = new_trip_location_feat[0]
start_b_lat = new_trip_location_feat[1]
end_b_lon = new_trip_location_feat[2]
end_b_lat = new_trip_location_feat[3]
for feat in bin_location_features:
start_a_lon = feat[0]
start_a_lat = feat[1]
end_a_lon = feat[2]
end_a_lat = feat[3]
start = similarity.within_radius(start_a_lat, start_a_lon, start_b_lat, start_b_lon,radius)
end = similarity.within_radius(end_a_lat, end_a_lon, end_b_lat, end_b_lon, radius)
if start and end:
continue
else:
return False
return True
def find_bin(trip, bin_locations, radius):
trip_feat = preprocess.extract_features([trip])[0]
trip_loc_feat = trip_feat[0:4]
first_round_label_set = list(bin_locations.keys())
sel_fl = None
for fl in first_round_label_set:
# extract location features of selected bin
sel_loc_feat = bin_locations[fl]
        # Check if the start/end locations of the new trip and every start/end
        # location in this bin are within the radius. If so, the new trip falls
        # in this bin, and we then predict the second round label of the new
        # trip using this bin's model
if in_bin(sel_loc_feat, trip_loc_feat, radius):
sel_fl = fl
break
if not sel_fl:
logging.debug(f"sel_fl = {sel_fl}, early return")
return -1
return sel_fl
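# Illustrative sketch (hypothetical coordinates, added for clarity): a trip
# whose start/end match every start/end already recorded in a bin falls into
# that bin, e.g.
#
#     feat = [-122.08, 37.39, -122.06, 37.40]  # start lon, start lat, end lon, end lat
#     in_bin([feat], feat, 500)                # -> True, assuming within_radius
#                                              #    treats zero distance as a match
#
# find_bin() then returns that bin's first-round label, or -1 if nothing matches.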
# Predict labels and also return the number of trips in the matched cluster
def predict_labels_with_n(trip):
user = trip['user_id']
logging.debug(f"At stage: extracting features")
trip_feat = preprocess.extract_features([trip])[0]
trip_loc_feat = trip_feat[0:4]
logging.debug(f"At stage: loading model")
try:
# load locations of bins(1st round of clustering)
# e.g.{'0': [[start lon1, start lat1, end lon1, end lat1],[start lon, start lat, end lon, end lat]]}
# another explanation: -'0': label from the 1st round
# - the value of key '0': all trips that in this bin
# - for every trip: the coordinates of start/end locations
bin_locations = loadModelStage('locations_first_round_' + str(user))
# load user labels in all clusters
# assume that we have 1 cluster(bin) from the 1st round of clustering, which has label '0',
# and we have 1 cluster from the 2nd round, which has label '1'
# the value of key '0' contains all 2nd round clusters
# the value of key '1' contains all user labels and probabilities in this cluster
# e.g. {'0': [{'1': [{'labels': {'mode_confirm': 'shared_ride', 'purpose_confirm': 'home', 'replaced_mode': 'drove_alone'}}]}]}
user_labels = loadModelStage('user_labels_first_round_' + str(user))
# Get the number of trips in each cluster from the number of locations in each bin
# This is a bit hacky; in the future, we might want the model stage to save a metadata file with this and potentially other information
cluster_sizes = {k: len(bin_locations[k]) for k in bin_locations}
except IOError as e:
logging.info(f"No models found for {user}, no prediction")
return [], -1
logging.debug(f"At stage: first round prediction")
pred_bin = find_bin(trip, bin_locations, RADIUS)
logging.debug(f"At stage: matched with bin {pred_bin}")
if pred_bin == -1:
logging.info(f"No match found for {trip['data']['start_fmt_time']} early return")
return [], 0
user_input_pred_list = user_labels[pred_bin]
this_cluster_size = cluster_sizes[pred_bin]
logging.debug(f"At stage: looked up user input {user_input_pred_list}")
return user_input_pred_list, this_cluster_size
# For backwards compatibility
def predict_labels(trip):
return predict_labels_with_n(trip)[0]
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
level=logging.DEBUG)
all_users = esta.TimeSeries.get_uuid_list()
# case 1: the new trip matches a bin from the 1st round and a cluster from the 2nd round
user = all_users[0]
trips = preprocess.read_data(user)
filter_trips = preprocess.filter_data(trips, RADIUS)
new_trip = filter_trips[4]
# result is [{'labels': {'mode_confirm': 'shared_ride', 'purpose_confirm': 'church', 'replaced_mode': 'drove_alone'},
# 'p': 0.9333333333333333}, {'labels': {'mode_confirm': 'shared_ride', 'purpose_confirm': 'entertainment',
# 'replaced_mode': 'drove_alone'}, 'p': 0.06666666666666667}]
pl = predict_labels(new_trip)
assert len(pl) > 0, f"Invalid prediction {pl}"
# case 2: no existing files for the user who has the new trip:
    # 1. the user is invalid (< 10 existing fully labeled trips, or < 50% of trips fully labeled)
# 2. the user doesn't have common trips
user = all_users[1]
trips = preprocess.read_data(user)
new_trip = trips[0]
# result is []
pl = predict_labels(new_trip)
assert len(pl) == 0, f"Invalid prediction {pl}"
# case3: the new trip is novel trip(doesn't fall in any 1st round bins)
user = all_users[0]
trips = preprocess.read_data(user)
    filter_trips = preprocess.filter_data(trips, RADIUS)
new_trip = filter_trips[0]
# result is []
pl = predict_labels(new_trip)
assert len(pl) == 0, f"Invalid prediction {pl}"
# case 4: the new trip falls in a 1st round bin, but predict to be a new cluster in the 2nd round
# result is []
# no example for now
|
e-mission/e-mission-server
|
emission/analysis/modelling/tour_model_first_only/load_predict.py
|
Python
|
bsd-3-clause
| 6,342
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# orientdbcli documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import orientdbcli
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OrientDB cli'
copyright = u'2015, Ricardo M. Vilchis'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = orientdbcli.__version__
# The full version, including alpha/beta/rc tags.
release = orientdbcli.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'orientdbclidoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'orientdbcli.tex',
u'OrientDB cli Documentation',
u'Ricardo M. Vilchis', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'orientdbcli',
u'OrientDB cli Documentation',
[u'Ricardo M. Vilchis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'orientdbcli',
u'OrientDB cli Documentation',
u'Ricardo M. Vilchis',
'orientdbcli',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
ajkaanbal/orientdbcli
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,467
|
from enum import Enum
from django.contrib.gis.db import models
from django.template.defaulttags import register
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from emstrack.models import UpdatedByModel
from emstrack.util import make_choices
@register.filter
def get_equipment_type(type):
return EquipmentType[type].value
class EquipmentType(Enum):
B = _('Boolean')
I = _('Integer')
S = _('String')
EquipmentTypeDefaults = {
EquipmentType.B.name: "True",
EquipmentType.I.name: "0",
EquipmentType.S.name: ""
}
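# For example, EquipmentTypeDefaults[EquipmentType.B.name] == "True"; this is
# the value Equipment.save() below falls back to when no default is supplied.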
class Equipment(models.Model):
name = models.CharField(_('name'), max_length=254, unique=True)
type = models.CharField(_('type'), max_length=1,
choices=make_choices(EquipmentType))
default = models.CharField(_('default'), max_length=254)
def save(self, *args, **kwargs):
# set default value
if not self.default:
self.default = EquipmentTypeDefaults[self.type]
# call super
super().save(*args, **kwargs)
def __str__(self):
return "{} ({})".format(self.name, self.type)
def get_absolute_url(self):
return reverse('equipment:detail', kwargs={'pk': self.id})
class EquipmentSet(models.Model):
name = models.CharField(_('name'), max_length=254, unique=True)
def get_absolute_url(self):
return reverse('equipment:detail-set', kwargs={'pk': self.id})
def __str__(self):
return self.name
class EquipmentSetItem(UpdatedByModel):
equipment_set = models.ForeignKey(EquipmentSet,
on_delete=models.CASCADE,
verbose_name=_('equipment_set'))
equipment = models.ForeignKey(Equipment,
on_delete=models.CASCADE,
verbose_name=_('equipment'))
class EquipmentHolder(models.Model):
equipmentsets = models.ManyToManyField(EquipmentSet, blank=True, verbose_name=_('equipmentsets'))
def is_hospital(self):
return hasattr(self, 'hospital')
def is_ambulance(self):
return hasattr(self, 'ambulance')
def get_type(self):
if self.is_hospital():
return "hospital"
elif self.is_ambulance():
return "ambulance"
return None
def get_name(self):
if self.is_hospital():
return self.hospital.name
elif self.is_ambulance():
return self.ambulance.identifier
return None
def __str__(self):
retval = "Equipment '{}'".format(self.id)
if self.is_hospital():
retval += ", Hospital '{}'".format(self.hospital)
elif self.is_ambulance():
retval += ", Ambulance '{}'".format(self.ambulance)
else:
retval += ", Unknown"
return retval
def get_absolute_url(self):
if self.is_hospital():
return reverse('hospital:detail', kwargs={'pk': self.hospital.id})
elif self.is_ambulance():
return reverse('ambulance:detail', kwargs={'pk': self.ambulance.id})
else:
return reverse('equipment:detail-holder', kwargs={'pk': self.id})
class EquipmentItem(UpdatedByModel):
equipmentholder = models.ForeignKey(EquipmentHolder,
on_delete=models.CASCADE,
verbose_name=_('equipmentholder'))
equipment = models.ForeignKey(Equipment,
on_delete=models.CASCADE,
verbose_name=_('equipment'))
value = models.CharField(_('value'), max_length=254)
def save(self, *args, **kwargs):
# creation?
created = self.pk is None
# if no value, set default value
if not self.value:
self.value = self.equipment.default
# save to EquipmentItem
super().save(*args, **kwargs)
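        # deferred import, presumably to avoid a circular import at module load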
from mqtt.publish import SingletonPublishClient
# publish to mqtt
client = SingletonPublishClient()
client.publish_equipment_item(self)
class Meta:
unique_together = ('equipmentholder', 'equipment',)
def __str__(self):
return "EquipmentHolder: {}, Equipment: {}, Count: {}".format(self.equipmentholder, self.equipment, self.value)
|
EMSTrack/WebServerAndClient
|
equipment/models.py
|
Python
|
bsd-3-clause
| 4,379
|
import numpy, pylab, os, sys, csv, pickle
from echem_plate_fcns import *
from echem_plate_math import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(PyCodePath,'ternaryplot'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
#dp='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results/combinedfom.txt'
#savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/parsedresults/allfom'
SYSTEM=21
cabools=[0, 0, 0, 0, 0, 0]
bmcpavebool=True
if SYSTEM==-1:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summarytemp'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 0, 0, 1, 1]
cpbools=[1, 0, 0, 0, 1, 1]
bmcpavebool=False
elif SYSTEM==0:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 1, 1, 1, 1, 0]
cpbools=[1, 1, 1, 1, 1, 0]
cabools=[0, 0, 0, 0, 1, 0]
elif SYSTEM==1:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys1345'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 1, 1, 1, 0]
cpbools=[1, 0, 1, 1, 1, 0]
elif SYSTEM==2:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys15'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 0, 0, 1, 0]
cpbools=[1, 0, 0, 0, 1, 0]
elif SYSTEM==21:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys15indiv6'
xlims=(250, 460)
ylims=(-.8, 2.3)
cvbools=[1, 0, 0, 0, 1, 1]
cpbools=[1, 0, 0, 0, 1, 1]
cabools=[0, 0, 0, 0, 1, 0]
bmcpavebool=False
elif SYSTEM==3:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys3CP5'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[0, 0, 0, 0, 1, 0]
cpbools=[0, 0, 1, 0, 1, 0]
elif SYSTEM==4:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys1CV3CP5'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 0, 0, 1, 0]
cpbools=[0, 0, 1, 0, 1, 0]
elif SYSTEM==41:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys1CV3CP5indiv'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 0, 0, 1, 0]
cpbools=[0, 0, 1, 0, 1, 0]
bmcpavebool=False
elif SYSTEM==5:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys1345indiv'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 1, 1, 1, 0]
cpbools=[1, 0, 1, 1, 1, 0]
bmcpavebool=False
elif SYSTEM==6:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys1345indiv6'
xlims=(220, 460)
ylims=(-.8, 2.3)
cvbools=[1, 0, 1, 1, 1, 1]
cpbools=[1, 0, 1, 1, 1, 1]
bmcpavebool=False
elif SYSTEM==7:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys15indiv6'
xlims=(220, 460)
ylims=(-.8, 2.3)
cvbools=[1, 0, 0, 0, 1, 1]
cpbools=[1, 0, 0, 0, 1, 1]
bmcpavebool=False
p1='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/results/selectsamplesnesteddlist.pck'
p2='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130528NiFeCoCe3platerescan/results/selectsamplesnesteddlist.pck'
p3='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results/selectsamplesnesteddlist.pck'
p4='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130610NiFeCoCesingle_6321/results/selectsamplesnesteddlist.pck'
p5='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/benchmarking/selectsamplesnesteddlist.pck'
p6='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/yunsamples/selectsamplesnesteddlist.pck'
dallsamples=[[693, 693, 170, 170, 170, 170], [3022, 3022, 725, 725, 725, 725], [5047, 5047, 1326, 1326, 1326, 1326], [5050, 5050, 1329, 1329, 1329, 1329], [692, 692, 169, 169, 169, 169]]# list of "compositions" in terms of sample number; for each composition there should be a corresponding sample number in each of the dall* datasets
if not os.path.exists(savefolder):
os.mkdir(savefolder)
os.chdir(savefolder)
#BMCP102010_dallindex=[\
#[numpy.array([0.355, 0.389, 0.374]), numpy.array([0.007, 0.014, 0.011])], \
#[numpy.array([0.376, 0.425, 0.380]), numpy.array([0.017, 0.033, 0.017])], \
#[numpy.array([0.377, 0.419, 0.379]), numpy.array([0.017, 0.034, 0.021])], \
#numpy.nan, numpy.nan]#indexed same as dall and then within is a list of 2 arrays, 0th is vs OER and 1st is STD from 3 repeat measurements
f=open(p1, mode='r')
dall1=pickle.load(f)
f.close()
f=open(p2, mode='r')
dall2=pickle.load(f)
f.close()
f=open(p3, mode='r')
dall3=pickle.load(f)
f.close()
f=open(p4, mode='r')
dall4=pickle.load(f)
f.close()
f=open(p5, mode='r')
dall5=pickle.load(f)
f.close()
f=open(p6, mode='r')
dall6=pickle.load(f)
f.close()
dallinds1={}
dallinds2={}
dallinds3={}
dallinds4={}
dallinds5={}
dallinds6={}
for sl in dallsamples:
il=[]
for s, da, di in zip(sl, [dall1, dall2, dall3, dall4, dall5, dall6], [dallinds1, dallinds2, dallinds3, dallinds4, dallinds5, dallinds6]):
for k, dl in da.iteritems():
stemp=[d['Sample'] for d in dl]
if not k in di.keys():
di[k]=[]
if s in stemp:
di[k]+=[stemp.index(s)]
else:
di[k]+=[numpy.nan]
print 'no data found for sample ', s, k
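# The dallinds* dicts mirror the dall* dicts: for each experiment key they
# hold, per composition in dallsamples, the list index of the matching sample
# dict (or nan when that sample was not measured). For example,
# dallinds3['Tafel'][0] locates sample 170 inside dall3['Tafel'].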
def CPTafel_sampleind(dallsamplei, cvbools=[1, 1, 1, 1, 1, 1], cpbools=[1, 1, 1, 1, 1, 1]):
if cpbools[2]:
d=dall3['Tafel'][dallinds3['Tafel'][dallsamplei]]
dydx=1./(d['TafelCPSlopeVperdec']*1000.)
y0=d['TafelCPLogExCurrent']+5.
x=numpy.array(xlims)
y=x*dydx+y0
pylab.plot(x, y, 'r--', label='CP3fit')
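# The dashed line above is the Tafel relation log10(J) = V/b + log10(J0):
# multiplying TafelCPSlopeVperdec by 1000. expresses the slope b in mV/decade
# to match the mV x-axis, and the +5. applies the same log10-current offset
# used for every other trace in this script.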
def allbmcvfig_sampleind(dallsamplei):
d1=dall5['bmcv'][dallinds5['bmcv'][dallsamplei]]
for count, k in enumerate(['complete02', 'complete03', 'complete04']):
d=d1[k]
# x=d['Ewe(VOER)']*1000.
# i=numpy.argmax(x)
# x=x[:i]
# y=d['I(mAcm2)'][:i]
x=d['Ewe(VOER)_LinSub']*1000.
y=d['I(mAcm2)_LinSub']
posinds=numpy.where(y>1e-1)[0][1:]
x=x[posinds]
y=numpy.log10(y[posinds])
if count==0:
pylab.plot(x, y, '-', color='c', label='bmCVs')
else:
pylab.plot(x, y, '-', color='c')
def allbmcpfig_sampleind(dallsamplei, avebool=True, plot2hr=True):#booleans not implemented yet
d1=dall5['bmstepcp'][dallinds5['bmstepcp'][dallsamplei]]
xarr=[]
yarr=[]
for k in ['complete02', 'complete03', 'complete04']:
d=d1[k]
xarr+=[d['Ewe(VOER)']*1000.]
yarr+=[d['I(mAcm2)']]
xarr=numpy.array(xarr)
yarr=numpy.array(yarr)
if avebool:
x=xarr.mean(axis=0)
xe=xarr.std(axis=0)
y=numpy.log10(yarr.mean(axis=0))
pylab.errorbar(x, y, xerr=xe, ls='None', marker='s', mec='m', mfc='m', mew=.9, label='bmCP')
else:
for count, (x, y) in enumerate(zip(xarr, yarr)):
y=numpy.log10(y)
if count==0:
pylab.plot(x, y, ls='None', mec='m', mfc='m', marker=r'$'+`count+2`+'$', label='bmCP')
else:
pylab.plot(x, y, ls='None', mec='m', mfc='m', marker=r'$'+`count+2`+'$')
if plot2hr:
d1=dall5['bm2hrcp'][dallinds5['bm2hrcp'][dallsamplei]]
xarr=[]
yarr=[]
for k in ['complete02', 'complete03', 'complete04']:
d=d1[k]
xarr+=[d['Ewe(VOER)']*1000.]
yarr+=[d['I(mAcm2)']]
xarr=numpy.array(xarr)
yarr=numpy.array(yarr)
x2=xarr.mean()
xe2=xarr.std()
y2=numpy.log10(yarr.mean())
if avebool:
pylab.errorbar(x2, y2, xerr=xe2, ls='None', marker='s', mec='m', mfc='m', mew=.9, label='bmCP 2hr')
else:
for count, (x, y) in enumerate(zip(xarr, yarr)):
y=numpy.log10(y)
if count==0:
pylab.plot(x, y, ls='None', mec='m', mfc='m', marker=r'$'+`count+2`+"'$", label='bmCP 2hr')
else:
pylab.plot(x, y, ls='None', mec='m', mfc='m', marker=r'$'+`count+2`+"'$")
def allbmcafig_sampleind(dallsamplei, avebool=True):#booleans not implemented yet
d1=dall5['bmstepca'][dallinds5['bmstepca'][dallsamplei]]
xarr=[]
yarr=[]
for k in ['complete02', 'complete03', 'complete04']:
d=d1[k]
xarr+=[d['Ewe(VOER)']*1000.]
yarr+=[d['I(mAcm2)']]
xarr=numpy.array(xarr)
yarr=numpy.array(yarr)
if avebool:
x=xarr.mean(axis=0)
xe=xarr.std(axis=0)
y=numpy.log10(yarr.mean(axis=0))
pylab.errorbar(x, y, xerr=xe, ls='None', marker='s', mec='m', mfc='m', mew=.9, label='bmCA')
else:
for count, (x, y) in enumerate(zip(xarr, yarr)):
y=numpy.log10(y)
if count==0:
pylab.plot(x, y, ls='None', mec='pink', mfc='pink', marker=r'$'+`count+2`+'$', label='bmCA')
else:
pylab.plot(x, y, ls='None', mec='pink', mfc='pink', marker=r'$'+`count+2`+'$')
def allLogIvsVfig_sampleind(dallsamplei, cvsmoothpts=8, cvbools=[1, 1, 1, 1, 1, 1], cpbools=[1, 1, 1, 1, 1, 1]):
d=dall1['CV3'][dallinds1['CV3'][dallsamplei]]
vsh=-(.187-0.045)
d1=d
if cvbools[0]:
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)'][segd['inds']]+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)_LinSub'][segd['inds']]
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-6)[0][5:]
x=x[posinds]
y=numpy.log10(y[posinds])+5.
pylab.plot(x, y, '-', color='k', label='CVv1')
if cvbools[1]:
d=dall2['CV3'][dallinds2['CV3'][dallsamplei]]
vsh=-(.187-0.045)
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)'][segd['inds']]+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)_LinSub'][segd['inds']]
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-6)[0][5:]
x=x[posinds]
y=numpy.log10(y[posinds])+5.
pylab.plot(x, y, '-', color='b', label='CVv2')
if cvbools[2]:
d=dall3['CV3'][dallinds3['CV3'][dallsamplei]]
vsh=-(.187-0.045)
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)'][segd['inds']]+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)_LinSub'][segd['inds']]
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-6)[0][5:]
x=x[posinds]
y=numpy.log10(y[posinds])+5.
pylab.plot(x, y, '-', color='r', label='CVv3')
if cvbools[3]:
d=dall4['CV3'][dallinds4['CV3'][dallsamplei]]
vsh=-(.187-0.045)
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)'][segd['inds']]+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)_LinSub'][segd['inds']]
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-6)[0][5:]
x=x[posinds]
y=numpy.log10(y[posinds])+5.
pylab.plot(x, y, '-', color='y', label='CVv4')
if cvbools[3]:
d=dall4['CV3postCP'][dallinds4['CV3postCP'][dallsamplei]]
vsh=-(.187-0.045)
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)'][segd['inds']]+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)_LinSub'][segd['inds']]
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-6)[0][5:]
x=x[posinds]
y=numpy.log10(y[posinds])+5.
pylab.plot(x, y, '-', color='g', label='CVv4postCP')
if cpbools[0]:
d=dall1['CP1'][dallinds1['CP1'][dallsamplei]]
vsh=-(.187-0.045)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='k', label='CPv1')
if cpbools[2]:
d=dall3['CP4'][dallinds3['CP4'][dallsamplei]]
vsh=-(.187-0.043)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='r', label='CPv3')
d=dall3['CP5'][dallinds3['CP5'][dallsamplei]]
vsh=-(.187-0.043)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='r')
d=dall3['CP6'][dallinds3['CP6'][dallsamplei]]
vsh=-(.187-0.045)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='r')
    if cpbools[3]:
        # NB: this branch re-plots the same dall3 CP data as the cpbools[2]
        # branch above, differing only in the vsh offset and the green 'CPv4' styling
        d=dall3['CP4'][dallinds3['CP4'][dallsamplei]]
vsh=-(.187-0.045)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='g', label='CPv4')
d=dall3['CP5'][dallinds3['CP5'][dallsamplei]]
vsh=-(.187-0.045)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='g')
d=dall3['CP6'][dallinds3['CP6'][dallsamplei]]
vsh=-(.187-0.045)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='g')
#
#pylab.legend(loc=4)
# pylab.ylabel('Log(J / mA cm$^{-2}$)')
# pylab.xlabel('Potential (mV vs OER)')
#
# t='Sample%d,%d:' %(d1['Sample'], d['Sample'])
# t+=''.join([el+'%d' %(100*v) for el, v in zip(d['elements'], d['compositions'])])
# pylab.title(t)
def yuncvplot(dallsamplei):
if cvbools[5]:
d=dall6['CV'][dallinds6['CV'][dallsamplei]]['sample1cv']
x=d['Ewe(VOER)_LinSub']*1000.
#x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(mAcm2)_LinSub']
#y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-1)[0]
x=x[posinds]
y=numpy.log10(y[posinds])
pylab.plot(x, y, '-', color='brown', label='CVv6')
def LinIvsVfig_sampleind(dallsamplei, cvsmoothpts=8, cvbools=[1, 1, 1, 1, 1, 1], cpbools=[1, 1, 1, 1, 1, 1]):
    if cvbools[0]:
        d=dall1['CV3'][dallinds1['CV3'][dallsamplei]]
        vsh=-(.187-0.045)
        d1=d
        x=(d['Ewe(V)']+vsh)*1000.
        x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
        y=d['I(A)']*1.e5
        y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
        x=x[15:-15]
        y=y[15:-15]
        pylab.plot(x, y, '-', color='k', label='CVv1')
if cvbools[4]:
d1=dall5['bmcv'][dallinds5['bmcv'][dallsamplei]]
for count, k in enumerate(['complete02', 'complete03', 'complete04']):
d=d1[k]
x=d['Ewe(VOER)']*1000.
y=d['I(mAcm2)']
if count==0:
pylab.plot(x, y, '-', color='c', label='bmCVs')
else:
pylab.plot(x, y, '-', color='c')
if cvbools[5]:
d=dall6['CV'][dallinds6['CV'][dallsamplei]]['sample1cv']
x=d['Ewe(VOER)']*1000.
#x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(mAcm2)']
#y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
pylab.plot(x, y, '-', color='brown', label='CVv6')
for dallsamplei in range(5):
pylab.figure(num=dallsamplei)
allLogIvsVfig_sampleind(dallsamplei, cvbools=cvbools, cpbools=cpbools)
CPTafel_sampleind(dallsamplei, cvbools=cvbools, cpbools=cpbools)
if cpbools[4]:
for dallsamplei in range(3):
pylab.figure(num=dallsamplei)
allbmcpfig_sampleind(dallsamplei, avebool=bmcpavebool, plot2hr=True)
if cabools[4]:
for dallsamplei in range(3):
pylab.figure(num=dallsamplei)
allbmcafig_sampleind(dallsamplei, avebool=bmcpavebool)
if cvbools[4]:
for dallsamplei in range(3):
pylab.figure(num=dallsamplei)
allbmcvfig_sampleind(dallsamplei)
if cvbools[5]:
for dallsamplei in [2]:
pylab.figure(num=dallsamplei)
yuncvplot(dallsamplei)
for dallsamplei in range(5):
pylab.figure(num=dallsamplei)
pylab.legend(loc = 'lower right', bbox_to_anchor = (1.12, 0.))
pylab.ylabel('Log(J / mA cm$^{-2}$)')
pylab.xlabel('Potential (mV vs OER)')
d1=dall1['CV3'][dallinds1['CV3'][dallsamplei]]
d=dall3['CV3'][dallinds3['CV3'][dallsamplei]]
t='Sample%d-%d_' %(d1['Sample'], d['Sample'])
t+=''.join([el+'%d' %(int(round(100*v))) for el, v in zip(d['elements'], d['compositions'])])
pylab.title(t)
pylab.xlim(xlims)
pylab.ylim(ylims)
pylab.savefig(t+'.png')
pylab.savefig(t+'.eps')
for dallsamplei in [2]:
pylab.figure()
LinIvsVfig_sampleind(dallsamplei)
pylab.legend(loc = 'upper left')
pylab.ylabel('J / mA cm$^{-2}$')
pylab.xlabel('Potential (mV vs OER)')
d1=dall1['CV3'][dallinds1['CV3'][dallsamplei]]
d=dall3['CV3'][dallinds3['CV3'][dallsamplei]]
t='Sample%d-%d_' %(d1['Sample'], d['Sample'])
t+=''.join([el+'%d' %(int(round(100*v))) for el, v in zip(d['elements'], d['compositions'])])
pylab.title(t)
pylab.xlim(-100, 460)
pylab.ylim(-8, 180)
t+='LinCV'
pylab.savefig(t+'.png')
pylab.savefig(t+'.eps')
pylab.show()
|
johnmgregoire/JCAPdatavis
|
plotcustom_selectsamples.py
|
Python
|
bsd-3-clause
| 17,415
|
#! /usr/bin/env python3
# Import
from json import loads as JSONLoad
from math import pi, \
    sin as Sine, \
    cos as Cosine, \
    acos as ArcCosine
from os.path import exists as Exists
from pickle import dump as Pickle,\
load as UnPickle
import sys
# Maybe import MySql
try:
from pymysql import connect as MySqlConnect
except ImportError:
pass
# Constants
sys.setrecursionlimit(100000)  # FloodFill recurses once per reachable graph node
poiTypes = ('bar', 'restaurant')
poiType2Skip = {
'bar' : (358, 409, 466),
'restaurant' : (940, 1050),
}
# Walking constants
radiansPerDegree = pi / 180
radiusOfEarth = 6371e3
meterPerMin = 1.4 * 60
meterPerLat = 111194.
meterPerLng = 87882.
worldLimits = {
'latitudeMinimum' : 37.708,
'latitudeMaximum' : 37.8125,
'longitudeMinimum' : -122.515,
'longitudeMaximum' : -122.355,
}
maximumTreePerMeter = 10
maximumDistanceReduction = 0.3
# Classes
class Address:
__slots__ = [
'address',
'latitude',
'longitude',
]
def __init__(self, *args):
self.address, self.latitude, self.longitude = args
return
class Tree:
'''https://data.sfgov.org/Public-Works/Street-Tree-List/tkzw-k3nq'''
__slots__ = [
'id',
'variety',
'latitude',
'longitude',
]
def __init__(self, *args):
if 4 == len(args):
self.id, self.variety, latitude, longitude = args
self.latitude = self.longitude = None
if latitude is not None and longitude is not None:
self.latitude = float(latitude)
self.longitude = float(longitude)
elif 5 == len(args):
junk, id, variety, latitude, longitude = args
self.id = int(id)
self.variety = variety.decode('latin-1')
self.latitude = float(latitude)
self.longitude = float(longitude)
return
class POI:
'''http://www.yelp.com/developers/documentation/v2/search_api#rValue'''
__slots__ = [
'id',
'poiType',
'name',
'nodeIds',
'offsets',
'latitude',
'longitude',
'address',
'city',
'state',
'imageUrl',
'yelpUrl',
]
def __init__(self, *args):
if 4 == len(args):
json, poiType, nodeIds, offsets = args
self.id = POIHash(json, poiType)
self.poiType = poiType
self.name = json.get('name')
self.nodeIds = nodeIds
self.offsets = offsets
location = json.get('location')
latlng = location.get('latlng')
self.latitude = self.longitude = None
if latlng is not None:
self.latitude = latlng[0]
self.longitude = latlng[1]
self.address = location.get('address')
self.city = location.get('city')
self.state = location.get('state_code')
self.imageUrl = json.get('image_url')
self.yelpUrl = json.get('url')
elif 12 == len(args):
id, poiType, name, nodeIds, offsets, latitude, longitude, address, city, state, imageUrl, yelpUrl = args
self.id = int(id)
self.poiType = poiType.decode('latin-1')
self.name = name.decode('latin-1')
self.nodeIds = bool(nodeIds) and list(int(nodeId) for nodeId in nodeIds.decode('latin-1').split(',') if nodeId) or []
self.offsets = bool(offsets) and list(float(offset) for offset in offsets.decode('latin-1').split(',') if offset) or []
self.latitude = float(latitude)
self.longitude = float(longitude)
self.address = address.decode('latin-1')
self.city = city.decode('latin-1')
self.state = state.decode('latin-1')
self.imageUrl = imageUrl.decode('latin-1')
self.yelpUrl = yelpUrl.decode('latin-1')
return
def __hash__(self):
return self.id
class Node:
'''http://wiki.openstreetmap.org/wiki/Node'''
    __slots__ = [
        'id',
        'isIntersection',
        'latitude',
        'longitude',
        'nodeIds',
        'edgeIds',
        'lengths',
        'poiIds',
        # `count` and `label` are assigned by the OSM branch of __init__ and
        # consumed by BuildGraph and FloodFill; they must be declared here,
        # otherwise __slots__ makes those assignments raise AttributeError
        'count',
        'label',
    ]
def __init__(self, *args):
if 1 == len(args):
nodeXml, = args
self.id = self.longitude = self.latitude = self.label = None
for key, value in nodeXml.attrib.items():
if 'id' == key:
self.id = int(value)
elif 'lat' == key:
self.latitude = float(value)
elif 'lon' == key:
self.longitude = float(value)
self.count = 0
self.nodeIds = []
self.edgeIds = []
self.lengths = []
self.poiIds = []
elif 8 == len(args):
id, isIntersection, latitude, longitude, nodeIds, edgeIds, lengths, poiIds = args
self.id = int(id)
self.isIntersection = bool(isIntersection)
self.latitude = float(latitude)
self.longitude = float(longitude)
self.nodeIds = bool(nodeIds) and list(int(nodeId) for nodeId in nodeIds.decode('latin-1').split(',') if nodeId) or []
self.edgeIds = bool(edgeIds) and list(int(edgeId) for edgeId in edgeIds.decode('latin-1').split(',') if edgeId) or []
self.lengths = bool(lengths) and list(float(length) for length in lengths.decode('latin-1').split(',') if length) or []
self.poiIds = bool(poiIds) and list(int(poiId) for poiId in poiIds.decode('latin-1').split(',') if poiId) or []
return
def __sub__(self, other):
return LatLngDistance(self.latitude, self.longitude, other.latitude, other.longitude)
def Count(self):
self.count += 1
return
class Edge:
'''http://wiki.openstreetmap.org/wiki/Way'''
__slots__ = [
'id',
'name',
'nodeIds',
'treeCount',
]
def __init__(self, *args):
if 2 == len(args):
xml, id2Node = args
self.id = int(xml.attrib.get('id'))
self.name = None
for tag in xml.iter('tag'):
key, value = (tag.attrib.get(moniker) for moniker in ('k', 'v'))
if 'name' == key:
self.name = value
self.nodeIds = [int(nodeXml.attrib.get('ref')) for nodeXml in xml.iter('nd')]
# Touch each sub-node
for nodeId in set(self.nodeIds):
# Kick out unknown nodes
if nodeId not in id2Node:
continue
id2Node.get(nodeId).Count()
self.treeCount = 0
elif 4 == len(args):
id, name, nodeIds, treeCount = args
self.id = int(id)
self.name = name != b'None' and name.decode('latin-1') or ''
self.nodeIds = bool(nodeIds) and list(int(nodeId) for nodeId in nodeIds.decode('latin-1').split(',') if nodeId) or []
self.treeCount = treeCount
elif 5 == len(args):
junk, self.id, self.name, self.nodeIds, self.treeCount = args
return
# Functions
def PrintNow(*arguments, sep = '\n', end = '\n'):
from sys import stdout
print(*arguments, sep = sep, end = end)
stdout.flush()
return
def GeoJSON(thing):
'''http://en.wikipedia.org/wiki/GeoJSON'''
if isinstance(thing, list):
return [GeoJSON(element) for element in thing]
geoJSON = {
'type' : 'Feature',
'geometry' : {
'type' : 'Point',
'coordinates' : [thing.longitude, thing.latitude],
}
}
if isinstance(thing, Address):
geoJSON.update({
'properties' : {
'title' : thing.address,
'icon' : {
'clickable' : 'true',
'iconUrl' : './static/img/star-24.svg',
'iconSize' : [30, 30],
},
}
})
elif isinstance(thing, POI):
iconUrl = {
'bar' : './static/img/bar-24.svg',
'park' : './static/img/park-24.svg',
'restaurant' : './static/img/restaurant-24.svg',
}
geoJSON.update({
'properties' : {
'title' : thing.name,
'icon' : {
'clickable' : 'true',
'iconUrl' : iconUrl[thing.poiType],
'iconSize' : [30, 30],
},
'imageUrl' : thing.imageUrl,
'yelpUrl' : thing.yelpUrl,
}
})
elif isinstance(thing, Tree):
geoJSON.update({
'properties' : {
'title' : thing.variety,
'icon' : {
'iconUrl' : './static/img/circle-24.svg',
'iconSize' : [12, 12],
}
}
})
return geoJSON
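# Shape of the returned dict for a Tree, for illustration:
# {'type': 'Feature',
#  'geometry': {'type': 'Point', 'coordinates': [longitude, latitude]},
#  'properties': {'title': variety,
#                 'icon': {'iconUrl': './static/img/circle-24.svg', 'iconSize': [12, 12]}}}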
def POIHash(json, poiType):
latlng = json.get('location').get('latlng')
return hash((json.get('id'), poiType, latlng and tuple(latlng) or ())) // 1000
# Pre MySQL
def ReadOsmFile(osmFileName):
'''http://wiki.openstreetmap.org/wiki/OSM_XML'''
import xml.etree.ElementTree as ET
PrintNow('Reading {:s} ... '.format(osmFileName), end = '')
root = ET.parse(osmFileName).getroot()
PrintNow('done')
return root
def InBounds(location):
return worldLimits.get('latitudeMinimum') <= location.latitude <= worldLimits.get('latitudeMaximum') and \
worldLimits.get('longitudeMinimum') <= location.longitude <= worldLimits.get('longitudeMaximum')
def ParseOSMNodes(osmRoot):
PrintNow('Parsing OSM nodes ... ', end = '')
id2Node = [Node(child) for child in osmRoot if 'node' == child.tag]
# Throw out nodes which are out of bounds
id2Node = {node.id : node for node in id2Node if InBounds(node)}
PrintNow('found {:d}'.format(len(id2Node)))
return id2Node
def ParseOSMWays(osmRoot, id2Node):
PrintNow('Parsing OSM ways ... ', end = '')
id2Edge = [Edge(child, id2Node) for child in osmRoot if 'way' == child.tag]
id2Edge = {edge.id : edge for edge in id2Edge}
PrintNow('found {:d}'.format(len(id2Edge)))
# Clean up edge names (non-latin-1 and ")
for edge in id2Edge.values():
edge.name = edge.name and edge.name.replace('–', '-').replace('"', '')
return id2Edge
def LinkNodes(nodeOne, nodeTwo, edge, id2Node):
# Attach node
nodeOne.nodeIds.append(nodeTwo.id)
nodeTwo.nodeIds.append(nodeOne.id)
# Attach edge
nodeOne.edgeIds.append(edge.id)
nodeTwo.edgeIds.append(edge.id)
# Calculate length
index = edge.nodeIds.index(nodeOne.id)
jndex = edge.nodeIds.index(nodeTwo.id)
if index > jndex:
index, jndex = jndex, index
nodeIds = [nodeId for nodeId in edge.nodeIds[index : jndex + 1] if nodeId in id2Node]
assert(len(nodeIds))
length = sum(id2Node.get(nodeIds[index + 1]) - id2Node.get(nodeIds[index]) for index in range(len(nodeIds) - 1))
# Attach it
nodeOne.lengths.append(length)
nodeTwo.lengths.append(length)
return
def BuildGraph(id2Node, id2Edge):
PrintNow('Building graph ... ', end = '')
intersectionIds = set()
# Iterate over edges
for edge in id2Edge.values():
intersections = []
# ... and their sub-nodes
for nodeId in edge.nodeIds:
node = id2Node.get(nodeId)
# Each node which is shared among multiple edges is an intersection
if node is not None and node.count > 1:
intersections.append(node)
intersectionIds.update((nodeId, ))
# Link intersection nodes
prev = None
for curr in intersections:
if prev is not None and prev.id != curr.id:
LinkNodes(prev, curr, edge, id2Node)
prev = curr
PrintNow('found {:d} intersections'.format(len(intersectionIds)))
return
def FloodFill(startId, id2Node, label):
'''http://en.wikipedia.org/wiki/Flood_fill'''
# Initialize
count = 0
node = id2Node.get(startId)
# If this node is unlabeled ...
if node.label is None:
# ... count it ...
node.label = label
count += 1
# ... and recurse to its neighbors
for nodeId in node.nodeIds:
# The count is passed up the tree to the initiating node
count += FloodFill(nodeId, id2Node, label)
return count
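# Sketch: given the toy connectivity 1 - 2 - 3 plus an isolated node 4,
# FloodFill(1, ...) labels {1, 2, 3} and returns a count of 3, leaving 4 to
# receive a later label; TrimGraph below keeps only the largest component.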
def TrimGraph(id2Node, id2Edge):
PrintNow('Finding largest subgraph ... ', end = '')
# Label every node according to the subgraph to which it belongs
label2Count = {}
label = 0
for nodeId in id2Node:
count = FloodFill(nodeId, id2Node, label)
if count > 2:
label2Count[label] = count
label += 1
# Find the largest subgraph
graphLabel = maxCount = 0
for label, count in label2Count.items():
if count >= maxCount:
graphLabel, maxCount = label, count
# Gather IDs from the largest subgraph
graphIds = [nodeId for nodeId, node in id2Node.items() if node.label == graphLabel]
PrintNow('contains {:d} nodes'.format(len(graphIds)))
# Gather the IDs from other subgraphs and trim them
    trimNodeIds = [nodeId for nodeId, node in id2Node.items() if node.label in label2Count and node.label != graphLabel]
for nodeId in trimNodeIds:
id2Node.pop(nodeId)
# Trim edges
trimEdgeIds = [edge.id for edge in id2Edge.values() if not any(nodeId in id2Node for nodeId in edge.nodeIds)]
for edgeId in trimEdgeIds:
id2Edge.pop(edgeId)
PrintNow('Graph trimmed to {:d} nodes and {:d} edges'.format(len(id2Node), len(id2Edge)))
return graphIds
def SimpleDistance(latitude1, longitude1, latitude2, longitude2):
    '''Squared planar distance in metres**2 (no sqrt, cheap to compare)'''
    return ((meterPerLat * (latitude1 - latitude2)) ** 2 + (meterPerLng * (longitude1 - longitude2)) ** 2)
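# Because SimpleDistance omits the sqrt, callers compare squared metres:
# NearestNode applies ** 0.5 once when reporting the offset, and CloseGraph's
# threshold of 5 corresponds to roughly 2.2 m of real separation.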
def ZeroLink(nodeIds, id2Node, id2Edge):
# Find unique edgeId
edgeId = max(id2Edge)
# Iterate over every pairing
for index in range(len(nodeIds)):
nodeId = nodeIds[index]
iNode = id2Node.get(nodeId)
for jndex in range(index + 1, len(nodeIds)):
nodeJd = nodeIds[jndex]
jNode = id2Node.get(nodeJd)
# Attach nodes
iNode.nodeIds.append(nodeJd)
jNode.nodeIds.append(nodeId)
# Create fake edge and attach it
edgeId += 1
id2Edge[edgeId] = Edge(False, edgeId, 'Edge to close graph between {:d} and {:d}'.format(nodeId, nodeJd), [nodeId, nodeJd], 0)
iNode.edgeIds.append(edgeId)
jNode.edgeIds.append(edgeId)
# Attach length
iNode.lengths.append(0)
jNode.lengths.append(0)
return
def CloseGraph(id2Node, id2Edge, graphIds):
PrintNow('Closing graph ... ', end = '')
# Order nodes by position
graphIds = sorted(graphIds, key = lambda graphId: (id2Node.get(graphId).latitude, id2Node.get(graphId).longitude))
# Iterate over graph nodes
threshold = 5
latlng2NodeIds = {}
for nodeId in graphIds:
node = id2Node.get(nodeId)
latitude, longitude = node.latitude, node.longitude
match = False
for latlng in latlng2NodeIds:
if SimpleDistance(latlng[0], latlng[1], latitude, longitude) < threshold:
match = True
break
if match:
latlng2NodeIds[latlng].append(node.id)
else:
latlng2NodeIds[(latitude, longitude)] = [node.id]
count = 0
for latlng, nodeIds in latlng2NodeIds.items():
if len(nodeIds) > 1:
count += 1
ZeroLink(nodeIds, id2Node, id2Edge)
PrintNow('closed {:d} disconnections.'.format(count))
return
def NearestNode(latitude, longitude, nodeIds, id2Node):
minimumId, minimumDistance = None, 1e9
for nodeId in nodeIds:
node = id2Node.get(nodeId)
distance = SimpleDistance(latitude, longitude, node.latitude, node.longitude)
if distance < minimumDistance:
minimumId, minimumDistance = nodeId, distance
return minimumId, minimumDistance ** 0.5
def SnapPOIs(id2Node, nodeIds, datDirectory):
# Grab dog-friendly POI's
dogOKFileName = '{}/dogOKs.dat'.format(datDirectory)
with open(dogOKFileName) as f:
dogOKs = [dogOK.strip() for dogOK in f.readlines()]
# Iterate over POI types
id2Poi = {}
for poiType in poiTypes:
PrintNow('Snapping {} to intersections ...'.format(poiType))
# POI specifics
skip = poiType2Skip.get(poiType)
# Read POI .json
jsonFileName = '{}/{}.json'.format(datDirectory, poiType)
PrintNow('Reading {:s} ... '.format(jsonFileName), end = '')
with open(jsonFileName, 'r') as f:
json = JSONLoad(f.read())
PrintNow('done')
# Iterate over businesses
businesses = json.get('businesses')
length = len(businesses)
for index in range(length):
# Skip junk data and dog-unfriendly
yelpUrl = businesses[index].get('url')
if index in skip or yelpUrl not in dogOKs:
continue
json = businesses[index]
poiId = POIHash(json, poiType)
latlng = json.get('location').get('latlng')
# Kick out ill-defined POI's
if latlng is None:
continue
# Attach ...
PrintNow('{:4d}/{:4d}:\t{} .. '.format(index + 1, length, json.get('name')), end = '')
latitude, longitude = latlng
nodeId, offset = NearestNode(latitude, longitude, nodeIds, id2Node)
PrintNow('to {}'.format(nodeId))
# ... POI onto node ...
id2Node.get(nodeId).poiIds.append(poiId)
# ... and node onto POI
if poiId in id2Poi:
id2Poi.get(poiId).nodeIds.append(nodeId)
id2Poi.get(poiId).offsets.append(offset)
else:
id2Poi[poiId] = POI(json, poiType, [nodeId], [offset])
PrintNow('Added {:d} POIs'.format(len(id2Poi)))
return id2Poi
def SnapTrees(id2Node, id2Edge, graphIds, datDirectory):
id2Tree = {}
PrintNow('Snapping trees to edges ...')
# Map nodeId to edgeIds
nodeId2EdgeIds = {}
for edgeId, edge in id2Edge.items():
# Kick out non-subgraph edges
if not any(nodeId in graphIds for nodeId in edge.nodeIds):
continue
for nodeId in edge.nodeIds:
# Kick out missing nodes
if nodeId not in id2Node:
continue
try:
nodeId2EdgeIds[nodeId].append(edgeId)
except KeyError:
nodeId2EdgeIds[nodeId] = [edgeId]
# Order nodes by position
nodeIds = sorted(nodeId2EdgeIds.keys(), key = lambda nodeId: (id2Node.get(nodeId).latitude, id2Node.get(nodeId).longitude))
# Read tree .json
jsonFileName = '{}/{}.json'.format(datDirectory, treeFileName)
PrintNow('Reading {:s} ... '.format(jsonFileName), end = '')
with open(jsonFileName, 'r') as f:
json = JSONLoad(f.read())
PrintNow('done')
# Iterate over trees
trees = json.get('data')
# Order trees by position
trees = sorted((tree for tree in trees if tree[23] is not None and tree[24] is not None), key = lambda tree: (tree[23], tree[24]))
length = len(trees)
prevLatLng, prevNodeId = (None, None), None
for index in range(length):
treeList = trees[index]
treeId = treeList[0]
variety = treeList[10]
latitude, longitude = treeList[23 : 25]
# Kick out ill-defined or repeat trees
if latitude is None or longitude is None:
continue
# Snap to a node ...
        PrintNow('{:5d}/{:5d} .. '.format(index + 1, length), end = '')
id2Tree[treeId] = Tree(treeId, variety, latitude, longitude)
latitude, longitude = float(latitude), float(longitude)
if prevLatLng == (latitude, longitude):
nodeId = prevNodeId
else:
nodeId, junk = NearestNode(latitude, longitude, nodeIds, id2Node)
prevLatLng, prevNodeId = (latitude, longitude), nodeId
# ... grab its edges ...
edgeIds = nodeId2EdgeIds.get(nodeId)
# ... and increment them
PrintNow('to {}'.format(','.join(str(edgeId) for edgeId in edgeIds)))
for edgeId in edgeIds:
id2Edge.get(edgeId).treeCount += 1
PrintNow('Added {:d} trees'.format(len(id2Tree)))
return id2Tree
def CreateTables(id2Node, id2Edge, id2Poi, id2Tree, graphIds):
# Helper
def List2Str(l):
return l and ','.join(str(e) for e in l if e) or ''
# Initialize
connection = MySqlConnect(user = 'root', port = 3306, db = mySqlDataBase)
cursor = connection.cursor()
# Nodes
PrintNow('Nodes TABLE ... ', end = '')
    cursor.execute('''DROP TABLE IF EXISTS Nodes ;''')
cursor.execute('''CREATE TABLE Nodes (id INT UNSIGNED NOT NULL PRIMARY KEY, isIntersection BOOLEAN, latitude DOUBLE NOT NULL, longitude DOUBLE NOT NULL, nodeIds TINYBLOB, edgeIds TINYBLOB, lengths TINYBLOB, poiIds TINYBLOB) ;''')
for node in id2Node.values():
cursor.execute('''INSERT INTO Nodes(id, isIntersection, latitude, longitude, nodeIds, edgeIds, lengths, poiIds) VALUES ({0.id:d}, {1:d}, {0.latitude:f}, {0.longitude:f}, "{2:s}", "{3:s}", "{4:s}", "{5:s}") ;'''.format(node, bool(node.id in graphIds), List2Str(node.nodeIds), List2Str(node.edgeIds), List2Str(node.lengths), List2Str(node.poiIds)))
connection.commit()
PrintNow('inserted {:d} rows'.format(len(id2Node)))
# Edges
PrintNow('Edges TABLE ... ', end = '')
    cursor.execute('''DROP TABLE IF EXISTS Edges ;''')
cursor.execute('''CREATE TABLE Edges (id INT UNSIGNED NOT NULL PRIMARY KEY, name TINYBLOB NOT NULL, nodeIds TINYBLOB NOT NULL, treeCount INT UNSIGNED NOT NULL) ;''')
for edge in id2Edge.values():
cursor.execute('''INSERT INTO Edges(id, name, nodeIds, treeCount) VALUES ({0.id:d}, "{1:s}", "{2:s}", {0.treeCount:d}) ;'''.format(edge, edge.name, List2Str(edge.nodeIds)))
connection.commit()
PrintNow('inserted {:d} rows'.format(len(id2Edge)))
# POIs
PrintNow('POIs TABLE ... ', end = '')
    cursor.execute('''DROP TABLE IF EXISTS POIs ;''')
cursor.execute('''CREATE TABLE POIs (id BIGINT NOT NULL PRIMARY KEY, poiType TINYBLOB NOT NULL, name TINYBLOB NOT NULL, nodeIds TINYBLOB NOT NULL, offsets TINYBLOB NOT NULL, latitude DOUBLE NOT NULL, longitude DOUBLE NOT NULL, address TINYBLOB NOT NULL, city TINYBLOB NOT NULL, state TINYBLOB NOT NULL, imageUrl TINYBLOB NOT NULL, yelpUrl TINYBLOB NOT NULL) ;''')
count = 0
for poi in id2Poi.values():
if (poi.latitude is None or poi.longitude is None):
continue
count += 1
cursor.execute('''INSERT INTO POIs(id, poiType, name, nodeIds, offsets, latitude, longitude, address, city, state, imageUrl, yelpUrl) VALUES ({0.id:d}, "{0.poiType:s}", "{1:s}", "{2:s}", "{3:s}", {0.latitude:f}, {0.longitude:f}, "{4:s}", "{0.city:s}", "{0.state:s}", "{0.imageUrl:s}", "{0.yelpUrl:s}") ;'''.format(poi, poi.name, List2Str(poi.nodeIds), List2Str(poi.offsets), poi.address[0]))
connection.commit()
PrintNow('inserted {:d} rows'.format(count))
# Trees
PrintNow('Trees TABLE .. ', end = '')
    cursor.execute('''DROP TABLE IF EXISTS Trees ;''')
cursor.execute('''CREATE TABLE Trees (id INT UNSIGNED NOT NULL PRIMARY KEY, variety TINYBLOB NOT NULL, latitude DOUBLE NOT NULL, longitude DOUBLE NOT NULL) ;''')
count = 0
for tree in id2Tree.values():
if (tree.latitude is None or tree.longitude is None):
continue
count += 1
cursor.execute('''INSERT INTO Trees(id, variety, latitude, longitude) VALUES ({0.id:d}, "{0.variety:s}", {0.latitude:f}, {0.longitude:f}) ;'''.format(tree))
connection.commit()
PrintNow('inserted {:d} rows'.format(count))
# Debug
if False:
cursor.execute('''SELECT * FROM POIs ;''')
PrintNow(*('\t'.join(str(col) for col in row) for row in cursor), sep = '\n')
# Garbage
connection.close()
return
def Mashup(osmFileName, datDirectory, pickleFileName = None):
# Maybe load pickle
pickleFileName = '{}/{}'.format(datDirectory, pickleFileName)
if Exists(pickleFileName):
PrintNow('Reading {:s} ... '.format(pickleFileName), end = '')
# Load pickle
with open(pickleFileName, 'rb') as f:
pickle = UnPickle(f)
id2Node = pickle.get('id2Node')
id2Edge = pickle.get('id2Edge')
graphIds = pickle.get('graphIds')
id2Poi = pickle.get('id2Poi')
id2Tree = pickle.get('id2Tree')
PrintNow('done')
else:
PrintNow('Pickle `{}` was not found ... generating graph instead'.format(pickleFileName))
# Read OSM file
osmFileName = '{}/{}'.format(datDirectory, osmFileName)
osmRoot = ReadOsmFile(osmFileName)
# Parse OSM nodes
id2Node = ParseOSMNodes(osmRoot)
# Parse OSM ways
id2Edge = ParseOSMWays(osmRoot, id2Node)
# Build graph
BuildGraph(id2Node, id2Edge)
# Trim disconnected graphs
graphIds = TrimGraph(id2Node, id2Edge)
# Link identical nodes with 0-length edges
CloseGraph(id2Node, id2Edge, graphIds)
# Snap POI's to intersection nodes
id2Poi = SnapPOIs(id2Node, graphIds, datDirectory)
# Snap Trees to edges
id2Tree = SnapTrees(id2Node, id2Edge, graphIds, datDirectory)
# Dump pickle
dat = {
'id2Node' : id2Node,
'id2Edge' : id2Edge,
'graphIds' : graphIds,
'id2Poi' : id2Poi,
'id2Tree' : id2Tree,
}
with open(pickleFileName, 'wb') as f:
PrintNow('Writing {:s} ... '.format(pickleFileName), end = '')
Pickle(dat, f)
PrintNow('done')
# Write MySql tables
CreateTables(id2Node, id2Edge, id2Poi, id2Tree, graphIds)
return
# Post MySQL
def MySql2Graph():
# Connect to database and initialize cursor
PrintNow('Using {} ... '.format(mySqlDataBase), end = '')
connection = MySqlConnect(user = 'root', port = 3306, db = mySqlDataBase)
cursor = connection.cursor()
PrintNow('done')
# id2Node
PrintNow('Loading Nodes ... ', end = '')
cursor.execute('''SELECT * FROM Nodes ;''')
id2Node = {nodeId : Node(nodeId, *other) for nodeId, *other in cursor}
PrintNow('found {:d}'.format(len(id2Node)))
# id2Edge
PrintNow('Loading Edges ... ', end = '')
cursor.execute('''SELECT * FROM Edges ;''')
id2Edge = {edgeId : Edge(edgeId, *other) for edgeId, *other in cursor}
PrintNow('found {:d}'.format(len(id2Edge)))
# id2Poi
PrintNow('Loading POIs ... ', end = '')
cursor.execute('''SELECT * FROM POIs ;''')
id2Poi = {poiId : POI(poiId, *other) for poiId, *other in cursor}
PrintNow('found {:d}'.format(len(id2Poi)))
# id2Tree
PrintNow('Loading Trees ... ', end = '')
cursor.execute('''SELECT * FROM Trees ;''')
id2Tree = {treeId : Tree(False, treeId, *other) for treeId, *other in cursor}
PrintNow('found {:d}'.format(len(id2Tree)))
# graphIds
graphIds = list(nodeId for nodeId, node in id2Node.items() if node.isIntersection)
PrintNow('Finished loading MySQL database `{}`.'.format(mySqlDataBase))
return id2Node, id2Edge, id2Poi, id2Tree, graphIds
def FindPOIs(nodeIds, id2Node, id2Poi):
PrintNow('Locating POI\'s ... ', end = '')
poiIds = list(set(poiId for nodeId in nodeIds for poiId in id2Node.get(nodeId).poiIds if poiId in id2Poi))
PrintNow('found {:d}'.format(len(poiIds)))
return poiIds
def LatLngDistance(latitude1, longitude1, latitude2, longitude2):
'''http://www.johndcook.com/python_longitude_latitude.html'''
phi1 = (90 - latitude1) * radiansPerDegree
phi2 = (90 - latitude2) * radiansPerDegree
deltaTheta = (longitude1 - longitude2) * radiansPerDegree
argument = Sine(phi1) * Sine(phi2) * Cosine(deltaTheta) + Cosine(phi1) * Cosine(phi2)
# Positions are the same!
if argument > 1:
return 0
return radiusOfEarth * ArcCosine(argument)
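# Sanity check: LatLngDistance(37., -122., 38., -122.) reduces to
# radiusOfEarth * radiansPerDegree ~= 111194 m, matching the meterPerLat
# constant defined at the top of this module.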
def ExtremeNode(nodeIds, id2Node, attribute, index):
return list(sorted((getattr(node, attribute), nodeId) for nodeId in nodeIds for node in [id2Node.get(nodeId)]))[index][1]
def BottomNode(nodeIds, id2Node):
    return ExtremeNode(nodeIds, id2Node, attribute = 'latitude', index = 0)
def LeftNode(nodeIds, id2Node):
    return ExtremeNode(nodeIds, id2Node, attribute = 'longitude', index = 0)
def RightNode(nodeIds, id2Node):
    return ExtremeNode(nodeIds, id2Node, attribute = 'longitude', index = -1)
def TopNode(nodeIds, id2Node):
    return ExtremeNode(nodeIds, id2Node, attribute = 'latitude', index = -1)
def CropGraph(center, radius, id2Thing, thingIds, description):
PrintNow('Cropping {:G}km around ({:G}, {:G}) ... '.format(radius / 1e3, *center), end = '')
# Crop to square
xmin, xmax, ymin, ymax = center[1] - radius / meterPerLng, center[1] + radius / meterPerLng, center[0] - radius / meterPerLat, center[0] + radius / meterPerLat
things = [thing for thingId in thingIds for thing in [id2Thing.get(thingId)] if xmin <= thing.longitude <= xmax and ymin <= thing.latitude <= ymax]
# Crop to circle
croppedIds = [thing.id for thing in things if LatLngDistance(center[0], center[1], thing.latitude, thing.longitude) <= radius]
PrintNow('{:d} {:s} remain'.format(len(croppedIds), description))
return croppedIds
def GeoCode(address):
from urllib.request import urlopen as UrlOpen
from urllib.parse import quote as Quote
# Encode query string into URL
url = 'http://maps.googleapis.com/maps/api/geocode/json?address={}&sensor=false'.format(Quote(address))
# Call API and extract JSON
PrintNow('Calling Google Maps API for `{:s}` ... '.format(address), end = '')
json = UrlOpen(url).read()
json = JSONLoad(json.decode('utf-8'))
# Extract longitude and latitude
if json.get('status') == 'ZERO_RESULTS':
latitude, longitude = None, None
PrintNow('it was not found')
else:
latitude, longitude = (value for key, value in sorted(json.get('results')[0].get('geometry').get('location').items()))
PrintNow('it is located at {:f}/{:f}'.format(latitude, longitude))
return Address(address, latitude, longitude)
def FindAddress(query, minutes, id2Node, id2Poi, graphIds, id2Tree):
# Specify San Francisco!
if all(city not in query.lower() for city in ('sf', 'san francisco', 's.f.')):
query += ', San Francisco, CA'
# Geocode it
address = GeoCode(query)
# Check if the address is in bounds
if address.latitude is None or address.longitude is None or not InBounds(address):
return None
# Grab address lat/lng
latlng = (address.latitude, address.longitude)
# Calculate center, radius, and bounds
center = (address.latitude, address.longitude)
radius = meterPerMin * minutes
bounds = [[center[0] - radius / meterPerLat, center[1] - radius / meterPerLng], [center[0] + radius / meterPerLat, center[1] + radius / meterPerLng]]
# Crop graph
bufferIds = CropGraph(center, radius * 1.5, id2Node, graphIds, 'nodes')
croppedIds = CropGraph(center, radius, id2Node, bufferIds, 'nodes')
# Snap to nearest node
nodeId, offset = NearestNode(address.latitude, address.longitude, croppedIds, id2Node)
# Find POI's
poiIds = FindPOIs(croppedIds, id2Node, id2Poi)
POIs = [id2Poi.get(poiId) for poiId in poiIds]
# Find trees
trees = [id2Tree.get(treeId) for treeId in CropGraph(center, radius * 1.5, id2Tree, id2Tree, 'trees')]
# Build JSON
json = {
'query' : query,
'minutes' : minutes,
'address' : GeoJSON(address),
'addressLatlng' : latlng,
'center' : center,
'radius' : radius,
'bounds' : bounds,
'croppedIds' : bufferIds,
'nodeId' : nodeId,
'offset' : offset,
'poiIds' : poiIds,
'POIs' : GeoJSON(POIs),
'trees' : GeoJSON(trees),
}
return json
def FinePath(pathIds, nodeIds, id2Node, id2Edge):
PrintNow('Building fine path ... ', end = '')
# Build path points
longitudes, latitudes = [], []
previous = current = None
for nodeId in pathIds:
current = id2Node.get(nodeId)
if previous is not None:
edgeId = current.edgeIds[current.nodeIds.index(previous.id)]
edge = id2Edge.get(edgeId)
index = edge.nodeIds.index(previous.id)
jndex = edge.nodeIds.index(current.id)
if index > jndex:
step = -1
else:
step = +1
            subNodeIds = edge.nodeIds[index : jndex + step : step]
            longitudes.extend((id2Node.get(subNodeId).longitude for subNodeId in subNodeIds))
            latitudes.extend((id2Node.get(subNodeId).latitude for subNodeId in subNodeIds))
previous = current
latlngs = list(zip(latitudes, longitudes))
PrintNow('contains {:d} fine nodes'.format(len(longitudes)))
return latlngs
def Dijkstra(start, finishes, id2Node, id2Edge):
assert(start in id2Node)
assert(all(finish in id2Node for finish in finishes))
# Initialize
uninspected = id2Node.copy()
id2Distance = {nodeId : 1e9 for nodeId in uninspected}
id2Distance[start] = 0
id2From = {}
# Inspect each node
while uninspected:
# Walk to nearest node
distance = min(id2Distance.get(nodeId) for nodeId in uninspected)
nearest = next(nodeId for nodeId in uninspected if distance == id2Distance.get(nodeId))
# Kick out if at finish
if nearest in finishes:
break
# Declare nearest to be inspected
nearest = uninspected.pop(nearest)
# Walk to each uninspected neighbor
for nodeId, edgeId, length in zip(nearest.nodeIds, nearest.edgeIds, nearest.lengths):
# Kick out inspected
if nodeId not in uninspected:
continue
            # Reduce the edge length according to its tree density, capping
            # the bonus at maximumDistanceReduction
            edge = id2Edge.get(edgeId)
            treePerMeter = length and edge.treeCount / length
            length *= (1 - min(maximumDistanceReduction, treePerMeter / maximumTreePerMeter))
            # Candidate distance to this neighbor via `nearest`
            length += distance
# Use shortest path
if length < id2Distance.get(nodeId):
id2Distance[nodeId] = length
id2From[nodeId] = nearest.id
# Construct shortest path
nodeId = nearest
path = [nodeId]
while nodeId != start:
nodeId = id2From.get(nodeId)
path.insert(0, nodeId)
return path, id2Distance.get(nearest)
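# Worked example of the tree bonus, using the module constants: an edge with
# 40 trees over 100 m has 0.4 trees/m, so its length is scaled by
# 1 - min(0.3, 0.4 / 10) = 0.96 and is treated as 96 m; the reduction
# saturates at 30% once an edge reaches 3 trees/m.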
def Route(startId, finishIds, nodeIds, id2Node, id2Edge):
PrintNow('Routing a path from {:d} to {} ... '.format(startId, len(finishIds)), end = '')
subGraph = {nodeId : id2Node.get(nodeId) for nodeId in nodeIds}
pathIds, distance = Dijkstra(startId, finishIds, subGraph, id2Edge)
PrintNow('{:d} edges take {:G}km'.format(len(pathIds) - 1, distance / 1e3))
return pathIds, distance
def RoutePOI(startId, poiId, nodeIds, id2Node, id2Edge, id2Poi):
# Extract POI
poi = id2Poi.get(poiId)
finishIds = poi.nodeIds
# Route
pathIds, distance = Route(startId, finishIds, nodeIds, id2Node, id2Edge)
# Build fine-path
latlngs = FinePath(pathIds, nodeIds, id2Node, id2Edge)
# Grab offset
offset = poi.offsets[poi.nodeIds.index(pathIds[-1])]
# Build JSON
json = {
'startId' : startId,
'poiId' : poiId,
'finishIds' : finishIds,
'poiType' : poi.poiType,
'poiName' : poi.name,
'poiLatlng' : (poi.latitude, poi.longitude),
'offset' : offset,
'pathIds' : pathIds,
'distance' : distance,
'latlngs' : latlngs,
}
return json
def DebugPlot(pathIds, nodeIds, id2Node, id2Edge, pdfFileName = 'debug.pdf'):
import matplotlib.pyplot as Plot
PrintNow('Plotting graph ... ', end = '')
# Initialize
fig, ax = Plot.subplots()
# Draw Streets
x, y = [], []
for nodeId in nodeIds:
iNode = id2Node.get(nodeId)
for nodeJd in iNode.nodeIds:
edge = id2Edge.get(iNode.edgeIds[iNode.nodeIds.index(nodeJd)])
index = edge.nodeIds.index(nodeId)
jndex = edge.nodeIds.index(nodeJd)
if index > jndex:
step = -1
else:
step = +1
            # use a fresh name: rebinding `nodeIds` here would corrupt the
            # later loops over the parameter
            segmentIds = edge.nodeIds[index : jndex + step : step]
            x.extend((id2Node.get(segmentId).longitude for segmentId in segmentIds))
            y.extend((id2Node.get(segmentId).latitude for segmentId in segmentIds))
x.append(None)
y.append(None)
Plot.plot(x, y, color = 'black', linewidth = 0.5)
# Draw Intersections
x, y = [], []
for nodeId in nodeIds:
node = id2Node.get(nodeId)
x.extend((node.longitude, None))
y.extend((node.latitude, None))
Plot.plot(x, y, marker = 'o', markersize = 1, markerfacecolor = 'blue', markeredgecolor = 'blue')
# Draw path
x, y = [], []
previous = current = None
for nodeId in pathIds:
current = id2Node.get(nodeId)
if previous is not None:
edge = id2Edge.get(current.edgeIds[current.nodeIds.index(previous.id)])
index = edge.nodeIds.index(previous.id)
jndex = edge.nodeIds.index(current.id)
if index > jndex:
step = -1
else:
step = +1
            segmentIds = edge.nodeIds[index : jndex + step : step]
            x.extend((id2Node.get(segmentId).longitude for segmentId in segmentIds))
            y.extend((id2Node.get(segmentId).latitude for segmentId in segmentIds))
x.append(None)
y.append(None)
previous = current
Plot.plot(x, y, color = 'orange', linewidth = 4, alpha = 0.5, marker = None)
# Draw start/finish
startId, finishId = pathIds[0], pathIds[-1]
x, y = id2Node.get(startId).longitude, id2Node.get(startId).latitude
Plot.plot(x, y, marker = 'x', markersize = 8, markeredgewidth = 2, markerfacecolor = 'green', markeredgecolor = 'green')
x, y = id2Node.get(finishId).longitude, id2Node.get(finishId).latitude
Plot.plot(x, y, marker = 'x', markersize = 8, markeredgewidth = 2, markerfacecolor = 'green', markeredgecolor = 'red')
# Pretty
ax.set_xlim((-122.4592000, -122.4156000))
ax.set_ylim((37.7719000, 37.7923000))
ax.set_title('Path from {:d} to {:d}'.format(startId, finishId))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.savefig(pdfFileName)
    PrintNow('saved to {:s}'.format(pdfFileName))
return
# Filenames
datDirectory = './static/dat'
# osmFileName = 'neighborhood.osm'  # alternative smaller extract
osmFileName = 'sf-city.osm'
treeFileName = 'sfTrees'
mySqlDataBase = 'dogWalkScore6'
pickleFileName = '{}.pkl'.format(mySqlDataBase)
# Script
if __name__ == '__main__':
Mashup(osmFileName, datDirectory, pickleFileName)
|
jeffseif/dogWalkScore
|
static/py/dogWalkScore.py
|
Python
|
bsd-3-clause
| 39,621
|
import re
from django import forms
from parsley.widgets import ParsleyChoiceFieldRendererMixin
FIELD_TYPES = [
(forms.URLField, "url"),
(forms.EmailField, "email"),
(forms.IntegerField, "digits"),
(forms.DecimalField, "number"),
(forms.FloatField, "number"),
]
FIELD_ATTRS = [
("min_length", "minlength"),
("max_length", "maxlength"),
("min_value", "min"),
("max_value", "max"),
]
def update_widget_attrs(field, prefix='data'):
attrs = field.widget.attrs
if field.required:
if isinstance(field.widget, forms.widgets.RadioSelect):
# Use a mixin, to try and support non-standard renderers if possible
class ParsleyChoiceFieldRenderer(ParsleyChoiceFieldRendererMixin, field.widget.renderer):
parsley_namespace = prefix
field.widget.renderer = ParsleyChoiceFieldRenderer
else:
attrs["{prefix}-required".format(prefix=prefix)] = "true"
error_message = field.error_messages.get('required', None)
if error_message:
attrs["{prefix}-required-message".format(prefix=prefix)] = error_message
if isinstance(field, forms.RegexField):
attrs.update({"{prefix}-regexp".format(prefix=prefix): field.regex.pattern})
error_message = field.error_messages.get('invalid', None)
if error_message:
attrs["{prefix}-regexp-message".format(prefix=prefix)] = error_message
if field.regex.flags & re.IGNORECASE:
attrs.update({"{prefix}-regexp-flag".format(prefix=prefix): "i"})
if isinstance(field, forms.MultiValueField):
for subfield in field.fields:
update_widget_attrs(subfield)
# Set {prefix}-* attributes for parsley based on Django field attributes
    for attr, data_attr in FIELD_ATTRS:
if getattr(field, attr, None):
attrs["{prefix}-{0}".format(data_attr, prefix=prefix)] = getattr(field, attr)
error_message = field.error_messages.get(attr, None)
if error_message:
attrs["{prefix}-{0}-message".format(data_attr, prefix=prefix)] = error_message
# Set {prefix}-type attribute based on Django field instance type
for klass, field_type in FIELD_TYPES:
if isinstance(field, klass):
attrs["{prefix}-type".format(prefix=prefix)] = field_type
error_message = field.error_messages.get('invalid', None)
if error_message:
attrs["{prefix}-type-{0}-message".format(field_type, prefix=prefix)] = error_message
def parsleyfy(klass):
"A decorator to add {prefix}-* attributes to your form.fields"
old_init = klass.__init__
def new_init(self, *args, **kwargs):
old_init(self, *args, **kwargs)
prefix = getattr(getattr(self, 'Meta', None), 'parsley_namespace', 'data-parsley')
for _, field in self.fields.items():
update_widget_attrs(field, prefix)
extras = getattr(getattr(self, 'Meta', None), 'parsley_extras', {})
for field_name, data in extras.items():
for key, value in data.items():
if field_name not in self.fields:
continue
attrs = self.fields[field_name].widget.attrs
if key == 'equalto':
# Use HTML id for {prefix}-equalto
value = '#' + self[value].id_for_label
if isinstance(value, bool):
value = "true" if value else "false"
attrs["{prefix}-%s".format(prefix=prefix) % key] = value
klass.__init__ = new_init
return klass
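
# Usage sketch with a hypothetical form (attribute names shown for the default
# 'data-parsley' namespace; the message attrs depend on field.error_messages):
#
#   @parsleyfy
#   class SignupForm(forms.Form):
#       email = forms.EmailField()
#
#   SignupForm().fields['email'].widget.attrs
#   # -> {'data-parsley-required': 'true', 'data-parsley-type': 'email', ...}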
|
blueyed/Django-parsley
|
parsley/decorators.py
|
Python
|
bsd-3-clause
| 3,640
|
from setuptools import setup, find_packages
setup(
name = "django_internal_urls",
version = "0.1.0-2",
description = 'Add modular url callbacks',
author = 'David Danier',
author_email = 'david.danier@team23.de',
url = 'https://github.com/ddanier/django_internal_urls',
#long_description=open('README.rst', 'r').read(),
packages = [
'django_internal_urls',
'django_internal_urls.templatetags',
],
install_requires = [
'Django >=1.3',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
|
ddanier/django_internal_urls
|
setup.py
|
Python
|
bsd-3-clause
| 858
|
import enum
import logging
from typing import List, Any, Optional
from PyQt5 import QtCore
from ....config import Config
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ComponentType(enum.Enum):
Pinhole = 'pinhole'
Beamstop = 'beamstop'
PinholeSpacer = 'spacer'
FlightPipe = 'flightpipe'
class GeometryChoices(QtCore.QAbstractItemModel):
"""Model to store all possible choices for beamstop, flight pipes, pinholes (in 3 stages) and inter-pinhole spacers.
The tree hierarchy is like:
- beamstops
- 2.6
- 4
- ...
- flightpipes
- 160
- 280
- 1038
- 1200
- ...
- pinholes
- stage 1
- 300
- 600
- ...
- stage 2
- 100
- 300
- ...
- stage 3
- 750
- 1200
- ...
- spacers
- 65
- 65
- 100
- 100
- 100
- 200
- ...
"""
class IndexObject:
"""Class for indexing the choices tree model"""
def __init__(self, componenttype: ComponentType, index1: Optional[int] = None, index2: Optional[int] = None):
            # logger.debug(f'Creating indexObject {componenttype=} {index1=} {index2=}')
self.componenttype = componenttype
self.index1 = index1
self.index2 = index2
assert isinstance(self.componenttype, ComponentType)
@property
def level(self) -> int:
if self.index1 is None and self.index2 is None:
return 1
elif self.index1 is not None and self.index2 is None:
return 2
elif self.index1 is not None and self.index2 is not None:
return 3
else:
raise ValueError(self)
def __eq__(self, other) -> bool:
assert isinstance(other, type(self))
return ((self.componenttype == other.componenttype)
and (self.index1 == other.index1)
and (self.index2 == other.index2))
def __ne__(self, other) -> bool:
return not self.__eq__(other)
def __str__(self):
            return f'IndexObject({self.componenttype.value=}, {self.index1=}, {self.index2=}, {self.level=})'
pinhole1: List[float] # list of pinhole apertures in the 1st stage
pinhole2: List[float] # list of pinhole apertures in the 2nd stage
pinhole3: List[float] # list of pinhole apertures in the 3rd stage
beamstop: List[float] # list of possible beam stop diameters
spacers: List[float] # list of the lengths of spacers that can be put between pinhole stages. May be repeated
flightpipes: List[float] # list of flight pipe lengths to be put between the sample chamber and the beamstop stage
_indexobjects: List[IndexObject] # cache of index objects, to avoid garbage collecting them
config: Config # the configuration object
def __init__(self, **kwargs):
self.pinhole1 = []
self.pinhole2 = []
self.pinhole3 = []
self.beamstop = []
self.spacers = []
self.flightpipes = []
self._indexobjects = []
self.config = kwargs.pop('config')
super().__init__(**kwargs)
self.loadFromConfig()
def rowCount(self, parent: QtCore.QModelIndex = ...) -> int:
ip = parent.internalPointer() if parent.isValid() else None
assert isinstance(ip, self.IndexObject) or ip is None
if not parent.isValid(): # root level
            return 4  # beamstops, flight pipes, pinholes, spacers (row order used by index())
elif ip.level == 1:
if ip.componenttype == ComponentType.Pinhole: # the pinhole branch is one level deeper
return 3
elif ip.componenttype == ComponentType.Beamstop:
return len(self.beamstop)
elif ip.componenttype == ComponentType.PinholeSpacer:
return len(self.spacers)
elif ip.componenttype == ComponentType.FlightPipe:
return len(self.flightpipes)
elif ip.level == 2:
if ip.componenttype == ComponentType.Pinhole:
if ip.index1 == 0:
return len(self.pinhole1)
elif ip.index1 == 1:
return len(self.pinhole2)
elif ip.index1 == 2:
return len(self.pinhole3)
else:
raise ValueError(ip.index1)
else:
return 0
elif ip.level == 3:
assert ip.componenttype == ComponentType.Pinhole
return 0
else:
assert False
def columnCount(self, parent: QtCore.QModelIndex = ...) -> int:
return 1
def parent(self, child: QtCore.QModelIndex) -> QtCore.QModelIndex:
if not child.isValid():
return QtCore.QModelIndex()
ip = child.internalPointer()
assert isinstance(ip, self.IndexObject)
if ip.level == 1:
return QtCore.QModelIndex()
elif ip.level == 2:
if ip.componenttype == ComponentType.Beamstop:
return self.index(0, 0, QtCore.QModelIndex())
elif ip.componenttype == ComponentType.FlightPipe:
return self.index(1, 0, QtCore.QModelIndex())
elif ip.componenttype == ComponentType.Pinhole:
return self.index(2, 0, QtCore.QModelIndex())
elif ip.componenttype == ComponentType.PinholeSpacer:
return self.index(3, 0, QtCore.QModelIndex())
else:
assert False
elif ip.level == 3:
if ip.componenttype == ComponentType.Pinhole:
return self.createIndex(ip.index1, 0, self.IndexObject(ComponentType.Pinhole, ip.index1, None))
else:
assert False
else:
assert False
def index(self, row: int, column: int, parent: QtCore.QModelIndex = ...) -> QtCore.QModelIndex:
if not parent.isValid():
return self.createIndex(row, column, self.IndexObject(
[ComponentType.Beamstop, ComponentType.FlightPipe, ComponentType.Pinhole, ComponentType.PinholeSpacer][
row], None, None))
ip = parent.internalPointer()
assert isinstance(ip, self.IndexObject)
if ip.level == 1:
# we need to construct an index for a second-level item.
return self.createIndex(row, column, self.IndexObject(ip.componenttype, row, None))
elif (ip.level == 2) and ip.componenttype == ComponentType.Pinhole:
return self.createIndex(row, column, self.IndexObject(ip.componenttype, ip.index1, row))
else:
logger.error(ip)
assert False
    def createIndex(self, row: int, column: int, object: Any = ...) -> QtCore.QModelIndex:
        # Re-use a cached equal IndexObject when possible: Qt keeps only a bare
        # pointer to internalPointer() objects, so each one must be kept alive
        # in self._indexobjects for the lifetime of the model.
        try:
            obj = [x for x in self._indexobjects if x == object][0]
        except IndexError:
            self._indexobjects.append(object)
            obj = object
        return super().createIndex(row, column, obj)
def flags(self, index: QtCore.QModelIndex) -> QtCore.Qt.ItemFlag:
if index.isValid():
ip = index.internalPointer()
assert isinstance(ip, self.IndexObject)
if (ip.level == 1) or ((ip.level == 2) and (ip.componenttype == ComponentType.Pinhole)):
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
else:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsEditable
else:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def data(self, index: QtCore.QModelIndex, role: int = ...) -> Any:
if not index.isValid():
return None
ip = index.internalPointer()
assert isinstance(ip, self.IndexObject)
if (role == QtCore.Qt.DisplayRole) or (role == QtCore.Qt.EditRole):
if ip.level == 1:
return ip.componenttype.value.capitalize()
elif (ip.level == 2) and (ip.componenttype == ComponentType.Pinhole):
return f'Stage {index.row() + 1}'
elif (ip.level == 2) and (ip.componenttype == ComponentType.FlightPipe):
return f'{self.flightpipes[index.row()]:.2f}' if role == QtCore.Qt.DisplayRole else self.flightpipes[index.row()]
elif (ip.level == 2) and (ip.componenttype == ComponentType.Beamstop):
return f'{self.beamstop[index.row()]:.2f}' if role == QtCore.Qt.DisplayRole else self.beamstop[index.row()]
elif (ip.level == 2) and (ip.componenttype == ComponentType.PinholeSpacer):
return f'{self.spacers[index.row()]:.2f}' if role == QtCore.Qt.DisplayRole else self.spacers[index.row()]
elif (ip.level == 3) and (ip.componenttype == ComponentType.Pinhole):
lis = [self.pinhole1, self.pinhole2, self.pinhole3][ip.index1]
return f'{lis[index.row()]:.2f}' if role == QtCore.Qt.DisplayRole else lis[index.row()]
else:
assert False
return None
def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int = ...) -> Any:
return None
def setData(self, index: QtCore.QModelIndex, value: Any, role: int = ...) -> bool:
logger.debug('setData')
if not index.isValid():
return False
ip = index.internalPointer()
assert isinstance(ip, self.IndexObject)
if (ip.level == 2) and (ip.componenttype == ComponentType.PinholeSpacer):
self.spacers[index.row()] = float(value)
self.spacers.sort()
elif (ip.level == 2) and (ip.componenttype == ComponentType.Beamstop):
self.beamstop[index.row()] = float(value)
self.beamstop.sort()
elif (ip.level == 2) and (ip.componenttype == ComponentType.FlightPipe):
self.flightpipes[index.row()] = float(value)
self.flightpipes.sort()
elif (ip.level == 3) and (ip.componenttype == ComponentType.Pinhole):
lis = [self.pinhole1, self.pinhole2, self.pinhole3][ip.index1]
lis[index.row()] = float(value)
lis.sort()
self.dataChanged.emit(self.index(0, 0, index.parent()),
self.index(self.rowCount(index.parent()) - 1, self.columnCount(index.parent()) - 1, index.parent()))
self.saveToConfig()
return True
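# Editor's note (hedged, not in the original source): dataChanged is emitted
# over the whole parent range rather than just the edited index because the
# sort() calls above may have moved the edited value to a different row.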
def addPinhole(self, stage: int, aperture: float):
"""Add a new pinhole diameter, maintaining increasing order
After the pinhole is added, the configuration is saved
:param stage: which stage it should be added to (indexed from 0)
:type stage: integer between 0 and 2
:param aperture: pinhole diameter
:type aperture: float
"""
lis = [self.pinhole1, self.pinhole2, self.pinhole3][stage]
row = max([i for i, l in enumerate(lis) if l < aperture] + [-1]) + 1
self.beginInsertRows(self.createIndex(stage, 0, self.IndexObject(ComponentType.Pinhole, stage, None)), row, row)
lis.insert(row, aperture)
self.endInsertRows()
self.saveToConfig()
def addSpacer(self, length: float):
"""Add a new spacer, maintaining increasing order
After the spacer length is added, the configuration is saved
:param length: length of the spacer in mm
:type length: float
"""
row = max([i for i, l in enumerate(self.spacers) if l < length] + [-1]) + 1
self.beginInsertRows(self.index(3, 0, QtCore.QModelIndex()), row, row)
self.spacers.insert(row, length)
self.endInsertRows()
self.saveToConfig()
def addFlightPipe(self, length: float):
"""Add a new flight pipe length, maintaining increasing order
After the flight pipe is added, the configuration is saved
:param length: length of the flight pipe
:type length: float
"""
row = max([i for i, l in enumerate(self.flightpipes) if l < length] + [-1]) + 1
self.beginInsertRows(self.index(1, 0, QtCore.QModelIndex()), row, row)
self.flightpipes.insert(row, length)
self.endInsertRows()
self.saveToConfig()
def addBeamstop(self, diameter: float):
"""Add a new beamstop diameter, maintaining increasing order
After the beamstop is added, the configuration is saved
:param diameter: beamstop diameter
:type diameter: float
"""
row = max([i for i, l in enumerate(self.beamstop) if l < diameter] + [-1]) + 1
self.beginInsertRows(self.index(0, 0, QtCore.QModelIndex()), row, row)
self.beamstop.insert(row, diameter)
self.endInsertRows()
self.saveToConfig()
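# Editor's sketch (hedged, not part of the original class): the row computed by
# the add* methods above is the classic sorted-insert position; with the
# standard library the same value could be obtained as, e.g.:
#
#     import bisect
#     row = bisect.bisect_left(self.beamstop, diameter)
#
# The explicit max()+1 form is equivalent for an already-sorted list and is
# kept unchanged because the row is needed for beginInsertRows()/endInsertRows().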
def loadFromConfig(self):
self.beginResetModel()
try:
self.pinhole1 = self.config['geometry']['choices']['pinholes'][1]
except KeyError:
pass
try:
self.pinhole2 = self.config['geometry']['choices']['pinholes'][2]
except KeyError:
pass
try:
self.pinhole3 = self.config['geometry']['choices']['pinholes'][3]
except KeyError:
pass
try:
self.beamstop = self.config['geometry']['choices']['beamstops']
except KeyError:
pass
try:
self.spacers = self.config['geometry']['choices']['spacers']
except KeyError:
pass
try:
self.flightpipes = self.config['geometry']['choices']['flightpipes']
except KeyError:
pass
self.endResetModel()
def saveToConfig(self):
"""Save the current state to the configuration dictionary"""
if 'geometry' not in self.config:
self.config['geometry'] = {}
if 'choices' not in self.config['geometry']:
self.config['geometry']['choices'] = {}
for listname in ['pinholes', 'spacers', 'flightpipes', 'beamstops']:
try:
del self.config['geometry']['choices'][listname]
except KeyError:
pass
self.config['geometry']['choices']['pinholes'] = {1: self.pinhole1, 2: self.pinhole2, 3: self.pinhole3}
self.config['geometry']['choices']['spacers'] = self.spacers
self.config['geometry']['choices']['flightpipes'] = self.flightpipes
self.config['geometry']['choices']['beamstops'] = self.beamstop
def removeRow(self, row: int, parent: QtCore.QModelIndex = ...) -> bool:
if not parent.isValid():
raise ValueError('Cannot remove top-level item')
ip = parent.internalPointer()
assert isinstance(ip, self.IndexObject)
if (ip.componenttype == ComponentType.Beamstop) and (ip.level == 1):
self.beginRemoveRows(parent, row, row)
del self.beamstop[row]
self.endRemoveRows()
elif (ip.componenttype == ComponentType.FlightPipe) and (ip.level == 1):
self.beginRemoveRows(parent, row, row)
del self.flightpipes[row]
self.endRemoveRows()
elif (ip.componenttype == ComponentType.PinholeSpacer) and (ip.level == 1):
self.beginRemoveRows(parent, row, row)
del self.spacers[row]
self.endRemoveRows()
elif (ip.componenttype == ComponentType.Pinhole) and (ip.level == 2) and (ip.index1 == 0):
self.beginRemoveRows(parent, row, row)
del self.pinhole1[row]
self.endRemoveRows()
elif (ip.componenttype == ComponentType.Pinhole) and (ip.level == 2) and (ip.index1 == 1):
self.beginRemoveRows(parent, row, row)
del self.pinhole2[row]
self.endRemoveRows()
elif (ip.componenttype == ComponentType.Pinhole) and (ip.level == 2) and (ip.index1 == 2):
self.beginRemoveRows(parent, row, row)
del self.pinhole3[row]
self.endRemoveRows()
else:
logger.error(ip)
raise ValueError('Cannot remove this item')
self.saveToConfig()
|
awacha/cct
|
cct/core2/instrument/components/geometry/choices.py
|
Python
|
bsd-3-clause
| 15,843
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import AuthorEntriesPlugin, LatestPostsPlugin, Post, BlogCategory
from .forms import LatestEntriesForm
from .settings import get_setting
class BlogPlugin(CMSPluginBase):
module = _('Blog')
class BlogLatestEntriesPlugin(BlogPlugin):
"""
Non-cached plugin which returns the latest posts, taking into account the
user / toolbar state
"""
render_template = 'djangocms_blog/plugins/latest_entries.html'
name = _('Latest Blog Articles')
model = LatestPostsPlugin
form = LatestEntriesForm
filter_horizontal = ('categories',)
cache = False
def render(self, context, instance, placeholder):
context = super(BlogLatestEntriesPlugin, self).render(context, instance, placeholder)
context['posts_list'] = instance.get_posts(context['request'])
context['TRUNCWORDS_COUNT'] = get_setting('POSTS_LIST_TRUNCWORDS_COUNT')
return context
class BlogLatestEntriesPluginCached(BlogPlugin):
"""
Cached plugin which returns the latest published posts
"""
render_template = 'djangocms_blog/plugins/latest_entries.html'
name = _('Latest Blog Articles')
model = LatestPostsPlugin
form = LatestEntriesForm
filter_horizontal = ('categories',)
def render(self, context, instance, placeholder):
context = super(BlogLatestEntriesPluginCached, self).render(context, instance, placeholder)
context['posts_list'] = instance.get_posts()
context['TRUNCWORDS_COUNT'] = get_setting('POSTS_LIST_TRUNCWORDS_COUNT')
return context
class BlogAuthorPostsPlugin(BlogPlugin):
module = _('Blog')
name = _('Author Blog Articles')
model = AuthorEntriesPlugin
form = LatestEntriesForm
render_template = 'djangocms_blog/plugins/authors.html'
filter_horizontal = ['authors']
def render(self, context, instance, placeholder):
context = super(BlogAuthorPostsPlugin, self).render(context, instance, placeholder)
context['authors_list'] = instance.get_authors()
return context
class BlogTagsPlugin(BlogPlugin):
module = _('Blog')
name = _('Tags')
model = CMSPlugin
render_template = 'djangocms_blog/plugins/tags.html'
def render(self, context, instance, placeholder):
context = super(BlogTagsPlugin, self).render(context, instance, placeholder)
context['tags'] = Post.objects.tag_cloud(queryset=Post.objects.published())
return context
class BlogCategoryPlugin(BlogPlugin):
module = _('Blog')
name = _('Categories')
model = CMSPlugin
render_template = 'djangocms_blog/plugins/categories.html'
def render(self, context, instance, placeholder):
context = super(BlogCategoryPlugin, self).render(context, instance, placeholder)
context['categories'] = BlogCategory.objects.all()
return context
class BlogArchivePlugin(BlogPlugin):
module = _('Blog')
name = _('Archive')
model = CMSPlugin
render_template = 'djangocms_blog/plugins/archive.html'
def render(self, context, instance, placeholder):
context = super(BlogArchivePlugin, self).render(context, instance, placeholder)
context['dates'] = Post.objects.get_months(queryset=Post.objects.published())
return context
plugin_pool.register_plugin(BlogLatestEntriesPlugin)
plugin_pool.register_plugin(BlogAuthorPostsPlugin)
plugin_pool.register_plugin(BlogTagsPlugin)
plugin_pool.register_plugin(BlogArchivePlugin)
plugin_pool.register_plugin(BlogCategoryPlugin)
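# Editor's note (hedged, not in the original file): BlogLatestEntriesPluginCached
# is defined above but never registered. If the cached variant were wanted, its
# registration would presumably mirror the others:
#
#     plugin_pool.register_plugin(BlogLatestEntriesPluginCached)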
|
creimers/djangocms-blog
|
djangocms_blog/cms_plugins.py
|
Python
|
bsd-3-clause
| 3,724
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 0);
|
antoinecarme/pyaf
|
tests/artificial/transf_Difference/trend_Lag1Trend/cycle_5/ar_/test_artificial_1024_Difference_Lag1Trend_5__0.py
|
Python
|
bsd-3-clause
| 265
|
import pandas as pd
import numpy as np
import pyaf.HierarchicalForecastEngine as hautof
import pyaf.Bench.TS_datasets as tsds
import datetime
#get_ipython().magic('matplotlib inline')
b1 = tsds.load_AU_hierarchical_dataset();
df = b1.mPastData;
lEngine = hautof.cHierarchicalForecastEngine()
lEngine.mOptions.mHierarchicalCombinationMethod = ["BU" , 'TD' , 'MO' , 'OC'];
lEngine.mOptions.set_active_autoregressions([]);
lEngine  # no-op in a script; presumably a leftover from the notebook version, where it displayed the repr
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H, b1.mHierarchy, None);
lEngine.getModelInfo();
#lEngine.standardPlots("outputs/AU");
dfapp_in = df.copy();
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/Hierarchical_AU_apply_out.csv")
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(dfapp_out.columns)
print(dfapp_out.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
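# Editor's note (hedged): "BU", "TD", "MO" and "OC" presumably select the usual
# hierarchical reconciliation strategies -- bottom-up, top-down, middle-out and
# optimal combination -- so this script exercises all four on the AU dataset.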
|
antoinecarme/pyaf
|
tests/hierarchical/test_hierarchy_AU_AllMethods.py
|
Python
|
bsd-3-clause
| 1,019
|
# coding=utf-8
from django.utils.translation import ugettext_lazy as _
from django import template
from django.utils.encoding import force_unicode
from django.template.defaultfilters import floatformat as django_floatformat
def floatformat(value, decimals):
return django_floatformat(value, decimals).replace('-', u'−')
def percent(float_value, unit=u'%s%%'):
if float_value is None:
return ''
rounded = floatformat(float_value * 100, 2)
return unit % force_unicode(rounded)
def money(value, unit=u''):
rounded = floatformat(value, 2)
if not unit or value is None:
return rounded
return _(unit) % rounded
register = template.Library()
register.filter('money', money)
register.filter('percent', percent)
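# Editor's usage sketch (hedged, not in the original file), assuming the tag
# library is loaded in a template with {% load moneyformats %}:
#
#     {{ 0.1234|percent }}       ->  "12.34%"
#     {{ 1234.5|money:"$%s" }}   ->  "$1234.50"
#
# Negative values render with a true minus sign (U+2212) because floatformat()
# above replaces the ASCII hyphen.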
|
samluescher/django-expenses
|
expenses/templatetags/moneyformats.py
|
Python
|
bsd-3-clause
| 753
|
import tests.periodicities.period_test as per
per.buildModel((30 , 'W' , 1600));
|
antoinecarme/pyaf
|
tests/periodicities/Week/Cycle_Week_1600_W_30.py
|
Python
|
bsd-3-clause
| 83
|
# -*- coding: utf-8 -*-
#
# giddy documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 6 15:54:22 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys, os
import sphinx_bootstrap_theme
sys.path.insert(0, os.path.abspath('../../'))
import giddy
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [#'sphinx_gallery.gen_gallery',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinxcontrib.bibtex',
'sphinx.ext.mathjax',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'numpydoc',
#'sphinx.ext.napoleon',
'matplotlib.sphinxext.plot_directive',
'nbsphinx',
'nbsphinx_link']
# Configure the extension for sphinxcontrib.bibtex: set bibtex_bibfiles to the list of bib files.
# New in Version 2.0.0 of sphinxcontrib.bibtex
bibtex_bibfiles = ['_static/references.bib']
mathjax_config = {
'TeX': {'equationNumbers': {'autoNumber': 'AMS', 'useLabelIds': True}},
}
# sphinx_gallery_conf = {
# # path to your examples scripts
# 'examples_dirs': '../examples',
# # path where to save gallery generated examples
# 'gallery_dirs': 'auto_examples',
# 'backreferences_dir': False,
# }
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'giddy'
copyright = '2018-, pysal developers'
author = 'pysal developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version.
version = giddy.__version__
release = giddy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'tests/*', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_title = "%s v%s Manual" % (project, version)
# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
#html_logo = "_static/images/CGS_logo.jpg"
#html_logo = "_static/images/CGS_logo_green.png"
#html_logo = "_static/images/pysal_logo_small.jpg"
html_favicon = "_static/images/pysal_favicon.ico"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "giddy",
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
#'navbar_pagenav': True,
#'navbar_pagenav': False,
# No sidebar
'nosidebar': True,
# Tab name for the current pages TOC. (Default: "Page")
#'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
#'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': 'footer',
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo", "yeti", "flatly".
'bootswatch_theme': "yeti",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
'navbar_links': [
("Installation", "installation"),
("Tutorial", "tutorial"),
("API", "api"),
("References", "references"),
],
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'giddydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'giddy.tex', u'giddy Documentation',
u'pysal developers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'giddy', u'giddy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'giddy', u'giddy Documentation',
author, 'giddy', 'One line description of project.',
'Miscellaneous'),
]
# -----------------------------------------------------------------------------
# Napoleon configuration
# -----------------------------------------------------------------------------
# numpydoc_show_class_members = True
# numpydoc_class_members_toctree = False
#
# napoleon_use_ivar = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
# Generate the API documentation when building
autosummary_generate = True
# avoid showing members twice
numpydoc_show_class_members = False
numpydoc_use_plots = True
# automatically document class members
autodoc_default_options = {
'members': True,
'undoc-members': True
}
# display the source code for Plot directive
plot_include_source = True
def setup(app):
app.add_css_file("pysal-styles.css")
# Configuration for intersphinx
intersphinx_mapping = {"python": ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'libpysal': ('https://pysal.org/libpysal/', None),
'mapclassify': ('https://pysal.org/mapclassify/', None),
'esda': ('https://pysal.org/esda/', None),
'matplotlib':("https://matplotlib.org/", None)
}
# This is processed by Jinja2 and inserted before each notebook
nbsphinx_prolog = r"""
{% set docname = env.doc2path(env.docname, base=None).replace("nblink","ipynb") %}
{% set fullpath = env.doc2path(env.docname, base='tree/master/notebooks/').replace("nblink","ipynb") %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. nbinfo::
This page was generated from `{{ docname }}`__.
Interactive online version:
:raw-html:`<a href="https://mybinder.org/v2/gh/pysal/giddy/master?filepath={{ docname }}"><img alt="Binder badge" src="https://mybinder.org/badge_logo.svg" style="vertical-align:text-bottom"></a>`
__ https://github.com/pysal/giddy/{{ fullpath }}
.. raw:: latex
\nbsphinxstartnotebook{\scriptsize\noindent\strut
\textcolor{gray}{The following section was generated from
\sphinxcode{\sphinxupquote{\strut {{ docname | escape_latex }}}} \dotfill}}
"""
# This is processed by Jinja2 and inserted after each notebook
nbsphinx_epilog = r"""
.. raw:: latex
\nbsphinxstopnotebook{\scriptsize\noindent\strut
\textcolor{gray}{\dotfill\ \sphinxcode{\sphinxupquote{\strut
{{ env.doc2path(env.docname, base='doc') | escape_latex }}}} ends here.}}
"""
# List of arguments to be passed to the kernel that executes the notebooks:
nbsphinx_execute_arguments = [
"--InlineBackend.figure_formats={'svg', 'pdf'}",
"--InlineBackend.rc={'figure.dpi': 96}",
]
|
pysal/giddy
|
docsrc/conf.py
|
Python
|
bsd-3-clause
| 11,247
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import geonames_field
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = geonames_field.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-geonames-field',
version=version,
description="""Geonames autocomplete field""",
long_description=readme + '\n\n' + history,
author='Savio Abuga',
author_email='savioabuga@gmail.com',
url='https://github.com/savioabuga/django-geonames-field',
packages=[
'geonames_field',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='django-geonames-field',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
)
|
savioabuga/django-geonames-field
|
setup.py
|
Python
|
bsd-3-clause
| 1,533
|
#!/usr/bin/env python
"""
Simple example of a custom, very slow history, that is loaded asynchronously.
By wrapping it in `ThreadedHistory`, the history will load in the background
without blocking any user interaction.
"""
import time
from prompt_toolkit import PromptSession
from prompt_toolkit.history import History, ThreadedHistory
class SlowHistory(History):
"""
Example class that loads the history very slowly...
"""
def load_history_strings(self):
for i in range(1000):
time.sleep(1) # Emulate slowness.
yield "item-%s" % (i,)
def store_string(self, string):
pass # Don't store strings.
def main():
print(
"Asynchronous loading of history. Notice that the up-arrow will work "
"for as far as the history entries are loaded.\n"
"Even when the input is accepted, loading will continue in the "
"background and when the next prompt is displayed.\n"
)
our_history = ThreadedHistory(SlowHistory())
# The history needs to be passed to the `PromptSession`. It can't be passed
# to the `prompt` call because only one history can be used during a
# session.
session = PromptSession(history=our_history)
while True:
text = session.prompt("Say something: ")
print("You said: %s" % text)
if __name__ == "__main__":
main()
|
jonathanslenders/python-prompt-toolkit
|
examples/prompts/history/slow-history.py
|
Python
|
bsd-3-clause
| 1,373
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from flexmock import flexmock
from textwrap import dedent
import six
import time
import json
import logging
import inspect
import os
from osbs.http import HttpResponse
from osbs.constants import (BUILD_FINISHED_STATES,
BUILD_CANCELLED_STATE, WATCH_MODIFIED)
from osbs.exceptions import (OsbsResponseException, OsbsException, OsbsNetworkException)
from osbs.core import check_response, Openshift
from tests.constants import (TEST_BUILD, TEST_CANCELLED_BUILD, TEST_LABEL,
TEST_LABEL_VALUE, TEST_IMAGESTREAM)
from tests.fake_api import openshift, OAPI_PREFIX, API_VER # noqa
from requests.exceptions import ConnectionError
import pytest
from six.moves import http_client
class Response(object):
def __init__(self, status_code, content=None, iterable=None):
self.status_code = status_code
self.iterable = iterable
if content is not None:
self.content = content
def iter_lines(self):
for line in self.iterable:
yield line
def make_json_response(obj):
return HttpResponse(200,
headers={"Content-Type": "application/json"},
content=json.dumps(obj).encode('utf-8'))
class TestCheckResponse(object):
@pytest.mark.parametrize('content', [None, b'OK'])
@pytest.mark.parametrize('status_code', [http_client.OK, http_client.CREATED])
def test_check_response_ok(self, status_code, content):
response = Response(status_code, content=content)
check_response(response)
@pytest.mark.parametrize('log_errors', (True, False))
def test_check_response_bad_stream(self, caplog, log_errors):
iterable = [b'iter', b'lines']
status_code = http_client.CONFLICT
response = Response(status_code, iterable=iterable)
if log_errors:
log_type = logging.ERROR
else:
log_type = logging.DEBUG
with pytest.raises(OsbsResponseException):
if log_errors:
check_response(response)
else:
check_response(response, log_level=log_type)
logged = [(l.getMessage(), l.levelno) for l in caplog.records()]
assert len(logged) == 1
assert logged[0][0] == '[{code}] {message}'.format(code=status_code,
message=b'iterlines')
assert logged[0][1] == log_type
@pytest.mark.parametrize('log_errors', (True, False))
def test_check_response_bad_nostream(self, caplog, log_errors):
status_code = http_client.CONFLICT
content = b'content'
response = Response(status_code, content=content)
if log_errors:
log_type = logging.ERROR
else:
log_type = logging.DEBUG
with pytest.raises(OsbsResponseException):
if log_errors:
check_response(response)
else:
check_response(response, log_level=log_type)
logged = [(l.getMessage(), l.levelno) for l in caplog.records()]
assert len(logged) == 1
assert logged[0][0] == '[{code}] {message}'.format(code=status_code,
message=content)
assert logged[0][1] == log_type
class TestOpenshift(object):
def test_set_labels_on_build(self, openshift): # noqa
labels = openshift.set_labels_on_build(TEST_BUILD, {TEST_LABEL: TEST_LABEL_VALUE})
assert labels.json() is not None
@pytest.mark.parametrize('exc', [ # noqa
ConnectionError('Connection aborted.', http_client.BadStatusLine("''",)),
])
def test_stream_logs_bad_initial_connection(self, openshift, exc):
response = flexmock(status_code=http_client.OK)
(response
.should_receive('iter_lines')
.and_return([b"{'stream': 'foo\n'}"])
.and_raise(StopIteration))
wrapped_exc = OsbsNetworkException('http://spam.com', str(exc), status_code=None,
cause=exc)
(flexmock(openshift)
.should_receive('_get')
# First: simulate initial connection problem
.and_raise(wrapped_exc)
# Next: return a real response
.and_return(response))
(flexmock(time)
.should_receive('time')
.and_return(0)
.and_return(100))
logs = openshift.stream_logs(TEST_BUILD)
assert len([log for log in logs]) == 1
def test_stream_logs_utf8(self, openshift): # noqa
response = flexmock(status_code=http_client.OK)
(response
.should_receive('iter_lines')
.and_return([u"{'stream': 'Uňícode íš hářd\n'}".encode('utf-8')])
.and_raise(StopIteration))
(flexmock(openshift)
.should_receive('_get')
.and_return(response))
logs = openshift.stream_logs(TEST_BUILD)
assert len([log for log in logs]) == 1
def test_list_builds(self, openshift): # noqa
list_builds = openshift.list_builds()
assert list_builds is not None
assert bool(list_builds.json()) # is there at least something
def test_list_pods(self, openshift): # noqa
response = openshift.list_pods(label="openshift.io/build.name=%s" %
TEST_BUILD)
assert isinstance(response, HttpResponse)
def test_get_oauth_token(self, openshift): # noqa
token = openshift.get_oauth_token()
assert token is not None
def test_get_user(self, openshift): # noqa
user = openshift.get_user()
assert user.json() is not None
def test_watch_build(self, openshift): # noqa
response = openshift.wait_for_build_to_finish(TEST_BUILD)
status_lower = response["status"]["phase"].lower()
assert response["metadata"]["name"] == TEST_BUILD
assert status_lower in BUILD_FINISHED_STATES
assert isinstance(TEST_BUILD, six.text_type)
assert isinstance(status_lower, six.text_type)
def test_create_build(self, openshift): # noqa
response = openshift.create_build({})
assert response is not None
assert response.json()["metadata"]["name"] == TEST_BUILD
assert response.json()["status"]["phase"].lower() in BUILD_FINISHED_STATES
def test_cancel_build(self, openshift): # noqa
response = openshift.cancel_build(TEST_CANCELLED_BUILD)
assert response is not None
assert response.json()["metadata"]["name"] == TEST_CANCELLED_BUILD
assert response.json()["status"]["phase"].lower() in BUILD_CANCELLED_STATE
def test_get_build_config(self, openshift): # noqa
mock_response = {"spam": "maps"}
build_config_name = 'some-build-config-name'
expected_url = openshift._build_url("buildconfigs/%s/" % build_config_name)
(flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once()
.and_return(make_json_response(mock_response)))
response = openshift.get_build_config(build_config_name)
assert response['spam'] == 'maps'
def test_get_missing_build_config(self, openshift): # noqa
build_config_name = 'some-build-config-name'
expected_url = openshift._build_url("buildconfigs/%s/" % build_config_name)
(flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once()
.and_return(HttpResponse(404, {}, b'')))
with pytest.raises(OsbsResponseException):
openshift.get_build_config(build_config_name)
def test_get_build_config_by_labels(self, openshift): # noqa
mock_response = {"items": [{"spam": "maps"}]}
label_selectors = (
('label-1', 'value-1'),
('label-2', 'value-2'),
)
expected_url = openshift._build_url(
"buildconfigs/?labelSelector=label-1%3Dvalue-1%2Clabel-2%3Dvalue-2")
(flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once()
.and_return(make_json_response(mock_response)))
response = openshift.get_build_config_by_labels(label_selectors)
assert response['spam'] == 'maps'
def test_get_missing_build_config_by_labels(self, openshift): # noqa
mock_response = {"items": []}
label_selectors = (
('label-1', 'value-1'),
('label-2', 'value-2'),
)
expected_url = openshift._build_url(
"buildconfigs/?labelSelector=label-1%3Dvalue-1%2Clabel-2%3Dvalue-2")
(flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once()
.and_return(make_json_response(mock_response)))
with pytest.raises(OsbsException) as exc:
openshift.get_build_config_by_labels(label_selectors)
assert str(exc.value).startswith('Build config not found')
def test_get_multiple_build_config_by_labels(self, openshift): # noqa
mock_response = {"items": [{"spam": "maps"}, {"eggs": "sgge"}]}
label_selectors = (
('label-1', 'value-1'),
('label-2', 'value-2'),
)
expected_url = openshift._build_url(
"buildconfigs/?labelSelector=label-1%3Dvalue-1%2Clabel-2%3Dvalue-2")
(flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once()
.and_return(make_json_response(mock_response)))
with pytest.raises(OsbsException) as exc:
openshift.get_build_config_by_labels(label_selectors)
assert str(exc.value).startswith('More than one build config found')
@pytest.mark.parametrize(('status_codes', 'should_raise'), [ # noqa
([http_client.OK], False),
([http_client.CONFLICT, http_client.CONFLICT, http_client.OK], False),
([http_client.CONFLICT, http_client.OK], False),
([http_client.CONFLICT, http_client.CONFLICT, http_client.UNAUTHORIZED], True),
([http_client.UNAUTHORIZED], True),
([http_client.CONFLICT for _ in range(10)], True),
])
@pytest.mark.parametrize('update_or_set', ['update', 'set'])
@pytest.mark.parametrize('attr_type', ['labels', 'annotations'])
@pytest.mark.parametrize('object_type', ['build', 'build_config'])
def test_retry_update_attributes(self, openshift,
status_codes, should_raise,
update_or_set,
attr_type,
object_type):
try:
fn = getattr(openshift,
"{update}_{attr}_on_{object}"
.format(update=update_or_set,
attr=attr_type,
object=object_type))
except AttributeError:
return # not every combination is implemented
get_expectation = (flexmock(openshift)
.should_receive('_get')
.times(len(status_codes)))
put_expectation = (flexmock(openshift)
.should_receive('_put')
.times(len(status_codes)))
for status_code in status_codes:
get_response = make_json_response({"metadata": {}})
put_response = HttpResponse(status_code,
headers={},
content=b'')
get_expectation = get_expectation.and_return(get_response)
put_expectation = put_expectation.and_return(put_response)
(flexmock(time)
.should_receive('sleep')
.with_args(0.5))
args = ('any-object-id', {'key': 'value'})
if should_raise:
with pytest.raises(OsbsResponseException):
fn(*args)
else:
fn(*args)
def test_put_image_stream_tag(self, openshift): # noqa
tag_name = 'spam'
tag_id = 'maps:' + tag_name
mock_data = {
'kind': 'ImageStreamTag',
'apiVersion': 'v1',
'tag': {
'name': tag_name
}
}
expected_url = openshift._build_url('imagestreamtags/' + tag_id)
(flexmock(openshift)
.should_receive("_put")
.with_args(expected_url, data=json.dumps(mock_data),
headers={"Content-Type": "application/json"})
.once()
.and_return(make_json_response(mock_data)))
openshift.put_image_stream_tag(tag_id, mock_data)
def _make_tag_template(self):
# TODO: Just read from inputs folder
return json.loads(dedent('''\
{
"kind": "ImageStreamTag",
"apiVersion": "v1",
"metadata": {
"name": "{{IMAGE_STREAM_ID}}:{{TAG_ID}}"
},
"tag": {
"name": "{{TAG_ID}}",
"from": {
"kind": "DockerImage",
"name": "{{REPOSITORY}}:{{TAG_ID}}"
},
"importPolicy": {}
}
}
'''))
@pytest.mark.parametrize('existing_scheduled', (True, False, None)) # noqa
@pytest.mark.parametrize('existing_insecure', (True, False, None))
@pytest.mark.parametrize('expected_scheduled', (True, False))
@pytest.mark.parametrize(('s_annotations', 'expected_insecure'), (
({'openshift.io/image.insecureRepository': 'true'}, True),
({'openshift.io/image.insecureRepository': 'false'}, False),
({}, False),
(None, False),
))
@pytest.mark.parametrize('status_code', (200, 404, 500))
def test_ensure_image_stream_tag(self,
existing_scheduled,
existing_insecure,
expected_scheduled,
s_annotations,
expected_insecure,
status_code,
openshift):
stream_name = 'spam'
stream_repo = 'some.registry.com/spam'
stream = {
'metadata': {'name': stream_name},
'spec': {'dockerImageRepository': stream_repo}
}
if s_annotations is not None:
stream['metadata']['annotations'] = s_annotations
tag_name = 'maps'
tag_id = '{0}:{1}'.format(stream_name, tag_name)
expected_url = openshift._build_url('imagestreamtags/' +
tag_id)
def verify_image_stream_tag(*args, **kwargs):
data = json.loads(kwargs['data'])
assert (bool(data['tag']['importPolicy'].get('insecure')) ==
expected_insecure)
assert (bool(data['tag']['importPolicy'].get('scheduled')) ==
expected_scheduled)
# Also verify new image stream tags are created properly.
if status_code == 404:
assert data['metadata']['name'] == tag_id
assert data['tag']['name'] == tag_name
assert (data['tag']['from']['name'] ==
'{0}:{1}'.format(stream_repo, tag_name))
return make_json_response({})
expected_change = False
expected_error = status_code == 500
mock_response = {}
expectation = (flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once())
if status_code == 200:
existing_image_stream_tag = {'tag': {'importPolicy': {}}}
if existing_insecure is not None:
existing_image_stream_tag['tag']['importPolicy']['insecure'] = \
existing_insecure
if existing_scheduled is not None:
existing_image_stream_tag['tag']['importPolicy']['scheduled'] = \
existing_scheduled
mock_response = existing_image_stream_tag
if expected_insecure != bool(existing_insecure) or \
expected_scheduled != bool(existing_scheduled):
expected_change = True
expectation.and_return(make_json_response(mock_response))
else:
expectation.and_return(HttpResponse(status_code,
headers={},
content=b''))
if status_code == 404:
expected_change = True
if expected_change:
(flexmock(openshift)
.should_receive("_put")
.with_args(expected_url, data=str,
headers={"Content-Type": "application/json"})
.replace_with(verify_image_stream_tag)
.once())
if expected_error:
with pytest.raises(OsbsResponseException):
openshift.ensure_image_stream_tag(
stream, tag_name, self._make_tag_template(), expected_scheduled)
else:
assert (openshift.ensure_image_stream_tag(
stream,
tag_name,
self._make_tag_template(),
expected_scheduled) == expected_change)
@pytest.mark.parametrize(('kwargs', 'called'), (
({'use_auth': True, 'use_kerberos': True}, False),
({'use_auth': True, 'username': 'foo', 'password': 'bar'}, False),
({'use_auth': True, 'token': 'foo'}, False),
({'use_auth': False, 'use_kerberos': True}, False),
({'use_auth': False, 'username': 'foo', 'password': 'bar'}, False),
({'use_auth': False, 'token': 'foo'}, False),
({'use_kerberos': True}, False),
({'username': 'foo', 'password': 'bar'}, False),
({'token': 'foo'}, False),
({'use_auth': False}, True),
({}, True),
))
def test_use_service_account_token(self, kwargs, called):
openshift_mock = flexmock(Openshift).should_receive('can_use_serviceaccount_token')
if called:
openshift_mock.once()
else:
openshift_mock.never()
Openshift(OAPI_PREFIX, API_VER, "/oauth/authorize", **kwargs)
@pytest.mark.parametrize('modify', [True, False]) # noqa
def test_import_image(self, openshift, modify):
"""
tests that import_image return True
regardless if tags were changed
"""
this_file = inspect.getfile(TestCheckResponse)
this_dir = os.path.dirname(this_file)
json_path = os.path.join(this_dir, "mock_jsons", openshift._con.version, 'imagestream.json')
resource_json = json.load(open(json_path))
# keep just 1 tag, so it will be different from oldtags (3 tags)
if modify:
resource_json['status']['tags'] = [resource_json['status']['tags'][0]]
(flexmock(Openshift)
.should_receive('watch_resource')
.and_yield((WATCH_MODIFIED, resource_json)))
assert openshift.import_image(TEST_IMAGESTREAM)
|
vrutkovs/osbs-client
|
tests/test_core.py
|
Python
|
bsd-3-clause
| 19,623
|
from django.conf import settings
from PIL import Image
from appconf import AppConf
class AvatarConf(AppConf):
DEFAULT_SIZE = 80
RESIZE_METHOD = Image.ANTIALIAS
STORAGE_DIR = 'avatars'
GRAVATAR_BASE_URL = 'http://www.gravatar.com/avatar/'
GRAVATAR_BACKUP = True
GRAVATAR_DEFAULT = None
DEFAULT_URL = 'avatar/img/default.jpg'
MAX_AVATARS_PER_USER = 42
MAX_SIZE = 1024 * 1024
THUMB_FORMAT = 'JPEG'
THUMB_QUALITY = 85
HASH_FILENAMES = False
HASH_USERDIRNAMES = False
ALLOWED_FILE_EXTS = None
CACHE_TIMEOUT = 60 * 60
STORAGE = settings.DEFAULT_FILE_STORAGE
CLEANUP_DELETED = False
AUTO_GENERATE_SIZES = ((DEFAULT_SIZE, DEFAULT_SIZE),)
def configure_auto_generate_avatar_sizes(self, value):
return value or getattr(settings, 'AUTO_GENERATE_AVATAR_SIZES',
(self.DEFAULT_SIZE, self.DEFAULT_SIZE))
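# Editor's note (hedged, not in the original file): django-appconf exposes these
# attributes as prefixed Django settings (presumably AVATAR_DEFAULT_SIZE and so
# on) and calls configure_<setting>() hooks to post-process values. Two apparent
# quirks: the hook name above does not match the AUTO_GENERATE_SIZES attribute
# (appconf would look for configure_auto_generate_sizes), and the fallback it
# returns is a single (width, height) pair while the class default is a tuple
# of such pairs.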
|
nai-central/django-avatar
|
avatar/conf.py
|
Python
|
bsd-3-clause
| 909
|
from arybo.lib import MBA, boolean_expr_solve
mba = MBA(64)
x = mba.var('x')
def f(X):
T = ((X+1)&(~X))
C = ((T | 0x7AFAFA697AFAFA69) & 0x80A061440A061440)\
+ ((~T & 0x10401050504) | 0x1010104)
return C
r = f(x)
sols = boolean_expr_solve(r[63], x, 1)
C0 = sols[0].get_int_be()
print(hex(C0))
print(hex(f(0)))
print(hex(f(C0)))
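# Editor's note (hedged): boolean_expr_solve() returns assignments of x for
# which bit 63 of f(x) equals 1; C0 is the first solution decoded as a
# big-endian integer. The final two prints compare f(0) with f(C0), showing how
# f behaves at the solved input versus at zero.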
|
quarkslab/arybo
|
examples/dirac.py
|
Python
|
bsd-3-clause
| 339
|
# -*- coding: utf-8 -*-
import datetime as dt
from flask.ext.login import UserMixin
from metapp2.extensions import bcrypt
from metapp2.database import (
Column,
db,
Model,
ReferenceCol,
relationship,
SurrogatePK
)
class Meeting_Agenda_Item_User(SurrogatePK, Model):
__tablename__ = 'meeting_agenda_item_users'
date_created = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
meeting_agenda_item_id = ReferenceCol('meeting_agenda_items')
user_id = ReferenceCol('users')
def __init__(self, date_created, meeting_agenda_item, user):
db.Model.__init__(self, date_created=date_created, meeting_agenda_item=meeting_agenda_item, user=user)
def __repr__(self):
return 'Meeting Agenda Item User'
|
phamtrisi/metapp2
|
metapp2/meeting_agenda_item_user/models.py
|
Python
|
bsd-3-clause
| 773
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# lapack_testing.py
###############################################################################
from __future__ import print_function
from subprocess import Popen, STDOUT, PIPE
import os, sys, math
import getopt
# Arguments
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:srep:t:n",
["help", "dir", "short", "run", "error","prec=","test=","number"])
except getopt.error as msg:
print(msg)
print("for help use --help")
sys.exit(2)
short_summary=0
with_file=1
just_errors = 0
prec='x'
test='all'
only_numbers=0
test_dir='TESTING'
bin_dir='bin/Release'
abs_bin_dir=os.path.normpath(os.path.join(os.getcwd(),bin_dir))
for o, a in opts:
if o in ("-h", "--help"):
print(sys.argv[0]+" [-h|--help] [-d dir |--dir dir] [-s |--short] [-r |--run] [-e |--error] [-p p |--prec p] [-t test |--test test] [-n | --number]")
print(" - h is to print this message")
print(" - r is to run the LAPACK tests and then analyze the output (.out files). By default, the script will not run the LAPACK tests")
print(" - d [dir] is to indicate where the LAPACK testing directory (.out files) is. By default, the script will use .")
print(" LEVEL OF OUTPUT")
print(" - x is to print a detailed summary")
print(" - e is to print only the error summary")
print(" - s is to print a short summary")
print(" - n is to print the numbers of failing tests (turns on summary mode)")
print(" SELECTION OF TESTS:")
print(" - p [s/c/d/z/x] is to indicate the PRECISION to run:")
print(" s=single")
print(" d=double")
print(" sd=single/double")
print(" c=complex")
print(" z=double complex")
print(" cz=complex/double complex")
print(" x=all [DEFAULT]")
print(" - t [lin/eig/mixed/rfp/all] is to indicate which TEST FAMILY to run:")
print(" lin=Linear Equation")
print(" eig=Eigen Problems")
print(" mixed=mixed-precision")
print(" rfp=rfp format")
print(" all=all tests [DEFAULT]")
print(" EXAMPLES:")
print(" ./lapack_testing.py -n")
print(" Will return the numbers of failed tests by analyzing the LAPACK output")
print(" ./lapack_testing.py -n -r -p s")
print(" Will return the numbers of failed tests in REAL precision by running the LAPACK Tests then analyzing the output")
print(" ./lapack_testing.py -n -p s -t eig ")
print(" Will return the numbers of failed tests in REAL precision by analyzing only the LAPACK output of EIGEN testings")
print("Written by Julie Langou (June 2011) ")
sys.exit(0)
else:
if o in ("-s", "--short"):
short_summary = 1
if o in ("-r", "--run"):
with_file = 0
if o in ("-e", "--error"):
just_errors = 1
if o in ( '-p', '--prec' ):
prec = a
if o in ( '-d', '--dir' ):
test_dir = a
if o in ( '-t', '--test' ):
test = a
if o in ( '-n', '--number' ):
only_numbers = 1
short_summary = 1
# process options
os.chdir(test_dir)
execution=1
summary="\n\t\t\t--> LAPACK TESTING SUMMARY <--\n";
if with_file: summary+= "\t\tProcessing LAPACK Testing output found in the "+test_dir+" directory\n";
summary+="SUMMARY \tnb test run \tnumerical error \tother error \n";
summary+="================ \t===========\t=================\t================ \n";
nb_of_test=0
# Add current directory to the path for subshells of this shell
# Allows the popen to find local files in both windows and unixes
os.environ["PATH"] = os.environ["PATH"]+":."
# Define a function to open the executable (different filenames on unix and Windows)
def run_summary_test( f, cmdline, short_summary):
nb_test_run=0
nb_test_fail=0
nb_test_illegal=0
nb_test_info=0
if (with_file):
if not os.path.exists(cmdline):
error_message=cmdline+" file not found"
r=1
if short_summary: return [nb_test_run,nb_test_fail,nb_test_illegal,nb_test_info]
else:
pipe = open(cmdline,'r')
r=0
else:
if os.name != 'nt':
cmdline='./' + cmdline
else :
cmdline=abs_bin_dir+os.path.sep+cmdline
outfile=cmdline.split()[4]
#pipe = open(outfile,'w')
p = Popen(cmdline, shell=True)#, stdout=pipe)
p.wait()
#pipe.close()
r=p.returncode
pipe = open(outfile,'r')
error_message=cmdline+" did not work"
if r != 0 and not with_file:
print("---- TESTING " + cmdline.split()[0] + "... FAILED(" + error_message +") !")
for line in pipe.readlines():
f.write(str(line))
elif r != 0 and with_file and not short_summary:
print("---- WARNING: please check that you have the LAPACK output : "+cmdline+"!")
print("---- WARNING: with the option -r, we can run the LAPACK testing for you")
# print "---- "+error_message
else:
for line in pipe.readlines():
f.write(str(line))
words_in_line=line.split()
if (line.find("run")!=-1):
# print line
whereisrun=words_in_line.index("run)")
nb_test_run+=int(words_in_line[whereisrun-2])
if (line.find("out of")!=-1):
if (short_summary==0): print(line, end=' ')
whereisout= words_in_line.index("out")
nb_test_fail+=int(words_in_line[whereisout-1])
if ((line.find("illegal")!=-1) or (line.find("Illegal")!=-1)):
if (short_summary==0):print(line, end=' ')
nb_test_illegal+=1
if (line.find(" INFO")!=-1):
if (short_summary==0):print(line, end=' ')
nb_test_info+=1
if (with_file==1):
pipe.close()
f.flush();
return [nb_test_run,nb_test_fail,nb_test_illegal,nb_test_info]
# If filename cannot be opened, send output to sys.stderr
filename = "testing_results.txt"
try:
f = open(filename, 'w')
except IOError:
f = sys.stdout
if (short_summary==0):
print(" ")
print("---------------- Testing LAPACK Routines ----------------")
print(" ")
print("-- Detailed results are stored in", filename)
dtypes = (
("s", "d", "c", "z"),
("REAL ", "DOUBLE PRECISION", "COMPLEX ", "COMPLEX16 "),
)
if prec=='s':
range_prec=[0]
elif prec=='d':
range_prec=[1]
elif prec=='sd':
range_prec=[0,1]
elif prec=='c':
range_prec=[2]
elif prec=='z':
range_prec=[3]
elif prec=='cz':
range_prec=[2,3]
else:
prec='x';
range_prec=list(range(4))
if test=='lin':
range_test=[16]
elif test=='mixed':
range_test=[17]
range_prec=[1,3]
elif test=='rfp':
range_test=[18]
elif test=='eig':
range_test=list(range(16))
else:
range_test=list(range(19))
list_results = [
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
]
for dtype in range_prec:
letter = dtypes[0][dtype]
name = dtypes[1][dtype]
if (short_summary==0):
print(" ")
print("------------------------- %s ------------------------" % name)
print(" ")
sys.stdout.flush()
dtests = (
("nep", "sep", "se2", "svd",
letter+"ec",letter+"ed",letter+"gg",
letter+"gd",letter+"sb",letter+"sg",
letter+"bb","glm","gqr",
"gsv","csd","lse",
letter+"test", letter+dtypes[0][dtype-1]+"test",letter+"test_rfp"),
("Nonsymmetric-Eigenvalue-Problem", "Symmetric-Eigenvalue-Problem", "Symmetric-Eigenvalue-Problem-2-stage", "Singular-Value-Decomposition",
"Eigen-Condition","Nonsymmetric-Eigenvalue","Nonsymmetric-Generalized-Eigenvalue-Problem",
"Nonsymmetric-Generalized-Eigenvalue-Problem-driver", "Symmetric-Eigenvalue-Problem", "Symmetric-Eigenvalue-Generalized-Problem",
"Banded-Singular-Value-Decomposition-routines", "Generalized-Linear-Regression-Model-routines", "Generalized-QR-and-RQ-factorization-routines",
"Generalized-Singular-Value-Decomposition-routines", "CS-Decomposition-routines", "Constrained-Linear-Least-Squares-routines",
"Linear-Equation-routines", "Mixed-Precision-linear-equation-routines","RFP-linear-equation-routines"),
(letter+"nep", letter+"sep", letter+"se2", letter+"svd",
letter+"ec",letter+"ed",letter+"gg",
letter+"gd",letter+"sb",letter+"sg",
letter+"bb",letter+"glm",letter+"gqr",
letter+"gsv",letter+"csd",letter+"lse",
letter+"test", letter+dtypes[0][dtype-1]+"test",letter+"test_rfp"),
)
for dtest in range_test:
nb_of_test=0
# NEED TO SKIP SOME PRECISION (namely s and c) FOR PROTO MIXED PRECISION TESTING
if dtest==17 and (letter=="s" or letter=="c"):
continue
if (with_file==1):
cmdbase=dtests[2][dtest]+".out"
else:
if dtest==16:
# LIN TESTS
cmdbase="xlintst"+letter+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out"
elif dtest==17:
# PROTO LIN TESTS
cmdbase="xlintst"+letter+dtypes[0][dtype-1]+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out"
elif dtest==18:
# PROTO LIN TESTS
cmdbase="xlintstrf"+letter+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out"
else:
# EIG TESTS
cmdbase="xeigtst"+letter+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out"
if (not just_errors and not short_summary):
print("Testing "+name+" "+dtests[1][dtest]+"-"+cmdbase, end=' ')
# Run the process: either to read the file or run the LAPACK testing
nb_test = run_summary_test(f, cmdbase, short_summary)
list_results[0][dtype]+=nb_test[0]
list_results[1][dtype]+=nb_test[1]
list_results[2][dtype]+=nb_test[2]
list_results[3][dtype]+=nb_test[3]
got_error=nb_test[1]+nb_test[2]+nb_test[3]
if (not short_summary):
if (nb_test[0]>0 and just_errors==0):
print("passed: "+str(nb_test[0]))
if (nb_test[1]>0):
print("failing to pass the threshold: "+str(nb_test[1]))
if (nb_test[2]>0):
print("Illegal Error: "+str(nb_test[2]))
if (nb_test[3]>0):
print("Info Error: "+str(nb_test[3]))
if (got_error>0 and just_errors==1):
print("ERROR IS LOCATED IN "+name+" "+dtests[1][dtest]+" [ "+cmdbase+" ]")
print("")
if (just_errors==0):
print("")
# elif (got_error>0):
# print dtests[2][dtest]+".out \t"+str(nb_test[1])+"\t"+str(nb_test[2])+"\t"+str(nb_test[3])
sys.stdout.flush()
if (list_results[0][dtype] > 0 ):
percent_num_error=float(list_results[1][dtype])/float(list_results[0][dtype])*100
percent_error=float(list_results[2][dtype]+list_results[3][dtype])/float(list_results[0][dtype])*100
else:
percent_num_error=0
percent_error=0
summary+=name+"\t"+str(list_results[0][dtype])+"\t\t"+str(list_results[1][dtype])+"\t("+"%.3f" % percent_num_error+"%)\t"+str(list_results[2][dtype]+list_results[3][dtype])+"\t("+"%.3f" % percent_error+"%)\t""\n"
list_results[0][4]+=list_results[0][dtype]
list_results[1][4]+=list_results[1][dtype]
list_results[2][4]+=list_results[2][dtype]
list_results[3][4]+=list_results[3][dtype]
if only_numbers==1:
print(str(list_results[1][4])+"\n"+str(list_results[2][4]+list_results[3][4]))
else:
print(summary)
if (list_results[0][4] > 0 ):
percent_num_error=float(list_results[1][4])/float(list_results[0][4])*100
percent_error=float(list_results[2][4]+list_results[3][4])/float(list_results[0][4])*100
else:
percent_num_error=0
percent_error=0
if (prec=='x'):
print("--> ALL PRECISIONS\t"+str(list_results[0][4])+"\t\t"+str(list_results[1][4])+"\t("+"%.3f" % percent_num_error+"%)\t"+str(list_results[2][4]+list_results[3][4])+"\t("+"%.3f" % percent_error+"%)\t""\n")
if list_results[0][4] == 0:
print("NO TESTS WERE ANALYZED, please use the -r option to run the LAPACK TESTING")
# This may close the sys.stdout stream, so make it the last statement
f.close()
|
kortschak/OpenBLAS
|
lapack-netlib/lapack_testing.py
|
Python
|
bsd-3-clause
| 12,799
|
"""
kombu.transport.zookeeper
=========================
Zookeeper transport.
:copyright: (c) 2010 - 2013 by Mahendra M.
:license: BSD, see LICENSE for more details.
**Synopsis**
Connects to a zookeeper node as <server>:<port>/<vhost>
The <vhost> becomes the base for all the other znodes. So we can use
it like a vhost.
This uses the built-in kazoo recipe for queues
**References**
- https://zookeeper.apache.org/doc/trunk/recipes.html#sc_recipes_Queues
- https://kazoo.readthedocs.io/en/latest/api/recipe/queue.html
**Limitations**
This queue does not offer reliable consumption. An entry is removed from
the queue prior to being processed. So if an error occurs, the consumer
has to re-queue the item or it will be lost.
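**Example (editor's hedged sketch, not in the original docstring)**
A kombu connection URL for this transport would look something like::

    from kombu import Connection
    conn = Connection('zookeeper://localhost:2181/myvhost')

where ``myvhost`` becomes the base znode described above and 2181 is the
default port defined below.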
"""
from __future__ import absolute_import
import os
import socket
from anyjson import loads, dumps
from kombu.five import Empty
from kombu.utils.encoding import bytes_to_str
from . import virtual
MAX_PRIORITY = 9
try:
import kazoo
from kazoo.client import KazooClient
from kazoo.recipe.queue import Queue
KZ_CONNECTION_ERRORS = (
kazoo.exceptions.SystemErrorException,
kazoo.exceptions.ConnectionLossException,
kazoo.exceptions.MarshallingErrorException,
kazoo.exceptions.UnimplementedException,
kazoo.exceptions.OperationTimeoutException,
kazoo.exceptions.NoAuthException,
kazoo.exceptions.InvalidACLException,
kazoo.exceptions.AuthFailedException,
kazoo.exceptions.SessionExpiredException,
)
KZ_CHANNEL_ERRORS = (
kazoo.exceptions.RuntimeInconsistencyException,
kazoo.exceptions.DataInconsistencyException,
kazoo.exceptions.BadArgumentsException,
kazoo.exceptions.MarshallingErrorException,
kazoo.exceptions.UnimplementedException,
kazoo.exceptions.OperationTimeoutException,
kazoo.exceptions.ApiErrorException,
kazoo.exceptions.NoNodeException,
kazoo.exceptions.NoAuthException,
kazoo.exceptions.NodeExistsException,
kazoo.exceptions.NoChildrenForEphemeralsException,
kazoo.exceptions.NotEmptyException,
kazoo.exceptions.SessionExpiredException,
kazoo.exceptions.InvalidCallbackException,
socket.error,
)
except ImportError:
kazoo = None # noqa
KZ_CONNECTION_ERRORS = KZ_CHANNEL_ERRORS = () # noqa
DEFAULT_PORT = 2181
__author__ = 'Mahendra M <mahendra.m@gmail.com>'
class Channel(virtual.Channel):
_client = None
_queues = {}
def _get_path(self, queue_name):
return os.path.join(self.vhost, queue_name)
def _get_queue(self, queue_name):
queue = self._queues.get(queue_name, None)
if queue is None:
queue = Queue(self.client, self._get_path(queue_name))
self._queues[queue_name] = queue
# Ensure that the queue is created
len(queue)
return queue
def _put(self, queue, message, **kwargs):
try:
priority = message['properties']['delivery_info']['priority']
except KeyError:
priority = 0
queue = self._get_queue(queue)
queue.put(dumps(message), priority=(MAX_PRIORITY - priority))
def _get(self, queue):
queue = self._get_queue(queue)
msg = queue.get()
if msg is None:
raise Empty()
return loads(bytes_to_str(msg))
def _purge(self, queue):
count = 0
queue = self._get_queue(queue)
while True:
msg = queue.get()
if msg is None:
break
count += 1
return count
def _delete(self, queue, *args, **kwargs):
if self._has_queue(queue):
self._purge(queue)
self.client.delete(self._get_path(queue))
def _size(self, queue):
queue = self._get_queue(queue)
return len(queue)
def _new_queue(self, queue, **kwargs):
if not self._has_queue(queue):
queue = self._get_queue(queue)
def _has_queue(self, queue):
return self.client.exists(self._get_path(queue)) is not None
def _open(self):
conninfo = self.connection.client
port = conninfo.port or DEFAULT_PORT
conn_str = '%s:%s' % (conninfo.hostname, port)
self.vhost = os.path.join('/', conninfo.virtual_host[0:-1])
conn = KazooClient(conn_str)
conn.start()
return conn
@property
def client(self):
if self._client is None:
self._client = self._open()
return self._client
class Transport(virtual.Transport):
Channel = Channel
polling_interval = 1
default_port = DEFAULT_PORT
connection_errors = (
virtual.Transport.connection_errors + KZ_CONNECTION_ERRORS
)
channel_errors = (
virtual.Transport.channel_errors + KZ_CHANNEL_ERRORS
)
driver_type = 'zookeeper'
driver_name = 'kazoo'
def __init__(self, *args, **kwargs):
if kazoo is None:
raise ImportError('The kazoo library is not installed')
super(Transport, self).__init__(*args, **kwargs)
def driver_version(self):
return kazoo.__version__
|
sivaprakashniet/push_pull
|
p2p/lib/python2.7/site-packages/kombu/transport/zookeeper.py
|
Python
|
bsd-3-clause
| 5,232
|
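The transport above registers under the 'zookeeper' URL scheme, so it can be exercised end to end with kombu's SimpleQueue helper. A minimal sketch, assuming a ZooKeeper server on localhost:2181 and the kazoo library installed; the '/kombu' vhost path is illustrative:
from kombu import Connection

# The vhost segment ('/kombu') becomes the base znode, as described above.
with Connection('zookeeper://localhost:2181/kombu') as conn:
    queue = conn.SimpleQueue('demo')
    queue.put({'hello': 'world'})                # serialized and stored as a child znode
    message = queue.get(block=True, timeout=5)
    print(message.payload)                       # -> {'hello': 'world'}
    message.ack()  # per the Limitations note above, the entry was already removed
    queue.close()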
from django.shortcuts import render
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import views
from geomat.feedback.serializers import FeedBackSerializer
from django.core.mail import send_mail
from rest_framework import status
from drf_yasg.utils import swagger_auto_schema
class FeedBackView(generics.GenericAPIView):
serializer_class = FeedBackSerializer
permission_classes = ()
@swagger_auto_schema(responses={200: "The view responds with 200 if the mail was sent"})
def post(self, request, *args, **kwargs):
serializer = FeedBackSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer = serializer.data
message = send_mail(subject=serializer["emailTitle"],
from_email= "{0} <{1}>".format(serializer["username"],serializer["userEmail"]),
message=serializer["emailContent"],
recipient_list=["geomatdigital@dlist.uni-frankfurt.de"],
fail_silently=False)
if not message:
return Response(status=status.HTTP_400_BAD_REQUEST)
return Response(data=serializer)
# Create your views here.
|
GeoMatDigital/django-geomat
|
geomat/feedback/views.py
|
Python
|
bsd-3-clause
| 1,251
|
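A minimal sketch of exercising FeedBackView with DRF's request factory; the field names follow the serializer keys used in post() above, while the '/feedback/' path and a configured email backend (e.g. Django's locmem backend in tests) are assumptions:
from rest_framework.test import APIRequestFactory
from geomat.feedback.views import FeedBackView

factory = APIRequestFactory()
request = factory.post('/feedback/', {           # the path is illustrative only
    'username': 'jane',
    'userEmail': 'jane@example.com',
    'emailTitle': 'Broken mineral page',
    'emailContent': 'The quartz detail page 404s.',
}, format='json')
response = FeedBackView.as_view()(request)       # 200 with the echoed data on success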
#
from setuptools import setup, find_packages
import sys, os
version = "1.0"
shortdesc = ""
longdesc = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
setup(name="agx.dexteritytemplate",
version=version,
description=shortdesc,
long_description=longdesc,
classifiers=[
"",
],
keywords="",
author="",
author_email="",
url="",
license="",
packages=find_packages("src"),
package_dir={"": "src"},
namespace_packages=["agx"],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'plone.app.dexterity',
##code-section dependencies
##/code-section dependencies
],
extras_require=dict(
##code-section extras_require
##/code-section extras_require
),
entry_points="""
##code-section entry_points
##/code-section entry_points
""",
##code-section additionals
##/code-section additionals
)
|
AnneGilles/dexterity.product
|
setup.py
|
Python
|
bsd-3-clause
| 1,049
|
"""
Module cdifflib -- c implementation of difflib.
Class CSequenceMatcher:
A faster version of difflib.SequenceMatcher. Reimplements a single
bottleneck function - find_longest_match - in native C. The rest of the
implementation is inherited.
"""
__all__ = ['CSequenceMatcher', '__version__']
__version__ = '1.2.5'
import sys
from difflib import SequenceMatcher as _SequenceMatcher
from difflib import Match as _Match
import _cdifflib
class CSequenceMatcher(_SequenceMatcher):
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a CSequenceMatcher.
Simply wraps the difflib.SequenceMatcher.
"""
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
# No autojunk in Python 2.6 and lower
_SequenceMatcher.__init__(self, isjunk, a, b)
else:
_SequenceMatcher.__init__(self, isjunk, a, b, autojunk)
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
Wrapper for the C implementation of this function.
"""
besti, bestj, bestsize = _cdifflib.find_longest_match(self, alo, ahi, blo, bhi)
return _Match(besti, bestj, bestsize)
def set_seq1(self, a):
"""Same as SequenceMatcher.set_seq1, but check for non-list inputs
implementation."""
if a is self.a:
return
self.a = a
if not isinstance(self.a, list):
self.a = list(self.a)
# Types must be hashable to work in the C layer. This will raise if
# list items are *not* hashable.
[hash(x) for x in self.a]
def set_seq2(self, b):
"""Same as SequenceMatcher.set_seq2, but uses the c chainb
implementation.
"""
if b is self.b and hasattr(self, 'isbjunk'):
return
self.b = b
if not isinstance(self.a, list):
self.a = list(self.a)
if not isinstance(self.b, list):
self.b = list(self.b)
# Types must be hashable to work in the C layer. These checks will
# raise the appropriate error if any item is *not* hashable.
[hash(x) for x in self.a]
[hash(x) for x in self.b]
self.matching_blocks = self.opcodes = None
self.fullbcount = None
junk, popular = _cdifflib.chain_b(self)
assert hasattr(junk, '__contains__')
assert hasattr(popular, '__contains__')
self.isbjunk = junk.__contains__
self.isbpopular = popular.__contains__
# We use this to speed up find_longest_match a smidge
def get_matching_blocks(self):
"""Same as SequenceMatcher.get_matching_blocks, but calls through to a
faster loop for find_longest_match. The rest is the same.
"""
if self.matching_blocks is not None:
return self.matching_blocks
matching_blocks = _cdifflib.matching_blocks(self)
matching_blocks.append((len(self.a), len(self.b), 0))
self.matching_blocks = matching_blocks
return map(_Match._make, self.matching_blocks)
|
mduggan/cdifflib
|
cdifflib.py
|
Python
|
bsd-3-clause
| 3,130
|
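Since CSequenceMatcher only overrides the find_longest_match bottleneck, it is a drop-in replacement for difflib.SequenceMatcher and the standard recipes work unchanged; a short sketch with no extra assumptions:
from cdifflib import CSequenceMatcher

sm = CSequenceMatcher(None, 'qabxcd', 'abycdf')
print(sm.ratio())                        # same value difflib.SequenceMatcher yields
for tag, i1, i2, j1, j2 in sm.get_opcodes():
    print(tag, 'a[%d:%d]' % (i1, i2), 'b[%d:%d]' % (j1, j2))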
# Copyright (C) 2009, Hyves (Startphone Ltd.)
#
# This module is part of the Concurrence Framework and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""This module implements the stackless API on top of py.magic greenlet API
This way it is possible to run concurrence applications on top of normal python
using the greenlet module.
Because the greenlet module uses only 'hard' switching as opposed to stackless 'soft' switching
it is a bit slower (about 35%), but very useful because you don't need to install stackless.
Note that this does not aim to be a complete implementation of stackless on top of greenlets,
just enough of the stackless API to make concurrence run.
This code was inspired by:
http://aigamedev.com/programming-tips/round-robin-multi-tasking and
also by the pypy implementation of the same thing (buggy, not being maintained?) at
https://codespeak.net/viewvc/pypy/dist/pypy/lib/stackless.py?view=markup
"""
try:
from py.magic import greenlet #as of version 1.0 of py, it does not supply greenlets anymore
except ImportError:
from greenlet import greenlet #there is an older package containing just the greenlet lib
from collections import deque
class TaskletExit(SystemExit):pass
import __builtin__
__builtin__.TaskletExit = TaskletExit
class bomb(object):
"""used as a result value for sending exceptions trough a channel"""
def __init__(self, exc_type = None, exc_value = None, exc_traceback = None):
self.type = exc_type
self.value = exc_value
self.traceback = exc_traceback
def raise_(self):
raise self.type, self.value, self.traceback
class channel(object):
"""implementation of stackless's channel object"""
def __init__(self):
self.balance = 0
self.queue = deque()
def receive(self):
return _scheduler._receive(self)
def send(self, data):
return _scheduler._send(self, data)
def send_exception(self, exp_type, *args):
self.send(bomb(exp_type, exp_type(*args)))
def send_sequence(self, iterable):
for item in iterable:
self.send(item)
class tasklet(object):
"""implementation of stackless's tasklet object"""
def __init__(self, f = None, greenlet = None, alive = False):
self.greenlet = greenlet
self.func = f
self.alive = alive
self.blocked = False
self.data = None
def bind(self, func):
if not callable(func):
raise TypeError('tasklet function must be a callable')
self.func = func
def __call__(self, *args, **kwargs):
"""this is where the new task starts to run, e.g. it is where the greenlet is created
and the 'task' is first scheduled to run"""
if self.func is None:
raise TypeError('tasklet function must be a callable')
def _func(*_args, **_kwargs):
try:
self.func(*args, **kwargs)
except TaskletExit:
pass #let it pass silently
except:
import logging
logging.exception('unhandled exception in greenlet')
#don't propagate to parent
finally:
assert _scheduler.current == self
_scheduler.remove(self)
if _scheduler._runnable: #there are more tasklets scheduled to run next
#this makes sure that flow will continue in the correct greenlet, i.e. the next in the schedule
self.greenlet.parent = _scheduler._runnable[0].greenlet
self.alive = False
del self.greenlet
del self.func
del self.data
self.greenlet = greenlet(_func)
self.alive = True
_scheduler.append(self)
return self
def kill(self):
_scheduler.throw(self, TaskletExit)
def raise_exception(self, *args):
_scheduler.throw(self, *args)
def __str__(self):
return repr(self)
def __repr__(self):
if hasattr(self, 'name'):
_id = self.name
else:
_id = str(self.func)
return '<tasklet %s at %0x>' % (_id, id(self))
class scheduler(object):
def __init__(self):
self._main_task = tasklet(greenlet = greenlet.getcurrent(), alive = True)
#all non-blocked tasks are in this queue
#each task appears only once in this queue
#the current task is the first item in the queue
self._runnable = deque([self._main_task])
def schedule(self):
"""schedules the next tasks and puts the current task back at the queue of runnables"""
self._runnable.rotate(-1)
next_task = self._runnable[0]
next_task.greenlet.switch()
def schedule_block(self):
"""blocks the current task and schedules next"""
self._runnable.popleft()
next_task = self._runnable[0]
next_task.greenlet.switch()
def throw(self, task, *args):
if not task.alive: return #this is what stackless does
assert task.blocked or task in self._runnable
task.greenlet.parent = self._runnable[0].greenlet
if task.blocked:
self._runnable.appendleft(task)
else:
self._runnable.remove(task)
self._runnable.appendleft(task)
task.greenlet.throw(*args)
def _receive(self, channel):
#Receiving 1):
#A tasklet wants to receive and there is
#a queued sending tasklet. The receiver takes
#its data from the sender, unblocks it,
#and inserts it at the end of the runnables.
#The receiver continues with no switch.
#Receiving 2):
#A tasklet wants to receive and there is
#no queued sending tasklet.
#The receiver will become blocked and inserted
#into the queue. The next sender will
#handle the rest through "Sending 1)".
if channel.queue: #some sender
channel.balance -= 1
sender = channel.queue.popleft()
sender.blocked = False
self._runnable.append(sender)
data, sender.data = sender.data, None
else: #no sender
current = self._runnable[0]
channel.queue.append(current)
channel.balance -= 1
current.blocked = True
try:
self.schedule_block()
except:
channel.queue.remove(current)
channel.balance += 1
current.blocked = False
raise
data, current.data = current.data, None
if isinstance(data, bomb):
data.raise_()
else:
return data
def _send(self, channel, data):
# Sending 1):
# A tasklet wants to send and there is
# a queued receiving tasklet. The sender puts
# its data into the receiver, unblocks it,
# and inserts it at the top of the runnables.
# The receiver is scheduled.
# Sending 2):
# A tasklet wants to send and there is
# no queued receiving tasklet.
# The sender will become blocked and inserted
# into the queue. The next receiver will
# handle the rest through "Receiving 1)".
#print 'send q', channel.queue
if channel.queue: #some receiver
channel.balance += 1
receiver = channel.queue.popleft()
receiver.data = data
receiver.blocked = False
self._runnable.rotate(-1)
self._runnable.appendleft(receiver)
self._runnable.rotate(1)
self.schedule()
else: #no receiver
current = self.current
channel.queue.append(current)
channel.balance += 1
current.data = data
current.blocked = True
try:
self.schedule_block()
except:
channel.queue.remove(current)
channel.balance -= 1
current.data = None
current.blocked = False
raise
def remove(self, task):
assert task.blocked or task in self._runnable
if task in self._runnable:
self._runnable.remove(task)
def append(self, task):
assert task not in self._runnable
self._runnable.append(task)
@property
def runcount(self):
return len(self._runnable)
@property
def current(self):
return self._runnable[0]
#there is only 1 scheduler, this is it:
_scheduler = scheduler()
def getruncount():
return _scheduler.runcount
def getcurrent():
return _scheduler.current
def schedule():
return _scheduler.schedule()
|
concurrence/concurrence
|
lib/concurrence/_stackless.py
|
Python
|
bsd-3-clause
| 8,949
|
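The channel semantics documented in _receive/_send above can be seen in a short ping/pong sketch; it assumes the module is importable as concurrence._stackless (per the repository path) and that a greenlet implementation is installed:
from concurrence._stackless import tasklet, channel, schedule, getruncount

ch = channel()

def pinger():
    for i in range(3):
        ch.send('ping %d' % i)     # no receiver queued yet: "Sending 2)" blocks us

def ponger():
    for i in range(3):
        print(ch.receive())        # unblocks the queued sender: "Receiving 1)"

tasklet(pinger)()                  # __call__ creates the greenlet and schedules it
tasklet(ponger)()
while getruncount() > 1:           # the main tasklet counts as one runnable
    schedule()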
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2015, Robotnik Automation SLL
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Robotnik Automation SSL nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
import time, threading
from robotnik_msgs.msg import State
from sensor_msgs.msg import LaserScan, PointCloud2
from laser_assembler.srv import *
from robotnik_msgs.srv import enable_disable
DEFAULT_FREQ = 1.0
MAX_FREQ = 2.0
DEFAULT_TIME_PCLOUD_BACK = 3
# Class template of a Robotnik component for Python
class LaserAssemblerNode:
def __init__(self, args):
self.node_name = rospy.get_name().replace('/','')
self.desired_freq = args['desired_freq']
# Time window to look back when assembling the point cloud
self._time_point_cloud_back = args['time_point_cloud_back']
# Checks value of freq
if self.desired_freq <= 0.0 or self.desired_freq > MAX_FREQ:
rospy.loginfo('%s::init: Desired freq (%f) is not possible. Setting desired_freq to %f'%(self.node_name,self.desired_freq, DEFAULT_FREQ))
self.desired_freq = DEFAULT_FREQ
self._assembly_scans_service_name = args['assembly_scans_service_name']
self._point_cloud_publisher_topic_name = args['point_cloud_publisher_topic_name']
self._publish_point_cloud = False
self.real_freq = 0.0
# Saves the state of the component
self.state = State.INIT_STATE
# Saves the previous state
self.previous_state = State.INIT_STATE
# flag to control the initialization of the component
self.initialized = False
# flag to control the initialization of ROS stuff
self.ros_initialized = False
# flag to control that the control loop is running
self.running = False
# Variable used to control the loop frequency
self.time_sleep = 1.0 / self.desired_freq
# State msg to publish
self.msg_state = State()
# Timer to publish state
self.publish_state_timer = 1
self.t_publish_state = threading.Timer(self.publish_state_timer, self.publishROSstate)
def setup(self):
'''
Initializes the component
@return: 0 if it's performed successfully
'''
self.initialized = True
return 0
def rosSetup(self):
'''
Creates and inits ROS components
'''
if self.ros_initialized:
return 0
# Publishers
self._state_publisher = rospy.Publisher('~state', State, queue_size=10)
self._point_cloud_publisher = rospy.Publisher(self._point_cloud_publisher_topic_name, PointCloud2, queue_size=10)
# Subscribers
# topic_name, msg type, callback, queue_size
# self.topic_sub = rospy.Subscriber('topic_name', Int32, self.topicCb, queue_size = 10)
# Service Servers
self.publish_point_cloud_service_server = rospy.Service('~publish_point_cloud', enable_disable, self.publishPointCloudServiceCb)
# Service Clients
self._assemble_scans_service_client = rospy.ServiceProxy(self._assembly_scans_service_name, AssembleScans2)
#ret = self.service_client.call(ServiceMsg)
self.ros_initialized = True
self.publishROSstate()
return 0
def shutdown(self):
'''
Shuts down the device
@return: 0 if it's performed successfully, -1 if there's any problem or the component is running
'''
if self.running or not self.initialized:
return -1
rospy.loginfo('%s::shutdown'%self.node_name)
# Cancels current timers
self.t_publish_state.cancel()
self._state_publisher.unregister()
self.initialized = False
return 0
def rosShutdown(self):
'''
Shuts down all ROS components
@return: 0 if it's performed successfully, -1 if there's any problem or the component is running
'''
if self.running or not self.ros_initialized:
return -1
# Performs ROS topics & services shutdown
self._state_publisher.unregister()
self.ros_initialized = False
return 0
def stop(self):
'''
Stops the main control loop
'''
self.running = False
return 0
def start(self):
'''
Runs ROS configuration and the main control loop
@return: 0 if OK
'''
self.rosSetup()
if self.running:
return 0
self.running = True
self.controlLoop()
return 0
def controlLoop(self):
'''
Main loop of the component
Manages actions by state
'''
while self.running and not rospy.is_shutdown():
t1 = time.time()
if self.state == State.INIT_STATE:
self.initState()
elif self.state == State.STANDBY_STATE:
self.standbyState()
elif self.state == State.READY_STATE:
self.readyState()
elif self.state == State.EMERGENCY_STATE:
self.emergencyState()
elif self.state == State.FAILURE_STATE:
self.failureState()
elif self.state == State.SHUTDOWN_STATE:
self.shutdownState()
self.allState()
t2 = time.time()
tdiff = (t2 - t1)
t_sleep = self.time_sleep - tdiff
if t_sleep > 0.0:
try:
rospy.sleep(t_sleep)
except rospy.exceptions.ROSInterruptException:
rospy.loginfo('%s::controlLoop: ROS interrupt exception'%self.node_name)
self.running = False
t3= time.time()
self.real_freq = 1.0/(t3 - t1)
self.running = False
# Performs component shutdown
self.shutdownState()
# Performs ROS shutdown
self.rosShutdown()
rospy.loginfo('%s::controlLoop: exit control loop'%self.node_name)
return 0
def rosPublish(self):
'''
Publish topics at standard frequency
'''
return 0
def initState(self):
'''
Actions performed in init state
'''
if not self.initialized:
self.setup()
else:
self.switchToState(State.STANDBY_STATE)
return
def standbyState(self):
'''
Actions performed in standby state
'''
if self._publish_point_cloud:
self.switchToState(State.READY_STATE)
return
def readyState(self):
'''
Actions performed in ready state
'''
try:
t_end = rospy.Time.now()
t_begin = t_end - rospy.Duration.from_sec(self._time_point_cloud_back)
resp = self._assemble_scans_service_client(t_begin, t_end)
rospy.loginfo("%s::readyState: Got cloud with %u points" %(self.node_name, len(resp.cloud.data)))
self._point_cloud_publisher.publish(resp.cloud)
except rospy.ServiceException, e:
rospy.logerr("%s::readyState: Service call failed: %s"%(self.node_name, e))
if not self._publish_point_cloud:
self.switchToState(State.STANDBY_STATE)
return
def shutdownState(self):
'''
Actions performed in shutdown state
'''
if self.shutdown() == 0:
self.switchToState(State.INIT_STATE)
return
def emergencyState(self):
'''
Actions performed in emergency state
'''
return
def failureState(self):
'''
Actions performed in failure state
'''
return
def switchToState(self, new_state):
'''
Performs the change of state
'''
if self.state != new_state:
self.previous_state = self.state
self.state = new_state
rospy.loginfo('%s::switchToState: %s'%(self.node_name, self.stateToString(self.state)))
return
def allState(self):
'''
Actions performed in all states
'''
self.rosPublish()
return
def stateToString(self, state):
'''
@param state: state to set
@type state: State
@returns the equivalent string of the state
'''
if state == State.INIT_STATE:
return 'INIT_STATE'
elif state == State.STANDBY_STATE:
return 'STANDBY_STATE'
elif state == State.READY_STATE:
return 'READY_STATE'
elif state == State.EMERGENCY_STATE:
return 'EMERGENCY_STATE'
elif state == State.FAILURE_STATE:
return 'FAILURE_STATE'
elif state == State.SHUTDOWN_STATE:
return 'SHUTDOWN_STATE'
else:
return 'UNKNOWN_STATE'
def publishROSstate(self):
'''
Publish the State of the component at the desired frequency
'''
self.msg_state.state = self.state
self.msg_state.state_description = self.stateToString(self.state)
self.msg_state.desired_freq = self.desired_freq
self.msg_state.real_freq = self.real_freq
self._state_publisher.publish(self.msg_state)
self.t_publish_state = threading.Timer(self.publish_state_timer, self.publishROSstate)
self.t_publish_state.start()
"""
def topicCb(self, msg):
'''
Callback for inelfe_video_manager state
@param msg: received message
@type msg: std_msgs/Int32
'''
# DEMO
rospy.loginfo('LaserAssemblerNode:topicCb')
"""
def publishPointCloudServiceCb(self, req):
'''
ROS service server
@param req: Required action
@type req: robotnik_msgs/enable_disable
'''
# DEMO
rospy.loginfo('%s:publishPointCloudServiceCb: setting point cloud publication to %s'%(self.node_name, req.value) )
self._publish_point_cloud = req.value
return True
def main():
rospy.init_node("robospect_laser_assembler_node")
_name = rospy.get_name().replace('/','')
arg_defaults = {
'topic_state': 'state',
'desired_freq': DEFAULT_FREQ,
'assembly_scans_service_name': '/assemble_scans2',
'point_cloud_publisher_topic_name': '/assembled_cloud',
'time_point_cloud_back': DEFAULT_TIME_PCLOUD_BACK,
}
args = {}
for name in arg_defaults:
try:
if rospy.search_param(name):
args[name] = rospy.get_param('~%s'%(name)) # Adding the name of the node, because the param has the namespace of the node
else:
args[name] = arg_defaults[name]
#print name
except rospy.ROSException, e:
rospy.logerr('%s: %s'%(e, _name))
rc_node = LaserAssemblerNode(args)
rospy.loginfo('%s: starting'%(_name))
rc_node.start()
if __name__ == "__main__":
main()
|
RobospectEU/robospect_common
|
robospect_laser_assembler/src/robospect_laser_assembler_node.py
|
Python
|
bsd-3-clause
| 10,929
|
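A hedged sketch of toggling the assembler from another node. The service name combines the default node name from main() with the '~publish_point_cloud' binding in rosSetup(), and the enable_disable request is assumed to carry the single 'value' field read in the callback above:
import rospy
from robotnik_msgs.srv import enable_disable

rospy.init_node('assembler_client')
srv = '/robospect_laser_assembler_node/publish_point_cloud'
rospy.wait_for_service(srv)
toggle = rospy.ServiceProxy(srv, enable_disable)
toggle(True)    # STANDBY -> READY: the node starts publishing assembled clouds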
'''
Created on Jul 23, 2015
@author: Aaron Klein
'''
import GPy
import numpy as np
from robo.task.rembo import REMBO
from robo.task.synthetic_functions.branin import Branin
from robo.models.gpy_model import GPyModel
from robo.maximizers.cmaes import CMAES
from robo.solver.bayesian_optimization import BayesianOptimization
from robo.acquisition.ei import EI
class BraninInBillionDims(REMBO):
def __init__(self):
self.b = Branin()
X_lower = np.concatenate((self.b.X_lower, np.zeros([999998])))
X_upper = np.concatenate((self.b.X_upper, np.ones([999998])))
super(BraninInBillionDims, self).__init__(X_lower, X_upper, d=2)
def objective_function(self, x):
return self.b.objective_function(x[:, :2])
task = BraninInBillionDims()
kernel = GPy.kern.Matern52(input_dim=task.n_dims)
model = GPyModel(kernel, optimize=True, num_restarts=10)
acquisition_func = EI(model, task.X_lower, task.X_upper)
maximizer = CMAES(acquisition_func, task.X_lower, task.X_upper)
bo = BayesianOptimization(acquisition_func=acquisition_func,
model=model,
maximize_func=maximizer,
task=task)
bo.run(500)
|
aaronkl/RoBO
|
examples/example_branin_in_billion_dims.py
|
Python
|
bsd-3-clause
| 1,197
|
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs tests to ensure annotation tests are working as expected.
"""
from __future__ import print_function
import os
import argparse
import sys
import tempfile
from annotation_tools import NetworkTrafficAnnotationTools
# If this test starts failing, please set TEST_IS_ENABLED to "False" and file a
# bug to get this reenabled, and cc the people listed in
# //tools/traffic_annotation/OWNERS.
TEST_IS_ENABLED = True
MINIMUM_EXPECTED_NUMBER_OF_ANNOTATIONS = 260
class TrafficAnnotationTestsChecker():
def __init__(self, build_path=None, annotations_filename=None):
"""Initializes a TrafficAnnotationTestsChecker object.
Args:
build_path: str Absolute or relative path to a fully compiled build
directory.
"""
self.tools = NetworkTrafficAnnotationTools(build_path)
self.last_result = None
self.persist_annotations = bool(annotations_filename)
if not annotations_filename:
annotations_file = tempfile.NamedTemporaryFile()
annotations_filename = annotations_file.name
annotations_file.close()
self.annotations_filename = annotations_filename
def RunAllTests(self):
"""Runs all tests and returns the result."""
return self.CheckAuditorResults() and self.CheckOutputExpectations()
def CheckAuditorResults(self):
"""Runs auditor using different configurations, expecting to run error free,
and having equal results in the exported TSV file in all cases. The TSV file
provides a summary of all annotations and their content.
Returns:
bool True if all results are as expected.
"""
configs = [
# Similar to trybot.
[
"--test-only",
"--error-resilient",
],
# Failing on any runtime error.
[
"--test-only",
],
# No heuristic filtering.
[
"--test-only",
"--no-filtering",
],
]
self.last_result = None
for config in configs:
result = self._RunTest(config)
if not result:
print("No output for config: %s" % config)
return False
if self.last_result and self.last_result != result:
print("Unexpected different results for config: %s" % config)
return False
self.last_result = result
return True
def CheckOutputExpectations(self):
# This test can be replaced by getting results from a diagnostic mode call
# to traffic_annotation_auditor, and checking for an expected minimum number
# of items for each type of pattern that it extracts. E.g., we should have
# many annotations of each type (complete, partial, ...), functions that
# need annotations, direct assignment to mutable annotations, etc.
# |self.last_result| includes the content of the TSV file that the auditor
# generates. Counting the number of end of lines in the text will give the
# number of extracted annotations.
annotations_count = self.last_result.count("\n")
print("%i annotations found in auditor's output." % annotations_count)
if annotations_count < MINIMUM_EXPECTED_NUMBER_OF_ANNOTATIONS:
print("Annotations are expected to be at least %i." %
MINIMUM_EXPECTED_NUMBER_OF_ANNOTATIONS)
return False
return True
def _RunTest(self, args):
"""Runs the auditor test with given |args|, and returns the extracted
annotations.
Args:
args: list of str Arguments to be passed to auditor.
Returns:
str Content of annotations.tsv file if successful, otherwise None.
"""
print("Running auditor using config: %s" % args)
try:
os.remove(self.annotations_filename)
except OSError:
pass
stdout_text, stderr_text, return_code = self.tools.RunAuditor(
args + ["--annotations-file=%s" % self.annotations_filename])
annotations = None
if os.path.exists(self.annotations_filename):
# When tests are run on all files (without filtering), there might be some
# compile errors in irrelevant files on Windows that can be ignored.
if (return_code and "--no-filtering" in args and
sys.platform.startswith(('win', 'cygwin'))):
print("Ignoring return code: %i" % return_code)
return_code = 0
if not return_code:
annotations = open(self.annotations_filename).read()
if not self.persist_annotations:
os.remove(self.annotations_filename)
if annotations:
print("Test PASSED.")
else:
print("Test FAILED.")
if stdout_text:
print(stdout_text)
if stderr_text:
print(stderr_text)
return annotations
def main():
if not TEST_IS_ENABLED:
return 0
parser = argparse.ArgumentParser(
description="Traffic Annotation Tests checker.")
parser.add_argument(
'--build-path',
help='Specifies a compiled build directory, e.g. out/Debug. If not '
'specified, the script tries to guess it. Will not proceed if not '
'found.')
parser.add_argument(
'--annotations-file',
help='Optional path to a TSV output file with all annotations.')
args = parser.parse_args()
checker = TrafficAnnotationTestsChecker(args.build_path,
args.annotations_file)
return 0 if checker.RunAllTests() else 1
if '__main__' == __name__:
sys.exit(main())
|
endlessm/chromium-browser
|
tools/traffic_annotation/scripts/traffic_annotation_auditor_tests.py
|
Python
|
bsd-3-clause
| 5,523
|
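Besides the command line entry point, the checker class can be driven directly; a minimal sketch, assuming it is run from the scripts directory so the module imports, with an illustrative build path:
from traffic_annotation_auditor_tests import TrafficAnnotationTestsChecker

checker = TrafficAnnotationTestsChecker(build_path='out/Default',
                                        annotations_filename='annotations.tsv')
ok = checker.RunAllTests()   # three auditor configs, then the annotation-count check
print('PASS' if ok else 'FAIL')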
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'temps'.
"""
from primaires.interpreteur.commande.commande import Commande
class CmdTemps(Commande):
"""Commande 'temps'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "temps", "time")
self.aide_courte = "affiche la date et l'heure de l'univers"
self.aide_longue = \
"Cette commande affiche la date et l'heure actuelles de l'univers."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
temps = type(self).importeur.temps.temps
personnage << "Nous sommes le {}.\nIl est {}.".format(
temps.date_formatee, temps.heure_formatee)
|
stormi/tsunami
|
src/primaires/temps/commandes/temps/__init__.py
|
Python
|
bsd-3-clause
| 2,324
|
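A hypothetical sibling command following the same Commande pattern (sketch only; 'meteo'/'weather' and everything inside it are illustrative, not part of the engine shown above):
from primaires.interpreteur.commande.commande import Commande

class CmdMeteo(Commande):
    """The 'meteo' command (hypothetical example)."""
    def __init__(self):
        Commande.__init__(self, "meteo", "weather")   # French name, English alias
        self.aide_courte = "displays the current weather"
        self.aide_longue = \
            "This command displays the current weather in the universe."
    def interpreter(self, personnage, dic_masques):
        personnage << "The sky is clear."   # '<<' sends text to the player, as above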
# -*- coding: utf-8 -*-
# 3rd party imports
from model_bakery import baker
# CrAdmin imports
from cradmin_legacy import cradmin_testhelpers
# Django imports
from django import test
# Devilry imports
from devilry.devilry_qualifiesforexam_plugin_students.views import select_students
class TestStudentSelectionView(test.TestCase, cradmin_testhelpers.TestCaseMixin):
viewclass = select_students.PluginSelectStudentsView
def test_elements_are_listed(self):
testperiod = baker.make_recipe('devilry.apps.core.period_active')
baker.make('core.RelatedStudent', period=testperiod, _quantity=20)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testperiod)
self.assertEqual(len(mockresponse.selector.list('.cradmin-legacy-listbuilder-itemvalue')), 20)
def test_all_students_are_listed(self):
testperiod = baker.make_recipe('devilry.apps.core.period_active')
relatedstudents = baker.make('core.RelatedStudent', period=testperiod, _quantity=20)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testperiod)
selectorlist = mockresponse.selector.list(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-description'
)
elements_normalized = [element.alltext_normalized for element in selectorlist]
self.assertEqual(len(elements_normalized), len(relatedstudents))
for relatedstudent in relatedstudents:
self.assertIn(relatedstudent.user.shortname, elements_normalized)
|
devilry/devilry-django
|
devilry/devilry_qualifiesforexam_plugin_students/tests/test_student_selection_view.py
|
Python
|
bsd-3-clause
| 1,532
|
###############################################################################
## fs.py
## 9te [angband.ornl.gov]
## Wed Jan 12 10:37:50 2011
###############################################################################
## Copyright (C) 2008 Oak Ridge National Laboratory, UT-Battelle, LLC.
##---------------------------------------------------------------------------##
## generated by /data/denovo/production/head/setup/bin/pygen built on 20110112
###############################################################################
import os, sys, math, string
# pyspn equation type
from spn_fv import *
print_it = False
##---------------------------------------------------------------------------##
## MAIN
##---------------------------------------------------------------------------##
initialize(sys.argv)
if node() == 0:
print "Denovo - pyspn Python Front-End"
print "-------------------------------"
print "Release : %16s" % (release())
print "Release Date : %16s" % (release_date())
print "Build Date : %16s" % (build_date())
print
timer = Timer()
timer.start()
##---------------------------------------------------------------------------##
## XS DATA
####### UO2 Fuel-Clad Macroscopic Cross Sections ##########
## Transport-corrected Total Cross Sections
T_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
T_UO2[0] = 1.77949e-1
T_UO2[1] = 3.29805e-1
T_UO2[2] = 4.80388e-1
T_UO2[3] = 5.54367e-1
T_UO2[4] = 3.11801e-1
T_UO2[5] = 3.95168e-1
T_UO2[6] = 5.64406e-1
## Fission Cross Section
F_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
F_UO2[0] = 7.21206e-3
F_UO2[1] = 8.19301e-4
F_UO2[2] = 6.45320e-3
F_UO2[3] = 1.85648e-2
F_UO2[4] = 1.78084e-2
F_UO2[5] = 8.30348e-2
F_UO2[6] = 2.16004e-1
## Nu
N_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
N_UO2[0] = 2.78145
N_UO2[1] = 2.47443
N_UO2[2] = 2.43383
N_UO2[3] = 2.43380
N_UO2[4] = 2.43380
N_UO2[5] = 2.43380
N_UO2[6] = 2.43380
## Chi
C_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
C_UO2[0] = 5.87910e-1
C_UO2[1] = 4.11760e-1
C_UO2[2] = 3.39060e-4
C_UO2[3] = 1.17610e-7
C_UO2[4] = 0.00000000
C_UO2[5] = 0.00000000
C_UO2[6] = 0.00000000
## Scattering Matrix for UO2 Fuel-Clad (Macroscopic)
S_UO2 = [ [[]], [[]], [[]], [[]], [[]], [[]], [[]]]
S_UO2[0] = [[1.27537e-1]]
S_UO2[1] = [[4.23780e-2], [3.24456e-1]]
S_UO2[2] = [[9.43740e-6], [1.63140e-3], [4.50940e-1]]
S_UO2[3] = [[5.51630e-9], [3.14270e-9], [2.67920e-3], [4.52565e-1], [1.25250e-4]]
S_UO2[4] = [[0.00000000], [0.00000000], [0.00000000], [5.56640e-3], [2.71401e-1], [1.29680e-3]]
S_UO2[5] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [1.02550e-2], [2.65802e-1], [8.54580e-3]]
S_UO2[6] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [1.00210e-8], [1.68090e-2], [2.73080e-1]]
## Upscattering Matrix
U_UO2 = [ [], [], [], [], [], [], [] ]
U_UO2[0] = []
U_UO2[1] = []
U_UO2[2] = []
U_UO2[3] = [4]
U_UO2[4] = [5]
U_UO2[5] = [6]
U_UO2[6] = []
######## 4.3% MOX Fuel-Clad Macroscopic Cross-Sections ############
## Transport-corrected Total Cross Sections
T_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
T_MOX43[0] = 1.78731e-1
T_MOX43[1] = 3.30849e-1
T_MOX43[2] = 4.83772e-1
T_MOX43[3] = 5.66922e-1
T_MOX43[4] = 4.26227e-1
T_MOX43[5] = 6.78997e-1
T_MOX43[6] = 6.82852e-1
## Fission Cross-Sections
F_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
F_MOX43[0] = 7.62704e-3
F_MOX43[1] = 8.76898e-4
F_MOX43[2] = 5.69835e-3
F_MOX43[3] = 2.28872e-2
F_MOX43[4] = 1.07635e-2
F_MOX43[5] = 2.32757e-1
F_MOX43[6] = 2.48968e-1
## Nu Cross-Sections
N_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
N_MOX43[0] = 2.85209
N_MOX43[1] = 2.89099
N_MOX43[2] = 2.85486
N_MOX43[3] = 2.86073
N_MOX43[4] = 2.85447
N_MOX43[5] = 2.86415
N_MOX43[6] = 2.86780
## Chi Cross-Sections
C_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
C_MOX43[0] = 5.87910e-1
C_MOX43[1] = 4.11760e-1
C_MOX43[2] = 3.39060e-4
C_MOX43[3] = 1.17610e-7
C_MOX43[4] = 0.00000000
C_MOX43[5] = 0.00000000
C_MOX43[6] = 0.00000000
## Scattering Matrix for 4.3% MOX Fuel-Clad (Macroscopic)
S_MOX43 = [ [[]], [[]], [[]], [[]], [[]], [[]], [[]] ]
S_MOX43[0] = [[1.28876e-1]]
S_MOX43[1] = [[4.14130e-2], [3.25452e-1]]
S_MOX43[2] = [[8.22900e-6], [1.63950e-3], [4.53188e-1]]
S_MOX43[3] = [[5.04050e-9], [1.59820e-9], [2.61420e-3], [4.57173e-1], [1.60460e-4]]
S_MOX43[4] = [[0.00000000], [0.00000000], [0.00000000], [5.53940e-3], [2.76814e-1], [2.00510e-3]]
S_MOX43[5] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [9.31270e-3], [2.52962e-1], [8.49480e-3]]
S_MOX43[6] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [9.16560e-9], [1.48500e-2], [2.65007e-1]]
## Upscattering Matrix
U_MOX43 = [ [], [], [], [], [], [], [] ]
U_MOX43[0] = []
U_MOX43[1] = []
U_MOX43[2] = []
U_MOX43[3] = [4]
U_MOX43[4] = [5]
U_MOX43[5] = [6]
U_MOX43[6] = []
############### Moderator 1 Macroscopic Cross-Sections ################
## Transport-corrected Total Cross Section
T_MOD1 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
T_MOD1[0] = 1.59206e-1
T_MOD1[1] = 4.12970e-1
T_MOD1[2] = 5.90310e-1
T_MOD1[3] = 5.84350e-1
T_MOD1[4] = 7.18000e-1
T_MOD1[5] = 1.25445
T_MOD1[6] = 2.65038
## Scattering Matrix for Moderator (Macroscopic)
S_MOD1 = [ [[]], [[]], [[]], [[]], [[]], [[]], [[]] ]
S_MOD1[0] = [[4.44777e-2]]
S_MOD1[1] = [[1.13400e-1], [2.82334e-1]]
S_MOD1[2] = [[7.23470e-4], [1.29940e-1], [3.45256e-1]]
S_MOD1[3] = [[3.74990e-6], [6.23400e-4], [2.24570e-1], [9.10284e-2], [7.14370e-5]]
S_MOD1[4] = [[5.31840e-8], [4.80020e-5], [1.69990e-2], [4.15510e-1], [1.39138e-1], [2.21570e-3]]
S_MOD1[5] = [[0.00000000], [7.44860e-6], [2.64430e-3], [6.37320e-2], [5.11820e-1], [6.99913e-1], [1.32440e-1]]
S_MOD1[6] = [[0.00000000], [1.04550e-6], [5.03440e-4], [1.21390e-2], [6.12290e-2], [5.37320e-1], [2.48070 ]]
## Upscattering Matrix
U_MOD1 = [ [], [], [], [], [], [], [] ]
U_MOD1[0] = []
U_MOD1[1] = []
U_MOD1[2] = []
U_MOD1[3] = [4]
U_MOD1[4] = [5]
U_MOD1[5] = [6]
U_MOD1[6] = []
################### Create nuf vectors
NUF_UO2 = []
NUF_MOX43 = []
for i in range(0, 7):
NUF_UO2.append( N_UO2[i] * F_UO2[i] )
NUF_MOX43.append( N_MOX43[i] * F_MOX43[i] )
##---------------------------------------------------------------------------##
## BUILD MESH
def build_mesh(N):
# vacuum = 0
# UO2 = 1
# MOX = 2
# moderator = 3
# UO2 pins
uo2_pin = Pincell()
uo2_ids = [1]
uo2_r = [0.4759]
uo2_pin.set_shells(uo2_ids, uo2_r, 3)
# MOX pins
mox_pin = Pincell()
mox_ids = [2]
mox_r = [0.4759]
mox_pin.set_shells(mox_ids, mox_r, 3)
# Make a 2x2 uo2 lattice and a 2x2 mox lattice
uo2_lat = Lattice(2)
mox_lat = Lattice(2)
# lattices are uniform
layout = [0, 0, 0, 0]
uo2_lat.set_pins(layout)
mox_lat.set_pins(layout)
# assign the pins in the lattices
uo2_lat.assign_pin(uo2_pin, 0)
mox_lat.assign_pin(mox_pin, 0)
# build the lattice
uo2_lat.build_lattice(N)
mox_lat.build_lattice(N)
# print out mixing tables
if print_it:
print "UO2 Lattice"
for m in xrange(uo2_lat.num_mixtures()):
vf = uo2_lat.f(m)
print "%4i" % (m),
for f in vf:
print "%9.6f" % (f),
print
print "MOX Lattice"
for m in xrange(mox_lat.num_mixtures()):
vf = mox_lat.f(m)
print "%4i" % (m),
for f in vf:
print "%9.6f" % (f),
print
# make the mixtable for the combined lattices by appending the mox table
# to the UO2 table (don't include the clean mixtures at the front of the
# table)
num_mixtures = uo2_lat.num_mixtures() + mox_lat.num_mixtures() - 4
table = Vec_Dbl(num_mixtures * 4)
ctr = 0
mox_offset = uo2_lat.num_mixtures()
# add UO2 mixtures
for m in xrange(uo2_lat.num_mixtures()):
vf = uo2_lat.f(m)
for f in vf:
table[ctr] = f
ctr = ctr + 1
# add MOX mixtures, skipping the clean mixes
for m in xrange(4, mox_lat.num_mixtures()):
vf = mox_lat.f(m)
for f in vf:
table[ctr] = f
ctr = ctr + 1
# make the cleanids
cleanids = [0, 1, 2, 3]
# the total core is 3x3 assemblies (2x2 fuel surrounded by water)
xylat = uo2_lat.xy_planes()
Nr = len(xylat) - 1
delta = Vec_Dbl(Nr, 0.0)
for i in xrange(Nr):
delta[i] = xylat[i+1] - xylat[i]
if Nr % 2 != 0:
print "Non-even lattices cells."
sys.exit(1)
# build the core planes
xycore = Vec_Dbl(int(2.5*Nr) + 1, 0.0)
for n in xrange(2):
for i in xrange(Nr):
index = i + n * Nr
xycore[index + 1] = xycore[index] + delta[i]
for i in xrange(Nr/2):
index = i + 2 * Nr
xycore[index + 1] = xycore[index] + delta[i]
# z-planes (14 in each assembly)
height = 14.28 * 1.5
Nz = 21
z = [0.0] * (Nz + 1)
dz = height / float(Nz)
for k in xrange(Nz):
z[k+1] = z[k] + dz
# get matids for each lattice
uo2ids = Vec_Int(uo2_lat.mixids())
moxids = Vec_Int(mox_lat.mixids())
# update the mox mixtures (leave clean zones alone)
for m in xrange(len(moxids)):
if moxids[m] > 3:
moxids[m] = moxids[m] + mox_offset - 4
# assign the matids
Nx = len(xycore) - 1
Ny = len(xycore) - 1
# arrangement
# |-----|-----|-----|
# | | | |
# | mod | mod | mod |
# | | | |
# |-----|-----|-----|
# | | | |
# | mox | uo2 | mod | y
# | | | |
# |-----|-----|-----|
# | | | |
# | uo2 | mox | mod |
# | | | |
# |-----|-----|-----|
# x
mixids = Vec_Int(Nx * Ny * Nz, 3)
kend = Nz / 2
# (0, 0) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = i + j * Ny + k * Nx * Ny
mixids[cell] = uo2ids[lat_cell]
# (1, 0) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = (i + Nr) + j * Ny + k * Nx * Ny
mixids[cell] = moxids[lat_cell]
# (0, 1) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = i + (j + Nr) * Ny + k * Nx * Ny
mixids[cell] = moxids[lat_cell]
# (1, 1) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = (i + Nr) + (j + Nr) * Ny + k * Nx * Ny
mixids[cell] = uo2ids[lat_cell]
return (xycore, z, mixids, cleanids, table)
##---------------------------------------------------------------------------##
## DB
##---------------------------------------------------------------------------##
entries = {
"problem_type" : "FIXED_SOURCE",
"num_groups" : 7,
"downscatter" : False,
"Pn_order" : 0,
"tolerance" : 1.0e-3,
"max_itr" : 400,
"linear_solver_xml_file" : "azilut01.xml",
"boundary" : "reflect",
"boundary_db" : {"reflect" : [1, 0, 1, 0, 1, 0]},
"SPN_order" : 7
}
db = DB.from_dict(entries)
# decomposition
if nodes() == 1:
db.insert("num_blocks_i", 1)
db.insert("num_blocks_j", 1)
elif nodes() == 2:
db.insert("num_blocks_i", 2)
db.insert("num_blocks_j", 1)
elif nodes() == 16:
db.insert("num_blocks_i", 4)
db.insert("num_blocks_j", 4)
# Mesh
(r, z, mixids, cleanids, table) = build_mesh(10)
db.insert("x_edges", r)
db.insert("y_edges", r)
db.insert("z_edges", z)
##---------------------------------------------------------------------------##
## MANAGER
##---------------------------------------------------------------------------##
# make manager, material, and angles
manager = Manager()
mat = Mat()
# partition the problem
manager.partition(db, mat)
# get mapping and mesh objects
mapp = manager.get_map()
indexer = manager.get_indexer()
mesh = manager.get_mesh()
# global and local cell numbers
Gx = indexer.num_global(X)
Gy = indexer.num_global(Y)
Gz = mesh.num_cells_dim(Z)
Nx = mesh.num_cells_dim(X)
Ny = mesh.num_cells_dim(Y)
Nz = mesh.num_cells_dim(Z)
if node() == 0:
print ">>> Partitioned global mesh with %i x %i x %i cells" \
% (Gx, Gy, Gz)
##---------------------------------------------------------------------------##
## MATERIAL SETUP
##---------------------------------------------------------------------------##
# vacuum = 0
# UO2 = 1
# MOX = 2
# moderator = 3
# set database
xsdb = XS_DB(db)
xsdb.set_num(4)
xsdb.assign_zero(0)
for g in xrange(0, xsdb.num_groups()):
xsdb.assign_upscatter(1, g, T_UO2[g], U_UO2[g], S_UO2[g])
xsdb.assign_upscatter(2, g, T_MOX43[g], U_MOX43[g], S_MOX43[g])
xsdb.assign_upscatter(3, g, T_MOD1[g], U_MOD1[g], S_MOD1[g])
## Assign fission data
xsdb.assign_fission(1, NUF_UO2, C_UO2)
xsdb.assign_fission(2, NUF_MOX43, C_MOX43)
# make macro mixer
mixer = Macro_Mixer(xsdb)
mixer.set(cleanids, table)
# make the material database
mixer.mix_with_global_ids(mixids, mat)
##---------------------------------------------------------------------------##
## ENERGY PARTITIONING
##---------------------------------------------------------------------------##
manager.partition_energy(mat)
##---------------------------------------------------------------------------##
## SOURCE SETUP
##---------------------------------------------------------------------------##
# allocate source and problem state
source = Isotropic_Source()
manager.setup(source)
total = Gx * Gy * Gz
Ng = mat.num_groups()
srcids = Vec_Int(total, 0)
srcstr = Vec_Dbl(total, 0.0)
num_shapes = 2
shapes = Vec_Dbl(2 * mat.num_groups(), 0.0)
chi0 = xsdb.fission_data(1, 0, CHI)
chi1 = xsdb.fission_data(2, 0, CHI)
# source 0 spectrum -> UO2 Chi
# source 1 spectrum -> MOX Chi
# make shapes
ctr = 0
for g in xrange(Ng):
shapes[ctr] = xsdb.fission_data(1, g, CHI)
ctr += 1
for g in xrange(Ng):
shapes[ctr] = xsdb.fission_data(2, g, CHI)
ctr += 1
# assign ids and strengths
for cell in xrange(total):
matid = mixids[cell]
if mat.assigned_fission(matid):
for g in xrange(Ng):
srcstr[cell] += mat.fission_data(matid, g, NU_SIGMA_F)
if mat.fission_data(matid, 0, CHI) == chi1:
srcids[cell] = 1
# set the source
source.set(num_shapes, shapes, srcids, srcstr)
##---------------------------------------------------------------------------##
## SOLVE
##---------------------------------------------------------------------------##
if node() == 0:
print ">>> Setup complete"
print ">>> Solving with %s differencing" % (manager.spatial_descriptor())
# solve the problem
manager.solve(source)
##---------------------------------------------------------------------------##
## OUTPUT
##---------------------------------------------------------------------------##
# make SILO output
silo = SILO()
silo.add_mixer(mixer)
silo.open("fs")
phi = Vec_Dbl(mesh.num_cells(), 0.0)
for g in xrange(Ng):
flux = manager.moments(g)
for cell in xrange(mesh.num_cells()):
phi[cell] = phi[cell] + flux.scalar_flux(cell)
silo.add("phi", phi)
silo.close()
##---------------------------------------------------------------------------##
## TIMING
##---------------------------------------------------------------------------##
# output final database (has class-dependent defaults)
db.output()
timer.stop()
time = timer.wall_clock()
keys = timer_keys()
if len(keys) > 0 and node() == 0:
print "\n"
print "TIMING : Problem ran in %16.6e seconds." % (time)
print "------------------------------------------------------------------"
for key in keys:
print "%30s : %16.6e %16.6e" % (key, timer_value(key) / time, timer_value(key))
print "------------------------------------------------------------------"
##---------------------------------------------------------------------------##
manager.close()
finalize()
###############################################################################
## end of fs.py
###############################################################################
|
sslattery/Chimera
|
doc/spn/fuel_assembly/sp7/fs_azilut01.py
|
Python
|
bsd-3-clause
| 16,598
|
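A standalone sketch of the flattened (i, j, k) -> cell indexing used throughout build_mesh above; note the loops there write 'i + j * Ny + k * Nx * Ny', which matches the conventional x-fastest ordering below only because this core is square (Nx == Ny):
def cell_index(i, j, k, Nx, Ny):
    # x varies fastest, then y, then z
    return i + j * Nx + k * Nx * Ny

Nx = Ny = 5
assert cell_index(4, 0, 0, Nx, Ny) == 4        # end of the first x-row
assert cell_index(0, 1, 0, Nx, Ny) == Nx       # first cell of the next y-row
assert cell_index(0, 0, 1, Nx, Ny) == Nx * Ny  # first cell of the next z-plane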
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-29 11:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Bookmark',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128, verbose_name='Title')),
('url_name', models.CharField(max_length=64, verbose_name='Url Name')),
('query', models.CharField(blank=True, max_length=1000, verbose_name='Query String')),
('is_share', models.BooleanField(default=False, verbose_name='Is Shared')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'Bookmark',
'verbose_name_plural': 'Bookmarks',
},
),
migrations.CreateModel(
name='UserSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=256, verbose_name='Settings Key')),
('value', models.TextField(verbose_name='Settings Content')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'User Setting',
'verbose_name_plural': 'User Settings',
},
),
migrations.CreateModel(
name='UserWidget',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('page_id', models.CharField(max_length=256, verbose_name='Page')),
('widget_type', models.CharField(max_length=50, verbose_name='Widget Type')),
('value', models.TextField(verbose_name='Widget Params')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'User Widget',
'verbose_name_plural': 'User Widgets',
},
),
]
|
pobear/django-xadmin
|
xadmin/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 2,906
|
from multiprocessing import Pool
def lower(str_in):
# return a real string (not a reversed() iterator) so the result can be
# pickled back from the worker process
return str_in.lower()[::-1]
if __name__ == '__main__':
pool = Pool(processes=4)
data = ['FOO', 'BAR', 'BAZ'] * 1000
print pool.map(lower, data)
|
dcolish/Presentations
|
osbridge/mutliproc.py
|
Python
|
bsd-3-clause
| 219
|
import functools
import json
import urllib
from django import http
from django.conf import settings
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
import commonware
import jinja2
import waffle
from curling.lib import HttpClientError
from tower import ugettext as _
from waffle.decorators import waffle_switch
import amo
from access import acl
from amo import messages
from amo.decorators import json_view, login_required, post_required, write
from constants.payments import (PAYMENT_METHOD_ALL, PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR, PROVIDER_BANGO,
PROVIDER_CHOICES)
from lib.crypto import generate_key
from lib.pay_server import client
from market.models import Price
from mkt.constants import DEVICE_LOOKUP, PAID_PLATFORMS
from mkt.developers import forms, forms_payments
from mkt.developers.decorators import dev_required
from mkt.developers.models import CantCancel, PaymentAccount, UserInappKey
from mkt.developers.providers import get_provider, get_providers
from mkt.inapp.models import InAppProduct
from mkt.inapp.serializers import InAppProductForm
from mkt.webapps.models import Webapp
log = commonware.log.getLogger('z.devhub')
@dev_required
@post_required
def disable_payments(request, addon_id, addon):
addon.update(wants_contributions=False)
return redirect(addon.get_dev_url('payments'))
@dev_required(owner_for_post=True, webapp=True)
def payments(request, addon_id, addon, webapp=False):
premium_form = forms_payments.PremiumForm(
request.POST or None, request=request, addon=addon,
user=request.amo_user)
region_form = forms.RegionForm(
request.POST or None, product=addon, request=request)
upsell_form = forms_payments.UpsellForm(
request.POST or None, addon=addon, user=request.amo_user)
providers = get_providers()
if 'form-TOTAL_FORMS' in request.POST:
formset_data = request.POST
else:
formset_data = None
account_list_formset = forms_payments.AccountListFormSet(
data=formset_data,
provider_data=[
{'addon': addon, 'user': request.amo_user, 'provider': provider}
for provider in providers])
if request.method == 'POST':
active_forms = [premium_form, region_form, upsell_form]
if formset_data is not None:
active_forms.append(account_list_formset)
success = all(form.is_valid() for form in active_forms)
if success:
region_form.save()
try:
premium_form.save()
except client.Error as err:
success = False
log.error('Error setting payment information (%s)' % err)
messages.error(
request, _(u'We encountered a problem connecting to the '
u'payment server.'))
raise # We want to see these exceptions!
is_free_inapp = addon.premium_type == amo.ADDON_FREE_INAPP
is_now_paid = (addon.premium_type in amo.ADDON_PREMIUMS
or is_free_inapp)
# If we haven't changed to a free app, check the upsell.
if is_now_paid and success:
try:
if not is_free_inapp:
upsell_form.save()
if formset_data is not None:
account_list_formset.save()
except client.Error as err:
log.error('Error saving payment information (%s)' % err)
messages.error(
request, _(u'We encountered a problem connecting to '
u'the payment server.'))
success = False
raise # We want to see all the solitude errors now.
# If everything happened successfully, give the user a pat on the back.
if success:
messages.success(request, _('Changes successfully saved.'))
return redirect(addon.get_dev_url('payments'))
# TODO: refactor this (bug 945267)
is_packaged = addon.is_packaged
android_payments_enabled = waffle.flag_is_active(request,
'android-payments')
android_packaged_enabled = waffle.flag_is_active(request,
'android-packaged')
desktop_packaged_enabled = waffle.flag_is_active(request,
'desktop-packaged')
# If android payments is not allowed then firefox os must
# be 'checked' and android-mobile and android-tablet should not be.
invalid_paid_platform_state = []
# If it isn't packaged, or it is packaged and the android-packaged flag is
# on, then we should check that desktop isn't set to True.
if not is_packaged or (is_packaged and desktop_packaged_enabled):
invalid_paid_platform_state.append(('desktop', True))
if not android_payments_enabled:
# When android-payments is off...
# If it isn't packaged, or it is packaged and the android-packaged flag
# is on, then we should check the state of android-mobile and
# android-tablet.
if not is_packaged or (is_packaged and android_packaged_enabled):
invalid_paid_platform_state += [('android-mobile', True),
('android-tablet', True)]
invalid_paid_platform_state.append(('firefoxos', False))
cannot_be_paid = (
addon.premium_type == amo.ADDON_FREE and
any(premium_form.device_data['free-%s' % x] == y
for x, y in invalid_paid_platform_state))
try:
tier_zero = Price.objects.get(price='0.00', active=True)
tier_zero_id = tier_zero.pk
except Price.DoesNotExist:
tier_zero = None
tier_zero_id = ''
# Get the regions based on tier zero. This should be all the
# regions with payments enabled.
paid_region_ids_by_name = []
if tier_zero:
paid_region_ids_by_name = tier_zero.region_ids_by_name()
platforms = PAID_PLATFORMS(request, is_packaged)
paid_platform_names = [unicode(platform[1]) for platform in platforms]
provider_regions = {}
if tier_zero:
provider_regions = tier_zero.provider_regions()
return render(request, 'developers/payments/premium.html',
{'addon': addon, 'webapp': webapp, 'premium': addon.premium,
'form': premium_form, 'upsell_form': upsell_form,
'tier_zero_id': tier_zero_id, 'region_form': region_form,
'DEVICE_LOOKUP': DEVICE_LOOKUP,
'is_paid': (addon.premium_type in amo.ADDON_PREMIUMS
or addon.premium_type == amo.ADDON_FREE_INAPP),
'cannot_be_paid': cannot_be_paid,
'paid_platform_names': paid_platform_names,
'is_packaged': addon.is_packaged,
# Bango values
'account_list_forms': account_list_formset.forms,
'account_list_formset': account_list_formset,
# Waffles
'api_pricelist_url': reverse('price-list'),
'payment_methods': {
PAYMENT_METHOD_ALL: _('All'),
PAYMENT_METHOD_CARD: _('Credit card'),
PAYMENT_METHOD_OPERATOR: _('Carrier'),
},
'provider_lookup': dict(PROVIDER_CHOICES),
'all_paid_region_ids_by_name': paid_region_ids_by_name,
'providers': providers,
'provider_regions': provider_regions,
})
@login_required
@json_view
def payment_accounts(request):
app_slug = request.GET.get('app-slug', '')
if app_slug:
app = Webapp.objects.get(app_slug=app_slug)
app_name = app.name
else:
app_name = ''
accounts = PaymentAccount.objects.filter(
user=request.amo_user,
provider__in=[p.provider for p in get_providers()],
inactive=False)
def account(acc):
def payment_account_names(app):
account_names = [unicode(acc.payment_account)
for acc in app.all_payment_accounts()]
return (unicode(app.name), account_names)
addon_payment_accounts = acc.addonpaymentaccount_set.all()
associated_apps = [apa.addon
for apa in addon_payment_accounts
if hasattr(apa, 'addon')]
app_names = u', '.join(unicode(app.name) for app in associated_apps)
app_payment_accounts = json.dumps(dict([payment_account_names(app)
for app in associated_apps]))
provider = acc.get_provider()
data = {
'account-url': reverse('mkt.developers.provider.payment_account',
args=[acc.pk]),
'agreement-url': acc.get_agreement_url(),
'agreement': 'accepted' if acc.agreed_tos else 'rejected',
'current-app-name': jinja2.escape(app_name),
'app-names': jinja2.escape(app_names),
'app-payment-accounts': jinja2.escape(app_payment_accounts),
'delete-url': reverse(
'mkt.developers.provider.delete_payment_account',
args=[acc.pk]),
'id': acc.pk,
'name': jinja2.escape(unicode(acc)),
'provider': provider.name,
'provider-full': unicode(provider.full),
'shared': acc.shared,
'portal-url': provider.get_portal_url(app_slug)
}
return data
return map(account, accounts)
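# Illustrative shape (hypothetical values) of the JSON list returned above;
# the keys mirror the `data` dict built in account():
#   [{"id": 1, "name": "My Bango account", "provider": "bango",
#     "agreement": "accepted", "shared": false,
#     "app-names": "My App, Other App", ...}]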
@login_required
def payment_accounts_form(request):
webapp = get_object_or_404(Webapp, app_slug=request.GET.get('app_slug'))
provider = get_provider(name=request.GET.get('provider'))
account_list_formset = forms_payments.AccountListFormSet(
provider_data=[
{'user': request.amo_user, 'addon': webapp, 'provider': p}
for p in get_providers()])
account_list_form = next(form for form in account_list_formset.forms
if form.provider.name == provider.name)
return render(request,
'developers/payments/includes/bango_accounts_form.html',
{'account_list_form': account_list_form})
@write
@post_required
@login_required
@json_view
def payments_accounts_add(request):
provider = get_provider(name=request.POST.get('provider'))
form = provider.forms['account'](request.POST)
if not form.is_valid():
return json_view.error(form.errors)
try:
obj = provider.account_create(request.amo_user, form.cleaned_data)
except HttpClientError as e:
log.error('Client error create {0} account: {1}'.format(
provider.name, e))
return http.HttpResponseBadRequest(json.dumps(e.content))
return {'pk': obj.pk, 'agreement-url': obj.get_agreement_url()}
@write
@login_required
@json_view
def payments_account(request, id):
account = get_object_or_404(PaymentAccount, pk=id, user=request.user)
provider = account.get_provider()
if request.POST:
form = provider.forms['account'](request.POST, account=account)
if form.is_valid():
form.save()
else:
return json_view.error(form.errors)
return provider.account_retrieve(account)
@write
@post_required
@login_required
def payments_accounts_delete(request, id):
account = get_object_or_404(PaymentAccount, pk=id, user=request.user)
try:
account.cancel(disable_refs=True)
except CantCancel:
log.info('Could not cancel account.')
return http.HttpResponse('Cannot cancel account', status=409)
log.info('Account cancelled: %s' % id)
return http.HttpResponse('success')
@login_required
def in_app_keys(request):
"""
Allows developers to get a simulation-only key for in-app payments.
This key cannot be used for real payments.
"""
keys = UserInappKey.objects.no_cache().filter(
solitude_seller__user=request.amo_user
)
# TODO(Kumar) support multiple test keys. For now there's only one.
key = None
key_public_id = None
if keys.exists():
key = keys.get()
# Attempt to retrieve the public id from solitude
try:
key_public_id = key.public_id()
        except HttpClientError as e:
messages.error(request,
_('A server error occurred '
'when retrieving the application key.'))
log.exception('Solitude connection error: {0}'.format(e.message))
if request.method == 'POST':
if key:
key.reset()
messages.success(request, _('Secret was reset successfully.'))
else:
UserInappKey.create(request.amo_user)
messages.success(request,
_('Key and secret were created successfully.'))
return redirect(reverse('mkt.developers.apps.in_app_keys'))
return render(request, 'developers/payments/in-app-keys.html',
{'key': key, 'key_public_id': key_public_id})
@login_required
def in_app_key_secret(request, pk):
key = (UserInappKey.objects.no_cache()
.filter(solitude_seller__user=request.amo_user, pk=pk))
if not key.count():
# Either the record does not exist or it's not owned by the
# logged in user.
return http.HttpResponseForbidden()
return http.HttpResponse(key.get().secret())
def require_in_app_payments(render_view):
@functools.wraps(render_view)
def inner(request, addon_id, addon, *args, **kwargs):
setup_url = reverse('mkt.developers.apps.payments',
args=[addon.app_slug])
if addon.premium_type not in amo.ADDON_INAPPS:
messages.error(
request,
_('Your app is not configured for in-app payments.'))
return redirect(setup_url)
if not addon.has_payment_account():
messages.error(request, _('No payment account for this app.'))
return redirect(setup_url)
# App is set up for payments; render the view.
return render_view(request, addon_id, addon, *args, **kwargs)
return inner
@waffle_switch('in-app-products')
@login_required
@dev_required(webapp=True)
@require_in_app_payments
def in_app_products(request, addon_id, addon, webapp=True, account=None):
owner = acl.check_addon_ownership(request, addon)
products = addon.inappproduct_set.all()
new_product = InAppProduct(webapp=addon)
form = InAppProductForm()
return render(request, 'developers/payments/in-app-products.html',
{'addon': addon, 'form': form, 'new_product': new_product,
                   'owner': owner, 'products': products})
@login_required
@dev_required(owner_for_post=True, webapp=True)
@require_in_app_payments
def in_app_config(request, addon_id, addon, webapp=True):
"""
Allows developers to get a key/secret for doing in-app payments.
"""
config = get_inapp_config(addon)
owner = acl.check_addon_ownership(request, addon)
if request.method == 'POST':
# Reset the in-app secret for the app.
(client.api.generic
.product(config['resource_pk'])
.patch(data={'secret': generate_key(48)}))
messages.success(request, _('Changes successfully saved.'))
return redirect(reverse('mkt.developers.apps.in_app_config',
args=[addon.app_slug]))
return render(request, 'developers/payments/in-app-config.html',
{'addon': addon, 'owner': owner,
'seller_config': config})
@login_required
@dev_required(webapp=True)
@require_in_app_payments
def in_app_secret(request, addon_id, addon, webapp=True):
config = get_inapp_config(addon)
return http.HttpResponse(config['secret'])
def get_inapp_config(addon):
"""
Returns a generic Solitude product, the app's in-app configuration.
We use generic products in Solitude to represent an "app" that is
enabled for in-app purchases.
"""
if not addon.solitude_public_id:
# If the view accessing this method uses all the right
# decorators then this error won't be raised.
raise ValueError('The app {a} has not yet been configured '
'for payments'.format(a=addon))
return client.api.generic.product.get_object(
public_id=addon.solitude_public_id)
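# Hedged note, not in the original: callers above rely on two keys of the
# returned generic product -- config['resource_pk'] (used to PATCH a new
# secret) and config['secret'] (served to the developer); any other fields
# depend on the Solitude API.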
@dev_required(webapp=True)
def bango_portal_from_addon(request, addon_id, addon, webapp=True):
try:
bango = addon.payment_account(PROVIDER_BANGO)
except addon.PayAccountDoesNotExist:
log.error('Bango portal not available for app {app} '
'with accounts {acct}'
.format(app=addon,
acct=list(addon.all_payment_accounts())))
return http.HttpResponseForbidden()
else:
account = bango.payment_account
if not ((addon.authors.filter(pk=request.user.pk,
addonuser__role=amo.AUTHOR_ROLE_OWNER).exists()) and
(account.solitude_seller.user.id == request.user.id)):
log.error(('User not allowed to reach the Bango portal; '
'pk=%s') % request.user.pk)
return http.HttpResponseForbidden()
return _redirect_to_bango_portal(account.account_id,
'addon_id: %s' % addon_id)
def _redirect_to_bango_portal(package_id, source):
try:
bango_token = client.api.bango.login.post({'packageId':
int(package_id)})
except HttpClientError as e:
log.error('Failed to authenticate against Bango portal; %s' % source,
exc_info=True)
return http.HttpResponseBadRequest(json.dumps(e.content))
bango_url = '{base_url}{parameters}'.format(**{
'base_url': settings.BANGO_BASE_PORTAL_URL,
'parameters': urllib.urlencode({
'authenticationToken': bango_token['authentication_token'],
'emailAddress': bango_token['email_address'],
'packageId': package_id,
'personId': bango_token['person_id'],
})
})
response = http.HttpResponse(status=204)
response['Location'] = bango_url
return response
# TODO(andym): move these into a DRF API.
@login_required
@json_view
def agreement(request, id):
account = get_object_or_404(PaymentAccount, pk=id, user=request.user)
provider = account.get_provider()
if request.method == 'POST':
return provider.terms_update(account)
return provider.terms_retrieve(account)
|
jinankjain/zamboni
|
mkt/developers/views_payments.py
|
Python
|
bsd-3-clause
| 19,059
|
"""
Wrapper for the layout.
"""
from typing import Dict, Generator, Iterable, List, Optional, Union
from prompt_toolkit.buffer import Buffer
from .containers import (
AnyContainer,
ConditionalContainer,
Container,
Window,
to_container,
)
from .controls import BufferControl, SearchBufferControl, UIControl
__all__ = [
"Layout",
"InvalidLayoutError",
"walk",
]
FocusableElement = Union[str, Buffer, UIControl, AnyContainer]
class Layout:
"""
The layout for a prompt_toolkit
:class:`~prompt_toolkit.application.Application`.
This also keeps track of which user control is focused.
:param container: The "root" container for the layout.
:param focused_element: element to be focused initially. (Can be anything
the `focus` function accepts.)
"""
def __init__(
self,
container: AnyContainer,
focused_element: Optional[FocusableElement] = None,
) -> None:
self.container = to_container(container)
self._stack: List[Window] = []
# Map search BufferControl back to the original BufferControl.
# This is used to keep track of when exactly we are searching, and for
# applying the search.
# When a link exists in this dictionary, that means the search is
# currently active.
# Map: search_buffer_control -> original buffer control.
self.search_links: Dict[SearchBufferControl, BufferControl] = {}
# Mapping that maps the children in the layout to their parent.
# This relationship is calculated dynamically, each time when the UI
# is rendered. (UI elements have only references to their children.)
self._child_to_parent: Dict[Container, Container] = {}
if focused_element is None:
try:
self._stack.append(next(self.find_all_windows()))
except StopIteration as e:
raise InvalidLayoutError(
"Invalid layout. The layout does not contain any Window object."
) from e
else:
self.focus(focused_element)
        # List of `Window` objects that are currently visible.
        self.visible_windows: List[Window] = []
def __repr__(self) -> str:
return "Layout(%r, current_window=%r)" % (self.container, self.current_window)
def find_all_windows(self) -> Generator[Window, None, None]:
"""
        Find all the :class:`.Window` objects in this layout.
"""
for item in self.walk():
if isinstance(item, Window):
yield item
def find_all_controls(self) -> Iterable[UIControl]:
for container in self.find_all_windows():
yield container.content
def focus(self, value: FocusableElement) -> None:
"""
Focus the given UI element.
`value` can be either:
- a :class:`.UIControl`
- a :class:`.Buffer` instance or the name of a :class:`.Buffer`
- a :class:`.Window`
- Any container object. In this case we will focus the :class:`.Window`
          from this container that was focused most recently, or the very first
focusable :class:`.Window` of the container.
"""
# BufferControl by buffer name.
if isinstance(value, str):
for control in self.find_all_controls():
if isinstance(control, BufferControl) and control.buffer.name == value:
self.focus(control)
return
raise ValueError(
"Couldn't find Buffer in the current layout: %r." % (value,)
)
# BufferControl by buffer object.
elif isinstance(value, Buffer):
for control in self.find_all_controls():
if isinstance(control, BufferControl) and control.buffer == value:
self.focus(control)
return
raise ValueError(
"Couldn't find Buffer in the current layout: %r." % (value,)
)
# Focus UIControl.
elif isinstance(value, UIControl):
if value not in self.find_all_controls():
raise ValueError(
"Invalid value. Container does not appear in the layout."
)
if not value.is_focusable():
raise ValueError("Invalid value. UIControl is not focusable.")
self.current_control = value
# Otherwise, expecting any Container object.
else:
value = to_container(value)
if isinstance(value, Window):
# This is a `Window`: focus that.
if value not in self.find_all_windows():
raise ValueError(
"Invalid value. Window does not appear in the layout: %r"
% (value,)
)
self.current_window = value
else:
# Focus a window in this container.
# If we have many windows as part of this container, and some
# of them have been focused before, take the last focused
# item. (This is very useful when the UI is composed of more
# complex sub components.)
windows = []
for c in walk(value, skip_hidden=True):
if isinstance(c, Window) and c.content.is_focusable():
windows.append(c)
# Take the first one that was focused before.
for w in reversed(self._stack):
if w in windows:
self.current_window = w
return
# None was focused before: take the very first focusable window.
if windows:
self.current_window = windows[0]
return
raise ValueError(
"Invalid value. Container cannot be focused: %r" % (value,)
)
def has_focus(self, value: FocusableElement) -> bool:
"""
Check whether the given control has the focus.
:param value: :class:`.UIControl` or :class:`.Window` instance.
"""
if isinstance(value, str):
if self.current_buffer is None:
return False
return self.current_buffer.name == value
if isinstance(value, Buffer):
return self.current_buffer == value
if isinstance(value, UIControl):
return self.current_control == value
else:
value = to_container(value)
if isinstance(value, Window):
return self.current_window == value
else:
# Check whether this "container" is focused. This is true if
# one of the elements inside is focused.
for element in walk(value):
if element == self.current_window:
return True
return False
@property
def current_control(self) -> UIControl:
"""
        Get the :class:`.UIControl` that currently has the focus.
"""
return self._stack[-1].content
@current_control.setter
def current_control(self, control: UIControl) -> None:
"""
Set the :class:`.UIControl` to receive the focus.
"""
for window in self.find_all_windows():
if window.content == control:
self.current_window = window
return
raise ValueError("Control not found in the user interface.")
@property
def current_window(self) -> Window:
" Return the :class:`.Window` object that is currently focused. "
return self._stack[-1]
@current_window.setter
    def current_window(self, value: Window) -> None:
" Set the :class:`.Window` object to be currently focused. "
self._stack.append(value)
@property
def is_searching(self) -> bool:
" True if we are searching right now. "
return self.current_control in self.search_links
@property
def search_target_buffer_control(self) -> Optional[BufferControl]:
"""
Return the :class:`.BufferControl` in which we are searching or `None`.
"""
# Not every `UIControl` is a `BufferControl`. This only applies to
# `BufferControl`.
control = self.current_control
if isinstance(control, SearchBufferControl):
return self.search_links.get(control)
else:
return None
def get_focusable_windows(self) -> Iterable[Window]:
"""
Return all the :class:`.Window` objects which are focusable (in the
'modal' area).
"""
for w in self.walk_through_modal_area():
if isinstance(w, Window) and w.content.is_focusable():
yield w
def get_visible_focusable_windows(self) -> List[Window]:
"""
        Return a list of :class:`.Window` objects that are both visible and
        focusable.
"""
# focusable windows are windows that are visible, but also part of the
# modal container. Make sure to keep the ordering.
visible_windows = self.visible_windows
return [w for w in self.get_focusable_windows() if w in visible_windows]
@property
def current_buffer(self) -> Optional[Buffer]:
"""
The currently focused :class:`~.Buffer` or `None`.
"""
ui_control = self.current_control
if isinstance(ui_control, BufferControl):
return ui_control.buffer
return None
def get_buffer_by_name(self, buffer_name: str) -> Optional[Buffer]:
"""
Look in the layout for a buffer with the given name.
Return `None` when nothing was found.
"""
for w in self.walk():
if isinstance(w, Window) and isinstance(w.content, BufferControl):
if w.content.buffer.name == buffer_name:
return w.content.buffer
return None
@property
def buffer_has_focus(self) -> bool:
"""
Return `True` if the currently focused control is a
:class:`.BufferControl`. (For instance, used to determine whether the
default key bindings should be active or not.)
"""
ui_control = self.current_control
return isinstance(ui_control, BufferControl)
@property
def previous_control(self) -> UIControl:
"""
        Get the :class:`.UIControl` that previously had the focus.
"""
try:
return self._stack[-2].content
except IndexError:
return self._stack[-1].content
def focus_last(self) -> None:
"""
Give the focus to the last focused control.
"""
if len(self._stack) > 1:
self._stack = self._stack[:-1]
def focus_next(self) -> None:
"""
Focus the next visible/focusable Window.
"""
windows = self.get_visible_focusable_windows()
if len(windows) > 0:
try:
index = windows.index(self.current_window)
except ValueError:
index = 0
else:
index = (index + 1) % len(windows)
self.focus(windows[index])
def focus_previous(self) -> None:
"""
Focus the previous visible/focusable Window.
"""
windows = self.get_visible_focusable_windows()
if len(windows) > 0:
try:
index = windows.index(self.current_window)
except ValueError:
index = 0
else:
index = (index - 1) % len(windows)
self.focus(windows[index])
def walk(self) -> Iterable[Container]:
"""
Walk through all the layout nodes (and their children) and yield them.
"""
for i in walk(self.container):
yield i
def walk_through_modal_area(self) -> Iterable[Container]:
"""
Walk through all the containers which are in the current 'modal' part
of the layout.
"""
# Go up in the tree, and find the root. (it will be a part of the
# layout, if the focus is in a modal part.)
root: Container = self.current_window
while not root.is_modal() and root in self._child_to_parent:
root = self._child_to_parent[root]
for container in walk(root):
yield container
def update_parents_relations(self) -> None:
"""
Update child->parent relationships mapping.
"""
parents = {}
def walk(e: Container) -> None:
for c in e.get_children():
parents[c] = e
walk(c)
walk(self.container)
self._child_to_parent = parents
def reset(self) -> None:
# Remove all search links when the UI starts.
        # (Important, for instance when control-c has been pressed while
        # searching. The prompt cancels, but on the next `run()` call the
        # search links would still be there.)
self.search_links.clear()
self.container.reset()
def get_parent(self, container: Container) -> Optional[Container]:
"""
Return the parent container for the given container, or ``None``, if it
wasn't found.
"""
try:
return self._child_to_parent[container]
except KeyError:
return None
class InvalidLayoutError(Exception):
pass
def walk(container: Container, skip_hidden: bool = False) -> Iterable[Container]:
"""
Walk through layout, starting at this container.
"""
# When `skip_hidden` is set, don't go into disabled ConditionalContainer containers.
if (
skip_hidden
and isinstance(container, ConditionalContainer)
and not container.filter()
):
return
yield container
for c in container.get_children():
yield from walk(c, skip_hidden=skip_hidden)
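if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. It shows the
    # basic Layout/focus workflow using only names imported above; run it
    # with `python -m prompt_toolkit.layout.layout` so the relative imports
    # resolve.
    _buf = Buffer(name="demo")
    _root = Window(BufferControl(buffer=_buf))
    _layout = Layout(_root)
    _layout.focus("demo")  # focus the BufferControl by its buffer name
    assert _layout.buffer_has_focus
    print(_layout)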
|
jonathanslenders/python-prompt-toolkit
|
prompt_toolkit/layout/layout.py
|
Python
|
bsd-3-clause
| 14,111
|
import sys
from setuptools import setup, find_packages
exec(open('fftoptionlib/version.py').read())
def check_python_version():
if sys.version_info[:2] < (3, 4):
print('Python 3.4 or newer is required. Python version detected: {}'.format(sys.version_info))
sys.exit(-1)
def main():
setup(name='fftoptionlib',
version=__version__,
author='ArrayStream (Yu Zheng, Ran Fan)',
author_email='team@arraystream.com',
url='https://github.com/arraystream/fftoptionlib',
description='FFT-based Option Pricing Method in Python',
long_description='FFT-based Option Pricing Method in Python',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Mathematics',
'Intended Audience :: Financial and Insurance Industry'
],
license='BSD',
packages=find_packages(include=['fftoptionlib']),
install_requires=['numpy', 'scipy', 'pandas', 'autograd'],
platforms='any')
if __name__ == '__main__':
check_python_version()
main()
|
arraystream/fftoptionlib
|
setup.py
|
Python
|
bsd-3-clause
| 1,367
|
from .base import * # NOQA
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'CHANGEME!!!'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': '{{ cookiecutter.repo_name }}',
'USER': 'vagrant',
'PASSWORD': 'vagrant'
}
}
INSTALLED_APPS += (
'django_extensions',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Process all tasks synchronously.
# Helpful for local development and running tests
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_ALWAYS_EAGER = True
|
RocketPod/wagtail-cookiecutter
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/dev.py
|
Python
|
bsd-3-clause
| 686
|
import logging
from glob import glob
from os import path
import pytest
from steampak import SteamApi
from steampak.libsteam.resources.apps import Application
from steampak.libsteam.resources.stats import Achievement
from steampak.libsteam.resources.user import User
def set_log_level(lvl):
logging.basicConfig(level=lvl, format="%(message)s")
APP_SPACEWAR = 480
APP_DLC_SPACEWAR = 110902
APP_AMNESIA = 57300
APP_TIMBERMAN = 398710
LOG_LEVEL = logging.DEBUG
# set_log_level(LOG_LEVEL)
libs = sorted(glob('libsteam_api*'))
if not libs:
raise Exception('Unable to locate library .so')
LIBRARY_PATH = path.join(path.dirname(__file__), libs[-1])
@pytest.fixture()
def api():
api = SteamApi(LIBRARY_PATH, app_id=APP_SPACEWAR)
yield api
api.shutdown()
@pytest.fixture()
def set_app_id():
def set_app_id_(app_id):
return SteamApi(LIBRARY_PATH, app_id=app_id)
return set_app_id_
def test_basic(api):
assert api.steam_running
assert api.app_id == APP_SPACEWAR
assert api.install_path
def test_utils(api):
assert api.utils.ipc_call_count > 10
assert api.utils.seconds_app_active >= 0
assert api.utils.seconds_computer_active >= 0
assert api.utils.server_time
assert api.utils.country_code == 'RU'
assert api.utils.battery_power == 255
assert api.utils.app_id == APP_SPACEWAR
assert api.utils.overlay_enabled is False
assert api.utils.vr_mode is False
assert api.utils.ui_language in {'russian', 'english'}
assert api.utils.universe == 'public'
api.utils.set_notification_position(api.utils.notification_positions.TOP_LEFT)
def test_current_user(api):
user = api.current_user
assert user.steam_id
assert user.steam_handle
assert 10 < user.level < 60
assert user.behind_nat
assert user.logged_in
user_obj = user.user
assert user_obj.name == 'idle sign'
assert user_obj.state == 'online'
def test_friends(api):
friends = api.friends
assert 10 < friends.get_count() < 20
friends = {friend.name: friend for friend in friends}
picked = friends['hiter-fuma']
assert picked
# assert picked.level == 33 # todo
assert picked.name_history == ['hiter-fuma']
assert picked.state in {'online', 'away', 'offline'}
assert picked.has_friends()
picked.show_profile()
tags = {tag.name: tag for tag in api.friends.tags}
assert tags
assert 2 < len(tags['кореша']) < 10
def test_groups(api):
groups = {group.name: group for group in api.groups}
assert groups
group = groups['Steam Universe']
assert group.alias == 'Steam U'
stats = group.stats
assert stats
assert 900000 > stats['online'] > 100000
def test_apps(api):
apps = api.apps
installed = apps.installed
apps_installed = dict(installed)
assert apps_installed
app_current = apps.current
assert app_current.name == 'Spacewar'
assert app_current.build_id == 0
assert app_current.language_current == ''
assert app_current.language_available == ['english']
assert app_current.vac_banned is False
assert app_current.mode_cybercafe is False
assert app_current.mode_free_weekend is False
assert app_current.low_violence is False
assert app_current.owned is True
assert app_current.owner.name == 'idle sign'
assert not app_current.mark_corrupt(only_files_missing=True)
assert app_current.beta_name == ''
dlcs = dict(app_current.dlcs)
assert dlcs
dlc = dlcs[APP_DLC_SPACEWAR]
assert not dlc.available
assert dlc.install_dir == ''
assert dlc.name == 'pieterw test DLC'
assert not dlc.owned
assert not dlc.installed
dlc.install()
assert dlc.get_download_progress() == (0, 0)
dlc.uninstall()
app: Application = Application(APP_AMNESIA)
assert app.name == 'Amnesia: The Dark Descent'
assert 'SteamApps/common/Amnesia' in app.install_dir
assert app.build_id >= 3192428
assert app.owned
assert app.installed
assert app.purchase_time.year == 2011
def test_ach(set_app_id):
api = set_app_id(APP_TIMBERMAN)
achs = api.apps.current.achievements
achs_dict = dict(achs)
assert len(achs_dict) >= 20
ach_angel = achs_dict['NEW_ACHIEVEMENT_1_1'] # type: Achievement
assert ach_angel.title == 'Angel of Axe'
assert not ach_angel.hidden
assert 'Score 150' in ach_angel.description
assert ach_angel.unlocked
unlocked, unlocked_at = ach_angel.get_unlock_info()
assert unlocked_at
assert unlocked_at.year == 2015
ach_streamer = achs_dict['NEW_ACHIEVEMENT_1_11'] # type: Achievement
assert not ach_streamer.unlocked
assert 2 < ach_streamer.global_unlock_percent < 5
unlocked, unlocked_at = ach_streamer.get_unlock_info()
assert unlocked is False
assert unlocked_at is None
assert ach_streamer.unlock(store=False)
assert ach_streamer.clear()
assert achs.store_stats()
def test_overlay(api):
overlay = api.overlay
overlay.activate('https://pythonz.net')
overlay.activate(overlay.PAGE_ACHIEVEMENTS)
def test_screenshots(api):
screenshots = api.screenshots
assert not screenshots.is_hooked
screenshots.toggle_hook()
screenshots.take()
|
idlesign/steampak
|
tests/test_manual.py
|
Python
|
bsd-3-clause
| 5,260
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import inspect
import os
from telemetry.story import story as story_module
from telemetry.wpr import archive_info
class StorySet(object):
"""A collection of stories.
A typical usage of StorySet would be to subclass it and then call
AddStory for each Story.
"""
def __init__(self,
archive_data_file='',
cloud_storage_bucket=None,
base_dir=None,
serving_dirs=None,
request_handler_class=None):
"""Creates a new StorySet.
Args:
archive_data_file: The path to Web Page Replay's archive data, relative
to self.base_dir.
cloud_storage_bucket: The cloud storage bucket used to download
Web Page Replay's archive data. Valid values are: None,
story.PUBLIC_BUCKET, story.PARTNER_BUCKET, or story.INTERNAL_BUCKET
(defined in telemetry.util.cloud_storage).
serving_dirs: A set of paths, relative to self.base_dir, to directories
containing hash files for non-wpr archive data stored in cloud
storage.
"""
self._stories = []
self._story_names = set()
self._archive_data_file = archive_data_file
self._wpr_archive_info = None
archive_info.AssertValidCloudStorageBucket(cloud_storage_bucket)
self._cloud_storage_bucket = cloud_storage_bucket
if base_dir:
if not os.path.isdir(base_dir):
raise ValueError('Invalid directory path of base_dir: %s' % base_dir)
self._base_dir = base_dir
else:
self._base_dir = os.path.dirname(inspect.getfile(self.__class__))
# Convert any relative serving_dirs to absolute paths.
self._serving_dirs = set(os.path.realpath(os.path.join(self.base_dir, d))
for d in serving_dirs or [])
self._request_handler_class = request_handler_class
@property
def shared_state_class(self):
if self._stories:
return self._stories[0].shared_state_class
else:
return None
@property
def file_path(self):
return inspect.getfile(self.__class__).replace('.pyc', '.py')
@property
def base_dir(self):
"""The base directory to resolve archive_data_file.
This defaults to the directory containing the StorySet instance's class.
"""
return self._base_dir
@property
def serving_dirs(self):
all_serving_dirs = self._serving_dirs.copy()
for story in self.stories:
if story.serving_dir:
all_serving_dirs.add(story.serving_dir)
return all_serving_dirs
@property
def archive_data_file(self):
return self._archive_data_file
@property
def bucket(self):
return self._cloud_storage_bucket
@property
def wpr_archive_info(self):
"""Lazily constructs wpr_archive_info if it's not set and returns it."""
if self.archive_data_file and not self._wpr_archive_info:
self._wpr_archive_info = archive_info.WprArchiveInfo.FromFile(
os.path.join(self.base_dir, self.archive_data_file), self.bucket)
return self._wpr_archive_info
@property
def stories(self):
return self._stories
@property
def request_handler_class(self):
return self._request_handler_class
def SetRequestHandlerClass(self, handler_class):
self._request_handler_class = handler_class
def AddStory(self, story):
assert isinstance(story, story_module.Story)
assert self._IsUnique(story), ('Tried to add story with duplicate '
'name %s. Story names should be '
'unique.' % story.name)
shared_state_class = self.shared_state_class
if shared_state_class is not None:
assert story.shared_state_class == shared_state_class, (
'Story sets with mixed shared states are not allowed. Adding '
'story %s with shared state %s, but others have %s.' %
(story.name, story.shared_state_class, shared_state_class))
self._stories.append(story)
self._story_names.add(story.name)
def _IsUnique(self, story):
return story.name not in self._story_names
def RemoveStory(self, story):
"""Removes a Story.
TODO(crbug.com/980758): Remove this functionality after migrating
system_health_smoke_test.py and benchmark_smoke_unittest.py off of
it.
Allows the stories to be filtered.
"""
self._stories.remove(story)
self._story_names.remove(story.name)
@classmethod
def Name(cls):
"""Returns the string name of this StorySet.
Note that this should be a classmethod so the benchmark_runner script can
match the story class with its name specified in the run command:
'Run <User story test name> <User story class name>'
"""
return cls.__module__.split('.')[-1]
@classmethod
def Description(cls):
"""Return a string explaining in human-understandable terms what this
story represents.
Note that this should be a classmethod so the benchmark_runner script can
display stories' names along with their descriptions in the list command.
"""
if cls.__doc__:
return cls.__doc__.splitlines()[0]
else:
return ''
def WprFilePathForStory(self, story, target_platform=None):
"""Convenient function to retrieve WPR archive file path.
Args:
story: The Story to look up.
Returns:
The WPR archive file path for the given Story, if found.
Otherwise, None.
"""
if not self.wpr_archive_info:
return None
return self.wpr_archive_info.WprFilePathForStory(
story, target_platform=target_platform)
def GetAbridgedStorySetTagFilter(self):
"""Override this method to shorten your story set.
Returns a story tag string that marks the stories that are
part of the abridged story set. If it returns None, then no stories will
be filtered.
Abridging your story set is useful for large benchmarks so that they can
be run quickly when needed.
"""
return None
def __iter__(self):
return self.stories.__iter__()
def __len__(self):
return len(self.stories)
def __getitem__(self, key):
return self.stories[key]
def __setitem__(self, key, value):
self._stories[key] = value
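# Hedged usage sketch, not part of the original module. A typical pattern,
# assuming a concrete `page_module.Page` story class as used by telemetry
# benchmarks:
#
#   class MyStorySet(StorySet):
#       """Example stories for smoke testing."""
#       def __init__(self):
#           super(MyStorySet, self).__init__(
#               archive_data_file='data/my_stories.json')
#           self.AddStory(page_module.Page(
#               'http://example.com', self, name='example'))
#
# Story names must be unique within a set (enforced by AddStory), and all
# stories must share the same shared_state_class.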
|
endlessm/chromium-browser
|
third_party/catapult/telemetry/telemetry/story/story_set.py
|
Python
|
bsd-3-clause
| 6,323
|
#!/usr/bin/env python
import flask
from flask_cors import cross_origin
import StringIO
import logging
import os
import cooperhewitt.roboteyes.atkinson as atkinson
import cooperhewitt.flask.http_pony as http_pony
app = http_pony.setup_flask_app('ATKINSON_SERVER')
@app.route('/ping', methods=['GET'])
@cross_origin(methods=['GET'])
def ping():
return flask.jsonify({'stat': 'ok'})
@app.route('/dither', methods=['GET', 'POST'])
def dither():
try:
if flask.request.method=='POST':
path = http_pony.get_upload_path(app)
else:
path = http_pony.get_local_path(app)
    except Exception as e:
logging.error(e)
flask.abort(400)
logging.debug("%s %s %s" % (flask.request.method, 'dither', path))
src = path
dest = StringIO.StringIO()
ok = True
try:
atkinson.dither(src, dest)
    except Exception as e:
logging.error("failed to process %s, because %s" % (path, e))
ok = False
if flask.request.method=='POST':
logging.debug("unlink %s" % path)
os.unlink(path)
if not ok:
flask.abort(500)
dest.seek(0)
return flask.send_file(dest, mimetype='image/gif')
if __name__ == '__main__':
# app is defined above, remember
http_pony.run_from_cli(app)
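# Hedged usage sketch (hypothetical host, port and form-field name; the real
# upload field is whatever http_pony.get_upload_path() expects):
#   curl -X POST -F 'file=@photo.jpg' http://localhost:5000/dither > out.gif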
|
cooperhewitt/plumbing-atkinson-server
|
scripts/atkinson-server.py
|
Python
|
bsd-3-clause
| 1,302
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements encoding and decoding of properties.
"""
from datafinder.persistence.metadata.value_mapping.custom_format import getPersistenceRepresentation
from datafinder.persistence.metadata.value_mapping.custom_format import MetadataValue
__version__ = "$Revision-Id:$"
|
DLR-SC/DataFinder
|
src/datafinder/persistence/metadata/value_mapping/__init__.py
|
Python
|
bsd-3-clause
| 1,988
|
"""Functions to plot raw M/EEG data."""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
# Daniel McCloy <dan.mccloy@gmail.com>
#
# License: Simplified BSD
from functools import partial
from collections import OrderedDict
import numpy as np
from ..annotations import _annotations_starts_stops
from ..filter import create_filter
from ..io.pick import pick_types, _pick_data_channels, pick_info, pick_channels
from ..utils import verbose, _validate_type, _check_option
from ..time_frequency import psd_welch
from ..defaults import _handle_default
from .topo import _plot_topo, _plot_timeseries, _plot_timeseries_unified
from .utils import (plt_show, _compute_scalings, _handle_decim, _check_cov,
_shorten_path_from_middle, _handle_precompute,
_get_channel_plotting_order, _make_event_color_dict)
_RAW_CLIP_DEF = 1.5
@verbose
def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20,
bgcolor='w', color=None, bad_color='lightgray',
event_color='cyan', scalings=None, remove_dc=True, order=None,
show_options=False, title=None, show=True, block=False,
highpass=None, lowpass=None, filtorder=4,
clipping=_RAW_CLIP_DEF, show_first_samp=False,
proj=True, group_by='type', butterfly=False, decim='auto',
noise_cov=None, event_id=None, show_scrollbars=True,
show_scalebars=True, time_format='float',
precompute=None, use_opengl=None, *, theme='auto', verbose=None):
"""Plot raw data.
Parameters
----------
raw : instance of Raw
The raw data to plot.
events : array | None
Events to show with vertical bars.
duration : float
Time window (s) to plot. The lesser of this value and the duration
of the raw file will be used.
start : float
Initial time to show (can be changed dynamically once plotted). If
show_first_samp is True, then it is taken relative to
``raw.first_samp``.
n_channels : int
Number of channels to plot at once. Defaults to 20. The lesser of
``n_channels`` and ``len(raw.ch_names)`` will be shown.
Has no effect if ``order`` is 'position', 'selection' or 'butterfly'.
bgcolor : color object
Color of the background.
color : dict | color object | None
Color for the data traces. If None, defaults to::
dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m',
emg='k', ref_meg='steelblue', misc='k', stim='k',
resp='k', chpi='k')
bad_color : color object
Color to make bad channels.
%(event_color)s
Defaults to ``'cyan'``.
%(scalings)s
remove_dc : bool
If True remove DC component when plotting data.
order : array of int | None
Order in which to plot data. If the array is shorter than the number of
channels, only the given channels are plotted. If None (default), all
channels are plotted. If ``group_by`` is ``'position'`` or
``'selection'``, the ``order`` parameter is used only for selecting the
channels to be plotted.
show_options : bool
If True, a dialog for options related to projection is shown.
title : str | None
        The title of the window. If None, either the filename of the raw
        object or '<unknown>' will be displayed as title.
show : bool
Show figure if True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for setting bad channels on the fly by clicking on a line.
May not work on all systems / platforms.
(Only Qt) If you run from a script, this needs to
be ``True`` or a Qt-eventloop needs to be started somewhere
else in the script (e.g. if you want to implement the browser
inside another Qt-Application).
highpass : float | None
Highpass to apply when displaying data.
lowpass : float | None
Lowpass to apply when displaying data.
If highpass > lowpass, a bandstop rather than bandpass filter
will be applied.
filtorder : int
Filtering order. 0 will use FIR filtering with MNE defaults.
Other values will construct an IIR filter of the given order
and apply it with :func:`~scipy.signal.filtfilt` (making the effective
order twice ``filtorder``). Filtering may produce some edge artifacts
(at the left and right edges) of the signals during display.
.. versionchanged:: 0.18
Support for ``filtorder=0`` to use FIR filtering.
clipping : str | float | None
If None, channels are allowed to exceed their designated bounds in
the plot. If "clamp", then values are clamped to the appropriate
range for display, creating step-like artifacts. If "transparent",
then excessive values are not shown, creating gaps in the traces.
If float, clipping occurs for values beyond the ``clipping`` multiple
of their dedicated range, so ``clipping=1.`` is an alias for
``clipping='transparent'``.
.. versionchanged:: 0.21
Support for float, and default changed from None to 1.5.
show_first_samp : bool
If True, show time axis relative to the ``raw.first_samp``.
proj : bool
Whether to apply projectors prior to plotting (default is ``True``).
Individual projectors can be enabled/disabled interactively (see
Notes). This argument only affects the plot; use ``raw.apply_proj()``
to modify the data stored in the Raw object.
%(group_by_browse)s
butterfly : bool
Whether to start in butterfly mode. Defaults to False.
decim : int | 'auto'
Amount to decimate the data during display for speed purposes.
You should only decimate if the data are sufficiently low-passed,
otherwise aliasing can occur. The 'auto' mode (default) uses
the decimation that results in a sampling rate least three times
larger than ``min(info['lowpass'], lowpass)`` (e.g., a 40 Hz lowpass
will result in at least a 120 Hz displayed sample rate).
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels are scaled by ``scalings['whitened']``,
and their channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
magnetometers and gradiometers may introduce differences in scaling,
consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
event_id : dict | None
Event IDs used to show at event markers (default None shows
the event numbers).
.. versionadded:: 0.16.0
%(show_scrollbars)s
%(show_scalebars)s
.. versionadded:: 0.20.0
%(time_format)s
%(precompute)s
%(use_opengl)s
%(theme_pg)s
.. versionadded:: 1.0
%(verbose)s
Returns
-------
fig : matplotlib.figure.Figure | ``PyQt5.QtWidgets.QMainWindow``
Browser instance.
Notes
-----
The arrow keys (up/down/left/right) can typically be used to navigate
between channels and time ranges, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use('TkAgg') should work). The
left/right arrows will scroll by 25%% of ``duration``, whereas
shift+left/shift+right will scroll by 100%% of ``duration``. The scaling
can be adjusted with - and + (or =) keys. The viewport dimensions can be
adjusted with page up/page down and home/end keys. Full screen mode can be
toggled with the F11 key, and scrollbars can be hidden/shown by pressing
'z'. Right-click a channel label to view its location. To mark or un-mark a
channel as bad, click on a channel label or a channel trace. The changes
will be reflected immediately in the raw object's ``raw.info['bads']``
entry.
If projectors are present, a button labelled "Prj" in the lower right
corner of the plot window opens a secondary control window, which allows
enabling/disabling specific projectors individually. This provides a means
of interactively observing how each projector would affect the raw data if
it were applied.
Annotation mode is toggled by pressing 'a', butterfly mode by pressing
'b', and whitening mode (when ``noise_cov is not None``) by pressing 'w'.
By default, the channel means are removed when ``remove_dc`` is set to
``True``. This flag can be toggled by pressing 'd'.
.. note:: For the Qt backend to run in IPython with ``block=False``
you must run the magic command ``%%gui qt5`` first.
.. note:: To report issues with the qt-backend, please use the
`issues <https://github.com/mne-tools/mne-qt-browser/issues>`_
of ``mne-qt-browser``.
"""
from ..io.base import BaseRaw
from ._figure import _get_browser
info = raw.info.copy()
sfreq = info['sfreq']
projs = info['projs']
# this will be an attr for which projectors are currently "on" in the plot
projs_on = np.full_like(projs, proj, dtype=bool)
# disable projs in info if user doesn't want to see them right away
if not proj:
with info._unlock():
info['projs'] = list()
# handle defaults / check arg validity
color = _handle_default('color', color)
scalings = _compute_scalings(scalings, raw, remove_dc=remove_dc,
duration=duration)
if scalings['whitened'] == 'auto':
scalings['whitened'] = 1.
_validate_type(raw, BaseRaw, 'raw', 'Raw')
decim, picks_data = _handle_decim(info, decim, lowpass)
noise_cov = _check_cov(noise_cov, info)
units = _handle_default('units', None)
unit_scalings = _handle_default('scalings', None)
_check_option('group_by', group_by,
('selection', 'position', 'original', 'type'))
# clipping
_validate_type(clipping, (None, 'numeric', str), 'clipping')
if isinstance(clipping, str):
_check_option('clipping', clipping, ('clamp', 'transparent'),
extra='when a string')
clipping = 1. if clipping == 'transparent' else clipping
elif clipping is not None:
clipping = float(clipping)
# be forgiving if user asks for too much time
duration = min(raw.times[-1], float(duration))
# determine IIR filtering parameters
if highpass is not None and highpass <= 0:
raise ValueError(f'highpass must be > 0, got {highpass}')
if highpass is None and lowpass is None:
ba = filt_bounds = None
else:
filtorder = int(filtorder)
if filtorder == 0:
method = 'fir'
iir_params = None
else:
method = 'iir'
iir_params = dict(order=filtorder, output='sos', ftype='butter')
ba = create_filter(np.zeros((1, int(round(duration * sfreq)))),
sfreq, highpass, lowpass, method=method,
iir_params=iir_params)
filt_bounds = _annotations_starts_stops(
raw, ('edge', 'bad_acq_skip'), invert=True)
# compute event times in seconds
if events is not None:
event_times = (events[:, 0] - raw.first_samp).astype(float)
event_times /= sfreq
event_nums = events[:, 2]
else:
event_times = event_nums = None
# determine trace order
ch_names = np.array(raw.ch_names)
ch_types = np.array(raw.get_channel_types())
order = _get_channel_plotting_order(order, ch_types)
n_channels = min(info['nchan'], n_channels, len(order))
# adjust order based on channel selection, if needed
selections = None
if group_by in ('selection', 'position'):
selections = _setup_channel_selections(raw, group_by, order)
order = np.concatenate(list(selections.values()))
default_selection = list(selections)[0]
n_channels = len(selections[default_selection])
# handle event colors
event_color_dict = _make_event_color_dict(event_color, events, event_id)
# handle first_samp
first_time = raw._first_time if show_first_samp else 0
start += first_time
event_id_rev = {v: k for k, v in (event_id or {}).items()}
# generate window title; allow instances without a filename (e.g., ICA)
if title is None:
title = '<unknown>'
fnames = raw._filenames.copy()
if len(fnames):
title = fnames.pop(0)
extra = f' ... (+ {len(fnames)} more)' if len(fnames) else ''
title = f'{title}{extra}'
if len(title) > 60:
title = _shorten_path_from_middle(title)
elif not isinstance(title, str):
raise TypeError(f'title must be None or a string, got a {type(title)}')
# gather parameters and initialize figure
_validate_type(use_opengl, (bool, None), 'use_opengl')
precompute = _handle_precompute(precompute)
params = dict(inst=raw,
info=info,
# channels and channel order
ch_names=ch_names,
ch_types=ch_types,
ch_order=order,
picks=order[:n_channels],
n_channels=n_channels,
picks_data=picks_data,
group_by=group_by,
ch_selections=selections,
# time
t_start=start,
duration=duration,
n_times=raw.n_times,
first_time=first_time,
time_format=time_format,
decim=decim,
# events
event_color_dict=event_color_dict,
event_times=event_times,
event_nums=event_nums,
event_id_rev=event_id_rev,
# preprocessing
projs=projs,
projs_on=projs_on,
apply_proj=proj,
remove_dc=remove_dc,
filter_coefs=ba,
filter_bounds=filt_bounds,
noise_cov=noise_cov,
# scalings
scalings=scalings,
units=units,
unit_scalings=unit_scalings,
# colors
ch_color_bad=bad_color,
ch_color_dict=color,
# display
butterfly=butterfly,
clipping=clipping,
scrollbars_visible=show_scrollbars,
scalebars_visible=show_scalebars,
window_title=title,
bgcolor=bgcolor,
# Qt-specific
precompute=precompute,
use_opengl=use_opengl)
fig = _get_browser(show=show, block=block, **params)
return fig
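# Hedged usage sketch (illustrative file name, not part of this module):
#
#   import mne
#   raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)
#   fig = plot_raw(raw, duration=10., n_channels=20, highpass=1.,
#                  filtorder=0, block=True)  # filtorder=0 -> FIR display filter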
@verbose
def plot_raw_psd(raw, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False,
n_fft=None, n_overlap=0, reject_by_annotation=True,
picks=None, ax=None, color='black', xscale='linear',
area_mode='std', area_alpha=0.33, dB=True, estimate='auto',
show=True, n_jobs=1, average=False, line_alpha=None,
spatial_colors=True, sphere=None, window='hamming',
exclude='bads', verbose=None):
"""%(plot_psd_doc)s.
Parameters
----------
raw : instance of Raw
The raw object.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
n_fft : int | None
Number of points to use in Welch FFT calculations.
Default is None, which uses the minimum of 2048 and the
number of time points.
n_overlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
%(reject_by_annotation_raw)s
%(picks_plot_psd_good_data)s
ax : instance of Axes | None
Axes to plot into. If None, axes will be created.
%(color_plot_psd)s
%(xscale_plot_psd)s
%(area_mode_plot_psd)s
%(area_alpha_plot_psd)s
%(dB_plot_psd)s
%(estimate_plot_psd)s
%(show)s
%(n_jobs)s
%(average_plot_psd)s
%(line_alpha_plot_psd)s
%(spatial_colors_plot_psd)s
%(sphere_topomap_auto)s
%(window_psd)s
.. versionadded:: 0.22.0
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the bad channels
are excluded. Pass an empty list to plot all channels (including
channels marked "bad", if any).
.. versionadded:: 0.24.0
%(verbose)s
Returns
-------
fig : instance of Figure
Figure with frequency spectra of the data channels.
"""
from ._mpl_figure import _psd_figure
# handle FFT
if n_fft is None:
if tmax is None or not np.isfinite(tmax):
tmax = raw.times[-1]
tmin = 0. if tmin is None else tmin
n_fft = min(np.diff(raw.time_as_index([tmin, tmax]))[0] + 1, 2048)
# generate figure
fig = _psd_figure(
inst=raw, proj=proj, picks=picks, axes=ax, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, sphere=sphere, xscale=xscale, dB=dB,
average=average, estimate=estimate, area_mode=area_mode,
line_alpha=line_alpha, area_alpha=area_alpha, color=color,
spatial_colors=spatial_colors, n_jobs=n_jobs, n_fft=n_fft,
n_overlap=n_overlap, reject_by_annotation=reject_by_annotation,
window=window, exclude=exclude)
plt_show(show)
return fig
@verbose
def plot_raw_psd_topo(raw, tmin=0., tmax=None, fmin=0., fmax=100., proj=False,
n_fft=2048, n_overlap=0, layout=None, color='w',
fig_facecolor='k', axis_facecolor='k', dB=True,
show=True, block=False, n_jobs=1, axes=None,
verbose=None):
"""Plot channel-wise frequency spectra as topography.
Parameters
----------
raw : instance of io.Raw
The raw instance to use.
tmin : float
Start time for calculations. Defaults to zero.
tmax : float | None
End time for calculations. If None (default), the end of data is used.
fmin : float
Start frequency to consider. Defaults to zero.
fmax : float
End frequency to consider. Defaults to 100.
proj : bool
Apply projection. Defaults to False.
n_fft : int
Number of points to use in Welch FFT calculations. Defaults to 2048.
n_overlap : int
The number of points of overlap between blocks. Defaults to 0
(no overlap).
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If None (default), the correct layout is
inferred from the data.
color : str | tuple
A matplotlib-compatible color to use for the curves. Defaults to white.
fig_facecolor : str | tuple
A matplotlib-compatible color to use for the figure background.
Defaults to black.
axis_facecolor : str | tuple
A matplotlib-compatible color to use for the axis background.
Defaults to black.
dB : bool
If True, transform data to decibels. Defaults to True.
show : bool
Show figure if True. Defaults to True.
block : bool
Whether to halt program execution until the figure is closed.
May not work on all systems / platforms. Defaults to False.
%(n_jobs)s
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure distributing one image per channel across sensor topography.
"""
if layout is None:
from ..channels.layout import find_layout
layout = find_layout(raw.info)
psds, freqs = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, proj=proj, n_fft=n_fft,
n_overlap=n_overlap, n_jobs=n_jobs)
if dB:
psds = 10 * np.log10(psds)
y_label = 'dB'
else:
y_label = 'Power'
show_func = partial(_plot_timeseries_unified, data=[psds], color=color,
times=[freqs])
click_func = partial(_plot_timeseries, data=[psds], color=color,
times=[freqs])
picks = _pick_data_channels(raw.info)
info = pick_info(raw.info, picks)
fig = _plot_topo(info, times=freqs, show_func=show_func,
click_func=click_func, layout=layout,
axis_facecolor=axis_facecolor,
fig_facecolor=fig_facecolor, x_label='Frequency (Hz)',
unified=True, y_label=y_label, axes=axes)
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return fig
def _setup_channel_selections(raw, kind, order):
"""Get dictionary of channel groupings."""
from ..channels import (read_vectorview_selection, _SELECTIONS,
_EEG_SELECTIONS, _divide_to_regions)
from ..utils import _get_stim_channel
_check_option('group_by', kind, ('position', 'selection'))
if kind == 'position':
selections_dict = _divide_to_regions(raw.info)
keys = _SELECTIONS[1:] # omit 'Vertex'
else: # kind == 'selection'
from ..channels.channels import _get_ch_info
(has_vv_mag, has_vv_grad, *_, has_neuromag_122_grad, has_csd_coils
) = _get_ch_info(raw.info)
if not (has_vv_grad or has_vv_mag or has_neuromag_122_grad):
raise ValueError("order='selection' only works for Neuromag "
"data. Use order='position' instead.")
selections_dict = OrderedDict()
# get stim channel (if any)
stim_ch = _get_stim_channel(None, raw.info, raise_error=False)
stim_ch = stim_ch if len(stim_ch) else ['']
stim_ch = pick_channels(raw.ch_names, stim_ch)
# loop over regions
keys = np.concatenate([_SELECTIONS, _EEG_SELECTIONS])
for key in keys:
channels = read_vectorview_selection(key, info=raw.info)
picks = pick_channels(raw.ch_names, channels)
picks = np.intersect1d(picks, order)
if not len(picks):
continue # omit empty selections
selections_dict[key] = np.concatenate([picks, stim_ch])
# add misc channels
misc = pick_types(raw.info, meg=False, eeg=False, stim=True, eog=True,
ecg=True, emg=True, ref_meg=False, misc=True,
resp=True, chpi=True, exci=True, ias=True, syst=True,
seeg=False, bio=True, ecog=False, fnirs=False, dbs=False,
exclude=())
if len(misc) and np.in1d(misc, order).any():
selections_dict['Misc'] = misc
return selections_dict
|
mne-tools/mne-python
|
mne/viz/raw.py
|
Python
|
bsd-3-clause
| 23,560
|
#!/usr/bin/env python
"""Display status of APOGEE QuickLook Actor
History:
2011-08-16 ROwen Save window state.
"""
import Tkinter
import RO.Wdg
import APOGEEWdg
WindowName = "Inst.APOGEE"
def addWindow(tlSet, visible=False):
"""Create the window.
"""
tlSet.createToplevel(
name = WindowName,
defGeom = "+346+398",
visible = visible,
resizable = False,
wdgFunc = APOGEEWdg.APOGEEWdg,
doSaveState = True,
)
if __name__ == '__main__':
import TUI.Base.TestDispatcher
testDispatcher = TUI.Base.TestDispatcher.TestDispatcher("tcc")
tuiModel = testDispatcher.tuiModel
root = tuiModel.tkRoot
addWindow(tuiModel.tlSet, visible=True)
tuiModel.reactor.run()
|
r-owen/stui
|
TUI/Inst/APOGEE/APOGEEWindow.py
|
Python
|
bsd-3-clause
| 750
|
# Generated by Django 2.2.13 on 2021-03-29 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("profiles", "0009_remove_profile_leadership_level")]
operations = [
migrations.AddField(
model_name="profile",
name="can_skip_application_steps",
field=models.BooleanField(default=False),
)
]
|
mitodl/bootcamp-ecommerce
|
profiles/migrations/0010_profile_can_skip_application_steps.py
|
Python
|
bsd-3-clause
| 407
|
from touchforms.formplayer.signals import sms_form_complete
from corehq.apps.receiverwrapper.util import get_submit_url
from corehq.apps.receiverwrapper.util import submit_form_locally
from couchforms.models import XFormInstance
def handle_sms_form_complete(sender, session_id, form, **kwargs):
from corehq.apps.smsforms.models import XFormsSession
session = XFormsSession.by_session_id(session_id)
if session:
resp = submit_form_locally(form, session.domain, app_id=session.app_id)
xform_id = resp['X-CommCareHQ-FormID']
session.end(completed=True)
session.submission_id = xform_id
session.save()
xform = XFormInstance.get(xform_id)
xform.survey_incentive = session.survey_incentive
xform.save()
sms_form_complete.connect(handle_sms_form_complete)
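# Hedged note, not part of the original module: because connect() runs at
# import time, every completed touchforms SMS session fires
# handle_sms_form_complete, which submits the form locally, marks the
# session completed, and copies the session's survey_incentive onto the
# stored XFormInstance.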
|
SEL-Columbia/commcare-hq
|
corehq/apps/smsforms/signals.py
|
Python
|
bsd-3-clause
| 844
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import qt.QtCore as qc
import qt.QtGui as qg
class StrictDoubleValidator(qg.QDoubleValidator):
def validate(self, input_value, pos):
state, input_value, pos = super(StrictDoubleValidator, self).validate(str(input_value), pos)
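        # an empty field or a lone '.' is treated as in-progress input so
        # the user can keep typing, rather than being rejected outright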
if input_value == '' or input_value == '.':
return self.Intermediate, input_value, pos
if state != self.Acceptable:
return self.Invalid, input_value, pos
return self.Acceptable, input_value, pos
|
danaukes/popupcad
|
popupcad/filetypes/validators.py
|
Python
|
mit
| 627
|
#!/usr/bin/env python
import textwrap, time
import sys
if sys.version_info.major == 3:
from queue import Empty as QueueEmpty
else:
from Queue import Empty as QueueEmpty
import tx
import monitor
import peers
import wallet
import splash
import console
import net
import forks
import footer
"""
def resize(s, state, window):
if state['mode'] == 'tx':
tx.draw_window(state, window)
elif state['mode'] == 'block':
block.draw_window(state, window)
elif state['mode'] == 'peers':
peers.draw_window(state, window)
elif state['mode'] == 'wallet':
wallet.draw_window(state, window)
elif state['mode'] == 'monitor':
monitor.draw_window(state, window)
elif state['mode'] == 'console':
console.draw_window(state, window)
elif state['mode'] == 'net':
net.draw_window(state, window)
"""
def getblockchaininfo(s, state, window):
if s['getblockchaininfo']['chain'] == "test":
state['testnet'] = 1
else:
state['testnet'] = 0
if state['mode'] == "splash":
splash.draw_window(state, window)
def getnetworkinfo(s, state, window):
state['version'] = s['getnetworkinfo']['subversion']
def getconnectioncount(s, state, window):
state['peers'] = s['getconnectioncount']
def getbalance(s, state, window):
state['balance'] = s['getbalance']
def getunconfirmedbalance(s, state, window):
state['unconfirmedbalance'] = s['getunconfirmedbalance']
def getblock(s, state, window):
height = s['getblock']['height']
state['blocks'][str(height)] = s['getblock']
if state['mode'] == "monitor":
monitor.draw_window(state, window)
footer.draw_window(state)
"""
if state['mode'] == "block":
# TODO: This query check stops the block view from updating whenever
# a new block comes in. It shouldn't fire any more because
# we don't poll outside of monitor mode.
# if 'queried' in s['getblock']:
if True:
# state['blocks'][str(height)].pop('queried')
state['blocks']['browse_height'] = height
state['blocks']['offset'] = 0
state['blocks']['cursor'] = 0
block.draw_window(state, window)
"""
def getblockhash(s, state, window):
pass
def coinbase(s, state, window):
height = str(s['height'])
if height in state['blocks']:
state['blocks'][height]['coinbase_amount'] = s['coinbase']
def getrawtransaction(s, state, window):
pass
def getnetworkhashps(s, state, window):
blocks = s['getnetworkhashps']['blocks']
state['networkhashps'][blocks] = s['getnetworkhashps']['value']
if state['mode'] == "splash" and blocks == 2016: # initialization complete
state['mode'] = "monitor"
monitor.draw_window(state, window)
footer.draw_window(state)
def getnettotals(s, state, window):
state['totalbytesrecv'] = s['getnettotals']['totalbytesrecv']
state['totalbytessent'] = s['getnettotals']['totalbytessent']
state['history']['getnettotals'].append(s['getnettotals'])
    # keep the getnettotals history bounded: once it exceeds 500 entries,
    # trim it to the most recent 300
if len(state['history']['getnettotals']) > 500:
state['history']['getnettotals'] = state['history']['getnettotals'][-300:]
if state['mode'] == 'net':
net.draw_window(state, window)
footer.draw_window(state)
def getmininginfo(s, state, window):
state['mininginfo'] = s['getmininginfo']
if 'browse_height' not in state['blocks']:
state['blocks']['browse_height'] = s['getmininginfo']['blocks']
state['networkhashps']['diff'] = (int(s['getmininginfo']['difficulty'])*2**32)/600
def getpeerinfo(s, state, window):
state['peerinfo'] = s['getpeerinfo']
state['peerinfo_offset'] = 0
if state['mode'] == "peers":
peers.draw_window(state, window)
footer.draw_window(state)
def getchaintips(s, state, window):
state['chaintips'] = s['getchaintips']
state['chaintips_offset'] = 0
if state['mode'] == 'forks':
forks.draw_window(state, window)
footer.draw_window(state)
def listsinceblock(s, state, window):
state['wallet'] = s['listsinceblock']
state['wallet']['cursor'] = 0
state['wallet']['offset'] = 0
state['wallet']['view_string'] = []
state['wallet']['transactions'].sort(key=lambda entry: entry['category'], reverse=True)
    # add a cumulative balance field to transactions once ordered by time
state['wallet']['transactions'].sort(key=lambda entry: entry['time'])
state['wallet']['transactions'].sort(key=lambda entry: entry['confirmations'], reverse=True)
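    # Python's sort is stable, so after the three passes above the list is
    # ordered primarily by 'confirmations' (descending), with 'time' and
    # then 'category' breaking ties.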
cumulative_balance = 0
nonce = 0 # ensures a definitive ordering of transactions for cumulative balance
for entry in state['wallet']['transactions']:
entry['nonce'] = nonce
nonce += 1
if 'amount' in entry:
if 'fee' in entry:
cumulative_balance += entry['fee']
cumulative_balance += entry['amount']
entry['cumulative_balance'] = cumulative_balance
state['wallet']['transactions'].sort(key=lambda entry: entry['nonce'], reverse=True)
unit = 'BTC'
if 'testnet' in state:
if state['testnet']:
unit = 'TNC'
for entry in state['wallet']['transactions']:
if 'txid' in entry:
entry_time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(entry['time']))
output_string = entry_time + " %8d" % entry['confirmations'] + " conf"
delta = entry['amount']
if 'fee' in entry:
delta += entry['fee'] # this fails if not all inputs owned by wallet; could be 'too negative'
output_string += "% 17.8f" % delta + unit
output_string += " " + "% 17.8f" % entry['cumulative_balance'] + unit
state['wallet']['view_string'].append(output_string)
output_string = entry['txid'].rjust(74)
state['wallet']['view_string'].append(output_string)
if 'address' in entry: # TODO: more sanity checking here
output_string = " " + entry['category'].ljust(15) + entry['address']
else:
output_string = " unknown transaction type"
state['wallet']['view_string'].append(output_string)
state['wallet']['view_string'].append("")
if state['mode'] == "wallet":
wallet.draw_window(state, window)
footer.draw_window(state)
def lastblocktime(s, state, window):
state['lastblocktime'] = s['lastblocktime']
def txid(s, state, window):
if s['size'] < 0:
if 'tx' in state:
state.pop('tx')
if state['mode'] == 'tx':
tx.draw_window(state, window)
footer.draw_window(state)
return False
state['tx'] = {
'txid': s['txid'],
'vin': [],
'vout_string': [],
'cursor': 0,
'offset': 0,
'out_offset': 0,
'loaded': 1,
'mode': 'inputs',
'size': s['size'],
}
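    # collect inputs: coinbase transactions carry a 'coinbase' blob instead
    # of a referencing txid/vout pair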
for vin in s['vin']:
if 'coinbase' in vin:
state['tx']['vin'].append({'coinbase': vin['coinbase']})
elif 'txid' in vin:
if 'prev_tx' in vin:
state['tx']['vin'].append({'txid': vin['txid'], 'vout': vin['vout'], 'prev_tx': vin['prev_tx']})
else:
state['tx']['vin'].append({'txid': vin['txid'], 'vout': vin['vout']})
state['tx']['total_outputs'] = 0
for vout in s['vout']:
if 'value' in vout:
if vout['scriptPubKey']['type'] == "pubkeyhash":
buffer_string = "% 14.8f" % vout['value'] + ": " + vout['scriptPubKey']['addresses'][0]
else:
buffer_string = "% 14.8f" % vout['value'] + ": " + vout['scriptPubKey']['asm']
if 'confirmations' in s:
if 'spent' in vout:
if vout['spent'] == 'confirmed':
buffer_string += " [SPENT]"
elif vout['spent'] == 'unconfirmed':
buffer_string += " [UNCONFIRMED SPEND]"
else:
buffer_string += " [UNSPENT]"
state['tx']['total_outputs'] += vout['value']
state['tx']['vout_string'].extend(textwrap.wrap(buffer_string,70)) # change this to scale with window ?
if 'total_inputs' in s:
state['tx']['total_inputs'] = s['total_inputs']
if 'confirmations' in s:
state['tx']['confirmations'] = s['confirmations']
if state['mode'] == 'tx':
tx.draw_window(state, window)
footer.draw_window(state)
def consolecommand(s, state, window):
state['console']['cbuffer'].append(s['consolecommand'])
state['console']['rbuffer'].append(s['consoleresponse'])
state['console']['offset'] = 0
if state['mode'] == "console":
console.draw_window(state, window)
footer.draw_window(state)
def estimatefee(s, state, window):
blocks = s['estimatefee']['blocks']
state['estimatefee'][blocks] = s['estimatefee']['value']
def queue(state, window, response_queue):
from rpc2 import RPCResponse
while True:
try:
s = response_queue.get(False)
except QueueEmpty:
return False
if isinstance(s, dict):
# if 'resize' in s: resize(s, state, window)
if 'lastblocktime' in s: lastblocktime(s, state, window)
elif 'txid' in s: txid(s, state, window)
elif 'consolecommand' in s: consolecommand(s, state, window)
elif 'coinbase' in s: coinbase(s, state, window)
elif 'stop' in s: return s['stop']
continue
if not isinstance(s, RPCResponse):
print("Ignoring")
continue
methods = {
"getblockchaininfo": getblockchaininfo,
"getnetworkinfo": getnetworkinfo,
"getconnectioncount": getconnectioncount,
"getbalance": getbalance,
"getunconfirmedbalance": getunconfirmedbalance,
"getblock": getblock,
"getblockhash": getblockhash,
"getnetworkhashps": getnetworkhashps,
"getnettotals": getnettotals,
"getmininginfo": getmininginfo,
"getpeerinfo": getpeerinfo,
"getchaintips": getchaintips,
"getrawtransaction": getrawtransaction,
"listsinceblock": listsinceblock,
"estimatefee": estimatefee,
}
try:
method = methods[s.req.method]
except KeyError:
print("Unknown {}".format(s.req.method))
return
method({s.req.method: s.result}, state, window)
|
esotericnonsense/bitcoind-ncurses
|
process.py
|
Python
|
mit
| 10,751
|
from django.shortcuts import render
from data_center.models import Announcement
def index(request):
announcements = Announcement.objects.all().order_by('-time')
return render(request, 'index.html', {'announcements': announcements})
|
leVirve/NTHU_Course
|
index/views.py
|
Python
|
mit
| 243
|
"""ReClean URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from properties import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^send$', views.send_message, name='send'),
url(r'^cleaned$', views.cleaned, name='cleaned'),
url(r'^sms$', views.sms, name='sms'),
url(r'^check$', views.check, name='check'),
url(r'^reset$', views.reset, name='reset'),
url(r'^admin/', include(admin.site.urls)),
]
|
oOPa/ReClean
|
ReClean/urls.py
|
Python
|
mit
| 1,075
|
from flask import Flask, request, g, abort
app = Flask(__name__)
@app.before_request
def before_request():
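    # aborting here short-circuits every request: the 'hello' view below
    # never runs, though after_request and teardown_request still fire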
abort(400)
@app.after_request
def after_request(response):
print 'after_request', response
return response
@app.teardown_request
def teardown_request(exc):
print 'teardown_request', exc
@app.route('/')
def hello():
return 'hello'
app.run(port=3000)
|
zeaphoo/cocopot
|
examples/flasktest.py
|
Python
|
mit
| 385
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.http import HttpResponse
from django.views.generic import FormView
class TemplateFormView(FormView):
template_name = 'form.html'
def heavy_data_1(request):
numbers = ['Zero', 'One', 'Two', 'Three', 'Four', 'Five']
results = [{'id': index, 'text': value} for (index, value) in enumerate(numbers)]
return HttpResponse(json.dumps({'err': 'nil', 'results': results}), content_type='application/json')
def heavy_data_2(request):
numbers = ['Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Fortytwo']
results = [{'id': index, 'text': value} for (index, value) in enumerate(numbers)]
return HttpResponse(json.dumps({'err': 'nil', 'results': results}), content_type='application/json')
|
djkartsa/django-select2-chained
|
tests/testapp/views.py
|
Python
|
mit
| 794
|
import os
import click
from keep import cli, utils
@click.command('pull', short_help='Updates the local database with remote.')
@click.option('--overwrite', is_flag=True, help='Overwrite local commands')
@cli.pass_context
def cli(ctx, overwrite):
"""Updates the local database with remote."""
credentials_path = os.path.join(os.path.expanduser('~'), '.keep', '.credentials')
if not os.path.exists(credentials_path):
click.echo('You are not registered.')
utils.register()
else:
utils.pull(ctx, overwrite)
|
OrkoHunter/keep
|
keep/legacy_commands/removed_cmd_pull.py
|
Python
|
mit
| 546
|
import math
from unittest import TestCase
import jsonschema
class TestMinItems(TestCase):
schema = { "type": "array", "minItems": 4 }
schema2 = { "minItems": 4 }
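    # schema2 omits "type": non-array data must pass validation untouched,
    # since minItems only constrains arrays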
def test_minItems_pass(self):
#test equal
data1 = [1, 2, "3", 4.0]
#test greater than
data2 = [1, 2, "3", 4.0, 5.00]
try:
jsonschema.validate(data1, self.schema)
jsonschema.validate(data2, self.schema)
except ValueError, e:
self.fail("Unexpected failure: %s" % e)
def test_minItems_pass2(self):
#test when data is not an array
data1 = "test"
#test arrays with no type attribute
data2 = [1, 2, "3", 4.0, 5.00]
try:
jsonschema.validate(data1, self.schema2)
jsonschema.validate(data2, self.schema2)
except ValueError, e:
self.fail("Unexpected failure: %s" % e)
def test_minItems_fail(self):
#test equal
data1 = [1, 2, "3"]
try:
jsonschema.validate(data1, self.schema)
except ValueError:
pass
else:
self.fail("Expected failure for %s" % repr(None))
try:
jsonschema.validate(data1, self.schema2)
except ValueError:
pass
else:
self.fail("Expected failure for %s" % repr(None))
|
okoye/json-grammer-nazi
|
jsonschema/tests/test_minItems.py
|
Python
|
mit
| 1,253
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from djblets.testing.decorators import add_fixtures
from reviewboard.reviews.models import (DefaultReviewer, ReviewRequest,
ReviewRequestDraft)
from reviewboard.scmtools.errors import ChangeNumberInUseError
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
class ReviewRequestManagerTests(TestCase):
"""Unit tests for reviewboard.reviews.managers.ReviewRequestManager."""
fixtures = ['test_users']
@add_fixtures(['test_scmtools'])
def test_create_with_site(self):
"""Testing ReviewRequest.objects.create with LocalSite"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='test')
repository = self.create_repository()
review_request = ReviewRequest.objects.create(
user, repository, local_site=local_site)
self.assertEqual(review_request.repository, repository)
self.assertEqual(review_request.local_site, local_site)
self.assertEqual(review_request.local_id, 1)
@add_fixtures(['test_scmtools'])
def test_create_with_site_and_commit_id(self):
"""Testing ReviewRequest.objects.create with LocalSite and commit ID"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='test')
repository = self.create_repository()
review_request = ReviewRequest.objects.create(
user, repository,
commit_id='123',
local_site=local_site)
self.assertEqual(review_request.repository, repository)
self.assertEqual(review_request.commit_id, '123')
self.assertEqual(review_request.local_site, local_site)
self.assertEqual(review_request.local_id, 1)
@add_fixtures(['test_scmtools'])
def test_create_with_site_and_commit_id_conflicts_review_request(self):
"""Testing ReviewRequest.objects.create with LocalSite and commit ID
that conflicts with a review request
"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='test')
repository = self.create_repository()
# This one should be fine.
ReviewRequest.objects.create(user, repository, commit_id='123',
local_site=local_site)
self.assertEqual(local_site.review_requests.count(), 1)
# This one will yell.
with self.assertRaises(ChangeNumberInUseError):
ReviewRequest.objects.create(
user,
repository,
commit_id='123',
local_site=local_site)
# Make sure that entry doesn't exist in the database.
self.assertEqual(local_site.review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_create_with_site_and_commit_id_conflicts_draft(self):
"""Testing ReviewRequest.objects.create with LocalSite and
commit ID that conflicts with a draft
"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='test')
repository = self.create_repository()
# This one should be fine.
existing_review_request = ReviewRequest.objects.create(
user, repository, local_site=local_site)
existing_draft = ReviewRequestDraft.create(existing_review_request)
existing_draft.commit_id = '123'
existing_draft.save()
self.assertEqual(local_site.review_requests.count(), 1)
# This one will yell.
with self.assertRaises(ChangeNumberInUseError):
ReviewRequest.objects.create(
user,
repository,
commit_id='123',
local_site=local_site)
# Make sure that entry doesn't exist in the database.
self.assertEqual(local_site.review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_create_with_site_and_commit_id_and_fetch_problem(self):
"""Testing ReviewRequest.objects.create with LocalSite and
commit ID with problem fetching commit details
"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='test')
repository = self.create_repository()
self.assertEqual(local_site.review_requests.count(), 0)
ReviewRequest.objects.create(
user, repository,
commit_id='123',
local_site=local_site,
create_from_commit_id=True)
        # Make sure the entry was created in the database.
self.assertEqual(local_site.review_requests.count(), 1)
review_request = local_site.review_requests.get()
self.assertEqual(review_request.local_id, 1)
self.assertEqual(review_request.commit_id, '123')
@add_fixtures(['test_scmtools'])
def test_create_with_create_from_commit_id(self):
"""Testing ReviewRequest.objects.create with commit ID and
create_from_commit_id
"""
user = User.objects.get(username='doc')
repository = self.create_repository(tool_name='Test')
review_request = ReviewRequest.objects.create(
user,
repository,
commit_id='123',
create_from_commit_id=True)
self.assertEqual(review_request.repository, repository)
self.assertEqual(review_request.diffset_history.diffsets.count(), 0)
self.assertEqual(review_request.commit_id, '123')
self.assertEqual(review_request.changenum, 123)
draft = review_request.get_draft()
self.assertIsNotNone(draft)
self.assertIsNotNone(draft.diffset)
self.assertEqual(draft.commit_id, '123')
@add_fixtures(['test_scmtools'])
def test_create_with_create_from_commit_id_and_default_reviewers(self):
"""Testing ReviewRequest.objects.create with commit ID,
create_from_commit_id, and default reviewers
"""
user = User.objects.get(username='doc')
repository = self.create_repository(tool_name='Test')
default_reviewer = DefaultReviewer.objects.create(
name='Default Reviewer',
file_regex='.')
default_reviewer.repository.add(repository)
default_reviewer.people.add(user)
default_reviewer.groups.add(self.create_review_group())
review_request = ReviewRequest.objects.create(
user,
repository,
commit_id='123',
create_from_commit_id=True)
self.assertEqual(review_request.target_people.count(), 0)
self.assertEqual(review_request.target_groups.count(), 0)
draft = review_request.get_draft()
self.assertIsNotNone(draft)
self.assertEqual(draft.target_people.count(), 1)
self.assertEqual(draft.target_groups.count(), 1)
def test_public(self):
"""Testing ReviewRequest.objects.public"""
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='grumpy')
self.create_review_request(summary='Test 1',
publish=True,
submitter=user1)
self.create_review_request(summary='Test 2',
submitter=user2)
self.create_review_request(summary='Test 3',
status='S',
public=True,
submitter=user1)
self.create_review_request(summary='Test 4',
status='S',
public=True,
submitter=user2)
self.create_review_request(summary='Test 5',
status='D',
public=True,
submitter=user1)
self.create_review_request(summary='Test 6',
status='D',
submitter=user2)
self.assertValidSummaries(
ReviewRequest.objects.public(user=user1),
[
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.public(status=None),
[
'Test 5',
'Test 4',
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.public(user=user2, status=None),
[
'Test 6',
'Test 5',
'Test 4',
'Test 3',
'Test 2',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.public(status=None,
show_all_unpublished=True),
[
'Test 6',
'Test 5',
'Test 4',
'Test 3',
'Test 2',
'Test 1',
])
@add_fixtures(['test_scmtools'])
def test_public_with_repository_on_local_site(self):
"""Testing ReviewRequest.objects.public with repository on a
Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
repository = self.create_repository(local_site=local_site)
review_request = self.create_review_request(repository=repository,
local_site=local_site,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_without_private_repo_access(self):
"""Testing ReviewRequest.objects.public without access to private
repositories
"""
user = User.objects.get(username='grumpy')
repository = self.create_repository(public=False)
review_request = self.create_review_request(repository=repository,
publish=True)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 0)
@add_fixtures(['test_scmtools'])
def test_public_without_private_repo_access_on_local_site(self):
"""Testing ReviewRequest.objects.public without access to private
repositories on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
repository = self.create_repository(public=False,
local_site=local_site)
review_request = self.create_review_request(repository=repository,
local_site=local_site,
publish=True)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 0)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_access(self):
"""Testing ReviewRequest.objects.public with access to private
repositories
"""
user = User.objects.get(username='grumpy')
repository = self.create_repository(public=False)
repository.users.add(user)
review_request = self.create_review_request(repository=repository,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_access_on_local_site(self):
"""Testing ReviewRequest.objects.public with access to private
repositories on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
repository = self.create_repository(public=False,
local_site=local_site)
repository.users.add(user)
review_request = self.create_review_request(repository=repository,
publish=True,
local_site=local_site)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_access_through_group(self):
"""Testing ReviewRequest.objects.public with access to private
repositories
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
group.users.add(user)
repository = self.create_repository(public=False)
repository.review_groups.add(group)
review_request = self.create_review_request(repository=repository,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_access_through_group_on_local_site(self):
"""Testing ReviewRequest.objects.public with access to private
repositories on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
group = self.create_review_group(invite_only=True)
group.users.add(user)
repository = self.create_repository(public=False,
local_site=local_site)
repository.review_groups.add(group)
review_request = self.create_review_request(repository=repository,
local_site=local_site,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
def test_public_without_private_group_access(self):
"""Testing ReviewRequest.objects.public without access to private
group
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
review_request = self.create_review_request(publish=True)
review_request.target_groups.add(group)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 0)
def test_public_with_private_group_access(self):
"""Testing ReviewRequest.objects.public with access to private
group
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
group.users.add(user)
review_request = self.create_review_request(publish=True)
review_request.target_groups.add(group)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
def test_public_with_private_group_access_on_local_site(self):
"""Testing ReviewRequest.objects.public with access to private
group on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
group = self.create_review_group(invite_only=True,
local_site=local_site)
group.users.add(user)
review_request = self.create_review_request(publish=True,
local_site=local_site)
review_request.target_groups.add(group)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_and_public_group(self):
"""Testing ReviewRequest.objects.public without access to private
repositories and with access to private group
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group()
repository = self.create_repository(public=False)
review_request = self.create_review_request(repository=repository,
publish=True)
review_request.target_groups.add(group)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 0)
@add_fixtures(['test_scmtools'])
def test_public_with_private_group_and_public_repo(self):
"""Testing ReviewRequest.objects.public with access to private
group and without access to private group
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
repository = self.create_repository(public=False)
repository.users.add(user)
review_request = self.create_review_request(repository=repository,
publish=True)
review_request.target_groups.add(group)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 0)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_and_owner(self):
"""Testing ReviewRequest.objects.public without access to private
repository and as the submitter
"""
user = User.objects.get(username='grumpy')
repository = self.create_repository(public=False)
review_request = self.create_review_request(repository=repository,
submitter=user,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_and_owner_on_local_site(self):
"""Testing ReviewRequest.objects.public without access to private
repository and as the submitter on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
repository = self.create_repository(public=False,
local_site=local_site)
review_request = self.create_review_request(repository=repository,
submitter=user,
local_site=local_site,
publish=True)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
def test_public_with_private_group_and_owner(self):
"""Testing ReviewRequest.objects.public without access to private
group and as the submitter
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
review_request = self.create_review_request(submitter=user,
publish=True)
review_request.target_groups.add(group)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
def test_public_with_private_group_and_owner_on_local_site(self):
"""Testing ReviewRequest.objects.public without access to private
group and as the submitter on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
group = self.create_review_group(invite_only=True,
local_site=local_site)
review_request = self.create_review_request(submitter=user,
local_site=local_site,
publish=True)
review_request.target_groups.add(group)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
@add_fixtures(['test_scmtools'])
def test_public_with_private_repo_and_target_people(self):
"""Testing ReviewRequest.objects.public without access to private
repository and user in target_people
"""
user = User.objects.get(username='grumpy')
repository = self.create_repository(public=False)
review_request = self.create_review_request(repository=repository,
publish=True)
review_request.target_people.add(user)
self.assertFalse(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 0)
def test_public_with_private_group_and_target_people(self):
"""Testing ReviewRequest.objects.public without access to private
group and user in target_people
"""
user = User.objects.get(username='grumpy')
group = self.create_review_group(invite_only=True)
review_request = self.create_review_request(publish=True)
review_request.target_groups.add(group)
review_request.target_people.add(user)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user)
self.assertEqual(review_requests.count(), 1)
def test_public_with_private_group_and_target_people_on_local_site(self):
"""Testing ReviewRequest.objects.public without access to private
group and user in target_people on a Local Site
"""
local_site = LocalSite.objects.create(name='test')
user = User.objects.get(username='grumpy')
local_site.users.add(user)
group = self.create_review_group(invite_only=True,
local_site=local_site)
review_request = self.create_review_request(publish=True,
local_site=local_site)
review_request.target_groups.add(group)
review_request.target_people.add(user)
self.assertTrue(review_request.is_accessible_by(user))
review_requests = ReviewRequest.objects.public(user=user,
local_site=local_site)
self.assertEqual(review_requests.count(), 1)
def test_to_group(self):
"""Testing ReviewRequest.objects.to_group"""
user1 = User.objects.get(username='doc')
group1 = self.create_review_group(name='privgroup')
group1.users.add(user1)
review_request = self.create_review_request(summary='Test 1',
public=True,
submitter=user1)
review_request.target_groups.add(group1)
review_request = self.create_review_request(summary='Test 2',
public=False,
submitter=user1)
review_request.target_groups.add(group1)
review_request = self.create_review_request(summary='Test 3',
public=True,
status='S',
submitter=user1)
review_request.target_groups.add(group1)
self.assertValidSummaries(
ReviewRequest.objects.to_group('privgroup', None),
[
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_group('privgroup', None, status=None),
[
'Test 3',
'Test 1',
])
def test_to_user_group(self):
"""Testing ReviewRequest.objects.to_user_groups"""
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='grumpy')
group1 = self.create_review_group(name='group1')
group1.users.add(user1)
group2 = self.create_review_group(name='group2')
group2.users.add(user2)
review_request = self.create_review_request(summary='Test 1',
public=True,
submitter=user1)
review_request.target_groups.add(group1)
review_request = self.create_review_request(summary='Test 2',
submitter=user2,
public=True,
status='S')
review_request.target_groups.add(group1)
review_request = self.create_review_request(summary='Test 3',
public=True,
submitter=user2)
review_request.target_groups.add(group1)
review_request.target_groups.add(group2)
self.assertValidSummaries(
ReviewRequest.objects.to_user_groups('doc', local_site=None),
[
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user_groups(
'doc', status=None, local_site=None),
[
'Test 3',
'Test 2',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user_groups(
'grumpy', user=user2, local_site=None),
[
'Test 3',
])
def test_to_user_directly(self):
"""Testing ReviewRequest.objects.to_user_directly"""
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='grumpy')
group1 = self.create_review_group(name='group1')
group1.users.add(user1)
group2 = self.create_review_group(name='group2')
group2.users.add(user2)
review_request = self.create_review_request(summary='Test 1',
public=True,
submitter=user1)
review_request.target_groups.add(group1)
review_request.target_people.add(user2)
review_request = self.create_review_request(summary='Test 2',
submitter=user2,
status='S')
review_request.target_groups.add(group1)
review_request.target_people.add(user2)
review_request.target_people.add(user1)
review_request = self.create_review_request(summary='Test 3',
public=True,
submitter=user2)
review_request.target_groups.add(group1)
review_request.target_groups.add(group2)
review_request.target_people.add(user1)
review_request = self.create_review_request(summary='Test 4',
public=True,
status='S',
submitter=user2)
review_request.target_people.add(user1)
self.assertValidSummaries(
ReviewRequest.objects.to_user_directly('doc', local_site=None),
[
'Test 3',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user_directly('doc', status=None),
[
'Test 4',
'Test 3',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user_directly(
'doc', user2, status=None, local_site=None),
[
'Test 4',
'Test 3',
'Test 2',
])
def test_from_user(self):
"""Testing ReviewRequest.objects.from_user"""
user1 = User.objects.get(username='doc')
self.create_review_request(summary='Test 1',
public=True,
submitter=user1)
self.create_review_request(summary='Test 2',
public=False,
submitter=user1)
self.create_review_request(summary='Test 3',
public=True,
status='S',
submitter=user1)
self.assertValidSummaries(
ReviewRequest.objects.from_user('doc', local_site=None),
[
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.from_user('doc', status=None,
local_site=None),
[
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.from_user(
'doc', user=user1, status=None, local_site=None),
[
'Test 3',
'Test 2',
'Test 1',
])
def test_to_user(self):
"""Testing ReviewRequest.objects.to_user"""
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='grumpy')
group1 = self.create_review_group(name='group1')
group1.users.add(user1)
group2 = self.create_review_group(name='group2')
group2.users.add(user2)
review_request = self.create_review_request(summary='Test 1',
publish=True,
submitter=user1)
review_request.target_groups.add(group1)
review_request = self.create_review_request(summary='Test 2',
submitter=user2,
status='S')
review_request.target_groups.add(group1)
review_request.target_people.add(user2)
review_request.target_people.add(user1)
review_request = self.create_review_request(summary='Test 3',
publish=True,
submitter=user2)
review_request.target_groups.add(group1)
review_request.target_groups.add(group2)
review_request.target_people.add(user1)
review_request = self.create_review_request(summary='Test 4',
publish=True,
status='S',
submitter=user2)
review_request.target_groups.add(group1)
review_request.target_groups.add(group2)
review_request.target_people.add(user1)
self.assertValidSummaries(
ReviewRequest.objects.to_user('doc', local_site=None),
[
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user('doc', status=None, local_site=None),
[
'Test 4',
'Test 3',
'Test 1',
])
self.assertValidSummaries(
ReviewRequest.objects.to_user(
'doc', user=user2, status=None, local_site=None),
[
'Test 4',
'Test 3',
'Test 2',
'Test 1',
])
def assertValidSummaries(self, review_requests, summaries):
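        """Assert that the review requests' summaries match ``summaries``.

        Membership is checked in both directions, so ordering differences
        are ignored.
        """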
r_summaries = [r.summary for r in review_requests]
for summary in r_summaries:
self.assertIn(summary, summaries,
'summary "%s" not found in summary list'
% summary)
for summary in summaries:
self.assertIn(summary, r_summaries,
'summary "%s" not found in review request list'
% summary)
|
davidt/reviewboard
|
reviewboard/reviews/tests/test_review_request_manager.py
|
Python
|
mit
| 34,489
|
#!/usr/bin/env python
# encoding: utf-8
"""
Haystack.py
An on-disk cache with a dict-like API, inspired by Facebook's Haystack store
Created by Rui Carmo on 2010-04-05
Published under the MIT license.
"""
__author__ = ('Rui Carmo http://the.taoofmac.com')
__revision__ = "$Id$"
__version__ = "1.0"
import os, sys, stat, mmap, thread, time, logging
log = logging.getLogger()
try:
import cPickle as pickle
except ImportError:
import pickle # fall back on Python version
class Haystack(dict):
def __init__(self,path,basename = "haystack", commit = 300, compact = 3600):
super(Haystack,self).__init__()
self.enabled = True
self.mutex = thread.allocate_lock()
self.commitinterval = commit
self.compactinterval = compact
self.path = path
self.basename = basename
self.cache = os.path.join(self.path,self.basename + '.bin')
self.index = os.path.join(self.path,self.basename + '.idx')
self.temp = os.path.join(self.path,self.basename + '.tmp')
self._rebuild()
self.created = self.modified = self.compacted = self.committed = time.time()
def _rebuild(self):
self.mutex.acquire()
try:
os.makedirs(self.path)
except Exception, e:
log.warn("Error on makedirs(%s): %s" % (self.path, e))
pass
        try:
            cache = open(self.cache,"rb")
            cache.close()
        except Exception, e:
            log.warn("Error while opening %s for reading: %s" % (self.cache, e))
            # the cache file does not exist yet; create it empty
            cache = open(self.cache,"ab")
            cache.close()
try:
self._index = pickle.loads(open(self.index,"rb").read())
except Exception, e:
log.warn("index retrieval from disk failed: %s" % e)
self._index = {} # "key": [mtime,length,offset]
self.created = self.modified = self.compacted = self.committed = time.time()
log.debug("Rebuild complete, %d items." % len(self._index.keys()))
self.mutex.release()
def commit(self):
if not self.enabled:
return
self.mutex.acquire()
open(self.index,"wb").write(pickle.dumps(self._index))
self.committed = time.time()
log.debug("Index %s commited, %d items." % (self.index, len(self._index.keys())))
self.mutex.release()
def purge(self):
self.mutex.acquire()
try:
os.unlink(self.index)
except OSError, e:
log.warn("Could not unlink %s: %s" % (self.index, e))
pass
try:
os.unlink(self.cache)
except OSError, e:
log.warn("Could not unlink %s: %s" % (self.index, e))
pass
self.mutex.release()
self._rebuild()
def _cleanup(self):
now = time.time()
if now > (self.committed + self.commitinterval):
self.commit()
if now > (self.compacted + self.compactinterval):
self._compact()
def __eq__(self,other):
raise TypeError('Equality undefined for this kind of dictionary')
def __ne__(self,other):
raise TypeError('Equality undefined for this kind of dictionary')
def __lt__(self,other):
raise TypeError('Comparison undefined for this kind of dictionary')
def __le__(self,other):
raise TypeError('Comparison undefined for this kind of dictionary')
def __gt__(self,other):
raise TypeError('Comparison undefined for this kind of dictionary')
def __ge__(self,other):
raise TypeError('Comparison undefined for this kind of dictionary')
    def __repr__(self):
        raise TypeError('Representation undefined for this kind of dictionary')
def expire(self,when):
"""Remove from cache any items older than a specified time"""
if not self.enabled: return
self.mutex.acquire()
for k in self._index.keys():
if self._index[k][0] < when:
del self._index[k]
self.mutex.release()
self._cleanup()
def keys(self):
return self._index.keys()
def stats(self,key):
if not self.enabled:
raise KeyError
self.mutex.acquire()
try:
stats = self._index[key]
self.mutex.release()
return stats
except KeyError:
self.mutex.release()
raise KeyError
def __setitem__(self,key,val):
"""Store an item in the cache - errors will cause the entire cache to be rebuilt"""
if not self.enabled:
return
self.mutex.acquire()
try:
cache = open(self.cache,"ab")
buffer = pickle.dumps(val,2)
offset = cache.tell()
cache.write(buffer)
self.modified = mtime = time.time()
self._index[key] = [mtime,len(buffer),offset]
cache.flush()
cache.close()
self.mutex.release()
except Exception, e:
log.warn("Error while storing %s: %s" % (key, e))
self.mutex.release()
raise IOError
def __delitem__(self,key):
"""Remove item from cache - in practice, we only remove it from the index"""
if not self.enabled:
return
self.mutex.acquire()
try:
del self._index[key]
self.mutex.release()
except Exception, e:
log.warn("Error while deleting %s: %s" % (key, e))
self.mutex.release()
raise KeyError
    def get(self, key, default=None):
"""Retrieve item"""
try:
return self.__getitem__(key)
except KeyError:
return default
def __getitem__(self, key):
"""Retrieve item"""
if not self.enabled:
raise KeyError
self.mutex.acquire()
try:
cache = open(self.cache,"rb")
cache.seek(self._index[key][2])
buffer = cache.read(self._index[key][1])
item = pickle.loads(buffer)
self.mutex.release()
except Exception, e:
log.debug("Error while retrieving %s: %s" % (key, e))
self.mutex.release()
raise KeyError
finally:
cache.close()
return item
def mtime(self, key):
"""Return the creation/modification time of a cache item"""
if not self.enabled:
raise KeyError
self.mutex.acquire()
try:
item = self._index[key][0]
self.mutex.release()
return item
except Exception, e:
log.debug("Error while getting modification time for %s: %s" % (key, e))
self.mutex.release()
raise KeyError
def _compact(self):
"""Compact the cache"""
self.mutex.acquire()
cache = open(self.cache,"rb")
compacted = open(self.temp,"ab")
newindex = {}
i = 0
for key in self._index.keys():
cache.seek(self._index[key][2])
offset = compacted.tell()
compacted.write(cache.read(self._index[key][1]))
newindex[key] = [time.time(),self._index[key][1],offset]
i = i + 1
cache.close()
size = compacted.tell()
compacted.flush()
os.fsync(compacted.fileno())
compacted.close()
os.rename(self.temp,self.cache)
self.compacted = time.time()
self._index = newindex
self.mutex.release()
self.commit()
log.debug("Compacted %s: %d items into %d bytes" % (self.cache, i, size))
if __name__=="__main__":
c = Haystack('.', commit = 3, compact = 4)
c['tired'] = "to expire in 2 seconds"
for i in range(1,10):
time.sleep(1)
c['foo'] = {'a':1}
c['zbr'] = '42'
c['test/path/name'] = "test"
c.expire(time.time() - 2)
del c['foo']
assert(c['zbr']=='42') # will eventually fail with KeyError when the cache times out
|
rcarmo/yaki-gae
|
lib/haystack.py
|
Python
|
mit
| 8,031
|
# Copyright (c) 1999 John Aycock
# Copyright (c) 2000-2002 by hartmut Goebel <hartmut@goebel.noris.de>
# Copyright (c) 2005 by Dan Pascu <dan@windowmaker.org>
#
# See main module for license.
#
#
# Decompilation (walking AST)
#
# All table-driven. Step 1 determines a table (T) and a path to a
# table key (K) from the node type (N) (other nodes are shown as O):
#
# N N N&K
# / | ... \ / | ... \ / | ... \
# O O O O O K O O O
# |
# K
#
# MAP_R0 (TABLE_R0) MAP_R (TABLE_R) MAP_DIRECT (TABLE_DIRECT)
#
# The default is a direct mapping. The key K is then extracted from the
# subtree and used to find a table entry T[K], if any. The result is a
# format string and arguments (a la printf()) for the formatting engine.
# Escapes in the format string are:
#
# %c evaluate N[A] recursively*
# %C evaluate N[A[0]]..N[A[1]] recursively, separate by A[2]*
# %, print ',' if last %C only printed one item (for tuples--unused)
# %| tab to current indentation level
# %+ increase current indentation level
# %- decrease current indentation level
# %{...} evaluate ... in context of N
# %% literal '%'
#
# * indicates an argument (A) required.
#
# The '%' may optionally be followed by a number (C) in square brackets, which
# makes the engine walk down to N[C] before evaluating the escape code.
#
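# Illustrative example (not part of the original tables): the entry
#     'assign': ( '%|%c = %c\n', -1, 0 )
# in TABLE_DIRECT below walks node[-1] (the designator) for the first %c
# and node[0] (the expr) for the second, with %| emitting the current
# indentation -- producing e.g. "x = 42\n".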
import sys, re, cStringIO
from types import ListType, TupleType, DictType, \
EllipsisType, IntType, CodeType
from spark import GenericASTTraversal
import Parser
from Parser import AST
from Scanner import Token, Code
minint = -sys.maxint-1
# Some ASTs used for comparing code fragments (like 'return None' at
# the end of functions).
RETURN_LOCALS = AST('stmt',
[ AST('return_stmt',
[ AST('expr', [ Token('LOAD_LOCALS') ]),
Token('RETURN_VALUE')]) ])
NONE = AST('expr', [ Token('LOAD_CONST', pattr=None) ] )
RETURN_NONE = AST('stmt',
[ AST('return_stmt',
[ NONE, Token('RETURN_VALUE')]) ])
ASSIGN_DOC_STRING = lambda doc_string: \
AST('stmt',
[ AST('assign',
[ AST('expr', [ Token('LOAD_CONST', pattr=doc_string) ]),
AST('designator', [ Token('STORE_NAME', pattr='__doc__')])
])])
BUILD_TUPLE_0 = AST('expr',
[ AST('build_list',
[ Token('BUILD_TUPLE_0') ])])
#TAB = '\t' # as God intended
TAB = ' ' *4 # is less spacy than "\t"
INDENT_PER_LEVEL = ' ' # additional indent per pretty-print level
TABLE_R = {
'build_tuple2': ( '%C', (0,-1,', ') ),
'POP_TOP': ( '%|%c\n', 0 ),
'STORE_ATTR': ( '%c.%[1]{pattr}', 0),
# 'STORE_SUBSCR': ( '%c[%c]', 0, 1 ),
'STORE_SLICE+0': ( '%c[:]', 0 ),
'STORE_SLICE+1': ( '%c[%c:]', 0, 1 ),
'STORE_SLICE+2': ( '%c[:%c]', 0, 1 ),
'STORE_SLICE+3': ( '%c[%c:%c]', 0, 1, 2 ),
'JUMP_ABSOLUTE': ( '%|continue\n', ),
'DELETE_SLICE+0': ( '%|del %c[:]\n', 0 ),
'DELETE_SLICE+1': ( '%|del %c[%c:]\n', 0, 1 ),
'DELETE_SLICE+2': ( '%|del %c[:%c]\n', 0, 1 ),
'DELETE_SLICE+3': ( '%|del %c[%c:%c]\n', 0, 1, 2 ),
'DELETE_ATTR': ( '%|del %c.%[-1]{pattr}\n', 0 ),
# 'EXEC_STMT': ( '%|exec %c in %[1]C\n', 0, (0,sys.maxint,', ') ),
'BINARY_SUBSCR': ( '%c[%c]', 0, 1), # required for augmented assign
'UNARY_POSITIVE': ( '+%c', 0 ),
'UNARY_NEGATIVE': ( '-%c', 0 ),
'UNARY_CONVERT': ( '`%c`', 0 ),
'UNARY_INVERT': ( '~%c', 0 ),
'UNARY_NOT': ( '(not %c)', 0 ),
'SLICE+0': ( '%c[:]', 0 ),
'SLICE+1': ( '%c[%c:]', 0, 1 ),
'SLICE+2': ( '%c[:%c]', 0, 1 ),
'SLICE+3': ( '%c[%c:%c]', 0, 1, 2 ),
}
TABLE_R0 = {
# 'BUILD_LIST': ( '[%C]', (0,-1,', ') ),
# 'BUILD_TUPLE': ( '(%C)', (0,-1,', ') ),
# 'CALL_FUNCTION': ( '%c(%C)', 0, (1,-1,', ') ),
}
TABLE_DIRECT = {
'BINARY_ADD': ( '+' ,),
'BINARY_SUBTRACT': ( '-' ,),
'BINARY_MULTIPLY': ( '*' ,),
'BINARY_DIVIDE': ( '/' ,),
'BINARY_TRUE_DIVIDE': ( '/' ,),
'BINARY_FLOOR_DIVIDE': ( '//' ,),
'BINARY_MODULO': ( '%%',),
'BINARY_POWER': ( '**',),
'BINARY_LSHIFT': ( '<<',),
'BINARY_RSHIFT': ( '>>',),
'BINARY_AND': ( '&' ,),
'BINARY_OR': ( '|' ,),
'BINARY_XOR': ( '^' ,),
'INPLACE_ADD': ( '+=' ,),
'INPLACE_SUBTRACT': ( '-=' ,),
'INPLACE_MULTIPLY': ( '*=' ,),
'INPLACE_DIVIDE': ( '/=' ,),
'INPLACE_TRUE_DIVIDE': ( '/=' ,),
'INPLACE_FLOOR_DIVIDE': ( '//=' ,),
'INPLACE_MODULO': ( '%%=',),
'INPLACE_POWER': ( '**=',),
'INPLACE_LSHIFT': ( '<<=',),
'INPLACE_RSHIFT': ( '>>=',),
'INPLACE_AND': ( '&=' ,),
'INPLACE_OR': ( '|=' ,),
'INPLACE_XOR': ( '^=' ,),
'binary_expr': ( '(%c %c %c)', 0, -1, 1 ),
'IMPORT_FROM': ( '%{pattr}', ),
'LOAD_ATTR': ( '.%{pattr}', ),
'LOAD_FAST': ( '%{pattr}', ),
'LOAD_NAME': ( '%{pattr}', ),
'LOAD_GLOBAL': ( '%{pattr}', ),
'LOAD_DEREF': ( '%{pattr}', ),
'LOAD_LOCALS': ( 'locals()', ),
# 'LOAD_CONST': ( '%{pattr}', ), # handled by n_LOAD_CONST
'DELETE_FAST': ( '%|del %{pattr}\n', ),
'DELETE_NAME': ( '%|del %{pattr}\n', ),
'DELETE_GLOBAL': ( '%|del %{pattr}\n', ),
'delete_subscr': ( '%|del %c[%c]\n', 0, 1,),
'binary_subscr': ( '%c[%c]', 0, 1),
'store_subscr': ( '%c[%c]', 0, 1),
'STORE_FAST': ( '%{pattr}', ),
'STORE_NAME': ( '%{pattr}', ),
'STORE_GLOBAL': ( '%{pattr}', ),
'STORE_DEREF': ( '%{pattr}', ),
'unpack': ( '(%C,)', (1, sys.maxint, ', ') ),
'unpack_list': ( '[%C]', (1, sys.maxint, ', ') ),
#'list_compr': ( '[ %c ]', -2), # handled by n_list_compr
'list_iter': ( '%c', 0),
'list_for': ( ' for %c in %c%c', 2, 0, 3 ),
'list_if': ( ' if %c%c', 0, 2 ),
    'lc_body': ( '', ), # ignore when recursing
'assign': ( '%|%c = %c\n', -1, 0 ),
'augassign1': ( '%|%c %c %c\n', 0, 2, 1),
'augassign2': ( '%|%c%c %c %c\n', 0, 2, -3, -4),
# 'dup_topx': ( '%c', 0),
'designList': ( '%c = %c', 0, -1 ),
'and': ( '(%c and %c)', 0, 3 ),
'and2': ( '%c', 4 ),
'or': ( '(%c or %c)', 0, 3 ),
'compare': ( '(%c %[-1]{pattr} %c)', 0, 1 ),
'cmp_list': ( '%c %c', 0, 1),
'cmp_list1': ( '%[3]{pattr} %c %c', 0, -2),
'cmp_list2': ( '%[1]{pattr} %c', 0),
# 'classdef': (), # handled by n_classdef()
'funcdef': ( '\n%|def %c\n', -2), # -2 to handle closures
'kwarg': ( '%[0]{pattr}=%c', 1),
'importstmt': ( '%|import %[0]{pattr}\n', ),
'importfrom': ( '%|from %[0]{pattr} import %c\n', 1 ),
'importlist': ( '%C', (0, sys.maxint, ', ') ),
'importstmt2': ( '%|import %c\n', 1),
'importstar2': ( '%|from %[1]{pattr} import *\n', ),
'importfrom2': ( '%|from %[1]{pattr} import %c\n', 2 ),
'importlist2': ( '%C', (0, sys.maxint, ', ') ),
'importstmt25': ( '%|import %c\n', 2),
'importstar25': ( '%|from %[2]{pattr} import *\n', ),
'importfrom25': ( '%|from %[2]{pattr} import %c\n', 3 ),
'assert': ( '%|assert %c\n' , 3 ),
'assert2': ( '%|assert %c, %c\n' , 3, -5 ),
'assert3': ( '%|assert %c\n' , 0 ),
'assert4': ( '%|assert %c, %c\n' , 0, -4 ),
'print_stmt': ( '%|print %c,\n', 0 ),
'print_stmt_nl': ( '%|print %[0]C\n', (0,1, None) ),
'print_nl_stmt': ( '%|print\n', ),
'print_to': ( '%|print >> %c, %c,\n', 0, 1 ),
'print_to_nl': ( '%|print >> %c, %c\n', 0, 1 ),
'print_nl_to': ( '%|print >> %c\n', 0 ),
'print_to_items': ( '%C', (0, 2, ', ') ),
'call_stmt': ( '%|%c\n', 0),
'break_stmt': ( '%|break\n', ),
'continue_stmt': ( '%|continue\n', ),
'raise_stmt': ( '%|raise %[0]C\n', (0,sys.maxint,', ') ),
'yield_stmt': ( '%|yield %c\n', 0),
'return_stmt': ( '%|return %c\n', 0),
'return_lambda': ( '%c', 0),
'ifstmt': ( '%|if %c:\n%+%c%-', 0, 2 ),
'ifelsestmt': ( '%|if %c:\n%+%c%-%|else:\n%+%c%-', 0, 2, -2 ),
'ifelifstmt': ( '%|if %c:\n%+%c%-%c', 0, 2, -2 ),
'elifelifstmt': ( '%|elif %c:\n%+%c%-%c', 0, 2, -2 ),
'elifstmt': ( '%|elif %c:\n%+%c%-', 0, 2 ),
'elifelsestmt': ( '%|elif %c:\n%+%c%-%|else:\n%+%c%-', 0, 2, -2 ),
'ifnotstmt': ( '%|if not(%c):\n%+%c%-', 0, 2 ),
'ifnotelsestmt': ( '%|if not(%c):\n%+%c%-%|else:\n%+%c%-', 0, 2, -2 ),
'ifandstmt': ( '%|if(%c and %c):\n%+%c%-', 0, 3, 6),
'ifforstmt': ( '%|if %c:\n%+%|for %c in %c:\n%+%c%-%-\n', 0, 5, 3, 6 ),
'ifforelsestmt':( '%|if %c:\n%+%|for %c in %c:\n%+%c%-%-\n%|else:\n%+%c%-', 0, 5, 3, 6, -3 ),
'whilestmt': ( '%|while %c:\n%+%c%-\n', 1, 4 ),
'while1stmt': ( '%|while 1:\n%+%c%-\n', 5 ),
'whileelsestmt': ( '%|while %c:\n%+%c%-%|else:\n%+%c%-\n', 1, 4, -2 ),
'while1elsestmt': ( '%|while 1:\n%+%c%-%|else:\n%+%c%-\n', 5, -2 ),
'forstmt': ( '%|for %c in %c:\n%+%c%-\n', 3, 1, 4 ),
'forelsestmt': (
'%|for %c in %c:\n%+%c%-%|else:\n%+%c%-\n', 3, 1, 4, -2
),
'trystmt': ( '%|try:\n%+%c%-%c', 1, 5 ),
'except': ( '%|except:\n%+%c%-', 3 ),
'except_cond1': ( '%|except %c:\n%+%c%-', 1, 8 ),
'except_cond2': ( '%|except %c, %c:\n%+%c%-', 1, 6, 8 ),
'except_else': ( '%|else:\n%+%c%-', 2 ),
'tryfinallystmt': ( '%|try:\n%+%c%-\n%|finally:\n%+%c%-\n', 1, 5 ),
'passstmt': ( '%|pass\n', ),
'STORE_FAST': ( '%{pattr}', ),
'kv': ( '%c: %c', 3, 1 ),
'kv2': ( '%c: %c', 1, 2 ),
'mapexpr': ( '{%[1]C}', (0,sys.maxint,', ') ),
}
MAP_DIRECT = (TABLE_DIRECT, )
MAP_R0 = (TABLE_R0, -1, 0)
MAP_R = (TABLE_R, -1)
MAP = {
'stmt': MAP_R,
'del_stmt': MAP_R,
'designator': MAP_R,
'expr': MAP_R,
'exprlist': MAP_R0,
}
ASSIGN_TUPLE_PARAM = lambda param_name: \
AST('expr', [ Token('LOAD_FAST', pattr=param_name) ])
escape = re.compile(r'''
(?P<prefix> [^%]* )
% ( \[ (?P<child> -? \d+ ) \] )?
((?P<type> [^{] ) |
( [{] (?P<expr> [^}]* ) [}] ))
''', re.VERBOSE)
class ParserError(Parser.ParserError):
def __init__(self, error, tokens):
self.error = error # previous exception
self.tokens = tokens
def __str__(self):
lines = ['--- This code section failed: ---']
lines.extend( map(str, self.tokens) )
lines.extend( ['', str(self.error)] )
return '\n'.join(lines)
__globals_tokens__ = ('STORE_GLOBAL', 'DELETE_GLOBAL') # 'LOAD_GLOBAL'
def find_globals(node, globals):
"""Find globals in this statement."""
for n in node:
if isinstance(n, AST):
#if n != 'stmt': # skip nested statements
globals = find_globals(n, globals)
elif n.type in __globals_tokens__:
globals[n.pattr] = None
return globals
class Walker(GenericASTTraversal, object):
stacked_params = ('f', 'indent', 'isLambda', '_globals')
def __init__(self, out, scanner, showast=0):
GenericASTTraversal.__init__(self, ast=None)
self.scanner = scanner
params = {
'f': out,
'indent': '',
}
self.showast = showast
self.__params = params
self.__param_stack = []
f = property(lambda s: s.__params['f'],
lambda s, x: s.__params.__setitem__('f', x),
lambda s: s.__params.__delitem__('f'),
None)
indent = property(lambda s: s.__params['indent'],
lambda s, x: s.__params.__setitem__('indent', x),
lambda s: s.__params.__delitem__('indent'),
None)
isLambda = property(lambda s: s.__params['isLambda'],
lambda s, x: s.__params.__setitem__('isLambda', x),
lambda s: s.__params.__delitem__('isLambda'),
None)
_globals = property(lambda s: s.__params['_globals'],
lambda s, x: s.__params.__setitem__('_globals', x),
lambda s: s.__params.__delitem__('_globals'),
None)
def indentMore(self, indent=TAB):
self.indent += indent
def indentLess(self, indent=TAB):
self.indent = self.indent[:-len(indent)]
def traverse(self, node, indent=None, isLambda=0):
self.__param_stack.append(self.__params)
if indent is None: indent = self.indent
self.__params = {
'_globals': {},
'f': cStringIO.StringIO(),
'indent': indent,
'isLambda': isLambda,
}
self.preorder(node)
result = self.f.getvalue()
self.__params = self.__param_stack.pop()
return result
def write(self, *data):
if type(data) == ListType:
self.f.writelines(data)
elif type(data) == TupleType:
self.f.writelines(list(data))
else:
self.f.write(data)
def print_(self, *data):
self.write(*data)
print >> self.f
def print_docstring(self, indent, docstring):
pass
# def unquote(quote, string):
# unquote = '\\' + quote
# while string.find(quote) >= 0:
# string = string.replace(quote, unquote)
# return string
#
# if docstring.find('\n'): # multiline string
# if docstring.find('"""') >=0:
# quote = "'''"
# else:
# quote = '"""';
# unquote(quote, docstring)
# docstring = docstring.split('\n')
# self.write(indent, quote)
# for i in range(len(docstring)-1):
# self.print_( repr(docstring[i])[1:-1] )
# self.print_(repr(docstring[-1])[1:-1], quote)
# else:
# self.print_(indent, repr(docstring))
def n_LOAD_CONST(self, node):
data = node.pattr; datatype = type(data)
if datatype is IntType and data == minint:
# convert to hex, since decimal representation
# would result in 'LOAD_CONST; UNARY_NEGATIVE'
# change:hG/2002-02-07: this was done for all negative integers
# todo: check whether this is necessary in Python 2.1
self.write( hex(data) )
elif datatype is EllipsisType:
self.write('...')
        elif data is None:
            # LOAD_CONST None only occurs when None is implicit,
            # e.g. in a bare 'return' with no value
            self.write('None')
self.write('None')
else:
self.write(repr(data))
# LOAD_CONST is a terminal, so stop processing/recursing early
self.prune()
def n_delete_subscr(self, node):
maybe_tuple = node[-2][-1]
if maybe_tuple.type.startswith('BUILD_TUPLE'):
maybe_tuple.type = 'build_tuple2'
self.default(node)
n_store_subscr = n_binary_subscr = n_delete_subscr
def n_exec_stmt(self, node):
"""
exec_stmt ::= expr exprlist DUP_TOP EXEC_STMT
exec_stmt ::= expr exprlist EXEC_STMT
"""
self.write(self.indent, 'exec ')
self.preorder(node[0])
if node[1][0] != NONE:
sep = ' in '
for subnode in node[1]:
self.write(sep); sep = ", "
self.preorder(subnode)
self.print_()
self.prune() # stop recursing
def n_list_compr(self, node):
n = node[-2]
assert n == 'list_iter'
        # find innermost node
while n == 'list_iter':
n = n[0] # recurse one step
if n == 'list_for': n = n[3]
elif n == 'list_if': n = n[2]
assert n == 'lc_body'
self.write( '[ ');
self.preorder(n[1]) # lc_body
self.preorder(node[-2]) # for/if parts
self.write( ' ]')
self.prune() # stop recursing
def n_ifelsestmt(self, node, preprocess=0):
if len(node[-2]) == 1:
ifnode = node[-2][0][0]
if ifnode == 'ifelsestmt':
node.type = 'ifelifstmt'
self.n_ifelsestmt(ifnode, preprocess=1)
if ifnode == 'ifelifstmt':
ifnode.type = 'elifelifstmt'
elif ifnode == 'ifelsestmt':
ifnode.type = 'elifelsestmt'
elif ifnode == 'ifstmt':
node.type = 'ifelifstmt'
ifnode.type = 'elifstmt'
if not preprocess:
self.default(node)
def n_import_as(self, node):
iname = node[0].pattr;
assert node[-1][-1].type.startswith('STORE_')
sname = node[-1][-1].pattr # assume one of STORE_.... here
if iname == sname or iname.startswith(sname + '.'):
self.write(iname)
else:
self.write(iname, ' as ', sname)
self.prune() # stop recursing
def n_mkfunc(self, node):
self.write(node[-2].attr.co_name) # = code.co_name
self.indentMore()
self.make_function(node, isLambda=0)
self.indentLess()
self.prune() # stop recursing
def n_mklambda(self, node):
self.make_function(node, isLambda=1)
self.prune() # stop recursing
def n_classdef(self, node):
# class definition ('class X(A,B,C):')
assert node[0].pattr == node[-1][-1].pattr
self.write(self.indent, 'class ', str(node[-1][-1].pattr))
if node[1] != BUILD_TUPLE_0: # this is a subclass
self.preorder(node[1]) # output superclasses
self.print_(':')
# class body
self.indentMore()
self.build_class(node[-4][-2].attr)
self.indentLess()
self.prune()
def n_mapexpr(self, node):
"""
prettyprint a mapexpr
'mapexpr' is something like k = {'a': 1, 'b': 42 }"
"""
# " <- emacs happy
assert node[-1] == 'kvlist'
node = node[-1] # goto kvlist
self.indentMore(INDENT_PER_LEVEL)
        line_separator = ',\n' + self.indent
sep = INDENT_PER_LEVEL[:-1]
self.write('{')
for kv in node:
assert kv in ('kv', 'kv2')
# kv ::= DUP_TOP expr ROT_TWO expr STORE_SUBSCR
# kv2 ::= DUP_TOP expr expr ROT_THREE STORE_SUBSCR
if kv == 'kv':
name = self.traverse(kv[-2], indent='');
value = self.traverse(kv[1], indent=self.indent+(len(name)+2)*' ')
else:
name = self.traverse(kv[1], indent='');
value = self.traverse(kv[-3], indent=self.indent+(len(name)+2)*' ')
self.write(sep, name, ': ', value)
            sep = line_separator
self.write('}')
self.indentLess(INDENT_PER_LEVEL)
self.prune()
def n_build_list(self, node):
"""
prettyprint a list or tuple
"""
lastnode = node.pop().type
if lastnode.startswith('BUILD_LIST'):
self.write('['); endchar = ']'
elif lastnode.startswith('BUILD_TUPLE'):
self.write('('); endchar = ')'
else:
raise 'Internal Error: n_build_list expects list or tuple'
self.indentMore(INDENT_PER_LEVEL)
        line_separator = ',\n' + self.indent
sep = INDENT_PER_LEVEL[:-1]
for elem in node:
assert elem == 'expr'
value = self.traverse(elem)
self.write(sep, value)
            sep = line_separator
self.write(endchar)
self.indentLess(INDENT_PER_LEVEL)
self.prune()
def engine(self, entry, startnode):
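        """
        Interpret the format string of a TABLE entry against startnode.
        A sketch of the escape codes handled below:
          %%        write a literal '%'
          %+ / %-   increase / decrease indentation
          %|        write the current indentation
          %c        recurse into one child (index taken from the next entry arg)
          %C        recurse into the children [low:high], joined by sep
          %{expr}   evaluate expr against the node's attribute dict
        If the escape carries a 'child' group, that child of startnode is
        selected first.
        """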
#self.print_("-----")
#self.print_(str(startnode.__dict__))
fmt = entry[0]
## no longer used, since BUILD_TUPLE_n is pretty printed:
##lastC = 0
arg = 1
i = 0
m = escape.search(fmt)
while m:
i = m.end()
self.write(m.group('prefix'))
typ = m.group('type') or '{'
node = startnode
try:
if m.group('child'):
node = node[int(m.group('child'))]
except:
print node.__dict__
raise
if typ == '%': self.write('%')
elif typ == '+': self.indentMore()
elif typ == '-': self.indentLess()
elif typ == '|': self.write(self.indent)
## no longer used, since BUILD_TUPLE_n is pretty printed:
##elif typ == ',':
## if lastC == 1:
## self.write(',')
elif typ == 'c':
self.preorder(node[entry[arg]])
arg += 1
elif typ == 'C':
low, high, sep = entry[arg]
## lastC = remaining = len(node[low:high])
remaining = len(node[low:high])
for subnode in node[low:high]:
self.preorder(subnode)
remaining -= 1
if remaining > 0:
self.write(sep)
arg += 1
elif typ == '{':
d = node.__dict__
expr = m.group('expr')
try:
self.f.write(eval(expr, d, d))
except:
print node
raise
m = escape.search(fmt, i)
self.write(fmt[i:])
def default(self, node):
mapping = MAP.get(node, MAP_DIRECT)
table = mapping[0]
key = node
for i in mapping[1:]:
key = key[i]
if table.has_key(key):
self.engine(table[key], node)
self.prune()
def customize(self, customize):
"""
Special handling for opcodes that take a variable number
of arguments -- we add a new entry for each in TABLE_R.
"""
for k, v in customize.items():
if TABLE_R.has_key(k):
continue
op = k[ :k.rfind('_') ]
if op == 'CALL_FUNCTION': TABLE_R[k] = ('%c(%C)', 0, (1,-1,', '))
elif op in ('CALL_FUNCTION_VAR',
'CALL_FUNCTION_VAR_KW', 'CALL_FUNCTION_KW'):
if v == 0:
str = '%c(%C' # '%C' is a dummy here ...
p2 = (0, 0, None) # .. because of this
else:
str = '%c(%C, '
p2 = (1,-2, ', ')
if op == 'CALL_FUNCTION_VAR':
str += '*%c)'
entry = (str, 0, p2, -2)
elif op == 'CALL_FUNCTION_KW':
str += '**%c)'
entry = (str, 0, p2, -2)
else:
str += '*%c, **%c)'
if p2[2]: p2 = (1, -3, ', ')
entry = (str, 0, p2, -3, -2)
TABLE_R[k] = entry
## handled by n_mapexpr:
##if op == 'BUILD_SLICE': TABLE_R[k] = ('%C' , (0,-1,':'))
## handled by n_build_list:
##if op == 'BUILD_LIST': TABLE_R[k] = ('[%C]' , (0,-1,', '))
##elif op == 'BUILD_TUPLE': TABLE_R[k] = ('(%C%,)', (0,-1,', '))
def get_tuple_parameter(self, ast, name):
"""
If the name of the formal parameter starts with dot,
it's a tuple parameter, like this:
# def MyFunc(xx, (a,b,c), yy):
# print a, b*2, c*42
In byte-code, the whole tuple is assigned to parameter '.1' and
then the tuple gets unpacked to 'a', 'b' and 'c'.
Since identifiers starting with a dot are illegal in Python,
we can search for the byte-code equivalent to '(a,b,c) = .1'
"""
assert ast == 'stmts'
for i in range(len(ast)):
# search for an assign-statement
assert ast[i] == 'stmt'
node = ast[i][0]
if node == 'assign' \
and node[0] == ASSIGN_TUPLE_PARAM(name):
# okay, this assigns '.n' to something
del ast[i]
# walk lhs; this
# returns a tuple of identifiers as used
# within the function definition
assert node[1] == 'designator'
# if lhs is not a UNPACK_TUPLE (or equiv.),
                # add parentheses to make this a tuple
if node[1][0] not in ('unpack', 'unpack_list'):
return '(' + self.traverse(node[1], indent='') + ')'
return self.traverse(node[1], indent='')
raise "Can't find tuple parameter" % name
def make_function(self, node, isLambda, nested=1):
"""Dump function defintion, doc string, and function body."""
def build_param(ast, name, default):
"""build parameters:
- handle defaults
- handle format tuple parameters
"""
            # if the formal parameter is a tuple, the parameter name
            # starts with a dot (e.g. '.1', '.2')
if name.startswith('.'):
# replace the name with the tuple-string
name = self.get_tuple_parameter(ast, name)
if default:
if self.showast:
print '--', name
print default
print '--'
result = '%s = %s' % (name, self.traverse(default, indent='') )
if result[-2:] == '= ': # default was 'LOAD_CONST None'
result += 'None'
return result
else:
return name
defparams = node[:node[-1].attr] # node[-1] == MAKE_xxx_n
code = node[-2].attr
assert type(code) == CodeType
code = Code(code, self.scanner)
#assert isinstance(code, Code)
ast = self.build_ast(code._tokens, code._customize)
code._tokens = None # save memory
assert ast == 'stmts'
if isLambda:
# convert 'return' statement to expression
#assert len(ast[0]) == 1 wrong, see 'lambda (r,b): r,b,g'
assert ast[-1] == 'stmt'
assert len(ast[-1]) == 1
assert ast[-1][0] == 'return_stmt'
ast[-1][0].type = 'return_lambda'
else:
if ast[-1] == RETURN_NONE:
# Python adds a 'return None' to the
# end of any function; remove it
ast.pop() # remove last node
# add defaults values to parameter names
argc = code.co_argcount
paramnames = list(code.co_varnames[:argc])
# defaults are for last n parameters, thus reverse
paramnames.reverse(); defparams.reverse()
# build parameters
##This would be a nicer piece of code, but I can't get this to work
        ## now, have to find a usable lambda construct hG/2000-09-05
##params = map(lambda name, default: build_param(ast, name, default),
## paramnames, defparams)
params = []
for name, default in map(lambda a,b: (a,b), paramnames, defparams):
params.append( build_param(ast, name, default) )
params.reverse() # back to correct order
if 4 & code.co_flags: # flag 2 -> variable number of args
params.append('*%s' % code.co_varnames[argc])
argc += 1
if 8 & code.co_flags: # flag 3 -> keyword args
params.append('**%s' % code.co_varnames[argc])
argc += 1
# dump parameter list (with default values)
indent = self.indent
if isLambda:
self.write("lambda ", ", ".join(params), ":")
else:
self.print_("(", ", ".join(params), "):")
#self.print_(indent, '#flags:\t', int(code.co_flags))
if len(code.co_consts)>0 and code.co_consts[0] != None: # docstring exists, dump it
self.print_docstring(indent, code.co_consts[0])
for g in find_globals(ast, {}).keys():
self.print_(indent, 'global ', g)
self.gen_source(ast, code._customize, isLambda=isLambda)
code._tokens = None; code._customize = None # save memory
def build_class(self, code):
"""Dump class definition, doc string and class body."""
assert type(code) == CodeType
code = Code(code, self.scanner)
#assert isinstance(code, Code)
indent = self.indent
#self.print_(indent, '#flags:\t', int(code.co_flags))
ast = self.build_ast(code._tokens, code._customize)
code._tokens = None # save memory
assert ast == 'stmts'
# if docstring exists, dump it
if code.co_consts[0] != None \
and ast[0] == ASSIGN_DOC_STRING(code.co_consts[0]):
#print '\n\n>>-->>doc string set\n\n'
self.print_docstring(indent, code.co_consts[0])
del ast[0]
# the function defining a class normally returns locals(); we
# don't want this to show up in the source, thus remove the node
if ast[-1] == RETURN_LOCALS:
ast.pop() # remove last node
for g in find_globals(ast, {}).keys():
self.print_(indent, 'global ', g)
self.gen_source(ast, code._customize)
code._tokens = None; code._customize = None # save memory
def gen_source(self, ast, customize, isLambda=0):
"""convert AST to source code"""
# if code would be empty, append 'pass'
if len(ast) == 0:
self.print_(self.indent, 'pass')
else:
self.customize(customize)
self.print_(self.traverse(ast, isLambda=isLambda))
def build_ast(self, tokens, customize):
assert type(tokens) == ListType
assert isinstance(tokens[0], Token)
# Build AST from disassembly.
try:
ast = Parser.parse(tokens, customize)
except Parser.ParserError, e:
raise ParserError(e, tokens)
if self.showast:
self.print_(repr(ast))
return ast
# local variables:
# tab-width: 4
|
devyn/unholy
|
decompyle/decompyle/Walker.py
|
Python
|
mit
| 29,850
|
"""
Objects with No values
"""
from galaxy.datatypes.metadata import MetadataCollection
from galaxy.datatypes.registry import Registry
class RecursiveNone:
def __str__( self ):
return "None"
def __repr__( self ):
return str( self )
def __getattr__( self, name ):
value = RecursiveNone()
setattr( self, name, value )
return value
def __nonzero__( self ):
return False
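# A minimal usage sketch: any attribute chain on a RecursiveNone yields
# further RecursiveNone instances, e.g.
#   n = RecursiveNone()
#   n.foo.bar           # -> another RecursiveNone
#   str( n.foo.bar )    # -> "None"
#   bool( n.foo.bar )   # -> False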
class NoneDataset( RecursiveNone ):
def __init__( self, datatypes_registry = None, ext = 'data', dbkey = '?' ):
self.ext = self.extension = ext
self.dbkey = dbkey
if datatypes_registry is None: datatypes_registry = Registry()
self.datatype = datatypes_registry.get_datatype_by_extension( ext )
self._metadata = None
self.metadata = MetadataCollection( self )
def __getattr__( self, name ):
return "None"
def missing_meta( self ):
return False
|
volpino/Yeps-EURAC
|
lib/galaxy/util/none_like.py
|
Python
|
mit
| 952
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# DBSCAN_multiplex/setup.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Setup script for DBSCAN_multiplex, a fast and memory-efficient implementation of DBSCAN
(Density-Based Spatial Clustering of Applications with Noise).
The gain is especially outstanding for applications involving multiple rounds of down-sampling
and clustering from a common dataset.
References
----------
* Ester, M., Kriegel, H.-P., Sander, J. and Xu, X., "A Density-Based Algorithm for Discovering Clusters in Large Spatial
Databases with Noise".
In: Proceedings of the Second International Conference on Knowledge Discovery and Data Mining (KDD-96), pp. 226–231. 1996
* Kriegel, H.-P., Kroeger, P., Sander, J. and Zimek, A., "Density-based Clustering".
In: WIREs Data Mining and Knowledge Discovery, 1, 3, pp. 231–240. 2011
"""
import codecs
from os import path
from sys import version
from distutils.core import setup
if version < '2.2.3':
from distutils.dist import DistributionMetadata
DistributionMetadata.classifiers = None
DistributionMetadata.download_url = None
here = path.abspath(path.dirname(__file__))
try:
import pypandoc
z = pypandoc.convert('README.md', 'rst', format = 'markdown')
with open(path.join(here, 'README'), 'w') as f:
f.write(z)
with codecs.open(path.join(here, 'README'), encoding = 'utf-8') as f:
long_description = f.read()
except:
print("WARNING: 'pypandoc' module not found: could not convert from Markdown to RST format")
long_description = ''
setup(name = 'DBSCAN_multiplex',
version = '1.5',
      description = 'Fast and memory-efficient DBSCAN clustering, '
                    'possibly on various subsamples out of a common dataset',
long_description = long_description,
url = 'https://github.com/GGiecold/DBSCAN_multiplex',
download_url = 'https://github.com/GGiecold/DBSCAN_multiplex',
author = 'Gregory Giecold',
author_email = 'g.giecold@gmail.com',
maintainer = 'Gregory Giecold',
maintainer_email = 'ggiecold@jimmy.harvard.edu',
license = 'MIT License',
py_modules = ['DBSCAN_multiplex'],
platforms = ('Any',),
requires = ['numpy (>=1.9.0)', 'sklearn', 'tables'],
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Scientific/Engineering :: Mathematics', ],
keywords = 'machine-learning clustering',
)
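# Typical invocation (a sketch): `python setup.py sdist` to build a source
# distribution, or `python setup.py install` to install the module.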
|
GGiecold/DBSCAN
|
setup.py
|
Python
|
mit
| 3,305
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from models import ipl_scores
from django.contrib import auth
class customizedform(UserCreationForm):
#Email = forms.EmailField(required=True)
class Meta:
model = User
fields = ('username', 'password1', 'password2')
def save(self, commit = True):
        user = super(customizedform, self).save(commit=False)
#user.Email = self.cleaned_data['Email']
#user.username = self['username'] #user.username = None
#user.password = self['password1']
#user.username = self['username']
if commit:
user.save()
return user
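# A usage sketch in a view (the 'request' object is hypothetical, not part of
# this module):
#   form = customizedform(request.POST)
#   if form.is_valid():
#       form.save()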
'''
class ipl_scores(forms.ModelForm):
class Meta:
model = ipl_scores
fields = ('Detail', 'Score1', 'Score2')
'''
|
Rahul91/Django_IPL
|
templates/signups/forms.py
|
Python
|
mit
| 735
|
#
# Cython/Python language types
#
from __future__ import absolute_import
import copy
import re
try:
reduce
except NameError:
from functools import reduce
from .Code import UtilityCode, LazyUtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from .Errors import error
class BaseType(object):
#
# Base class for all Cython types including pseudo-types.
# List of attribute names of any subtypes
subtypes = []
_empty_declaration = None
def can_coerce_to_pyobject(self, env):
return False
def cast_code(self, expr_code):
return "((%s)%s)" % (self.empty_declaration_code(), expr_code)
def empty_declaration_code(self):
if self._empty_declaration is None:
self._empty_declaration = self.declaration_code('')
return self._empty_declaration
def specialization_name(self):
# This is not entirely robust.
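        # A sketch of the mangling below: 'unsigned int' -> 'unsigned_int',
        # 'long long' -> 'long_long', 'float *' -> 'float___2a_' (characters
        # outside the safe set are hex-escaped).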
safe = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789'
all = []
for c in self.empty_declaration_code().replace("unsigned ", "unsigned_").replace("long long", "long_long").replace(" ", "__"):
if c in safe:
all.append(c)
else:
all.append('_%x_' % ord(c))
return ''.join(all)
def base_declaration_code(self, base_code, entity_code):
if entity_code:
return "%s %s" % (base_code, entity_code)
else:
return base_code
def __deepcopy__(self, memo):
"""
        Types never need to be copied; if we do copy, Unfortunate Things
        Will Happen!
"""
return self
def get_fused_types(self, result=None, seen=None, subtypes=None):
subtypes = subtypes or self.subtypes
if not subtypes:
return None
if result is None:
result = []
seen = set()
for attr in subtypes:
list_or_subtype = getattr(self, attr)
if list_or_subtype:
if isinstance(list_or_subtype, BaseType):
list_or_subtype.get_fused_types(result, seen)
else:
for subtype in list_or_subtype:
subtype.get_fused_types(result, seen)
return result
def specialize_fused(self, env):
if env.fused_to_specific:
return self.specialize(env.fused_to_specific)
return self
@property
def is_fused(self):
"""
Whether this type or any of its subtypes is a fused type
"""
# Add this indirection for the is_fused property to allow overriding
# get_fused_types in subclasses.
return self.get_fused_types()
def deduce_template_params(self, actual):
"""
Deduce any template params in this (argument) type given the actual
argument type.
http://en.cppreference.com/w/cpp/language/function_template#Template_argument_deduction
"""
if self == actual:
return {}
else:
return None
def __lt__(self, other):
"""
For sorting. The sorting order should correspond to the preference of
conversion from Python types.
Override to provide something sensible. This is only implemented so that
python 3 doesn't trip
"""
return id(type(self)) < id(type(other))
def py_type_name(self):
"""
Return the name of the Python type that can coerce to this type.
"""
def typeof_name(self):
"""
Return the string with which fused python functions can be indexed.
"""
if self.is_builtin_type or self.py_type_name() == 'object':
index_name = self.py_type_name()
else:
index_name = str(self)
return index_name
def check_for_null_code(self, cname):
"""
Return the code for a NULL-check in case an UnboundLocalError should
be raised if an entry of this type is referenced before assignment.
Returns None if no check should be performed.
"""
return None
def invalid_value(self):
"""
Returns the most invalid value an object of this type can assume as a
C expression string. Returns None if no such value exists.
"""
class PyrexType(BaseType):
#
# Base class for all Cython types
#
# is_pyobject boolean Is a Python object type
# is_extension_type boolean Is a Python extension type
# is_final_type boolean Is a final extension type
# is_numeric boolean Is a C numeric type
# is_int boolean Is a C integer type
# is_float boolean Is a C floating point type
# is_complex boolean Is a C complex type
# is_void boolean Is the C void type
# is_array boolean Is a C array type
# is_ptr boolean Is a C pointer type
# is_null_ptr boolean Is the type of NULL
# is_reference boolean Is a C reference type
# is_const boolean Is a C const type.
# is_cfunction boolean Is a C function type
# is_struct_or_union boolean Is a C struct or union type
# is_struct boolean Is a C struct type
# is_enum boolean Is a C enum type
# is_typedef boolean Is a typedef type
# is_string boolean Is a C char * type
# is_pyunicode_ptr boolean Is a C PyUNICODE * type
# is_cpp_string boolean Is a C++ std::string type
# is_unicode_char boolean Is either Py_UCS4 or Py_UNICODE
# is_returncode boolean Is used only to signal exceptions
# is_error boolean Is the dummy error type
# is_buffer boolean Is buffer access type
# has_attributes boolean Has C dot-selectable attributes
# default_value string Initial value
# entry Entry The Entry for this type
#
# declaration_code(entity_code,
# for_display = 0, dll_linkage = None, pyrex = 0)
# Returns a code fragment for the declaration of an entity
# of this type, given a code fragment for the entity.
# * If for_display, this is for reading by a human in an error
# message; otherwise it must be valid C code.
# * If dll_linkage is not None, it must be 'DL_EXPORT' or
# 'DL_IMPORT', and will be added to the base type part of
# the declaration.
# * If pyrex = 1, this is for use in a 'cdef extern'
# statement of a Cython include file.
#
# assignable_from(src_type)
# Tests whether a variable of this type can be
# assigned a value of type src_type.
#
# same_as(other_type)
# Tests whether this type represents the same type
# as other_type.
#
# as_argument_type():
# Coerces array and C function types into pointer type for use as
# a formal argument type.
#
is_pyobject = 0
is_unspecified = 0
is_extension_type = 0
is_final_type = 0
is_builtin_type = 0
is_numeric = 0
is_int = 0
is_float = 0
is_complex = 0
is_void = 0
is_array = 0
is_ptr = 0
is_null_ptr = 0
is_reference = 0
is_const = 0
is_cfunction = 0
is_struct_or_union = 0
is_cpp_class = 0
is_cpp_string = 0
is_struct = 0
is_enum = 0
is_typedef = 0
is_string = 0
is_pyunicode_ptr = 0
is_unicode_char = 0
is_returncode = 0
is_error = 0
is_buffer = 0
is_ctuple = 0
is_memoryviewslice = 0
has_attributes = 0
default_value = ""
def resolve(self):
# If a typedef, returns the base type.
return self
def specialize(self, values):
# TODO(danilo): Override wherever it makes sense.
return self
def literal_code(self, value):
# Returns a C code fragment representing a literal
# value of this type.
return str(value)
def __str__(self):
return self.declaration_code("", for_display = 1).strip()
def same_as(self, other_type, **kwds):
return self.same_as_resolved_type(other_type.resolve(), **kwds)
def same_as_resolved_type(self, other_type):
return self == other_type or other_type is error_type
def subtype_of(self, other_type):
return self.subtype_of_resolved_type(other_type.resolve())
def subtype_of_resolved_type(self, other_type):
return self.same_as(other_type)
def assignable_from(self, src_type):
return self.assignable_from_resolved_type(src_type.resolve())
def assignable_from_resolved_type(self, src_type):
return self.same_as(src_type)
def as_argument_type(self):
return self
def is_complete(self):
# A type is incomplete if it is an unsized array,
# a struct whose attributes are not defined, etc.
return 1
def is_simple_buffer_dtype(self):
return (self.is_int or self.is_float or self.is_complex or self.is_pyobject or
self.is_extension_type or self.is_ptr)
def struct_nesting_depth(self):
# Returns the number levels of nested structs. This is
# used for constructing a stack for walking the run-time
# type information of the struct.
return 1
def global_init_code(self, entry, code):
# abstract
pass
def needs_nonecheck(self):
return 0
def public_decl(base_code, dll_linkage):
if dll_linkage:
return "%s(%s)" % (dll_linkage, base_code)
else:
return base_code
def create_typedef_type(name, base_type, cname, is_external=0):
is_fused = base_type.is_fused
if base_type.is_complex or is_fused:
if is_external:
if is_fused:
msg = "Fused"
else:
msg = "Complex"
raise ValueError("%s external typedefs not supported" % msg)
return base_type
else:
return CTypedefType(name, base_type, cname, is_external)
class CTypedefType(BaseType):
#
# Pseudo-type defined with a ctypedef statement in a
# 'cdef extern from' block.
# Delegates most attribute lookups to the base type.
# (Anything not defined here or in the BaseType is delegated.)
#
# qualified_name string
# typedef_name string
# typedef_cname string
# typedef_base_type PyrexType
# typedef_is_external bool
is_typedef = 1
typedef_is_external = 0
to_py_utility_code = None
from_py_utility_code = None
subtypes = ['typedef_base_type']
def __init__(self, name, base_type, cname, is_external=0):
assert not base_type.is_complex
self.typedef_name = name
self.typedef_cname = cname
self.typedef_base_type = base_type
self.typedef_is_external = is_external
def invalid_value(self):
return self.typedef_base_type.invalid_value()
def resolve(self):
return self.typedef_base_type.resolve()
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.typedef_name
else:
base_code = public_decl(self.typedef_cname, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def as_argument_type(self):
return self
def cast_code(self, expr_code):
# If self is really an array (rather than pointer), we can't cast.
# For example, the gmp mpz_t.
if self.typedef_base_type.is_array:
base_type = self.typedef_base_type.base_type
return CPtrType(base_type).cast_code(expr_code)
else:
return BaseType.cast_code(self, expr_code)
def __repr__(self):
return "<CTypedefType %s>" % self.typedef_cname
def __str__(self):
return self.typedef_name
def _create_utility_code(self, template_utility_code,
template_function_name):
type_name = type_identifier(self.typedef_cname)
utility_code = template_utility_code.specialize(
type = self.typedef_cname,
TypeName = type_name)
function_name = template_function_name % type_name
return utility_code, function_name
def create_to_py_utility_code(self, env):
if self.typedef_is_external:
if not self.to_py_utility_code:
base_type = self.typedef_base_type
if type(base_type) is CIntType:
self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load_cached(
"CIntToPy", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"TO_PY_FUNCTION": self.to_py_function}))
return True
elif base_type.is_float:
pass # XXX implement!
elif base_type.is_complex:
pass # XXX implement!
pass
if self.to_py_utility_code:
env.use_utility_code(self.to_py_utility_code)
return True
# delegation
return self.typedef_base_type.create_to_py_utility_code(env)
def create_from_py_utility_code(self, env):
if self.typedef_is_external:
if not self.from_py_utility_code:
base_type = self.typedef_base_type
if type(base_type) is CIntType:
self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load_cached(
"CIntFromPy", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"FROM_PY_FUNCTION": self.from_py_function}))
return True
elif base_type.is_float:
pass # XXX implement!
elif base_type.is_complex:
pass # XXX implement!
if self.from_py_utility_code:
env.use_utility_code(self.from_py_utility_code)
return True
# delegation
return self.typedef_base_type.create_from_py_utility_code(env)
def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
if to_py_function is None:
to_py_function = self.to_py_function
return self.typedef_base_type.to_py_call_code(
source_code, result_code, result_type, to_py_function)
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
if from_py_function is None:
from_py_function = self.from_py_function
if error_condition is None:
error_condition = self.error_condition(result_code)
return self.typedef_base_type.from_py_call_code(
source_code, result_code, error_pos, code, from_py_function, error_condition)
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
type = self.empty_declaration_code()
name = self.specialization_name()
if binop == "lshift":
env.use_utility_code(TempitaUtilityCode.load_cached(
"LeftShift", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
else:
if const_rhs:
binop += "_const"
_load_overflow_base(env)
env.use_utility_code(TempitaUtilityCode.load_cached(
"SizeCheck", "Overflow.c",
context={'TYPE': type, 'NAME': name}))
env.use_utility_code(TempitaUtilityCode.load_cached(
"Binop", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
return "__Pyx_%s_%s_checking_overflow" % (binop, name)
def error_condition(self, result_code):
if self.typedef_is_external:
if self.exception_value:
condition = "(%s == (%s)%s)" % (
result_code, self.typedef_cname, self.exception_value)
if self.exception_check:
condition += " && PyErr_Occurred()"
return condition
# delegation
return self.typedef_base_type.error_condition(result_code)
def __getattr__(self, name):
return getattr(self.typedef_base_type, name)
def py_type_name(self):
return self.typedef_base_type.py_type_name()
def can_coerce_to_pyobject(self, env):
return self.typedef_base_type.can_coerce_to_pyobject(env)
class MemoryViewSliceType(PyrexType):
is_memoryviewslice = 1
has_attributes = 1
scope = None
# These are special cased in Defnode
from_py_function = None
to_py_function = None
exception_value = None
exception_check = True
subtypes = ['dtype']
def __init__(self, base_dtype, axes):
"""
MemoryViewSliceType(base, axes)
Base is the C base type; axes is a list of (access, packing) strings,
where access is one of 'full', 'direct' or 'ptr' and packing is one of
'contig', 'strided' or 'follow'. There is one (access, packing) tuple
for each dimension.
the access specifiers determine whether the array data contains
pointers that need to be dereferenced along that axis when
retrieving/setting:
'direct' -- No pointers stored in this dimension.
'ptr' -- Pointer stored in this dimension.
'full' -- Check along this dimension, don't assume either.
        the packing specifiers specify how the array elements are laid out
in memory.
'contig' -- The data are contiguous in memory along this dimension.
At most one dimension may be specified as 'contig'.
        'strided' -- The data aren't contiguous along this dimension.
'follow' -- Used for C/Fortran contiguous arrays, a 'follow' dimension
has its stride automatically computed from extents of the other
dimensions to ensure C or Fortran memory layout.
C-contiguous memory has 'direct' as the access spec, 'contig' as the
*last* axis' packing spec and 'follow' for all other packing specs.
Fortran-contiguous memory has 'direct' as the access spec, 'contig' as
the *first* axis' packing spec and 'follow' for all other packing
specs.
"""
from . import MemoryView
self.dtype = base_dtype
self.axes = axes
self.ndim = len(axes)
self.flags = MemoryView.get_buf_flags(self.axes)
self.is_c_contig, self.is_f_contig = MemoryView.is_cf_contig(self.axes)
assert not (self.is_c_contig and self.is_f_contig)
self.mode = MemoryView.get_mode(axes)
self.writable_needed = False
if not self.dtype.is_fused:
self.dtype_name = MemoryView.mangle_dtype_name(self.dtype)
def __hash__(self):
return hash(self.__class__) ^ hash(self.dtype) ^ hash(tuple(self.axes))
def __eq__(self, other):
if isinstance(other, BaseType):
return self.same_as_resolved_type(other)
else:
return False
def same_as_resolved_type(self, other_type):
return ((other_type.is_memoryviewslice and
self.dtype.same_as(other_type.dtype) and
self.axes == other_type.axes) or
other_type is error_type)
def needs_nonecheck(self):
return True
def is_complete(self):
# incomplete since the underlying struct doesn't have a cython.memoryview object.
return 0
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
# XXX: we put these guards in for now...
assert not pyrex
assert not dll_linkage
from . import MemoryView
return self.base_declaration_code(
MemoryView.memviewslice_cname,
entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
self.scope = scope = Symtab.CClassScope(
'mvs_class_'+self.specialization_suffix(),
None,
visibility='extern')
scope.parent_type = self
scope.directives = {}
scope.declare_var('_data', c_char_ptr_type, None,
cname='data', is_cdef=1)
return True
def declare_attribute(self, attribute, env, pos):
from . import MemoryView, Options
scope = self.scope
if attribute == 'shape':
scope.declare_var('shape',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='shape',
is_cdef=1)
elif attribute == 'strides':
scope.declare_var('strides',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='strides',
is_cdef=1)
elif attribute == 'suboffsets':
scope.declare_var('suboffsets',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='suboffsets',
is_cdef=1)
elif attribute in ("copy", "copy_fortran"):
ndim = len(self.axes)
to_axes_c = [('direct', 'contig')]
to_axes_f = [('direct', 'contig')]
if ndim - 1:
to_axes_c = [('direct', 'follow')]*(ndim-1) + to_axes_c
to_axes_f = to_axes_f + [('direct', 'follow')]*(ndim-1)
to_memview_c = MemoryViewSliceType(self.dtype, to_axes_c)
to_memview_f = MemoryViewSliceType(self.dtype, to_axes_f)
for to_memview, cython_name in [(to_memview_c, "copy"),
(to_memview_f, "copy_fortran")]:
entry = scope.declare_cfunction(cython_name,
CFuncType(self, [CFuncTypeArg("memviewslice", self, None)]),
pos=pos,
defining=1,
cname=MemoryView.copy_c_or_fortran_cname(to_memview))
#entry.utility_code_definition = \
env.use_utility_code(MemoryView.get_copy_new_utility(pos, self, to_memview))
MemoryView.use_cython_array_utility_code(env)
elif attribute in ("is_c_contig", "is_f_contig"):
# is_c_contig and is_f_contig functions
for (c_or_f, cython_name) in (('c', 'is_c_contig'), ('f', 'is_f_contig')):
is_contig_name = \
MemoryView.get_is_contig_func_name(c_or_f, self.ndim)
cfunctype = CFuncType(
return_type=c_bint_type,
args=[CFuncTypeArg("memviewslice", self, None)],
exception_value="-1",
)
entry = scope.declare_cfunction(cython_name,
cfunctype,
pos=pos,
defining=1,
cname=is_contig_name)
entry.utility_code_definition = MemoryView.get_is_contig_utility(
attribute == 'is_c_contig', self.ndim)
return True
def specialization_name(self):
return super(MemoryViewSliceType,self).specialization_name() \
+ '_' + self.specialization_suffix()
def specialization_suffix(self):
return "%s_%s" % (self.axes_to_name(), self.dtype_name)
def can_coerce_to_pyobject(self, env):
return True
def check_for_null_code(self, cname):
return cname + '.memview'
def create_from_py_utility_code(self, env):
from . import MemoryView, Buffer
# We don't have 'code', so use a LazyUtilityCode with a callback.
def lazy_utility_callback(code):
context['dtype_typeinfo'] = Buffer.get_type_information_cname(code, self.dtype)
return TempitaUtilityCode.load(
"ObjectToMemviewSlice", "MemoryView_C.c", context=context)
env.use_utility_code(Buffer.acquire_utility_code)
env.use_utility_code(MemoryView.memviewslice_init_code)
env.use_utility_code(LazyUtilityCode(lazy_utility_callback))
if self.is_c_contig:
c_or_f_flag = "__Pyx_IS_C_CONTIG"
elif self.is_f_contig:
c_or_f_flag = "__Pyx_IS_F_CONTIG"
else:
c_or_f_flag = "0"
suffix = self.specialization_suffix()
funcname = "__Pyx_PyObject_to_MemoryviewSlice_" + suffix
context = dict(
MemoryView.context,
buf_flag = self.flags,
ndim = self.ndim,
axes_specs = ', '.join(self.axes_to_code()),
dtype_typedecl = self.dtype.empty_declaration_code(),
struct_nesting_depth = self.dtype.struct_nesting_depth(),
c_or_f_flag = c_or_f_flag,
funcname = funcname,
)
self.from_py_function = funcname
return True
def create_to_py_utility_code(self, env):
self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env)
return True
def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
assert self._dtype_to_py_func
assert self._dtype_from_py_func
to_py_func = "(PyObject *(*)(char *)) " + self._dtype_to_py_func
from_py_func = "(int (*)(char *, PyObject *)) " + self._dtype_from_py_func
tup = (result_code, source_code, self.ndim, to_py_func, from_py_func, self.dtype.is_pyobject)
return "%s = __pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup
def dtype_object_conversion_funcs(self, env):
get_function = "__pyx_memview_get_%s" % self.dtype_name
set_function = "__pyx_memview_set_%s" % self.dtype_name
context = dict(
get_function = get_function,
set_function = set_function,
)
if self.dtype.is_pyobject:
utility_name = "MemviewObjectToObject"
else:
to_py = self.dtype.create_to_py_utility_code(env)
from_py = self.dtype.create_from_py_utility_code(env)
if not (to_py or from_py):
return "NULL", "NULL"
if not self.dtype.to_py_function:
get_function = "NULL"
if not self.dtype.from_py_function:
set_function = "NULL"
utility_name = "MemviewDtypeToObject"
error_condition = (self.dtype.error_condition('value') or
'PyErr_Occurred()')
context.update(
to_py_function = self.dtype.to_py_function,
from_py_function = self.dtype.from_py_function,
dtype = self.dtype.empty_declaration_code(),
error_condition = error_condition,
)
utility = TempitaUtilityCode.load_cached(
utility_name, "MemoryView_C.c", context=context)
env.use_utility_code(utility)
return get_function, set_function
def axes_to_code(self):
"""Return a list of code constants for each axis"""
from . import MemoryView
d = MemoryView._spec_to_const
return ["(%s | %s)" % (d[a], d[p]) for a, p in self.axes]
def axes_to_name(self):
"""Return an abbreviated name for our axes"""
from . import MemoryView
d = MemoryView._spec_to_abbrev
return "".join(["%s%s" % (d[a], d[p]) for a, p in self.axes])
def error_condition(self, result_code):
return "!%s.memview" % result_code
def __str__(self):
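        # Renders the slice in Cython's bracket notation, e.g. a 2-dim
        # C-contiguous int slice prints as "int[:, ::1]": 'strided' axes
        # become ':' and the contiguous axis becomes '::1'.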
from . import MemoryView
axes_code_list = []
for idx, (access, packing) in enumerate(self.axes):
flag = MemoryView.get_memoryview_flag(access, packing)
if flag == "strided":
axes_code_list.append(":")
else:
if flag == 'contiguous':
have_follow = [p for a, p in self.axes[idx - 1:idx + 2]
if p == 'follow']
if have_follow or self.ndim == 1:
flag = '1'
axes_code_list.append("::" + flag)
if self.dtype.is_pyobject:
dtype_name = self.dtype.name
else:
dtype_name = self.dtype
return "%s[%s]" % (dtype_name, ", ".join(axes_code_list))
def specialize(self, values):
"""This does not validate the base type!!"""
dtype = self.dtype.specialize(values)
if dtype is not self.dtype:
return MemoryViewSliceType(dtype, self.axes)
return self
def cast_code(self, expr_code):
return expr_code
class BufferType(BaseType):
#
# Delegates most attribute lookups to the base type.
# (Anything not defined here or in the BaseType is delegated.)
#
# dtype PyrexType
# ndim int
# mode str
# negative_indices bool
# cast bool
# is_buffer bool
# writable bool
is_buffer = 1
writable = True
subtypes = ['dtype']
def __init__(self, base, dtype, ndim, mode, negative_indices, cast):
self.base = base
self.dtype = dtype
self.ndim = ndim
self.buffer_ptr_type = CPtrType(dtype)
self.mode = mode
self.negative_indices = negative_indices
self.cast = cast
def as_argument_type(self):
return self
def specialize(self, values):
dtype = self.dtype.specialize(values)
if dtype is not self.dtype:
return BufferType(self.base, dtype, self.ndim, self.mode,
self.negative_indices, self.cast)
return self
def __getattr__(self, name):
return getattr(self.base, name)
def __repr__(self):
return "<BufferType %r>" % self.base
def __str__(self):
# avoid ', ', as fused functions split the signature string on ', '
cast_str = ''
if self.cast:
cast_str = ',cast=True'
return "%s[%s,ndim=%d%s]" % (self.base, self.dtype, self.ndim,
cast_str)
def assignable_from(self, other_type):
if other_type.is_buffer:
return (self.same_as(other_type, compare_base=False) and
self.base.assignable_from(other_type.base))
return self.base.assignable_from(other_type)
def same_as(self, other_type, compare_base=True):
if not other_type.is_buffer:
return other_type.same_as(self.base)
return (self.dtype.same_as(other_type.dtype) and
self.ndim == other_type.ndim and
self.mode == other_type.mode and
self.cast == other_type.cast and
(not compare_base or self.base.same_as(other_type.base)))
class PyObjectType(PyrexType):
#
# Base class for all Python object types (reference-counted).
#
    #  buffer_defaults  dict or None     Default options for buffer
name = "object"
is_pyobject = 1
default_value = "0"
buffer_defaults = None
is_extern = False
is_subclassed = False
is_gc_simple = False
def __str__(self):
return "Python object"
def __repr__(self):
return "<PyObjectType>"
def can_coerce_to_pyobject(self, env):
return True
def default_coerced_ctype(self):
"""The default C type that this Python type coerces to, or None."""
return None
def assignable_from(self, src_type):
# except for pointers, conversion will be attempted
return not src_type.is_ptr or src_type.is_string or src_type.is_pyunicode_ptr
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "object"
else:
base_code = public_decl("PyObject", dll_linkage)
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def as_pyobject(self, cname):
if (not self.is_complete()) or self.is_extension_type:
return "(PyObject *)" + cname
else:
return cname
def py_type_name(self):
return "object"
def __lt__(self, other):
"""
Make sure we sort highest, as instance checking on py_type_name
('object') is always true
"""
return False
def global_init_code(self, entry, code):
code.put_init_var_to_py_none(entry, nanny=False)
def check_for_null_code(self, cname):
return cname
builtin_types_that_cannot_create_refcycles = set([
'bool', 'int', 'long', 'float', 'complex',
'bytearray', 'bytes', 'unicode', 'str', 'basestring'
])
class BuiltinObjectType(PyObjectType):
# objstruct_cname string Name of PyObject struct
is_builtin_type = 1
has_attributes = 1
base_type = None
module_name = '__builtin__'
# fields that let it look like an extension type
vtabslot_cname = None
vtabstruct_cname = None
vtabptr_cname = None
typedef_flag = True
is_external = True
decl_type = 'PyObject'
def __init__(self, name, cname, objstruct_cname=None):
self.name = name
self.cname = cname
self.typeptr_cname = "(&%s)" % cname
self.objstruct_cname = objstruct_cname
self.is_gc_simple = name in builtin_types_that_cannot_create_refcycles
if name == 'type':
# Special case the type type, as many C API calls (and other
# libraries) actually expect a PyTypeObject* for type arguments.
self.decl_type = objstruct_cname
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
def __str__(self):
return "%s object" % self.name
def __repr__(self):
return "<%s>"% self.cname
def default_coerced_ctype(self):
if self.name in ('bytes', 'bytearray'):
return c_char_ptr_type
elif self.name == 'bool':
return c_bint_type
elif self.name == 'float':
return c_double_type
return None
def assignable_from(self, src_type):
if isinstance(src_type, BuiltinObjectType):
if self.name == 'basestring':
return src_type.name in ('str', 'unicode', 'basestring')
else:
return src_type.name == self.name
elif src_type.is_extension_type:
# FIXME: This is an ugly special case that we currently
# keep supporting. It allows users to specify builtin
# types as external extension types, while keeping them
# compatible with the real builtin types. We already
# generate a warning for it. Big TODO: remove!
return (src_type.module_name == '__builtin__' and
src_type.name == self.name)
else:
return True
def typeobj_is_available(self):
return True
def attributes_known(self):
return True
def subtype_of(self, type):
return type.is_pyobject and type.assignable_from(self)
def type_check_function(self, exact=True):
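        # A sketch of the mapping below: 'dict' -> 'PyDict_Check' (or
        # 'PyDict_CheckExact' when exact is requested); 'str', 'basestring',
        # 'bytearray' and 'frozenset' get special-cased names.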
type_name = self.name
if type_name == 'str':
type_check = 'PyString_Check'
elif type_name == 'basestring':
type_check = '__Pyx_PyBaseString_Check'
elif type_name == 'bytearray':
type_check = 'PyByteArray_Check'
elif type_name == 'frozenset':
type_check = 'PyFrozenSet_Check'
else:
type_check = 'Py%s_Check' % type_name.capitalize()
if exact and type_name not in ('bool', 'slice'):
type_check += 'Exact'
return type_check
def isinstance_code(self, arg):
return '%s(%s)' % (self.type_check_function(exact=False), arg)
def type_test_code(self, arg, notnone=False, exact=True):
type_check = self.type_check_function(exact=exact)
check = 'likely(%s(%s))' % (type_check, arg)
if not notnone:
check += '||((%s) == Py_None)' % arg
if self.name == 'basestring':
name = '(PY_MAJOR_VERSION < 3 ? "basestring" : "str")'
space_for_name = 16
else:
name = '"%s"' % self.name
# avoid wasting too much space but limit number of different format strings
space_for_name = (len(self.name) // 16 + 1) * 16
error = '(PyErr_Format(PyExc_TypeError, "Expected %%.%ds, got %%.200s", %s, Py_TYPE(%s)->tp_name), 0)' % (
space_for_name, name, arg)
return check + '||' + error
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
else:
base_code = public_decl(self.decl_type, dll_linkage)
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def as_pyobject(self, cname):
if self.decl_type == 'PyObject':
return cname
else:
return "(PyObject *)" + cname
def cast_code(self, expr_code, to_object_struct = False):
return "((%s*)%s)" % (
to_object_struct and self.objstruct_cname or self.decl_type, # self.objstruct_cname may be None
expr_code)
def py_type_name(self):
return self.name
class PyExtensionType(PyObjectType):
#
# A Python extension type.
#
# name string
# scope CClassScope Attribute namespace
# visibility string
# typedef_flag boolean
# base_type PyExtensionType or None
# module_name string or None Qualified name of defining module
# objstruct_cname string Name of PyObject struct
# objtypedef_cname string Name of PyObject struct typedef
# typeobj_cname string or None C code fragment referring to type object
# typeptr_cname string or None Name of pointer to external type object
# vtabslot_cname string Name of C method table member
# vtabstruct_cname string Name of C method table struct
# vtabptr_cname string Name of pointer to C method table
# vtable_cname string Name of C method table definition
# defered_declarations [thunk] Used to declare class hierarchies in order
is_extension_type = 1
has_attributes = 1
objtypedef_cname = None
def __init__(self, name, typedef_flag, base_type, is_external=0):
self.name = name
self.scope = None
self.typedef_flag = typedef_flag
if base_type is not None:
base_type.is_subclassed = True
self.base_type = base_type
self.module_name = None
self.objstruct_cname = None
self.typeobj_cname = None
self.typeptr_cname = None
self.vtabslot_cname = None
self.vtabstruct_cname = None
self.vtabptr_cname = None
self.vtable_cname = None
self.is_external = is_external
self.defered_declarations = []
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
def needs_nonecheck(self):
return True
def subtype_of_resolved_type(self, other_type):
if other_type.is_extension_type or other_type.is_builtin_type:
return self is other_type or (
self.base_type and self.base_type.subtype_of(other_type))
else:
return other_type is py_object_type
def typeobj_is_available(self):
# Do we have a pointer to the type object?
return self.typeptr_cname
def typeobj_is_imported(self):
# If we don't know the C name of the type object but we do
# know which module it's defined in, it will be imported.
return self.typeobj_cname is None and self.module_name is not None
def assignable_from(self, src_type):
if self == src_type:
return True
if isinstance(src_type, PyExtensionType):
if src_type.base_type is not None:
return self.assignable_from(src_type.base_type)
if isinstance(src_type, BuiltinObjectType):
# FIXME: This is an ugly special case that we currently
# keep supporting. It allows users to specify builtin
# types as external extension types, while keeping them
# compatible with the real builtin types. We already
# generate a warning for it. Big TODO: remove!
return (self.module_name == '__builtin__' and
self.name == src_type.name)
return False
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0, deref = 0):
if pyrex or for_display:
base_code = self.name
else:
if self.typedef_flag:
objstruct = self.objstruct_cname
else:
objstruct = "struct %s" % self.objstruct_cname
base_code = public_decl(objstruct, dll_linkage)
if deref:
assert not entity_code
else:
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def type_test_code(self, py_arg, notnone=False):
none_check = "((%s) == Py_None)" % py_arg
type_check = "likely(__Pyx_TypeTest(%s, %s))" % (
py_arg, self.typeptr_cname)
if notnone:
return type_check
else:
return "likely(%s || %s)" % (none_check, type_check)
def attributes_known(self):
return self.scope is not None
def __str__(self):
return self.name
def __repr__(self):
return "<PyExtensionType %s%s>" % (self.scope.class_name,
("", " typedef")[self.typedef_flag])
def py_type_name(self):
if not self.module_name:
return self.name
return "__import__(%r, None, None, ['']).%s" % (self.module_name,
self.name)
class CType(PyrexType):
#
# Base class for all C types (non-reference-counted).
#
# to_py_function string C function for converting to Python object
# from_py_function string C function for constructing from Python object
#
to_py_function = None
from_py_function = None
exception_value = None
exception_check = 1
def create_to_py_utility_code(self, env):
return self.to_py_function is not None
def create_from_py_utility_code(self, env):
return self.from_py_function is not None
def can_coerce_to_pyobject(self, env):
return self.create_to_py_utility_code(env)
def error_condition(self, result_code):
conds = []
if self.is_string or self.is_pyunicode_ptr:
conds.append("(!%s)" % result_code)
elif self.exception_value is not None:
conds.append("(%s == (%s)%s)" % (result_code, self.sign_and_name(), self.exception_value))
if self.exception_check:
conds.append("PyErr_Occurred()")
if len(conds) > 0:
return " && ".join(conds)
else:
return 0
def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
func = self.to_py_function if to_py_function is None else to_py_function
assert func
if self.is_string or self.is_cpp_string:
if result_type.is_builtin_type:
result_type_name = result_type.name
if result_type_name in ('bytes', 'str', 'unicode'):
func = func.replace("Object", result_type_name.title(), 1)
elif result_type_name == 'bytearray':
func = func.replace("Object", "ByteArray", 1)
return '%s = %s(%s)' % (
result_code,
func,
source_code or 'NULL')
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
return '%s = %s(%s); %s' % (
result_code,
from_py_function or self.from_py_function,
source_code,
code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
class CConstType(BaseType):
is_const = 1
def __init__(self, const_base_type):
self.const_base_type = const_base_type
if const_base_type.has_attributes and const_base_type.scope is not None:
from . import Symtab
self.scope = Symtab.CConstScope(const_base_type.scope)
def __repr__(self):
return "<CConstType %s>" % repr(self.const_base_type)
def __str__(self):
return self.declaration_code("", for_display=1)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if for_display or pyrex:
return "const " + self.const_base_type.declaration_code(entity_code, for_display, dll_linkage, pyrex)
else:
return self.const_base_type.declaration_code("const %s" % entity_code, for_display, dll_linkage, pyrex)
def specialize(self, values):
base_type = self.const_base_type.specialize(values)
if base_type == self.const_base_type:
return self
else:
return CConstType(base_type)
def deduce_template_params(self, actual):
return self.const_base_type.deduce_template_params(actual)
def can_coerce_to_pyobject(self, env):
return self.const_base_type.can_coerce_to_pyobject(env)
def create_to_py_utility_code(self, env):
if self.const_base_type.create_to_py_utility_code(env):
self.to_py_function = self.const_base_type.to_py_function
return True
def __getattr__(self, name):
return getattr(self.const_base_type, name)
class FusedType(CType):
"""
Represents a Fused Type. All it needs to do is keep track of the types
it aggregates, as it will be replaced with its specific version wherever
needed.
See http://wiki.cython.org/enhancements/fusedtypes
types [PyrexType] is the list of types to be fused
name str the name of the ctypedef
"""
is_fused = 1
exception_check = 0
def __init__(self, types, name=None):
# Use list rather than set to preserve order (list should be short).
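        # A sketch of the flattening: FusedType([int, FusedType([float, double])])
        # ends up with self.types == [int, float, double]; nested fused types
        # are merged in order and duplicates are dropped.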
flattened_types = []
for t in types:
if t.is_fused:
# recursively merge in subtypes
for subtype in t.types:
if subtype not in flattened_types:
flattened_types.append(subtype)
elif t not in flattened_types:
flattened_types.append(t)
self.types = flattened_types
self.name = name
def declaration_code(self, entity_code, for_display = 0,
dll_linkage = None, pyrex = 0):
if pyrex or for_display:
return self.name
raise Exception("This may never happen, please report a bug")
def __repr__(self):
return 'FusedType(name=%r)' % self.name
def specialize(self, values):
return values[self]
def get_fused_types(self, result=None, seen=None):
if result is None:
return [self]
if self not in seen:
result.append(self)
seen.add(self)
class CVoidType(CType):
#
# C "void" type
#
is_void = 1
to_py_function = "__Pyx_void_to_None"
def __repr__(self):
return "<CVoidType>"
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "void"
else:
base_code = public_decl("void", dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def is_complete(self):
return 0
class InvisibleVoidType(CVoidType):
#
# For use with C++ constructors and destructors return types.
# Acts like void, but does not print out a declaration.
#
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "[void]"
else:
base_code = public_decl("", dll_linkage)
return self.base_declaration_code(base_code, entity_code)
class CNumericType(CType):
#
# Base class for all C numeric types.
#
# rank integer Relative size
# signed integer 0 = unsigned, 1 = unspecified, 2 = explicitly signed
#
is_numeric = 1
default_value = "0"
has_attributes = True
scope = None
sign_words = ("unsigned ", "", "signed ")
def __init__(self, rank, signed = 1):
self.rank = rank
if rank > 0 and signed == SIGNED:
# Signed is meaningless for anything but char, and complicates
# type promotion.
signed = 1
self.signed = signed
def sign_and_name(self):
s = self.sign_words[self.signed]
n = rank_to_type_name[self.rank]
return s + n
def __repr__(self):
return "<CNumericType %s>" % self.sign_and_name()
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
type_name = self.sign_and_name()
if pyrex or for_display:
base_code = type_name.replace('PY_LONG_LONG', 'long long')
else:
base_code = public_decl(type_name, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
self.scope = scope = Symtab.CClassScope(
'',
None,
visibility="extern")
scope.parent_type = self
scope.directives = {}
scope.declare_cfunction(
"conjugate",
CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
pos=None,
defining=1,
cname=" ")
return True
def __lt__(self, other):
"""Sort based on rank, preferring signed over unsigned"""
if other.is_numeric:
return self.rank > other.rank and self.signed >= other.signed
# Prefer numeric types over others
return True
def py_type_name(self):
if self.rank <= 4:
return "(int, long)"
return "float"
class ForbidUseClass:
def __repr__(self):
raise RuntimeError()
def __str__(self):
raise RuntimeError()
ForbidUse = ForbidUseClass()
class CIntType(CNumericType):
is_int = 1
typedef_flag = 0
to_py_function = None
from_py_function = None
exception_value = -1
def can_coerce_to_pyobject(self, env):
return True
def create_to_py_utility_code(self, env):
if type(self).to_py_function is None:
self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load_cached(
"CIntToPy", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"TO_PY_FUNCTION": self.to_py_function}))
return True
def create_from_py_utility_code(self, env):
if type(self).from_py_function is None:
self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load_cached(
"CIntFromPy", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"FROM_PY_FUNCTION": self.from_py_function}))
return True
def get_to_py_type_conversion(self):
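        # A sketch of the names assembled below: signed 'int' ->
        # 'PyInt_FromLong', 'unsigned long' -> 'PyLong_FromUnsignedLong',
        # 'PY_LONG_LONG' -> 'PyLong_FromLongLong'.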
if self.rank < list(rank_to_type_name).index('int'):
# This assumes sizeof(short) < sizeof(int)
return "PyInt_FromLong"
else:
# Py{Int|Long}_From[Unsigned]Long[Long]
Prefix = "Int"
SignWord = ""
TypeName = "Long"
if not self.signed:
Prefix = "Long"
SignWord = "Unsigned"
if self.rank >= list(rank_to_type_name).index('PY_LONG_LONG'):
Prefix = "Long"
TypeName = "LongLong"
return "Py%s_From%s%s" % (Prefix, SignWord, TypeName)
def assignable_from_resolved_type(self, src_type):
return src_type.is_int or src_type.is_enum or src_type is error_type
def invalid_value(self):
if rank_to_type_name[int(self.rank)] == 'char':
return "'?'"
else:
# We do not really know the size of the type, so return
# a 32-bit literal and rely on casting to final type. It will
# be negative for signed ints, which is good.
return "0xbad0bad0"
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
type = self.empty_declaration_code()
name = self.specialization_name()
if binop == "lshift":
env.use_utility_code(TempitaUtilityCode.load_cached(
"LeftShift", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
else:
if const_rhs:
binop += "_const"
if type in ('int', 'long', 'long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseSigned", "Overflow.c",
context={'INT': type, 'NAME': name}))
elif type in ('unsigned int', 'unsigned long', 'unsigned long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseUnsigned", "Overflow.c",
context={'UINT': type, 'NAME': name}))
elif self.rank <= 1:
# sizeof(short) < sizeof(int)
return "__Pyx_%s_%s_no_overflow" % (binop, name)
else:
_load_overflow_base(env)
env.use_utility_code(TempitaUtilityCode.load_cached(
"SizeCheck", "Overflow.c",
context={'TYPE': type, 'NAME': name}))
env.use_utility_code(TempitaUtilityCode.load_cached(
"Binop", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
return "__Pyx_%s_%s_checking_overflow" % (binop, name)
def _load_overflow_base(env):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
for type in ('int', 'long', 'long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseSigned", "Overflow.c",
context={'INT': type, 'NAME': type.replace(' ', '_')}))
for type in ('unsigned int', 'unsigned long', 'unsigned long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseUnsigned", "Overflow.c",
context={'UINT': type, 'NAME': type.replace(' ', '_')}))
class CAnonEnumType(CIntType):
is_enum = 1
def sign_and_name(self):
return 'int'
class CReturnCodeType(CIntType):
to_py_function = "__Pyx_Owned_Py_None"
is_returncode = True
exception_check = False
class CBIntType(CIntType):
to_py_function = "__Pyx_PyBool_FromLong"
from_py_function = "__Pyx_PyObject_IsTrue"
exception_check = 1 # for C++ bool
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if for_display:
base_code = 'bool'
elif pyrex:
base_code = 'bint'
else:
base_code = public_decl('int', dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def __repr__(self):
return "<CNumericType bint>"
def __str__(self):
return 'bint'
def py_type_name(self):
return "bool"
class CPyUCS4IntType(CIntType):
# Py_UCS4
is_unicode_char = True
# Py_UCS4 coerces from and to single character unicode strings (or
# at most two characters on 16bit Unicode builds), but we also
# allow Python integers as input. The value range for Py_UCS4
# is 0..1114111, which is checked when converting from an integer
# value.
to_py_function = "PyUnicode_FromOrdinal"
from_py_function = "__Pyx_PyObject_AsPy_UCS4"
def create_from_py_utility_code(self, env):
env.use_utility_code(UtilityCode.load_cached("ObjectAsUCS4", "TypeConversion.c"))
return True
def sign_and_name(self):
return "Py_UCS4"
class CPyUnicodeIntType(CIntType):
# Py_UNICODE
is_unicode_char = True
# Py_UNICODE coerces from and to single character unicode strings,
# but we also allow Python integers as input. The value range for
# Py_UNICODE is 0..1114111, which is checked when converting from
# an integer value.
to_py_function = "PyUnicode_FromOrdinal"
from_py_function = "__Pyx_PyObject_AsPy_UNICODE"
def create_from_py_utility_code(self, env):
env.use_utility_code(UtilityCode.load_cached("ObjectAsPyUnicode", "TypeConversion.c"))
return True
def sign_and_name(self):
return "Py_UNICODE"
class CPyHashTType(CIntType):
to_py_function = "__Pyx_PyInt_FromHash_t"
from_py_function = "__Pyx_PyInt_AsHash_t"
def sign_and_name(self):
return "Py_hash_t"
class CPySSizeTType(CIntType):
to_py_function = "PyInt_FromSsize_t"
from_py_function = "__Pyx_PyIndex_AsSsize_t"
def sign_and_name(self):
return "Py_ssize_t"
class CSSizeTType(CIntType):
to_py_function = "PyInt_FromSsize_t"
from_py_function = "PyInt_AsSsize_t"
def sign_and_name(self):
return "Py_ssize_t"
class CSizeTType(CIntType):
to_py_function = "__Pyx_PyInt_FromSize_t"
def sign_and_name(self):
return "size_t"
class CPtrdiffTType(CIntType):
def sign_and_name(self):
return "ptrdiff_t"
class CFloatType(CNumericType):
is_float = 1
to_py_function = "PyFloat_FromDouble"
from_py_function = "__pyx_PyFloat_AsDouble"
exception_value = -1
def __init__(self, rank, math_h_modifier = ''):
CNumericType.__init__(self, rank, 1)
self.math_h_modifier = math_h_modifier
if rank == RANK_FLOAT:
self.from_py_function = "__pyx_PyFloat_AsFloat"
def assignable_from_resolved_type(self, src_type):
return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type
def invalid_value(self):
return Naming.PYX_NAN
class CComplexType(CNumericType):
is_complex = 1
to_py_function = "__pyx_PyComplex_FromComplex"
has_attributes = 1
scope = None
def __init__(self, real_type):
while real_type.is_typedef and not real_type.typedef_is_external:
real_type = real_type.typedef_base_type
if real_type.is_typedef and real_type.typedef_is_external:
# The below is not actually used: Coercions are currently disabled
# so that complex types of external types can not be created
self.funcsuffix = "_%s" % real_type.specialization_name()
elif hasattr(real_type, 'math_h_modifier'):
self.funcsuffix = real_type.math_h_modifier
else:
self.funcsuffix = "_%s" % real_type.specialization_name()
self.real_type = real_type
CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed)
self.binops = {}
self.from_parts = "%s_from_parts" % self.specialization_name()
self.default_value = "%s(0, 0)" % self.from_parts
def __eq__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type == other.real_type
else:
return False
def __ne__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type != other.real_type
else:
return True
def __lt__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type < other.real_type
else:
# this is arbitrary, but it makes sure we always have
# *some* kind of order
return False
def __hash__(self):
return ~hash(self.real_type)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
real_code = self.real_type.declaration_code("", for_display, dll_linkage, pyrex)
base_code = "%s complex" % real_code
else:
base_code = public_decl(self.sign_and_name(), dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def sign_and_name(self):
real_type_name = self.real_type.specialization_name()
real_type_name = real_type_name.replace('long__double','long_double')
real_type_name = real_type_name.replace('PY_LONG_LONG','long_long')
return Naming.type_prefix + real_type_name + "_complex"
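        # e.g. assuming Naming.type_prefix is "__pyx_t_", a double complex is
        # named "__pyx_t_double_complex" in the generated C code.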
def assignable_from(self, src_type):
# Temporary hack/feature disabling, see #441
if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef
and src_type.typedef_is_external):
return False
else:
return super(CComplexType, self).assignable_from(src_type)
def assignable_from_resolved_type(self, src_type):
return (src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type)
or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)
or src_type is error_type)
def attributes_known(self):
if self.scope is None:
from . import Symtab
self.scope = scope = Symtab.CClassScope(
'',
None,
visibility="extern")
scope.parent_type = self
scope.directives = {}
scope.declare_var("real", self.real_type, None, cname="real", is_cdef=True)
scope.declare_var("imag", self.real_type, None, cname="imag", is_cdef=True)
scope.declare_cfunction(
"conjugate",
CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
pos=None,
defining=1,
cname="__Pyx_c_conj%s" % self.funcsuffix)
return True
def create_declaration_utility_code(self, env):
# This must always be run, because a single CComplexType instance can be shared
# across multiple compilations (the one created in the module scope)
env.use_utility_code(complex_header_utility_code)
env.use_utility_code(complex_real_imag_utility_code)
for utility_code in (complex_type_utility_code,
complex_from_parts_utility_code,
complex_arithmetic_utility_code):
env.use_utility_code(
utility_code.specialize(
self,
real_type = self.real_type.empty_declaration_code(),
m = self.funcsuffix,
is_float = self.real_type.is_float))
return True
def can_coerce_to_pyobject(self, env):
return True
def create_to_py_utility_code(self, env):
env.use_utility_code(complex_real_imag_utility_code)
env.use_utility_code(complex_to_py_utility_code)
return True
def create_from_py_utility_code(self, env):
self.real_type.create_from_py_utility_code(env)
for utility_code in (complex_from_parts_utility_code,
complex_from_py_utility_code):
env.use_utility_code(
utility_code.specialize(
self,
real_type = self.real_type.empty_declaration_code(),
m = self.funcsuffix,
is_float = self.real_type.is_float))
self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name()
return True
def lookup_op(self, nargs, op):
try:
return self.binops[nargs, op]
except KeyError:
pass
try:
op_name = complex_ops[nargs, op]
self.binops[nargs, op] = func_name = "__Pyx_c_%s%s" % (op_name, self.funcsuffix)
return func_name
except KeyError:
return None
def unary_op(self, op):
return self.lookup_op(1, op)
def binary_op(self, op):
return self.lookup_op(2, op)
def py_type_name(self):
return "complex"
def cast_code(self, expr_code):
return expr_code
complex_ops = {
(1, '-'): 'neg',
(1, 'zero'): 'is_zero',
(2, '+'): 'sum',
(2, '-'): 'diff',
(2, '*'): 'prod',
(2, '/'): 'quot',
(2, '=='): 'eq',
}
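# Illustrative: assuming the usual math.h modifiers ('f' for float, '' for
# double), lookup_op(2, '+') on a float complex resolves to "__Pyx_c_sumf"
# and on a double complex to "__Pyx_c_sum".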
complex_header_utility_code = UtilityCode(
proto_block='h_code',
proto="""
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
""")
complex_real_imag_utility_code = UtilityCode(
proto="""
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
""")
complex_type_utility_code = UtilityCode(
proto_block='complex_type_declarations',
proto="""
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< %(real_type)s > %(type_name)s;
#else
typedef %(real_type)s _Complex %(type_name)s;
#endif
#else
typedef struct { %(real_type)s real, imag; } %(type_name)s;
#endif
""")
complex_from_parts_utility_code = UtilityCode(
proto_block='utility_code_proto',
proto="""
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s, %(real_type)s);
""",
impl="""
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
return ::std::complex< %(real_type)s >(x, y);
}
#else
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
return x + y*(%(type)s)_Complex_I;
}
#endif
#else
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
%(type)s z;
z.real = x;
z.imag = y;
return z;
}
#endif
""")
complex_to_py_utility_code = UtilityCode(
proto="""
#define __pyx_PyComplex_FromComplex(z) \\
PyComplex_FromDoubles((double)__Pyx_CREAL(z), \\
(double)__Pyx_CIMAG(z))
""")
complex_from_py_utility_code = UtilityCode(
proto="""
static %(type)s __Pyx_PyComplex_As_%(type_name)s(PyObject*);
""",
impl="""
static %(type)s __Pyx_PyComplex_As_%(type_name)s(PyObject* o) {
Py_complex cval;
#if CYTHON_COMPILING_IN_CPYTHON
if (PyComplex_CheckExact(o))
cval = ((PyComplexObject *)o)->cval;
else
#endif
cval = PyComplex_AsCComplex(o);
return %(type_name)s_from_parts(
(%(real_type)s)cval.real,
(%(real_type)s)cval.imag);
}
""")
complex_arithmetic_utility_code = UtilityCode(
proto="""
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq%(m)s(a, b) ((a)==(b))
#define __Pyx_c_sum%(m)s(a, b) ((a)+(b))
#define __Pyx_c_diff%(m)s(a, b) ((a)-(b))
#define __Pyx_c_prod%(m)s(a, b) ((a)*(b))
#define __Pyx_c_quot%(m)s(a, b) ((a)/(b))
#define __Pyx_c_neg%(m)s(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero%(m)s(z) ((z)==(%(real_type)s)0)
#define __Pyx_c_conj%(m)s(z) (::std::conj(z))
#if %(is_float)s
#define __Pyx_c_abs%(m)s(z) (::std::abs(z))
#define __Pyx_c_pow%(m)s(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero%(m)s(z) ((z)==0)
#define __Pyx_c_conj%(m)s(z) (conj%(m)s(z))
#if %(is_float)s
#define __Pyx_c_abs%(m)s(z) (cabs%(m)s(z))
#define __Pyx_c_pow%(m)s(a, b) (cpow%(m)s(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_sum%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_diff%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_prod%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_quot%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_neg%(m)s(%(type)s);
static CYTHON_INLINE int __Pyx_c_is_zero%(m)s(%(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_conj%(m)s(%(type)s);
#if %(is_float)s
static CYTHON_INLINE %(real_type)s __Pyx_c_abs%(m)s(%(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_pow%(m)s(%(type)s, %(type)s);
#endif
#endif
""",
impl="""
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq%(m)s(%(type)s a, %(type)s b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE %(type)s __Pyx_c_sum%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_diff%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_prod%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_quot%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
%(real_type)s denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_neg%(m)s(%(type)s a) {
%(type)s z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero%(m)s(%(type)s a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE %(type)s __Pyx_c_conj%(m)s(%(type)s a) {
%(type)s z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if %(is_float)s
static CYTHON_INLINE %(real_type)s __Pyx_c_abs%(m)s(%(type)s z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt%(m)s(z.real*z.real + z.imag*z.imag);
#else
return hypot%(m)s(z.real, z.imag);
#endif
}
static CYTHON_INLINE %(type)s __Pyx_c_pow%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
%(real_type)s r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
%(real_type)s denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
                return __Pyx_c_prod%(m)s(a, a);
case 3:
z = __Pyx_c_prod%(m)s(a, a);
return __Pyx_c_prod%(m)s(z, a);
case 4:
z = __Pyx_c_prod%(m)s(a, a);
return __Pyx_c_prod%(m)s(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
}
r = a.real;
theta = 0;
} else {
r = __Pyx_c_abs%(m)s(a);
theta = atan2%(m)s(a.imag, a.real);
}
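    /* General case via polar form: a**b = exp(b*log(a)). With a = r*e^(i*theta),
       log(a) = log(r) + i*theta, so |z| = exp(lnr*b.real - theta*b.imag) and
       arg(z) = theta*b.real + lnr*b.imag, as computed below. */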
lnr = log%(m)s(r);
z_r = exp%(m)s(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos%(m)s(z_theta);
z.imag = z_r * sin%(m)s(z_theta);
return z;
}
#endif
#endif
""")
class CPointerBaseType(CType):
# common base type for pointer/array types
#
# base_type CType Reference type
subtypes = ['base_type']
def __init__(self, base_type):
self.base_type = base_type
for char_type in (c_char_type, c_uchar_type, c_schar_type):
if base_type.same_as(char_type):
self.is_string = 1
break
else:
if base_type.same_as(c_py_unicode_type):
self.is_pyunicode_ptr = 1
if self.is_string and not base_type.is_error:
if base_type.signed == 2:
self.to_py_function = "__Pyx_PyObject_FromCString"
if self.is_ptr:
self.from_py_function = "__Pyx_PyObject_AsSString"
elif base_type.signed:
self.to_py_function = "__Pyx_PyObject_FromString"
if self.is_ptr:
self.from_py_function = "__Pyx_PyObject_AsString"
else:
self.to_py_function = "__Pyx_PyObject_FromCString"
if self.is_ptr:
self.from_py_function = "__Pyx_PyObject_AsUString"
self.exception_value = "NULL"
elif self.is_pyunicode_ptr and not base_type.is_error:
self.to_py_function = "__Pyx_PyUnicode_FromUnicode"
if self.is_ptr:
self.from_py_function = "__Pyx_PyUnicode_AsUnicode"
self.exception_value = "NULL"
def py_type_name(self):
if self.is_string:
return "bytes"
elif self.is_pyunicode_ptr:
return "unicode"
else:
return super(CPointerBaseType, self).py_type_name()
def literal_code(self, value):
if self.is_string:
assert isinstance(value, str)
return '"%s"' % StringEncoding.escape_byte_string(value)
class CArrayType(CPointerBaseType):
# base_type CType Element type
# size integer or None Number of elements
is_array = 1
to_tuple_function = None
def __init__(self, base_type, size):
super(CArrayType, self).__init__(base_type)
self.size = size
def __eq__(self, other):
if isinstance(other, CType) and other.is_array and self.size == other.size:
return self.base_type.same_as(other.base_type)
return False
def __hash__(self):
return hash(self.base_type) + 28 # arbitrarily chosen offset
def __repr__(self):
return "<CArrayType %s %s>" % (self.size, repr(self.base_type))
def same_as_resolved_type(self, other_type):
return ((other_type.is_array and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
def assignable_from_resolved_type(self, src_type):
# C arrays are assigned by value, either Python containers or C arrays/pointers
if src_type.is_pyobject:
return True
if src_type.is_ptr or src_type.is_array:
return self.base_type.assignable_from(src_type.base_type)
return False
def element_ptr_type(self):
return c_ptr_type(self.base_type)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if self.size is not None:
dimension_code = self.size
else:
dimension_code = ""
if entity_code.startswith("*"):
entity_code = "(%s)" % entity_code
return self.base_type.declaration_code(
"%s[%s]" % (entity_code, dimension_code),
for_display, dll_linkage, pyrex)
def as_argument_type(self):
return c_ptr_type(self.base_type)
def is_complete(self):
return self.size is not None
def specialize(self, values):
base_type = self.base_type.specialize(values)
if base_type == self.base_type:
return self
else:
return CArrayType(base_type, self.size)
def deduce_template_params(self, actual):
if isinstance(actual, CArrayType):
return self.base_type.deduce_template_params(actual.base_type)
else:
return None
def can_coerce_to_pyobject(self, env):
return self.base_type.can_coerce_to_pyobject(env)
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return self.to_py_function
if not self.base_type.create_to_py_utility_code(env):
return False
base_type = self.base_type.declaration_code("", pyrex=1)
safe_typename = re.sub('[^a-zA-Z0-9]', '__', base_type)
to_py_function = "__Pyx_carray_to_py_%s" % safe_typename
to_tuple_function = "__Pyx_carray_to_tuple_%s" % safe_typename
from .UtilityCode import CythonUtilityCode
context = {
'cname': to_py_function,
'to_tuple_cname': to_tuple_function,
'base_type': base_type,
}
env.use_utility_code(CythonUtilityCode.load(
"carray.to_py", "CConvert.pyx",
outer_module_scope=env.global_scope(), # need access to types declared in module
context=context, compiler_directives=dict(env.global_scope().directives)))
self.to_tuple_function = to_tuple_function
self.to_py_function = to_py_function
return True
def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
func = self.to_py_function if to_py_function is None else to_py_function
if self.is_string or self.is_pyunicode_ptr:
return '%s = %s(%s)' % (
result_code,
func,
source_code)
target_is_tuple = result_type.is_builtin_type and result_type.name == 'tuple'
return '%s = %s(%s, %s)' % (
result_code,
self.to_tuple_function if target_is_tuple else func,
source_code,
self.size)
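    # Illustrative: for "cdef int arr[4]" coerced to a list, the generated
    # call is roughly "result = __Pyx_carray_to_py_int(arr, 4)"; when the
    # target is a tuple, the to_tuple variant is substituted instead.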
def create_from_py_utility_code(self, env):
if self.from_py_function is not None:
return self.from_py_function
if not self.base_type.create_from_py_utility_code(env):
return False
base_type = self.base_type.declaration_code("", pyrex=1)
safe_typename = re.sub('[^a-zA-Z0-9]', '__', base_type)
from_py_function = "__Pyx_carray_from_py_%s" % safe_typename
from .UtilityCode import CythonUtilityCode
context = {
'cname': from_py_function,
'base_type': base_type,
}
env.use_utility_code(CythonUtilityCode.load(
"carray.from_py", "CConvert.pyx",
outer_module_scope=env.global_scope(), # need access to types declared in module
context=context, compiler_directives=dict(env.global_scope().directives)))
self.from_py_function = from_py_function
return True
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
call_code = "%s(%s, %s, %s)" % (
from_py_function or self.from_py_function,
source_code, result_code, self.size)
return code.error_goto_if_neg(call_code, error_pos)
class CPtrType(CPointerBaseType):
# base_type CType Reference type
is_ptr = 1
default_value = "0"
def __hash__(self):
return hash(self.base_type) + 27 # arbitrarily chosen offset
def __eq__(self, other):
if isinstance(other, CType) and other.is_ptr:
return self.base_type.same_as(other.base_type)
return False
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "<CPtrType %s>" % repr(self.base_type)
def same_as_resolved_type(self, other_type):
return ((other_type.is_ptr and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CPtrType.declaration_code: pointer to", self.base_type ###
return self.base_type.declaration_code(
"*%s" % entity_code,
for_display, dll_linkage, pyrex)
def assignable_from_resolved_type(self, other_type):
if other_type is error_type:
return 1
if other_type.is_null_ptr:
return 1
if self.base_type.is_const:
self = CPtrType(self.base_type.const_base_type)
if self.base_type.is_cfunction:
if other_type.is_ptr:
other_type = other_type.base_type.resolve()
if other_type.is_cfunction:
return self.base_type.pointer_assignable_from_resolved_type(other_type)
else:
return 0
if (self.base_type.is_cpp_class and other_type.is_ptr
and other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(self.base_type)):
return 1
if other_type.is_array or other_type.is_ptr:
return self.base_type.is_void or self.base_type.same_as(other_type.base_type)
return 0
def specialize(self, values):
base_type = self.base_type.specialize(values)
if base_type == self.base_type:
return self
else:
return CPtrType(base_type)
def deduce_template_params(self, actual):
if isinstance(actual, CPtrType):
return self.base_type.deduce_template_params(actual.base_type)
else:
return None
def invalid_value(self):
return "1"
def find_cpp_operation_type(self, operator, operand_type=None):
if self.base_type.is_cpp_class:
return self.base_type.find_cpp_operation_type(operator, operand_type)
return None
class CNullPtrType(CPtrType):
is_null_ptr = 1
class CReferenceType(BaseType):
is_reference = 1
is_fake_reference = 0
def __init__(self, base_type):
self.ref_base_type = base_type
def __repr__(self):
return "<CReferenceType %s>" % repr(self.ref_base_type)
def __str__(self):
return "%s &" % self.ref_base_type
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CReferenceType.declaration_code: pointer to", self.base_type ###
return self.ref_base_type.declaration_code(
"&%s" % entity_code,
for_display, dll_linkage, pyrex)
def specialize(self, values):
base_type = self.ref_base_type.specialize(values)
if base_type == self.ref_base_type:
return self
else:
return type(self)(base_type)
def deduce_template_params(self, actual):
return self.ref_base_type.deduce_template_params(actual)
def __getattr__(self, name):
return getattr(self.ref_base_type, name)
class CFakeReferenceType(CReferenceType):
is_fake_reference = 1
def __repr__(self):
return "<CFakeReferenceType %s>" % repr(self.ref_base_type)
def __str__(self):
return "%s [&]" % self.ref_base_type
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CReferenceType.declaration_code: pointer to", self.base_type ###
return "__Pyx_FakeReference<%s> %s" % (self.ref_base_type.empty_declaration_code(), entity_code)
class CFuncType(CType):
# return_type CType
# args [CFuncTypeArg]
# has_varargs boolean
# exception_value string
# exception_check boolean True if PyErr_Occurred check needed
# calling_convention string Function calling convention
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
# templates [string] or None
# cached_specialized_types [CFuncType] cached specialized versions of the CFuncType if defined in a pxd
# from_fused boolean Indicates whether this is a specialized
# C function
# is_strict_signature boolean function refuses to accept coerced arguments
# (used for optimisation overrides)
# is_const_method boolean
# is_static_method boolean
is_cfunction = 1
original_sig = None
cached_specialized_types = None
from_fused = False
is_const_method = False
subtypes = ['return_type', 'args']
def __init__(self, return_type, args, has_varargs = 0,
exception_value = None, exception_check = 0, calling_convention = "",
nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0,
is_const_method = False, is_static_method=False,
templates = None, is_strict_signature = False):
self.return_type = return_type
self.args = args
self.has_varargs = has_varargs
self.optional_arg_count = optional_arg_count
self.exception_value = exception_value
self.exception_check = exception_check
self.calling_convention = calling_convention
self.nogil = nogil
self.with_gil = with_gil
self.is_overridable = is_overridable
self.is_const_method = is_const_method
self.is_static_method = is_static_method
self.templates = templates
self.is_strict_signature = is_strict_signature
def __repr__(self):
arg_reprs = list(map(repr, self.args))
if self.has_varargs:
arg_reprs.append("...")
if self.exception_value:
except_clause = " %r" % self.exception_value
else:
except_clause = ""
if self.exception_check:
except_clause += "?"
return "<CFuncType %s %s[%s]%s>" % (
repr(self.return_type),
self.calling_convention_prefix(),
",".join(arg_reprs),
except_clause)
def calling_convention_prefix(self):
cc = self.calling_convention
if cc:
return cc + " "
else:
return ""
def as_argument_type(self):
return c_ptr_type(self)
def same_c_signature_as(self, other_type, as_cmethod = 0):
return self.same_c_signature_as_resolved_type(
other_type.resolve(), as_cmethod)
def same_c_signature_as_resolved_type(self, other_type, as_cmethod = 0):
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
if self.is_overridable != other_type.is_overridable:
return 0
nargs = len(self.args)
if nargs != len(other_type.args):
return 0
# When comparing C method signatures, the first argument
# is exempt from compatibility checking (the proper check
# is performed elsewhere).
for i in range(as_cmethod, nargs):
if not self.args[i].type.same_as(other_type.args[i].type):
return 0
if self.has_varargs != other_type.has_varargs:
return 0
if self.optional_arg_count != other_type.optional_arg_count:
return 0
if not self.return_type.same_as(other_type.return_type):
return 0
if not self.same_calling_convention_as(other_type):
return 0
if self.exception_check != other_type.exception_check:
return 0
if not self._same_exception_value(other_type.exception_value):
return 0
return 1
def _same_exception_value(self, other_exc_value):
if self.exception_value == other_exc_value:
return 1
if self.exception_check != '+':
return 0
if not self.exception_value or not other_exc_value:
return 0
if self.exception_value.type != other_exc_value.type:
return 0
if self.exception_value.entry and other_exc_value.entry:
if self.exception_value.entry.cname != other_exc_value.entry.cname:
return 0
if self.exception_value.name != other_exc_value.name:
return 0
return 1
def compatible_signature_with(self, other_type, as_cmethod = 0):
return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod)
def compatible_signature_with_resolved_type(self, other_type, as_cmethod):
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
if not self.is_overridable and other_type.is_overridable:
return 0
nargs = len(self.args)
if nargs - self.optional_arg_count != len(other_type.args) - other_type.optional_arg_count:
return 0
if self.optional_arg_count < other_type.optional_arg_count:
return 0
# When comparing C method signatures, the first argument
# is exempt from compatibility checking (the proper check
# is performed elsewhere).
for i in range(as_cmethod, len(other_type.args)):
if not self.args[i].type.same_as(
other_type.args[i].type):
return 0
if self.has_varargs != other_type.has_varargs:
return 0
if not self.return_type.subtype_of_resolved_type(other_type.return_type):
return 0
if not self.same_calling_convention_as(other_type):
return 0
if self.nogil != other_type.nogil:
return 0
if not self.exception_check and other_type.exception_check:
# a redundant exception check doesn't make functions incompatible, but a missing one does
return 0
if not self._same_exception_value(other_type.exception_value):
return 0
self.original_sig = other_type.original_sig or other_type
return 1
def narrower_c_signature_than(self, other_type, as_cmethod = 0):
return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)
def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod):
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
nargs = len(self.args)
if nargs != len(other_type.args):
return 0
for i in range(as_cmethod, nargs):
if not self.args[i].type.subtype_of_resolved_type(other_type.args[i].type):
return 0
else:
self.args[i].needs_type_test = other_type.args[i].needs_type_test \
or not self.args[i].type.same_as(other_type.args[i].type)
if self.has_varargs != other_type.has_varargs:
return 0
if self.optional_arg_count != other_type.optional_arg_count:
return 0
if not self.return_type.subtype_of_resolved_type(other_type.return_type):
return 0
if not self.exception_check and other_type.exception_check:
# a redundant exception check doesn't make functions incompatible, but a missing one does
return 0
if not self._same_exception_value(other_type.exception_value):
return 0
return 1
def same_calling_convention_as(self, other):
## XXX Under discussion ...
## callspec_words = ("__stdcall", "__cdecl", "__fastcall")
## cs1 = self.calling_convention
## cs2 = other.calling_convention
## if (cs1 in callspec_words or
## cs2 in callspec_words):
## return cs1 == cs2
## else:
## return True
sc1 = self.calling_convention == '__stdcall'
sc2 = other.calling_convention == '__stdcall'
return sc1 == sc2
def same_as_resolved_type(self, other_type, as_cmethod = 0):
return self.same_c_signature_as_resolved_type(other_type, as_cmethod) \
and self.nogil == other_type.nogil
def pointer_assignable_from_resolved_type(self, other_type):
return self.same_c_signature_as_resolved_type(other_type) \
and not (self.nogil and not other_type.nogil)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0,
with_calling_convention = 1):
arg_decl_list = []
for arg in self.args[:len(self.args)-self.optional_arg_count]:
arg_decl_list.append(
arg.type.declaration_code("", for_display, pyrex = pyrex))
if self.is_overridable:
arg_decl_list.append("int %s" % Naming.skip_dispatch_cname)
if self.optional_arg_count:
arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname))
if self.has_varargs:
arg_decl_list.append("...")
arg_decl_code = ", ".join(arg_decl_list)
if not arg_decl_code and not pyrex:
arg_decl_code = "void"
trailer = ""
if (pyrex or for_display) and not self.return_type.is_pyobject:
if self.exception_value and self.exception_check:
trailer = " except? %s" % self.exception_value
elif self.exception_value:
trailer = " except %s" % self.exception_value
elif self.exception_check == '+':
trailer = " except +"
elif self.exception_check and for_display:
# not spelled out by default, unless for human eyes
trailer = " except *"
if self.nogil:
trailer += " nogil"
if not with_calling_convention:
cc = ''
else:
cc = self.calling_convention_prefix()
if (not entity_code and cc) or entity_code.startswith("*"):
entity_code = "(%s%s)" % (cc, entity_code)
cc = ""
if self.is_const_method:
trailer += " const"
return self.return_type.declaration_code(
"%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer),
for_display, dll_linkage, pyrex)
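    # Roughly: for a signature like "cdef int f(double x) nogil", the display
    # form produced here is "int f(double) nogil" (argument names dropped,
    # calling convention and exception clause re-attached when present).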
def function_header_code(self, func_name, arg_code):
if self.is_const_method:
trailer = " const"
else:
trailer = ""
return "%s%s(%s)%s" % (self.calling_convention_prefix(),
func_name, arg_code, trailer)
def signature_string(self):
s = self.empty_declaration_code()
return s
def signature_cast_string(self):
s = self.declaration_code("(*)", with_calling_convention=False)
return '(%s)' % s
def specialize(self, values):
result = CFuncType(self.return_type.specialize(values),
[arg.specialize(values) for arg in self.args],
has_varargs = self.has_varargs,
exception_value = self.exception_value,
exception_check = self.exception_check,
calling_convention = self.calling_convention,
nogil = self.nogil,
with_gil = self.with_gil,
is_overridable = self.is_overridable,
optional_arg_count = self.optional_arg_count,
is_const_method = self.is_const_method,
is_static_method = self.is_static_method,
templates = self.templates)
result.from_fused = self.is_fused
return result
def opt_arg_cname(self, arg_name):
return self.op_arg_struct.base_type.scope.lookup(arg_name).cname
# Methods that deal with Fused Types
# All but map_with_specific_entries should be called only on functions
# with fused types (and not on their corresponding specific versions).
def get_all_specialized_permutations(self, fused_types=None):
"""
Permute all the types. For every specific instance of a fused type, we
want all other specific instances of all other fused types.
It returns an iterable of two-tuples of the cname that should prefix
the cname of the function, and a dict mapping any fused types to their
respective specific types.
"""
assert self.is_fused
if fused_types is None:
fused_types = self.get_fused_types()
return get_all_specialized_permutations(fused_types)
def get_all_specialized_function_types(self):
"""
Get all the specific function types of this one.
"""
assert self.is_fused
if self.entry.fused_cfunction:
return [n.type for n in self.entry.fused_cfunction.nodes]
elif self.cached_specialized_types is not None:
return self.cached_specialized_types
cfunc_entries = self.entry.scope.cfunc_entries
cfunc_entries.remove(self.entry)
result = []
permutations = self.get_all_specialized_permutations()
for cname, fused_to_specific in permutations:
new_func_type = self.entry.type.specialize(fused_to_specific)
if self.optional_arg_count:
# Remember, this method is set by CFuncDeclaratorNode
self.declare_opt_arg_struct(new_func_type, cname)
new_entry = copy.deepcopy(self.entry)
new_func_type.specialize_entry(new_entry, cname)
new_entry.type = new_func_type
new_func_type.entry = new_entry
result.append(new_func_type)
cfunc_entries.append(new_entry)
self.cached_specialized_types = result
return result
def get_fused_types(self, result=None, seen=None, subtypes=None):
"""Return fused types in the order they appear as parameter types"""
return super(CFuncType, self).get_fused_types(result, seen,
subtypes=['args'])
def specialize_entry(self, entry, cname):
assert not self.is_fused
specialize_entry(entry, cname)
def can_coerce_to_pyobject(self, env):
# duplicating the decisions from create_to_py_utility_code() here avoids writing out unused code
if self.has_varargs or self.optional_arg_count:
return False
if self.to_py_function is not None:
return self.to_py_function
for arg in self.args:
if not arg.type.is_pyobject and not arg.type.can_coerce_to_pyobject(env):
return False
if not self.return_type.is_pyobject and not self.return_type.can_coerce_to_pyobject(env):
return False
return True
def create_to_py_utility_code(self, env):
# FIXME: it seems we're trying to coerce in more cases than we should
if self.to_py_function is not None:
return self.to_py_function
if not self.can_coerce_to_pyobject(env):
return False
from .UtilityCode import CythonUtilityCode
safe_typename = re.sub('[^a-zA-Z0-9]', '__', self.declaration_code("", pyrex=1))
to_py_function = "__Pyx_CFunc_%s_to_py" % safe_typename
for arg in self.args:
if not arg.type.is_pyobject and not arg.type.create_from_py_utility_code(env):
return False
if not self.return_type.is_pyobject and not self.return_type.create_to_py_utility_code(env):
return False
def declared_type(ctype):
type_displayname = str(ctype.declaration_code("", for_display=True))
if ctype.is_pyobject:
arg_ctype = type_name = type_displayname
if ctype.is_builtin_type:
arg_ctype = ctype.name
elif not ctype.is_extension_type:
type_name = 'object'
type_displayname = None
else:
type_displayname = repr(type_displayname)
elif ctype is c_bint_type:
type_name = arg_ctype = 'bint'
else:
type_name = arg_ctype = type_displayname
if ctype is c_double_type:
type_displayname = 'float'
else:
type_displayname = repr(type_displayname)
return type_name, arg_ctype, type_displayname
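            # e.g. declared_type(c_double_type) returns
            # ('double', 'double', 'float'): the C signature keeps "double"
            # while the Python-facing signature advertises "float".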
class Arg(object):
def __init__(self, arg_name, arg_type):
self.name = arg_name
self.type = arg_type
self.type_cname, self.ctype, self.type_displayname = declared_type(arg_type)
if self.return_type.is_void:
except_clause = 'except *'
elif self.return_type.is_pyobject:
except_clause = ''
elif self.exception_value:
except_clause = ('except? %s' if self.exception_check else 'except %s') % self.exception_value
else:
except_clause = 'except *'
context = {
'cname': to_py_function,
'args': [Arg(arg.name or 'arg%s' % ix, arg.type) for ix, arg in enumerate(self.args)],
'return_type': Arg('return', self.return_type),
'except_clause': except_clause,
}
# FIXME: directives come from first defining environment and do not adapt for reuse
env.use_utility_code(CythonUtilityCode.load(
"cfunc.to_py", "CConvert.pyx",
outer_module_scope=env.global_scope(), # need access to types declared in module
context=context, compiler_directives=dict(env.global_scope().directives)))
self.to_py_function = to_py_function
return True
def specialize_entry(entry, cname):
"""
Specialize an entry of a copied fused function or method
"""
entry.is_fused_specialized = True
entry.name = get_fused_cname(cname, entry.name)
if entry.is_cmethod:
entry.cname = entry.name
if entry.is_inherited:
entry.cname = StringEncoding.EncodedString(
"%s.%s" % (Naming.obj_base_cname, entry.cname))
else:
entry.cname = get_fused_cname(cname, entry.cname)
if entry.func_cname:
entry.func_cname = get_fused_cname(cname, entry.func_cname)
def get_fused_cname(fused_cname, orig_cname):
"""
Given the fused cname id and an original cname, return a specialized cname
"""
assert fused_cname and orig_cname
return StringEncoding.EncodedString('%s%s%s' % (Naming.fused_func_prefix,
fused_cname, orig_cname))
def unique(somelist):
seen = set()
result = []
for obj in somelist:
if obj not in seen:
result.append(obj)
seen.add(obj)
return result
def get_all_specialized_permutations(fused_types):
return _get_all_specialized_permutations(unique(fused_types))
def _get_all_specialized_permutations(fused_types, id="", f2s=()):
fused_type, = fused_types[0].get_fused_types()
result = []
for newid, specific_type in enumerate(fused_type.types):
# f2s = dict(f2s, **{ fused_type: specific_type })
f2s = dict(f2s)
f2s.update({ fused_type: specific_type })
if id:
cname = '%s_%s' % (id, newid)
else:
cname = str(newid)
if len(fused_types) > 1:
result.extend(_get_all_specialized_permutations(
fused_types[1:], cname, f2s))
else:
result.append((cname, f2s))
return result
def specialization_signature_string(fused_compound_type, fused_to_specific):
"""
Return the signature for a specialization of a fused type. e.g.
floating[:] ->
'float' or 'double'
cdef fused ft:
float[:]
double[:]
ft ->
'float[:]' or 'double[:]'
integral func(floating) ->
'int (*func)(float)' or ...
"""
fused_types = fused_compound_type.get_fused_types()
if len(fused_types) == 1:
fused_type = fused_types[0]
else:
fused_type = fused_compound_type
return fused_type.specialize(fused_to_specific).typeof_name()
def get_specialized_types(type):
"""
Return a list of specialized types in their declared order.
"""
assert type.is_fused
if isinstance(type, FusedType):
result = list(type.types)
for specialized_type in result:
specialized_type.specialization_string = specialized_type.typeof_name()
else:
result = []
for cname, f2s in get_all_specialized_permutations(type.get_fused_types()):
specialized_type = type.specialize(f2s)
specialized_type.specialization_string = (
specialization_signature_string(type, f2s))
result.append(specialized_type)
return result
class CFuncTypeArg(BaseType):
# name string
# cname string
# type PyrexType
# pos source file position
# FIXME: is this the right setup? should None be allowed here?
not_none = False
or_none = False
accept_none = True
accept_builtin_subtypes = False
subtypes = ['type']
def __init__(self, name, type, pos, cname=None):
self.name = name
if cname is not None:
self.cname = cname
else:
self.cname = Naming.var_prefix + name
self.type = type
self.pos = pos
self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?
def __repr__(self):
return "%s:%s" % (self.name, repr(self.type))
def declaration_code(self, for_display = 0):
return self.type.declaration_code(self.cname, for_display)
def specialize(self, values):
return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)
class ToPyStructUtilityCode(object):
requires = None
def __init__(self, type, forward_decl, env):
self.type = type
self.header = "static PyObject* %s(%s)" % (type.to_py_function,
type.declaration_code('s'))
self.forward_decl = forward_decl
self.env = env
def __eq__(self, other):
return isinstance(other, ToPyStructUtilityCode) and self.header == other.header
def __hash__(self):
return hash(self.header)
def get_tree(self):
pass
def put_code(self, output):
code = output['utility_code_def']
proto = output['utility_code_proto']
code.putln("%s {" % self.header)
code.putln("PyObject* res;")
code.putln("PyObject* member;")
code.putln("res = PyDict_New(); if (res == NULL) return NULL;")
for member in self.type.scope.var_entries:
nameconst_cname = code.get_py_string_const(member.name, identifier=True)
code.putln("%s; if (member == NULL) goto bad;" % (
member.type.to_py_call_code('s.%s' % member.cname, 'member', member.type)))
code.putln("if (PyDict_SetItem(res, %s, member) < 0) goto bad;" % nameconst_cname)
code.putln("Py_DECREF(member);")
code.putln("return res;")
code.putln("bad:")
code.putln("Py_XDECREF(member);")
code.putln("Py_DECREF(res);")
code.putln("return NULL;")
code.putln("}")
# This is a bit of a hack, we need a forward declaration
# due to the way things are ordered in the module...
if self.forward_decl:
proto.putln(self.type.empty_declaration_code() + ';')
proto.putln(self.header + ";")
def inject_tree_and_scope_into(self, module_node):
pass
class CStructOrUnionType(CType):
# name string
# cname string
# kind string "struct" or "union"
# scope StructOrUnionScope, or None if incomplete
# typedef_flag boolean
# packed boolean
# entry Entry
is_struct_or_union = 1
has_attributes = 1
exception_check = True
def __init__(self, name, kind, scope, typedef_flag, cname, packed=False):
self.name = name
self.cname = cname
self.kind = kind
self.scope = scope
self.typedef_flag = typedef_flag
self.is_struct = kind == 'struct'
self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname)
self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname)
self.exception_check = True
self._convert_to_py_code = None
self._convert_from_py_code = None
self.packed = packed
def create_to_py_utility_code(self, env):
if env.outer_scope is None:
return False
if self._convert_to_py_code is False:
return None # tri-state-ish
if self._convert_to_py_code is None:
is_union = not self.is_struct
unsafe_union_types = set()
safe_union_types = set()
for member in self.scope.var_entries:
member_type = member.type
if not member_type.create_to_py_utility_code(env):
self.to_py_function = None
self._convert_to_py_code = False
return False
if is_union:
if member_type.is_ptr or member_type.is_cpp_class:
unsafe_union_types.add(member_type)
else:
safe_union_types.add(member_type)
if unsafe_union_types and (safe_union_types or len(unsafe_union_types) > 1):
# unsafe mix of safe and unsafe to convert types
                self.to_py_function = None
                self._convert_to_py_code = False
return False
forward_decl = self.entry.visibility != 'extern' and not self.typedef_flag
self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl, env)
env.use_utility_code(self._convert_to_py_code)
return True
def create_from_py_utility_code(self, env):
if env.outer_scope is None:
return False
if self._convert_from_py_code is False:
return None # tri-state-ish
if self._convert_from_py_code is None:
for member in self.scope.var_entries:
if not member.type.create_from_py_utility_code(env):
self.from_py_function = None
self._convert_from_py_code = False
return False
context = dict(
struct_name=self.name,
var_entries=self.scope.var_entries,
funcname=self.from_py_function,
)
from .UtilityCode import CythonUtilityCode
self._convert_from_py_code = CythonUtilityCode.load(
"FromPyStructUtility" if self.is_struct else "FromPyUnionUtility",
"CConvert.pyx",
outer_module_scope=env.global_scope(), # need access to types declared in module
context=context)
env.use_utility_code(self._convert_from_py_code)
return True
def __repr__(self):
return "<CStructOrUnionType %s %s%s>" % (
self.name, self.cname,
("", " typedef")[self.typedef_flag])
def declaration_code(self, entity_code,
for_display=0, dll_linkage=None, pyrex=0):
if pyrex or for_display:
base_code = self.name
else:
if self.typedef_flag:
base_code = self.cname
else:
base_code = "%s %s" % (self.kind, self.cname)
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def __eq__(self, other):
try:
return (isinstance(other, CStructOrUnionType) and
self.name == other.name)
except AttributeError:
return False
def __lt__(self, other):
try:
return self.name < other.name
except AttributeError:
# this is arbitrary, but it makes sure we always have
# *some* kind of order
return False
def __hash__(self):
return hash(self.cname) ^ hash(self.kind)
def is_complete(self):
return self.scope is not None
def attributes_known(self):
return self.is_complete()
def can_be_complex(self):
# Does the struct consist of exactly two identical floats?
fields = self.scope.var_entries
if len(fields) != 2: return False
a, b = fields
return (a.type.is_float and b.type.is_float and
a.type.empty_declaration_code() ==
b.type.empty_declaration_code())
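        # e.g. "struct { double re; double im; }" qualifies and can then be
        # handled like the corresponding C complex type.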
def struct_nesting_depth(self):
child_depths = [x.type.struct_nesting_depth()
for x in self.scope.var_entries]
return max(child_depths) + 1
def cast_code(self, expr_code):
if self.is_struct:
return expr_code
return super(CStructOrUnionType, self).cast_code(expr_code)
cpp_string_conversions = ("std::string",)
builtin_cpp_conversions = ("std::pair",
"std::vector", "std::list",
"std::set", "std::unordered_set",
"std::map", "std::unordered_map")
class CppClassType(CType):
# name string
# cname string
# scope CppClassScope
# templates [string] or None
is_cpp_class = 1
has_attributes = 1
exception_check = True
namespace = None
# For struct-like declaration.
kind = "struct"
packed = False
typedef_flag = False
subtypes = ['templates']
def __init__(self, name, scope, cname, base_classes, templates=None, template_type=None):
self.name = name
self.cname = cname
self.scope = scope
self.base_classes = base_classes
self.operators = []
self.templates = templates
self.template_type = template_type
self.specializations = {}
self.is_cpp_string = cname in cpp_string_conversions
def use_conversion_utility(self, from_or_to):
pass
def maybe_unordered(self):
if 'unordered' in self.cname:
return 'unordered_'
else:
return ''
def create_from_py_utility_code(self, env):
if self.from_py_function is not None:
return True
if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
X = "XYZABC"
tags = []
declarations = ["cdef extern from *:"]
for ix, T in enumerate(self.templates or []):
if T.is_pyobject or not T.create_from_py_utility_code(env):
return False
tags.append(T.specialization_name())
if T.exception_value is not None:
# This is a hack due to the except value clause
# requiring a const (literal) value of the right
# (visible) type.
def guess_type(value):
if not T.is_typedef and (T.is_numeric or T.is_ptr):
return T
try:
int(value)
return c_longlong_type
except ValueError:
pass
try:
float(value)
return c_double_type
except ValueError:
pass
return T
except_type = guess_type(T.exception_value)
except_clause = "%s " % T.exception_value
if T.exception_check:
except_clause = "? %s" % except_clause
declarations.append(
" ctypedef %s %s '%s'" % (
except_type.declaration_code("", for_display=True), X[ix], T.empty_declaration_code()))
else:
except_clause = "*"
declarations.append(
" ctypedef struct %s '%s':\n pass" % (
X[ix], T.empty_declaration_code()))
declarations.append(
" cdef %s %s_from_py '%s' (object) except %s" % (
X[ix], X[ix], T.from_py_function, except_clause))
if self.cname in cpp_string_conversions:
cls = 'string'
tags = type_identifier(self),
else:
cls = self.cname[5:]
cname = '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(tags))
context = {
'template_type_declarations': '\n'.join(declarations),
'cname': cname,
'maybe_unordered': self.maybe_unordered(),
'type': self.cname,
}
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx", context=context))
self.from_py_function = cname
return True
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return True
if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
X = "XYZABC"
tags = []
declarations = ["cdef extern from *:"]
for ix, T in enumerate(self.templates or []):
if not T.create_to_py_utility_code(env):
return False
tags.append(T.specialization_name())
declarations.append(
" ctypedef struct %s '%s':\n pass" % (
X[ix], T.empty_declaration_code()))
declarations.append(
" cdef object %s_to_py '%s' (%s)" % (
X[ix], T.to_py_function, X[ix]))
if self.cname in cpp_string_conversions:
cls = 'string'
prefix = 'PyObject_' # gets specialised by explicit type casts in CoerceToPyTypeNode
tags = type_identifier(self),
else:
cls = self.cname[5:]
prefix = ''
cname = "__pyx_convert_%s%s_to_py_%s" % (prefix, cls, "____".join(tags))
context = {
'template_type_declarations': '\n'.join(declarations),
'cname': cname,
'maybe_unordered': self.maybe_unordered(),
'type': self.cname,
}
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx", context=context))
self.to_py_function = cname
return True
def is_template_type(self):
return self.templates is not None and self.template_type is None
def get_fused_types(self, result=None, seen=None):
if result is None:
result = []
seen = set()
if self.namespace:
self.namespace.get_fused_types(result, seen)
if self.templates:
for T in self.templates:
T.get_fused_types(result, seen)
return result
def specialize_here(self, pos, template_values=None):
if not self.is_template_type():
error(pos, "'%s' type is not a template" % self)
return error_type
if len(self.templates) != len(template_values):
error(pos, "%s templated type receives %d arguments, got %d" %
(self.name, len(self.templates), len(template_values)))
return error_type
has_object_template_param = False
for value in template_values:
if value.is_pyobject:
has_object_template_param = True
error(pos,
"Python object type '%s' cannot be used as a template argument" % value)
if has_object_template_param:
return error_type
return self.specialize(dict(zip(self.templates, template_values)))
def specialize(self, values):
if not self.templates and not self.namespace:
return self
if self.templates is None:
self.templates = []
key = tuple(values.items())
if key in self.specializations:
return self.specializations[key]
template_values = [t.specialize(values) for t in self.templates]
specialized = self.specializations[key] = \
CppClassType(self.name, None, self.cname, [], template_values, template_type=self)
# Need to do these *after* self.specializations[key] is set
# to avoid infinite recursion on circular references.
specialized.base_classes = [b.specialize(values) for b in self.base_classes]
if self.namespace is not None:
specialized.namespace = self.namespace.specialize(values)
specialized.scope = self.scope.specialize(values, specialized)
return specialized
def deduce_template_params(self, actual):
if self == actual:
return {}
# TODO(robertwb): Actual type equality.
elif self.empty_declaration_code() == actual.template_type.empty_declaration_code():
return reduce(
merge_template_deductions,
[formal_param.deduce_template_params(actual_param)
for (formal_param, actual_param) in zip(self.templates, actual.templates)],
{})
else:
return None
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if self.templates:
template_strings = [param.declaration_code('', for_display, None, pyrex)
for param in self.templates]
if for_display:
brackets = "[%s]"
else:
brackets = "<%s> "
templates = brackets % ",".join(template_strings)
else:
templates = ""
if pyrex or for_display:
base_code = "%s%s" % (self.name, templates)
else:
base_code = "%s%s" % (self.cname, templates)
if self.namespace is not None:
base_code = "%s::%s" % (self.namespace.empty_declaration_code(), base_code)
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def is_subclass(self, other_type):
if self.same_as_resolved_type(other_type):
return 1
for base_class in self.base_classes:
if base_class.is_subclass(other_type):
return 1
return 0
def same_as_resolved_type(self, other_type):
if other_type.is_cpp_class:
if self == other_type:
return 1
elif (self.cname == other_type.cname and
self.template_type and other_type.template_type):
if self.templates == other_type.templates:
return 1
for t1, t2 in zip(self.templates, other_type.templates):
if not t1.same_as_resolved_type(t2):
return 0
return 1
return 0
def assignable_from_resolved_type(self, other_type):
# TODO: handle operator=(...) here?
if other_type is error_type:
return True
return other_type.is_cpp_class and other_type.is_subclass(self)
def attributes_known(self):
return self.scope is not None
def find_cpp_operation_type(self, operator, operand_type=None):
operands = [self]
if operand_type is not None:
operands.append(operand_type)
# pos == None => no errors
operator_entry = self.scope.lookup_operator_for_types(None, operator, operands)
if not operator_entry:
return None
func_type = operator_entry.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type.return_type
def check_nullary_constructor(self, pos, msg="stack allocated"):
constructor = self.scope.lookup(u'<init>')
if constructor is not None and best_match([], constructor.all_alternatives()) is None:
error(pos, "C++ class must have a nullary constructor to be %s" % msg)
class TemplatePlaceholderType(CType):
def __init__(self, name):
self.name = name
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if entity_code:
return self.name + " " + entity_code
else:
return self.name
def specialize(self, values):
if self in values:
return values[self]
else:
return self
def deduce_template_params(self, actual):
return {self: actual}
def same_as_resolved_type(self, other_type):
if isinstance(other_type, TemplatePlaceholderType):
return self.name == other_type.name
else:
return 0
def __hash__(self):
return hash(self.name)
def __cmp__(self, other):
if isinstance(other, TemplatePlaceholderType):
return cmp(self.name, other.name)
else:
return cmp(type(self), type(other))
def __eq__(self, other):
if isinstance(other, TemplatePlaceholderType):
return self.name == other.name
else:
return False
class CEnumType(CType):
# name string
# cname string or None
# typedef_flag boolean
is_enum = 1
signed = 1
rank = -1 # Ranks below any integer type
def __init__(self, name, cname, typedef_flag):
self.name = name
self.cname = cname
self.values = []
self.typedef_flag = typedef_flag
self.default_value = "(%s) 0" % self.empty_declaration_code()
def __str__(self):
return self.name
def __repr__(self):
return "<CEnumType %s %s%s>" % (self.name, self.cname,
("", " typedef")[self.typedef_flag])
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
else:
if self.typedef_flag:
base_code = self.cname
else:
base_code = "enum %s" % self.cname
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def create_to_py_utility_code(self, env):
self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load_cached(
"CIntToPy", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"TO_PY_FUNCTION": self.to_py_function}))
return True
def create_from_py_utility_code(self, env):
self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load_cached(
"CIntFromPy", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"FROM_PY_FUNCTION": self.from_py_function}))
return True
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
rhs = "%s(%s)" % (
from_py_function or self.from_py_function,
source_code)
return '%s = %s;%s' % (
result_code,
typecast(self, c_long_type, rhs),
' %s' % code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
class CTupleType(CType):
# components [PyrexType]
is_ctuple = True
def __init__(self, cname, components):
self.cname = cname
self.components = components
self.size = len(components)
self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname)
self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname)
self.exception_check = True
self._convert_to_py_code = None
self._convert_from_py_code = None
def __str__(self):
return "(%s)" % ", ".join(str(c) for c in self.components)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
return str(self)
else:
return self.base_declaration_code(self.cname, entity_code)
def can_coerce_to_pyobject(self, env):
for component in self.components:
if not component.can_coerce_to_pyobject(env):
return False
return True
def create_to_py_utility_code(self, env):
if self._convert_to_py_code is False:
return None # tri-state-ish
if self._convert_to_py_code is None:
for component in self.components:
if not component.create_to_py_utility_code(env):
self.to_py_function = None
self._convert_to_py_code = False
return False
context = dict(
struct_type_decl=self.empty_declaration_code(),
components=self.components,
funcname=self.to_py_function,
size=len(self.components)
)
self._convert_to_py_code = TempitaUtilityCode.load(
"ToPyCTupleUtility", "TypeConversion.c", context=context)
env.use_utility_code(self._convert_to_py_code)
return True
def create_from_py_utility_code(self, env):
if self._convert_from_py_code is False:
return None # tri-state-ish
if self._convert_from_py_code is None:
for component in self.components:
if not component.create_from_py_utility_code(env):
self.from_py_function = None
self._convert_from_py_code = False
return False
context = dict(
struct_type_decl=self.empty_declaration_code(),
components=self.components,
funcname=self.from_py_function,
size=len(self.components)
)
self._convert_from_py_code = TempitaUtilityCode.load(
"FromPyCTupleUtility", "TypeConversion.c", context=context)
env.use_utility_code(self._convert_from_py_code)
return True
def c_tuple_type(components):
components = tuple(components)
cname = Naming.ctuple_type_prefix + type_list_identifier(components)
tuple_type = CTupleType(cname, components)
return tuple_type
class UnspecifiedType(PyrexType):
# Used as a placeholder until the type can be determined.
is_unspecified = 1
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<unspecified>"
def same_as_resolved_type(self, other_type):
return False
class ErrorType(PyrexType):
# Used to prevent propagation of error messages.
is_error = 1
exception_value = "0"
exception_check = 0
to_py_function = "dummy"
from_py_function = "dummy"
def create_to_py_utility_code(self, env):
return True
def create_from_py_utility_code(self, env):
return True
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<error>"
def same_as_resolved_type(self, other_type):
return 1
def error_condition(self, result_code):
return "dummy"
rank_to_type_name = (
"char", # 0
"short", # 1
"int", # 2
"long", # 3
"PY_LONG_LONG", # 4
"float", # 5
"double", # 6
"long double", # 7
)
_rank_to_type_name = list(rank_to_type_name)
RANK_INT = _rank_to_type_name.index('int')
RANK_LONG = _rank_to_type_name.index('long')
RANK_FLOAT = _rank_to_type_name.index('float')
UNSIGNED = 0
SIGNED = 2
error_type = ErrorType()
unspecified_type = UnspecifiedType()
py_object_type = PyObjectType()
c_void_type = CVoidType()
c_uchar_type = CIntType(0, UNSIGNED)
c_ushort_type = CIntType(1, UNSIGNED)
c_uint_type = CIntType(2, UNSIGNED)
c_ulong_type = CIntType(3, UNSIGNED)
c_ulonglong_type = CIntType(4, UNSIGNED)
c_char_type = CIntType(0)
c_short_type = CIntType(1)
c_int_type = CIntType(2)
c_long_type = CIntType(3)
c_longlong_type = CIntType(4)
c_schar_type = CIntType(0, SIGNED)
c_sshort_type = CIntType(1, SIGNED)
c_sint_type = CIntType(2, SIGNED)
c_slong_type = CIntType(3, SIGNED)
c_slonglong_type = CIntType(4, SIGNED)
c_float_type = CFloatType(5, math_h_modifier='f')
c_double_type = CFloatType(6)
c_longdouble_type = CFloatType(7, math_h_modifier='l')
c_float_complex_type = CComplexType(c_float_type)
c_double_complex_type = CComplexType(c_double_type)
c_longdouble_complex_type = CComplexType(c_longdouble_type)
c_anon_enum_type = CAnonEnumType(-1)
c_returncode_type = CReturnCodeType(RANK_INT)
c_bint_type = CBIntType(RANK_INT)
c_py_unicode_type = CPyUnicodeIntType(RANK_INT-0.5, UNSIGNED)
c_py_ucs4_type = CPyUCS4IntType(RANK_LONG-0.5, UNSIGNED)
c_py_hash_t_type = CPyHashTType(RANK_LONG+0.5, SIGNED)
c_py_ssize_t_type = CPySSizeTType(RANK_LONG+0.5, SIGNED)
c_ssize_t_type = CSSizeTType(RANK_LONG+0.5, SIGNED)
c_size_t_type = CSizeTType(RANK_LONG+0.5, UNSIGNED)
c_ptrdiff_t_type = CPtrdiffTType(RANK_LONG+0.75, SIGNED)
c_null_ptr_type = CNullPtrType(c_void_type)
c_void_ptr_type = CPtrType(c_void_type)
c_void_ptr_ptr_type = CPtrType(c_void_ptr_type)
c_char_ptr_type = CPtrType(c_char_type)
c_uchar_ptr_type = CPtrType(c_uchar_type)
c_char_ptr_ptr_type = CPtrType(c_char_ptr_type)
c_int_ptr_type = CPtrType(c_int_type)
c_py_unicode_ptr_type = CPtrType(c_py_unicode_type)
c_py_ssize_t_ptr_type = CPtrType(c_py_ssize_t_type)
c_ssize_t_ptr_type = CPtrType(c_ssize_t_type)
c_size_t_ptr_type = CPtrType(c_size_t_type)
# GIL state
c_gilstate_type = CEnumType("PyGILState_STATE", "PyGILState_STATE", True)
c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState")
c_threadstate_ptr_type = CPtrType(c_threadstate_type)
# the Py_buffer type is defined in Builtin.py
c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer")
c_py_buffer_ptr_type = CPtrType(c_py_buffer_type)
# Not sure whether the unsigned versions and 'long long' should be in there
# long long requires C99 and might be slow, and would always get preferred
# when specialization happens through calling and not indexing
cy_integral_type = FusedType([c_short_type, c_int_type, c_long_type],
name="integral")
# Omitting long double as it might be slow
cy_floating_type = FusedType([c_float_type, c_double_type], name="floating")
cy_numeric_type = FusedType([c_short_type,
c_int_type,
c_long_type,
c_float_type,
c_double_type,
c_float_complex_type,
c_double_complex_type], name="numeric")
# buffer-related structs
c_buf_diminfo_type = CStructOrUnionType("__Pyx_Buf_DimInfo", "struct",
None, 1, "__Pyx_Buf_DimInfo")
c_pyx_buffer_type = CStructOrUnionType("__Pyx_Buffer", "struct", None, 1, "__Pyx_Buffer")
c_pyx_buffer_ptr_type = CPtrType(c_pyx_buffer_type)
c_pyx_buffer_nd_type = CStructOrUnionType("__Pyx_LocalBuf_ND", "struct",
None, 1, "__Pyx_LocalBuf_ND")
cython_memoryview_type = CStructOrUnionType("__pyx_memoryview_obj", "struct",
None, 0, "__pyx_memoryview_obj")
memoryviewslice_type = CStructOrUnionType("memoryviewslice", "struct",
None, 1, "__Pyx_memviewslice")
modifiers_and_name_to_type = {
#(signed, longness, name) : type
(0, 0, "char"): c_uchar_type,
(1, 0, "char"): c_char_type,
(2, 0, "char"): c_schar_type,
(0, -1, "int"): c_ushort_type,
(0, 0, "int"): c_uint_type,
(0, 1, "int"): c_ulong_type,
(0, 2, "int"): c_ulonglong_type,
(1, -1, "int"): c_short_type,
(1, 0, "int"): c_int_type,
(1, 1, "int"): c_long_type,
(1, 2, "int"): c_longlong_type,
(2, -1, "int"): c_sshort_type,
(2, 0, "int"): c_sint_type,
(2, 1, "int"): c_slong_type,
(2, 2, "int"): c_slonglong_type,
(1, 0, "float"): c_float_type,
(1, 0, "double"): c_double_type,
(1, 1, "double"): c_longdouble_type,
(1, 0, "complex"): c_double_complex_type, # C: float, Python: double => Python wins
(1, 0, "floatcomplex"): c_float_complex_type,
(1, 0, "doublecomplex"): c_double_complex_type,
(1, 1, "doublecomplex"): c_longdouble_complex_type,
#
(1, 0, "void"): c_void_type,
(1, 0, "bint"): c_bint_type,
(0, 0, "Py_UNICODE"): c_py_unicode_type,
(0, 0, "Py_UCS4"): c_py_ucs4_type,
(2, 0, "Py_hash_t"): c_py_hash_t_type,
(2, 0, "Py_ssize_t"): c_py_ssize_t_type,
(2, 0, "ssize_t") : c_ssize_t_type,
(0, 0, "size_t") : c_size_t_type,
(2, 0, "ptrdiff_t") : c_ptrdiff_t_type,
(1, 0, "object"): py_object_type,
}
def is_promotion(src_type, dst_type):
    # It's hard to find a precise definition of promotion, but empirical
# evidence suggests that the below is all that's allowed.
if src_type.is_numeric:
if dst_type.same_as(c_int_type):
unsigned = (not src_type.signed)
return (src_type.is_enum or
(src_type.is_int and
unsigned + src_type.rank < dst_type.rank))
elif dst_type.same_as(c_double_type):
return src_type.is_float and src_type.rank <= dst_type.rank
return False
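# Illustrative results (a sketch): is_promotion(c_short_type, c_int_type) is
# True, since a (possibly unsigned) short always fits in an int, whereas
# is_promotion(c_uint_type, c_int_type) is False because an unsigned int does
# not safely promote to a signed int of the same rank.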
def best_match(args, functions, pos=None, env=None):
"""
Given a list args of arguments and a list of functions, choose one
to call which seems to be the "best" fit for this list of arguments.
This function is used, e.g., when deciding which overloaded method
to dispatch for C++ classes.
We first eliminate functions based on arity, and if only one
function has the correct arity, we return it. Otherwise, we weight
functions based on how much work must be done to convert the
arguments, with the following priorities:
* identical types or pointers to identical types
* promotions
* non-Python types
    That is, we prefer functions where no arguments need to be converted,
and failing that, functions where only promotions are required, and
so on.
If no function is deemed a good fit, or if two or more functions have
the same weight, we return None (as there is no best match). If pos
is not None, we also generate an error.
"""
# TODO: args should be a list of types, not a list of Nodes.
actual_nargs = len(args)
candidates = []
errors = []
for func in functions:
error_mesg = ""
func_type = func.type
if func_type.is_ptr:
func_type = func_type.base_type
# Check function type
if not func_type.is_cfunction:
if not func_type.is_error and pos is not None:
error_mesg = "Calling non-function type '%s'" % func_type
errors.append((func, error_mesg))
continue
# Check no. of args
max_nargs = len(func_type.args)
min_nargs = max_nargs - func_type.optional_arg_count
if actual_nargs < min_nargs or \
(not func_type.has_varargs and actual_nargs > max_nargs):
if max_nargs == min_nargs and not func_type.has_varargs:
expectation = max_nargs
elif actual_nargs < min_nargs:
expectation = "at least %s" % min_nargs
else:
expectation = "at most %s" % max_nargs
error_mesg = "Call with wrong number of arguments (expected %s, got %s)" \
% (expectation, actual_nargs)
errors.append((func, error_mesg))
continue
if func_type.templates:
arg_types = [arg.type for arg in args]
deductions = reduce(
merge_template_deductions,
[pattern.type.deduce_template_params(actual) for (pattern, actual) in zip(func_type.args, arg_types)],
{})
if deductions is None:
errors.append((func, "Unable to deduce type parameters"))
elif len(deductions) < len(func_type.templates):
errors.append((func, "Unable to deduce type parameter %s" % (
", ".join([param.name for param in set(func_type.templates) - set(deductions.keys())]))))
else:
type_list = [deductions[param] for param in func_type.templates]
from .Symtab import Entry
specialization = Entry(
name = func.name + "[%s]" % ",".join([str(t) for t in type_list]),
cname = func.cname + "<%s>" % ",".join([t.empty_declaration_code() for t in type_list]),
type = func_type.specialize(deductions),
pos = func.pos)
candidates.append((specialization, specialization.type))
else:
candidates.append((func, func_type))
# Optimize the most common case of no overloading...
if len(candidates) == 1:
return candidates[0][0]
elif len(candidates) == 0:
if pos is not None:
func, errmsg = errors[0]
            if len(errors) == 1 or all(e == errmsg for func, e in errors):
error(pos, errmsg)
else:
error(pos, "no suitable method found")
return None
possibilities = []
bad_types = []
needed_coercions = {}
for index, (func, func_type) in enumerate(candidates):
score = [0,0,0,0]
for i in range(min(len(args), len(func_type.args))):
src_type = args[i].type
dst_type = func_type.args[i].type
assignable = dst_type.assignable_from(src_type)
# Now take care of normal string literals. So when you call a cdef
# function that takes a char *, the coercion will mean that the
# type will simply become bytes. We need to do this coercion
# manually for overloaded and fused functions
if not assignable and src_type.is_pyobject:
if (src_type.is_builtin_type and src_type.name == 'str' and
dst_type.resolve() is c_char_ptr_type):
c_src_type = c_char_ptr_type
else:
c_src_type = src_type.default_coerced_ctype()
if c_src_type:
assignable = dst_type.assignable_from(c_src_type)
if assignable:
src_type = c_src_type
needed_coercions[func] = (i, dst_type)
if assignable:
if src_type == dst_type or dst_type.same_as(src_type):
pass # score 0
elif func_type.is_strict_signature:
break # exact match requested but not found
elif is_promotion(src_type, dst_type):
score[2] += 1
elif ((src_type.is_int and dst_type.is_int) or
(src_type.is_float and dst_type.is_float)):
score[2] += abs(dst_type.rank + (not dst_type.signed) -
(src_type.rank + (not src_type.signed))) + 1
elif not src_type.is_pyobject:
score[1] += 1
else:
score[0] += 1
else:
error_mesg = "Invalid conversion from '%s' to '%s'" % (src_type, dst_type)
bad_types.append((func, error_mesg))
break
else:
possibilities.append((score, index, func)) # so we can sort it
if possibilities:
possibilities.sort()
if len(possibilities) > 1:
score1 = possibilities[0][0]
score2 = possibilities[1][0]
if score1 == score2:
if pos is not None:
error(pos, "ambiguous overloaded method")
return None
function = possibilities[0][-1]
if function in needed_coercions and env:
arg_i, coerce_to_type = needed_coercions[function]
args[arg_i] = args[arg_i].coerce_to(coerce_to_type, env)
return function
if pos is not None:
if len(bad_types) == 1:
error(pos, bad_types[0][1])
else:
error(pos, "no suitable method found")
return None
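# A worked sketch of the scoring above: given overloads f(int) and f(double)
# called with a c_float_type argument, f(double) scores [0,0,1,0] (float ->
# double is a promotion) while f(int) scores [0,1,0,0] (a generic non-Python
# conversion). The lexicographically smaller score wins, so f(double) is
# chosen.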
def merge_template_deductions(a, b):
if a is None or b is None:
return None
    merged = a
    for param, value in b.items():
        if param in merged:
            if a[param] != b[param]:
                return None
        else:
            merged[param] = value
    return merged
def widest_numeric_type(type1, type2):
"""Given two numeric types, return the narrowest type encompassing both of them.
"""
if type1 == type2:
widest_type = type1
elif type1.is_complex or type2.is_complex:
def real_type(ntype):
if ntype.is_complex:
return ntype.real_type
return ntype
widest_type = CComplexType(
widest_numeric_type(
real_type(type1),
real_type(type2)))
elif type1.is_enum and type2.is_enum:
widest_type = c_int_type
elif type1.rank < type2.rank:
widest_type = type2
elif type1.rank > type2.rank:
widest_type = type1
elif type1.signed < type2.signed:
widest_type = type1
elif type1.signed > type2.signed:
widest_type = type2
elif type1.is_typedef > type2.is_typedef:
widest_type = type1
else:
widest_type = type2
return widest_type
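# Illustrative results (a sketch):
#   widest_numeric_type(c_int_type, c_long_type)   -> c_long_type
#   widest_numeric_type(c_int_type, c_float_type)  -> c_float_type (rank 5 > 2)
#   widest_numeric_type(c_double_type, c_float_complex_type)
#       -> complex of double, via the real_type recursion above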
def numeric_type_fits(small_type, large_type):
return widest_numeric_type(small_type, large_type) == large_type
def independent_spanning_type(type1, type2):
# Return a type assignable independently from both type1 and
# type2, but do not require any interoperability between the two.
# For example, in "True * 2", it is safe to assume an integer
# result type (so spanning_type() will do the right thing),
# whereas "x = True or 2" must evaluate to a type that can hold
# both a boolean value and an integer, so this function works
# better.
if type1.is_reference ^ type2.is_reference:
if type1.is_reference:
type1 = type1.ref_base_type
else:
type2 = type2.ref_base_type
if type1 == type2:
return type1
elif (type1 is c_bint_type or type2 is c_bint_type) and (type1.is_numeric and type2.is_numeric):
# special case: if one of the results is a bint and the other
# is another C integer, we must prevent returning a numeric
# type so that we do not lose the ability to coerce to a
# Python bool if we have to.
return py_object_type
span_type = _spanning_type(type1, type2)
if span_type is None:
return error_type
return span_type
def spanning_type(type1, type2):
# Return a type assignable from both type1 and type2, or
# py_object_type if no better type is found. Assumes that the
# code that calls this will try a coercion afterwards, which will
# fail if the types cannot actually coerce to a py_object_type.
if type1 == type2:
return type1
elif type1 is py_object_type or type2 is py_object_type:
return py_object_type
elif type1 is c_py_unicode_type or type2 is c_py_unicode_type:
# Py_UNICODE behaves more like a string than an int
return py_object_type
span_type = _spanning_type(type1, type2)
if span_type is None:
return py_object_type
return span_type
def _spanning_type(type1, type2):
if type1.is_numeric and type2.is_numeric:
return widest_numeric_type(type1, type2)
elif type1.is_builtin_type and type1.name == 'float' and type2.is_numeric:
return widest_numeric_type(c_double_type, type2)
elif type2.is_builtin_type and type2.name == 'float' and type1.is_numeric:
return widest_numeric_type(type1, c_double_type)
elif type1.is_extension_type and type2.is_extension_type:
return widest_extension_type(type1, type2)
elif type1.is_pyobject or type2.is_pyobject:
return py_object_type
elif type1.assignable_from(type2):
if type1.is_extension_type and type1.typeobj_is_imported():
# external types are unsafe, so we use PyObject instead
return py_object_type
return type1
elif type2.assignable_from(type1):
if type2.is_extension_type and type2.typeobj_is_imported():
# external types are unsafe, so we use PyObject instead
return py_object_type
return type2
elif type1.is_ptr and type2.is_ptr:
# incompatible pointers, void* will do as a result
return c_void_ptr_type
else:
return None
def widest_extension_type(type1, type2):
if type1.typeobj_is_imported() or type2.typeobj_is_imported():
return py_object_type
while True:
if type1.subtype_of(type2):
return type2
elif type2.subtype_of(type1):
return type1
type1, type2 = type1.base_type, type2.base_type
if type1 is None or type2 is None:
return py_object_type
def simple_c_type(signed, longness, name):
# Find type descriptor for simple type given name and modifiers.
# Returns None if arguments don't make sense.
return modifiers_and_name_to_type.get((signed, longness, name))
def parse_basic_type(name):
base = None
if name.startswith('p_'):
base = parse_basic_type(name[2:])
elif name.startswith('p'):
base = parse_basic_type(name[1:])
elif name.endswith('*'):
base = parse_basic_type(name[:-1])
if base:
return CPtrType(base)
#
basic_type = simple_c_type(1, 0, name)
if basic_type:
return basic_type
#
signed = 1
longness = 0
if name == 'Py_UNICODE':
signed = 0
elif name == 'Py_UCS4':
signed = 0
elif name == 'Py_hash_t':
signed = 2
elif name == 'Py_ssize_t':
signed = 2
elif name == 'ssize_t':
signed = 2
elif name == 'size_t':
signed = 0
else:
if name.startswith('u'):
name = name[1:]
signed = 0
elif (name.startswith('s') and
not name.startswith('short')):
name = name[1:]
signed = 2
longness = 0
while name.startswith('short'):
name = name.replace('short', '', 1).strip()
longness -= 1
while name.startswith('long'):
name = name.replace('long', '', 1).strip()
longness += 1
if longness != 0 and not name:
name = 'int'
return simple_c_type(signed, longness, name)
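# A few illustrative parses (a sketch):
#   parse_basic_type('int')      -> c_int_type
#   parse_basic_type('ulong')    -> c_ulong_type    ('u' prefix => unsigned)
#   parse_basic_type('longlong') -> c_longlong_type (longness bumped twice)
#   parse_basic_type('p_int')    -> CPtrType(c_int_type)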
def c_array_type(base_type, size):
# Construct a C array type.
if base_type is error_type:
return error_type
else:
return CArrayType(base_type, size)
def c_ptr_type(base_type):
# Construct a C pointer type.
if base_type is error_type:
return error_type
elif base_type.is_reference:
return CPtrType(base_type.ref_base_type)
else:
return CPtrType(base_type)
def c_ref_type(base_type):
# Construct a C reference type
if base_type is error_type:
return error_type
else:
return CReferenceType(base_type)
def c_const_type(base_type):
# Construct a C const type.
if base_type is error_type:
return error_type
else:
return CConstType(base_type)
def same_type(type1, type2):
return type1.same_as(type2)
def assignable_from(type1, type2):
return type1.assignable_from(type2)
def typecast(to_type, from_type, expr_code):
# Return expr_code cast to a C type which can be
# assigned to to_type, assuming its existing C type
# is from_type.
if (to_type is from_type or
(not to_type.is_pyobject and assignable_from(to_type, from_type))):
return expr_code
elif (to_type is py_object_type and from_type and
from_type.is_builtin_type and from_type.name != 'type'):
# no cast needed, builtins are PyObject* already
return expr_code
else:
#print "typecast: to", to_type, "from", from_type ###
return to_type.cast_code(expr_code)
def type_list_identifier(types):
return cap_length('__and_'.join(type_identifier(type) for type in types))
_type_identifier_cache = {}
def type_identifier(type):
decl = type.empty_declaration_code()
safe = _type_identifier_cache.get(decl)
if safe is None:
safe = decl
safe = re.sub(' +', ' ', safe)
safe = re.sub(' ([^a-zA-Z0-9_])', r'\1', safe)
safe = re.sub('([^a-zA-Z0-9_]) ', r'\1', safe)
safe = (safe.replace('__', '__dunder')
.replace('const ', '__const_')
.replace(' ', '__space_')
.replace('*', '__ptr')
.replace('&', '__ref')
.replace('[', '__lArr')
.replace(']', '__rArr')
.replace('<', '__lAng')
.replace('>', '__rAng')
.replace('(', '__lParen')
.replace(')', '__rParen')
.replace(',', '__comma_')
.replace('::', '__in_'))
safe = cap_length(re.sub('[^a-zA-Z0-9_]', lambda x: '__%X' % ord(x.group(0)), safe))
_type_identifier_cache[decl] = safe
return safe
def cap_length(s, max_prefix=63, max_len=1024):
if len(s) <= max_prefix:
return s
else:
return '%x__%s__etc' % (abs(hash(s)) % (1<<20), s[:max_len-17])
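# Illustrative mangling (a sketch): type_identifier(c_int_ptr_type) yields
# 'int__ptr', and very long declarations are hashed and truncated by
# cap_length to stay within identifier-length limits.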
|
bdh1011/wau
|
venv/lib/python2.7/site-packages/Cython/Compiler/PyrexTypes.py
|
Python
|
mit
| 158,149
|
"""
bridge to docker-compose
"""
from compose.cli.main import TopLevelCommand
from compose.container import Container
import logging
def ps_(project):
"""
containers status
"""
logging.debug('ps ' + project.name)
containers = project.containers(stopped=True) + project.containers(one_off=True)
items = map(lambda container: {
'name': container.name,
'name_without_project': container.name_without_project,
'command': container.human_readable_command,
'state': container.human_readable_state,
'labels': container.labels,
'ports': container.ports,
'volumes': get_volumes(get_container_from_id(project.client, container.id)),
'is_running': container.is_running}, containers)
return items
def get_container_from_id(client, container_id):
"""
return the docker container from a given id
"""
return Container.from_id(client, container_id)
def get_volumes(container):
"""
retrieve container volumes details
"""
volumes = container.get('Volumes')
volumes_rw = container.get('VolumesRW')
    items = map(lambda volume: dict(write=volumes_rw[volume], dest=volume,
                                    src=volumes[volume]),
                volumes)
return items
def get_project(path):
"""
get docker project given file path
"""
logging.debug('get project ' + path)
command = TopLevelCommand()
command.base_dir = path
project = command.get_project(command.get_config_path())
return project
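# A minimal usage sketch (the compose directory path is hypothetical):
#
#   project = get_project('/path/to/compose/dir')
#   for item in ps_(project):
#       print(item['name'], item['state'], item['is_running'])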
|
DaniTheLion/docker-compose-ui
|
scripts/bridge.py
|
Python
|
mit
| 1,520
|
import pystache
class SVGGenerator:
def __init__(self, template_file):
self.template_file = template_file
self.template = None
self.renderer = pystache.Renderer()
def to_svg(self, data=None):
if self.template is None:
            # Lazily load and cache the template on first use
            with open(self.template_file) as template_file:
                self.template = template_file.read()
return self.renderer.render(self.template, data)
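# A minimal usage sketch (template path and data keys are hypothetical):
#
#   generator = SVGGenerator('overlay.svg.mustache')
#   svg = generator.to_svg({'speed': 42, 'depth': 3.5})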
|
gizmo-cda/g2x
|
overlay/SVGGenerator.py
|
Python
|
mit
| 455
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import json
import logging
import os
import re
from collections import deque
from functools import wraps, partial
from flask import Flask, request, jsonify, make_response
from flask_compress import Compress
from flask_cors import CORS
from flask_restplus import Api as RestPlusAPI, Resource
from jsonschema import RefResolutionError
from werkzeug.http import generate_etag
from flexget import manager
from flexget.config_schema import process_config, format_checker
from flexget.utils.database import with_session
from flexget.webserver import User
from . import __path__
__version__ = '1.4.3'
log = logging.getLogger('api')
class APIClient(object):
"""
    This is a client that provides a more Pythonic interface to the REST API.
It skips http, and is only usable from within the running flexget process.
"""
def __init__(self):
self.app = api_app.test_client()
def __getattr__(self, item):
return APIEndpoint('/api/' + item, self.get_endpoint)
def get_endpoint(self, url, data=None, method=None):
if method is None:
method = 'POST' if data is not None else 'GET'
auth_header = dict(Authorization='Token %s' % api_key())
response = self.app.open(url, data=data, follow_redirects=True, method=method, headers=auth_header)
result = json.loads(response.get_data(as_text=True))
# TODO: Proper exceptions
        if not 200 <= response.status_code < 300:
raise Exception(result['error'])
return result
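# A minimal usage sketch (runs inside the flexget process; the endpoint
# name below is illustrative):
#
#   client = APIClient()
#   version_info = client.server.version()   # GET /api/server/version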
class APIEndpoint(object):
def __init__(self, endpoint, caller):
self.endpoint = endpoint
self.caller = caller
def __getattr__(self, item):
return self.__class__(self.endpoint + '/' + item, self.caller)
__getitem__ = __getattr__
def __call__(self, data=None, method=None):
return self.caller(self.endpoint, data=data, method=method)
def api_version(f):
""" Add the 'API-Version' header to all responses """
@wraps(f)
def wrapped(*args, **kwargs):
rv = f(*args, **kwargs)
rv.headers['API-Version'] = __version__
return rv
return wrapped
class APIResource(Resource):
"""All api resources should subclass this class."""
method_decorators = [with_session, api_version]
def __init__(self, api, *args, **kwargs):
self.manager = manager.manager
super(APIResource, self).__init__(api, *args, **kwargs)
class API(RestPlusAPI):
"""
Extends a flask restplus :class:`flask_restplus.Api` with:
- methods to make using json schemas easier
- methods to auto document and handle :class:`ApiError` responses
"""
def validate(self, model, schema_override=None, description=None):
"""
When a method is decorated with this, json data submitted to the endpoint will be validated with the given
`model`. This also auto-documents the expected model, as well as the possible :class:`ValidationError` response.
"""
def decorator(func):
@api.expect((model, description))
@api.response(ValidationError)
@wraps(func)
def wrapper(*args, **kwargs):
payload = request.json
try:
schema = schema_override if schema_override else model.__schema__
errors = process_config(config=payload, schema=schema, set_defaults=False)
if errors:
raise ValidationError(errors)
except RefResolutionError as e:
raise APIError(str(e))
return func(*args, **kwargs)
return wrapper
return decorator
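    # A minimal usage sketch of validate() (the model and resource method are
    # hypothetical):
    #
    #   @api.validate(task_input_model)
    #   def post(self, session=None):
    #       ...  # request.json has already passed schema validation here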
def response(self, code_or_apierror, description='Success', model=None, **kwargs):
"""
Extends :meth:`flask_restplus.Api.response` to allow passing an :class:`ApiError` class instead of
response code. If an `ApiError` is used, the response code, and expected response model, is automatically
documented.
"""
try:
if issubclass(code_or_apierror, APIError):
description = code_or_apierror.description or description
return self.doc(
responses={code_or_apierror.status_code: (description, code_or_apierror.response_model)}, **kwargs)
except TypeError:
# If first argument isn't a class this happens
pass
return self.doc(responses={code_or_apierror: (description, model)}, **kwargs)
def pagination_parser(self, parser=None, sort_choices=None, default=None, add_sort=None):
"""
Return a standardized pagination parser, to be used for any endpoint that has pagination.
:param RequestParser parser: Can extend a given parser or create a new one
:param tuple sort_choices: A tuple of strings, to be used as server side attribute searches
    :param str default: The default sort string; defaults to `sort_choices[0]` if not given
:param bool add_sort: Add sort order choices without adding specific sort choices
:return: An api.parser() instance with pagination and sorting arguments.
"""
pagination = parser.copy() if parser else self.parser()
pagination.add_argument('page', type=int, default=1, help='Page number')
pagination.add_argument('per_page', type=int, default=50, help='Results per page')
if sort_choices or add_sort:
pagination.add_argument('order', choices=('desc', 'asc'), default='desc', help='Sorting order')
if sort_choices:
pagination.add_argument('sort_by', choices=sort_choices, default=default or sort_choices[0],
help='Sort by attribute')
return pagination
api_app = Flask(__name__, template_folder=os.path.join(__path__[0], 'templates'))
api_app.config['REMEMBER_COOKIE_NAME'] = 'flexget.token'
api_app.config['DEBUG'] = True
api_app.config['ERROR_404_HELP'] = False
api_app.url_map.strict_slashes = False
CORS(api_app, expose_headers='Link, Total-Count, Count, ETag')
Compress(api_app)
api = API(
api_app,
title='Flexget API v{}'.format(__version__),
version=__version__,
description='View and manage flexget core operations and plugins. Open each endpoint view for usage information.'
' Navigate to http://flexget.com/API for more details.',
format_checker=format_checker
)
base_message = {
'type': 'object',
'properties': {
'status_code': {'type': 'integer'},
'message': {'type': 'string'},
'status': {'type': 'string'}
},
'required': ['status_code', 'message', 'status']
}
base_message_schema = api.schema_model('base_message', base_message)
class APIError(Exception):
description = 'Server error'
status_code = 500
status = 'Error'
response_model = base_message_schema
def __init__(self, message=None, payload=None):
self.message = message
self.payload = payload
def to_dict(self):
rv = self.payload or {}
rv.update(status_code=self.status_code, message=self.message, status=self.status)
return rv
@classmethod
def schema(cls):
return cls.response_model.__schema__
class NotFoundError(APIError):
status_code = 404
description = 'Not found'
class Unauthorized(APIError):
status_code = 401
description = 'Unauthorized'
class BadRequest(APIError):
status_code = 400
description = 'Bad request'
class Conflict(APIError):
status_code = 409
description = 'Conflict'
class PreconditionFailed(APIError):
status_code = 412
description = 'Precondition failed'
class NotModified(APIError):
status_code = 304
    description = 'Not modified'
class ValidationError(APIError):
status_code = 422
description = 'Validation error'
response_model = api.schema_model('validation_error', {
'type': 'object',
'properties': {
'validation_errors': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'message': {'type': 'string', 'description': 'A human readable message explaining the error.'},
'validator': {'type': 'string', 'description': 'The name of the failed validator.'},
'validator_value': {
'type': 'string', 'description': 'The value for the failed validator in the schema.'
},
'path': {'type': 'string'},
'schema_path': {'type': 'string'},
}
}
}
},
'required': ['validation_errors']
})
verror_attrs = (
'message', 'cause', 'validator', 'validator_value',
'path', 'schema_path', 'parent'
)
def __init__(self, validation_errors, message='validation error'):
payload = {'validation_errors': [self._verror_to_dict(error) for error in validation_errors]}
super(ValidationError, self).__init__(message, payload=payload)
def _verror_to_dict(self, error):
error_dict = {}
for attr in self.verror_attrs:
if isinstance(getattr(error, attr), deque):
error_dict[attr] = list(getattr(error, attr))
else:
error_dict[attr] = str(getattr(error, attr))
return error_dict
empty_response = api.schema_model('empty', {'type': 'object'})
def success_response(message, status_code=200, status='success'):
rsp_dict = {
'message': message,
'status_code': status_code,
'status': status
}
rsp = jsonify(rsp_dict)
rsp.status_code = status_code
return rsp
@api.errorhandler(APIError)
@api.errorhandler(NotFoundError)
@api.errorhandler(ValidationError)
@api.errorhandler(BadRequest)
@api.errorhandler(Unauthorized)
@api.errorhandler(Conflict)
@api.errorhandler(NotModified)
@api.errorhandler(PreconditionFailed)
def api_errors(error):
return error.to_dict(), error.status_code
@with_session
def api_key(session=None):
log.debug('fetching token for internal lookup')
return session.query(User).first().token
def etag(method=None, cache_age=0):
"""
    A decorator that adds an ETag header to the response and checks the "If-Match" and "If-None-Match" headers to
return an appropriate response.
:param method: A GET or HEAD flask method to wrap
:param cache_age: max-age cache age for the content
    :return: The method's response with the ETag and Cache-Control headers set; raises a 412 error or returns a 304 response when a precondition fails
"""
# If called without method, we've been called with optional arguments.
# We return a decorator with the optional arguments filled in.
# Next time round we'll be decorating method.
if method is None:
return partial(etag, cache_age=cache_age)
@wraps(method)
def wrapped(*args, **kwargs):
# Identify if this is a GET or HEAD in order to proceed
assert request.method in ['HEAD', 'GET'], '@etag is only supported for GET requests'
rv = method(*args, **kwargs)
rv = make_response(rv)
        # Pagination headers can change even when the body does not, so mix them into the ETag input
content_headers = rv.headers.get('link', '') + rv.headers.get('count', '') + rv.headers.get('total-count', '')
data = (rv.get_data().decode() + content_headers).encode()
etag = generate_etag(data)
rv.headers['Cache-Control'] = 'max-age=%s' % cache_age
rv.headers['ETag'] = etag
if_match = request.headers.get('If-Match')
if_none_match = request.headers.get('If-None-Match')
if if_match:
etag_list = [tag.strip() for tag in if_match.split(',')]
if etag not in etag_list and '*' not in etag_list:
raise PreconditionFailed('etag does not match')
elif if_none_match:
etag_list = [tag.strip() for tag in if_none_match.split(',')]
if etag in etag_list or '*' in etag_list:
raise NotModified
return rv
return wrapped
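# A minimal usage sketch (the resource and payload are hypothetical):
#
#   class VersionAPI(APIResource):
#       @etag(cache_age=3600)
#       def get(self, session=None):
#           return jsonify({'flexget_version': __version__})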
def pagination_headers(total_pages, total_items, page_count, request):
"""
    Creates the `Link`, `Count` and `Total-Count` headers, to be used for pagination traversal
:param total_pages: Total number of pages
:param total_items: Total number of items in all the pages
:param page_count: Item count for page (may differ from page size request)
    :param request: The flask request used, required to build the URL and carry over the original query parameters.
:return:
"""
# Build constant variables from request data
url = request.url_root + request.path.lstrip('/')
per_page = request.args.get('per_page', 50)
page = int(request.args.get('page', 1))
# Build the base template
LINKTEMPLATE = '<{}?per_page={}&'.format(url, per_page)
    # Remove page and per_page from the query string
    query_string = re.sub(br'per_page=\d+', b'', request.query_string)
    query_string = re.sub(br'page=\d+', b'', query_string)
    query_string = re.sub(br'&{2,}', b'&', query_string)
# Add all original query params
LINKTEMPLATE += query_string.decode().lstrip('&') + '&page={}>; rel="{}"'
link_string = ''
if page > 1:
link_string += LINKTEMPLATE.format(page - 1, 'prev') + ', '
if page < total_pages:
link_string += LINKTEMPLATE.format(page + 1, 'next') + ', '
link_string += LINKTEMPLATE.format(total_pages, 'last')
return {
'Link': link_string,
'Total-Count': total_items,
'Count': page_count
}
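# A minimal usage sketch (counts are illustrative): inside a paginated GET
# handler one might do:
#
#   headers = pagination_headers(total_pages=4, total_items=200,
#                                page_count=50, request=request)
#   rsp = jsonify(items)
#   rsp.headers.extend(headers)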
|
jawilson/Flexget
|
flexget/api/app.py
|
Python
|
mit
| 13,897
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MetricName(Model):
"""The name of a metric.
:param value: The system name of the metric.
:type value: str
:param localized_value: The localized name of the metric.
:type localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(self, value=None, localized_value=None):
self.value = value
self.localized_value = localized_value
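# A minimal usage sketch (the metric names are illustrative):
#
#   metric = MetricName(value='Heartbeat', localized_value='Heartbeat')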
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/metric_name.py
|
Python
|
mit
| 1,023
|
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
import paths
from utils import saveAnimation, animateSubseqs, dataNearAnnotations
from utils import generateVideos, sectionsOfDataNearAnnotationsImpure
VIDS_DIR = 'vids'
# ------------------------------------------------ Public funcs
def getLabeledTsList(instancesPerTs=10, shuffle=False, padLen=25,
keepLabels=['Z', 'Z2'], includeZC=True, padJitter=175, addNoise=True):
r = Recording(padLen=padLen, includeZC=includeZC, addNoise=addNoise)
if includeZC:
# keepLabels = ['Z3'] # superset of Z2
keepLabels = ['Z', 'Z3', 'Z2', 'ZC'] # superset of Z2
tsList = sectionsOfDataNearAnnotationsImpure(r.data, r.rangeStartIdxs,
r.rangeEndIdxs, r.labels, instancesPerTs=instancesPerTs, shuffle=shuffle,
padLen=padLen, maxPadJitter=padJitter, keepLabels=keepLabels,
datasetName="dishwasherGroups")
return tsList
# ------------------------------------------------ File IO
def readFile(path):
return np.loadtxt(path, delimiter=',')
def readData():
return readFile(paths.DISHWASHER)[:, 1:] # first col is just timestamp
# return readFile(paths.DISHWASHER_SHORT)
# return readFile(paths.DISHWASHER_20K)
def readAnnotations():
try:
annotations = pd.read_csv(paths.DISHWASHER_LABELS,
delim_whitespace=True, header=None)
except IOError: # happens if not calling this from project root
        # if calling it from one level above the project root (for standalone
        # code), this fixes it
annotations = pd.read_csv(paths.DISHWASHER_LABELS_ALT,
delim_whitespace=True, header=None)
annotations.columns = ['start', 'end', 'label']
return annotations
# ------------------------------------------------ Old funcs
def generateVids():
"""main function used to generate videos for dataset annotation; see function
in utils for a much cleaner version. This function should not be used."""
# assumes header line has been removed; on unix, this can be done via:
# $ cat DWE.csv | tail -n +2 > dishwasher_nohead.csv
#
# Note that you will of course have to have the csv saved in the current
# directory for this (or the rest of this script) to work
appliance = 'dishwasher'
# appliance = 'washer'
# appliance = 'dryer'
path = appliance + '_nohead.csv'
ar = readFile(path)
# ar = ar[:(20*1000)]
print("{0} -> {1} array".format(path, ar.shape))
# # find rising edges and save where they are to help us annotate
# realPower = ar[:, 6]
# diffs = realPower[1:] - realPower[:-1]
# # idxs = np.where((realPower[1:] > 500) * (diffs > 100.))[0]
# idxs = np.where(diffs > 50.)[0]
# np.savetxt('idxs.txt', idxs, fmt="%d")
# return
# # plot a few particular indices we accidentally missed (bug is
# # fixed now, but I don't want to recreate all the videos)
# missedIdxs = [354950, 379950, 559950, 634950, 1014950]
# for idx in missedIdxs:
# xMin = idx - 100
# xMax = idx + 200
# xVals = np.arange(xMin, xMax, dtype=np.int)
# offset = int((xMax // 1e5) * 1e5) # floor to nearest 100k
# plt.figure(figsize=(8,6))
# plt.title("{0}, {1}-{2}".format(appliance.title(), xMin, xMax))
# plt.plot(xVals - offset, ar[xMin:xMax])
# plt.xlim([xMin - offset, xMax - offset])
# plt.ylim([0,1000])
# plt.show()
# return
step = 5*1000
windowLen = 300
epochSz = 100*1000 # 100k per epoch--mostly so xlabels stay legible
for epochNum, epochStartIdx in enumerate(range(0, len(ar), epochSz)):
epochEndIdx = epochStartIdx + epochSz
epochData = ar[epochStartIdx:epochEndIdx]
subdir = "{0}k-{1}k".format(epochStartIdx / 1000, epochEndIdx / 1000)
saveDir = VIDS_DIR + '/' + appliance + '/' + subdir + '/'
if not os.path.exists(saveDir):
os.makedirs(saveDir)
n = len(epochData)
for startIdx in range(0, n, step):
endIdx = min(startIdx + step, n)
absoluteStartIdx = epochStartIdx + startIdx
absoluteEndIdx = epochStartIdx + endIdx
data = epochData[startIdx:endIdx]
figName = appliance + "_{0}-{1}".format(absoluteStartIdx, absoluteEndIdx-1)
figPath = os.path.join(saveDir, figName + '.mp4')
anim = animateSubseqs(data, windowLen, figsize=(8,6),
dataName=appliance.title(), ylimits=[0,1000],
idxOffsetTitle=absoluteStartIdx, idxOffsetXLabel=startIdx)
saveAnimation(anim, figPath, fps=25)
# ------------------------------------------------ Data Structures
def addZC(startIdxs, endIdxs, labels, alsoZ3=True):
newStartIdxs = []
newEndIdxs = []
newLabels = []
for i in range(len(startIdxs) - 1):
# Z, C -> Z, ZC, Z3
# Z, Z2 -> Z
# Z2, C -> Z2, ZC, Z3
# Z, * -> Z, Z3
# Z2, * -> Z2, Z3
newStartIdxs.append(startIdxs[i])
newEndIdxs.append(endIdxs[i])
newLabels.append(labels[i])
isZ = labels[i] == 'Z'
isZ2 = labels[i] == 'Z2'
if not (isZ2 or isZ):
continue
nextIsZ2 = labels[i+1] == 'Z2'
if nextIsZ2:
continue
z2start = startIdxs[i]
cEnd = endIdxs[i+1] # end of (possible) C after the Z2
followedByC = (labels[i+1] == 'C') and (cEnd - endIdxs[i] < 150)
if followedByC:
newStartIdxs.append(z2start)
newEndIdxs.append(cEnd)
newLabels.append('ZC')
else:
cEnd = endIdxs[i] # just the end of the Z2
if alsoZ3:
newStartIdxs.append(z2start)
newEndIdxs.append(cEnd)
newLabels.append('Z3') # Z2 without a C or ZC
newStartIdxs = np.array(newStartIdxs, dtype=np.int)
newEndIdxs = np.array(newEndIdxs, dtype=np.int)
newLabels = np.array(newLabels, dtype=np.object)
return newStartIdxs, newEndIdxs, newLabels
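# A minimal sketch of addZC on hand-built annotations (values illustrative):
#
#   starts = np.array([0, 120], dtype=np.int)
#   ends = np.array([100, 200], dtype=np.int)
#   labels = np.array(['Z2', 'C'], dtype=np.object)
#   newStarts, newEnds, newLabels = addZC(starts, ends, labels)
#   # -> appends a combined 'ZC' span covering 0..200, because the C starts
#   #    within 150 samples of the Z2's end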
class Recording(object):
def __init__(self, shortened=False, padLen=100, just2=False, just3=False,
includeZC=True, addNoise=True):
self.data = readData()
if addNoise:
self.data += np.random.randn(self.data.shape[0], self.data.shape[1])
annos = readAnnotations()
# annos = annos[:22] # for dishwasher_20k
self.rangeStartIdxs = np.array(annos['start'], dtype=np.int)
self.rangeEndIdxs = np.array(annos['end'], dtype=np.int)
self.labels = np.array(annos['label'], dtype=np.str)
self.shortened = shortened or just2 or just3
self.just2 = just2 # only 2 examples
self.just3 = just3 # only 3 examples
if padLen >= 0:
self.padLen = padLen
else: # negative padLen -> set automatically
            self.padLen = np.mean(self.rangeEndIdxs - self.rangeStartIdxs) / 2.
if self.shortened:
self.data, self.rangeStartIdxs, self.rangeEndIdxs, = \
dataNearAnnotations(self.data, self.rangeStartIdxs,
self.rangeEndIdxs, self.padLen)
if self.just2 or self.just3:
whereZ2 = np.where(self.labels == 'Z2')[0]
if self.just2:
keepIdxs = whereZ2[:2]
self.data = self.data[:650] # after end of 2nd instance
# self.labels = np.zeros(2)
elif self.just3:
keepIdxs = whereZ2[:3]
self.data = self.data[:950] # after end of 3rd instance
# self.labels = np.zeros(3)
self.rangeStartIdxs = self.rangeStartIdxs[keepIdxs]
self.rangeEndIdxs = self.rangeEndIdxs[keepIdxs]
self.labels = self.labels[keepIdxs]
if includeZC: # also have Z2 followed by C as a pattern, since common
self.rangeStartIdxs, self.rangeEndIdxs, self.labels = addZC(
self.rangeStartIdxs, self.rangeEndIdxs, self.labels)
print "Dishwasher recording: data shape = ", self.data.shape
# print "Dishwasher annotations:"
# annotations = np.c_[self.rangeStartIdxs, self.rangeEndIdxs, self.labels]
# for anno in annotations:
# if 50 <= int(anno[0]) // 1000 < 60: print anno
def animate(self):
dataName = 'dishwasher'
if self.shortened:
dataName += '-short'
dataName += '-pad{}'.format(self.padLen)
if self.just2:
dataName += '-just2'
elif self.just3:
dataName += '-just3'
# generateVideos(self.data, dataName="dishwasher", saveInDir="figs")
# generateVideos(self.data, dataName="dishwasher-short", saveInDir="figs")
generateVideos(self.data, dataName=dataName, saveInDir="figs",
rangeStartIdxs=self.rangeStartIdxs, rangeEndIdxs=self.rangeEndIdxs,
rangeLabels=self.labels, ylimits=[0, 1000])
# ------------------------------------------------ Main
if __name__ == '__main__':
from doctest import testmod
testmod()
# r = Recording(shortened=True)
# r = Recording(shortened=False)
# r = Recording(just2=True)
# r = Recording(just3=True)
# r.animate()
# print np.median(r.data, axis=0)
# print np.mean(r.data, axis=0)
# plt.plot(r.data)
# plt.show()
#
# plot groups with 5 instances
#
# saveDir = 'figs/dishwasher_groups/'
# np.random.seed(123)
# tsList = getLabeledTsList(shuffle=True, includeZC=True, instancesPerTs=5)
# # note that grouping into 5s with this seed yields 2 pretty good visual
# # examples as the last group--one is missing part of the X
# for ts in tsList:
# # ts.plot(saveDir=saveDir, staggerHeights=False)
# ts.plot(saveDir=saveDir)
#
# plot groups with 3 instances
#
saveDir = 'figs/dishwasher_groups3/'
np.random.seed(123)
tsList = getLabeledTsList(shuffle=True, includeZC=True, instancesPerTs=3)
# note that grouping into 5s with this seed yields 2 pretty good visual
# examples as the last group--one is missing part of the X
for ts in tsList:
# ts.plot(saveDir=saveDir, staggerHeights=False)
ts.plot(saveDir=saveDir)
# annos = readAnnotations()
# whereZ2 = np.where(annos['label'] == 'Z2')[0]
# z2starts = np.array(annos['start'][whereZ2])
# z2ends = np.array(annos['end'][whereZ2])
# # print whereZ2, z2starts, z2ends
# lengths = z2ends - z2starts
# print lengths
# minIdx = np.argmin(lengths)
# print minIdx, z2starts[minIdx]
|
dblalock/dig
|
python/dig/datasets/dishwasher.py
|
Python
|
mit
| 9,404
|
"""
Authorize Sauce
===============
The secret sauce for accessing the Authorize.net API. The Authorize APIs for
transactions, recurring payments, and saved payments are all different and
awkward to use directly. Instead, you can use Authorize Sauce, which unifies
all three Authorize.net APIs into one coherent Pythonic interface. Charge
credit cards, easily!
::
>>> # Init the authorize client and a credit card
>>> from authorize import AuthorizeClient, CreditCard
>>> authorize = AuthorizeClient('285tUPuS', '58JKJ4T95uee75wd')
>>> cc = CreditCard('4111111111111111', '2018', '01', '911', 'Joe', 'Blow')
>>> card = client.card(cc)
>>> # Charge a card
>>> card.capture(100)
<AuthorizeTransaction 2171829470>
>>> # Save the card on Authorize servers for later
>>> saved_card = card.save()
>>> saved_card.uid
'7713982|6743206'
>>> # Use a saved card to auth a transaction, and settle later
>>> saved_card = client.saved_card('7713982|6743206')
>>> transaction = saved_card.auth(200)
>>> transaction.settle()
Saucy Features
--------------
* Charge a credit card
* Authorize a credit card charge, and settle it or release it later
* Credit or refund to a card
* Save a credit card securely on Authorize.net's servers
* Use saved cards to charge, auth and credit
* Create recurring charges, with billing cycles, trial periods, etc.
For the full documentation, please visit us at `Read the Docs`_. Thanks to
Chewse_ for supporting the development and open-sourcing of this library.
Authorize Sauce is released under the `MIT License`_.
.. _Read the Docs: http://authorizesauce.readthedocs.io/
.. _Chewse: https://www.chewse.com/
.. _MIT License: http://www.opensource.org/licenses/mit-license
"""
import os
from setuptools import setup
# Hard links don't work inside VirtualBox shared folders. In order to allow
# setup.py sdist to work in such an environment, this quick and dirty hack is
# used. See http://stackoverflow.com/a/22147112.
if os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':
del os.link
setup(
name='AuthorizeSauce',
version='0.5.0',
author='Jeff Schenck',
author_email='jmschenck@gmail.com',
url='http://authorizesauce.readthedocs.io/',
download_url='https://github.com/drewisme/authorizesauce',
description='An awesome-sauce Python library for accessing the Authorize.net API. Sweet!',
long_description=__doc__,
license='MIT',
install_requires=[
'suds-jurko>=0.6',
'six>=1.9.0',
],
packages=[
'authorize',
'authorize.apis',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: MIT License',
'Topic :: Office/Business :: Financial',
'Topic :: Internet :: WWW/HTTP',
],
)
|
drewisme/authorizesauce
|
setup.py
|
Python
|
mit
| 3,354
|
import socket
import sys
import random
import time
local_address = '127.0.0.1', 9005
remote_address = sys.argv[1]
connect_port = int(sys.argv[2])
mode = sys.argv[3]
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(local_address)
sock.connect((remote_address, connect_port))
if mode == 'read':
    while True:
        try:
            data = sock.recv(1024)
            print 'recv data ', data
        except Exception:
            pass
else:
    while True:
        try:
            data = str(random.randint(0, 10))
            sock.sendall(data)
            time.sleep(3)
        except Exception:
            pass
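# A minimal usage sketch (addresses and port are illustrative):
#
#   python mock.py 127.0.0.1 9006 read    # print whatever the peer sends
#   python mock.py 127.0.0.1 9006 write   # send a random digit every 3 seconds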
|
minminopk/PySnippet
|
tcp2tcp/mock.py
|
Python
|
mit
| 692
|
#!/usr/bin/env python
""" A unittest script for the HostEpigeneticsRawSeqSet module. """
import unittest
import json
import tempfile
from cutlass import HostEpigeneticsRawSeqSet
from CutlassTestConfig import CutlassTestConfig
from CutlassTestUtil import CutlassTestUtil
# pylint: disable=W0703, C1801
class HostEpigeneticsRawSeqSetTest(unittest.TestCase):
""" A unit test class for the HostEpigeneticsRawSeqSet class. """
session = None
util = None
@classmethod
def setUpClass(cls):
""" Setup for the unittest. """
# Establish the session for each test method
cls.session = CutlassTestConfig.get_session()
cls.util = CutlassTestUtil()
def testImport(self):
""" Test the importation of the HostEpigeneticsRawSeqSet module. """
success = False
try:
from cutlass import HostEpigeneticsRawSeqSet
success = True
except Exception:
pass
        self.assertTrue(success)
        self.assertIsNotNone(HostEpigeneticsRawSeqSet)
def testSessionCreate(self):
""" Test the creation of a HostEpigeneticsRawSeqSet via the session. """
success = False
seq_set = None
try:
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
success = True
except Exception:
pass
        self.assertTrue(success)
        self.assertIsNotNone(seq_set)
def testToJson(self):
"""
Test the generation of JSON from a HostEpigeneticsRawSeqSet
instance.
"""
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
success = False
comment = "Test comment"
private_files = False
seq_set.comment = comment
seq_set.private_files = private_files
seq_set_json = None
try:
seq_set_json = seq_set.to_json()
success = True
except Exception:
pass
self.assertTrue(success, "Able to use 'to_json'.")
self.assertTrue(seq_set_json is not None,
"to_json() returned data.")
parse_success = False
try:
data = json.loads(seq_set_json)
parse_success = True
except Exception:
pass
self.assertTrue(parse_success,
"to_json() did not throw an exception.")
self.assertTrue(data is not None,
"to_json() returned parsable JSON.")
self.assertTrue('meta' in data,
"JSON has 'meta' key in it.")
self.assertEqual(data['meta']['comment'],
comment,
"'comment' in JSON had expected value."
)
self.assertEqual(data['meta']['private_files'],
private_files,
"'private_files' in JSON had expected value."
)
def testId(self):
""" Test the id property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
self.assertTrue(seq_set.id is None,
"New template object has no ID.")
with self.assertRaises(AttributeError):
seq_set.id = "test"
def testVersion(self):
""" Test the version property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
self.assertTrue(seq_set.version is None,
"New template object has no version.")
with self.assertRaises(ValueError):
seq_set.version = "test"
def testAssayType(self):
""" Test the assay_type property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
self.util.stringTypeTest(self, seq_set, "assay_type")
self.util.stringPropertyTest(self, seq_set, "assay_type")
def testComment(self):
""" Test the comment property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
self.util.stringTypeTest(self, seq_set, "comment")
self.util.stringPropertyTest(self, seq_set, "comment")
def testExpLength(self):
""" Test the exp_length property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
self.util.intTypeTest(self, seq_set, "exp_length")
self.util.intPropertyTest(self, seq_set, "exp_length")
def testExpLengthNegative(self):
""" Test the exp_length property with an illegal negative value. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
with self.assertRaises(Exception):
seq_set.exp_length = -1
def testChecksums(self):
""" Test the checksums property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
success = False
checksums = {"md5": "asdf32qrfrae"}
try:
seq_set.checksums = checksums
success = True
except Exception:
pass
self.assertTrue(success, "Able to use the checksums setter")
self.assertEqual(seq_set.checksums['md5'], checksums['md5'],
"Property getter for 'checksums' works.")
def testFormat(self):
""" Test the format property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
self.util.stringTypeTest(self, seq_set, "format")
success = False
test_format = "fasta"
try:
seq_set.format = test_format
success = True
except Exception:
pass
self.assertTrue(success, "Able to use the 'format' setter")
self.assertEqual(seq_set.format, test_format,
"Property getter for 'format' works.")
def testFormatIllegal(self):
""" Test the format property with an illegal value. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
with self.assertRaises(Exception):
seq_set.format = "asbdasidsa"
def testFormatDoc(self):
""" Test the format_doc property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
self.util.stringTypeTest(self, seq_set, "format_doc")
self.util.stringPropertyTest(self, seq_set, "format_doc")
def testSequenceType(self):
""" Test the sequence_type property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
self.util.stringTypeTest(self, seq_set, "sequence_type")
success = False
sequence_type = "peptide"
try:
seq_set.sequence_type = sequence_type
success = True
except Exception:
pass
self.assertTrue(success, "Able to use the sequence_type setter")
self.assertEqual(seq_set.sequence_type, sequence_type,
"Property getter for 'sequence_type' works.")
def testSequenceTypeIllegal(self):
""" Test the sequence_type property with an illegal value. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
with self.assertRaises(Exception):
seq_set.sequence_type = "asbdasidsa"
def testSeqModel(self):
""" Test the seq_model property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
self.util.stringTypeTest(self, seq_set, "seq_model")
self.util.stringPropertyTest(self, seq_set, "seq_model")
def testSize(self):
""" Test the size property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
self.util.intTypeTest(self, seq_set, "size")
self.util.intPropertyTest(self, seq_set, "size")
def testSizeNegative(self):
""" Test the size property with an illegal negative value. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
with self.assertRaises(Exception):
seq_set.size = -1
def testStudy(self):
""" Test the study property with a legal value. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
success = False
study = "ibd"
try:
seq_set.study = study
success = True
except Exception:
pass
self.assertTrue(success, "Able to use the study setter")
self.assertEqual(seq_set.study, study,
"Property getter for 'study' works.")
def testStudyIllegal(self):
""" Test the study property with an illegal value. """
seq_set = self.session.create_object("host_wgs_raw_seq_set")
self.util.stringTypeTest(self, seq_set, "study")
with self.assertRaises(Exception):
seq_set.study = "adfadsf"
def testTags(self):
""" Test the tags property. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
tags = seq_set.tags
self.assertTrue(type(tags) == list,
"Object tags() method returns a list.")
self.assertEqual(len(tags), 0, "Template seq_set tags list is empty.")
new_tags = ["tagA", "tagB"]
seq_set.tags = new_tags
self.assertEqual(seq_set.tags, new_tags,
"Can set tags on a HostEpigeneticsRawSeqSet.")
json_str = seq_set.to_json()
doc = json.loads(json_str)
self.assertTrue('tags' in doc['meta'],
"JSON representation has 'tags' field in 'meta'.")
self.assertEqual(doc['meta']['tags'], new_tags,
"JSON representation had correct tags after setter.")
def testAddTag(self):
""" Test the add_tag() method. """
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
seq_set.add_tag("test")
self.assertEqual(seq_set.tags, ["test"],
"Can add a tag to a HostEpigeneticsRawSeqSet.")
json_str = seq_set.to_json()
doc = json.loads(json_str)
self.assertEqual(doc['meta']['tags'], ["test"],
"JSON representation had correct tags after add_tag().")
        # Adding the same tag again should raise rather than create a duplicate
with self.assertRaises(ValueError):
seq_set.add_tag("test")
json_str = seq_set.to_json()
doc2 = json.loads(json_str)
self.assertEqual(doc2['meta']['tags'], ["test"],
"JSON document did not end up with duplicate tags.")
def testRequiredFields(self):
""" Test the required_fields() static method. """
required = HostEpigeneticsRawSeqSet.required_fields()
self.assertEqual(type(required), tuple,
"required_fields() returns a tuple.")
self.assertTrue(len(required) > 0,
"required_field() did not return empty value.")
def testLoadSaveDeleteHostEpigeneticsRawSeqSet(self):
""" Extensive test for the load, edit, save and delete functions. """
# Attempt to save a HostEpigeneticsRawSeqSet at all points
# before and after adding the required fields
temp_file = tempfile.NamedTemporaryFile(delete=False).name
seq_set = self.session.create_object("host_epigenetics_raw_seq_set")
test_assay_type = "RRBS"
test_comment = "Test comment"
checksums = {"md5": "abdbcbfbdbababdbcbfbdbabdbfbcbdb"}
exp_length = 100
test_format = "fasta"
format_doc = "http://www.google.com"
test_seq_model = "center for sequencing"
size = 132
study = "ibd"
test_links = {"sequenced_from": []}
tag = "Test tag"
self.assertFalse(seq_set.save(),
"Not saved successfully, no required fields")
seq_set.comment = test_comment
self.assertFalse(seq_set.save(), "Not saved successfully")
seq_set.checksums = checksums
self.assertFalse(seq_set.save(), "Not saved successfully")
seq_set.links = test_links
self.assertFalse(seq_set.save(), "Not saved successfully")
seq_set.assay_type = test_assay_type
seq_set.exp_length = exp_length
seq_set.format_doc = format_doc
seq_set.format = test_format
seq_set.seq_model = test_seq_model
seq_set.local_file = temp_file
seq_set.size = size
seq_set.study = study
seq_set.add_tag(tag)
# Make sure seq_set does not delete if it does not exist
with self.assertRaises(Exception):
seq_set.delete()
self.assertTrue(seq_set.save() is True,
"HostEpigeneticsRawSeqSet was saved successfully")
# load the HostEpigeneticsRawSeqSet that was just saved
ss_loaded = self.session.create_object("host_epigenetics_raw_seq_set")
ss_loaded = ss_loaded.load(seq_set.id)
# Check all fields were saved and loaded successfully
self.assertEqual(seq_set.comment, ss_loaded.comment,
"Object comment saved & loaded successfully")
self.assertEqual(seq_set.size, ss_loaded.size,
"Object size saved & loaded successfully")
# seq_set is deleted successfully
self.assertTrue(seq_set.delete(),
"Object was deleted successfully")
# The seq_set of the initial ID should not load successfully
ss_test = self.session.create_object("host_epigenetics_raw_seq_set")
with self.assertRaises(Exception):
ss_test = ss_test.load(seq_set.id)
if __name__ == '__main__':
unittest.main()
|
ihmpdcc/cutlass
|
tests/test_host_epigenetics_raw_seq_set.py
|
Python
|
mit
| 13,839
|
##############################################################################
#
# Kennedy Institute of Rheumatology
#
# $Id$
#
# Copyright (C) 2015 Stephen Sansom
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
###############################################################################
"""
===========================
Pipeline cram2fastq
===========================
:Author: Stephen Sansom
:Release: $Id$
:Date: |today|
:Tags: Python
Overview
========
This pipeline coverts Sanger CRAM files to fastq.gz,
optionally quality trimming and reconciling the fastq files
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
CGATReport report requires a :file:`conf.py` and optionally a
:file:`cgatreport.ini` file (see :ref:`PipelineReporting`).
Default configuration files can be generated by executing:
python <srcdir>/pipeline_cram2fastq.py config
Input files
-----------
Requirements
------------
On top of the default CGAT setup, the pipeline requires the following
software to be in the path:
.. Add any additional external requirements such as 3rd party software
or R modules below:
Requirements:
* samtools >= 1.1
Pipeline output
===============
Glossary
========
Code
====
"""
from ruffus import *
import sys
import os
import glob
import sqlite3
from CGATCore import Experiment as E
from CGATCore import Pipeline as P
from CGATCore import Database as DB
import pysam
# -------------------------- < parse parameters > --------------------------- #
# load options from the config file
PARAMS = P.get_parameters(
["%s/pipeline.yml" % os.path.splitext(__file__)[0],
"../pipeline.yml",
"pipeline.yml"])
# ----------------------- < pipeline configuration > ------------------------ #
if len(sys.argv) > 1:
if(sys.argv[1] == "config") and __name__ == "__main__":
sys.exit(P.main(sys.argv))
# ------------------------< specific pipeline tasks >------------------------ #
@follows(mkdir("validate.cram.dir"))
@transform(glob.glob("data.dir/*.cram"),
regex(r".*/(.*).cram"),
[r"validate.cram.dir/\1.validate", r"validate.cram.dir/\1.quality"])
def validateCramFiles(infile, outfiles):
'''Validate CRAM files by exit status of
cramtools qstat. Save the quality scores of cram files.
'''
outfile, outfile_quality = outfiles
statement = '''temp_quality=`mktemp -p %(cluster_tmpdir)s`;
cramtools qstat -I %(infile)s > $temp_quality;
echo $? > %(outfile)s;
cat $temp_quality
| awk '{OFS="\\t"} {print $1,$2}'
> %(outfile_quality)s;
rm $temp_quality;
'''
P.run(statement)
@follows(validateCramFiles)
@merge(validateCramFiles,
"validate.cram.dir/summary.txt")
def inspectValidations(infiles, outfile):
'''Check that all crams pass validation or
raise an Error.'''
validation_files = [fn
for filenames in infiles
for fn in filenames
if fn.endswith(".validate")]
outfile_handle = open(outfile, "w")
exit_states = []
for validation_file in validation_files:
with open(validation_file, "r") as vf_handle:
exit_status = vf_handle.read().strip("\n")
exit_states.append(int(exit_status))
outfile_handle.write("\t".join([validation_file, exit_status])+"\n")
outfile_handle.close()
if sum(exit_states) != 0:
raise ValueError("One or more cram files failed validation")
@follows(validateCramFiles)
@merge(validateCramFiles,
"validate.cram.dir/cram_quality.load")
def loadCramQuality(infiles, outfile):
''' Load the quality scores for the different cells
into the database (summarized table).
'''
quality_files = [fn
for filenames in infiles
for fn in filenames
if fn.endswith(".quality")]
P.concatenate_and_load(quality_files, outfile,
regex_filename="validate.cram.dir/(.*).quality",
cat="track",
has_titles=False,
header="cramID,number_reads,cram_quality_score")
@follows(inspectValidations,
mkdir("cell.info.dir"))
@merge(glob.glob("data.dir/*.cram"),
"cell.info.dir/cells.txt")
def extractSampleInformation(infiles, outfile):
'''Make a table of cells and corresponding cram files'''
# build a dictionary of cell to cram file mappings
cells = {}
for cram_file in infiles:
cram = pysam.AlignmentFile(cram_file, "rc")
print(cram.header)
cell = cram.header["RG"][0]["SM"]
if cell not in cells.keys():
cells[cell] = [cram_file]
else:
cells[cell].append(cram_file)
cram.close()
# write out a per-cell list of cram files
outdir = os.path.dirname(outfile)
outfile_handle = open(outfile, "w")
outfile_handle.write("#cell\tcram_files\n")
for cell in cells.keys():
outfile_handle.write("\t".join([cell, ",".join(cells[cell])])+"\n")
outfile_handle.close()
@split(extractSampleInformation,
"cell.info.dir/*.cell")
def cellCramLists(infile, outfiles):
'''Make a per-cell file containing the cram file(s)
corresponding to the cell'''
out_dir = os.path.dirname(infile)
with open(infile, "r") as cell_list:
for record in cell_list:
if record.startswith("#"):
continue
cell, cram_list = record.strip("\n").split("\t")
crams = cram_list.split(",")
cell_outfile_name = os.path.join(out_dir, cell+".cell")
with open(cell_outfile_name, "w") as cell_file_handle:
for cram in crams:
cell_file_handle.write(cram+"\n")
@follows(mkdir("fastq.dir"),
mkdir("fastq.temp.dir"),
extractSampleInformation)
@transform(cellCramLists,
regex(r".*/(.*).cell"),
(r"fastq.dir/\1.fastq.1.gz",
r"fastq.dir/\1.fastq.2.gz"))
def cram2fastq(infile, outfiles):
'''Convert Sanger CRAM files to Fastq format
Takes care of merging, quality trimming
and pair reconciliation.
Intermediate files are not kept by default.'''
# TODO: make quality trimming optional.
###################################
# set variables and open a log file
###################################
cell_name = os.path.basename(infile)[:-len(".cell")]
out_dir = os.path.dirname(outfiles[0])
temp_dir = "fastq.temp.dir"
log_file = os.path.join(temp_dir,
cell_name + ".fastq.extraction.log")
log = open(log_file, "w")
log.write("Fastq extraction log file for %(infile)s\n\n")
def _merge_dicts(a, b):
x = a.copy()
x.update(b)
return(x)
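    # For example, _merge_dicts({"a": 1}, {"a": 2, "b": 3}) returns
    # {"a": 2, "b": 3}; it is used below to interpolate both PARAMS and
    # local variables into statement templates without mutating either dict.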
temp_files = []
# ##############################################
# Extract per-end Fastq(s) from the cram file(s)
# ##############################################
raw_fastq_names = []
with open(infile, "rb") as cram_files:
for line in cram_files:
cram = line.strip()
cram_basename = os.path.basename(cram)[:-len(".cram")]
raw_fastq_name = os.path.join(temp_dir, cram_basename)
raw_fastq_names.append(raw_fastq_name)
job_memory = PARAMS["preprocess_memory"]
statement = '''cramtools fastq
--enumerate
--reverse
-F %(raw_fastq_name)s
-I %(cram)s
--gzip
'''
log.write("Extracting fastqs from %(cram)s:" % locals() + "\n")
log.write(statement % locals() + "\n")
P.run(statement)
log.write("done.\n\n")
# ####################################
# Perform quality trimming
# Merging is also taken care of here.
# ####################################
quality = PARAMS["preprocess_quality_threshold"]
minlen = PARAMS["preprocess_min_length"]
trim = PARAMS["preprocess_trim"]
trimmed_fastq_prefix = os.path.join(temp_dir, cell_name)
trimmed_fastq_files = []
# fastq(s) for each end are quality trimmed separately
for end in ["_1", "_2"]:
raw_fastqs = [x + end + ".fastq.gz" for x in raw_fastq_names]
temp_files += raw_fastqs
fastq_list = " ".join(raw_fastqs)
trimmed_fastq_name = trimmed_fastq_prefix + end + ".trimmed.fastq.gz"
trimmed_fastq_files.append(trimmed_fastq_name)
log.write(">> Quality trimming %(fastq_list)s: " % locals() + "\n")
if trim:
statement = '''zcat %(fastq_list)s
| fastq_quality_trimmer
-Q33
-t %(quality)s
-l %(minlen)s
| gzip -c
> %(trimmed_fastq_name)s
'''
else:
statement = '''zcat %(fastq_list)s
| gzip -c
> %(trimmed_fastq_name)s
'''
log.write(statement % _merge_dicts(PARAMS, locals()) + "\n")
P.run(statement)
log.write("done. \n\n")
# ##################
# Reconcile the ends
# ##################
if PARAMS["preprocess_reconcile"] != "False":
temp_files += trimmed_fastq_files
end1, end2 = trimmed_fastq_files
reconciled_fastq_prefix = outfiles[0][:-len(".1.gz")]
log.write(">> Reconciling pairs, %(end1)s & %(end2)s: "
% locals() + "\n")
statement = '''python %(scriptsdir)s/fastqs2fastqs.py
%(end1)s %(end2)s
--method reconcile
--chop
--unpaired
-o "%(reconciled_fastq_prefix)s.%%s.gz";
'''
log.write(statement % _merge_dicts(PARAMS, locals()) + "\n")
P.run(statement)
log.write("done\n\n")
else:
trimmed_fastq_prefix = outfiles[0][:-len(".1.gz")]
for end in trimmed_fastq_files:
if "1.trimmed" in end:
endn = "1"
else:
endn = "2"
trimmed_end_name = ".".join([trimmed_fastq_prefix, endn, "gz"])
os.symlink(os.path.abspath(end), trimmed_end_name)
##############################
# Clean up the temporary files
##############################
if PARAMS["keep_temporary"] == 0:
temp_file_list = " ".join(temp_files)
# record files sizes and md5 checksums of the temporary files
log.write(">> Recording sizes and checksums of temporary files:\n")
statement = '''ls -l %(temp_file_list)s
> %(temp_dir)s/%(cell_name)s.ls;
checkpoint;
md5sum %(temp_file_list)s
> %(temp_dir)s/%(cell_name)s.md5;
'''
log.write(statement % locals() + "\n")
P.run(statement)
log.write("done\n\n")
# unlink (delete) the temporary files
log.write(">> unlinking temporary files: " + temp_file_list + "\n")
for temp_file in temp_files:
os.unlink(temp_file)
log.write("tempororay files unlinked\n")
log.close()
# ---------------------< generic pipeline tasks >---------------------------- #
@follows(cram2fastq, loadCramQuality)
def full():
pass
# ########################################################################### #
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
snsansom/xcell
|
pipelines/pipeline_cram2fastq.py
|
Python
|
mit
| 12,728
|
#!/usr/bin/env python
# coding=utf-8
import pylab as pl
import numpy as np
from matplotlib.legend_handler import HandlerLine2D
f = file("table3")
next(f)
next(f)
# take every other whitespace-separated field and evaluate it as a number
a = [map(eval, l.split()[::2]) for l in f]
a = [x for x in a if x[0] > 0 and x[3] == 25]
pl.figure(figsize=(10, 5), dpi=80)
pl.subplots_adjust(bottom=0.2, left=0.1, top=0.9, right=0.95)
hm = {}
for i, q in enumerate(sorted(set((x[0], x[1]) for x in a))):
X = [x[2] for x in a if tuple(x[:2]) == q]
Y = [x[5] for x in a if tuple(x[:2]) == q]
    l, = pl.plot(X, Y, "pos*hd"[i], label="%d core%s, %d thread%s" % (q[0], "s"*(q[0]!=1), q[1] + 1, "s"*(q[1]>0)))
hm[l] = HandlerLine2D(numpoints=1)
xticks = X
pl.xlabel(u"Taktfrequenz in MHz")
pl.ylabel(u"Stromstärke in mA")
pl.legend(loc='upper left', prop={"size": 12}, handler_map=hm)
pl.grid(True, which='major')
pl.xticks(xticks, [240, '', '', '', 360, '', '', 480, '', 600, '', '', '', 720, '', '', 816, '', 912, '', 1008])
pl.xlim(200, 1008 + 40)
#pl.ylim(200, 470)
pl.savefig("cubie-energy.pdf")
pl.show()
|
christoff-buerger/reat
|
index/messung/energy_chart.py
|
Python
|
mit
| 1,030
|
# encoding=utf-8
from functools import partial
from psi.app.models import Organization
from psi.app.utils.security_util import is_super_admin, is_root_organization
from flask_admin.contrib.sqla.fields import QuerySelectField
from flask_admin.form import Select2Widget
from flask_babelex import lazy_gettext, gettext
from flask_login import current_user
from sqlalchemy import func
from wtforms import ValidationError
from psi.app.views.base import CycleReferenceValidator
from psi.app.views.base import ModelViewWithAccess
class OrganizationAdmin(ModelViewWithAccess):
from psi.app.views.formatter import organization_formatter
uos = 'UPDATE ' + Organization.__tablename__ + ' SET'
@property
def can_create(self):
return is_super_admin()
@property
def can_delete(self):
return is_super_admin()
def get_query(self):
return self.get_query_based_on_user(self.model)
def get_count_query(self):
return self.get_query_based_on_user(func.count('*'))
def get_query_based_on_user(self, return_query):
all_ids = Organization.get_children_self_ids(current_user.organization)
return (self.session.query(return_query).filter(self.model.id.in_(all_ids))
if not is_super_admin() else self.session.query(return_query))
column_list = ('id', 'name', 'description', 'type', 'parent',
'immediate_children',)
column_sortable_list = ('id', 'name', 'description', 'type')
column_searchable_list = ('name', 'description', 'parent.name',
'parent.description', 'lft', 'rgt',
'type.code', 'type.display')
column_labels = dict(
id=lazy_gettext('id'),
name=lazy_gettext('Name'),
description=lazy_gettext('Description'),
parent=lazy_gettext('Parent Organization'),
lft=lazy_gettext('Left'),
rgt=lazy_gettext('Right'),
type=lazy_gettext('Type'),
immediate_children=lazy_gettext('Immediate Children'),
all_children=lazy_gettext('All Children'),
)
form_args = dict(
type=dict(query_factory=Organization.type_filter)
)
column_formatters = {
'immediate_children': organization_formatter,
'all_children': organization_formatter,
'parent': organization_formatter
}
column_editable_list = ('description',)
form_excluded_columns = ('lft', 'rgt',)
form_extra_fields = {
'parent': QuerySelectField(
label=lazy_gettext('Parent Organization'),
query_factory=lambda: Organization.query.all(),
widget=Select2Widget(),
allow_blank=False,
)
}
def edit_form(self, obj=None):
form = super(OrganizationAdmin, self).edit_form(obj)
        # form.parent._data_list is None at this moment, so it's not feasible
        # to change the _data_list attribute directly here; setting the
        # query_factory function is the right way to implement a filter.
form.parent.query_factory = partial(Organization.children_remover, obj)
# For root organization, allow blank
if is_root_organization(obj):
form.parent.allow_blank = True
# Does not allow to change type for root organization
delattr(form, "type")
return form
column_details_list = ('id', 'name', 'description', 'lft', 'rgt', 'parent', 'immediate_children', 'all_children')
def after_model_change(self, form, model, is_created):
"""
:param form: form object from the UI
        :param model: model; by the time after_model_change runs it already has its id field and the necessary default values from the DB.
:param is_created: True if model was created, False if model was updated
:return: None
        Update the left and right fields of all impacted organizations via raw SQL, and also set the left and right
        of the newly added organization to its parent's current right and current right + 1.
"""
from sqlalchemy import text
from psi.app.service import Info
db = Info.get_db()
str_id = getattr(form, "parent").raw_data[0]
int_id = int(str_id) if str_id is not None and str_id != u"__None" and len(str_id) > 0 else None
parent = db.session.query(Organization).get(int_id) if int_id is not None else None
        if is_created:  # newly created
            # update all existing nodes with right and left bigger than current parent's right - 1
if parent is not None:
model.parent = parent
elif parent is not None:
# Changing parent of a subtree or leaf.
# Refer to http://stackoverflow.com/questions/889527/move-node-in-nested-set for detail.
lft = model.lft
rgt = model.rgt
parent_rgt = parent.rgt
ts = int(rgt - lft + 1)
            # step 1: temporarily "remove" the moving node by flipping its lft and rgt to negative values
            # step 2: decrease left and/or right position values of currently 'lower' items (and parents)
            # step 3: increase left and/or right position values of future 'lower' items (and parents)
            # step 4: move the node (and its subtree) and update its parent item id
c = parent_rgt - ts if parent_rgt > rgt else parent_rgt
d = parent_rgt - rgt - 1 if parent_rgt > rgt else parent_rgt - rgt - 1 + ts
sql = ['{u} lft = 0 - lft, rgt = 0 - rgt where lft >= {lft} and rgt <={rgt}'.format(u=self.uos, lft=lft, rgt=rgt),
'{u} lft = lft - {ts} where lft > {rgt}'.format(u=self.uos, ts=ts, rgt=rgt),
'{u} rgt = rgt - {ts} where rgt > {rgt}'.format(u=self.uos, ts=ts, rgt=rgt),
'{u} lft = lft + {ts} where lft >= {c}'.format(ts=ts, c=c, u=self.uos),
'{u} rgt = rgt + {ts} where rgt >= {c}'.format(ts=ts, c=c, u=self.uos),
'{u} lft = 0-lft+{d}, rgt = 0-rgt + {d} where lft <= 0-{lft} and rgt >= 0-{rgt}'.format(d=d, lft=lft, rgt=rgt, u=self.uos)]
for s in sql:
db.engine.execute(text(s))
db.session.commit()
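    # Illustrative sketch of the four steps above (hypothetical lft/rgt
    # values, not taken from any real data): moving a leaf with lft=4,
    # rgt=5 (ts=2) under a parent whose rgt=9 first flips the leaf to
    # lft=-4, rgt=-5, shifts nodes past the old slot left by ts to close
    # the gap, shifts nodes at or beyond the destination right by ts to
    # open a new gap, and finally restores the leaf at its new position
    # using the offset d.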
def on_model_change(self, form, model, is_created):
"""Check whether the parent organization or child organization is same as the value being edited"""
super(OrganizationAdmin, self).on_model_change(form, model, is_created)
if (not is_root_organization(model)) and (getattr(form, "parent") is None or getattr(form, "parent")._data is None):
            raise ValidationError(gettext('Please specify a parent organization (creation of top-level organizations is not allowed)'))
CycleReferenceValidator.validate(form, model, object_type='Organization', parent='parent',
children='all_children', is_created=is_created)
def on_model_delete(self, model):
"""
Validate model with child organization should not be deleted
:param model: The model to delete
:return: None
"""
if len(model.all_children) > 0:
            raise ValidationError(gettext('Cannot delete an organization that has child organizations'))
def after_model_delete(self, model):
"""
Adjust left and right value for organizations in DB after deleting the model.
:param model: Model to delete
:return: None
"""
from sqlalchemy import text
from psi.app.service import Info
db = Info.get_db()
width = model.rgt - model.lft + 1
sql = text("{u} rgt = rgt-{w} WHERE rgt > {rgt};{u} lft = lft-{w} WHERE lft > {lft}".format(rgt=model.rgt, lft=model.lft, w=width, u=self.uos))
db.engine.execute(sql)
db.session.commit()
|
betterlife/psi
|
psi/app/views/organization.py
|
Python
|
mit
| 7,777
|
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and/or associated documentation files (the "Materials"), to
# deal in the Materials without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Materials, and to permit persons to whom the Materials
# are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER
# DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['library_visual_scenes', 'visual_scene']
tagName = 'instance_camera'
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _tagName, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.tagName = _tagName
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
        # No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
if (self.__assistant.GetResults() == False):
self.status_baseline = False
return False
# Compare the rendered images
self.__assistant.CompareRenderedImages(context)
# Compare number of geometry instances on import and export
self.__assistant.CompareElementCount(context, self.tagList, self.tagName)
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
    # To pass intermediate you need to pass basic; this object could also
    # include additional tests specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
    # To pass advanced you need to pass intermediate; this object could also
    # include additional tests specific to the advanced badge.
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, tagName, attrName, attrVal, dataToCheck);
|
KhronosGroup/COLLADA-CTS
|
StandardDataSets/collada/library_visual_scenes/visual_scene/node/instance_geometry/10_instance_of_same_geometry/10_instance_of_same_geometry.py
|
Python
|
mit
| 3,982
|
from i3pystatus import IntervalModule
import subprocess
class Xkblayout(IntervalModule):
"""Displays and changes current keyboard layout.
``change_layout`` callback finds the current layout in the
``layouts`` setting and enables the layout following it. If the
current layout is not in the ``layouts`` setting the first layout
is enabled.
``layouts`` can be stated with or without variants,
e.g.: status.register("xkblayout", layouts=["de neo", "de"])
"""
interval = 1
format = u"\u2328 {name}"
settings = (
("layouts", "List of layouts"),
)
layouts = []
on_leftclick = "change_layout"
def run(self):
kblayout = self.kblayout()
self.output = {
"full_text": self.format.format(name=kblayout).upper(),
"color": "#ffffff"
}
def change_layout(self):
layouts = self.layouts
kblayout = self.kblayout()
if kblayout in layouts:
position = layouts.index(kblayout)
try:
subprocess.check_call(["setxkbmap"] +
layouts[position + 1].split())
except IndexError:
subprocess.check_call(["setxkbmap"] + layouts[0].split())
else:
subprocess.check_call(["setxkbmap"] + layouts[0].split())
def kblayout(self):
kblayout = subprocess.check_output("setxkbmap -query", shell=True)\
.decode("utf-8").splitlines()
kblayout = [l.split() for l in kblayout]
kblayout = [l[1].strip() for l in kblayout
if l[0].startswith(("layout", "variant"))]
return (" ").join(kblayout)
|
eBrnd/i3pystatus
|
i3pystatus/xkblayout.py
|
Python
|
mit
| 1,690
|
# provide easy access to a few of the important interfaces
from .discordant import Discordant
from .logging import configure_logging
from .commands import *
from .events import *
|
jonnyli1125/discordant
|
discordant/__init__.py
|
Python
|
mit
| 179
|
# -*- coding: utf-8 -*-
from app.config import db_sql
from sqlalchemy import and_
from app.models import Usuario, Registro, Detalle_registro
class tiempo(object):
    """
    docstring for tiempo.
    """
    def __init__(self, id_registro, fecha_hora_entrada):
        self.id_registro = int(id_registro)
        self.fecha_hora_entrada = fecha_hora_entrada
        # usuario must be assigned before obten_entradas() is called
        self.usuario = None
    def obten_entradas(self):
        """
        Gets the maximum number of entries.
        """
        if self.usuario is None:
            raise ValueError("self.usuario has not been defined; call "
                             "self.set_usuario first")
        if self.usuario == 'admin':
            entradas = db_sql.session.query(Registro).filter(and_(
                Registro.fecha_hora_entrada <= '2017-03-10',
                Registro.fecha_hora_entrada >= '2017-03-10'))
            return entradas  # review
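# A minimal usage sketch (illustrative values; `usuario` must be set before
# obten_entradas() is called, e.g. via the set_usuario helper mentioned in
# the error message but not defined in this module):
#
#     t = tiempo(1, '2017-03-10 08:00:00')
#     t.usuario = 'admin'
#     entradas = t.obten_entradas()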
|
alanudg/SmartCheckIn
|
app/modules/analytics/ATiempo.py
|
Python
|
mit
| 876
|
import pybullet as p
import time
conid = p.connect(p.SHARED_MEMORY)
if (conid < 0):
p.connect(p.GUI)
p.setInternalSimFlags(0)
p.resetSimulation()
p.loadURDF("plane.urdf", useMaximalCoordinates=True)
p.loadURDF("tray/traybox.urdf", useMaximalCoordinates=True)
gravXid = p.addUserDebugParameter("gravityX", -10, 10, 0)
gravYid = p.addUserDebugParameter("gravityY", -10, 10, 0)
gravZid = p.addUserDebugParameter("gravityZ", -10, 10, -10)
p.setPhysicsEngineParameter(numSolverIterations=10)
p.setPhysicsEngineParameter(contactBreakingThreshold=0.001)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
for i in range(10):
for j in range(10):
for k in range(10):
ob = p.loadURDF("sphere_1cm.urdf", [0.02 * i, 0.02 * j, 0.2 + 0.02 * k],
useMaximalCoordinates=True)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
p.setGravity(0, 0, -10)
p.setRealTimeSimulation(1)
while True:
gravX = p.readUserDebugParameter(gravXid)
gravY = p.readUserDebugParameter(gravYid)
gravZ = p.readUserDebugParameter(gravZid)
p.setGravity(gravX, gravY, gravZ)
time.sleep(0.01)
|
MadManRises/Madgine
|
shared/bullet3-2.89/examples/pybullet/examples/manyspheres.py
|
Python
|
mit
| 1,108
|
__author__ = 'allentran'
|
allentran/fed-rates-bot
|
fed_bot/tests/__init__.py
|
Python
|
mit
| 25
|
import cet
import consts
__all__ = ['cet', 'consts']
|
realityone/CetTicket
|
libcet/__init__.py
|
Python
|
mit
| 53
|
# -*- coding: utf-8 -*-
#
# RollerworksSearch documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 02 16:57:26 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_theme/_exts'))
# adding PhpLexer
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sensio.sphinx.refinclude', 'sensio.sphinx.configurationblock', 'sensio.sphinx.phpcode']
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RollerworksSearch'
copyright = u'Rollerworks'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.0-alpha0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'RollerworksSearch'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'RollerworksSearchDoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'RollerworksSearch.tex', u'RollerworksSearch Documentation',
u'Sebastiaan Stok', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rollerworkssearch', u'RollerworksSearch Documentation',
[u'Sebastiaan Stok'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'RollerworksSearch', u'RollerworksSearch Documentation',
u'Sebastiaan Stok', 'RollerworksSearch', 'a light yet powerful search system',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# CUSTOM
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
html_theme_path = ['_theme']
html_theme = 'custom_rtd_theme'
pygments_style = 'native'
primary_domain = 'php'
highlight_language = 'php'
lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
lexers['php-standalone'] = PhpLexer(startinline=True)
lexers['php-symfony'] = PhpLexer(startinline=True)
api_url = 'http://rollerworks.github.io/search/doc/master/%s'
|
rollerworks/RollerworksSearch
|
docs/conf.py
|
Python
|
mit
| 8,572
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.line_option_order import LineOptionOrderData
from iris_sdk.models.line_option_order_response import LineOptionOrderResponse
XML_NAME_LINE_OPTION_ORDERS = "LineOptionOrder"
XPATH_LINE_OPTION_ORDERS = "/lineOptionOrders"
class LineOptionOrder(BaseResource, LineOptionOrderData):
"""
Establish Calling Name Display settings for a collection of TNs at a time
"""
_node_name = XML_NAME_LINE_OPTION_ORDERS
_save_post = True
_xpath = XPATH_LINE_OPTION_ORDERS
_xpath_save = _xpath
def __init__(self, parent=None, client=None):
super().__init__(parent, client)
LineOptionOrderData.__init__(self)
def save(self):
response = LineOptionOrderResponse(self._parent)
return self._post_data(response)
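# A minimal usage sketch (assumes an authenticated iris_sdk client object
# named `client`; the data fields come from LineOptionOrderData and are not
# shown here):
#
#     order = LineOptionOrder(client=client)
#     # ... populate the order's TN line options ...
#     response = order.save()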
|
scottbarstow/iris-python
|
iris_sdk/models/line_option_orders.py
|
Python
|
mit
| 962
|
"""
Contains functions to decode, encode and generate keys.
"""
import enum
import hashlib
import hmac
import libnacl.encode
import libnacl.public
import libnacl.secret
from .exception import GatewayKeyError
__all__ = (
'HMAC',
'Key',
)
class HMAC:
"""
A collection of HMAC functions used for the gateway service.
"""
keys = {
'email': b'\x30\xa5\x50\x0f\xed\x97\x01\xfa\x6d\xef\xdb\x61\x08\x41\x90\x0f'
b'\xeb\xb8\xe4\x30\x88\x1f\x7a\xd8\x16\x82\x62\x64\xec\x09\xba\xd7',
'phone': b'\x85\xad\xf8\x22\x69\x53\xf3\xd9\x6c\xfd\x5d\x09\xbf\x29\x55\x5e'
b'\xb9\x55\xfc\xd8\xaa\x5e\xc4\xf9\xfc\xd8\x69\xe2\x58\x37\x07\x23'
}
@staticmethod
def hash(message, hash_type):
"""
Generate the hash for a message type.
Arguments:
- `message`: A message.
- `hash_type`: `email` or `phone`.
Return a :class:`hmac.HMAC` instance.
"""
return hmac.new(HMAC.keys[hash_type], message.encode('ascii'), hashlib.sha256)
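# For example, a brief usage sketch (the address is illustrative; the digest
# depends on the secret keys above):
#
#     mac = HMAC.hash('test@threema.ch', 'email')
#     hex_digest = mac.hexdigest()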
class Key:
"""
Encode or decode a key.
"""
separator = ':'
@enum.unique
class Type(enum.Enum):
"""
The type of a key.
"""
private = 'private'
public = 'public'
@staticmethod
def decode(encoded_key, expected_type):
"""
Decode a key and check its type if required.
Arguments:
- `encoded_key`: The encoded key.
- `expected_type`: One of the types of :class:`Key.Type`.
Return the key as an :class:`libnacl.public.SecretKey` or
:class:`libnacl.public.PublicKey` instance.
"""
# Split key
try:
type_, key = encoded_key.split(Key.separator)
except ValueError as exc:
raise GatewayKeyError('Invalid key format') from exc
type_ = Key.Type(type_)
# Check type
if type_ != expected_type:
raise GatewayKeyError('Invalid key type: {}, expected: {}'.format(
type_, expected_type
))
# De-hexlify
key = libnacl.encode.hex_decode(key)
# Convert to SecretKey or PublicKey
if type_ == Key.Type.private:
key = libnacl.public.SecretKey(key)
elif type_ == Key.Type.public:
key = libnacl.public.PublicKey(key)
return key
@staticmethod
def encode(libnacl_key):
"""
Encode a key.
Arguments:
- `libnacl_key`: An instance of either a
:class:`libnacl.public.SecretKey` or a
:class:`libnacl.public.PublicKey`.
Return the encoded key.
"""
# Detect key type and hexlify
if isinstance(libnacl_key, libnacl.public.SecretKey):
type_ = Key.Type.private
key = libnacl_key.hex_sk()
elif isinstance(libnacl_key, libnacl.public.PublicKey):
type_ = Key.Type.public
key = libnacl.encode.hex_encode(libnacl_key.pk)
else:
raise GatewayKeyError('Unknown key type: {}'.format(libnacl_key))
# Encode key
return Key.separator.join((type_.value, key.decode('utf-8')))
@staticmethod
def generate_pair():
"""
Generate a new key pair.
Return the key pair as a tuple of a
:class:`libnacl.public.SecretKey` instance and a
:class:`libnacl.public.PublicKey` instance.
"""
private_key = libnacl.public.SecretKey()
public_key = libnacl.public.PublicKey(private_key.pk)
return private_key, public_key
@staticmethod
def generate_secret_key():
"""
Generate a new secret key box.
Return a tuple of the key's :class:`bytes` and hex-encoded
representation.
"""
box = libnacl.secret.SecretBox()
return box.sk, box.hex_sk()
@staticmethod
def derive_public(private_key):
"""
Derive a public key from a class:`libnacl.public.SecretKey`
instance.
Arguments:
- `private_key`: A class:`libnacl.public.SecretKey`
instance.
Return the :class:`libnacl.public.PublicKey` instance.
"""
return libnacl.public.PublicKey(private_key.pk)
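# A brief round-trip sketch of the helpers above (uses only names defined in
# this module):
#
#     private_key, public_key = Key.generate_pair()
#     encoded = Key.encode(public_key)           # 'public:<hex>'
#     decoded = Key.decode(encoded, Key.Type.public)
#     assert decoded.pk == public_key.pk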
|
lgrahl/threema-msgapi-sdk-python
|
threema/gateway/key.py
|
Python
|
mit
| 4,302
|