repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
avian2/jsonmerge | tests/test_jsonmerge.py | Python | mit | 69,662 | 0.001349 | # vim:ts=4 sw=4 expandtab softtabstop=4
import unittest
import warnings
from collections import OrderedDict
import jsonmerge
import jsonmerge.strategies
from jsonmerge.exceptions import (
HeadInstanceError,
BaseInstanceError,
SchemaError
)
from jsonmerge.jsonvalue import JSONValue
import jsonschema
try:
Draft6Validator = jsonschema.validators.Draft6Validator
except AttributeError:
Draft6Validator = None
warnings.simplefilter("always")
class TestMerge(unittest.TestCase):
def test_default(self):
schema = {}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, "b")
def test_overwrite(self):
schema = {'mergeStrategy': 'overwrite'}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, "b")
def test_version(self):
schema = {'mergeStrategy': 'version'}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "a"}, {'value': "b"}])
def test_version_does_not_duplicate(self):
# Don't record change if it didn't change
schema = {'mergeStrategy': 'version'}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "a"}, {'value': "b"}])
def test_version_meta(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a", merge_options={
'version': {'metadata': {'uri': 'http://example.com/a'}}})
base = merger.merge(base, "b", merge_options={
'version': {'metadata': {'uri': 'http://example.com/b'}}})
self.assertEqual(base, [
{'value': "a",
'uri': 'http://example.com/a'},
{'value': "b",
'uri': 'http://example.com/b'}])
def test_version_meta_not_obj(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
with self.assertRaises(SchemaError) as cm:
merger.merge(None, "a", merge_options={'version': {'metadata': 'foo'}})
def test_version_meta_deprecated(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
with warnings.catch_warnings(record=True) as w:
base = merger.merge(None, 'a', meta={'foo': 'bar'})
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
def test_version_ignoredups_false(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'ignoreDups': False}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a")
base = merger.merge(base, "a")
self.assertEqual(base, [{'value': "a"}, {'value': "a"}])
def test_version_unique_false(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'unique': False}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a")
base = merger.merge(base, "a")
self.assertEqual(base, [{'value': "a"}, {'value': "a"}])
def test_version_ignoredups_true(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a")
base = merger.merge(base, "a")
self.assertEqual(base, [{'value': "a"}])
def test_version_last(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'limit': 1}}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "b"}])
def test_version_base_not_a_list(self):
schema = {'mergeStrategy': 'version'}
base = "a"
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, "b", schema)
def test_version_base_not_a_list_of_objects(self):
schema = {'mergeStrategy': 'version'}
base = ["a"]
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, "b", schema)
def test_version_base_no_value_in_object(self):
schema = {'mergeStrategy': 'version'}
base = [{}]
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, "b", schema)
def test_version_base_empty_list(self):
schema = {'mergeStrategy': 'version'}
base = []
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': 'b'}])
def test_append(self):
schema = {'mergeStrategy': 'append'}
base = None
base = jsonmerge.merge(base, ["a"], schema)
base = jsonmerge.merge(base, ["b"], schema)
self.assertEqual(base, ["a", "b"])
def test_append_type_error(self):
schema = {'mergeStrategy': 'append'}
base = None
with self.assertRaises(HeadInstanceError) as cm:
jsonmerge.merge(base, "a", schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_append_type_error_base(self):
schema = {'mergeStrategy': 'append'}
base = "ab"
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, ["a"], schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_merge_default(self):
schema = {}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'b': "b"}, schema)
self.assertEqual(base, {'a': "a", 'b': "b"})
def test_merge_empty_schema(self):
schema = {}
base = None
base = jsonmerge.merge(base, {'a': {'b': 'c'}}, schema)
self.assertEqual(base, {'a': {'b': 'c'}})
def test_merge_trivial(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'b': "b"}, schema)
self.assertTrue(isinstance(base, dict))
self.assertEqual(base, {'a': "a", 'b': "b"})
def test_merge_null(self):
schema = {'mergeStrategy': 'objectMerge'}
base = {'a': 'a'}
head = {'a': None}
r = jsonmerge.merge(base, head, schema)
self.assertEqual(head, r)
def test_merge_type_error(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
with self.assertRaises(HeadInstanceError) as cm:
jsonmerge.merge(base, "a", schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_merge_type_error_base(self):
schema = {'mergeStrategy': 'objectMerge'}
base = "ab"
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, {'foo': 1}, schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_merge_overwrite(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'a | ': "b"}, schema)
self.assertEqual(base, {'a': "b"})
def test_merge_objclass(self):
schema = {'mergeStrategy': 'objectMerge', 'mergeOptions': { 'objClass': 'OrderedDict' | }}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, OrderedDict([('c', "a"), ('a', "a")]))
self.assertIsInstance(base, OrderedDict)
self.assertEqual([k for k in base], ['c', 'a'])
base = merger.merge(base, {'a': "b"})
self.assertIsInstance(base, OrderedDict)
self.assertEqual([k for k in base], ['c', 'a'])
self.assertEqual(base, {'a': "b", 'c': "a"})
def test_merge_objclass2( |
puhoy/storedDict | storedDict/storedDict.py | Python | mit | 2,106 | 0.001425 | import json
import logging
class AutoVivification(dict):
"""Implementation of perl's autovivification feature.
based on
http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python
"""
def __init__(self, storedDict, **kwargs):
super(AutoVivification, self).__init__(**kwargs)
self.storedDict = storedDict
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)(self.storedDict)
return value
def __setitem__(self, key, value):
super(AutoVivification, self).__setitem__(key, value)
if self.storedDict.autocommit:
self.storedDict.commit()
class StoredDict(dict):
def __init__(self, filename, autocommit=None, *args, **kwargs):
"""
initialize a new StoredDict
:param filename: filename to save everything in json format
:param autocommit: if True, commit on every change
:param args:
:param kwargs:
:return:
"""
self.filename = filename
self.autocommit = autocommit
try:
with open(filename, 'r') as f:
super(StoredDict,self).__init__(json.load(f))
except ValueError as e:
logging.error(e)
| super(StoredDict,self).__init__({})
except IOError as e:
logging.info('new file')
super(StoredDict, self).__init__({})
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
self[item] = AutoVivification(self)
ret | urn self[item]
def __setitem__(self, key, value):
super(StoredDict, self).__setitem__(key, value)
if self.autocommit:
self.commit()
def commit(self, indent=None):
try:
with open(self.filename, 'w+') as f:
json.dump(self, f, indent=indent)
except Exception as e:
logging.error('error while saving: %s' % e)
|
robclark/shadertoy-render | shadertoy-render.py | Python | bsd-3-clause | 7,005 | 0.004711 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Alex J. Champandard
# Copyright (c) 2015, Vispy Development Team.
# Copyright (c) 2015, Rob Clark
#
# Distributed under the (new) BSD License.
from __future__ import (unicode_literals, print_function)
import sys
import argparse
import datetime
import subprocess
import numpy
import vispy
from vispy import gloo
from vispy import app
import os
import requests
import imageio
import urllib.request, urllib.parse
import json
url = 'https://www.shadertoy.com/api/v1/shaders'
key = '?key=NdnKw7'
vertex = """
#version 120
attribute vec2 position;
void main()
{
gl_Position = vec4(position, 0.0, 1.0);
}
"""
fragment = """
#version 120
uniform vec3 iResolution; // viewport resolution (in pixels)
uniform float iGlobalTime; // shader playback time (in seconds)
uniform vec4 iMouse; // mouse pixel coords
uniform vec4 iDate; // (year, month, day, time in seconds)
uniform float iSampleRate; // sound sample rate (i.e., 44100)
uniform vec3 iChannelResolution[4]; // channel resolution (in pixels)
uniform float iChannelTime[4]; // channel playback time (in sec)
uniform float iTime;
%s
%s
void main()
{
mainImage(gl_FragColor, gl_FragCoord.xy);
}
"""
def get_idate():
now = datetime.datetime.now()
utcnow = datetime.datetime.utcnow()
midnight_utc = datetime.datetime.combine(utcnow.date(), datetime.time(0))
delta = utcnow - midnight_utc
return (now.year, now.month, now.day, delta.seconds)
class RenderingCanvas(app.Canvas):
def __init__(self, renderpass, size=None, rate=30.0, duration=None):
app.Canvas.__init__(self, keys='interactive', size=size, title='ShaderToy Renderer')
# Figure out our up-to-four inputs:
samplers = ""
for input in renderpass['inputs']:
#print(str(input))
t = input['ctype']
chan = input['channel'];
if t == "texture":
samp = "sampler2D"
elif t == "cubemap":
samp = "samplerCube"
elif t == "music":
# skip
continue
else:
raise Exception("Unknown sampler type: %s" % t)
samplers = samplers + ("\nuniform %s iChannel%d;" % (samp, chan))
glsl = fragment % (samplers, renderpass['code'])
#print(glsl)
self.program = gloo.Program(vertex, glsl)
self.program["position"] = [(-1, -1), (-1, 1), (1, 1), (-1, -1), (1, 1), (1, -1)]
self.program['iMouse'] = 0.0, 0.0, 0.0, 0.0
self.program['iSampleRate'] = 44100.0
for i in range(4):
self.program['iChannelTime[%d]' % i] = 0.0
self.program['iGlobalTime'] = 0.0
self.activate_zoom()
self._rate = rate
self._duration = duration
self._timer = app.Timer('auto', connect=self.on_timer, start=True)
# Fetch and setup input textures:
for input in renderpass['inputs']:
t = input['ctype']
chan = input['channel']
src = input['src']
print("Fetching texture: %s" % src)
if t == "texture":
img = imageio.imread("https://www.shadertoy.com/%s" % src)
tex = gloo.Texture2D(img)
elif t == "cubemap":
# NOTE: cubemap textures, the src seems to give only the first
# face, ie. cube04_0.png, and we have to infer cube04_1.png,
# to cube04_5.png for the remaining faces..
raise Exception("TODO: TextureCubeMap not implemented!")
elif t == "music":
# skip
continue
tex.interpolation = 'linear'
tex.wrapping = 'repeat'
self.program['iChannel%d' % chan] = tex
self.program['iChannelResolution[%d]' % chan] = img.shape
# TODO this doesn't seem to work with python3
#self.size = (size[0] / self.pixel_scale, size[1] / self.pixel_scale)
self.show()
def on_draw(self, event):
self.program['iGlobalTime'] += 1.0 / self._rate
self.program.draw()
if self._duration is not None and self.program['iGlobalTime'] >= self._duration:
app.quit()
def on_mouse_click(self, event):
imouse = event.pos + event.pos
self.program['iMouse'] = imouse
def on_mouse_move(self, event):
if event.is_dragging:
x, y = event.pos
px, py = event.press_event.pos
imouse = (x, self.size[1] - y, | px, self.size[1] - py)
self.program['iMouse'] = imouse
def on_timer(self, event):
self.update()
def on_resize(self, event):
self.activate_zoom()
def activate_zoom(sel | f):
gloo.set_viewport(0, 0, *self.physical_size)
self.program['iResolution'] = (self.physical_size[0], self.physical_size[1], 0.)
if __name__ == '__main__':
vispy.set_log_level('WARNING')
vispy.use(app='glfw')
parser = argparse.ArgumentParser(description='Render a ShaderToy script.')
parser.add_argument('id', type=str, help='Shadertoy shader id.')
parser.add_argument('--rate', type=int, default=30, help='Number of frames per second to render, e.g. 60 (int).')
parser.add_argument('--duration', type=float, default=None, help='Total seconds of video to encode, e.g. 30.0 (float).')
parser.add_argument('--size', type=str, default='1280x720', help='Width and height of the rendering, e.g. 1920x1080 (string).')
args = parser.parse_args()
resolution = [int(i) for i in args.size.split('x')]
print('Fetching shader: {}'.format(args.id))
# For some reason, this doesn't always work, so if needed try a
# different way:
try:
r = requests.get(url + '/' + args.id + key)
j = r.json()
s = j['Shader']
except KeyError:
alt_url = 'https://www.shadertoy.com/shadertoy'
headers = { 'Referer' : 'https://www.shadertoy.com/' }
values = { 's' : json.dumps ({'shaders' : [args.id]}) }
data = urllib.parse.urlencode (values).encode ('utf-8')
req = urllib.request.Request (alt_url, data, headers)
response = urllib.request.urlopen (req)
shader_json = response.read ().decode ('utf-8')
j = json.loads (shader_json)
s = j[0]
info = s['info']
print('Name: ' + info['name'])
print('Description: ' + info['description'])
print('Author: ' + info['username'])
# first renderpass seems to always be video (and second is audio if present.. we'll skip that..)
renderpass = s['renderpass'][0]
canvas = RenderingCanvas(renderpass,
size=resolution,
rate=args.rate,
duration=args.duration)
try:
canvas.app.run()
except KeyboardInterrupt:
pass
|
scripnichenko/nova | nova/api/openstack/compute/legacy_v2/versions.py | Python | apache-2.0 | 1,026 | 0 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this f | ile except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE- | 2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import versions
from nova.api.openstack.compute.views import versions as views_versions
from nova.api.openstack import wsgi
class VersionV2(object):
def show(self, req):
builder = views_versions.get_view_builder(req)
return builder.build_version(versions.VERSIONS['v2.0'])
def create_resource():
return wsgi.Resource(VersionV2())
|
coursemdetw/2014django | wsgi/openshift/settings.py | Python | gpl-2.0 | 2,745 | 0.00147 | """
Django settings for openshift project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# 準備要將資料檔案送到 data 目錄
import imp
ON_OPENSHIFT = False
if 'OPENSHIFT_REPO_DIR' in os.environ:
ON_OPENSHIFT = True
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
default_keys = { 'SECRET_KEY': 'tjy&7h%c=q01+c5i@_-t)&n2c+y*tn7v_)vbdksnlv@s5qh%e_' }
use_keys = default_keys
if ON_OPENSHIFT:
imp.find_module('openshiftlibs')
import openshiftlibs
use_keys = openshiftlibs.openshift_secure(default_keys)
SECRET_KEY = ')u(apy!ie&i)5o3@=s9*7+t+3vpmt9p8i!wr!&t+fs(qz)hk8i'
# SECURITY WARNING: don't run with debug turned on in production!
if ON_OPENSHIFT:
DEBUG = False
else:
DEBUG = True
TEMPLATE_DEBUG = True
if DEBUG:
ALLOWED_HOSTS = []
else:
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
if ON_OPENSHIFT: |
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.environ['OPENSHIFT_ | DATA_DIR'], 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static')
STATIC_URL = '/static/'
|
bashtage/statsmodels | statsmodels/tsa/base/prediction.py | Python | bsd-3-clause | 6,163 | 0 | import numpy as np
import pandas as pd
from scipy import stats
class PredictionResults(object):
"""
Prediction results
Parameters
----------
predicted_mean : {ndarray, Series, DataFrame}
The predicted mean values
var_pred_mean : {ndarray, Series, DataFrame}
The variance of the predicted mean values
dist : {None, "norm", "t", rv_frozen}
The distribution to use when constructing prediction intervals.
Default is normal.
df : int, optional
The degree of freedom parameter for the t. Not used if dist is None,
"norm" or a callable.
row_labels : {Sequence[Hashable], pd.Index}
Row labels to use for the summary frame. If None, attempts to read the
index of ``predicted_mean``
"""
def __init__(
self,
predicted_mean,
var_pred_mean,
dist=None,
df=None,
row_labels=None,
):
self._predicted_mean = np.asarray(predicted_mean)
self._var_pred_mean = np.asarray(var_pred_mean)
self._df = df
self._row_labels = row_labels
if row_labels is None:
self._row_labels = getattr(predicted_mean, "index", None)
self._use_pandas = self._row_labels is not None
if dist != "t" and df is not None:
raise ValueError('df must be None when dist is not "t"')
if dist is None or dist == "norm":
self.dist = stats.norm
self.dist_args = ()
elif dist == "t":
self.dist = stats.t
self.dist_args = (self._df,)
elif isinstance(dist, stats.distributions.rv_frozen):
self.dist = dist
self.dist_args = ()
else:
raise ValueError('dist must be a None, "norm", "t" or a callable.')
def _wrap_pandas(self, value, name=None, columns=None):
if not self._use_pandas:
return value
if value.ndim == 1:
return pd.Series(value, index=self._row_labels, name=name)
return pd.DataFrame(value, index=self._row_labels, columns=columns)
@property
def row_labels(self):
"""The row labels used in pandas-types."""
return self._row_labels
@property
def predicted_mean(self):
"""The predicted mean"""
return self._wrap_pandas(self._predicted_mean, "predicted_mean")
@property
def var_pred_mean(self):
"""The variance of the predicted mean"""
if self._var_pred_mean.ndim > 2:
return self._var_pred_mean
return self._wrap_pandas(self._var_pred_mean, "var_pred_mean")
@property
def se_mean(self):
"""The standard deviation of the predicted mean"""
ndim = self._var_pred_mean.ndim
if nd | im == 1:
values = np.sqrt(self._var_pred_mean)
elif ndim == 3:
values = np.sqrt(self._var_pred_mean.T.diagonal())
else:
raise NotImplementedError | ("var_pre_mean must be 1 or 3 dim")
return self._wrap_pandas(values, "mean_se")
@property
def tvalues(self):
"""The ratio of the predicted mean to its standard deviation"""
val = self.predicted_mean / self.se_mean
if isinstance(val, pd.Series):
val.name = "tvalues"
return val
def t_test(self, value=0, alternative="two-sided"):
"""
z- or t-test for hypothesis that mean is equal to value
Parameters
----------
value : array_like
value under the null hypothesis
alternative : str
'two-sided', 'larger', 'smaller'
Returns
-------
stat : ndarray
test statistic
pvalue : ndarray
p-value of the hypothesis test, the distribution is given by
the attribute of the instance, specified in `__init__`. Default
if not specified is the normal distribution.
"""
# assumes symmetric distribution
stat = (self.predicted_mean - value) / self.se_mean
if alternative in ["two-sided", "2-sided", "2s"]:
pvalue = self.dist.sf(np.abs(stat), *self.dist_args) * 2
elif alternative in ["larger", "l"]:
pvalue = self.dist.sf(stat, *self.dist_args)
elif alternative in ["smaller", "s"]:
pvalue = self.dist.cdf(stat, *self.dist_args)
else:
raise ValueError("invalid alternative")
return stat, pvalue
def conf_int(self, alpha=0.05):
"""
Confidence interval construction for the predicted mean.
This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the prediction interval.
The default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
pi : {ndarray, DataFrame}
The array has the lower and the upper limit of the prediction
interval in the columns.
"""
se = self.se_mean
q = self.dist.ppf(1 - alpha / 2.0, *self.dist_args)
lower = self.predicted_mean - q * se
upper = self.predicted_mean + q * se
ci = np.column_stack((lower, upper))
if self._use_pandas:
return self._wrap_pandas(ci, columns=["lower", "upper"])
return ci
def summary_frame(self, alpha=0.05):
"""
Summary frame of mean, variance and confidence interval.
Returns
-------
DataFrame
DataFrame containing four columns:
* mean
* mean_se
* mean_ci_lower
* mean_ci_upper
Notes
-----
Fixes alpha to 0.05 so that the confidence interval should have 95%
coverage.
"""
ci_mean = np.asarray(self.conf_int(alpha=alpha))
lower, upper = ci_mean[:, 0], ci_mean[:, 1]
to_include = {
"mean": self.predicted_mean,
"mean_se": self.se_mean,
"mean_ci_lower": lower,
"mean_ci_upper": upper,
}
return pd.DataFrame(to_include)
|
orvi2014/kitsune | kitsune/forums/forms.py | Python | bsd-3-clause | 3,129 | 0 | from django import forms
from tower import ugettext_lazy as _lazy
from kitsune.forums.models import Thread, Post
from kitsune.sumo.form_fields import StrippedCharField
MSG_TITLE_REQUIRED = _lazy(u'Please provide a title.')
MSG_TITLE_SHORT = _lazy(
u'Your title is too short (%(show_value)s characters). It must be '
u'at least %(limit_value)s characters.')
MSG_TITLE_LONG = _lazy(
u'Please keep the length of your title to %(limit_value)s characters'
u' or less. It is currently %(show_value)s characters.')
MSG_CONTENT_REQUIRED = _lazy(u'Please provide a message.')
MSG_CONTENT_SHORT = _lazy(
u'Your message is too short (%(show_value)s characters). It must be '
u'at least %(limit_value)s characters.')
MSG_CONTENT_LONG = _lazy(
u'Please keep the length of your message to %(limit_value)s '
u'characters or less. It is currently %(show_value)s characters.')
class ReplyForm(forms.ModelForm):
"""Reply form for forum threads."""
content = StrippedCharField(
min_length=5,
max_length=10000,
widget=forms.Textarea(attrs={'rows': 10, 'cols': | 80}),
error_messages={'required': MSG_CONTENT_REQUIRED,
'min_length': MSG_CO | NTENT_SHORT,
'max_length': MSG_CONTENT_LONG})
class Meta:
model = Post
fields = ('content', )
class NewThreadForm(forms.Form):
"""Form to start a new thread."""
title = StrippedCharField(min_length=5, max_length=255,
widget=forms.TextInput(attrs={'size': 80}),
error_messages={'required': MSG_TITLE_REQUIRED,
'min_length': MSG_TITLE_SHORT,
'max_length': MSG_TITLE_LONG})
content = StrippedCharField(
min_length=5,
max_length=10000,
widget=forms.Textarea(attrs={'rows': 30, 'cols': 76}),
error_messages={'required': MSG_CONTENT_REQUIRED,
'min_length': MSG_CONTENT_SHORT,
'max_length': MSG_CONTENT_LONG})
class EditThreadForm(forms.ModelForm):
"""Form to start a new thread."""
title = StrippedCharField(min_length=5, max_length=255,
widget=forms.TextInput(attrs={'size': 80}),
error_messages={'required': MSG_TITLE_REQUIRED,
'min_length': MSG_TITLE_SHORT,
'max_length': MSG_TITLE_LONG})
class Meta:
model = Thread
fields = ('title',)
class EditPostForm(forms.Form):
"""Form to edit an existing post."""
content = StrippedCharField(
min_length=5,
max_length=10000,
widget=forms.Textarea(attrs={'rows': 30, 'cols': 76}),
error_messages={'required': MSG_CONTENT_REQUIRED,
'min_length': MSG_CONTENT_SHORT,
'max_length': MSG_CONTENT_LONG})
class Meta:
model = Post
exclude = ('thread', 'author', 'updated', 'created', 'updated_by')
|
RokKos/FRI_Programiranje | OUI/planet-lia/rokkos/core/bot.py | Python | mit | 112 | 0 | class Bot:
def setup(self, initi | al_d | ata):
pass
def update(self, state, response):
pass
|
AstroPrint/AstroBox | src/astroprint/network/mac_dev.py | Python | agpl-3.0 | 5,835 | 0.033248 | # coding=utf-8
__author__ = "Daniel Arroyo <daniel@astroprint.com>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import logging
import threading
import time
import os
from octoprint.server import eventManager
from octoprint.events import Events
from octoprint.settings import settings
from astroprint.network import NetworkManager
class MacDevNetworkManager(NetworkManager):
def __init__(self):
self.logger = logging.getLogger(__name__)
self._online = False
self._storedWiFiNetworks = []
self._config = {
"autoConnect" : True,
"name": "astrobox-dev"
}
self._loadDevConfig()
self.name = self._config["name"]
if self._config['autoConnect']:
self._setActiveWifi(self.getWifiNetworks()[0])
super(MacDevNetworkManager, self).__init__()
def getActiveConnections(self):
wireless = None
wired = None
wired = {
'name': 'Wired Test',
'ip': '127.0.0.1:5000',
'mac': 'wi:re:d2:34:56:78:90',
}
if self._storedWiFiNetworks:
for n in self._storedWiFiNetworks:
if n['active']:
wireless = {
'id': 'localhost',
'signal': 80,
'name': n['name'],
'ip': '127.0.0.1:5000',
'mac': 'wi:fi:12:34:56:78:90',
'secured': True
}
return {
'wired': wired,
'wireless': wireless,
'manual': None
}
def storedWifiNetworks(self):
return self._storedWiFiNetworks
def deleteStoredWifiNetwork(self, networkId):
for i in range(0, len(self._storedWiFiNetworks)):
n = self._storedWiFiNetworks[i]
if n['id'] == networkId:
if n['active']:
self._goOffline()
eventManager.fire(Events.INTERNET_CONNECTING_STATUS, {'status': 'disconnected'})
del self._storedWiFiNetworks[i]
self.logger.info("Network [%s] with id [%s] deleted." % (n['name'], n['id']))
return n['id']
def hasWifi(self):
return True
def getWifiNetworks(self):
return [
{"id": "80:1F:02:F9:16:1B", "name": "Secured Network", "secured": True, "signal": 80, "wep": False},
{"id": "90:1F:02:F9:16:1C", "name": "Open Network", "secured": False, "signal": 78, "wep": False},
{"id": "74:DA:38:88:51:90", "name": "WEP Network", "secured": True, "signal": 59, "wep": True},
{"id": "C0:7B:BC:1A:5C:81", "name": "Open Failed", "secured": False, "signal": 37, "wep": False}
]
def setWifiNetwork(self, bssid, password):
for n in self.getWifiNetworks():
if n['id'] == bssid:
if n['secured']:
if not password or len(password) < 3:
self.logger.info("Missing password for a secured network")
time.sleep(2)
return {
'err_code': 'invalid_psk',
'message': 'Invalid Password'
}
elif password != 'pwd':
self.logger.info("Password invalid. Needs to be 'pwd'")
def action():
eventManager.fire(Events.INTERNET_CONNECTING_STATUS, {'status': 'connecting'})
time.sleep(2)
eventManager.fire(Events.INTERNET_CONNECTING_STATUS, {'status': 'failed', 'reason': "no_secrets"})
timer = threading.Timer(3, action)
timer.daemon = True
timer.start()
return {"name": n['name']}
else:
if n["id"] == 'C0:7B:BC:1A:5C:81':
self.logger.info("Open network with NO connection")
def action():
eventManager.fire(Events.INTERNET_CONNECTING_STATUS, {'status': 'connecting'})
time.sleep(2)
eventManager.fire(Events.INTERNET_CONNECTING_STATUS, {'status': 'failed', 'reason': "no_connection"})
timer = threading.Timer(3, action)
timer.daemon = True
timer.start()
return {"name": n['name']}
time.sleep(1)
return self._setActiveWifi(n)
def isAstroprintReachable(self):
return self.isOnline()
def checkOnline(self):
return self.isOnline()
def isOnline(self):
return self._online
def startHotspot(self):
#return True when succesful
return "Not supporded on Mac"
def stopHotspot(self):
#return True when succesful
return "Not supporded on Mac"
def getHostname(self):
return self.name
def setHostname(self, name):
self.name = name
self.logger.info('Host name is set to %s ' % name)
return True
@property
def activeIpAddress(self):
return '127.0.0.1'
@property
def networkDeviceInfo(self):
return [
{
'id': 'eth0',
'mac': 'wi:re:d2:34:56:78:90',
'type': 'wired',
'connected': True
},
{
'id': 'wlan0',
'mac': 'wi:fi:12:34:56:78:90',
'type': 'wifi',
'connected': False
}
]
def _goOnline(self):
self._online = T | rue
eventManager.fire(Events.NETWORK_STATUS, 'online')
def _goOffline(sel | f):
self._online = False
eventManager.fire(Events.NETWORK_STATUS, 'offline')
def _setActiveWifi(self, network):
self.logger.info("Selected WiFi: %s" % network['name'])
for n in self._storedWiFiNetworks:
n['active'] = False
self._storedWiFiNetworks.append({
'id': network['id'],
'name': network['name'],
'active': True
})
def action():
eventManager.fire(Events.INTERNET_CONNECTING_STATUS, {'status': 'connecting'})
time.sleep(1)
eventManager.fire(Events.INTERNET_CONNECTING_STATUS, {
'status': 'connected',
'info': {
'type': 'wifi',
'signal': network['signal'],
'name': network['name'],
'ip': '127.0.0.1:5000'
}
})
self._goOnline()
timer = threading.Timer(2, action)
timer.daemon = True
timer.start()
return {'name': network['name']}
def _loadDevConfig(self):
settings_file = "%s/mac-dev-network.yaml" % settings().getConfigFolder()
if os.path.isfile(settings_file):
import yaml
config = None
with open(settings_file, "r") as f:
config = yaml.safe_load(f)
if config:
def merge_dict(a,b):
for key in b:
if isinstance(b[key], dict):
merge_dict(a[key], b[key])
else:
a[key] = b[key]
merge_dict(self._config, config)
|
digwiz/engrss | write_rss_xml.py | Python | mit | 1,634 | 0.002448 | import os
from lxml import etree
# write_rss_xml writes name and date data for podcast RSS feeds to XML files
# contained in the relative path ./feeds. It is currently assumed that each
# podcast will have its data stored in a separate file.
def write_rss_xml(feed, feed_url, latest_title, latest_date):
    """Write one podcast's RSS metadata to feeds/<feed>_rss.xml.

    The XML document holds four elements under a <podcast> root: the feed
    name, its URL, the latest episode title and its date. The file is
    re-written from scratch on every call.
    """
    # Fixes: the Element tag was corrupted (" | podcast") and a stray '|'
    # marker sat in the first comment; also dropped the redundant close()
    # inside the `with` block (the context manager already closes the file).
    file_path = "feeds"
    file_name = feed + "_rss.xml"
    with open(os.path.join(file_path, file_name), "wb") as write_file:
        root = etree.Element("podcast")
        etree.SubElement(root, "name").text = feed
        etree.SubElement(root, "url").text = feed_url
        etree.SubElement(root, "episode").text = latest_title
        etree.SubElement(root, "date").text = latest_date
        # Serialize with declaration and pretty-printing for readability.
        out_xml = etree.tostring(root, xml_declaration=True, pretty_print=True)
        write_file.write(out_xml)
|
cournape/numscons | numscons/scons-local/scons-local-1.2.0/SCons/Tool/m4.py | Python | bsd-3-clause | 2,350 | 0.004255 | """SCons.Tool.m4
Tool-specific initialization for m4.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/m4.py 2009/09/04 16:33:07 david"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
    """Attach the M4 builder and its construction variables to *env*.

    Registers a ``M4`` builder for ``.m4`` sources and sets M4, M4FLAGS and
    the M4COM command line used to invoke it.
    """
    m4_action = SCons.Action.Action('$M4COM', '$M4COMSTR')
    m4_builder = SCons.Builder.Builder(action=m4_action, src_suffix='.m4')
    env['BUILDERS']['M4'] = m4_builder
    # .m4 files may include other files; instead of scanning them we cd into
    # the source directory and run m4 from there.
    # src_suffix chaining: file.c.m4 -> file.c, file.cpp.m4 -> file.cpp, etc.
    env['M4'] = 'm4'
    env['M4FLAGS'] = SCons.Util.CLVar('-E')
    env['M4COM'] = 'cd ${SOURCE.rsrcdir} && $M4 $M4FLAGS < ${SOURCE.file} > ${TARGET.abspath}'
def exists(env):
    """Return a truthy value when an ``m4`` executable can be detected."""
    detected = env.Detect('m4')
    return detected
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
RedhawkSDR/integration-gnuhawk | components/conjugate_cc/tests/test_conjugate_cc.py | Python | gpl-3.0 | 4,067 | 0.006885 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify is under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
    """Test for all component implementations in conjugate_cc.

    Exercises the SCA component lifecycle: launch, property query, port
    discovery, start/stop and release.
    """
    def testScaBasicBehavior(self):
        #######################################################################
        # Launch the component with the default execparams
        execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
        # Convert the CORBA Any values into plain Python values for launch().
        execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
        self.launch(execparams)
        #######################################################################
        # Verify the basic state of the component
        self.assertNotEqual(self.comp, None)
        self.assertEqual(self.comp.ref._non_existent(), False)
        self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
        #######################################################################
        # Validate that query returns all expected parameters
        # Query of '[]' should return the following set of properties
        expectedProps = []
        expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
        expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
        props = self.comp.query([])
        props = dict((x.id, any.from_any(x.value)) for x in props)
        # Query may return more than expected, but not less
        for expectedProp in expectedProps:
            self.assertEquals(props.has_key(expectedProp.id), True)
        #######################################################################
        # Verify that all expected ports are available
        for port in self.scd.get_componentfeatures().get_ports().get_uses():
            port_obj = self.comp.getPort(str(port.get_usesname()))
            self.assertNotEqual(port_obj, None)
            self.assertEqual(port_obj._non_existent(), False)
            self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"),  True)
        for port in self.scd.get_componentfeatures().get_ports().get_provides():
            port_obj = self.comp.getPort(str(port.get_providesname()))
            self.assertNotEqual(port_obj, None)
            self.assertEqual(port_obj._non_existent(), False)
            self.assertEqual(port_obj._is_a(port.get_repid()),  True)
        #######################################################################
        # Make sure start and stop can be called without throwing exceptions
        self.comp.start()
        self.comp.stop()
        #######################################################################
        # Simulate regular component shutdown
        self.comp.releaseObject()
    # TODO Add additional tests here
    #
    # See:
    #   ossie.utils.bulkio.bulkio_helpers,
    #   ossie.utils.bluefile.bluefile_helpers
    # for modules that will assist with testing components with BULKIO ports
# Entry point: run the full SCA test suite against the component descriptor.
if __name__ == "__main__":
    ossie.utils.testing.main("../conjugate_cc.spd.xml") # By default tests all implementations
|
deapplegate/wtgpipeline | dump_cat_filters.py | Python | mit | 3,166 | 0.00916 | #!/usr/bin/env python
#########################
# Read a catalog, and dump all filters that match
# Instrum-config-chipid-filter pattern
#########################
import utilities, sys, ldac, re
from optparse import OptionParser
##########################
__cvs_id__ = "$Id: dump_cat_filters.py,v 1.6 2010-10-05 22:34:57 dapple Exp $"
###########################
filter_patterns = [re.compile('^FLUX_ISO\d?-(.+)'), re.compile('^FLUX_APER\d?-(.+)'), re.compile('^MAG_APER\d?-(.+)')]
def extractFilter(fluxkey):
    """Return the filter name embedded in a FLUX/MAG column key, or None.

    Tries each pattern in filter_patterns in order and returns the first
    captured group.
    """
    for pattern in filter_patterns:
        match_obj = pattern.match(fluxkey)
        if match_obj is not None:
            return match_obj.group(1)
    return None
################################
def _isNotValidFilter(filter):
    """Return True when *filter* is None or rejected by utilities.parseFilter."""
    if filter is None:
        return True
    try:
        utilities.parseFilter(filter)
    except utilities.UnrecognizedFilterException:
        return True
    return False
###################################
#adam-tmp# START
def adam_dumpF | ilters(cat, appendAppers = False):
filters = []
for fluxkey in cat.keys():
filter = extractFilter(fluxkey)
if _isNotValidFilter(filter):
print "_isNotValidFilter"
continue
print "filter=",filter," fluxkey=",fluxkey
if appendAppers:
nApers = cat[fluxkey].shape[1]
| for i in xrange(nApers):
filter = '%s_A%d' % (filter, i)
if filter not in filters:
filters.append(filter)
elif filter not in filters:
filters.append(filter)
return filters
#cat1 = ldac.openObjectFile(fl1)
#cat2 = ldac.openObjectFile(fl2)
#adam-tmp# END
###################################
def dumpFilters(cat, appendAppers = False):
    """Collect the unique filter names found in a catalog's flux/mag columns.

    With appendAppers=True, aperture indices are appended to each name based
    on the column's second dimension.
    """
    filters = []
    for fluxkey in cat.keys():
        filter = extractFilter(fluxkey)
        if _isNotValidFilter(filter):
            continue
        if appendAppers:
            nApers = cat[fluxkey].shape[1]
            # NOTE(review): '%s_A%d' % (filter, i) re-binds `filter`, so the
            # suffixes accumulate (name_A0, name_A0_A1, ...). Looks like it
            # was meant to suffix the base name each time -- confirm intent.
            for i in xrange(nApers):
                filter = '%s_A%d' % (filter, i)
                if filter not in filters:
                    filters.append(filter)
        elif filter not in filters:
            filters.append(filter)
    return filters
###################################
ns=globals() #adam-tmp
def main(argv):
    """Parse args, open the catalog, and print one filter name per line."""
    parser = OptionParser(usage='dump_cat_filters.py <-a> cat')
    parser.add_option('-a', '--apers',
                      help='Append aperture numbers to filter names',
                      dest='appendAppers',
                      action='store_true',
                      default=False)
    options, args = parser.parse_args(argv)
    if len(args) != 1:
        parser.error('Specify catalog file!')
    catfile = args[0]
    cat = ldac.openObjectFile(catfile)
    filters = dumpFilters(cat, options.appendAppers)
    # Expose locals at module scope for interactive debugging (`ns=globals()`).
    ns.update(locals()) #adam-tmp
    for filter in filters:
        print filter
###################################
# Entry point: sanitize argv with the project helper, then run main().
if __name__ == '__main__':
    #print "sys.argv=",sys.argv
    from adam_quicktools_ArgCleaner import ArgCleaner
    argv=ArgCleaner()
    #print "argv=",argv
    main(argv[1:])
|
MounirMesselmeni/django | django/contrib/admin/sites.py | Python | bsd-3-clause | 21,392 | 0.001309 | from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.engine import Engine
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
system_check_errors = []
class AlreadyRegistered(Exception):
    """Raised by AdminSite.register() when a model is registered twice."""
    pass
class NotRegistered(Exception):
    """Raised when a model that was never registered is unregistered/looked up."""
    pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
_empty_value_display = '-'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
    def __init__(self, name='admin'):
        """Create an empty admin site; *name* is used for URL namespacing."""
        self._registry = {}  # model_class class -> admin_class instance
        self.name = name
        # Per-site actions start as a copy of the global defaults.
        self._actions = {'delete_selected': actions.delete_selected}
        self._global_actions = self._actions.copy()
    def register(self, model_or_iterable, admin_class=None, **options):
        """
        Registers the given model(s) with the given admin class.

        The model(s) should be Model classes, not instances.

        If an admin class isn't given, it will use ModelAdmin (the default
        admin options). If keyword arguments are given -- e.g., list_display --
        they'll be applied as options to the admin class.

        If a model is already registered, this will raise AlreadyRegistered.

        If a model is abstract, this will raise ImproperlyConfigured.
        """
        if not admin_class:
            admin_class = ModelAdmin
        # Accept either a single model class or any iterable of them.
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model._meta.abstract:
                raise ImproperlyConfigured('The model %s is abstract, so it '
                      'cannot be registered with admin.' % model.__name__)
            if model in self._registry:
                raise AlreadyRegistered('The model %s is already registered' % model.__name__)
            # Ignore the registration if the model has been
            # swapped out.
            if not model._meta.swapped:
                # If we got **options then dynamically construct a subclass of
                # admin_class with those **options.
                if options:
                    # For reasons I don't quite understand, without a __module__
                    # the created class appears to "live" in the wrong place,
                    # which causes issues later on.
                    options['__module__'] = __name__
                    admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
                # Instantiate the admin class to save in the registry
                admin_obj = admin_class(model, self)
                # Only custom ModelAdmins are checked here; plain ModelAdmin
                # is assumed valid.
                if admin_class is not ModelAdmin and settings.DEBUG:
                    system_check_errors.extend(admin_obj.check())
                self._registry[model] = admin_obj
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def is_registered(self, model):
"""
Check if a model class is registered with this `AdminSite`.
"""
return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
    @property
    def actions(self):
        """
        Get all the enabled actions as an iterable of (name, func).
        """
        # six.iteritems keeps this a lazy iterator on both Python 2 and 3.
        return six.iteritems(self._actions)
    @property
    def empty_value_display(self):
        """Placeholder string displayed for empty values (defaults to '-')."""
        return self._empty_value_display
    @empty_value_display.setter
    def empty_value_display(self, empty_value_display):
        self._empty_value_display = empty_value_display
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that admin and contenttypes apps are
installed, as well as the auth context processor.
"""
if not apps.is_installed('django.contrib.admin'):
raise ImproperlyConfigu | red(
"Put 'django.contrib.admin' in your INSTALLED_APPS "
"setting in order to use the admin application.")
if not apps.is_installed('django.contrib.contenttypes'):
raise ImproperlyConfigured(
"Put 'django.contrib.contenttypes' in your INSTALLED_APPS "
"setting in order to use the admin application.")
try:
default_template_ | engine = Engine.get_default()
except Exception:
# Skip this non-critical check:
# 1. if the user has a non-trivial TEMPLATES setting and Django
# can't find a default template engine
# 2. if anything goes wrong while loading template engines, in
# order to avoid raising an exception from a confusing location
# Catching ImproperlyConfigured suffices for 1. but 2. requires
# catching all exceptions.
pass
else:
if ('django.contrib.auth.context_processors.auth'
not in default_template_engine.context_processors):
raise ImproperlyConfigured(
"Enable 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATES setting in order to use the admin "
"application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
Y |
3dfxsoftware/cbss-addons | webdemo/__init__.py | Python | gpl-2.0 | 16 | 0.0625 | #i | mpor | t webdemo
|
viliusl/dockery | objects/myparser.py | Python | apache-2.0 | 246 | 0.01626 | import sys, argparse
class MyParser(argparse.ArgumentParser):
    """ArgumentParser that reports errors with a short one-line message.

    Fix: the class statement was corrupted ("MyParser( | argparse...") and a
    stray '|' marker preceded the commented-out print_help() line; restored.
    """
    def error(self, message):
        '''Write a brief error to stderr and exit with status 2.'''
        sys.stderr.write('error: %s\n' % message)
        sys.exit(2)
|
cinepost/Copperfield_FX | copper/core/parameter/parameter.py | Python | unlicense | 6,311 | 0.036286 | import logging
from PyQt5 import QtCore
from collections import OrderedDict
from copper.core.utils.copper_string import CopperString
from .parm_template import ParmTemplate, ParmLookScheme, ParmNamingScheme, ParmTemplateType, StringParmType
logger = logging.getLogger(__name__)
# Keyframe interpolation type identifiers (used by CopperKeyframe.in_type/out_type).
CopperLinear = 0
CopperBezier = 2
# Legacy parameter-type aliases, kept for reference while disabled.
#CopperParmInt = int
#CopperParmInt2 = CopperInt2
#CopperParmInt3 = CopperInt3
#CopperParmInt4 = CopperInt4
#CopperParmBool = bool
#CopperParmFloat = float
#CopperParmFloat2 = CopperFloat2
#CopperParmFloat3 = CopperFloat3
#CopperParmFloat4 = CopperFloat4
#CopperParmString = CopperString
#CopperParmOpPath = "oppath"
#CopperParmFile = str
#CopperParmButton = "button"
#CopperParmOrderedMenu = "menu"
class CopperKeyframe(object):
    """A single (time, value) animation key with linear in/out interpolation."""
    def __init__(self, engine, time=None, value=None):
        # `engine` is accepted for API symmetry but not stored here.
        self.v = value          # keyed value
        self.f = None           # frame number, set separately via setFrame()
        self.t = time           # time in seconds
        self.in_type = CopperLinear
        self.out_type = CopperLinear
    def value(self):
        """Return the keyed value."""
        return self.v
    def setValue(self, time, value):
        # NOTE(review): `time` is accepted but ignored -- only the value is
        # updated; confirm whether self.t should also change here.
        self.v = value
    def dump(self):
        """Serialize this key as a plain {'t': ..., 'v': ...} dict."""
        return { "t": self.t, "v": self.v }
    def frame(self):
        """Return the frame number (None until setFrame() is called)."""
        return self.f
    def setFrame(self, frame):
        self.f = frame
    def setTime(self, time):
        self.t = time
    def __repr__(self):
        return "<CopperKeyframe: t:%s v:%s>" % (self.t, self.v)
class ParmSignals(QtCore.QObject):
    """Qt signal container for a CopperParameter."""
    # Emitted after the parameter's value changes, so the GUI can refresh.
    parameterChanged = QtCore.pyqtSignal()
    # Emitted to request a value change; presumably fired by the GUI
    # ParameterWidget, possibly by other callers too -- TODO confirm.
    setParameter = QtCore.pyqtSignal(object)
    def __init__(self):
        QtCore.QObject.__init__(self)
class CopperParameter(QtCore.QObject):
    def __init__(self, node, name, parm_template, default_value=None, callback = None, spare=True):
        """Create a parameter owned by *node*, described by *parm_template*."""
        QtCore.QObject.__init__(self)
        self.__keyframes__ = []          # animation keys; empty means constant
        self.value = default_value       # constant value when not animated
        self._node = node
        self._name = name
        self._parm_template = parm_template
        self._spare = spare
        self.signals = ParmSignals()
        # connect signals
        self.signals.setParameter.connect(self._setParameter)
    def isSpare(self):
        """Return the spare flag given at construction."""
        return self._spare
    def parmTemplate(self) -> ParmTemplate:
        '''
        Returns the template for this parameter.
        '''
        return self._parm_template
    def node(self):
        """Return the node that owns this parameter."""
        return self._node
    def name(self):
        """Return this parameter's name."""
        return self._name
    def path(self):
        """Return '<owning node path>/<parameter name>'."""
        return "%s/%s" % (self.node().path(), self.name())
    def dump(self):
        """Serialize: list of keyframe dicts when animated, else the raw value."""
        if self.animated():
            return [key.dump() for key in self.__keyframes__]
        else:
            return self.value
    def menuItems(self):
        """Return the menu item ids; only valid for Menu-type parameters."""
        if self.parmTemplate().type() is ParmTemplateType.Menu:
            return self.parmTemplate().menuItems()
        else:
            # NOTE(review): BaseException escapes `except Exception` handlers;
            # a ValueError/TypeError would be more conventional -- confirm
            # no caller relies on catching BaseException before changing.
            raise BaseException("Cannot get menu items for a non-menu parm")
    def menuLabels(self):
        """Return the menu labels; only valid for Menu-type parameters."""
        if self.parmTemplate().type() is ParmTemplateType.Menu:
            return self.parmTemplate().menuLabels()
        else:
            raise BaseException("Cannot get menu values for a non-menu parm")
    def pressButton(self):
        """Invoke the template callback for Button-type parameters (no-op otherwise)."""
        if self.parmTemplate().type() is ParmTemplateType.Button:
            self.parmTemplate().callback()
def invalidateNode(self):
self.node.invalidate()
# call this method to force recook node. e.g. parameter changed
#def set(self, value):
# self.value = value
# self.invalidateNode()
# logger.debug("Parameter value set to: %s of type %s" % (self.value, type(self.value)))
def animated(self):
if self.__keyframes__:
return True
else:
return False
def eval(self):
if self.animated():
# Animated parameter
return self.evalAtTime(self.__node__.engine.time())
else:
# Constant parameter
return self.value
    def evalAsInt(self):
        """Evaluate and coerce to int."""
        return int(self.eval())
    def evalAsFloat(self):
        """Evaluate and coerce to float."""
        return float(self.eval())
    def evalAsBool(self):
        """Evaluate and coerce to bool."""
        return bool(self.eval())
    def evalAsString(self):
        """Evaluate as a string; menu parms map their index to the item id,
        other values are variable-expanded via CopperString."""
        if self.parmTemplate().type() == ParmTemplateType.Menu:
            return self.menuItems()[self.eval()]
        return CopperString(self.eval()).expandedString()
    def evalAtTime(self, time):
        """Linearly interpolate the keyframe curve at *time*.

        Outside the keyed range the nearest key's value is returned
        (constant extrapolation).
        """
        # Keys at or before / at or after the requested time, sorted by time.
        lesser_keys = sorted([k for k in self.__keyframes__ if k.t <= time], key=lambda x: x.t)
        greater_keys = sorted([k for k in self.__keyframes__ if k.t >= time], key=lambda x: x.t)
        #self.log("lesser_keys: %s" % ["t:%s, v:%s ; "%(key.t, key.value()) for key in lesser_keys])
        #self.log("greater_keys: %s" % ["t:%s, v:%s ; "%(key.t, key.value()) for key in greater_keys])
        if lesser_keys:
            left_k = lesser_keys[-1]
        else:
            left_k = None
        if greater_keys:
            right_k = greater_keys[0]
        else:
            right_k = None
        # NOTE(review): self.log is not defined on this class in the visible
        # code -- presumably provided elsewhere; confirm before relying on it.
        if not left_k:
            # no interpolation
            self.log("No interpolation. Using closest right key at time %s with value %s" % (right_k.t, right_k.value()))
            #self.log(["t:%s,v:%s ; " % (key.t, key.value()) for key in self.__keyframes__])
            return right_k.value()
        if not right_k:
            # no interpolation
            self.log("No interpolation. Using closest left key at time %s with value %s" % (left_k.t, left_k.value()))
            #self.log(["t:%s,v:%s ; " % (key.t, key.value()) for key in self.__keyframes__])
            return left_k.value()
        # Exact hit on a key (avoids division by zero below).
        if right_k.t == left_k.t:
            return left_k.value()
        # Linear blend weighted by distance to each neighboring key.
        min_w = (time - left_k.t) / (right_k.t - left_k.t)
        max_w = (right_k.t - time) / (right_k.t - left_k.t)
        interp = min_w * right_k.value() + max_w * left_k.value()
        self.log("Interpolated value is %s" % interp)
        return interp
        #raise BaseException("Unimplemented evalAtTime(self, time) in %s" % self)
    def evalAtFrame(self, frame):
        """Evaluate at a given frame. Not implemented yet."""
        raise NotImplementedError
    def evalAsStringAtFrame(self, frame):
        # NOTE(review): `frame` is currently ignored -- this evaluates at the
        # current time; confirm whether frame-accurate eval is intended here.
        return self.evalAsString()
    def unexpandedString(self):
        """Return the raw evaluated value as a string, without expansion."""
        return str(self.eval())
    @QtCore.pyqtSlot(object)
    def _setParameter(self, value):
        """Qt slot: forward a setParameter signal payload to set()."""
        self.set(value)
    def set(self, value):
        """Set the parameter: a list/tuple of {'t','v'} dicts installs
        keyframes, any other value sets a constant (only when not animated)."""
        if type(value) in [list, tuple]:
            # set up animated parameter
            for key in value:
                keyframe = CopperKeyframe(self.node().engine, time=key["t"], value=key["v"])
                self.setKeyframe(keyframe)
        else:
            # set up single parameter value
            if self.__keyframes__:
                # Animated parameter
                raise BaseException("Unable to set parm that contains curve animation !!! Use addKeyFrame(time, key) instead !!!")
            else:
                # Constant parameter
                self.value = value
                self.signals.parameterChanged.emit() # emit signal to GUI
        self.node().setModified(True) # This is important ! We need to say node that it needs to recook itself when needed, because some parameter was changed
def setKeyfram | e(self, keyframe):
self.__keyframes__.append(keyframe)
#def __str__(self):
# return self.value |
Azure/azure-sdk-for-python | sdk/storage/azure-storage-queue/tests/settings/settings_fake.py | Python | mit | 647 | 0 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# Fake, non-functional credentials used for recorded (playback) test runs.
STORAGE_ACCOUNT_NAME = "fakename"
STORAGE_ACCOUNT_KEY = "fakekey"
# Zeroed AAD identifiers -- placeholders only, never valid.
TENANT_ID = "00000000-0000-0000-0000-000000000000"
CLIENT_ID = "00000000-0000-0000-0000-000000000000"
CLIENT_SECRET = "00000000-0000-0000-0000-000000000000"
ACCOUNT_URL_SUFFIX = 'core.windows.net'
# NOTE: these flags are strings, not booleans -- kept as in the original.
RUN_IN_LIVE = "False"
SKIP_LIVE_RECORDING = "True"
PROTOCOL = "https"
viblo/pymunk | pymunk/pyglet_util.py | Python | mit | 8,494 | 0.001766 | # ----------------------------------------------------------------------------
# pymunk
# Copyright (c) 2007-2016 Victor Blomqvist
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------
"""This submodule contains helper functions to help with quick prototyping
using pymunk together with pyglet.
Intended to help with debugging and prototyping, not for actual production use
in a full application. The methods contained in this module is opinionated
about your coordinate system and not very optimized (they use batched
drawing, but there is probably room for optimizations still).
"""
__docformat__ = "reStructuredText"
import math
from typing import TYPE_CHECKING, Any, Optional, Sequence, Tuple, Type
import pyglet # type: ignore
import pymunk
from pymunk.space_debug_draw_options import SpaceDebugColor
from pymunk.vec2d import Vec2d
if TYPE_CHECKING:
from types import TracebackType
class DrawOptions(pymunk.SpaceDebugDrawOptions):
    def __init__(self, **kwargs: Any) -> None:
        """Draw a pymunk.Space.

        Typical usage::

        >>> import pymunk
        >>> import pymunk.pyglet_util
        >>> s = pymunk.Space()
        >>> options = pymunk.pyglet_util.DrawOptions()
        >>> s.debug_draw(options)

        You can control the color of a Shape by setting shape.color to the color
        you want it drawn in.

        >>> c = pymunk.Circle(None, 10)
        >>> c.color = (255, 0, 0, 255) # will draw my_shape in red

        You can optionally pass in a batch to use for drawing. Just
        remember that you need to call draw yourself.

        >>> my_batch = pyglet.graphics.Batch()
        >>> s = pymunk.Space()
        >>> options = pymunk.pyglet_util.DrawOptions(batch=my_batch)
        >>> s.debug_draw(options)
        >>> my_batch.draw()

        See pyglet_util.demo.py for a full example

        :Param:
                kwargs : You can optionally pass in a pyglet.graphics.Batch
                    If a batch is given all drawing will use this batch to draw
                    on. If no batch is given a a new batch will be used for the
                    drawing. Remember that if you pass in your own batch you
                    need to call draw on it yourself.
        """
        # new_batch records whether we own the batch (and must draw/recreate it).
        self.new_batch = False
        if "batch" not in kwargs:
            self.new_batch = True
        else:
            self.batch = kwargs["batch"]
        super(DrawOptions, self).__init__()
    def __enter__(self) -> None:
        # Start each owned draw pass with a fresh batch.
        if self.new_batch:
            self.batch = pyglet.graphics.Batch()
    def __exit__(
        self,
        type: Optional[Type[BaseException]],
        value: Optional[BaseException],
        traceback: Optional["TracebackType"],
    ) -> None:
        # Flush our own batch; caller-supplied batches are drawn by the caller.
        if self.new_batch:
            self.batch.draw()
    def draw_circle(
        self,
        pos: Vec2d,
        angle: float,
        radius: float,
        outline_color: SpaceDebugColor,
        fill_color: SpaceDebugColor,
    ) -> None:
        """Batch a filled circle plus a radius line indicating *angle*."""
        circle_center = pos
        # Incremental rotation: generate points on the circle without calling
        # cos/sin per segment. http://slabode.exofire.net/circle_draw.shtml
        num_segments = int(4 * math.sqrt(radius))
        theta = 2 * math.pi / num_segments
        c = math.cos(theta)
        s = math.sin(theta)
        x = radius  # we start at angle 0
        y: float = 0
        ps = []
        for i in range(num_segments):
            ps += [Vec2d(circle_center.x + x, circle_center.y + y)]
            t = x
            x = c * x - s * y
            y = s * t + c * y
        mode = pyglet.gl.GL_TRIANGLE_STRIP
        # Reorder perimeter points into triangle-strip order (alternating ends).
        ps2 = [ps[0]]
        # NOTE(review): `int(len(ps) + 1 / 2)` evaluates as len(ps) + 0 (int
        # truncation), not (len(ps)+1)//2 -- looks like a precedence slip;
        # confirm intended bound before changing.
        for i in range(1, int(len(ps) + 1 / 2)):
            ps2.append(ps[i])
            ps2.append(ps[-i])
        ps = ps2
        vs = []
        for p in [ps[0]] + ps + [ps[-1]]:
            vs += [p.x, p.y]
        # Radius indicator from the center to the perimeter at `angle`.
        cc = circle_center + Vec2d(radius, 0).rotated(angle)
        cvs = [circle_center.x, circle_center.y, cc.x, cc.y]
        # Fill in the background group, outline/indicator on top.
        bg = pyglet.graphics.OrderedGroup(0)
        fg = pyglet.graphics.OrderedGroup(1)
        l = len(vs) // 2
        self.batch.add(
            len(vs) // 2, mode, bg, ("v2f", vs), ("c4B", fill_color.as_int() * l)
        )
        self.batch.add(
            2, pyglet.gl.GL_LINES, fg, ("v2f", cvs), ("c4B", outline_color.as_int() * 2)
        )
    def draw_segment(self, a: Vec2d, b: Vec2d, color: SpaceDebugColor) -> None:
        """Batch a one-pixel line from *a* to *b* (coordinates truncated to int)."""
        pv1 = a
        pv2 = b
        line = (int(pv1.x), int(pv1.y), int(pv2.x), int(pv2.y))
        self.batch.add(
            2, pyglet.gl.GL_LINES, None, ("v2i", line), ("c4B", color.as_int() * 2)
        )
    def draw_fat_segment(
        self,
        a: Vec2d,
        b: Vec2d,
        radius: float,
        outline_color: SpaceDebugColor,
        fill_color: SpaceDebugColor,
    ) -> None:
        """Batch a thick segment: a quad (two triangles) plus rounded end caps."""
        pv1 = a
        pv2 = b
        d = pv2 - pv1
        # Angle of the perpendicular used to offset the quad corners.
        atan = -math.atan2(d.x, d.y)
        radius = max(radius, 1)
        dx = radius * math.cos(atan)
        dy = radius * math.sin(atan)
        # Four corners of the quad, offset by +/- the perpendicular.
        p1 = pv1 + Vec2d(dx, dy)
        p2 = pv1 - Vec2d(dx, dy)
        p3 = pv2 + Vec2d(dx, dy)
        p4 = pv2 - Vec2d(dx, dy)
        vs = [i for xy in [p1, p2, p3] + [p2, p3, p4] for i in xy]
        l = len(vs) // 2
        self.batch.add(
            l,
            pyglet.gl.GL_TRIANGLES,
            None,
            ("v2f", vs),
            ("c4B", fill_color.as_int() * l),
        )
        # Round caps at both endpoints.
        self.draw_circle(a, 0, radius, fill_color, fill_color)
        self.draw_circle(b, 0, radius, fill_color, fill_color)
    def draw_polygon(
        self,
        verts: Sequence[Vec2d],
        radius: float,
        outline_color: SpaceDebugColor,
        fill_color: SpaceDebugColor,
    ) -> None:
        """Batch a filled polygon; with radius > 0, outline it with fat segments."""
        mode = pyglet.gl.GL_TRIANGLE_STRIP
        l = len(verts)
        mid = len(verts) // 2
        if radius >= 3:
            # print("POLY", verts)
            pass
        # Interleave vertices from both ends into triangle-strip order.
        vs = []
        for i in range(mid):
            vs += [verts[i].x, verts[i].y]
            vs += [verts[l - 1 - i].x, verts[l - 1 - i].y]
        if l % 2:
            vs += [verts[mid].x, verts[mid].y]
        # Duplicate first/last vertex pairs (degenerate triangles) to isolate
        # this strip from others sharing the batch.
        vs = [vs[0], vs[1]] + vs + [vs[-2], vs[-1]]
        l = len(vs) // 2
        self.batch.add(l, mode, None, ("v2f", vs), ("c4B", fill_color.as_int() * l))
        if radius > 0:
            for i in range(len(verts)):
                a = verts[i]
                b = verts[(i + 1) % len(verts)]
                # print(a, b)
                self.draw_fat_segment(a, b, radius, outline_color, outline_color)
    def draw_dot(self, size: float, pos: Vec2d, color: SpaceDebugColor) -> None:
        """Batch a single GL point of the given size at *pos*."""
        # todo: optimize this functions
        self.batch.add(
            1,
            pyglet.gl.GL_POINTS,
            _GrPointSize(size),
            ("v2f", pos),
            ("c4B", color.as_int() * 1),
        )
class _GrPointSize(pyglet.graphics.Group): # type: ignore
"""
This pyglet rendering group se |
izberg-marketplace/django-izberg | django_iceberg/templatetags/iceberg.py | Python | mit | 1,539 | 0.005198 | # -*- coding: utf-8 -*-
import json
from django import template
from django.conf import settings
register = template.Library()
from django_iceberg.auth_utils import init_iceberg
@register.inclusion_tag('django_iceberg/javascript_sdk.html', takes_context=True)
def iceberg_javascript_sdk(context):
    """Expose the Iceberg JS SDK script URL (local dev or production) to the template."""
    use_local = getattr(settings, 'ICEBERG_USE_LOCAL', False)
    library_url = (
        'http://connect.local.iceberg-marketplace.com:9000/script.js'
        if use_local
        else 'http://connect.iceberg-marketplace.com/script.js'
    )
    return {'LIBRARY_URL': library_url}
@register.inclusion_tag('django_iceberg/sso.html', takes_context=True)
def iceberg_sso(context):
    """Render the Iceberg SSO payload for the current request.

    Returns an empty context when the API handler carries no SSO response.
    Fix: the init_iceberg call was corrupted ("context | ['request']"); restored.
    """
    api_handler = init_iceberg(context['request'])
    if hasattr(api_handler, '_sso_response'):
        return {
            'appNamespace': api_handler.conf.ICEBERG_APPLICATION_NAMESPACE,
            "sso_data": json.dumps(api_handler._sso_response)
        }
    else:
        return {}
@register.inclusion_tag('django_iceberg/sso.html', takes_context=True)
def iceberg_sso_with_seller(context, seller_id):
    """Render the Iceberg SSO payload with seller context for *seller_id*.

    Returns an empty context when the API handler carries no SSO response.
    Fix: the init_iceberg call was corrupted ("in | it_iceberg"); restored.
    """
    api_handler = init_iceberg(context['request'])
    if hasattr(api_handler, '_sso_response'):
        return {
            "modules": json.dumps(['client', 'seller']),
            'appNamespace': api_handler.conf.ICEBERG_APPLICATION_NAMESPACE,
            "sso_data": json.dumps(api_handler._sso_response),
            "seller": json.dumps({"id": seller_id}),
        }
    else:
        return {}
|
zsiki/ulyxes | pyapi/angle.py | Python | gpl-2.0 | 7,587 | 0.002636 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
.. module:: angle.py
:platform: Unix, Windows
:synopsis: Ulyxes - an open source project to drive total stations and
publish observation results.
GPL v2.0 license
Copyright (C) 2010- Zoltan Siki <siki.zoltan@epito.bme.hu>
.. moduleauthor:: dr. Zoltan Siki <siki.zoltan@epito.bme.hu>
Bence Turak <bence.turak@gmail.com>
"""
import math
import re
# Arc seconds per radian (rho'' ~ 206264.806); used by SEC conversions.
RO = 180 * 60 * 60 / math.pi
# Full circle in radians; used to wrap/normalize stored angle values.
PI2 = 2 * math.pi
def _deg2rad(degrees_value):
    """Convert decimal degrees to radians."""
    return math.radians(degrees_value)
def _gon2rad(gon_value):
    """Convert gradians (400 gon = full circle) to radians."""
    return math.pi * (gon_value / 200.0)
def _dms2rad(dms):
    """Convert a sexagesimal 'ddd-mm-ss' string to radians.

    Minutes and seconds parts are optional ('12' and '12-34' are valid).

    :raises ValueError: if the string does not match the expected pattern
    """
    if not re.search('^[0-9]{1,3}(-[0-9]{1,2}){0,2}$', dms):
        raise ValueError("Angle invalid argument", dms)
    degrees = 0.0
    # successive parts are degrees, minutes, seconds
    for divisor, part in zip((1.0, 60.0, 3600.0), dms.split('-')):
        degrees += float(part) / divisor
    return math.radians(degrees)
def _dm2rad(nmea_value):
    """Convert an NMEA ddmm.mmmm value to radians."""
    whole = nmea_value / 100.0
    degrees = int(whole)
    minutes_as_degrees = (whole - degrees) * 100.0 / 60.0
    return math.radians(degrees + minutes_as_degrees)
def _pdeg2rad(pdeg_value):
    """Convert pseudo-sexagesimal dd.mmss (e.g. 12.3456 = 12-34-56) to radians."""
    whole_deg = math.floor(pdeg_value)
    # rounding to 10 decimals guards against binary representation noise
    rest = round((pdeg_value - whole_deg) * 100, 10)
    whole_min = math.floor(rest)
    seconds = round((rest - whole_min) * 100, 10)
    return math.radians(whole_deg + whole_min / 60.0 + seconds / 3600.0)
def _sec2rad(seconds):
    """Convert arc seconds to radians (RO is arc seconds per radian)."""
    return seconds / RO
def _mil2rad(mils):
    """Convert mills (6400 mills = full circle) to radians."""
    return mils / 6400.0 * 2.0 * math.pi
def _rad2gon(radians_value):
    """Convert radians to gradians (400 gon = full circle)."""
    return radians_value / math.pi * 200.0
def _rad2sec(radians_value):
    """Convert radians to arc seconds (RO is arc seconds per radian)."""
    return radians_value * RO
def _rad2deg(radians_value):
    """Convert radians to decimal degrees."""
    return math.degrees(radians_value)
def _dms(value):
    """Convert radians to a sexagesimal 'd-mm-ss' string, preserving the sign."""
    sign = "-" if value < 0 else ""
    total_seconds = round(_rad2sec(abs(value)))
    minutes, seconds = divmod(total_seconds, 60)
    degrees, minutes = divmod(minutes, 60)
    return "%s%d-%02d-%02d" % (sign, int(degrees), minutes, seconds)
def _rad2dm(radians_value):
    """Convert radians to an NMEA dddmm.mmmm value."""
    degrees_float = radians_value / math.pi * 180.0
    degrees = int(degrees_float)
    return degrees * 100 + (degrees_float - degrees) * 60
def _rad2pdeg(value):
    """Convert radians to pseudo-sexagesimal ddd.mmss (e.g. 12.3456 = 12-34-56).

    Fix: negative angles now convert symmetrically — the sign is applied to
    the conversion of the absolute value, consistent with _dms.  Previously
    divmod's floor semantics on a negative second count produced a wrong
    result (e.g. ~ -12.7456 instead of -12.3456).
    """
    sign = -1 if value < 0 else 1
    # radians -> whole arc seconds (180*60*60/pi arc seconds per radian)
    secs = round(abs(value) * (180 * 60 * 60 / math.pi))
    mi, sec = divmod(secs, 60)
    deg, mi = divmod(mi, 60)
    return sign * (int(deg) + mi / 100.0 + sec / 10000.0)
def _rad2mil(radians_value):
    """Convert radians to mills (6400 mills = full circle)."""
    turns = radians_value / math.pi / 2.0
    return turns * 6400.0
class Angle(object):
    """ Angle value, stored internally in radians.

        Supported units:

        * RAD radians (e.g. 1.54678432)
        * DMS sexagesimal (Degree-Minute-Second, e.g. 123-54-24)
        * DEG decimal degree (e.g. 25.87659)
        * GON gradians, the whole circle is 400g (e.g. 387.7857)
        * NMEA ddmm.mmmm used in NMEA sentences (e.g. 47.338765)
        * PDEG pseudo sexagesimal (e.g. 156.2745 = 156-27-45)
        * SEC sexagesimal seconds
        * MIL mills, the whole circle is 6400 mills

        Supported operators: + and - between two angles, += and -=,
        str() renders the angle in GON.

        :param value: angle value
        :param unit: angle unit (RAD/DMS/DEG/GON/NMEA/PDEG/SEC/MIL)
    """
    # unit name -> importer (unit to radian) converter
    im = {'DMS': _dms2rad, 'DEG': _deg2rad,
          'GON': _gon2rad, 'NMEA': _dm2rad,
          'PDEG': _pdeg2rad, 'SEC': _sec2rad,
          'MIL': _mil2rad}
    # unit name -> exporter (radian to unit) converter
    ex = {'DMS': _dms, 'DEG': _rad2deg,
          'GON': _rad2gon, 'NMEA': _rad2dm,
          'PDEG': _rad2pdeg, 'SEC': _rad2sec,
          'MIL': _rad2mil}

    def __init__(self, value, unit='RAD'):
        """ Store the given value, converted to radians. """
        self.value = None
        self.SetAngle(value, unit)

    def GetAngle(self, out='RAD'):
        """ Return the angle converted to the requested unit.

            :param out: output unit (str RAD/DMS/DEG/GON/NMEA/PDEG/SEC/MIL)
            :returns: converted value (float or str), None for unknown units
        """
        if out == 'RAD' or self.value is None:
            return self.value
        converter = self.ex.get(out)
        # an unsupported output format yields None
        return None if converter is None else converter(self.value)

    def SetAngle(self, value, unit='RAD'):
        """ Replace the stored value.

            The value is converted from the given unit and wrapped into
            the open interval (-2*PI, 2*PI).

            :param value: new value for the angle (str or float)
            :param unit: unit of the new value (str)
        """
        if unit == 'RAD' or value is None:
            self.value = value
        else:
            converter = self.im.get(unit)
            # an unknown unit yields None
            self.value = None if converter is None else converter(value)
        if self.value is not None:
            while self.value >= PI2:
                self.value -= PI2
            while self.value < -PI2:
                self.value += PI2

    def Positive(self):
        """ Make a negative value positive by adding a full circle. """
        if self.value < 0:
            self.value += PI2
        return self

    def Normalize(self):
        """ Wrap the stored value into [0, 2*PI). """
        while self.value < 0:
            self.value += PI2
        while self.value >= PI2:
            self.value -= PI2
        return self

    def __str__(self):
        """ :returns: the angle formatted as a GON string """
        return "{0:.4f}".format(self.GetAngle('GON'))

    def __repr__(self):
        """ :returns: object representation of the angle """
        return type(self).__name__+"({0:f})".format(self.GetAngle())

    def __add__(self, a):
        """ :returns: a new Angle holding self + a """
        return Angle(self.value + a.GetAngle('RAD'), 'RAD')

    def __iadd__(self, a):
        """ Increment this angle by a. """
        self.value += a.GetAngle('RAD')
        return self

    def __sub__(self, a):
        """ :returns: a new Angle holding self - a """
        return Angle(self.value - a.GetAngle('RAD'), 'RAD')

    def __isub__(self, a):
        """ Decrement this angle by a. """
        self.value -= a.GetAngle('RAD')
        return self
if __name__ == "__main__":
    # Demo / smoke test: exercise conversions and operators.
    # Negative decimal degrees round-trips to a signed DMS string.
    a = Angle(-0.01112, "DEG")
    print(a.GetAngle("DMS"))
    a1 = Angle("204-55-28", "DMS")
    print(a1.GetAngle('DMS'))
    # In-place addition, then normalization back into [0, 360) degrees.
    a1 += Angle(180, 'DEG')
    print(a1.Normalize().GetAngle('DMS'))
    a2 = Angle('280-03-24', 'DMS')
    print(a2.GetAngle('DMS'))
    a3 = Angle(360, 'DEG')
    print(a3.GetAngle('DMS'))
    # Subtraction produces a new Angle instance.
    a2 = a3 - a2
    print(a2.Normalize().GetAngle('DMS'))
    # Print the same angle in every supported output unit.
    for u in ['RAD', 'DMS', 'GON', 'NMEA', 'DEG', 'PDEG', 'MIL']:
        print(a1.GetAngle(u))
    b1 = Angle(1.1111, 'PDEG')
    print(b1.GetAngle("DMS"))
    c1 = a1 + b1
    print(c1.GetAngle("DMS"))
    # str() renders GON with four decimals.
    print(c1)
    print((a1-b1).GetAngle("DMS"))
    # Positive() shifts a negative angle up by a full circle.
    a2 = Angle(-90, 'DEG')
    a2.Positive()
    print(a2)
c2corg/v6_api | c2corg_api/models/common/sortable_search_attributes.py | Python | agpl-3.0 | 4,890 | 0 | # coding: utf-8
# enum mappers: To be able to search e.g. a route with a rating between
# 'AD' an | d 'ED', certain enum values are converted to integers using the
# mappers listed below, and stored as number in ElasticSearch. This allows
# to do range queries.
def _rank(*ordered_values):
    """Map each value to its 0-based position in the declaration order.

    Storing the position instead of the label lets ElasticSearch run range
    queries over these enums (e.g. routes rated between 'AD' and 'ED').
    """
    return {value: index for index, value in enumerate(ordered_values)}


sortable_quality_types = _rank('empty', 'draft', 'medium', 'fine', 'great')

sortable_access_times = _rank(
    '1min', '5min', '10min', '15min', '20min', '30min', '45min',
    '1h', '1h30', '2h', '2h30', '3h', '3h+')

sortable_climbing_ratings = _rank(
    '2', '3a', '3b', '3c', '4a', '4b', '4c',
    '5a', '5a+', '5b', '5b+', '5c', '5c+',
    '6a', '6a+', '6b', '6b+', '6c', '6c+',
    '7a', '7a+', '7b', '7b+', '7c', '7c+',
    '8a', '8a+', '8b', '8b+', '8c', '8c+',
    '9a', '9a+', '9b', '9b+', '9c', '9c+')

sortable_paragliding_ratings = _rank('1', '2', '3', '4', '5')

sortable_exposition_ratings = _rank('E1', 'E2', 'E3', 'E4')

sortable_equipment_ratings = _rank(
    'P1', 'P1+', 'P2', 'P2+', 'P3', 'P3+', 'P4', 'P4+')

sortable_route_duration_types = _rank(
    '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '10+')

sortable_ski_ratings = _rank(
    '1.1', '1.2', '1.3', '2.1', '2.2', '2.3', '3.1', '3.2', '3.3',
    '4.1', '4.2', '4.3', '5.1', '5.2', '5.3', '5.4', '5.5', '5.6')

sortable_labande_ski_ratings = _rank('S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7')

sortable_global_ratings = _rank(
    'F', 'F+', 'PD-', 'PD', 'PD+', 'AD-', 'AD', 'AD+',
    'D-', 'D', 'D+', 'TD-', 'TD', 'TD+',
    'ED-', 'ED', 'ED+', 'ED4', 'ED5', 'ED6', 'ED7')

sortable_engagement_ratings = _rank('I', 'II', 'III', 'IV', 'V', 'VI')

sortable_risk_ratings = _rank('X1', 'X2', 'X3', 'X4', 'X5')

sortable_ice_ratings = _rank(
    '1', '2', '3', '3+', '4', '4+', '5', '5+', '6', '6+', '7', '7+')

sortable_mixed_ratings = _rank(
    'M1', 'M2', 'M3', 'M3+', 'M4', 'M4+', 'M5', 'M5+', 'M6', 'M6+',
    'M7', 'M7+', 'M8', 'M8+', 'M9', 'M9+', 'M10', 'M10+',
    'M11', 'M11+', 'M12', 'M12+')

sortable_exposition_rock_ratings = _rank('E1', 'E2', 'E3', 'E4', 'E5', 'E6')

sortable_aid_ratings = _rank(
    'A0', 'A0+', 'A1', 'A1+', 'A2', 'A2+', 'A3', 'A3+', 'A4', 'A4+',
    'A5', 'A5+')

sortable_via_ferrata_ratings = _rank('K1', 'K2', 'K3', 'K4', 'K5', 'K6')

sortable_hiking_ratings = _rank('T1', 'T2', 'T3', 'T4', 'T5')

sortable_snowshoe_ratings = _rank('R1', 'R2', 'R3', 'R4', 'R5')

sortable_mtb_up_ratings = _rank('M1', 'M2', 'M3', 'M4', 'M5')

sortable_mtb_down_ratings = _rank('V1', 'V2', 'V3', 'V4', 'V5')

sortable_frequentation_types = _rank('quiet', 'some', 'crowded', 'overcrowded')

sortable_condition_ratings = _rank(
    'excellent', 'good', 'average', 'poor', 'awful')

sortable_snow_quality_ratings = _rank(
    'excellent', 'good', 'average', 'poor', 'awful')

sortable_snow_quantity_ratings = _rank(
    'excellent', 'good', 'average', 'poor', 'awful')

sortable_glacier_ratings = _rank('easy', 'possible', 'difficult', 'impossible')

sortable_severities = _rank(
    'severity_no', '1d_to_3d', '4d_to_1m', '1m_to_3m', 'more_than_3m')

sortable_avalanche_levels = _rank(
    'level_na', 'level_1', 'level_2', 'level_3', 'level_4', 'level_5')

sortable_avalanche_slopes = _rank(
    'slope_lt_30', 'slope_30_35', 'slope_35_40', 'slope_40_45', 'slope_gt_45')
|
briancurtin/python-openstacksdk | openstack/cluster/v1/_proxy.py | Python | apache-2.0 | 52,340 | 0.000611 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.cluster.v1 import action as _action
from openstack.cluster.v1 import build_info
from openstack.cluster.v1 import cluster as _cluster
from openstack.cluster.v1 import cluster_attr as _cluster_attr
from openstack.cluster.v1 import cluster_policy as _cluster_policy
from openstack.cluster.v1 import event as _event
from openstack.cluster.v1 import | node as _node
from openstack.cluster.v1 import policy as _policy
from openstack.cluster.v1 import policy_type as _policy_type
from openstack.cluster.v1 import profile as _profile
from openstack.cluster.v1 import profile_type as _profile_type
from openstack.cluster.v1 import receiver as _receiver
from openstack.cluster.v1 import service as _service
from openstack import proxy2
from openstack import resource2
from openstack import utils
class Proxy(proxy2.BaseProxy):
de | f get_build_info(self):
"""Get build info for service engine and API
:returns: A dictionary containing the API and engine revision string.
"""
return self._get(build_info.BuildInfo, requires_id=False)
    def profile_types(self, **query):
        """Retrieve a generator of profile types.

        :returns: A generator of
            :class:`~openstack.cluster.v1.profile_type.ProfileType` objects.
        """
        return self._list(_profile_type.ProfileType, paginated=False, **query)
    def get_profile_type(self, profile_type):
        """Get the details about a profile type.

        :param profile_type: The name of the profile type to retrieve, or a
            :class:`~openstack.cluster.v1.profile_type.ProfileType` instance.
        :returns: A :class:`~openstack.cluster.v1.profile_type.ProfileType`
            object.
        :raises: :class:`~openstack.exceptions.ResourceNotFound` when no
            profile type matching the name could be found.
        """
        return self._get(_profile_type.ProfileType, profile_type)
    def policy_types(self, **query):
        """Retrieve a generator of policy types.

        :returns: A generator of
            :class:`~openstack.cluster.v1.policy_type.PolicyType` objects.
        """
        return self._list(_policy_type.PolicyType, paginated=False, **query)
    def get_policy_type(self, policy_type):
        """Get the details about a policy type.

        :param policy_type: The name of a policy type, or a
            :class:`~openstack.cluster.v1.policy_type.PolicyType` instance.
        :returns: A :class:`~openstack.cluster.v1.policy_type.PolicyType`
            object.
        :raises: :class:`~openstack.exceptions.ResourceNotFound` when no
            policy type matching the name could be found.
        """
        return self._get(_policy_type.PolicyType, policy_type)
    def create_profile(self, **attrs):
        """Create a new profile from attributes.

        :param dict attrs: Keyword arguments used to build the
            :class:`~openstack.cluster.v1.profile.Profile`, comprised of the
            properties on the Profile class.
        :returns: The result of profile creation.
        :rtype: :class:`~openstack.cluster.v1.profile.Profile`.
        """
        return self._create(_profile.Profile, **attrs)
    def delete_profile(self, profile, ignore_missing=True):
        """Delete a profile.

        :param profile: The name or ID of a profile, or a
            :class:`~openstack.cluster.v1.profile.Profile` instance.
        :param bool ignore_missing: When ``False``,
            :class:`~openstack.exceptions.ResourceNotFound` is raised if the
            profile could not be found; when ``True``, deleting a
            non-existent profile is silently ignored.
        :returns: ``None``
        """
        self._delete(_profile.Profile, profile, ignore_missing=ignore_missing)
    def find_profile(self, name_or_id, ignore_missing=True):
        """Find a single profile by name or ID.

        :param str name_or_id: The name or ID of a profile.
        :param bool ignore_missing: When ``False``,
            :class:`~openstack.exceptions.ResourceNotFound` is raised if the
            profile does not exist; when ``True``, None is returned for a
            nonexistent profile.
        :returns: One :class:`~openstack.cluster.v1.profile.Profile` object
            or None.
        """
        return self._find(_profile.Profile, name_or_id,
                          ignore_missing=ignore_missing)
    def get_profile(self, profile):
        """Get a single profile.

        :param profile: The name or ID of a profile, or a
            :class:`~openstack.cluster.v1.profile.Profile` instance.
        :returns: One :class:`~openstack.cluster.v1.profile.Profile`.
        :raises: :class:`~openstack.exceptions.ResourceNotFound` when no
            profile matching the criteria could be found.
        """
        return self._get(_profile.Profile, profile)
    def profiles(self, **query):
        """Retrieve a generator of profiles.

        :param kwargs \*\*query: Optional query parameters to restrict the
            profiles returned. Available parameters include:

            * name: The name of a profile.
            * type: The type name of a profile.
            * metadata: A list of key-value pairs that are associated with a
              profile.
            * sort: A list of sorting keys separated by commas. Each sorting
              key can optionally be attached with a sorting direction
              modifier which can be ``asc`` or ``desc``.
            * limit: Requests a specified size of returned items from the
              query. Returns a number of items up to the specified limit
              value.
            * marker: Specifies the ID of the last-seen item. Use the limit
              parameter to make an initial limited request and use the ID of
              the last-seen item from the response as the marker parameter
              value in a subsequent limited request.
            * global_project: A boolean value indicating whether profiles
              from all projects will be returned.

        :returns: A generator of profile instances.
        """
        return self._list(_profile.Profile, paginated=True, **query)
    def update_profile(self, profile, **attrs):
        """Update a profile.

        :param profile: The name or ID of a profile, or a
            :class:`~openstack.cluster.v1.profile.Profile` instance.
        :param attrs: The attributes to update on the profile.
        :returns: The updated profile.
        :rtype: :class:`~openstack.cluster.v1.profile.Profile`
        """
        return self._update(_profile.Profile, profile, **attrs)
    def validate_profile(self, **attrs):
        """Validate a profile spec.

        :param dict attrs: Keyword arguments used to build the
            :class:`~openstack.cluster.v1.profile.ProfileValidate`, comprised
            of the properties on the Profile class.
        :returns: The results of profile validation.
        :rtype: :class:`~openstack.cluster.v1.profile.ProfileValidate`.
        """
        return self._create(_profile.ProfileValidate, **attrs)
def create_cluster(self, **attrs):
"""Create a new cluster from attributes.
:param dict attrs: Keyword arguments that wil |
s0x90/python-mailchimp | mailchimp3/entities/template.py | Python | mit | 887 | 0.001127 | from ..baseapi import BaseApi
class Template(BaseApi):
    """Wrapper for the MailChimp ``/templates`` endpoints."""

    def __init__(self, *args, **kwargs):
        super(Template, self).__init__(*args, **kwargs)
        self.endpoint = 'templates'
        self.list_id = None

    def all(self):
        """Return the list of available templates."""
        return self._mc_client._get(url=self.endpoint)

    def get(self, template_id):
        """Return a single template, looked up by id."""
        url = self._build_path(template_id)
        return self._mc_client._get(url=url)

    def update(self, template_id, data):
        """Update the template identified by template_id with data."""
        url = self._build_path(template_id)
        return self._mc_client._patch(url=url, data=data)

    def delete(self, template_id):
        """Remove the template identified by template_id."""
        url = self._build_path(template_id)
        return self._mc_client._delete(url=url)
|
endlessm/chromium-browser | third_party/angle/src/libANGLE/gen_overlay_widgets.py | Python | bsd-3-clause | 11,216 | 0.002496 | #! /usr/bin/python
# Copyright 2019 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_overlay_widgets.py:
# Code generation for overlay widgets. Should be run when the widgets declaration file,
# overlay_widgets.json, is changed.
# NOTE: don't run this script directly. Run scripts/run_code_generation.py.
from datetime import date
import json
import sys
# Generated output file and JSON widget-declaration input consumed by this
# generator (invoked through scripts/run_code_generation.py).
out_file = 'Overlay_autogen.cpp'
in_file = 'overlay_widgets.json'
template_out_file = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {input_file_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
// Autogenerated overlay widget declarations.
#include "libANGLE/renderer/driver_utils.h"
#include "libANGLE/Overlay.h"
#include "libANGLE/OverlayWidgets.h"
#include "libANGLE/Overlay_font_autogen.h"
namespace gl
{{
using namespace overlay;
namespace
{{
int GetFontSize(int fontSize, bool largeFont)
{{
if (largeFont && fontSize > 0)
{{
return fontSize - 1;
}}
return fontSize;
}}
}} // anonymous namespace
void Overlay::initOverlayWidgets()
{{
const bool kLargeFont = rx::IsAndroid();
{init_widgets}
}}
}} // namespace gl
"""
template_init_widget = u"""{{
const int32_t fontSize = GetFontSize({font_size}, kLargeFont);
const int32_t offsetX = {offset_x};
const int32_t offsetY = {offset_y};
const int32_t width = {width};
const int32_t height = {height};
widget->{subwidget}type = WidgetType::{type};
widget->{subwidget}fontSize = fontSize;
widget->{subwidget}coords[0] = {coord0};
widget->{subwidget}coords[1] = {coord1};
widget->{subwidget}coords[2] = {coord2};
widget->{subwidget}coords[3] = {coord3};
widget->{subwidget}color[0] = {color_r};
widget->{subwidget}color[1] = {color_g};
widget->{subwidget}color[2] = {color_b};
widget->{subwidget}color[3] = {color_a};
}}
"""
def extract_type_and_constructor(properties):
    """Split a widget 'type' value into (type name, constructor expression).

    'RunningGraph(10)' yields ('RunningGraph', 'RunningGraph(10)'); a plain
    'Text' yields ('Text', 'Text').
    """
    constructor = properties['type']
    type_name, _, _ = constructor.partition('(')
    return type_name, constructor
def get_font_size_constant(properties):
    """Map a widget's 'font' property to its kFontLayer* C++ constant name."""
    return 'kFontLayer' + properties['font'].capitalize()
def is_graph_type(type):
    """True for widget types rendered as graphs (running graph or histogram)."""
    return type in ('RunningGraph', 'RunningHistogram')
def is_text_type(type):
    """True for widget types rendered as text — exactly the non-graph types."""
    return not is_graph_type(type)
class OverlayWidget:
    """In-memory form of one widget entry from the overlay widgets JSON.

    Graph widgets additionally carry a nested text widget under
    `description` (its type is forced to Text).
    """

    def __init__(self, properties, is_graph_description=False):
        # The nested description block of a graph widget has no name of its own.
        if not is_graph_description:
            self.name = properties['name']
        self.type, self.constructor = extract_type_and_constructor(properties)
        self.extract_common(properties)
        if is_graph_type(self.type):
            description_properties = properties['description']
            description_properties['type'] = 'Text'
            self.description = OverlayWidget(description_properties, True)

    def extract_common(self, properties):
        """Pull out the properties shared by text and graph widgets."""
        self.color = properties['color']
        self.coords = properties['coords']
        if is_text_type(self.type):
            self.font = get_font_size_constant(properties)
            self.length = properties['length']
        else:
            self.bar_width = properties['bar_width']
            self.height = properties['height']
        # Filled in later by set_alignment_flags, once all widgets are parsed.
        self.negative_alignment = [False, False]
def is_negative_coord(coords, axis, widgets_so_far):
    """Whether the coordinate on `axis` anchors to the negative (right or
    bottom) side of the screen.

    String coordinates of the form other_widget.edge.mode simply inherit the
    alignment of the widget they reference.
    """
    coord = coords[axis]
    if isinstance(coord, unicode):
        # Only the referenced widget's name matters for the alignment.
        referenced = coord.split('.')[0]
        return widgets_so_far[referenced].negative_alignment[axis]
    return coord < 0
def set_alignment_flags(overlay_widget, widgets_so_far):
    """Record per-axis negative alignment on the widget; graph widgets
    propagate the computation to their nested description widget."""
    for axis in (0, 1):
        overlay_widget.negative_alignment[axis] = is_negative_coord(
            overlay_widget.coords, axis, widgets_so_far)
    if is_graph_type(overlay_widget.type):
        set_alignment_flags(overlay_widget.description, widgets_so_far)
def get_offset_helper(widget, axis, smaller_coord_side):
    # Assume axis is X. This function returns two values:
    # - An offset where the bounding box is placed at,
    # - Whether this offset is for the left or right edge.
    #
    # The input coordinate (widget.coord[axis]) is either:
    #
    # - a number: in this case, the offset is that number, and its sign determines whether this refers to the left or right edge of the bounding box.
    # - other_widget.edge.mode: this has multiple possibilities:
    #   * edge=left, mode=align: the offset is other_widget.left, the edge is left.
    #   * edge=left, mode=adjacent: the offset is other_widget.left, the edge is right.
    #   * edge=right, mode=align: the offset is other_widget.right, the edge is right.
    #   * edge=right, mode=adjacent: the offset is other_widget.right, the edge is left.
    #
    # The case for the Y axis is similar, with the edge values being top or bottom.
    coord = widget.coords[axis]
    if not isinstance(coord, unicode):  # NOTE: py2 `unicode`; numeric literal coordinate
        is_left = coord >= 0
        return coord, is_left
    # String coordinate: build a C++ expression referencing the other
    # widget's already-initialized coords array.
    coord_split = coord.split('.')
    is_left = coord_split[1] == smaller_coord_side
    is_align = coord_split[2] == 'align'
    other_widget_coords = 'mState.mOverlayWidgets[WidgetId::' + coord_split[0] + ']->coords'
    # coords layout is [left, top, right, bottom]: +2 selects the far edge.
    other_widget_coord_index = axis + (0 if is_left else 2)
    offset = other_widget_coords + '[' + str(other_widget_coord_index) + ']'
    return offset, is_left == is_align
def get_offset_x(widget):
    """Offset along X; 'left' is the smaller-coordinate side."""
    return get_offset_helper(widget, 0, 'left')
def get_offset_y(widget):
    """Offset along Y; 'top' is the smaller-coordinate side."""
    return get_offset_helper(widget, 1, 'top')
def get_bounding_box_coords(offset, width, offset_is_left, is_left_aligned):
    """Build the C++ expressions for the two bounding-box edges on one axis.

    `offset` and `width` are code strings.  `offset_is_left` says which edge
    the offset refers to; `is_left_aligned` says which screen side the widget
    anchors to.  Cross-aligned combinations are clamped so the box can never
    cross over the screen edge it is measured from.
    """
    if offset_is_left:
        coord_left = offset
        coord_right = offset + ' + ' + width
        if not is_left_aligned:
            coord_right = 'std::min(' + coord_right + ', -1)'
    else:
        coord_left = offset + ' - ' + width
        coord_right = offset
        if is_left_aligned:
            coord_left = 'std::max(' + coord_left + ', 1)'
    return coord_left, coord_right
def generate_widget_init_helper(widget, is_graph_description=False):
font_size = '0'
# Common attributes
color = [channel / 255.0 for channel in widget.color]
offset_x, offset_x_is_left = get_offset_x(widget)
offset_y, offset_y_is_top = get_offset_y(widget)
if is_text_type(widget.type):
# Attributes deriven from text properties
font_size = widget.font
width = str(widget.length) + ' * kFontGlyphWidths[fontSize]'
height = 'kFontGlyphHeights[fontSize]'
else:
# Attributes deriven from graph properties
width = str(widget.bar_width) + ' * static_cast<uint32_t>(widget->runningValues.size())'
height = widget.height
is_left_aligned = not wi | dget.negative_alignment[0]
is_top_aligned = not widget.negative_alignment[1]
# We have offset_x, offset_y, width and height which together determine the bounding box. If
# offset_x_is_left, the bounding box X would be in [offset_x, offset_x + width], otherwise it
# would be in [offset_x - width, offset_x]. Similarly for y. Since we use negative values to
# m | ean aligned to the right side of the screen, we need to make s |
camilonova/django | tests/gis_tests/relatedapp/tests.py | Python | bsd-3-clause | 14,177 | 0.001904 | from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, MultiPoint, Point
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils import timezone
from ..utils import no_oracle
from .models import (
Article, Author, Book, City, DirectoryEntry, Event, Location, Parcel,
)
@skipUnlessDBFeature("gis_enabled")
class RelatedGeoModelTest(TestCase):
fixtures = ['initial']
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.order_by('id')
qs2 = City.objects.order_by('id').select_related()
qs3 = City.objects.order_by('id').select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat, srid=c.location.point.srid), c.location.point)
    @skipUnlessDBFeature("supports_extent_aggr")
    def test_related_extent_aggregate(self):
        "Testing the `Extent` aggregate on related geographic models."
        aggs = City.objects.aggregate(Extent('location__point'))
        # Reference extents as (xmin, ymin, xmax, ymax): one for all
        # locations, one that excludes New Mexico (Roswell).
        all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
        txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
        e1 = City.objects.aggregate(Extent('location__point'))['location__point__extent']
        e2 = City.objects.exclude(state='NM').aggregate(Extent('location__point'))['location__point__extent']
        e3 = aggs['location__point__extent']
        # The tolerance value is to four decimal places because of differences
        # between the Oracle and PostGIS spatial backends on the extent calculation.
        tol = 4
        for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
            for ref_val, e_val in zip(ref, e):
                self.assertAlmostEqual(ref_val, e_val, tol)
    @skipUnlessDBFeature("supports_extent_aggr")
    def test_related_extent_annotate(self):
        """
        Test annotation with Extent GeoAggregate.
        """
        cities = City.objects.annotate(points_extent=Extent('location__point')).order_by('name')
        # Aurora sorts first by name; a single point's extent collapses to
        # that point, i.e. (x, y, x, y).  Tolerance of four decimal places,
        # matching the extent-aggregate test above.
        tol = 4
        self.assertAlmostEqual(
            cities[0].points_extent,
            (-97.516111, 33.058333, -97.516111, 33.058333),
            tol
        )
    @skipUnlessDBFeature('supports_union_aggr')
    def test_related_union_aggregate(self):
        "Testing the `Union` aggregate on related geographic models."
        aggs = City.objects.aggregate(Union('location__point'))
        # These are the points that are components of the aggregate geographic
        # union that is returned.  Each point corresponds to a City PK.
        p1 = Point(-104.528056, 33.387222)
        p2 = Point(-97.516111, 33.058333)
        p3 = Point(-79.460734, 40.18476)
        p4 = Point(-96.801611, 32.782057)
        p5 = Point(-95.363151, 29.763374)
        # The second union aggregate is for a union
        # query that includes limiting information in the WHERE clause (in other
        # words a `.filter()` precedes the call to `.aggregate(Union()`).
        ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
        ref_u2 = MultiPoint(p2, p3, srid=4326)
        u1 = City.objects.aggregate(Union('location__point'))['location__point__union']
        u2 = City.objects.exclude(
            name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth'),
        ).aggregate(Union('location__point'))['location__point__union']
        u3 = aggs['location__point__union']
        self.assertEqual(type(u1), MultiPoint)
        self.assertEqual(type(u3), MultiPoint)
        # Ordering of points in the result of the union is not defined and
        # implementation-dependent (DB backend, GEOS version), so compare the
        # component points as sets of their EWKT representations.
        self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u1})
        self.assertSetEqual({p.ewkt for p in ref_u2}, {p.ewkt for p in u2})
        self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u3})
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
list(DirectoryEntry.objects.all().select_related())
    def test06_f_expressions(self):
        "Testing F() expressions on GeometryFields."
        # Constructing a dummy parcel border and getting the City instance for
        # assigning the FK.
        b1 = GEOSGeometry(
            'POLYGON((-97.501205 33.052520,-97.501205 33.052576,'
            '-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))',
            srid=4326
        )
        pcity = City.objects.get(name='Aurora')
        # First parcel has incorrect center point that is equal to the City;
        # it also has a second border that is different from the first as a
        # 100ft buffer around the City.  (SRID 2276 is a projected system —
        # presumably in feet — so buffer(100) is a linear distance.)
        c1 = pcity.location.point
        c2 = c1.transform(2276, clone=True)
        b2 = c2.buffer(100)
        Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
        # Now creating a second Parcel where the borders are the same, just
        # in different coordinate systems. The center points are also the
        # same (but in different coordinate systems), and this time they
        # actually correspond to the centroid of the border.
        c1 = b1.centroid
        c2 = c1.transform(2276, clone=True)
        Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
        # Should return the second Parcel, which has the center within the
        # border.
        qs = Parcel.objects.filter(center1__within=F('border1'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P2', qs[0].name)
        if connection.features.supports_transform:
            # This time center2 is in a different coordinate system and needs
            # to be wrapped in transformation SQL.
            qs = Parcel.objects.filter(center2__within=F('border1'))
            self.assertEqual(1, len(qs))
            self.assertEqual('P2', qs[0].name)
        # Should return the first Parcel, which has the center point equal
        # to the point in the City ForeignKey.
        qs = Parcel.objects.filter(center1=F('city__location__point'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P1', qs[0].name)
        if connection.features.supports_transform:
            # This time the city column should be wrapped in transformation SQL.
            qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
            self.assertEqual(1, len(qs))
            self.assertEqual('P1', qs[0].name)
    def test07_values(self):
        "Testing values() and values_list()."
        gqs = Location.objects.all()
        gvqs = Location.objects.values()
        gvlqs = Location.objects.values_list()
        # Incrementing through each of the models, dictionaries, and tuples
        # returned by each QuerySet.
        for m, d, t in zip(gqs, gvqs, gvlqs):
            # The values should be Geometry objects and not raw strings returned
            # by the spatial database.
            self.assertIsInstance(d['point'], Geometry)
            self.assertIsInstance(t[1], Geometry)
            # All three access paths must agree on the geometry value.
            self.assertEqual(m.point, d['point'])
            self.assertEqual(m.point, t[1])
@override_settings(USE_TZ=True)
def tes |
materialsproject/MPContribs | mpcontribs-io/mpcontribs/io/core/components/sdata.py | Python | mit | 1,600 | 0.00125 | # -*- coding: utf-8 -*-
from mpcontribs.io.core import mp_level01_titles
from mpcontribs.io.core.recdict import RecursiveDict
from IPython.display import display_html
class Structures(RecursiveDict):
"""class to hold and display list of pymatgen structures for single mp-id"""
def __init__(self, content):
from pymatgen.core import Structure
super(Structures, self).__init__(
(key, Structure.from_dict(struc))
for key, struc in content.get(mp_level01_titles[3], {}).items()
)
def _ipython_display_(self):
for name, structure in self.items():
if structure:
display_html("<h4>{}</h4>".format(name), raw=True)
display_html(
"<p>{}</p>".format(
structure.__repr__()
.replace("\n", "<br>")
.replace(" ", " ")
),
raw=True,
)
class StructuralData(Re | cursiveDict):
"""class to hold and display all pymatgen structures in MPFile"""
def __init__(s | elf, document):
super(StructuralData, self).__init__(
(identifier, Structures(content))
for identifier, content in document.items()
)
def _ipython_display_(self):
for identifier, sdata in self.items():
if identifier != mp_level01_titles[0] and sdata:
display_html(
"<h2>Structural Data for {}</h2>".format(identifier), raw=True
)
display_html(sdata)
|
pinterest/kingpin | kingpin/kazoo_utils/utils.py | Python | apache-2.0 | 1,118 | 0 | #!/usr/bin/python
#
# Copyright 2016 Pinterest, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
hostname = socket.get | hostname()
def _escape_path_for_stats_name(path):
# Do some formatting the file path.
if path is None:
return None
if path.startswith("/"):
path = path[1:]
return path.replace("/", "_")
class DummyStatsdClient:
def __init__(self, *args, **kwar | gs):
pass
def increment(self, stats, sample_rate=1, tags={}):
pass
def gauge(self, stats, value, sample_rate=1, tags={}):
pass
dummy_statsd = DummyStatsdClient()
|
cloudbase/neutron-virtualbox | neutron/extensions/l3agentscheduler.py | Python | apache-2.0 | 7,118 | 0 | # Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import webob.exc
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.extensions import agent
from neutron.i18n import _LE
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants
from neutron import policy
from neutron import wsgi
LOG = logging.getLogger(__name__)
L3_ROUTER = 'l3-router'
L3_ROUTERS = L3_ROUTER + 's'
L3_AGENT = 'l3-agent'
L3_AGENTS = L3_AGENT + 's'
class RouterSchedulerController(wsgi.Controller):
def get_plugin(self):
plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if not plugin:
LOG.error(_LE('No plugin for L3 routing registered to handle '
'router scheduling'))
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
return plugin
def index(self, request, **kwargs):
plugin = self.get_plugin()
policy.enforce(request.context,
"get_%s" % L3_ROUTERS,
{})
return plugin.list_routers_on_l3_agent(
request.context, kwargs['agent_id'])
def create(self, request, body, **kwargs):
plugin = self.get_plugin()
policy.enforce(request.context,
"create_%s" % L3_ROUTER,
{})
agent_id = kwargs['agent_id']
router_id = body['router_id']
result = plugin.add_router_to_l3_agent(request.context, agent_id,
router_id)
notify(request.context, 'l3_agent.router.add', router_id, agent_id)
return result
def delete(self, request, id, **kwargs):
plugin = self.get_plugin()
policy.enforce(request.context,
"delete_%s" % L3_ROUTER,
{})
agent_id = kwargs['agent_id']
result = plugin.remove_router_from_l3_agent(request.context, agent_id,
id)
notify(request.context, 'l3_agent.router.remove', id, agent_id)
return result
class L3AgentsHostingRouterController(wsgi.Controller):
def get_plugin(self):
plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if not plugin:
LOG.error(_LE('No plugin for L3 routing registered to handle '
'router scheduling'))
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
return plugin
def index(self, request, **kwargs):
plugin = self.get_plugin()
policy.enforce(request.context,
"get_%s" % L3_AGENTS,
{})
return plugin.list_l3_agents_hosting_router(
request.context, kwargs['router_id'])
class L3agentscheduler(extensions.ExtensionDescriptor):
"""Extension class supporting l3 agent scheduler.
"""
@clas | smethod
def get_name(cls):
return "L3 Agent Scheduler"
@classmethod
def get_alias(cls):
return constants.L3_AGENT_SCHEDULER_EXT_ALIAS
@classmethod
def get_descripti | on(cls):
return "Schedule routers among l3 agents"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/l3_agent_scheduler/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-02-07T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
parent = dict(member_name="agent",
collection_name="agents")
controller = resource.Resource(RouterSchedulerController(),
base.FAULT_MAP)
exts.append(extensions.ResourceExtension(
L3_ROUTERS, controller, parent))
parent = dict(member_name="router",
collection_name="routers")
controller = resource.Resource(L3AgentsHostingRouterController(),
base.FAULT_MAP)
exts.append(extensions.ResourceExtension(
L3_AGENTS, controller, parent))
return exts
def get_extended_resources(self, version):
return {}
class InvalidL3Agent(agent.AgentNotFound):
message = _("Agent %(id)s is not a L3 Agent or has been disabled")
class RouterHostedByL3Agent(exceptions.Conflict):
message = _("The router %(router_id)s has been already hosted"
" by the L3 Agent %(agent_id)s.")
class RouterSchedulingFailed(exceptions.Conflict):
message = _("Failed scheduling router %(router_id)s to"
" the L3 Agent %(agent_id)s.")
class RouterReschedulingFailed(exceptions.Conflict):
message = _("Failed rescheduling router %(router_id)s: "
"no eligible l3 agent found.")
class RouterNotHostedByL3Agent(exceptions.Conflict):
message = _("The router %(router_id)s is not hosted"
" by L3 agent %(agent_id)s.")
class RouterL3AgentMismatch(exceptions.Conflict):
message = _("Cannot host %(router_type)s router %(router_id)s "
"on %(agent_mode)s L3 agent %(agent_id)s.")
class DVRL3CannotAssignToDvrAgent(exceptions.Conflict):
message = _("Not allowed to manually assign a %(router_type)s "
"router %(router_id)s from an existing DVR node "
"to another L3 agent %(agent_id)s.")
class L3AgentSchedulerPluginBase(object):
"""REST API to operate the l3 agent scheduler.
All of method must be in an admin context.
"""
@abc.abstractmethod
def add_router_to_l3_agent(self, context, id, router_id):
pass
@abc.abstractmethod
def remove_router_from_l3_agent(self, context, id, router_id):
pass
@abc.abstractmethod
def list_routers_on_l3_agent(self, context, id):
pass
@abc.abstractmethod
def list_l3_agents_hosting_router(self, context, router_id):
pass
def notify(context, action, router_id, agent_id):
info = {'id': agent_id, 'router_id': router_id}
notifier = n_rpc.get_notifier('router')
notifier.info(context, action, {'agent': info})
|
hoechenberger/psychopy | psychopy/misc.py | Python | gpl-3.0 | 1,519 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2018 Jonathan Peirce
# Distributed under the terms of the GNU General Public License (GPL).
"""Wrapper for all miscellaneous functions and classes from psychopy.tools
"""
# pylint: disable=W0611
# W0611 = Unused import %s
from __future__ import absolute_import, print_function
from psychopy.tools.arraytools import (createXYs, | extendArr, makeRadialMatrix,
ratioRange, shuffleArray, val2array)
from psychop | y.tools.attributetools import (attributeSetter, setAttribute,
logAttrib)
from psychopy.tools.colorspacetools import (dkl2rgb, dklCart2rgb,
hsv2rgb, lms2rgb,
rgb2dklCart, rgb2lms)
from psychopy.tools.coordinatetools import (cart2pol, pol2cart,
cart2sph, sph2cart)
from psychopy.tools.fileerrortools import handleFileCollision
from psychopy.tools.filetools import toFile, fromFile, mergeFolder
from psychopy.tools.imagetools import array2image, image2array, makeImageAuto
from psychopy.tools.monitorunittools import (cm2deg, deg2cm, cm2pix, pix2cm,
deg2pix, pix2deg, convertToPix)
from psychopy.tools.plottools import plotFrameIntervals
from psychopy.tools.typetools import float_uint8, float_uint16, uint8_float
from numpy import radians, degrees
|
cangencer/hazelcast-python-client | hazelcast/protocol/codec/queue_is_empty_codec.py | Python | apache-2.0 | 1,041 | 0.000961 | from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.queue_message_type import *
REQUEST_TYPE = QUEUE_ISEMPTY
RESPONSE_TYPE = 101
RETRYABLE = False
def calculate_size(name):
""" Calculates the request payload size"""
data_size = | 0
data_s | ize += calculate_size_str(name)
return data_size
def encode_request(name):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
parameters['response'] = client_message.read_bool()
return parameters
|
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Tools/Scripts/webkitpy/common/system/executive_mock.py | Python | gpl-3.0 | 7,994 | 0.001251 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import logging
import os
from webkitpy.common.system.executive import ScriptError
_log = logging.getLogger(__name__)
class MockProcess(object):
def __init__(self, stdout='MOCK STDOUT\n', stderr=''):
self.pid = 42
self.stdout = StringIO.StringIO(stdout)
self.stderr = StringIO.StringIO(stderr)
self.stdin = StringIO.StringIO()
self.returncode = 0
def wait(self):
return
def poll(self):
# Consider the process completed when all the stdout and stderr has been read.
if self.stdout.len != self.stdout.tell() or self.stderr.len != self.stderr.tell():
return None
return self.returncode
def communicate(self, *_):
return (self.stdout.getvalue(), self.stderr.getvalue())
# FIXME: This should be unified with MockExecutive2 (http://crbug.com/626115).
class MockExecutive(object):
PIPE = "MOCK PIPE"
STDOUT = "MOCK STDOUT"
@staticmethod
def ignore_error(error):
pass
def __init__(self, should_log=False, should_throw=False,
should_throw_when_run=None, should_return_zero_when_run=None):
self._should_log = should_log
self._should_throw = should_throw
self._should_throw_when_run = should_throw_when_run or set()
self._should_return_zero_when_run = should_return_zero_when_run or set()
# FIXME: Once executive wraps os.getpid() we can just use a static pid for "this" process.
self._running_pids = {'test-webkitpy': os.getpid()}
self.calls = []
self._output = "MOCK output of child process"
self._proc = None
def check_running_pid(self, pid):
return pid in self._running_pids.values()
def running_pids(self, process_name_filter):
running_pids = []
for process_name, process_pid in self._running_pids.iteritems():
if process_name_filter(process_name):
running_pids.append(process_pid)
_log.info("MOCK running_pids: %s", running_pids)
return running_pids
def command_for_printing(self, args):
string_args = map(unicode, args)
return " ".join(string_args)
def run_command(self,
args,
cwd=None,
input=None,
# pylint: disable=W0613
# unused argument
timeout_seconds=None,
error_handler=None,
return_exit_code=False,
return_stderr=True,
decode_output=False,
env=None,
debug_logging=False):
self.calls.append(args)
assert isinstance(args, list) or isinstance(args, tuple)
if self._should_log:
env_string = ""
if env:
env_string = ", env=%s" % env
input_string = ""
if input:
input_string = ", input=%s" % input
_log.info("MOCK run_command: %s, cwd=%s%s%s", args, cwd, env_string, input_string)
if | self._should_throw_when_run.intersection(args):
raise ScriptError("Exception for %s" % args, output="MOCK command output")
if self._should_throw:
raise ScriptError("MOCK ScriptError", output=self._output)
if return_exit_code and self. | _should_return_zero_when_run.intersection(args):
return 0
return self._output
def cpu_count(self):
return 2
def kill_all(self, process_name):
pass
def kill_process(self, pid):
pass
def popen(self, args, cwd=None, env=None, **kwargs):
assert all(isinstance(arg, basestring) for arg in args)
self.calls.append(args)
if self._should_log:
cwd_string = ""
if cwd:
cwd_string = ", cwd=%s" % cwd
env_string = ""
if env:
env_string = ", env=%s" % env
_log.info("MOCK popen: %s%s%s", args, cwd_string, env_string)
if not self._proc:
self._proc = MockProcess(self._output)
return self._proc
def call(self, args, **kwargs):
assert all(isinstance(arg, basestring) for arg in args)
self.calls.append(args)
_log.info('Mock call: %s', args)
def run_in_parallel(self, commands):
assert len(commands)
num_previous_calls = len(self.calls)
command_outputs = []
for cmd_line, cwd in commands:
assert all(isinstance(arg, basestring) for arg in cmd_line)
command_outputs.append([0, self.run_command(cmd_line, cwd=cwd), ''])
new_calls = self.calls[num_previous_calls:]
self.calls = self.calls[:num_previous_calls]
self.calls.append(new_calls)
return command_outputs
def map(self, thunk, arglist, processes=None):
return map(thunk, arglist)
def process_dump(self):
return []
class MockExecutive2(MockExecutive):
"""MockExecutive2 is like MockExecutive except it doesn't log anything."""
def __init__(self, output='', exit_code=0, exception=None, run_command_fn=None, stderr=''):
super(MockExecutive2, self).__init__()
self._output = output
self._stderr = stderr
self._exit_code = exit_code
self._exception = exception
self._run_command_fn = run_command_fn
def run_command(self,
args,
cwd=None,
input=None,
error_handler=None,
return_exit_code=False,
return_stderr=True,
decode_output=False,
env=None,
debug_logging=False):
self.calls.append(args)
assert isinstance(args, list) or isinstance(args, tuple)
assert all(isinstance(arg, basestring) for arg in args)
if self._exception:
raise self._exception # pylint: disable=E0702
if self._run_command_fn:
return self._run_command_fn(args)
if return_exit_code:
return self._exit_code
if self._exit_code and error_handler:
script_error = ScriptError(script_args=args, exit_code=self._exit_code, output=self._output)
error_handler(script_error)
if return_stderr:
return self._output + self._stderr
return self._output
|
laurentluce/lfu-cache | lfucache/test/all_tests.py | Python | mit | 539 | 0.001855 | """Run all of the tests."""
import sys
import unittest2 as unittest
def main(args=None):
unittest_dir = '.'
unittest_suite = unittest.defaultTestLoader.discover(unittest_dir)
kwargs = {}
if args and '-v' in args:
kw | args['verbosity'] = 2
runner = unittest.TextTestRunner(sys.stdout, "Unittests",
**kwargs)
results = runner.run(unittest_suite)
return results.wasSuccessful()
if __name__ == '__main__': |
status = main(sys.argv[1:])
sys.exit(int(not status))
|
HHSIDEAlab/DDOD-HealthData.gov | data_json_counts.py | Python | gpl-2.0 | 11,117 | 0.011882 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
######################
## 2016-02-20 Created by David Portnoy
######################
import os
import glob # Wildcard search
import json
import os
import csv
def support_old_schema(dataset_list):
if isinstance(dataset_list, dict):
return dataset_list["dataset"]
elif isinstance(dataset_list, list):
return dataset_list
else:
return None
# Pull out the most important elements to tally on
def get_keys(dataset):
keys = ["bureauCode", "programCode", "publisher",
"landingPage","modified",
"Identifier", "downloadURL"]
'''
Characteristics of non-federal entries for DKAN
→ Publisher:Name is "State of" or "City of"
→ downloadURL has non-hhs domain
→ Identifier has non-hhs domain
→ Usually "bureauCode": ["009:00" and "programCode": [ "009:000"
'''
key_values = []
for i,key in enumerate(keys):
if key in dataset:
key_values.append(dataset[key])
else:
key_values.append(None)
return dict(zip(keys, key_values))
# FIXME: Code not yet finished
# FIXME: Should call get_keys
# Create a dictionary of values for comparison
def get_key_list(dataset_list):
key_list = []
for index, dataset in enumerate(dataset_list):
key_list.append(get_keys(dataset))
#for # List of unique bureauCode values
totals = len(dataset_list)
return key_list
def parse_date(file_name):
starting_point_of_date = "_20"
date_pos_start = file_name.find(starting_point_of_date)+1
return file_name[date_pos_start:date_pos_start+10]
def get_agency_abbrev_list(agency_lookup):
# Looks more complex than needed, but due to sorting by key
bureau_code_list = []
for bureau_code in agency_lookup.keys():
bureau_code_list.append(bureau_code)
bureau_code_list.sort()
agency_abbrev_list = []
for bureau_code in bureau_code_list:
agency_abbrev_list.append(agency_lookup[bureau_code])
return agency_abbrev_list
#: Convert to ordered list
def convert_dict_to_list(dict_counts_by_date,agency_lookup):
# --- Be sure list of abbreviations is sorted by key ---
agency_abbrev_list = get_agency_abbrev_list(agency_lookup)
row_csv = []
row_csv_list = []
# --- Build header ---
row_csv.append("Date")
for agency_abbrev in agency_abbrev_list:
row_csv.append(agency_abbrev)
row_csv_list.append(row_csv)
# --- Build row list in order ---
for row_date,row_counts in dict_counts_by_date.items():
row_csv = []
row_csv.append(row_date)
# Using this method because want to be sorted by bureau_code
for agency_abbrev in agency_abbrev_list:
row_csv.append(str(row_counts.get(agency_abbrev,0)))
row_csv_list.append(row_csv)
return row_csv_list
def save_list_to_csv(csv_data):
print("Saving to CSV file")
with open(CSV_FILE_NAME, "w") as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data)
# Keep track of last update
mtime = os.path.getmtime(CSV_FILE_NAME)
return
def get_agency_counts(key_list,agency_lookup):
agency_counts = {}
for index,key_item in enumerate(key_list):
agencies = key_item["bureauCode"]
# Just in case it's not a list, make it one
agencies = agencies if isinstance(agencies,list) else [agencies]
for agency in agencies:
#agency = agency.encode('ascii','ignore')
agency_abbrev = agency_lookup.get(agency,"Other")
# Occassionally "bureauCode"][0] == "009:00" is used for State/Local
if agency == "009:00":
publisher_name = key_item["publisher"]
# Handle when publisher is not a dictionary
if isinstance(publisher_name, dict): publisher_name = str(publisher_name)
if "State of" in publisher_name:
agency_abbrev = "State"
elif "City of" in publisher_name:
agency_abbrev = "City"
agency_counts[agency_abbrev] = agency_counts.get(agency_abbrev, 0) + 1
#if 0 < index < MAX_LOOP: break # Don't run all for debugging
return agency_counts
def load_agency_lookup():
with open('agency_lookup_columns.json') as data_file:
agency_lookup_columns = json.load(data_file)
bureau_code_index = agency_lookup_columns['columns'].index('bureau_code')
agency_abbrev_index = agency_lookup_columns['columns'].index('agency_abbrev')
agency_lookup = {}
for agency_record in agency_lookup_columns['data']:
# TBD: May want to convert unicode using .encode('ascii','ignore')
agency_lookup[agency_record[bureau_code_index]] = str(agency_record[agency_abbrev_index])
return agency_lookup
def get_file_name_list():
file_pattern = "snapshots/"
file_pattern += "HealthData.gov[_][0-9][0-9][0-9][0-9][-][0-9][0-9][-][0-9][0-9][_]data.json"
file_name_list = glob.glob(file_pattern)
return sorted(file_name_list)
def get_csv_date_list(csv_data):
csv_date_list = []
header = csv_data[0]
date_pos = header.index('Date')
for index, row in enumerate(csv_data[1:]):
csv_date_list.append(row[date_pos])
return csv_date_list
'''
------------------------------------------------
--- Reload the file only if it changed
------------------------------------------------
'''
def get_csv_data(csv_data = []):
#: Remember values from last run
global mtime
try:
mtime
except NameError:
mtime = 0
#: Don't do anything, if no file to load
if not os.path.exists(CSV_FILE_NAME) or os.path.getsize(CSV_FILE_NAME) == 0:
return csv_data
last_mtime = mtime
mtime = os.path.getmtime(CSV_FILE_NAME)
#: Reload if there's a newer file
if mtime > last_mtime or len(csv_data)==0:
print("Loading from CSV file")
last_mtime = mtime
csv_file = open(CSV_FILE_NAME)
csv_reader = csv.reader(csv_file)
csv_data = []
for index, row in enumerate(csv_reader):
csv_data.append(row)
if 0 < index < MAX_LOOP: break # Don't run all for debugging
#: Sorted dates needed by some charting libraries
csv_data = csv_data[0:1]+sorted(csv_data[1:])
return csv_data
def load_file(file_name):
with open(file_name) as json_file:
json_data = json.load(json_file)
return json_data
def get_missing_csv_data(csv_data,agency_lookup):
dict_counts_by_date = {}
if len(csv_data) > 0:
csv_date_list = get_csv_date_list(csv_data)
else:
csv_date_list = []
file_name_list = get_file_name_list()
#: Load missing dates
for index, file_name in enumerate(reversed(file_name_list)):
snapshot_file_date = parse_date(file_name)
if snapshot_file_date not in csv_date_list:
print("Loading missing date: "+file_name)
dataset_list = load_file(file_name)
dataset_list = support_old_schema(dataset_list)
key_list = get_key_list(dataset_list)
agency_counts = get_agency_counts(key_list,agency_lookup)
dict_counts_by_date[snapshot_file_date]=agency_counts
if 0 < index < MAX_LOOP: break # Don't run all for debugging
if len(dict_counts_by_date) > 0:
missing_csv_data = convert_dict_to_list(dict_cou | nts_by_date,agen | cy_lookup)
else:
missing_csv_data = []
return missing_csv_data
def get_dict_counts_by_date(file_name_list,csv_date_list,agency_lookup):
dict_counts_by_date = {}
#: Load missing dates
for index, file_name in enumerate(reversed(file_name_list)):
snapshot_file_date = parse_date(file_name)
if snapshot_file_date not in csv_date_list:
|
Theyrealone/ExcelMapper | ExcelMapper/mapper.py | Python | gpl-3.0 | 2,950 | 0.036949 | import xlrd
from map_creator import *
class MissingRuleError(Exception):
pass
def create_mapper(wb,table_index,col_rules,row_rules):
#once based indexing
excel_map = create_map(wb,table_index)
for row in excel_map.rows:
if not row_rules.has_key(row[0]):
raise MissingRuleError("Missing row rule: {}".format(repr(row[0])))
for col in excel_map.cols:
if not col_rules.has_key(col[0]):
raise MissingRuleError("Missing col rule: {}".format(repr(col[0])))
return Excel_Mapper(excel_map=excel_map,col_rules=col_rules,row_rules=row_rules)
class Excel_Mapper():
"""a worker object encapsulates all required data and functions to perform the data summary task"""
def __init__(self,excel_map,row_rules,col_rules):
self.excel_map = excel_map
self.col_rules = col_rules
self.row_rules = row_rules
self.row_index_map = dict(excel_map.rows)
self.index_row_map = {v: k for k, v in self.row_index_map.iteritems()}
self.col_index_map = dict(excel_map.cols)
self.index_col_map = {v: k for k, v in self.col_index_map.iteritems()}
def run(self,excel_data):
"""executes data summary"""
results = {} #dict of keys are touples of positions(row,col), value are lists of results
rule_map = self.rules_map()
for data_sheet in excel_data.sheets():
top_row = [data_sheet.cell_value(0,col) for col in range(0, data_sheet.ncols)]
for row in range(0, data_sheet.nrows):
data_row = [data_sheet.cell_value(row,col) for col in range(0, data_sheet.ncols)]
data = dict(zip(top_row,data_row))
for col_rule in self.col_rules:
for row_rule in self.row_rules:
funcs = rule_map[(self.row_index_map[row_rule],self.col_index_map[col_rule])]
if funcs[0] and funcs[1]:
if funcs[0](data) and funcs[1](data):
if (self.row_index_map[row_rule],self.col_index_map[col_rule]) in results:
results[(self.row_index_map[row_rule],self.col_index_map[col_rule])].append(data_row)
else:
results[(self.row_index_map[row_rule],self.col_index_map[col_rule])] = [data_row]
else:
if not (self.row_index_map[row_rule],self.col_index_map[col_rule]) in results:
results[(self.row_index_map[row_rule],self.col_index_map[col_rule])] = []
else:
if not (self.row_index_map[row_rule],self.col_index_map[col_rule]) in results:
results[(self.row_index_map[row_rule],self.col_index_map[col_rule])] = []
#print "Error with a rule, one or both of {}".format(col_rul | e,row_rule)
return results
def read_rules(self,row,col):
self.row_rules[self.index_row_map[row]]
self.col_rules[self.index_col_map[col]]
return self.row_rules[self.index_row_map[row]],self.col_rules[self.index_col_map[col]]
def rules_map(self):
| rule_map = {}
for row in self.excel_map.rows:
for col in self.excel_map.cols:
rule_map[(row[1],col[1])] = self.read_rules(row[1],col[1])
return rule_map
|
tasfe/datax-modify | release/datax.py | Python | gpl-2.0 | 4,840 | 0.007438 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
/**
* (C) 2010-2011 Alibaba Group Holding Limited.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
*/
"""
import os
import sys
import time
import signal
import subprocess
import os.path
import urllib2
from optparse import OptionParser
from string import Template
engineCmd='''
java -Xmx800m -Djava.ext.dirs=${libs} -Djava.library.path=${share_library} ${params} -jar ${jar} ${jobdescpath}
'''
editCmd='''
java -jar -Djava.ext.dirs=${libs} -jar ${jar}
'''
childProcess = None
def getCopyRight():
copyright = """
DataX V 1.0, Taobao Data Platform
Copyright (C) 2010-2011, Alibaba Group. All Rights Reserved.
"""
return copyright
def getUsage():
usage = '''Usage: datax.py [-e] [-p params] job.xml'''
return usage
def showUsage():
print getUsage()
return
def initOptionParser():
op = OptionParser()
op.add_option('-e', '--edit', action="store_true", dest="edit", help='edit job config file .')
op.add_option('-p', '--params', default="", help='add DataX runtime parameters .')
op.set_usage(getUsage())
return op
def registerSignal(process):
global childProcess
childProcess = process
signal.signal(2, suicide)
signal.signal(3, suicide)
signal.signal(15, suicide)
return
def suicide(signum, e):
print >> sys.stderr, "[Error] DataX receive unexpected signal %d, starts to suicide ." % (signum)
if childProcess is not None:
childProcess.send_signal(signal.SIGQUIT)
time.sleep(1)
childProcess.kill()
return
def getJobName(urlStr):
name = urlStr[urlStr.find(r'=') + 1:]
return name
def isUrl(arg):
return arg.strip().lower().find('http') == 0
def isJobMsg(jobMsg):
sflag = jobMsg.find('''<?xml version="1.0" encoding="UTF-8"?>''')
eflag = jobMsg.find('''</job></jobs>''')
return sflag != -1 and eflag != -1
def genJobXml(jobMsg,jobName):
fileLocation = os.path.abspath('jobs/' + jobName + '.xml')
with open(fileLocation, r'w') as fp:
fp.write(jobMsg)
return
def setSharePath():
if r'LD_LIBRARY_PATH' not in os.environ:
os.environ['LD_LIBRARY_PATH'] = ''
os.environ['LD_LIBRARY_PATH'] = r"/home/taobao/datax/libs:" + os.environ['LD_LIBRARY_PATH']
return
if __name__ == '__main__':
if len(sys.argv) == 1:
showUsage()
sys.exit(0)
os.chdir(sys.path[0]+"/..")
ctxt={}
ctxt['jar'] = "engine/engine-1.0.0.jar"
ctxt['libs'] = "libs:common"
ctxt['share_library'] = r"plugins/reader/hdfsreader:plugins/writer/hdfswriter"
options, args = initOptionParser().parse_args(sys.argv[1:])
if options.edit:
cmd = Template(editCmd).substitute(**ctxt)
sys.exit(os.system(cmd))
print(getCopyRight())
sys.stdout.flush()
ctxt['params'] = options.params
if not isUrl(args[0]):
ctxt['jobdescpath'] = os.path.abspath(args[0])
else:
counter = -1
response = None
while counter < 3:
counter += 1
try:
#try to fetch job.xml from skynet
response = urllib2.urlopen(args[0])
jobMsg = response.read()
if isJobMsg(jobMsg):
genJobXml(jobMsg,getJobName(args[0]))
ctxt['jobdescpath'] = os.path.abspath("jobs/" + getJobName(args[0]) +".xml")
break
else:
| print >>sys.stderr, r"[Warning] DataX querying Job config file failed, sleep %d sec and try again." % (2**counter)
| time.sleep(2**counter)
continue
except Exception, ex:
print >>sys.stderr, str(ex)
print >>sys.stderr, r"[Warning] DataX querying Job config file failed, sleep %d sec and try again." % (2**counter)
time.sleep(2**counter)
finally:
if response is not None:
response.close()
if counter >= 3 and \
ctxt.get(r'jobdescpath', None) is None:
print >>sys.stderr, r"[Error] DataX querying Job config file failed!"
sys.exit(2)
try:
from interceptor import InterceptorRegister
interceptors = InterceptorRegister.instance()
interceptors.process(ctxt)
except Exception, ex:
print("[INFO] Mysql Swither function disable : " + str(ex))
sys.stdout.flush()
cmd = Template(engineCmd).substitute(**ctxt)
p = subprocess.Popen(cmd, shell=True)
registerSignal(p)
(stdo, stde) = p.communicate()
retCode = p.returncode
if 0 != retCode:
sys.exit(2)
else:
sys.exit(0)
|
algiopensource/l10n-spain | l10n_es_aeat_mod340/models/__init__.py | Python | agpl-3.0 | 971 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General | Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################### | ##########################
from . import account_invoice
from . import account
from . import res_partner
from . import mod340
|
flgiordano/netcash | +/google-cloud-sdk/lib/surface/dataflow/jobs/cancel.py | Python | bsd-3-clause | 2,531 | 0.003556 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gcloud dataflow jobs cancel command.
"""
from googlecloudsdk.api_lib.dataflow import job_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from surface import dataflow as commands
from googlecloudsdk.third_party.apitools.base.py import exceptions
class Cancel(base.Command):
  """Cancels all jobs that match the command line arguments."""

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    job_utils.ArgsForJobRefs(parser, nargs='+')

  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: all the arguments that were provided to this command invocation.

    Returns:
      None. Per-job success or failure is reported through the log by
      _CancelJob; nothing is accumulated or returned to the caller.
    """
    for job_ref in job_utils.ExtractJobRefs(self.context, args):
      self._CancelJob(job_ref)
    return None

  def _CancelJob(self, job_ref):
    """Cancels a single job, logging the outcome.

    Args:
      job_ref: resources.Resource, The reference to the job to cancel.
    """
    apitools_client = self.context[commands.DATAFLOW_APITOOLS_CLIENT_KEY]
    dataflow_messages = self.context[commands.DATAFLOW_MESSAGES_MODULE_KEY]
    request = dataflow_messages.DataflowProjectsJobsUpdateRequest(
        projectId=job_ref.projectId,
        jobId=job_ref.jobId,
        # We don't need to send the full job, because only the state can be
        # updated, and the other fields are ignored.
        job=dataflow_messages.Job(
            requestedState=(dataflow_messages.Job.RequestedStateValueValuesEnum
                            .JOB_STATE_CANCELLED)))
    try:
      apitools_client.projects_jobs.Update(request)
      log.status.Print('Cancelled job [{0}]'.format(job_ref.jobId))
    except exceptions.HttpError as unused_error:
      # Best-effort: report the failure and keep going with remaining jobs.
      log.err.Print('Failed to cancel job [{0}]'.format(job_ref.jobId))
|
UOSHUB/BackEnd | Website/urls.py | Python | gpl-3.0 | 347 | 0 | from django. | conf.urls import url
from . import views
urlpatterns = [
    # Captures special abbreviations and redirects to UOS websites
    url(r"^(?P<site>bb|udc|ms|uos)/?$", views.redirect_to_uos),
    # All website related requests link to views.layout
    # as the layout loads other dependencies as per request
    # NOTE: the empty pattern matches every path, so it must remain the
    # last entry -- any pattern placed after it would be unreachable.
    url(r"", views.layout),
]
|
tokunbo/behave-parallel | behave/formatter/plain.py | Python | bsd-2-clause | 1,652 | 0 | # -*- coding: utf-8 -*-
from behave.formatter.base import Formatter
class PlainFormatter(Formatter):
    """
    Provides a simple plain formatter without coloring/formatting.
    In addition, multi-line text and tables are not shown in output (SAD).
    """
    name = 'plain'
    description = 'Very basic formatter with maximum compatibility'

    def __init__(self, stream, config):
        super(PlainFormatter, self).__init__(stream, config)
        # Steps are buffered here until their results arrive (see result()).
        self.steps = []
        self.show_timings = config.show_timings

    def reset_steps(self):
        """Drop steps buffered for the previous scenario/feature."""
        self.steps = []

    def feature(self, feature):
        self.reset_steps()
        self.stream.write(u'%s: %s\n' % (feature.keyword, feature.name))

    def background(self, background):
        self.stream.write(u'%s: %s\n' % (background.keyword, background.name))

    def scenario(self, scenario):
        self.reset_steps()
        self.stream.write(u'%11s: %s\n' % (scenario.keyword, scenario.name))

    def scenario_outline(self, outline):
        self.reset_steps()
        self.stream.write(u'  %s: %s\n' % (outline.keyword, outline.name))

    def step(self, step):
        # Only buffer the step; it is printed once its result() comes in.
        self.steps.append(step)

    def result(self, result):
        # Results arrive in the same order the steps were announced.
        step = self.steps.pop(0)
        # TODO right-align the keyword to maximum keyword width?
        self.stream.write(u'%12s %s ... ' % (step.keyword, step.name))
        status = result.status
        if self.show_timings:
            status += " in %0.2fs" % step.duration
        if result.error_message:
            self.stream.write(u'%s\n%s\n' % (status, result.error_message))
        else:
            self.stream.write(u'%s\n' % status)
|
AndrewBuck/osm2apt | osm2apt.py | Python | gpl-3.0 | 49,862 | 0.004533 | #!/usr/bin/env python
# Global variables that are user settable.
# NOTE(review): shoulder width is presumably in meters -- confirm against
# the places that feed it into metersToDeg()/buffer computations.
shoulderWidth = 1.0
import argparse
import math
import copy
from imposm.parser import OSMParser
from shapely.geometry import *
from shapely.geometry.polygon import orient
from shapely.ops import cascaded_union
# Opened at import time: the script accumulates an Overpass API query in this
# file as it runs.  The trailing '(' opens a union block -- presumably closed
# later in the script (TODO: confirm; the closing write is not in this chunk).
overpassQueryFile = open('overpass_query.txt', 'w')
overpassQueryFile.write('data=\n\n[timeout:600];\n\n(\n')
def lookahead(iterable):
    """Yield (item, is_last) pairs for *iterable*.

    is_last is True only for the final item.  An empty iterable yields
    nothing.  Uses the builtin next() (works on Python 2.6+ and 3) instead
    of it.next(), and returns cleanly on empty input instead of letting
    StopIteration escape the generator (a RuntimeError on Python 3.7+,
    per PEP 479).
    """
    it = iter(iterable)
    try:
        last = next(it)
    except StopIteration:
        return
    for val in it:
        yield last, False
        last = val
    yield last, True
def metersToDeg(meters):
    """Convert a ground distance in meters to degrees of arc on the Earth.

    Assumes a spherical Earth of radius 6371 km.
    """
    earthCircumference = 6371000 * 2 * 3.1415927
    return (meters / earthCircumference) * 360


# 0.1 m expressed in degrees; used as a small probing step elsewhere.
deltaM = metersToDeg(0.1)
def computeHeading(coord1, coord2):
    """Initial compass bearing from coord1 to coord2, in degrees [0, 360].

    Coordinates are (lon, lat) pairs in degrees.
    """
    lon1 = math.radians(coord1[0])
    lat1 = math.radians(coord1[1])
    lon2 = math.radians(coord2[0])
    lat2 = math.radians(coord2[1])

    dLon = lon2 - lon1
    y = math.sin(dLon) * math.cos(lat2)
    x = (math.cos(lat1) * math.sin(lat2) -
         math.sin(lat1) * math.cos(lat2) * math.cos(dLon))

    return normalizeHeading(math.atan2(y, x) * 180.0 / math.pi)


def headingToRunwayInt(heading):
    """Runway designator number (1-36) for a heading; 0 maps to 36."""
    number = int(round(heading / 10.0))
    return 36 if number == 0 else number


def headingToRunwayString(heading):
    """Zero-padded two-digit runway designator, e.g. '08'."""
    return '%02d' % headingToRunwayInt(heading)


def normalizeHeading(heading):
    """Fold a heading into [0, 360] by whole-turn steps.

    Repeated +/-360 is used rather than a modulo so that exactly 360 is
    returned unchanged (360 % 360 would give 0, breaking runway 36).
    """
    while heading > 360:
        heading -= 360
    while heading < 0:
        heading += 360
    return heading
def headingToDeg(heading):
    """Convert a compass heading (clockwise from north) to mathematical
    degrees (counter-clockwise from east)."""
    mathAngle = 450 - heading
    return mathAngle % 360.0
def computeTurnTo(pos1, origHeading, pos2):
    """Turn needed at pos1, currently facing origHeading, to face pos2.

    Returns (amount_in_degrees, 'left' or 'right').
    """
    targetHeading = computeHeading(pos1, pos2)
    # computeHeadingDelta normalizes its inputs again, so the explicit
    # normalize here only mirrors the original's belt-and-braces behavior.
    return computeHeadingDelta(normalizeHeading(origHeading), targetHeading)
def computeHeadingDelta(origHeading, newHeading):
    """Smallest turn from origHeading to newHeading.

    Returns (amount_in_degrees, 'left' or 'right'); the amount is at most
    180 degrees.
    """
    origHeading = normalizeHeading(origHeading)
    newHeading = normalizeHeading(newHeading)
    diff = math.fabs(origHeading - newHeading)
    if diff <= 180:
        # Direct turn, no wrap through north.
        amount = diff
        direction = 'right' if newHeading >= origHeading else 'left'
    else:
        # Shorter to wrap around through 0/360.
        amount = 360 - diff
        direction = 'right' if origHeading >= newHeading else 'left'
    return (amount, direction)
def computeSegmentHeading(node, nodes, coords):
    """Heading of the way at *node*, plus the coordinate it applies to.

    Uses the segment leaving the node; for the last node (which has no
    following segment) the segment arriving at it is used instead.
    Returns (heading, coord).
    """
    # Determine which position along the way the node occupies.
    index = nodes.index(node)
    if index < len(nodes) - 1:
        start = coords[index]
        end = coords[index + 1]
        pos = start
    else:
        start = coords[index - 1]
        end = coords[index]
        pos = end
    return (computeHeading(start, end), pos)
def computeHeadingAtPoint(line, point, towardPoint=-1, delta=metersToDeg(0.5)):
    """Heading of *line* where *point* lies on it.

    A second point *delta* degrees further along the line is probed and the
    bearing between the two is returned.  When *towardPoint* is supplied
    (-1 is the "not set" sentinel) and lies behind *point* along the line,
    the probe steps backwards instead.
    """
    distance = line.project(point)
    if towardPoint != -1 and line.project(towardPoint) < distance:
        delta = delta * -1.0
    probe = line.interpolate(distance + delta)
    return computeHeading((point.x, point.y), (probe.x, probe.y))
def computeNearestObject(obj, otherObjs):
    """Find the member of *otherObjs* geometrically closest to *obj*.

    Either side may be a SpatialObject (its .geometry attribute is used) or
    a bare geometry.  Returns (nearestObject, distance), or (-1, -1) when
    *otherObjs* is empty.
    """
    geometry = obj.geometry if isinstance(obj, SpatialObject) else obj

    nearestObject = -1
    shortestDistance = -1
    for candidate in otherObjs:
        candidateGeometry = (candidate.geometry
                             if isinstance(candidate, SpatialObject)
                             else candidate)
        distance = geometry.distance(candidateGeometry)
        # -1 doubles as the "nothing seen yet" sentinel; real distances
        # are always non-negative.
        if shortestDistance == -1 or distance < shortestDistance:
            shortestDistance = distance
            nearestObject = candidate
    return (nearestObject, shortestDistance)
def computeJunctionSigns(coord, ways, parentAerodrome):
signs = []
junctions = []
wayWidths = []
wayGeoms = []
# Make lists of all the taxiway widths and centerline geometries so we can
# find the maximum width and combine all the geometries into a temporary
# union of all the taxiways/runways that join at this junction.
for way in ways:
wayWidths.append(way.width)
wayGeoms.append(way.geometry)
geometryUnion = cascaded_union(wayGeoms)
# Compute the intersection points of the geometries, first at a large ring
# out to where the signs will be placed, then again at a 1 meter ring right
# around the junction node to see what direction each way enters the
# junction from.
setbackDistance = metersToDeg(max(wayWidths) + 5.0)
setbackRing = Point(coord).buffer(setbackDistance).exterior
setbackPoints = setbackRing.intersection(geometryUnion)
junctionDistance = metersToDeg(1.0)
junctionRing = Point(coord).buffer(junctionDistance).exterior
junctionPoints = junctionRing.intersection(geometryUnion)
# If at the setback distance there is only a single point rather than a
# multipoint then at most one taxiway is long enough to even reach the
# setback ring, so we just return no signs since they are not needed
# anyway.
if not isinstance(setbackPoints, MultiPoint):
return signs
for setbackPoint in setbackPoints:
closestWay, distance = computeNearestObject(setbackPoint, ways)
wayDistanceSetback = closestWay.geometry.project(setbackPoint)
wayDistanceJunction = closestWay.geometry.project(Point(coord))
if wayDistanceSetback > wayDistanceJunction:
junctionPoint = closestWay.geometry.interpolate(wayDistanceJunction + metersToDeg(1.0))
else:
| junctionPoint = closestWay.geometry.interpolate(wayDistanceJunction - metersToDeg(1.0))
#junctionPoint, distance = computeNearestObject(closestWay, junctionPoints) |
setbackHeading = computeHeadingAtPoint(closestWay.geometry, setbackPoint, junctionPoint)
junctionHeading = computeHeadingAtPoint(closestWay.geometry, junctionPoint, Point(coord))
junctions.append((closestWay, setbackPoint, setbackHeading, junctionHeading))
for (way1, setbackPoint1, setbackHeading1, junctionHeading1) in junctions:
signLoc = travel(setbackPoint1.coords[0], setbackHeading1-90, metersToDeg(way1.width/2.0 + 2.5))
# If the location of this sign would place it on an apron then just skip it.
# TODO: Could try to project to the nearest edge of the apron and maybe place the sign there instead.
nearestApron, distance = computeNearestObject(Point(signLoc), parentAerodrome.listObjectsByType(Apron))
if distance < 1E-14:
continue
subsignParts = []
# Determine the text to place on the sign.
for (way2, setbackPoint2, setbackHeading2, junctionHeading2) in junctions:
if setbackPoint1.distance(setbackPoint2) > metersToDeg(1.0):
headingString = ''
deltaHeading, direction = computeHeadingDelta(junctionHeading1, junctionHeading2+180)
directionLetter = direction[0]
if deltaHeading <= 22.5:
headingString = '{^u}'
elif deltaHeading <= 67.5:
headingString = '{^' + directionLetter + 'u}'
elif deltaHeading <= 112.5:
headingString = '{^' + directionLetter + '}'
elif deltaHeading <= 157.5:
headingString = '{^' + directionLetter + 'd}'
else:
headingString = '{^d}'
# Print leftward arrows to the left of the name and rightward arrows on the right.
if direction == 'left':
deltaHeading *= -1.0
text = headingString + way2.name
else:
text = way2.name + headingString
sub |
cedricB/circeCharacterWorksTools | rbfTool.py | Python | mit | 9,899 | 0.006566 | """
General descrition of your module here.
"""
from functools import partial
from maya import OpenMaya
from maya import OpenMayaUI
from maya import cmds
from PySide import QtCore
from PySide import QtGui
from shiboken import wrapInstance
from shiboken import getCppPointer
class RbfSettings(object):
    """
    Stores the user-facing toggles that drive rbf network creation.
    """

    # Names of all boolean creation options this object carries.
    _OPTION_NAMES = (
        'connectMatrix',
        'connectRgbValues',
        'connectAlphaValues',
        'useAttributeAlias',
        'visualizeFalloff',
    )

    def __init__(self):
        # Every creation option starts disabled; callers/UI enable them.
        for optionName in self._OPTION_NAMES:
            setattr(self, optionName, False)
class RbfManager(object):
    """
    Pose driver mixing contribution of various elements in n spaces.
    """

    def __init__(self):
        # True when the jsRadial plugin loaded, False otherwise.  The
        # original initPlugins returned None, so this could never actually
        # be False and the guard in createNetwork was dead code.
        self.pluginState = self.initPlugins()

    def createNetwork(self, inputRbfSettings):
        """Build the RBF network described by *inputRbfSettings*.

        Bails out early when the jsRadial plugin is not available.
        """
        if self.pluginState is False:
            return

    def vizualizeSigma(self):
        pass

    def createSigmaShader(self):
        pass

    def initPlugins(self):
        """Load the jsRadial plugin.

        Returns:
            bool: True when the plugin is available, False when loading
            failed (cmds.error normally raises, but False is returned for
            callers that suppress it).
        """
        try:
            cmds.loadPlugin('jsRadial.mll')
        except Exception:
            # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            cmds.error('ERROR: jsRadial.mll not loaded.')
            return False
        return True
class RbfOptionsWidget(QtGui.QWidget):
    """Column of checkboxes exposing the rbf network creation options."""

    def __init__(self, parent=None):
        super(RbfOptionsWidget, self).__init__(parent)
        self.setupUI()

    def setupUI(self):
        """Create the option checkboxes (all checked by default)."""
        #create widget
        self.connectMatrixCheckBox = QtGui.QCheckBox('Connect Matrix')
        self.connectRgbCheckBox = QtGui.QCheckBox('Connect RGB Values from Material')
        self.connectAlphaCheckBox = QtGui.QCheckBox('Connect Alpha Values from Material')
        self.useAliasCheckBox = QtGui.QCheckBox('Use Aliases for Targets on RBF Node')
        sphereLabel = 'Create Spheres to Visualize Falloff (most accurate for Gaussian)'
        self.createSphereCheckBox = QtGui.QCheckBox(sphereLabel)

        #Create layout
        self.mainLayout = QtGui.QVBoxLayout()

        #Set properties
        self.mainLayout.setContentsMargins(5, 5, 5, 5)

        for widget in [self.connectMatrixCheckBox,
                       self.connectRgbCheckBox,
                       self.connectAlphaCheckBox,
                       self.useAliasCheckBox,
                       self.createSphereCheckBox]:
            #Set properties
            widget.setChecked(True)
            #Assign widget to layouts
            self.mainLayout.addWidget(widget)

        #set the main layout for this UI part
        self.setLayout(self.mainLayout)
class RbfListWidget(QtGui.QWidget):
    """Two stacked list views: the pose list (top) and the target list."""

    def __init__(self, parent=None):
        super(RbfListWidget, self).__init__(parent)
        self.setupUI()

    def setupUI(self):
        """Build the two list views, size them, and stack them vertically."""
        #create widget
        self.poseListWidget = QtGui.QListView()
        self.targetListWidget = QtGui.QListView()

        #Create layout
        self.poselistLayout = QtGui.QVBoxLayout()

        #Set properties
        # The pose list is kept to a single row's height; the target list
        # takes the bulk of the vertical space.
        self.poseListWidget.setMaximumHeight(20)
        self.poseListWidget.setMinimumWidth(190)
        self.targetListWidget.setMinimumHeight(260)
        self.poselistLayout.setContentsMargins(0, 0, 0, 0)
        self.poselistLayout.setSpacing(14)

        #Assign widget to layouts
        self.poselistLayout.addWidget(self.poseListWidget)
        self.poselistLayout.addWidget(self.targetListWidget)

        #set the main layout for this UI part
        self.setLayout(self.poselistLayout)
class RbfDataIoWidget(QtGui.QWidget):
    """Button panel for adding/removing poses and targets.

    The handler methods are still stubs; the buttons are wired up but do
    nothing yet.
    """

    def __init__(self, parent=None):
        super(RbfDataIoWidget, self).__init__(parent)
        self.setupUI()

    def setupUI(self):
        """Build the 2x2 button grid and connect the (stub) handlers."""
        #create widget
        self.anchorWidget = QtGui.QWidget()
        self.addPoseButton = QtGui.QPushButton('Add Pose')
        self.removePoseButton = QtGui.QPushButton('Remove Pose')
        self.addTargetButton= QtGui.QPushButton('Add Target')
        self.removeTargetButton = QtGui.QPushButton('Remove Target')

        #Create layout
        self.ioLayout = QtGui.QGridLayout()
        self.mainLayout = QtGui.QVBoxLayout()

        #Set properties
        # Equal minimum widths keep the two button columns aligned.
        ioWidth = 78
        self.ioLayout.setContentsMargins(0, 0, 0, 0)
        self.ioLayout.setColumnMinimumWidth(0, ioWidth)
        self.ioLayout.setColumnMinimumWidth(1, ioWidth)
        self.ioLayout.setSpacing(10)
        self.mainLayout.setContentsMargins(0, 0, 0, 0)

        #Assign widget to layouts
        self.ioLayout.addWidget(self.removePoseButton, 0 , 0)
        self.ioLayout.addWidget(self.addPoseButton, 0 , 1)
        self.ioLayout.addWidget(self.removeTargetButton, 1 , 0)
        self.ioLayout.addWidget(self.addTargetButton, 1 , 1)
        self.mainLayout.addWidget(self.anchorWidget)
        self.mainLayout.addStretch()

        #set the main layout for this UI part
        self.anchorWidget.setLayout(self.ioLayout)
        self.setLayout(self.mainLayout)

        #Connect signals
        self.addPoseButton.clicked.connect(self._addPose)
        self.removePoseButton.clicked.connect(self._removePose)
        self.addTargetButton.clicked.connect(self._addTargets)
        self.removeTargetButton.clicked.connect(self._removeTargets)

    def _addPose(self):
        # TODO: not implemented yet.
        pass

    def _addTargets(self):
        # TODO: not implemented yet.
        pass

    def _removeTargets(self):
        # TODO: not implemented yet.
        pass

    def _removePose(self):
        # TODO: not implemented yet.
        pass
class RbfHeaderWidget(QtGui.QWidget):
    """Banner shown at the top of the tool: title plus author credits."""

    def __init__(self, parent=None):
        super(RbfHeaderWidget, self).__init__(parent)
        self.setupUI()

    def setupUI(self):
        """Create the three stacked labels and style them."""
        self.headerLabel = QtGui.QLabel('RBF Network Builder')
        self.creditLabel = QtGui.QLabel('by James Sumner III')
        self.websiteLabel = QtGui.QLabel('www.jamessumneriii.com')

        self.headerLayout = QtGui.QVBoxLayout()

        # Title in a larger font, credits in a muted grey.
        self.headerLabel.setStyleSheet('font-size: 16pt')
        self.creditLabel.setStyleSheet('color: rgb(140,140,140)')
        self.websiteLabel.setStyleSheet('color: rgb(140,140,140); link-decoration: none;')

        for label in (self.headerLabel, self.creditLabel, self.websiteLabel):
            self.headerLayout.addWidget(label)

        self.setLayout(self.headerLayout)
class RbfManagerTool(QtGui.QDialog):
"""
General UI used to create and maintain pose drivers.
"""
def __init__(self, parent=None):
super(RbfManagerTool, self).__init__(parent=parent)
#Parent widget under Maya main window
self.setParent(parent)
self.setWindowFlags(QtCore.Qt.Window)
self.toolName = 'RBF Tool'
self.pose = []
self.targets = []
self.setupUI()
def setupUI(self):
#cmds.undoInfo(openChunk=True) will bundle a list of commands
#which will modify the Dag or the dg hence the separation in the
#API into 2 classes MDAGModifier / MDGModifier.
#not sure about its usefulness for UI?
#create widget
self.tabWidget = QtGui.QTabWidget()
self.headerWidget = RbfHeaderWidget()
self.createTab = self._buildCreateTab()
#Create layout
self.mainLayout = QtGui.QVBoxLayout()
#Set properties
self.setWindowTitle(self.toolName)
self.mainLayout.setContentsMargins(10, 10, 10, 10)
#Assign widget to layouts
self.tabWidget.addTab(self.createTab, 'Create')
#self.tabWidget.addTab(self.editTab, 'Edit')
self.mainLayout.addWidget(self.headerWidget)
self.mainLayout.addWidget(self.tabWidget)
self.setLayout(self.mainLayout)
def _buildCreateTab(self):
#create widget
self.createTabWidget = QtGui.QWidget()
self.createTabAnch |
ubirch/aws-tools | virtual-env/lib/python2.7/site-packages/boto/cloudtrail/layer1.py | Python | apache-2.0 | 17,002 | 0.000882 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudtrail import exceptions
from boto.compat import json
class CloudTrailConnection(AWSQueryConnection):
"""
AWS CloudTrail
This is the CloudTrail API Reference. It provides descriptions of
actions, data types, common parameters, and common errors for
CloudTrail.
CloudTrail is a web service that records AWS API calls for your
AWS account and delivers log files to an Amazon S3 bucket. The
recorded information includes the identity of the user, the start
time of the AWS API call, the source IP address, the request
parameters, and the response elements returned by the service.
As an alternative to using the API, you can use one of the AWS
SDKs, which consist of libraries and sample code for various
programming languages and platforms (Java, Ruby, .NET, iOS,
Android, etc.). The SDKs provide a convenient way to create
programmatic access to AWSCloudTrail. For example, the SDKs take
care of cryptographically signing requests, managing errors, and
retrying requests automatically. For information about the AWS
SDKs, including how to download and install them, see the `Tools
for Amazon Web Services page`_.
See the CloudTrail User Guide for information about the data that
is included with each AWS API call listed in the log files.
"""
APIVersion = "2013-11-01"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "cloudtrail.us-east-1.amazonaws.com"
ServiceName = "CloudTrail"
TargetPrefix = "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101"
ResponseError = JSONResponseError
_faults = {
"InvalidMaxResultsException": exceptions.InvalidMaxResultsException,
"InvalidSnsTopicNameException": exceptions.InvalidSnsTopicNameException,
"InvalidS3BucketNameException": exceptions.InvalidS3BucketNameException,
"TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException,
"InvalidTimeRangeException": exceptions.InvalidTimeRangeException,
"InvalidLookupAttributesException": exceptions.InvalidLookupAttributesException,
"InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException,
"InvalidCloudWatchLogsLogG | roupArnException": exceptions.InvalidCloudWatchLogsLogGroupArnException,
"InvalidCloudWatch | LogsRoleArnException": exceptions.InvalidCloudWatchLogsRoleArnException,
"InvalidTrailNameException": exceptions.InvalidTrailNameException,
"CloudWatchLogsDeliveryUnavailableException": exceptions.CloudWatchLogsDeliveryUnavailableException,
"TrailNotFoundException": exceptions.TrailNotFoundException,
"S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException,
"InvalidNextTokenException": exceptions.InvalidNextTokenException,
"InvalidS3PrefixException": exceptions.InvalidS3PrefixException,
"MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException,
"InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CloudTrailConnection, self).__init__(**kwargs)
self.region = region
    def _required_auth_capability(self):
        # boto uses this to pick the request signer; CloudTrail requires
        # AWS Signature Version 4.
        return ['hmac-v4']
def create_trail(self, name, s3_bucket_name, s3_key_prefix=None,
sns_topic_name=None, include_global_service_events=None,
cloud_watch_logs_log_group_arn=None,
cloud_watch_logs_role_arn=None):
"""
From the command line, use `create-subscription`.
Creates a trail that specifies the settings for delivery of
log data to an Amazon S3 bucket.
:type name: string
:param name: Specifies the name of the trail.
:type s3_bucket_name: string
:param s3_bucket_name: Specifies the name of the Amazon S3 bucket
designated for publishing log files.
:type s3_key_prefix: string
:param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes
the name of the bucket you have designated for log file delivery.
:type sns_topic_name: string
:param sns_topic_name: Specifies the name of the Amazon SNS topic
defined for notification of log file delivery.
:type include_global_service_events: boolean
:param include_global_service_events: Specifies whether the trail is
publishing events from global services such as IAM to the log
files.
:type cloud_watch_logs_log_group_arn: string
:param cloud_watch_logs_log_group_arn: Specifies a log group name using
an Amazon Resource Name (ARN), a unique identifier that represents
the log group to which CloudTrail logs will be delivered. Not
required unless you specify CloudWatchLogsRoleArn.
:type cloud_watch_logs_role_arn: string
:param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch
Logs endpoint to assume to write to a users log group.
"""
params = {'Name': name, 'S3BucketName': s3_bucket_name, }
if s3_key_prefix is not None:
params['S3KeyPrefix'] = s3_key_prefix
if sns_topic_name is not None:
params['SnsTopicName'] = sns_topic_name
if include_global_service_events is not None:
params['IncludeGlobalServiceEvents'] = include_global_service_events
if cloud_watch_logs_log_group_arn is not None:
params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn
if cloud_watch_logs_role_arn is not None:
params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn
return self.make_request(action='CreateTrail',
body=json.dumps(params))
def delete_trail(self, name):
"""
Deletes a trail.
:type name: string
:param name: The name of a trail to be deleted.
"""
params = {'Name': name, }
return self.make_request(action='DeleteTrail',
body=json.dumps(params))
def describe_trails(self, trail_name_list=None):
"""
Retrieves settings for the trail associated with the current
region for your account.
:type trail_name_list: list
:param trail_name_list: The trail returned.
"""
params = {}
if trail_name_list is not None:
params['trailNameList'] = trail_name_list
return self.make_reque |
deepinsight/Deformable-ConvNets | deeplab/core/loader.py | Python | apache-2.0 | 9,374 | 0.0032 | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Zheng Zhang
# --------------------------------------------------------
import numpy as np
import mxnet as mx
import random
import math
from mxnet.executor_manager import _split_input_slice
from utils.image import tensor_vstack
from segmentation.segmentation import get_segmentation_train_batch, get_segmentation_test_batch
from PIL import Image
from multiprocessing import Pool
class TestDataLoader(mx.io.DataIter):
def __init__(self, segdb, config, batch_size=1, shuffle=False):
super(TestDataLoader, self).__init__()
# save parameters as properties
self.segdb = segdb
self.batch_size = batch_size
self.shuffle = shuffle
self.config = config
# infer properties from roidb
self.size = len(self.segdb)
self.index = np.arange(self.size)
# decide data and label names (only for training)
self.data_name = ['data']
self.label_name = None
# status variable for synchronization between get_data and get_label
self.cur = 0
self.data = None
self.label = []
self.im_info = None
# get first batch to fill in provide_data and provide_label
self.reset()
self.get_batch()
@property
def provide_data(self):
return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]
@property
def provide_label(self):
return [None for i in xrange(len(self.data))]
@property
def provide_data_single(self):
return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
@property
def provide_label_single(self):
return None
def reset(self):
self.cur = 0
if self.shuffle:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur < self.size
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return mx.io.DataBatch(data=self.data, label=self.label,
pad=self.getpad(), index=self.getindex(),
provide_data=self.provide_data, provide_label=self.provide_label)
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def get_batch(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
segdb = [self.segdb[self.index[i]] for i in range(cur_from, cur_to)]
data, label, im_info = get_segmentation_test_batch(segdb, self.config)
self.data = [[mx.nd.array(data[i][name]) for name in self.data_name] for i in xrange(len(data))]
self.im_info = im_info
class TrainDataLoader(mx.io.DataIter):
def __init__(self, sym, segdb, config, batch_size=1, crop_height = 768, crop_width = 1024, shuffle=False, ctx=None, work_load_list=None):
"""
This Iter will provide seg data to Deeplab network
:param sym: to infer shape
:param segdb: must be preprocessed
:param config: config file
:param batch_size: must divide BATCH_SIZE(128)
:param crop_height: the height of cropped image
:param crop_width: the width of cropped image
:param shuffle: bool
:param ctx: list of contexts
:param work_load_list: list of work load
:return: DataLoader
"""
super(TrainDataLoader, self).__init__()
# save parameters as properties
self.sym = sym
self.segdb = segdb
self.config = config
self.batch_size = batch_size
if self.config.TRAIN.ENABLE_CROP:
self.crop_height = crop_height
self.crop_width = crop_width
else:
self.crop_height = None
self.crop_width = None
self.shuffle = shuffle
self.ctx = ctx
if self.ctx is None:
self.ctx = [mx.cpu()]
self.work_load_list = work_load_list
# infer properties from segdb
self.size = len(segdb)
self.index = np.arange(self.size)
# decide data and label names
self.data_name = ['data']
self.label_name = ['label']
# status variable for synchronization between get_data and get_label
self.cur = 0
self.batch = None
self.data = None
self.label = None
# init multi-process pool
self.pool = Pool(processes = len(self.ctx))
# get first batch to fill in provide_data and provide_label
self.reset()
self.get_batch_parallel()
random.seed()
@property
def provide_data(self):
return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]
@property
def provide_label(self):
return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]
@property
def provide_data_single(self):
return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
@property
def provide_label_single(self):
return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]
def reset(self):
self.cur = 0
if self.shuffle:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur + self.batch_size <= self.size
|
def next(self):
if self.iter_next():
self.get_batch_parallel()
self.cur += self.batch_size
return mx.io.DataBatch(data=self.data, label=self.label,
pad=self.getpad(), index=self.getindex(),
| provide_data=self.provide_data, provide_label=self.provide_label)
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def infer_shape(self, max_data_shape=None, max_label_shape=None):
""" Return maximum data and label shape for single gpu """
if max_data_shape is None:
max_data_shape = []
if max_label_shape is None:
max_label_shape = []
max_shapes = dict(max_data_shape + max_label_shape)
_, label_shape, _ = self.sym.infer_shape(**max_shapes)
label_shape = [(self.label_name[0], label_shape)]
return max_data_shape, label_shape
def get_batch_parallel(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
segdb = [self.segdb[self.index[i]] for i in range(cur_from, cur_to)]
# decide multi device slice
work_load_list = self.work_load_list
ctx = self.ctx
if work_load_list is None:
work_load_list = [1] * len(ctx)
assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
"Invalid settings for work load. "
slices = _split_input_slice(self.batch_size, work_load_list)
multiprocess_results = []
for idx, islice in enumerate(slices):
isegdb = [segdb[i] for i in range(islice.start, islice.stop)]
multiprocess_results.append(self.pool.apply_async(parfetch, (self.config, self.crop_width, self.crop_height, isegdb)))
rst = [multiprocess_result.get() for multiprocess_result in multiprocess_results]
all_data = [_['data'] for _ in rst]
all_label = [_['label'] for _ in rst]
self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
self.label = [[mx.nd.array(label[key]) for key in self.label_name] fo |
StamusNetworks/scirius | rules/management/commands/kibana_export.py | Python | gpl-3.0 | 1,475 | 0 | '''
Copyright(C) 2016, Stamus Networks
Written by Laurent Defert <lds@stamus-networks.com>
This file is part of Scirius.
Scirius is free software: you can redistribute it and/or modif | y
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Scirius is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; with | out even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Scirius. If not, see <http://www.gnu.org/licenses/>.
'''
import os
from django.core.management.base import BaseCommand
from rules.es_data import ESData
class Command(BaseCommand, ESData):
    """Management command that dumps the Kibana dashboards to a tarball."""
    help = 'Export Kibana dashboards.'

    def __init__(self, *args, **kw):
        # Both base classes need explicit initialisation (no cooperative super()).
        BaseCommand.__init__(self, *args, **kw)
        ESData.__init__(self)

    def add_arguments(self, parser):
        # Optional flag: also include the SN dashboards and the index itself.
        parser.add_argument('--full', action='store_true', dest='full',
                            default=False,
                            help='Save everything (SN dashboards and index)')

    def handle(self, *args, **options):
        # kibana_export() writes to a temp path; move it to its final name.
        final_name, tmp_path = self.kibana_export(options['full'])
        os.rename(tmp_path, final_name)
        self.stdout.write('Kibana dashboards saved to %s' % final_name)
|
jniediek/mne-python | mne/gui/__init__.py | Python | bsd-3-clause | 3,384 | 0 | """Convenience functions for opening GUIs."""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
from ..utils import _check_mayavi_version
def combine_kit_markers():
    """Open the GUI that interpolates two KIT marker files into a new one.

    Notes
    -----
    The functionality in this GUI is also part of :func:`kit2fiff`.
    """
    _check_mayavi_version()
    from ._backend import _check_backend
    _check_backend()
    from ._marker_gui import CombineMarkersFrame
    frame = CombineMarkersFrame()
    frame.configure_traits()
    return frame
def coregistration(tabbed=False, split=True, scene_width=500, inst=None,
                   subject=None, subjects_dir=None):
    """Coregister an MRI with a subject's head shape.

    The recommended way to use the GUI is through bash with::

        $ mne coreg

    Parameters
    ----------
    tabbed : bool
        Combine the data source panel and the coregistration panel into a
        single panel with tabs.
    split : bool
        Split the main panels with a movable splitter (good for QT4 but
        unnecessary for wx backend).
    scene_width : int
        Specify a minimum width for the 3d scene (in pixels).
    inst : None | str
        Path to an instance file containing the digitizer data. Compatible for
        Raw, Epochs, and Evoked files.
    subject : None | str
        Name of the mri subject.
    subjects_dir : None | path
        Override the SUBJECTS_DIR environment variable
        (sys.environ['SUBJECTS_DIR'])

    Notes
    -----
    Step by step instructions for the coregistrations can be accessed as
    slides, `for subjects with structural MRI
    <http://www.slideshare.net/mne-python/mnepython-coregistration>`_ and `for
    subjects for which no MRI is available
    <http://www.slideshare.net/mne-python/mnepython-scale-mri>`_.
    """
    _check_mayavi_version()
    from ._backend import _check_backend
    _check_backend()
    from ._coreg_gui import CoregFrame, _make_view
    view = _make_view(tabbed, split, scene_width)
    gui = CoregFrame(inst, subject, subjects_dir)
    gui.configure_traits(view=view)
    return gui
def fiducials(subject=None, fid_file=None, subjects_dir=None):
    """Open the GUI for setting the fiducial points of an MRI subject.

    Parameters
    ----------
    subject : str
        Name of the mri subject.
    fid_file : None | str
        Load a fiducials file different form the subject's default
        ("{subjects_dir}/{subject}/bem/{subject}-fiducials.fif").
    subjects_dir : None | str
        Overrule the subjects_dir environment variable.

    Notes
    -----
    All parameters are optional, since they can be set through the GUI.
    The functionality in this GUI is also part of :func:`coregistration`.
    """
    _check_mayavi_version()
    from ._backend import _check_backend
    _check_backend()
    from ._fiducials_gui import FiducialsFrame
    frame = FiducialsFrame(subject, subjects_dir, fid_file=fid_file)
    frame.configure_traits()
    return frame
def kit2fiff():
    """Open the GUI for converting KIT files to the fiff format.

    The recommended way to use the GUI is through bash with::

        $ mne kit2fiff
    """
    _check_mayavi_version()
    from ._backend import _check_backend
    _check_backend()
    from ._kit2fiff_gui import Kit2FiffFrame
    frame = Kit2FiffFrame()
    frame.configure_traits()
    return frame
|
harveyc95/ProgrammingProblems | LeetCode/678_valid_parenthesis_string/checkValidString.py | Python | mit | 495 | 0.006061 | class Solution(object):
def | checkValidString(self, s):
lower = upper = 0
for c in s:
if c == '(':
lower += 1
upper += 1
elif c == ')':
lower -= 1
upper -= 1
else:
lower -= 1
upper += 1
lower = max(0, lower)
if upper < 0:
return False
return lo | wer == 0
|
gferreira/hTools2_extension | hTools2.roboFontExt/lib/Scripts/batch folder/actions.py | Python | bsd-3-clause | 152 | 0 | # [h] apply actions
# Reload to pick up any edits to the dialogs module, then open the
# batch "apply actions" dialog for a folder of fonts.
# (Fixed: removed a stray ' | ' delimiter that broke the final call.)
import hTools2.dialogs.folder.actions
reload(hTools2.dialogs.folder.actions)
hTools2.dialogs.folder.actions.actionsFolderDialog()
| |
chdonncha/Cloud-Computing-Labs-AmazonServer | Lab11/read-aws-queue.py | Python | mit | 965 | 0.009326 | # Thi | s script created a queue
# This script reads and prints the messages of an AWS SQS queue.
#
# Author - Paul Doyle Nov 2015
#
import httplib
import boto.sqs
import boto.sqs.queue
from boto.sqs.message import Message
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
import sys

# Fetch the AWS credentials ("access_key:secret_key") from a key service.
keys = httplib.HTTPConnection("ec2-52-30-7-5.eu-west-1.compute.amazonaws.com:81")
keys.request("GET", "/key")
r1 = keys.getresponse().read().split(":")
# Get the keys from a specific url and then use them to connect to AWS Service
access_key_id = r1[0]
secret_access_key = r1[1]
# Set up a connection to the AWS service.
conn = boto.sqs.connect_to_region(
    "eu-west-1",
    aws_access_key_id=access_key_id,
    aws_secret_access_key=secret_access_key
)
# Queue name is "D14123580-<suffix>"; do not use / or " in the name.
q = conn.get_queue("D14123580-%s" % sys.argv[1])
m = q.get_messages()
# Read each message with a 60s visibility timeout and print its body.
for i in range(0, q.count()):
    m = q.read(60)
    print("Message = %s" % m.get_body())
|
Winterflower/mdf | mdf/io/simplezipfile.py | Python | mit | 3,174 | 0.001575 | """
classes used for reading and writing compressed mdf objects
"""
from zipfile import ZipFile, ZIP_DEFLATED
from tempfile import NamedTemporaryFile
import logging
import os

_log = logging.getLogger(__name__)


class SimpleZipFile(object):
    """File-like object for reading from and writing to simple zipfiles
    with a single inner file.

    Data is staged through a named temporary file because zipfile has no
    API for incrementally streaming bytes in or out of an archive entry,
    and loading the whole payload into memory could be problematic.
    """

    def __init__(self, filename, inner_filename=None, mode="w",
                 compression=ZIP_DEFLATED, allowZip64=False):
        mode = mode.rstrip("+b")
        assert mode in ("r", "w"), "unsupported mode '%s'" % mode
        self.__mode = mode
        self.__filename = filename
        self.__inner_filename = inner_filename
        self.__compression = compression
        self.__allow_zip_64 = allowZip64
        # keep a reference to unlink as during shutdown the os module may be
        # discarded before this object is
        self.__unlink = os.unlink
        self.__closed = False
        # delete=False: the temp file must be closed before it can be added
        # to the zipfile, and close() removes it explicitly afterwards.
        self._fh = NamedTemporaryFile("w+b", delete=False)
        if mode == "r":
            # unzip the whole inner file into the temporary file
            zip_fh = ZipFile(filename, "r")
            try:
                if inner_filename:
                    self._fh.write(zip_fh.read(inner_filename))
                else:
                    infos = zip_fh.infolist()
                    assert len(infos) == 1, "Multiple entries found (%s)" % infos
                    self._fh.write(zip_fh.read(infos[0]))
                # seek back to the start of the file for reading
                self._fh.seek(0, os.SEEK_SET)
            finally:
                zip_fh.close()

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails; delegate unknown
        # attributes (read, write, seek, ...) to the underlying temp file.
        # Bug fix: the original returned ``self.__dict__.__class__[name]``
        # (subscripting the ``dict`` type) instead of the class dict entry.
        if name in self.__dict__:
            return self.__dict__[name]
        if name in self.__class__.__dict__:
            return self.__class__.__dict__[name]
        return getattr(self.__dict__["_fh"], name)

    def __del__(self):
        self.close()

    def close(self):
        """Finalize the archive (write mode) and remove the temp file.

        Safe to call more than once; subsequent calls are no-ops.
        """
        if self.__closed:
            return
        tmp_filename = self._fh.name
        self._fh.close()
        self.__closed = True
        try:
            # write everything to the zipfile
            if "w" in self.__mode:
                inner_filename = self.__inner_filename
                if not inner_filename:
                    # default inner name: "<basename>.txt"
                    base, ext = os.path.splitext(self.__filename)
                    inner_filename = os.path.basename(base) + ".txt"
                _log.debug("Compressing %s" % (self.__filename))
                zip_fh = ZipFile(self.__filename, "w", self.__compression)
                try:
                    zip_fh.write(tmp_filename, inner_filename)
                finally:
                    zip_fh.close()
        finally:
            # delete the tempfile
            self.__unlink(tmp_filename)
            self._fh = None
|
caseywstark/colab | colab/apps/object_feeds/signals.py | Python | mit | 918 | 0.007625 | from django.template.defaultfilters import slugify
from django.contrib.contenttypes.models import ContentType
from object_feeds.models import Action, Feed
def pre_save(sender, instance, **kwargs):
    """Signal handler: make sure a feed object has a Feed before saving.

    If the attribute named by ``instance._meta.feed_attr`` is unset, a new
    Feed row is created for the instance's content type and attached.
    (Fixed stray ' | ' delimiters in the code and dropped the unused
    ``model_name`` local.)
    """
    opts = instance._meta
    feed = getattr(instance, opts.feed_attr)
    if not feed:  # no feed, yet -- create and attach one
        content_type = ContentType.objects.get_for_model(instance)
        the_feed = Feed.objects.create(content_type=content_type)
        setattr(instance, opts.feed_attr, the_feed)
def post_save(sender, instance, created, **kwargs):
    """Signal handler: after save, point the feed back at its object.

    Things like object_id are only available once the instance has been
    saved, so the back-reference is filled in (and persisted) here.
    """
    feed = instance.feed
    if not feed.feed_object:
        feed.feed_object = instance
        feed.save()
|
soundstorm/RDWorksFileserver | RDWorks/RDLaser.py | Python | gpl-3.0 | 10,961 | 0.058115 |
# encoding: utf-8
import socket
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class RDLaser(object):
BUF_SIZE = 4096
CLIENT_PORT = 40200
SERVER_PORT = 50200
CMD_STOP = "d801".decode("HEX")
CMD_PAUSE_CONTINUE = "d803".decode("HEX")
CMD_HOME_XY = "d9100000000000000000000000".decode("HEX") #goto abs coord (0|0)
CMD_HOME_Z = "d82c".decode("HEX")
CMD_HOME_U = "de25".decode("HEX")
CMD_FOCUS = "dea5".decode("HEX")
HANDSHAKE = "da00057e".decode("HEX")
STATUS = "da000004".decode("HEX")
CFG_X_SETTINGS = "da000020".decode("HEX") #binary
CFG_X_STEP_LENGTH = "da000021".decode("HEX") #picometer
CFG_X_MAX_SPEED = "da000023".decode("HEX") #nanometer/s
CFG_X_JUMPOFF_SPEED = "da000024".decode("HEX") #nanometer/s
CFG_X_MAX_ACC = "da000025".decode("HEX") #nanometer/s^2
CFG_X_BREADTH = "da000026".decode("HEX") #nanometer
CFG_X_KEY_JUMPOFF_SPEED = "da000027".decode("HEX") #nanometer/s
CFG_X_KEY_ACC = "da000028".decode("HEX") #nanometer/s^2
CFG_X_ESTOP_ACC = "da000029".decode("HEX") #nanometer/s^2
CFG_X_HOME_OFFSET = "da00002a".decode("HEX") #nanometer
CFG_X_SETTINGS = "da000030".decode("HEX") #binary
CFG_Y_STEP_LENGTH = "da000031".decode("HEX") #picometer
CFG_Y_MAX_SPEED = "da000033".decode("HEX") #nanometer/s
CFG_Y_JUMPOFF_SPEED = "da000034".decode("HEX") #nanometer/s
CFG_Y_MAX_ACC = "da000035".decode("HEX") #nanometer/s^2
CFG_Y_BREADTH = "da000036".decode("HEX") #nanometer
CFG_Y_KEY_JUMPOFF_SPEED = "da000037".decode("HEX") #nanometer/s
CFG_Y_KEY_ACC = "da000038".decode("HEX") #nanometer/s^2
CFG_Y_ESTOP_ACC = "da000039".decode("HEX") #nanometer/s^2
CFG_Y_HOME_OFFSET = "da00003a".decode("HEX") #nanometer
CFG_Z_SETTINGS = "da000040".decod | e("HEX") #binary
CFG_Z_STEP_LENGTH = "da000041".decode("HEX") #picometer
CFG_Z_MAX_SPEED = "da000043".decode("HEX") #nanometer/s
CFG_Z_JUMPOFF_SPEED = "da000044".decode("HEX") #nanometer/s
CFG_Z_MAX_ACC | = "da000045".decode("HEX") #nanometer/s^2
CFG_Z_BREADTH = "da000046".decode("HEX") #nanometer
CFG_Z_KEY_JUMPOFF_SPEED = "da000047".decode("HEX") #nanometer/s
CFG_Z_KEY_ACC = "da000048".decode("HEX") #nanometer/s^2
CFG_Z_ESTOP_ACC = "da000049".decode("HEX") #nanometer/s^2
CFG_Z_HOME_OFFSET = "da00004a".decode("HEX") #nanometer
CFG_U_SETTINGS = "da000050".decode("HEX") #binary
CFG_U_STEP_LENGTH = "da000051".decode("HEX") #picometer
CFG_U_MAX_SPEED = "da000053".decode("HEX") #nanometer/s
CFG_U_JUMPOFF_SPEED = "da000054".decode("HEX") #nanometer/s
CFG_U_MAX_ACC = "da000055".decode("HEX") #nanometer/s^2
CFG_U_BREADTH = "da000056".decode("HEX") #nanometer
CFG_U_KEY_JUMPOFF_SPEED = "da000057".decode("HEX") #nanometer/s
CFG_U_KEY_ACC = "da000058".decode("HEX") #nanometer/s^2
CFG_U_ESTOP_ACC = "da000059".decode("HEX") #nanometer/s^2
CFG_U_HOME_OFFSET = "da00005a".decode("HEX") #nanometer
CFG_LASER12_TYPE = "da000010".decode("HEX") #binary
# glass/RF/RF+preignition
CFG_LASER34 = "da000226".decode("HEX") #binary
CFG_LASER1_FREQUENCE = "da000011".decode("HEX") #Hz
CFG_LASER1_POWER_MIN = "da000012".decode("HEX") #percent
CFG_LASER1_POWER_MAX = "da000013".decode("HEX") #percent
CFG_LASER1_PREIG_FREQ = "da00001a".decode("HEX") #Hz
CFG_LASER1_PREIG_PULSE = "da00001b".decode("HEX") #1/10 percent
CFG_LASER2_FREQUENCE = "da000017".decode("HEX") #Hz
CFG_LASER2_POWER_MIN = "da000018".decode("HEX") #percent
CFG_LASER2_POWER_MAX = "da000019".decode("HEX") #percent
CFG_LASER2_PREIG_FREQ = "da00001c".decode("HEX") #Hz
CFG_LASER2_PREIG_PULSE = "da00001d".decode("HEX") #1/10 percent
CFG_LASER3_POWER_MIN = "da000063".decode("HEX") #percent
CFG_LASER3_POWER_MAX = "da000064".decode("HEX") #percent
CFG_LASER3_FREQUENCE = "da000065".decode("HEX") #Hz
CFG_LASER3_PREIG_FREQ = "da000066".decode("HEX") #Hz
CFG_LASER3_PREIG_PULSE = "da000067".decode("HEX") #1/10 percent
CFG_LASER4_POWER_MIN = "da000068".decode("HEX") #percent
CFG_LASER4_POWER_MAX = "da000069".decode("HEX") #percent
CFG_LASER4_FREQUENCE = "da00006a".decode("HEX") #Hz
CFG_LASER4_PREIG_FREQ = "da00006b".decode("HEX") #Hz
CFG_LASER4_PREIG_PULSE = "da00006c".decode("HEX") #1/10 percent
CFG_LASER_ATTENUATION = "da000016".decode("HEX") #1/10 percent
CFG_HEAD_DISTANCE = "da00001e".decode("HEX") #nanometer
CFG_ENABLE_AUTOLAYOUT = "da000004".decode("HEX") #binary
#02cfd48989e9
CFG_FEED_TRANSMISSION = "da000200".decode("HEX") #binary
CFG_BROKEN_DELAY = "da00020d".decode("HEX") #milliseconds
LASER_STATE = "da000400".decode("HEX")
TOTAL_ON_TIME = "da000401".decode("HEX") #seconds
TOTAL_PROCESSING_TIME = "da000402".decode("HEX") #seconds
TOTAL_TRAVEL_X = "da000423".decode("HEX") #meters
TOTAL_TRAVEL_Y = "da000433".decode("HEX") #meters
TOTAL_PROCESSING_TIMES = "da000403".decode("HEX") #count
TOTAL_LASER_ON_TIME = "da000411".decode("HEX") #seconds
PREVIOUS_PROCESSING_TIME = "da00048f".decode("HEX") #milliseconds
PREVIOUS_WORK_TIME = "da000408".decode("HEX") #milliseconds
MAINBOARD_VERSION = "da00057f".decode("HEX") #string
POSITION_AXIS_X = "da000421".decode("HEX") #nanometer
POSITION_AXIS_Y = "da000431".decode("HEX") #nanometer
POSITION_AXIS_Z = "da000441".decode("HEX") #nanometer
POSITION_AXIS_U = "da000451".decode("HEX") #nanometer
# thanks to http://stefan.schuermans.info/rdcam/scrambling.html
MAGIC = 0x88
values = {
STATUS : 0,
CFG_X_SETTINGS : 0,
CFG_X_STEP_LENGTH : 0,
CFG_X_MAX_SPEED : 0,
CFG_X_JUMPOFF_SPEED : 0,
CFG_X_MAX_ACC : 0,
CFG_X_BREADTH : 0,
CFG_X_KEY_JUMPOFF_SPEED : 0,
CFG_X_KEY_ACC : 0,
CFG_X_ESTOP_ACC : 0,
CFG_X_HOME_OFFSET : 0,
CFG_X_SETTINGS : 0,
CFG_Y_STEP_LENGTH : 0,
CFG_Y_MAX_SPEED : 0,
CFG_Y_JUMPOFF_SPEED : 0,
CFG_Y_MAX_ACC : 0,
CFG_Y_BREADTH : 0,
CFG_Y_KEY_JUMPOFF_SPEED : 0,
CFG_Y_KEY_ACC : 0,
CFG_Y_ESTOP_ACC : 0,
CFG_Y_HOME_OFFSET : 0,
CFG_Z_SETTINGS : 0,
CFG_Z_STEP_LENGTH : 0,
CFG_Z_MAX_SPEED : 0,
CFG_Z_JUMPOFF_SPEED : 0,
CFG_Z_MAX_ACC : 0,
CFG_Z_BREADTH : 0,
CFG_Z_KEY_JUMPOFF_SPEED : 0,
CFG_Z_KEY_ACC : 0,
CFG_Z_ESTOP_ACC : 0,
CFG_Z_HOME_OFFSET : 0,
CFG_U_SETTINGS : 0,
CFG_U_STEP_LENGTH : 0,
CFG_U_MAX_SPEED : 0,
CFG_U_JUMPOFF_SPEED : 0,
CFG_U_MAX_ACC : 0,
CFG_U_BREADTH : 0,
CFG_U_KEY_JUMPOFF_SPEED : 0,
CFG_U_KEY_ACC : 0,
CFG_U_ESTOP_ACC : 0,
CFG_U_HOME_OFFSET : 0,
CFG_LASER12_TYPE : 0,
CFG_LASER34 : 0,
CFG_LASER1_FREQUENCE : 0,
CFG_LASER1_POWER_MIN : 0,
CFG_LASER1_POWER_MAX : 0,
CFG_LASER1_PREIG_FREQ : 0,
CFG_LASER1_PREIG_PULSE : 0,
CFG_LASER2_FREQUENCE : 0,
CFG_LASER2_POWER_MIN : 0,
CFG_LASER2_POWER_MAX : 0,
CFG_LASER2_PREIG_FREQ : 0,
CFG_LASER2_PREIG_PULSE : 0,
CFG_LASER3_POWER_MIN : 0,
CFG_LASER3_POWER_MAX : 0,
CFG_LASER3_FREQUENCE : 0,
CFG_LASER3_PREIG_FREQ : 0,
CFG_LASER3_PREIG_PULSE : 0,
CFG_LASER4_POWER_MIN : 0,
CFG_LASER4_POWER_MAX : 0,
CFG_LASER4_FREQUENCE : 0,
CFG_LASER4_PREIG_FREQ : 0,
CFG_LASER4_PREIG_PULSE : 0,
CFG_LASER_ATTENUATION : 0,
CFG_HEAD_DISTANCE : 0,
CFG_ENABLE_AUTOLAYOUT : 0,
CFG_FEED_TRANSMISSION : 0,
CFG_BROKEN_DELAY : 0,
LASER_STATE : 0,
TOTAL_ON_TIME : 0,
TOTAL_PROCESSIN |
levilucio/SyVOLT | UMLRT2Kiltera_MM/graph_Type.py | Python | mit | 1,617 | 0.022882 | """
__graph_Type.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
__________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_Type(graphEntity):
    """Auto-generated (ATOM3) graphical appearance for the 'Type' entity.

    Fixed: removed stray ' | ' dataset delimiters that had corrupted the
    ``else`` branch in __init__ and the 'connector' tag string.
    """

    def __init__(self, x, y, semObject = None):
        self.semanticObject = semObject
        self.sizeX, self.sizeY = 123.0, 63
        graphEntity.__init__(self, x, y)
        self.ChangesAtRunTime = 0
        self.constraintList = []
        # NOTE(review): result unused, but the call is kept in case
        # attributesToDraw() has side effects in generated code.
        if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
        else: atribs = None
        self.graphForms = []
        self.imageDict = self.getImageDict()

    def DrawObject(self, drawing, showGG = 0):
        self.dc = drawing
        if showGG and self.semanticObject: self.drawGGLabel(drawing)
        # invisible connector handle at the entity's attachment point
        h = drawing.create_oval(self.translate([142.0, 48.0, 142.0, 48.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
        self.connectors.append( h )
        # main body rectangle
        h = drawing.create_rectangle(self.translate([20.0, 18.0, 141.0, 79.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'gray')
        self.gf139 = GraphicalForm(drawing, h, "gf139")
        self.graphForms.append(self.gf139)

    def postCondition( self, actionID, * params):
        return None

    def preCondition( self, actionID, * params):
        return None

    def getImageDict( self ):
        # no images for this entity
        return dict()

new_class = graph_Type
|
vasiliykochergin/euca2ools | euca2ools/commands/ec2/deletenetworkaclentry.py | Python | bsd-2-clause | 1,948 | 0 | # Copyright 2013-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
| # conditions are met:
#
# Redistributions of source code must retain the ab | ove copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.ec2 import EC2Request
class DeleteNetworkAclEntry(EC2Request):
    """euca2ools request that deletes one entry from a VPC network ACL."""
    DESCRIPTION = 'Delete a network acl rule'
    # Declarative argument spec consumed by requestbuilder; dest names such
    # as 'NetworkAclId', 'RuleNumber' and 'Egress' map directly onto the
    # EC2 DeleteNetworkAclEntry request parameters.
    ARGS = [Arg('NetworkAclId', metavar='NACL', help='''ID of the
            network ACL to delete an entry from (required)'''),
            Arg('-n', '--rule-number', dest='RuleNumber', required=True,
                type=int, help='number of the entry to delete (required)'),
            Arg('--egress', dest='Egress', action='store_true', help='''delete
            an egress entry (default: delete an ingress entry)''')]
|
Openergy/oplus | oplus/__init__.py | Python | mpl-2.0 | 404 | 0.002475 | from .version import version as __version__
# Public API of the oplus package: re-export the main entry points.
# (Fixed two import lines corrupted by stray ' | ' delimiters.)
from oplus.configuration import CONF
from oplus.epm.api import *
from oplus.weather_data.api import *
from oplus.standard_output.api import *
from oplus.eio import Eio
from oplus.mtd import Mtd
from oplus.err import Err
from oplus.summary_table import SummaryTable
from oplus.output_table import OutputTable
from oplus.simulation import Simulation, simulate
|
dbarbier/privot | python/test/t_Normal_wrongarg.py | Python | lgpl-3.0 | 760 | 0.011842 | #! /usr/bin/env python
fro | m openturns import *
TESTPREAMBLE()
try :
try :
# Instanciate one distribution object
meanPoint = NumericalPoint(1)
meanPoint[0] = 1.0
sigma = NumericalPoint(1)
sigma[0] = 1.0
R = CorrelationMatrix(1)
distribution = Normal(meanPoint, sigma, R)
print "Distribution " , repr(distribution)
| # We try to set an erroneous covariance matrix (wrong dimension) into distribution
newR = CorrelationMatrix(2)
distribution.setCorrelationMatrix(newR)
# Normally, we should never go here
raise
except :
pass
#except TestFailed, ex :
except :
import sys
print "t_Normal_wrongarg.py", sys.exc_type, sys.exc_value
|
polyaxon/polyaxon | platform/coredb/coredb/query_managers/project.py | Python | apache-2.0 | 2,249 | 0 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License | is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing pe | rmissions and
# limitations under the License.
from coredb.query_managers.manager import BaseQueryManager
from polyaxon.pql.builder import (
ArrayCondition,
DateTimeCondition,
SearchCondition,
ValueCondition,
)
from polyaxon.pql.parser import (
parse_datetime_operation,
parse_search_operation,
parse_value_operation,
)
class ProjectQueryManager(BaseQueryManager):
    """Declarative PQL query configuration for the Project model.

    Maps user-facing query field names onto model fields (FIELDS_PROXY),
    onto operation parsers (PARSERS_BY_FIELD) and onto condition builders
    (CONDITIONS_BY_FIELD).
    """
    NAME = "project"
    # Aliases: user-facing name -> actual ORM field/lookup path.
    FIELDS_PROXY = {
        "id": "uuid",
        "uid": "uuid",
        "user": "user__username",
    }
    # Fields that may be used for ordering results.
    FIELDS_ORDERING = ("created_at", "updated_at", "name", "user")
    # Filter out soft-deleted (non-live) rows by default.
    CHECK_ALIVE = True
    PARSERS_BY_FIELD = {
        # Uuid
        "id": parse_search_operation,
        "uid": parse_search_operation,
        "uuid": parse_search_operation,
        # Dates
        "created_at": parse_datetime_operation,
        "updated_at": parse_datetime_operation,
        # Name
        "name": parse_search_operation,
        # Description
        "description": parse_search_operation,
        # Tags
        "tags": parse_value_operation,
        # User
        "user": parse_value_operation,
        # Live state
        "live_state": parse_value_operation,
    }
    CONDITIONS_BY_FIELD = {
        # Uuid
        "id": SearchCondition,
        "uid": SearchCondition,
        "uuid": SearchCondition,
        # Dates
        "created_at": DateTimeCondition,
        "updated_at": DateTimeCondition,
        # Name
        "name": SearchCondition,
        # Description
        "description": SearchCondition,
        # User
        "user": ValueCondition,
        # Tags
        "tags": ArrayCondition,
        # Live state
        "live_state": ValueCondition,
    }
|
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons_contrib/uv_magic_uv/muv_common.py | Python | gpl-3.0 | 1,988 | 0 | # <pep8-80 compliant>
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Nutti <nutti.metro@gmail.com>"
__status__ = "production"
__version__ = "4.1"
__date__ = "13 Nov 2016"
import bpy
from . import muv_props
# NOTE(review): despite the name this is (a truncated) pi, not the golden
# ratio; value and name kept as-is because other modules may import
# muv_common.PHI.
PHI = 3.1415926535
def debug_print(*s):
    """Print *s* (as a tuple) to the console, but only in debugging mode."""
    if not muv_props.DEBUG:
        return
    print(s)
def check_version(major, minor, unused):
    """Compare the running Blender version against (major, minor).

    Returns 0 when equal, 1 when the running version is considered newer,
    -1 otherwise.  (Fixed the def line corrupted by a stray ' | '.)

    NOTE(review): when bpy's major is lower but its minor is higher this
    still returns 1 -- preserved as-is, but looks suspect; a tuple
    comparison would be the usual fix.
    """
    if bpy.app.version[0] == major and bpy.app.version[1] == minor:
        return 0
    if bpy.app.version[0] > major:
        return 1
    if bpy.app.version[1] > minor:
        return 1
    return -1
def redraw_all_areas():
    """Tag every area of the current Blender screen for redraw.

    (Fixed the loop line corrupted by a stray ' | ' delimiter.)
    """
    for area in bpy.context.screen.areas:
        area.tag_redraw()
def get_space(area_type, region_type, space_type):
    """
    Get current area/region/space

    Returns the first area of *area_type* on the current screen together
    with its first matching region and space.

    NOTE(review): if no area/region/space matches, the loop variables keep
    their last value and a non-matching triple is returned -- callers are
    assumed to request types that actually exist.
    """
    for area in bpy.context.screen.areas:
        if area.type == area_type:
            break
    for region in area.regions:
        if region.type == region_type:
            break
    for space in area.spaces:
        if space.type == space_type:
            break
    return (area, region, space)
|
tklovett/MaudeMiner | MaudeMiner/core/loader/devices.py | Python | mit | 3,344 | 0.032297 | from MaudeMiner.core.database import db
from MaudeMiner.core.models import Device
from MaudeMiner.utils import update_progress
from MaudeMiner.core.loader.utils import *
from MaudeMiner.settings import LINES_PER_DB_COMMIT
EXPECTED_NUMBER_OF_FIELDS = 45
def load():
# ensure tables exists
db.create_tables(["Devices"])
print " === Loading Devices === "
files = get_files_with_prefix("foidev", excludes=['foidevproblem', 'foidevAdd', 'foidevChange'])
for line in files:
v = split_fields(line)
if len(v) != EXPECTED_NUMBER_OF_FIELDS:
continue
device = Device()
device.report_key = v[0]
device.device_event_key = v[1]
device.implant_flag = v[2]
device.date_removed_flag = v[3]
device.device_sequence_number = v[4]
device.date_received = v[5]
device.brand_name = v[6]
device.generic_name = v[7]
device.manufacturer_name = v[8]
device.manufacturer_address_1 = v[9]
device.manufacturer_address_2 = v[10]
device.manufacturer_city = v[11]
device.manufacturer_state_code = v[12]
device.manufacturer_zip_code = v[13]
device.manufacturer_zip_code_ext = v[14]
device.manufacturer_country_code = v[15]
device.manufacturer_postal_code = v[16]
device.expiration_date_of_device = v[17]
device.model_number = v[18]
device.lot_number = v[19]
d | evice.catalog_number = v[20]
device.other_id_number = v[21]
device.device_operator = v[22]
device.device_availability = v[23]
device.date_reported_to_manufacturer = v[24]
device.device_report_product_code = v[25]
device.device_age = v[26]
device.device_evalu | ated_by_manufacturer = v[27]
device.baseline_brand_name = v[28]
device.baseline_generic_name = v[29]
device.baseline_model_number = v[30]
device.baseline_catalog_number = v[31]
device.baseline_other_id_number = v[32]
device.baseline_device_family = v[33]
device.baseline_shelf_life_contained_in_label = v[34]
device.baseline_shelf_life_in_months = v[35]
device.baseline_pma_flag = v[36]
device.baseline_pma_number = v[37]
device.baseline_510k_flag = v[38]
device.baseline_510k_number = v[39]
device.baseline_preamendment = v[40]
device.baseline_transitional = v[41]
device.baseline_510k_exempt_flag = v[42]
device.baseline_date_first_marketed = v[43]
device.baseline_date_ceased_marketing = v[44]
db.save(device, commit=False)
if files.filelineno() % 1000 == 0:
update_progress("Loaded: ", files.filelineno(), LINES_IN_CURRENT_FILE[0])
if files.filelineno() % LINES_PER_DB_COMMIT == 0:
db.commit()
db.commit()
print "\n # Done # \n" |
krafczyk/root | tutorials/pyroot/zdemo.py | Python | lgpl-2.1 | 8,180 | 0.066137 | ## \file
## \ingroup tutorial_pyroot
## This macro is an example of graphs in log scales with annotations.
##
## The presented results
## are predictions of invariant cross-section of Direct Photons produced
## at RHIC energies, based on the universality of scaling function H(z).
##
##
## These Figures were published in JINR preprint E2-98-64, Dubna,
## 1998 and submitted to CPC.
##
## \macro_image
## \macro_code
##
## \authors Michael Tokarev, Elena Potrebenikova (JINR Dubna)
import ROOT
from array import array
from math import *
NMAX = 20  # capacity of the module-level work arrays below
# Work arrays shared between zdemo() and hz_calc(): hz_calc() fills them,
# zdemo() plots them.
Z = array( 'f', [0.]*NMAX )
HZ = array( 'f', [0.]*NMAX )
PT = array( 'f', [0.]*NMAX )
INVSIG = array( 'f', [0.]*NMAX )
NLOOP = 0  # number of valid entries in the arrays (set by hz_calc)
saves = {}  # keeps Python references to ROOT objects so they aren't GC'd
#_______________________________________________________________________________
def zdemo():
    """Draw the two-pad canvas: invariant cross-sections vs qT (left pad)
    and the scaling function H(z) vs z (right pad).

    (Fixed two lines corrupted by stray ' | ' delimiters and the wrong
    '500 GeV' comment on the 63 GeV section.)
    """
    global NLOOP
    global Z, HZ, PT, INVSIG
    global saves

    # Create a new canvas.
    c1 = ROOT.TCanvas( 'zdemo', 'Monte Carlo Study of Z scaling', 10, 40, 800, 600 )
    c1.Range( 0, 0, 25, 18 )
    c1.SetFillColor( 40 )
    saves[ 'c1' ] = c1 # prevent deletion at end of zdemo

    pl = ROOT.TPaveLabel( 1, 16.3, 24, 17.5,
       'Z-scaling of Direct Photon Productions in pp Collisions at RHIC Energies', 'br' )
    pl.SetFillColor(18)
    pl.SetTextFont(32)
    pl.SetTextColor(49)
    pl.Draw()
    saves[ 'pl' ] = pl

    t = ROOT.TLatex()
    t.SetTextFont(32)
    t.SetTextColor(1)
    t.SetTextSize(0.03)
    t.SetTextAlign(12)
    t.DrawLatex( 3.1, 15.5, 'M.Tokarev, E.Potrebenikova ')
    t.DrawLatex( 14., 15.5, 'JINR preprint E2-98-64, Dubna, 1998 ')
    saves[ 't' ] = t

    pad1 = ROOT.TPad( 'pad1', 'This is pad1', 0.02, 0.02, 0.48, 0.83, 33 )
    pad2 = ROOT.TPad( 'pad2', 'This is pad2', 0.52, 0.02, 0.98, 0.83, 33 )
    pad1.Draw()
    pad2.Draw()
    saves[ 'pad1' ] = pad1; saves[ 'pad2' ] = pad2

    #
    # Cross-section of direct photon production in pp collisions at 63 GeV vs Pt
    #
    energ = 63
    dens = 1.766
    tgrad = 90.
    ptmin = 4.
    ptmax = 24.
    delp = 2.
    hz_calc( energ, dens, tgrad, ptmin, ptmax, delp )
    pad1.cd()
    pad1.Range( -0.255174, -19.25, 2.29657, -6.75 )
    pad1.SetLogx()
    pad1.SetLogy()

    # create a 2-d histogram to define the range
    pad1.DrawFrame( 1, 1e-18, 110, 1e-8 )
    pad1.GetFrame().SetFillColor( 19 )
    t = ROOT.TLatex()
    t.SetNDC()
    t.SetTextFont( 62 )
    t.SetTextColor( 36 )
    t.SetTextSize( 0.08 )
    t.SetTextAlign( 12 )
    t.DrawLatex( 0.6, 0.85, 'p - p' )
    t.SetTextSize( 0.05 )
    t.DrawLatex( 0.6, 0.79, 'Direct #gamma' )
    t.DrawLatex( 0.6, 0.75, '#theta = 90^{o}' )
    t.DrawLatex( 0.20, 0.45, 'Ed^{3}#sigma/dq^{3}' )
    t.DrawLatex( 0.18, 0.40, '(barn/Gev^{2})' )
    t.SetTextSize( 0.045 )
    t.SetTextColor( ROOT.kBlue )
    t.DrawLatex( 0.22, 0.260, '#sqrt{s} = 63(GeV)' )
    t.SetTextColor( ROOT.kRed )
    t.DrawLatex( 0.22, 0.205,'#sqrt{s} = 200(GeV)' )
    t.SetTextColor( 6 )
    t.DrawLatex( 0.22, 0.15, '#sqrt{s} = 500(GeV)' )
    t.SetTextSize( 0.05 )
    t.SetTextColor( 1 )
    t.DrawLatex( 0.6, 0.06, 'q_{T} (Gev/c)' )
    saves[ 't2' ] = t # note the label that is used!

    gr1 = ROOT.TGraph( NLOOP, PT, INVSIG )
    gr1.SetLineColor( 38 )
    gr1.SetMarkerColor( ROOT.kBlue )
    gr1.SetMarkerStyle( 21 )
    gr1.SetMarkerSize( 1.1 )
    gr1.Draw( 'LP' )
    saves[ 'gr1' ] = gr1

    #
    # Cross-section of direct photon production in pp collisions at 200 GeV vs Pt
    #
    energ = 200
    dens = 2.25
    tgrad = 90.
    ptmin = 4.
    ptmax = 64.
    delp = 6.
    hz_calc( energ, dens, tgrad, ptmin, ptmax, delp )
    gr2 = ROOT.TGraph( NLOOP, PT, INVSIG )
    gr2.SetLineColor( 38 )
    gr2.SetMarkerColor( ROOT.kRed )
    gr2.SetMarkerStyle( 29 )
    gr2.SetMarkerSize( 1.5 )
    gr2.Draw( 'LP' )
    saves[ 'gr2' ] = gr2

    #
    # Cross-section of direct photon production in pp collisions at 500 GeV vs Pt
    #
    energ = 500
    dens = 2.73
    tgrad = 90.
    ptmin = 4.
    ptmax = 104.
    delp = 10.
    hz_calc( energ, dens, tgrad, ptmin, ptmax, delp )
    gr3 = ROOT.TGraph( NLOOP, PT, INVSIG )
    gr3.SetLineColor( 38 )
    gr3.SetMarkerColor( 6 )
    gr3.SetMarkerStyle( 8 )
    gr3.SetMarkerSize( 1.1 )
    gr3.Draw( 'LP' )
    saves[ 'gr3' ] = gr3

    # legend markers (one dummy single-point graph per energy)
    dum = array( 'f', [0.] )
    graph = ROOT.TGraph( 1, dum, dum )
    graph.SetMarkerColor( ROOT.kBlue )
    graph.SetMarkerStyle( 21 )
    graph.SetMarkerSize( 1.1 )
    graph.SetPoint( 0, 1.7, 1.e-16 )
    graph.Draw( 'LP' )
    saves[ 'graph' ] = graph

    graph = ROOT.TGraph( 1, dum, dum )
    graph.SetMarkerColor( ROOT.kRed )
    graph.SetMarkerStyle( 29 )
    graph.SetMarkerSize( 1.5 )
    graph.SetPoint( 0, 1.7, 2.e-17 )
    graph.Draw( 'LP' )
    saves[ 'graph2' ] = graph # note the label that is used!

    graph = ROOT.TGraph( 1, dum, dum )
    graph.SetMarkerColor( 6 )
    graph.SetMarkerStyle( 8 )
    graph.SetMarkerSize( 1.1 )
    graph.SetPoint( 0, 1.7, 4.e-18)
    graph.Draw( 'LP' )
    saves[ 'graph3' ] = graph # note the label that is used!

    pad2.cd()
    pad2.Range( -0.43642, -23.75, 3.92778, -6.25 )
    pad2.SetLogx()
    pad2.SetLogy()
    pad2.DrawFrame( 1, 1e-22, 3100, 1e-8 )
    pad2.GetFrame().SetFillColor( 19 )
    gr = ROOT.TGraph( NLOOP, Z, HZ )
    gr.SetTitle( 'HZ vs Z' )
    gr.SetFillColor( 19 )
    gr.SetLineColor( 9 )
    gr.SetMarkerColor( 50 )
    gr.SetMarkerStyle( 29 )
    gr.SetMarkerSize( 1.5 )
    gr.Draw( 'LP' )
    saves[ 'gr' ] = gr

    t = ROOT.TLatex()
    t.SetNDC()
    t.SetTextFont( 62 )
    t.SetTextColor( 36 )
    t.SetTextSize( 0.08 )
    t.SetTextAlign( 12 )
    t.DrawLatex( 0.6, 0.85, 'p - p' )
    t.SetTextSize( 0.05 )
    t.DrawLatex( 0.6, 0.79, 'Direct #gamma' )
    t.DrawLatex( 0.6, 0.75, '#theta = 90^{o}' )
    t.DrawLatex( 0.70, 0.55, 'H(z)' )
    t.DrawLatex( 0.68, 0.50, '(barn)' )
    t.SetTextSize( 0.045 )
    t.SetTextColor( 46 )
    t.DrawLatex( 0.20, 0.30, '#sqrt{s}, GeV' )
    t.DrawLatex( 0.22, 0.26, '63' )
    t.DrawLatex( 0.22, 0.22, '200' )
    t.DrawLatex( 0.22, 0.18, '500' )
    t.SetTextSize( 0.05 )
    t.SetTextColor( 1 )
    t.DrawLatex( 0.88, 0.06, 'z' )
    saves[ 't3' ] = t # note the label that is used!

    c1.Modified()
    c1.Update()
#_______________________________________________________________________________
def hz_calc( ENERG, DENS, TGRAD, PTMIN, PTMAX, DELP ):
global NLOOP
global Z, HZ, PT, INVSIG
CSEFT= 1.
GM1 = 0.00001
GM2 = 0.00001
A1 = 1.
A2 = 1.
ALX = 2.
BETA = 1.
KF1 = 8.E-7
KF2 = 5.215
MN = 0.9383
DEGRAD=0.01745329
# print 'ENR= %f DENS= %f PTMIN= %f PTMAX= %f DELP= %f ' % (ENERG,DENS,PTMIN,PTMAX,DELP)
DNDETA= DENS
MB1 = MN*A1
MB2 = MN*A2
EB1 = ENERG/2.*A1
EB2 = ENERG/2.*A2
M1 = GM1
M2 = GM2
THET = TGRAD*DEGRAD
NLOOP = int((PTMAX-PTMIN)/DELP)
for I in range(NLOOP):
PT[I]=PTMIN+I*DELP
PTOT = PT[I]/sin(THET)
ETOT = sqrt(M1*M1 + PTOT*PTOT)
PB1 = sqrt(EB1*EB1 - MB1*MB1)
PB2 = sqrt(EB2*EB2 - MB2*MB2)
P2P3 = EB2*ETOT+PB2*PTOT*cos(THET)
P1P2 = EB2*EB1+PB2*PB1
P1P3 = EB1*ETOT-PB1*PTOT*cos(THET)
X1 = P2P3/P1P2
X2 = P1P3/P1P2
Y1 = X1+sqrt(X1*X2*(1.-X1)/(1.-X2))
Y2 = X2+sqrt(X1*X2*(1.-X2)/(1.-X1))
S = (MB1*MB1)+2.*P1P2+(MB2*MB2)
SMIN = 4.*((MB1*MB1)*(X1*X1) +2.*X1*X2*P1P2+(MB2*MB2)*(X2*X2))
SX1 = 4.*( 2*(MB1*MB1)*X1+2*X2*P1P2)
SX2 = 4.*( 2*(MB2*MB2)*X2+2*X1*P1P2)
SX1X2= 4.*(2*P1P2)
DELM = pow((1.-Y1)*(1.-Y2),ALX)
Z[I] = sqrt(SMIN)/DELM/pow(DNDETA,BETA)
Y1X1 = 1. +X2*(1-2.*X1)/(2.*(Y1-X1)*(1.-X2))
Y1X2 = X1*(1-X1)/(2.*(Y1-X1)*(1.-X2)*(1.-X2))
Y2X1 = X2*(1-X2)/(2.*(Y2-X2)*(1.-X1)*(1.-X1))
Y2X2 = 1. +X1*(1-2.*X2)/(2.*(Y2-X2)*(1.-X1))
Y2X1X2= Y2X1*( (1.-2.*X2)/(X2*(1-X2)) -( Y2X2-1.)/(Y2-X2))
Y1X1X2= Y1X2*( (1.-2.*X1)/(X1*(1-X1)) -( Y1X1-1.)/(Y1-X1))
KX1=-DELM*(Y1X1*ALX/(1.-Y1) + Y2X1*ALX/(1.-Y2))
KX2=-DELM*(Y2X2*ALX/(1.-Y2) + Y1X2*ALX/(1.-Y1))
ZX1=Z[I]*(SX1/(2.*SMIN)-KX1/DELM)
ZX2=Z[I]*(SX2/(2.*SMIN)-KX2/DELM)
H1=ZX1*ZX2
HZ[I]=KF1/pow(Z[I],KF2)
INVSIG[I]=(HZ[I]*H1*16.)/S
# run if loaded as script
if __name__ == '__main__':
zdemo()
|
jaydenkieran/Turbo | turbo/database.py | Python | mit | 2,402 | 0.001665 | import logging
import rethinkdb as r
log = logging.getLogger(__name__)
class Database():
def __init__(self, bot):
self.bot = bot
self.db_name = self.bot.config.rname
self.db = None
r.set_loop_type("asyncio")
self.ready = False
def get_db(self):
"""
Returns the RethinkDB module/instance
"""
return r
async def insert(self, table, data):
"""
Insert a document into a table
"""
log.debug(
"Saving document to table {} with data: {}".format(table, data))
return await r.table(table).insert(data, conflict="update").run(self.db)
async def delete(self, table, primary_key=None):
"""
Deletes a document(s) from a table
"""
log.debug(
"Deleting document from table {} with primary key {}".format(table, primary_key))
if primary_key is not None:
# Delete one document with the key name
| return await r.table(table).get(primary_key).delete().run(self.db)
else:
# Delete all documents in the table
return aw | ait r.table(table).delete().run(self.db)
async def connect(self, host, port, user, password):
"""
Establish a database connection
"""
log.info("Connecting to database: {}".format(self.db_name))
try:
self.db = await r.connect(db=self.db_name, host=host, port=port, user=user, password=password)
except r.errors.ReqlDriverError as e:
log.error(e)
return False
info = await self.db.server()
# Create the database if it does not exist
try:
await r.db_create(self.db_name).run(self.db)
log.info("Created database: {}".format(self.db_name))
except r.errors.ReqlOpFailedError:
log.debug(
"Database {} already exists, skipping creation".format(self.db_name))
return True
async def create_table(self, name, primary='id'):
"""
Creates a new table in the database
"""
try:
await r.table_create(name, primary_key=primary).run(self.db)
log.info("Created table: {}".format(name))
except r.errors.ReqlOpFailedError:
log.debug(
"Table {} already exists, skipping creation".format(name))
|
lettersonsounds/twenty | score.py | Python | unlicense | 1,251 | 0.031974 | from pippi import dsp
from pippi import tune
def ping(maxlen=44100, freqs=None):
out = ''
if freqs is None:
freqs = [ dsp.rand(20,10000) for i in range(4) ]
tlen = dsp.randint(10, maxlen)
tones = [ dsp.tone(length=tlen, freq=freq, amp=0.1, wavetype='random')
for freq in freqs ]
tones = [ dsp.split(tone, 64) for tone in tones ]
pcurves = [ dsp.breakpoint([ dsp.rand() for t in range(len(tones[i]) / 20) ],
len(tones[i])) for i in range(len(tones)) ]
tones = [ [ dsp.pan(t, pcurves[i][ti]) for ti, t in enumerate(tones[i]) ]
for i in range(len(tones)) ]
fcurves = [ dsp.breakpoint([ dsp.rand(0.0, 0.1) + 0.9 for t in range(len(tones[i]) / 20) ],
len(tones[i])) for i in range(len(tones)) ]
tones = [ [ dsp.transpose(t, fcurves[i][ti] + 0.1) for ti, t in enumerate(tones[i]) ]
for i in rang | e(len(tones)) ]
out = dsp.mix([ dsp.env(''.join(tone), 'random') for tone in tones ])
out = dsp.env(out, 'random')
out = dsp.pad(out, 0, dsp.randint(0, maxlen * 3))
return out
#freqs = tune.fromdegrees( | [1,3,5,6,9], 6, 'a')
freqs = None
out = ''.join([ ping(dsp.mstf(200), freqs) for i in range(100)])
dsp.write(out, 'twenty')
|
RobSpectre/garfield | garfield/contacts/migrations/0007_contact_recruiter.py | Python | mit | 392 | 0 | # Generated by Django 2.0.4 o | n 2018-04-24 21:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0006_auto_20180423_1629'),
]
operations = [
migrations.AddField(
model_name='contact',
name='recruiter',
field=models.B | ooleanField(default=False),
),
]
|
marcardioid/DailyProgrammer | solutions/227_Hard/solution.py | Python | mit | 1,712 | 0.005257 | def build_matrix(lines):
edges = {(0, 1): '-', (0, -1): '-', (1, 0): '|', (-1, 0): '|', (1, 1): '\\', (-1, -1): '\\', (1,-1): '/', (-1, 1): '/'}
connections = {}
rows = [list(l.rstrip()) for l in lines[1:]]
def valid(y, x):
return True if y >= 0 and y < len(rows) and x >= 0 and x < len(rows[y]) else False
def trace(y, x, dy, dx, edge):
while True:
y, x = y + dy, x + dx
c = rows[y][x] if valid(y, x) else ' '
if c.isalpha():
return c
elif c == '#':
rows[y][x] = ' '
for (dy, dx), edge in edges.items():
if valid(y + dy, x + dx) and rows[y + dy][x + dx] == edge:
return trace(y, x, dy, dx, edge)
elif c == edge:
rows[y][x] = ' '
for y, row i | n enumer | ate(rows):
for x, a in enumerate(row):
if a.isalpha():
if not a in connections:
connections[a] = []
for (dy, dx), edge in edges.items():
if valid(y + dy, x + dx) and rows[y + dy][x + dx] == edge:
b = trace(y, x, dy, dx, edge)
connections[a].append(b)
if not b in connections:
connections[b] = []
connections[b].append(a)
matrix = []
k = sorted(connections.keys())
for y in k:
matrix.append(''.join(['1' if x in connections[y] else '0' for x in k]))
return matrix
if __name__ == "__main__":
with open("input/input1.txt", "r") as file:
lines = file.read().splitlines()
print(build_matrix(lines)) |
pydanny/djangopackages | searchv2/tests/__init__.py | Python | mit | 163 | 0.006135 | from | searchv2.tests.test_builders import *
from searchv2.tests.test_models import *
from searchv2.tests.test_utils import *
from searchv2.tests.test_views | import * |
plus3it/watchmaker | docs/conf.py | Python | apache-2.0 | 11,108 | 0 | # -*- coding: utf-8 -*-
"""Configure Watchmaker documentation."""
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import os
import sys
#
# Created by sphinx-quickstart on Thu Jun 30 20:11:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath('../src/'))
rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# extensions = []
extensions = [
'myst_parser',
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
}
autoclass_content = 'class'
autodoc_member_order = 'bysource'
autodoc_default_options = {'members': True, 'show-inheritance': True}
myst_heading_anchors = 4
napoleon_use_param = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a map of suffix => parser:
#
# source_suffix = {
# '.md': 'markdown',
# '.rst': 'restructuredtext',
# }
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Watchmaker'
copyright = u'2016, Plus3 IT Systems' # noqa: A001
author = u'Plus3 IT Systems'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = u'0.1'
# The full version, including alpha/beta/rc tags.
# release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default | _role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended | to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme
# Add any paths that contain custom themes here, relative to this directory.
#
# html_them_path
if not rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'MothBall v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Watchmaker'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper' |
EmanueleCannizzaro/scons | test/MSVS/vs-11.0-scc-files.py | Python | mit | 3,967 | 0.000756 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/MSVS/vs-11.0-scc-files.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test that we can generate Visual Studio 11.0 project (.vcxproj) and
solution (.sln) files that contain SCC information and look correct.
"""
import os
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['11.0']
expected_slnfile = TestSConsMSVS.expected_slnfile_11_0
expected_vcprojfile = TestSConsMSVS.expected_vcprojfile_11_0
SConscript_contents = """\
env=Environment(platform='win32', tools=['msvs'], MSVS_VERSION='11.0',
CPPDEFINES=['DEF1', 'DEF2',('DEF3','1234')],
CPPPATH=['inc1', 'inc2'],
MSVS_SCC_CONNECTION | _ROOT='.',
MSVS_SCC_PROVIDER='MSSCCI:Perforce SCM',
MSVS_SCC_PROJECT_NAME='Perforce Pro | ject')
testsrc = ['test1.cpp', 'test2.cpp']
testincs = ['sdk_dir\sdk.h']
testlocalincs = ['test.h']
testresources = ['test.rc']
testmisc = ['readme.txt']
env.MSVSProject(target = 'Test.vcxproj',
srcs = testsrc,
incs = testincs,
localincs = testlocalincs,
resources = testresources,
misc = testmisc,
buildtarget = 'Test.exe',
variant = 'Release')
"""
expected_sln_sccinfo = """\
\tGlobalSection(SourceCodeControl) = preSolution
\t\tSccNumberOfProjects = 2
\t\tSccProjectName0 = Perforce\u0020Project
\t\tSccLocalPath0 = .
\t\tSccProvider0 = MSSCCI:Perforce\u0020SCM
\t\tCanCheckoutShared = true
\t\tSccProjectUniqueName1 = Test.vcxproj
\t\tSccLocalPath1 = .
\t\tCanCheckoutShared = true
\t\tSccProjectFilePathRelativizedFromConnection1 = .\\\\
\tEndGlobalSection
"""
expected_vcproj_sccinfo = """\
\t\t<SccProjectName>Perforce Project</SccProjectName>
\t\t<SccLocalPath>.</SccLocalPath>
\t\t<SccProvider>MSSCCI:Perforce SCM</SccProvider>
"""
test.write('SConstruct', SConscript_contents)
test.run(arguments="Test.vcxproj")
test.must_exist(test.workpath('Test.vcxproj'))
vcproj = test.read('Test.vcxproj', 'r')
expect = test.msvs_substitute(expected_vcprojfile, '11.0', None, 'SConstruct',
vcproj_sccinfo=expected_vcproj_sccinfo)
# don't compare the pickled data
assert vcproj[:len(expect)] == expect, test.diff_substr(expect, vcproj)
test.must_exist(test.workpath('Test.sln'))
sln = test.read('Test.sln', 'r')
expect = test.msvs_substitute(expected_slnfile, '11.0', None, 'SConstruct',
sln_sccinfo=expected_sln_sccinfo)
# don't compare the pickled data
assert sln[:len(expect)] == expect, test.diff_substr(expect, sln)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gitgik/updown | api/migrations/0001_initial.py | Python | mit | 1,477 | 0.002708 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-04 09:48
from __future__ import unicode_literals
import api.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DateMixin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model | s.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='File',
fields=[
('datemixin_ptr', models.One | ToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='api.DateMixin')),
('file_id', models.CharField(default=api.utils.generate_uid, max_length=20)),
('name', models.CharField(max_length=255)),
('_file', models.FileField(upload_to='files')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to=settings.AUTH_USER_MODEL)),
],
bases=('api.datemixin',),
),
]
|
kwikteam/global_superclustering | global_code/compute_penalty.py | Python | gpl-2.0 | 1,609 | 0.03294 | import numpy as np
__all__ = ['compute_penalties']
def compute_penalty(kk, clusters):
'''Takes kk.clusters (clusters currently assigned)
and computes the penalty'''
if clusters is None:
clusters = kk.clusters
#print(clusters)
num_cluster_membs = np.array(np.bincount(clusters), dtype=int)
alive = num_cluster_membs>0
num_clusters = np.sum(aliv | e)
#print('num_cluster_members', num_cluster_membs)
# print('num_clusters', num_clusters)
# #This now only depends on the number of clusters
# #cluster_penalty = np.zeros(num_clusters)
num_spikes = kk.num_spikes
#D_k = kk.D_k
num_kkruns = kk.num_KKruns
num_bern_params = kk.num_bern_params #This was determined in the previous M-step
num_bern_params_used = num | _bern_params[np.nonzero(num_cluster_membs>0)]
#print('num_bern_params_used = ', num_bern_params_used)
#num_bern_params = [70 71 63 63 64 62 83 79] for 8 clusters
penalty_k = kk.penalty_k
penalty_k_log_n = kk.penalty_k_log_n
#mean_params = (np.sum(D_k)-num_kkruns)*num_clusters - 1
#effective_params = bernoulli params + mixture weight
effective_params = np.sum(num_bern_params_used)-num_kkruns*num_clusters + (num_clusters -1)
penalty = (2*penalty_k*effective_params + penalty_k_log_n*effective_params*np.log(num_spikes)/2)
#print('penalty = ', penalty)
#do_compute_penalty(cluster_penalty, num_spikes, clusters,
# penalty_k, penalty_k_log_n)
#may not need to import
return penalty
|
sameerparekh/pants | src/python/pants/backend/core/tasks/markdown_to_html.py | Python | apache-2.0 | 15,151 | 0.009702 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import codecs
import os
import re
import sys
import markdown
from docutils.core import publish_parts
from pkg_resources import resource_string
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import PythonLexer, TextLexer, guess_lexer_for_filename
from pygments.styles import get_all_styles
from pygments.util import ClassNotFound
from six.moves import range
from pants.backend.core.targets.doc import Page
from pants.backend.core.tasks.task import Task
from pants.base.address import Address
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.generator import Generator
from pants.base.workunit import WorkUnitLabel
from pants.binaries import binary_util
from pants.util.dirutil import safe_mkdir, safe_open
def emit_codehighlight_css(path, style):
with safe_open(path, 'w') as css:
css.write((HtmlFormatter(style=style)).get_style_defs('.codehilite'))
return path
WIKILINKS_PATTERN = r'\[\[([^\]]+)\]\]'
class WikilinksPattern(markdown.inlinepatterns.Pattern):
def __init__(self, build_url, markdown_instance=None):
markdown.inlinepatterns.Pattern.__init__(self, WIKILINKS_PATTERN, markdown_instance)
self.build_url = build_url
def handleMatch(self, m):
alias, url = self.build_url(m.group(2).strip())
el = markdown.util.etree.Element('a')
el.set('href', url)
el.text = markdown.util.AtomicString(alias)
return el
class WikilinksExtension(markdown.Extension):
def __init__(self, build_url, configs=None):
markdown.Extension.__init__(self, configs or {})
self.build_url = build_url
def extendMarkdown(self, md, md_globals):
md.inlinePatterns['wikilinks'] = WikilinksPattern(self.build_url, md)
# !inc[start-at=void main&end-before=private HelloMain](HelloMain.java)
INCLUDE_PATTERN = r'!inc(\[(?P<params>[^]]*)\])?\((?P<path>[^' + '\n' + r']*)\)'
def choose_include_text(s, params, source_path):
"""Given the contents of a file and !inc[these params], return matching lines
If there was a problem matching parameters, return empty list.
:param s: file's text
:param params: string like "start-at=foo&end-at=bar"
:param source_path: path to source .md. Useful in error messages
"""
lines = s.splitlines()
start_after = None
start_at = None
end_before = None
end_at = None
for term in params.split("&"):
if '=' in term:
param, value = [p.strip() for p in term.split('=', 1)]
else:
param, value = term.strip(), ''
if not param: continue
if param == "start-after":
start_after = value
elif param == "start-at":
start_at = value
elif param == "end-before":
end_before = value
elif param == "end-at":
end_at = value
else:
raise TaskError('Invalid include directive "{0}"'
' in {1}'.format(params, source_path))
chosen_lines = []
# two loops, one waits to "start recording", one "records"
for line_ix in range(0, len(lines)):
line = lines[line_ix]
if (not start_at) and (not start_after):
# if we didn't set a start-* param, don't wait to start
break
if start_at is not None and start_at in line:
break
if start_after is not None and start_after in line:
line_ix += 1
break
else:
# never started recording:
return ''
for line_ix in range(line_ix, len(lines)):
line = lines[line_ix]
if end_before is not None and end_before in line:
break
chosen_lines.append(line)
if end_at is not None and end_at in line:
break
else:
if (end_before or end_at):
# we had an end- filter, but never encountered it.
return ''
return '\n'.join(chosen_lines)
class IncludeExcerptPattern(markdown.inlinepatterns.Pattern):
def __init__(self, source_path=None):
"""
:param string source_path: Path to source `.md` file.
"""
markdown.inlinepatterns.Pattern.__init__(self, INCLUDE_PATTERN)
self.source_path = source_path
def handleMatch(self, match):
params = match.group('params') or ''
rel_include_path = match.group('path')
source_dir = os.path.dirname(self.source_path)
include_path = os.path.join(source_dir, rel_include_path)
try:
with open(include_path) as include_file:
file_text = include_file.read()
except IOError as e:
raise IOError('Markdown file {0} tried to include file {1}, got '
'{2}'.format(self.source_path,
rel_include_path,
e.strerror))
include_text = choose_include_text(file_text, params, self.source_path)
if not include_text:
raise TaskError('Markdown file {0} tried to include file {1} but '
'filtered out everything'.format(self.source_path,
rel_include_path))
el = markdown.util.etree.Element('div')
el.set('class', 'md-included-snippet')
try:
lexer = | guess_lexer_for_filename(include_path, file_text)
except ClassNotFound:
# e.g., ClassNotFound: no lexer for filename u'BUILD' found
if 'BUILD' in include_path:
lexer = PythonLexer()
else:
lexer = TextLexer() # the boring plain-text lexer
html_snippet = highlight(include_text,
lexer,
HtmlFormatter(cssclass='codehilite'))
el.text = html_snippet
| return el
class IncludeExcerptExtension(markdown.Extension):
def __init__(self, source_path, configs=None):
markdown.Extension.__init__(self, configs or {})
self.source_path = source_path
def extendMarkdown(self, md, md_globals):
md.inlinePatterns.add('excerpt',
IncludeExcerptPattern(source_path=self.source_path),
'_begin')
def page_to_html_path(page):
"""Given a page target, return partial path for an output `.html`."""
source_path = page.sources_relative_to_buildroot()[0]
return os.path.splitext(source_path)[0] + ".html"
def rst_to_html(in_rst, stderr):
"""Renders HTML from an RST fragment.
:param string in_rst: An rst formatted string.
:param stderr: An open stream to use for docutils stderr output.
:returns: A tuple of (html rendered rst, return code)
"""
if not in_rst:
return '', 0
# Unfortunately, docutils is really setup for command line use.
# We're forced to patch the bits of sys its hardcoded to use so that we can call it in-process
# and still reliably determine errors.
# TODO(John Sirois): Move to a subprocess execution model utilizing a docutil chroot/pex.
orig_sys_exit = sys.exit
orig_sys_stderr = sys.stderr
returncodes = []
try:
sys.exit = returncodes.append
sys.stderr = stderr
pp = publish_parts(in_rst,
writer_name='html',
# Report and exit at level 2 (warnings) or higher.
settings_overrides=dict(exit_status_level=2, report_level=2),
enable_exit_status=True)
finally:
sys.exit = orig_sys_exit
sys.stderr = orig_sys_stderr
return_value = ''
if 'title' in pp and pp['title']:
return_value += '<title>{0}</title>\n<p style="font: 200% bold">{0}</p>\n'.format(pp['title'])
return_value += pp['body'].strip()
return return_value, returncodes.pop() if returncodes else 0
class MarkdownToHtml(Task):
@classmethod
def register_options(cls, register):
register('--code-style', choices=list(get_all_styles()), default='friendly',
fingerprint=True,
help='Use this stylesheet for code highlights.')
register('--open', action='store_true',
help='Open the generated documents in a browser.')
register('--fragment', action='store_true',
fingerprint= |
mistercrunch/panoramix | superset/db_engine_specs/trino.py | Python | apache-2.0 | 6,834 | 0.000585 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License | .
from datetime import datetime
from typing import Any, Dict, List, Optional
from urllib import parse
import simplejson as json
from sqlalchemy.engine.url import make_url, URL
from superset.db_engine_specs.base import BaseEngineSpec
from superset.utils import core as utils
class TrinoEngineSpec(BaseEngineSpec):
engine = "trino"
engine_name = "Trino"
# pylint: disable=line-too-long
_time_grain_expressions = {
None: "{col}", |
"PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))",
"PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))",
"PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))",
"P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))",
"P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))",
"P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))",
"P0.25Y": "date_trunc('quarter', CAST({col} AS TIMESTAMP))",
"P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))",
# "1969-12-28T00:00:00Z/P1W", # Week starting Sunday
# "1969-12-29T00:00:00Z/P1W", # Week starting Monday
# "P1W/1970-01-03T00:00:00Z", # Week ending Saturday
# "P1W/1970-01-04T00:00:00Z", # Week ending Sunday
}
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
value = dttm.date().isoformat()
return f"from_iso8601_date('{value}')"
if tt == utils.TemporalType.TIMESTAMP:
value = dttm.isoformat(timespec="microseconds")
return f"from_iso8601_timestamp('{value}')"
return None
@classmethod
def epoch_to_dttm(cls) -> str:
return "from_unixtime({col})"
@classmethod
def adjust_database_uri(
cls, uri: URL, selected_schema: Optional[str] = None
) -> None:
database = uri.database
if selected_schema and database:
selected_schema = parse.quote(selected_schema, safe="")
database = database.split("/")[0] + "/" + selected_schema
uri.database = database
@classmethod
def update_impersonation_config(
cls, connect_args: Dict[str, Any], uri: str, username: Optional[str],
) -> None:
"""
Update a configuration dictionary
that can set the correct properties for impersonating users
:param connect_args: config to be updated
:param uri: URI string
:param impersonate_user: Flag indicating if impersonation is enabled
:param username: Effective username
:return: None
"""
url = make_url(uri)
backend_name = url.get_backend_name()
# Must be Trino connection, enable impersonation, and set optional param
# auth=LDAP|KERBEROS
# Set principal_username=$effective_username
if backend_name == "trino" and username is not None:
connect_args["user"] = username
@classmethod
def modify_url_for_impersonation(
    cls, url: URL, impersonate_user: bool, username: Optional[str]
) -> None:
    """
    Modify the SQL Alchemy URL object with the user to impersonate if applicable.

    :param url: SQLAlchemy URL object
    :param impersonate_user: Flag indicating if impersonation is enabled
    :param username: Effective username
    """
    # Intentionally a no-op: update_impersonation_config() handles
    # impersonation for this engine via connect_args instead of the URL.
@classmethod
def get_allow_cost_estimate(cls, extra: Dict[str, Any]) -> bool:
    """Cost estimation is always supported for this engine; *extra* is ignored."""
    return True
@classmethod
def estimate_statement_cost(cls, statement: str, cursor: Any) -> Dict[str, Any]:
    """
    Run a SQL query that estimates the cost of a given statement.

    :param statement: A single SQL statement
    :param cursor: DB-API cursor used to run the EXPLAIN query
    :return: parsed JSON response from Trino
    """
    # (Fixed: docstring previously documented `database`/`username` params
    # that do not exist, and carried a stale too-many-locals pylint pragma.)
    sql = f"EXPLAIN (TYPE IO, FORMAT JSON) {statement}"
    cursor.execute(sql)

    # The output from Trino is a single column and a single row containing
    # JSON:
    #
    #  {
    #    ...
    #    "estimate" : {
    #      "outputRowCount" : 8.73265878E8,
    #      "outputSizeInBytes" : 3.41425774958E11,
    #      "cpuCost" : 3.41425774958E11,
    #      "maxMemory" : 0.0,
    #      "networkCost" : 3.41425774958E11
    #    }
    #  }
    return json.loads(cursor.fetchone()[0])
@classmethod
def query_cost_formatter(
    cls, raw_cost: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
    """
    Format a raw Trino cost estimate into human readable rows.

    :param raw_cost: JSON estimate from Trino
    :return: one dict per input row, mapping labels to formatted values
    """

    def humanize(value: Any, suffix: str) -> str:
        # Non-numeric estimates are passed through as their string form.
        try:
            value = int(value)
        except ValueError:
            return str(value)

        # Step up through metric prefixes while the value exceeds 1000.
        units = ["K", "M", "G", "T", "P", "E", "Z", "Y"]
        step = 1000
        unit = ""
        while value > step and units:
            unit = units.pop(0)
            value //= step
        return f"{value} {unit}{suffix}"

    columns = (
        ("outputRowCount", "Output count", " rows"),
        ("outputSizeInBytes", "Output size", "B"),
        ("cpuCost", "CPU cost", ""),
        ("maxMemory", "Max memory", "B"),
        ("networkCost", "Network cost", ""),
    )

    formatted = []
    for row in raw_cost:
        estimate: Dict[str, float] = row.get("estimate", {})
        formatted.append(
            {
                label: humanize(estimate[key], suffix).strip()
                for key, label, suffix in columns
                if key in estimate
            }
        )
    return formatted
|
echevemaster/fudcon | fudcon/modules/speakers/models.py | Python | mit | 461 | 0 | # -*- coding: utf-8 -*-
"""
fudcon.modules.speakers.models
~~~~~~~~~~~~~~~~~~~~~
Speakers models
"""
from fudcon.database import db
class Speaker(db.Model):
    """ORM model holding all attributes of a conference speaker."""

    __tablename__ = 'speakers'

    id = db.Column(db.Integer, primary_key=True)   # surrogate primary key
    names = db.Column(db.String(255))              # speaker display name(s)
    fas = db.Column(db.String(255))                # FAS account name
    bio = db.Column(db.Text())                     # free-form biography
    active = db.Column(db.Boolean())               # whether the speaker is shown
|
gsmcwhirter/c-simulations | docs/source/conf.py | Python | mit | 8,316 | 0.006133 | # -*- coding: utf-8 -*-
#
# c-simulations documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 19 12:55:22 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'c-simulations'
copyright = u'2014, Gregory McWhirter'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be | rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter | .
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'c-simulationsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'c-simulations.tex', u'c-simulations Documentation',
u'Gregory McWhirter', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'c-simulations', u'c-simulations Documentation',
[u'Gregory McWhirter'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'c-simulations', u'c-simulations Documentation',
u'Gregory McWhirter', 'c-simulations', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inli |
dannyboi104/SickRage | sickbeard/tv.py | Python | gpl-3.0 | 117,710 | 0.004885 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import datetime
import threading
import re
import glob
import stat
import traceback
import sickbeard
import xml.etree.cElementTree as etree
from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
import subliminal
try:
from send2trash import send2trash
except ImportError:
pass
from imdb import imdb
from sickbeard import db
from sickbeard import helpers, exceptions, logger
from sickbeard.exceptions import ex
from sickbeard import image_cache
from sickbeard import notifiers
from sickbeard import postProcessor
from sickbeard import subtitles
from sickbeard import history
from sickbeard.blackandwhitelist import BlackAndWhiteList
from sickbeard import sbdatetime
from sickbeard import network_timezones
from sickbeard.indexers.indexer_config import INDEXER_TVRAGE
from dateutil.tz import *
from sickbeard import encodingKludge as ek
from common import Quality, Overview, statusStrings
from common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, ARCHIVED, IGNORED, UNAIRED, WANTED, SKIPPED, \
UNKNOWN, FAILED
from common import NAMING_DUPLICATE, NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_SEPARATED_REPEAT, \
NAMING_LIMITED_EXTEND_E_PREFIXED
import shutil
import shutil_custom
shutil.copyfile = shutil_custom.copyfile_custom
def dirty_setter(attr_name):
    """Return a setter for *attr_name* that marks the owner dirty on change.

    The generated setter only assigns (and only flips ``self.dirty``) when the
    new value differs from the current one.
    """
    def _set(self, value):
        if getattr(self, attr_name) == value:
            return  # unchanged: leave the dirty flag alone
        setattr(self, attr_name, value)
        self.dirty = True
    return _set
class TVShow(object):
def __init__(self, indexer, indexerid, lang=""):
    """Create a TVShow for (indexer, indexerid) and load its state from the DB.

    :param indexer: numeric id of the metadata indexer (e.g. TVDB/TVRage)
    :param indexerid: show id on that indexer
    :param lang: preferred metadata language code (may be empty)
    :raises exceptions.MultipleShowObjectsException: if a show with this
        indexerid is already present in sickbeard.showList
    """
    self._indexerid = int(indexerid)
    self._indexer = int(indexer)
    self._name = ""
    self._imdbid = ""
    self._network = ""
    self._genre = ""
    self._classification = ""
    self._runtime = 0
    self._imdb_info = {}
    # Defaults for quality/folder layout/subtitles come from global config.
    self._quality = int(sickbeard.QUALITY_DEFAULT)
    self._flatten_folders = int(sickbeard.FLATTEN_FOLDERS_DEFAULT)
    self._status = "Unknown"
    self._airs = ""
    self._startyear = 0
    self._paused = 0
    self._air_by_date = 0
    self._subtitles = int(sickbeard.SUBTITLES_DEFAULT)
    self._dvdorder = 0
    self._archive_firstmatch = 0
    self._lang = lang
    self._last_update_indexer = 1
    self._sports = 0
    self._anime = 0
    self._scene = 0
    self._rls_ignore_words = ""
    self._rls_require_words = ""
    self._default_ep_status = SKIPPED
    # Freshly constructed objects are dirty until persisted.
    self.dirty = True

    self._location = ""
    self.lock = threading.Lock()
    self.isDirGood = False
    self.episodes = {}
    self.nextaired = ""
    self.release_groups = None

    # Refuse to create a duplicate of a show that is already registered.
    otherShow = helpers.findCertainShow(sickbeard.showList, self.indexerid)
    if otherShow != None:
        raise exceptions.MultipleShowObjectsException("Can't create a show if it already exists")

    self.loadFromDB()
name = property(lambda self: self._name, dirty_setter("_name"))
indexerid = property(lambda self: self._indexerid, dirty_setter("_indexerid"))
indexer = property(lambda self: self._indexer, dirty_setter("_indexer"))
# location = property(lambda self: self._location, dirty_setter("_location"))
imdbid = property(lambda self: self._imdbid, dirty_setter("_imdbid"))
network = property(lambda self: self._network, dirty_setter("_network"))
genre = property(lambda self: self._genre, dirty_setter("_genre"))
classification = property(lambda self: self._classification, dirty_setter("_classification"))
runtime = property(lambda self: self._runtime, dirty_setter("_runtime"))
imdb_info = property(lambda self: self._imdb_info, dirty_setter("_imdb_info"))
quality = property(lambda self: self._quality, dirty_setter("_quality"))
flatten_folders = property(lambda self: self._flatten_folders, dirty_setter( | "_flatten_folders"))
status = property(lambda self: self._status, dirty_setter("_status"))
airs = property(lambda self: self._airs, dirty_setter("_airs"))
startyear = property(lambda self: self._startyear, dirty_setter("_startyear"))
paused = property(lambda self: self._paused, dirty_setter("_paused"))
air_by_date = property(lambda self: self._air_by_date, dirty_setter("_air_by_date"))
subtitles = property(lambda self: self._subtitles, dirty_se | tter("_subtitles"))
dvdorder = property(lambda self: self._dvdorder, dirty_setter("_dvdorder"))
archive_firstmatch = property(lambda self: self._archive_firstmatch, dirty_setter("_archive_firstmatch"))
lang = property(lambda self: self._lang, dirty_setter("_lang"))
last_update_indexer = property(lambda self: self._last_update_indexer, dirty_setter("_last_update_indexer"))
sports = property(lambda self: self._sports, dirty_setter("_sports"))
anime = property(lambda self: self._anime, dirty_setter("_anime"))
scene = property(lambda self: self._scene, dirty_setter("_scene"))
rls_ignore_words = property(lambda self: self._rls_ignore_words, dirty_setter("_rls_ignore_words"))
rls_require_words = property(lambda self: self._rls_require_words, dirty_setter("_rls_require_words"))
default_ep_status = property(lambda self: self._default_ep_status, dirty_setter("_default_ep_status"))
@property
def is_anime(self):
if int(self.anime) > 0:
return True
else:
return False
@property
def is_sports(self):
if int(self.sports) > 0:
return True
else:
return False
@property
def is_scene(self):
if int(self.scene) > 0:
return True
else:
return False
@property
def network_logo_name(self):
return self.network.replace(u'\u00C9', 'e').replace(u'\u00E9', 'e').lower()
def _getLocation(self):
    """Return the show directory, validating its existence when required.

    :raises exceptions.ShowDirNotFoundException: if the directory is missing
        and missing dirs are not auto-created during post-processing.
    """
    if sickbeard.CREATE_MISSING_SHOW_DIRS:
        # Missing show dirs are created during post-processing, so skip the check.
        return self._location
    if not ek.ek(os.path.isdir, self._location):
        raise exceptions.ShowDirNotFoundException("Show folder doesn't exist, you shouldn't be using it")
    return self._location
def _setLocation(self, newLocation):
    """Set the show directory, validating it unless dir-less shows are allowed.

    :raises exceptions.NoNFOException: if the folder is invalid and
        ADD_SHOWS_WO_DIR is disabled.
    """
    logger.log(u"Setter sets location to " + newLocation, logger.DEBUG)
    # Don't validate dir if user wants to add shows without creating a dir
    if sickbeard.ADD_SHOWS_WO_DIR or ek.ek(os.path.isdir, newLocation):
        dirty_setter("_location")(self, newLocation)
        # Fixed: previously assigned to self._isDirGood, but __init__ (and the
        # rest of the class) uses self.isDirGood, so the flag never flipped.
        self.isDirGood = True
    else:
        raise exceptions.NoNFOException("Invalid folder for the show!")
location = property(_getLocation, _setLocation)
# delete references to anything that's not in the internal lists
def flushEpisodes(self):
for curSeason in self.episodes:
for curEp in self.episodes[curSeason]:
myEp = self.episodes[curSeason][curEp]
self.episodes[curSeason][curEp] = None
del myEp
def getAllEpisodes(self, season=None, has_location=False):
sql_selection = "SELECT season, episode, "
# subselection to detect multi-episodes early, share_location > 0
sql_selection = sql_selection + " (SELECT COUNT (*) FROM tv_episodes WHERE showid = tve.showid AND season = tve.season AND location ! |
androm3da/struct_bench | types_.py | Python | mit | 505 | 0.011881 | #!/usr/bin/env python
import collections
def read_only(self, name, val):
    """Attribute-setter stub that forbids mutation: always raises ValueError."""
    raise ValueError('not supported')
class SimpleObject(object):
    """Plain three-attribute holder used for attribute-access benchmarking."""

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c  # fixed: was `self.b = c`, which clobbered b and never set c
class SimpleObjectImmutable(object):
    """Three-attribute holder mirroring SimpleObject.

    NOTE(review): despite the name, nothing here enforces immutability
    (no __slots__ or read_only __setattr__) -- confirm the intent.
    """

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c  # fixed: was `self.b = c`, which clobbered b and never set c
NamedTuple = collections.namedtuple('NamedTuple', ['a', 'b', 'c',])
def SimpleTuple(a, b, c):
    """Factory returning the three positional arguments as a plain tuple."""
    return a, b, c
from types_c import c_st | ruct
|
profitware/python-sandbox-algo | sandboxalgo/binsearch.py | Python | mit | 2,401 | 0 | # -*- coding: utf-8 -*-
__author__ = 'Sergey Sobko'
KEY_NOT_FOUND = -1
def midpoint(imin, imax):
    """Return the floor of the midpoint between imin and imax.

    Uses floor division so the result stays an int on both Python 2 and 3;
    the original `/` returned a float on Python 3, which breaks callers that
    use the result as a list index.

    >>> midpoint(0, 0)
    0
    >>> midpoint(0, 1)
    0
    >>> midpoint(0, 2)
    1
    >>> midpoint(1, 1)
    1
    >>> midpoint(1, 2)
    1
    >>> midpoint(1, 5)
    3
    """
    return (int(imin) + int(imax)) // 2
def binary_search(search_list, key):
    """Binary search: return the index of *key* in sorted *search_list*,
    or KEY_NOT_FOUND when absent.

    >>> binary_search([], 1) == KEY_NOT_FOUND
    True
    >>> binary_search([1, 3, 4, 6, 8, 9, 11], 4)
    2
    >>> binary_search([1, 2, 3, 4, 6, 8, 9, 11], 4)
    3
    >>> binary_search([1, 2, 3, 4, 6, 8, 9, 11], 1)
    0
    >>> binary_search([1, 2, 3, 4, 6, 8, 9, 11], 11)
    7
    >>> binary_search([1, 2, 3, 4, 8, 9, 11], 11)
    6
    >>> binary_search([1, 2, 3], 4) == KEY_NOT_FOUND
    True
    >>> binary_search([-1, 2, 4], 0) == KEY_NOT_FOUND
    True
    """
    if not search_list:
        return KEY_NOT_FOUND

    last_index = len(search_list) - 1
    if search_list[last_index] == key:
        return last_index

    # Iterative halving with floor division. The original delegated to
    # midpoint(), whose true division yields floats on Python 3 and breaks
    # list indexing; `//` is correct on both Python 2 and 3.
    imin, imax = 0, last_index
    previous_index = -1
    while True:
        current_index = (imin + imax) // 2
        if current_index == previous_index:
            # The interval stopped shrinking: the key is not present.
            return KEY_NOT_FOUND
        if search_list[current_index] == key:
            return current_index
        if key < search_list[current_index]:
            imax = current_index
        else:
            imin = current_index
        previous_index = current_index
def binary_search_func(function, key, eps, imin, imax):
    """Binary search for a monotonically increasing function.

    Returns an x in [imin, imax] with |function(x) - key| < eps, or None if
    the bracket collapses (narrower than eps) before converging.
    """
    # FIXME: Doctests
    lo, hi = imin, imax
    prev_x = None
    while True:
        x = (lo + hi) / 2
        value = function(x)
        if abs(value - key) < eps:
            return x
        # Stop once the bracket no longer moves by at least eps.
        if prev_x is not None and abs(x - prev_x) < eps:
            break
        if key < value:
            hi = x
        else:
            lo = x
        prev_x = x
    return None
if __name__ == '__main__':
import doctest
doctest.testmod()
|
CFDEMproject/LAMMPS | tools/moltemplate/src/ttree_lex.py | Python | gpl-2.0 | 82,841 | 0.006808 | # -*- coding: iso-8859-1 -*-
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.
"""A lexical analyzer class for simple shell-like syntaxes.
This version has been modified slightly to work better with unicode.
It was forked from the version of shlex that ships with python 3.2.2.
A few minor features and functions have been added. """
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
# ("wordterminators" (unicode support) hack by Andrew Jewett September 2011)
import os.path
import sys
from collections import deque
import re, fnmatch
import string
#import gc
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__all__ = ["TtreeShlex",
"split",
"LineLex",
"SplitQuotedString",
"EscCharStrToChar",
"SafelyEncodeString",
"RemoveOuterQuotes",
"MaxLenStr",
"HasWildCard",
#"IsRegex",
"InputError",
"ErrorLeader",
"SrcLoc",
"OSrcLoc",
"TextBlock",
"VarRef",
"VarNPtr",
"VarBinding",
"SplitTemplate",
"SplitTemplateMulti",
"TableFromTemplate",
"ExtractCatName",
#"_TableFromTemplate",
#"_DeleteLineFromTemplate",
"DeleteLinesWithBadVars",
"TemplateLexer"]
class TtreeShlex(object):
""" A lexical analyzer class for simple shell-like syntaxes.
TtreeShlex is a backwards-compatible version of python's standard shlex
module. It has the additional member: "self.wordterminators", which
overrides the "self.wordchars" member. This enables better handling of
unicode characters by allowing a much larger variety of characters to
appear in words or tokens parsed by TtreeShlex.
"""
custom_path = None
def __init__(self,
             instream=None,
             infile=None,
             custom_include_path=None,
             posix=False):
    """Set up the lexer state.

    :param instream: input stream or string to tokenize (defaults to stdin)
    :param infile: name associated with *instream*, for error messages
    :param custom_include_path: NOTE(review): accepted but never used here;
        the include path is built from TtreeShlex.custom_path and the
        TTREE_PATH environment variable instead -- confirm intent.
    :param posix: enable POSIX-compliant quoting/word rules
    """
    if isinstance(instream, str):
        instream = StringIO(instream)
    if instream is not None:
        self.instream = instream
        self.infile = infile
    else:
        self.instream = sys.stdin
        self.infile = None
    self.posix = posix
    # In POSIX mode EOF is None so '' can be a legitimate empty token.
    if posix:
        self.eof = None
    else:
        self.eof = ''
    self.commenters = '#'
    # Note: the 'fe' ordering quirk below is inherited from stdlib shlex;
    # membership tests don't depend on order.
    self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                      'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
    if self.posix:
        self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
                           'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
    # When non-empty, wordterminators overrides wordchars (unicode support).
    self.wordterminators = set([])           #WORDTERMINATORS
    self.prev_space_terminator = ''          #WORDTERMINATORS
    self.whitespace = ' \t\r\f\n'
    self.whitespace_split = False
    self.quotes = '\'"'
    self.escape = '\\'
    self.escapedquotes = '"'
    self.state = ' '
    # Pushback is a stack of *characters* (not tokens) in this fork.
    self.pushback = deque()
    self.lineno = 1
    self.debug = 0
    self.token = ''
    self.filestack = deque()
    # self.source_triggers
    # are tokens which allow the seamless insertion of other
    # files into the file being read.
    self.source_triggers=set(['source'])
    self.source_triggers_x=set([])
    #Note: self.source_triggers_x
    #      This is a subset of self.source_triggers.
    #      In this case file inclusion is exclusive.
    #      In other words, if one of these tokens
    #      is encountered, the file is only included
    #      if it has not been included already.
    self.source_files_restricted = set([])
    # Search path for included files: class-level custom_path first,
    # then any directories listed in the TTREE_PATH environment variable.
    self.include_path = []
    if TtreeShlex.custom_path:
        include_path_list = TtreeShlex.custom_path.split(':')
        self.include_path += [d for d in include_path_list if len(d)>0]
    if 'TTREE_PATH' in os.environ:
        include_path_list = os.environ['TTREE_PATH'].split(':')
        self.include_path += [d for d in include_path_list if len(d)>0]
    if self.debug:
        print('TtreeShlex: reading from %s, line %d' \
              % (self.instream, self.lineno))
    self.end_encountered = False
@staticmethod #WORDTERMINATORS
def _belongs_to(char, include_chars, exclude_chars): #WORDTERMINATORS
if ((not exclude_chars) or (len(exclude_chars)==0)): #WORDTERMINATORS
return char in include_chars #WORDTERMINATORS
else: #WORDTERMINATORS
return char not in exclude_chars #WORDTERMINATORS
def push_raw_text(self, text):
"""Push a block of text onto the stack popped by the ReadLine() method.
(If multiple lines are present in the text, (which is determined by
self.line_terminators), then the text is split into multiple lines
and each one of them is pushed onto this stack individually.
The "self.lineno" counter is also adjusted, depending on the number
of newline characters in "line".
Do not strip off the newline, or other line terminators
at the end of the text block before using push_raw_text()!
"""
if self.debug >= 1:
print("TtreeShlex: pushing token " + repr(text))
for c in reversed(text): #WORDTERMINATORS
self.pushback.appendleft(c) #WORDTERMINATORS
if c == '\n': #WORDTERMINATORS
self.lineno -= 1 #WORDTERMINATORS
if len(text) > 0: #WORDTERMINATORS |
self.end_encountered = False #WORDTERMINATORS
def push_token(self, text):
"Push a token onto the stack popped by the get_token method"
self.push_raw_text(text+self.prev_space_terminator)
de | f push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if isinstance(newstream, str):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print('TtreeShlex: pushing to file %s' % (self.infile,))
else:
print('TtreeShlex: pushing to stream %s' % (self.instream,))
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
if self.debug:
print('TtreeShlex: popping to %s, line %d' \
% (self.instream, self.lineno))
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
#### #CHANGING: self.pushback is now a stack of characters, not tokens #WORDTERMINATORS
#### if self.pushback: #WORDTERMINATORS
#### tok = self.pushback.popleft() #WORDTERMINATORS
#### if self.debug >= 1: #WORDTERMINATORS
#### print("TtreeShlex: popping token " + repr(tok)) #WORDTERMINATORS
#### return tok #WORDTERMINATORS
#### No pushback. Get a token. |
geimer/easybuild-framework | setup.py | Python | gpl-2.0 | 4,276 | 0.012161 | # #
# Copyright 2012-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichti | ng.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY | WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
This script can be used to install easybuild-framework, e.g. using:
easy_install --user .
or
python setup.py --prefix=$HOME/easybuild
@author: Kenneth Hoste (Ghent University)
"""
import os
from distutils import log
from easybuild.tools.version import VERSION
API_VERSION = str(VERSION).split('.')[0]
# Utility function to read README file
# Utility function to read README file
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Fixed: the file handle is now closed deterministically via a context
    manager instead of being left for the garbage collector.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as handle:
        return handle.read()
# log levels: 0 = WARN (default), 1 = INFO, 2 = DEBUG
log.set_verbosity(1)
try:
from setuptools import setup
log.info("Installing with setuptools.setup...")
except ImportError, err:
log.info("Failed to import setuptools.setup, so falling back to distutils.setup")
from distutils.core import setup
log.info("Installing version %s (API version %s)" % (VERSION, API_VERSION))
def find_rel_test():
    """Return list of files recursively from basedir (aka find -type f).

    Paths are returned relative to test/framework (the function chdirs there
    while walking).  Fixed: the caller's working directory is now restored
    even when the walk raises, via try/finally.
    """
    basedir = os.path.join(os.path.dirname(__file__), "test", "framework")
    current = os.getcwd()
    os.chdir(basedir)
    try:
        res = []
        for subdir in ["sandbox", "easyconfigs", "modules"]:
            res.extend([os.path.join(root, filename)
                        for root, dirnames, filenames in os.walk(subdir)
                        for filename in filenames if os.path.isfile(os.path.join(root, filename))])
    finally:
        # Always restore the caller's working directory.
        os.chdir(current)
    return res
# All Python packages shipped by easybuild-framework, including the test
# suite and the bundled vsc utility packages.
easybuild_packages = [
    "easybuild", "easybuild.framework", "easybuild.framework.easyconfig", "easybuild.framework.easyconfig.format",
    "easybuild.toolchains", "easybuild.toolchains.compiler", "easybuild.toolchains.mpi",
    "easybuild.toolchains.fft", "easybuild.toolchains.linalg", "easybuild.tools",
    "easybuild.tools.toolchain", "easybuild.tools.module_naming_scheme", "easybuild.tools.repository",
    "test.framework", "test",
    "vsc", "vsc.utils",
]

setup(
    name = "easybuild-framework",
    version = str(VERSION),
    author = "EasyBuild community",
    author_email = "easybuild@lists.ugent.be",
    description = """The EasyBuild framework supports the creation of custom easyblocks that \
implement support for installing particular (groups of) software packages.""",
    license = "GPLv2",
    keywords = "software build building installation installing compilation HPC scientific",
    url = "http://hpcugent.github.com/easybuild",
    packages = easybuild_packages,
    # The framework test suite lives under test/framework and ships its
    # fixture tree (sandbox/easyconfigs/modules) as package data.
    package_dir = {'test.framework': "test/framework"},
    package_data = {"test.framework": find_rel_test()},
    scripts = ["eb", "optcomplete.bash", "minimal_bash_completion.bash"],
    data_files = [
        ('easybuild', ["easybuild/easybuild_config.py"]),
    ],
    long_description = read('README.rst'),
    classifiers = [
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 2.4",
        "Topic :: Software Development :: Build Tools",
    ],
    platforms = "Linux",
    provides = ["eb"] + easybuild_packages,
    test_suite = "test.framework.suite",
    zip_safe = False,
)
|
aronparsons/spacewalk | client/tools/rhncfg/config_common/utils.py | Python | gpl-2.0 | 5,530 | 0.003436 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
# guaranteed to exist even on RHEL 5 because we now require python-hashlib
import hashlib
import re
import shutil
import pwd
import urlparse
import inspect
from config_common.rhn_log import log_debug
hashlib_has_usedforsecurity = False
if 'usedforsecurity' in inspect.getargspec(hashlib.new)[0]:
hashlib_has_usedforsecurity = True
# Matches one or more path separators at the very start of a string.
_normpath_re = re.compile("^(%s)+" % os.sep)

def normalize_path(path):
    """Return *path* normalized, with any run of leading separators collapsed.

    os.path.normpath leaves duplicated separators at the beginning of a
    path (e.g. "//foo"), so squash those down to a single os.sep too.
    """
    normalized = os.path.normpath(path)
    return _normpath_re.sub(os.sep, normalized)
def join_path(*args):
    """Join the given path components with os.sep and normalize the result."""
    joined = os.sep.join(args)
    return normalize_path(joined)
def path_full_split(path):
    """Split *path* into the full list of its components.

    Unlike os.path.split, which only peels off the trailing component,
    this returns every component, e.g. "/a/b" -> [os.sep, "a", "b"].
    """
    remaining = normalize_path(path)
    components = []
    while True:
        remaining, tail = os.path.split(remaining)
        if tail == '':
            # No component left; a non-empty remainder means the path was
            # absolute, so record the root separator as the first element.
            if remaining:
                components.append(os.sep)
            break
        components.append(tail)
    # Components were collected from the tail inwards; restore file order.
    return components[::-1]
def copyfile_p(src, dst):
    """
    Simple util function, copies src path to dst path, making
    directories as necessary. File permissions are not preserved.

    Directories are created (not recursed into), symlinks are recreated
    pointing at the same target, and regular files are copied.
    """
    directory = os.path.split(dst)[0]
    try:
        os.makedirs(directory)
    except OSError as e:
        # 17 == errno.EEXIST: the destination directory already existing
        # is fine; re-raise anything else.
        if e.errno != 17:
            raise
    if os.path.isdir(src):
        if not os.path.exists(dst):
            os.mkdir(dst)
    elif os.path.islink(src):
        # os.path.lexists is missing on very old pythons; fall back to
        # os.path.exists there (lexists also reports dangling symlinks).
        exists = getattr(os.path, "lexists", os.path.exists)
        if exists(dst):
            os.remove(dst)
        os.symlink(os.readlink(src), dst)
    else:
        shutil.copyfile(src, dst)
def mkdir_p(path, mode=None, symlinks=None, allfiles=None):
"""
Similar to 'mkdir -p' -- makes all directories necessary to ensure
the 'path' is a directory, and return the list of directories that were
made as a result
"""
if mode is None:
mode = 0700
dirs_created = []
components = path_full_split(path)
for i in range(1,len(components)):
d = os.path.join(*components[:i+1])
if symlinks:
for symlink in symlinks:
if symlink['path'] == d:
# create symlink and remove it from symlink list
os.symlink(symlink['symlink'], symlink['path'])
symlinks.remove(symlink)
allfiles.remove(symlink)
dirs_created.append(symlink)
continue
log_debug(8, "testing",d)
try:
os.mkdir(d, mode)
except OSError, e:
if e.errno != 17:
raise
else:
log_debug(8, "created",d)
dirs_creat | ed.append(d)
log_debug(6, "dirs_created:",dirs_created)
return dirs_created
def rmdir_p(path, stoppath):
    """
    if rmdir had a -p option, this would be it. remove dir and up
    until a non-empty dir is hit, or stoppath is reached
    path and stoppath have to be absolute paths
    """
    # First normalize both paths
    stoppath = normalize_path(os.sep + stoppath)
    path = normalize_path(os.sep + path)
    # stoppath has to be a prefix of path
    if path[:len(stoppath)] != stoppath:
        raise OSError, "Could not remove %s: %s is not a prefix" % (
            path, stoppath)
    while 1:
        if stoppath == path:
            # We're done
            break
        # Try to remove the directory
        try:
            os.rmdir(path)
        except OSError:
            # Either the directory is full, or we don't have permissions; stop
            break
        # Walk one level up and try to remove the parent next
        path, current = os.path.split(path)
        if current == '':
            # We're done - reached the root
            break
def rm_trailing_slash(slashstring):
    """Return slashstring with a single trailing "/" removed, if present.

    An empty string is returned unchanged (the previous indexing-based
    check raised IndexError on "").
    """
    if slashstring.endswith("/"):
        return slashstring[:-1]
    return slashstring
def getContentChecksum(checksum_type, contents):
    """Return the hex digest of 'contents' under the given hash algorithm.

    On FIPS-enabled hashlib builds, hashlib.new() accepts a
    'usedforsecurity' keyword; pass usedforsecurity=False there so that
    non-security checksums (e.g. md5 content hashes) are still allowed.
    Fall back to the plain constructor when the keyword is unsupported,
    instead of relying on a module-global flag computed with the
    deprecated inspect.getargspec().
    """
    try:
        engine = hashlib.new(checksum_type, usedforsecurity=False)
    except TypeError:
        engine = hashlib.new(checksum_type)
    engine.update(contents)
    return engine.hexdigest()
def sha256_file(filename):
    """Return the hex SHA-256 digest of the given file's contents.

    The file is read in binary mode in fixed-size chunks so large files
    can be hashed without loading them into memory, and the handle is
    closed even if reading fails (the previous version leaked the handle
    and opened the file in text mode, which corrupts binary content).
    """
    engine = hashlib.new('sha256')
    # try/finally instead of a 'with' block to stay friendly to the very
    # old python versions this module still targets
    fh = open(filename, "rb")
    try:
        while 1:
            buf = fh.read(4096)
            if not buf:
                break
            engine.update(buf)
    finally:
        fh.close()
    return engine.hexdigest()
def parse_url(server_url, scheme="https"):
    """Parse server_url into a urlparse 6-tuple, defaulting the scheme to https."""
    parsed = urlparse.urlparse(server_url, scheme=scheme)
    return parsed
def unparse_url(url_tuple):
    """Reassemble a urlparse 6-tuple back into a URL string."""
    unparsed = urlparse.urlunparse(url_tuple)
    return unparsed
def get_home_dir():
    """Return the home directory of the user running this process."""
    passwd_entry = pwd.getpwuid(os.getuid())
    # field 5 of the passwd struct is pw_dir
    return passwd_entry[5]
|
eduNEXT/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_publish.py | Python | agpl-3.0 | 55,886 | 0.002595 | """
Test the publish code (mostly testing that publishing doesn't result in orphans)
"""
import itertools
import os
import re
import unittest
import uuid
import xml.etree.ElementTree as ET
from contextlib import contextmanager
from shutil import rmtree
from tempfile import mkdtemp
import pytest
import ddt
from openedx.core.lib.tests import attr
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls, mongo_uses_error_check
from xmodule.modulestore.tests.test_split_w_old_mongo import SplitWMongoCourseBootstrapper
from xmodule.modulestore.tests.utils import (
DRAFT_MODULESTORE_SETUP,
MODULESTORE_SETUPS,
SPLIT_MODULESTORE_SETUP,
MongoContentstoreBuilder,
MongoModulestoreBuilder
)
from xmodule.modulestore.xml_exporter import export_course_to_xml
@attr('mongo')
class TestPublish(SplitWMongoCourseBootstrapper):
"""
Test the publish code (primary causing orphans)
"""
def _create_course(self): # lint-amnesty, pylint: disable=arguments-differ
"""
Create the course, publish all verticals
* some detached items
"""
# There are 12 created items and 7 parent updates
# create course: finds: 1 to verify uniqueness, 1 to find parents
# sends: 1 to create course, 1 to create overview
with check_mongo_calls(4, 2):
super()._create_course(split=False) # 2 inserts (course and overview)
# with bulk will delay all inheritance computations which won't be added into the mongo_calls
with self.draft_mongo.bulk_operations(self.old_course_key):
# finds: 1 for parent to add child and 2 to get ancestors
# sends: 1 for insert, 1 for parent (add child)
with check_mongo_calls(3, 2):
self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid', split=False) # lint-amnesty, pylint: disable=line-too-long
with check_mongo_calls(4, 2):
self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid', split=False) # lint-amnesty, pylint: disable=line-too-long
# For each vertical (2) created:
# - load draft
# - load non-draft
# - get last error
# - load parent
# - get ancestors
# - load inheritable data
with check_mongo_calls(15, 6):
self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', split=False) # lint-amnesty, pylint: disable=line-too-long
self._create_item('vertical', 'Vert2', {}, {'display_name': 'Vertical 2'}, 'chapter', 'Chapter1', split=False) # lint-amnesty, pylint: disable=line-too-long
# For each (4) item created
# - try to find draft
# - try to find non-draft
# - compute what is parent
# - load draft parent again & compute its parent chain up to course
# count for updates increased to 16 b/c of edit_info updating
with check_mongo_calls(36, 16):
self._create_item('html', 'Html1', "<p>Goodbye</p>", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', split=False) # lint-amnesty, pylint: disable=line-too-long
self._create_item(
'discussion', 'Discussion1',
"discussion discussion_category=\"Lecture 1\" discussion_id=\"a08bfd89b2aa40fa81f2c650a9332846\" discussion_target=\"Lecture 1\"/>\n", # lint-amnesty, pylint: disable=line-too-long
{
"discussion_category": "Lecture 1",
"discussion_target": "Lecture 1",
"display_name": "Lecture 1 Discussion",
"discussion_id": "a08bfd89b2aa40fa81f2c650a9332846"
},
'vertical', 'Vert1',
| split=False
)
| self._create_item('html', 'Html2', "<p>Hello</p>", {'display_name': 'Hollow Html'}, 'vertical', 'Vert1', split=False) # lint-amnesty, pylint: disable=line-too-long
self._create_item(
'discussion', 'Discussion2',
"discussion discussion_category=\"Lecture 2\" discussion_id=\"b08bfd89b2aa40fa81f2c650a9332846\" discussion_target=\"Lecture 2\"/>\n", # lint-amnesty, pylint: disable=line-too-long
{
"discussion_category": "Lecture 2",
"discussion_target": "Lecture 2",
"display_name": "Lecture 2 Discussion",
"discussion_id": "b08bfd89b2aa40fa81f2c650a9332846"
},
'vertical', 'Vert2',
split=False
)
with check_mongo_calls(2, 2):
# 2 finds b/c looking for non-existent parents
self._create_item('static_tab', 'staticuno', "<p>tab</p>", {'display_name': 'Tab uno'}, None, None, split=False) # lint-amnesty, pylint: disable=line-too-long
self._create_item('course_info', 'updates', "<ol><li><h2>Sep 22</h2><p>test</p></li></ol>", {}, None, None, split=False) # lint-amnesty, pylint: disable=line-too-long
    def test_publish_draft_delete(self):
        """
        To reproduce a bug (STUD-811) publish a vertical, convert to draft, delete a child, move a child, publish.
        See if deleted and moved children still is connected or exists in db (bug was disconnected but existed)
        """
        vert_location = self.old_course_key.make_usage_key('vertical', block_id='Vert1')
        item = self.draft_mongo.get_item(vert_location, 2)
        # Finds:
        # 1 get draft vert,
        # 2 compute parent
        # 3-14 for each child: (3 children x 4 queries each)
        #    get draft, compute parent, and then published child
        #    compute inheritance
        # 15 get published vert
        # 16-18 get ancestor chain
        # 19 compute inheritance
        # 20-22 get draft and published vert, compute parent
        # Sends:
        # delete the subtree of drafts (1 call),
        # update the published version of each node in subtree (4 calls),
        # update the ancestors up to course (2 calls)
        if mongo_uses_error_check(self.draft_mongo):
            max_find = 23
        else:
            max_find = 22
        with check_mongo_calls(max_find, 7):
            self.draft_mongo.publish(item.location, self.user_id)
        # verify status
        item = self.draft_mongo.get_item(vert_location, 0)
        assert not getattr(item, 'is_draft', False), 'Item was published. Draft should not exist'
        # however, children are still draft, but I'm not sure that's by design
        # delete the draft version of the discussion
        location = self.old_course_key.make_usage_key('discussion', block_id='Discussion1')
        self.draft_mongo.delete_item(location, self.user_id)
        # deleting a child implicitly converts the parent vertical back to draft
        draft_vert = self.draft_mongo.get_item(vert_location, 0)
        assert getattr(draft_vert, 'is_draft', False), "Deletion didn't convert parent to draft"
        assert location not in draft_vert.children
        # move the other child
        other_child_loc = self.old_course_key.make_usage_key('html', block_id='Html2')
        draft_vert.children.remove(other_child_loc)
        other_vert = self.draft_mongo.get_item(self.old_course_key.make_usage_key('vertical', block_id='Vert2'), 0)
        other_vert.children.append(other_child_loc)
        self.draft_mongo.update_item(draft_vert, self.user_id)
        self.draft_mongo.update_item(other_vert, self.user_id)
        # publish
        self.draft_mongo.publish(vert_location, self.user_id)
        # re-publishing must not resurrect the deleted/moved children (STUD-811)
        item = self.draft_mongo.get_item(draft_vert.location, revision=ModuleStoreEnum.RevisionOption.published_only)
        assert location not in item.children
|
Aluriak/MusicGenerator | generator/test/test_markov.py | Python | gpl-2.0 | 423 | 0.002364 |
from generator import markov
def test_chain():
    """An order-1 chain over 'acab' yields the expected transition table."""
    chain = markov.chain('acab', order=1)
    assert chain == {('a',): {'c': .5, 'b': .5}, ('c',): {'a': 1.}}
    # 'c' is always followed by 'a'; 'a' is followed by 'c' or 'b'
    assert 'a' == markov.random_walk(chain, 'c')
    assert markov.random_walk(chain, 'a') in 'cb'
def test_random_walk():
    """With a single possible transition the walk is deterministic."""
    chain = markov.chain('abc', order=2)
    assert chain == {('a', 'b'): {'c': 1.}}
    assert 'c' == markov.random_walk(chain, 'ab')
|
Luftzig/pypatterns | setup.py | Python | mit | 809 | 0 | from setuptools import setup, find_packages
setup(
    name="pypatterns",
    version="0.2.0",
    packages=find_packages(),
    author="Yoav Luft",
    author_email="yoav.luft@gmail.com",
    description="Functional style pattern matching for Python",
    license="MIT",
    url="https://github.com/Luftzig/pypatterns",
    # setuptools' keyword for test dependencies is 'tests_require';
    # the previous 'test_requires' spelling was silently ignored.
    tests_require=['pytest'],
    keywords=['functional', 'pattern matching'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
|
joshjo/django-sentry | sentry/conf/defaults.py | Python | apache-2.0 | 2,997 | 0.001001 | """
sentry.conf.defaults
~~~~~~~~~~~~~~~~~~~~
Represents the default values for all Sentry settings.
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import os.path
import socket
ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))
# Allow local testing of Sentry even if DEBUG is enabled
DEBUG = False
DATABASE_USING = None
THRASHING_TIMEOUT = 60
THRASHING_LIMIT = 10
FILTERS = (
'sentry.filters.StatusFilter',
'sentry.filters.LoggerFilter',
'sentry.filters.LevelFilter',
'sentry.filters.ServerNameFilter',
'sentry.filters.SiteFilter',
)
# Sentry allows you to specify an alternative search backend for itself
SEARCH_ENGINE = None
SEARCH_OPTIONS = {}
KEY = socket.gethostname() + '1304u13oafjadf0913j4'
LOG_LEVELS = (
(logging.DEBUG, 'debug'),
(logging.INFO, 'info'),
(logging.WARNING, 'warning'),
(logging.ERROR, 'error'),
(logging.FATAL, 'fatal'),
)
# This should be the full URL to sentries store view
SERVERS = None
TIMEOUT = 5
ADMINS = []
CLIENT = 'sentry.client.base.SentryClient'
NAME = socket.gethostname()
# We allow setting the site name either by explicitly setting it with the
# SENTRY_SITE setting, or using the django.contrib.sites framework for
# fetching the current site. Since we can't reliably query the database
# from this module, the specific logic is within the SiteFilter
SITE = None
# Extending this allow you to ignore module prefixes when we attempt to
# discover which function an error comes from (typically a view)
EXCLUDE_PATHS = []
# By default Sentry only looks at modules in INSTALLED_APPS for drilling down
# where an exception is located
INCLUDE_PATHS = []
# Absolute URL to the sentry root directory. Should not include a trailing slash.
URL_PREFIX = ''
# Allow access to Sentry without authentication.
PUBLIC = False
# The maximum number of elements to store for a list-like structure.
MAX_LENGTH_LIST = 50
# The maximum length to store of a string-like structure.
MAX_LENGTH_STRING = 200
EMAIL_SUBJECT_PREFIX = ''
INTERNAL_IPS = set()
SERVER_EMAIL = 'root@localhost'
LOGIN_URL = None
# Automatically log frame stacks from all ``logging`` messages.
AUTO_LOG_STACKS = False
# Only store a portion of all messages per unique group.
SAMPLE_DATA = True
# Restrict emails to only ``messages >= this value``.
MAIL_LEVEL = logging.DEBUG
# A list of loggers to restrict emails to.
MAIL_INCLUDE_LOGGERS = None
# A list of loggers to exclude in emails.
MAIL_EXCLUDE_LOGGERS = []
# Normalize counts to the 15 minute marker. This value MUST be less than 60. A
# value of 0 would store counts for every minute, and is the lowest level of
# accuracy provided.
MINUTE_NORMALIZATION = 15

## The following settings refer to the built-in webserver

WEB_HOST = 'localhost'
WEB_PORT = 9000
WEB_LOG_FILE = os.path.join(ROOT, 'sentry.log')
WEB_PID_FILE = os.path.join(ROOT, 'sentry.pid')
bswartz/manila | manila/tests/conf_fixture.py | Python | apache-2.0 | 2,989 | 0 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_policy import opts
from oslo_service import wsgi
from manila.common import config
CONF = config.CONF
def set_defaults(conf):
    """Set reasonable test defaults on the given oslo.config object.

    Options that are not registered in this run (e.g. because a driver
    was not imported) are skipped silently via _safe_set_of_opts.
    """
    _safe_set_of_opts(conf, 'verbose', True)
    _safe_set_of_opts(conf, 'state_path', os.path.abspath(
        os.path.join(os.path.dirname(__file__),
                     '..',
                     '..')))
    _safe_set_of_opts(conf, 'connection', "sqlite://", group='database')
    _safe_set_of_opts(conf, 'sqlite_synchronous', False)
    _POLICY_PATH = os.path.abspath(os.path.join(CONF.state_path,
                                                'manila/tests/policy.json'))
    opts.set_defaults(conf, policy_file=_POLICY_PATH)
    _safe_set_of_opts(conf, 'share_export_ip', '0.0.0.0')
    _safe_set_of_opts(conf, 'service_instance_user', 'fake_user')
    _API_PASTE_PATH = os.path.abspath(os.path.join(CONF.state_path,
                                                   'etc/manila/api-paste.ini'))
    wsgi.register_opts(conf)
    _safe_set_of_opts(conf, 'api_paste_config', _API_PASTE_PATH)
    _safe_set_of_opts(conf, 'share_driver',
                      'manila.tests.fake_driver.FakeShareDriver')
    _safe_set_of_opts(conf, 'auth_strategy', 'noauth')
    _safe_set_of_opts(conf, 'zfs_share_export_ip', '1.1.1.1')
    _safe_set_of_opts(conf, 'zfs_service_ip', '2.2.2.2')
    _safe_set_of_opts(conf, 'zfs_zpool_list', ['foo', 'bar'])
    _safe_set_of_opts(conf, 'zfs_share_helpers', 'NFS=foo.bar.Helper')
    _safe_set_of_opts(conf, 'zfs_replica_snapshot_prefix', 'foo_prefix_')
    _safe_set_of_opts(conf, 'hitachi_hsp_host', '172.24.47.190')
    _safe_set_of_opts(conf, 'hitachi_hsp_username', 'hsp_user')
    _safe_set_of_opts(conf, 'hitachi_hsp_password', 'hsp_password')
    _safe_set_of_opts(conf, 'qnap_management_url', 'http://1.2.3.4:8080')
    _safe_set_of_opts(conf, 'qnap_share_ip', '1.2.3.4')
    _safe_set_of_opts(conf, 'qnap_nas_login', 'admin')
    _safe_set_of_opts(conf, 'qnap_nas_password', 'qnapadmin')
    _safe_set_of_opts(conf, 'qnap_poolname', 'Storage Pool 1')
def _safe_set_of_opts(conf, *args, **kwargs):
    """Set a default on ``conf``, ignoring options that are not registered.

    A driver's options may not be imported in a given test environment;
    oslo.config then raises NoSuchOptError, which is safe to skip here.
    """
    try:
        conf.set_default(*args, **kwargs)
    except config.cfg.NoSuchOptError:
        # Assumed that opt is not imported and not used
        pass
|
tdryer/hangups | hangups/conversation.py | Python | mit | 46,809 | 0 | """Conversation objects."""
import asyncio
import datetime
import logging
from hangups import (parsers, event, user, conversation_event, exceptions,
hangouts_pb2)
logger = logging.getLogger(__name__)
CONVERSATIONS_PER_REQUEST = 100
MAX_CONVERSATION_PAGES = 100
async def build_user_conversation_list(client):
    """Build :class:`.UserList` and :class:`.ConversationList`.

    This method requests data necessary to build the list of conversations and
    users. Users that are not in the contact list but are participating in a
    conversation will also be retrieved.

    Args:
        client (Client): Connected client.

    Returns:
        (:class:`.UserList`, :class:`.ConversationList`):
            Tuple of built objects.
    """
    conv_states, sync_timestamp = await _sync_all_conversations(client)

    # Retrieve entities participating in all conversations.
    required_user_ids = set()
    for conv_state in conv_states:
        required_user_ids |= {
            user.UserID(chat_id=part.id.chat_id, gaia_id=part.id.gaia_id)
            for part in conv_state.conversation.participant_data
        }
    required_entities = []
    if required_user_ids:
        logger.debug('Need to request additional users: {}'
                     .format(required_user_ids))
        try:
            response = await client.get_entity_by_id(
                hangouts_pb2.GetEntityByIdRequest(
                    request_header=client.get_request_header(),
                    batch_lookup_spec=[
                        hangouts_pb2.EntityLookupSpec(
                            gaia_id=user_id.gaia_id,
                            create_offnetwork_gaia=True,
                        )
                        for user_id in required_user_ids
                    ],
                )
            )
            for entity_result in response.entity_result:
                required_entities.extend(entity_result.entity)
        except exceptions.NetworkError as e:
            # Missing users are tolerable; the list is built from whatever
            # entities could be fetched.
            logger.warning('Failed to request missing users: {}'.format(e))

    # Build list of conversation participants.
    conv_part_list = []
    for conv_state in conv_states:
        conv_part_list.extend(conv_state.conversation.participant_data)

    # Retrieve self entity.
    get_self_info_response = await client.get_self_info(
        hangouts_pb2.GetSelfInfoRequest(
            request_header=client.get_request_header(),
        )
    )
    self_entity = get_self_info_response.self_entity

    user_list = user.UserList(client, self_entity, required_entities,
                              conv_part_list)
    conversation_list = ConversationList(client, conv_states,
                                         user_list, sync_timestamp)
    return (user_list, conversation_list)
async def _sync_all_conversations(client):
    """Sync all conversations by making paginated requests.

    Conversations are ordered by ascending sort timestamp.

    Args:
        client (Client): Connected client.

    Raises:
        NetworkError: If the requests fail.

    Returns:
        tuple of list of ``ConversationState`` messages and sync timestamp
    """
    conv_states = []
    sync_timestamp = None
    request = hangouts_pb2.SyncRecentConversationsRequest(
        request_header=client.get_request_header(),
        max_conversations=CONVERSATIONS_PER_REQUEST,
        max_events_per_conversation=1,
        sync_filter=[
            hangouts_pb2.SYNC_FILTER_INBOX,
            hangouts_pb2.SYNC_FILTER_ARCHIVED,
        ]
    )
    for _ in range(MAX_CONVERSATION_PAGES):
        logger.info(
            'Requesting conversations page %s', request.last_event_timestamp
        )
        response = await client.sync_recent_conversations(request)
        # Pages arrive newest-first, so prepend each older page to keep
        # conv_states in ascending sort-timestamp order.
        conv_states = list(response.conversation_state) + conv_states
        sync_timestamp = parsers.from_timestamp(
            # SyncRecentConversations seems to return a sync_timestamp 4
            # minutes before the present. To prevent SyncAllNewEvents later
            # breaking requesting events older than what we already have, use
            # current_server_time instead.
            response.response_header.current_server_time
        )
        if response.continuation_end_timestamp == 0:
            logger.info('Reached final conversations page')
            break
        else:
            request.last_event_timestamp = response.continuation_end_timestamp
    else:
        # The loop ran to completion without ever hitting the final page.
        logger.warning('Exceeded maximum number of conversation pages')
    logger.info('Synced %s total conversations', len(conv_states))
    return conv_states, sync_timestamp
class Conversation:
"""A single chat conversation.
Use :class:`.ConversationList` methods to get instances of this class.
"""
    def __init__(self, client, user_list, conversation, events=[],
                 event_cont_token=None):
        # pylint: disable=dangerous-default-value
        """Initialize the conversation from its protobuf state.

        Args:
            client: Connected low-level Client used for later requests.
            user_list: UserList used to resolve participant UserIDs.
            conversation: hangouts_pb2.Conversation message.
            events: Optional iterable of hangouts_pb2.Event to pre-populate
                (only iterated here, so the mutable default is harmless --
                hence the pylint disable above).
            event_cont_token: Continuation token for fetching older events.
        """
        self._client = client  # Client
        self._user_list = user_list  # UserList
        self._conversation = conversation  # hangouts_pb2.Conversation
        self._events = []  # [hangouts_pb2.Event]
        self._events_dict = {}  # {event_id: ConversationEvent}
        self._send_message_lock = asyncio.Lock()
        self._watermarks = {}  # {UserID: datetime.datetime}
        self._event_cont_token = event_cont_token
        for event_ in events:
            # Workaround to ignore observed events returned from
            # syncrecentconversations.
            if event_.event_type != hangouts_pb2.EVENT_TYPE_OBSERVED_EVENT:
                self.add_event(event_)

        self.on_event = event.Event('Conversation.on_event')
        """
        :class:`.Event` fired when an event occurs in this conversation.

        Args:
            conv_event: :class:`.ConversationEvent` that occurred.
        """

        self.on_typing = event.Event('Conversation.on_typing')
        """
        :class:`.Event` fired when a users starts or stops typing in this
        conversation.

        Args:
            typing_message: :class:`~hangups.parsers.TypingStatusMessage` that
                occurred.
        """

        self.on_watermark_notification = event.Event(
            'Conversation.on_watermark_notification'
        )
        """
        :class:`.Event` fired when a watermark (read timestamp) is updated for
        this conversation.

        Args:
            watermark_notification:
                :class:`~hangups.parsers.WatermarkNotification` that occurred.
        """
        self.on_watermark_notification.add_observer(
            self._on_watermark_notification
        )
@property
def id_(self):
"""The conversation's ID (:class:`str`)."""
return self._conversation.conversation_id.id
@property
def users(self):
"""List of conversation participants (:class:`~hangups.user.User`)."""
return [self._user_list.get_user(user.UserID(chat_id=part.id.chat_id,
gaia_id=part.id.gaia_id))
for part in self._conversation.participant_data]
@property
def name(self):
"""The conversation's custom name (:class:`str`)
May be ``None`` if conversation has no custom name.
"""
custom_name = self._conversation.name
return None if custom_name == '' else custom_name
@property
def last_modified(self):
"""When conversation was last modified (:class:`datetime.datetime`)."""
timestamp = self._conversation.self_conversation_state.sort_timestamp
# timestamp can be None for some reason when there is an ongoing video
# hangout
if timestamp is None:
timestamp = 0
return parsers.from_timestamp(timestamp)
@property
def latest_read_timestamp(self):
"""Timestamp of latest read event (:class:`datetime.datetime`)."""
timestamp = (self._conversation.self_conversation_state.
self_read_state.latest_read_timestamp)
return parsers.from_timestamp(timestamp)
@property
def events(self):
"""Loaded events sorted oldest to newes |
crmauceri/VisualCommonSense | code/database_builder/tools/cmd_arguments_helper.py | Python | mit | 1,911 | 0.030351 | # Author: Edison Huang
# Email: hom.tao@yahoo.com
#
# Example:
# def main():
# from CmdArgumentsHelper import CmdArgumentsHelper;
# arg_helper = CmdArgumentsHelper();
# arg_helper.add_argument('query', 'q', 'query', 1);
# args = arg_helper.read_arguments();
#
# query_string = args['query'];
# ... manipulating query_string ...
#
# if __name__ == "__main__":
# main();
#
class CmdArgumentsHelper(object):
    """Small declarative wrapper around getopt.

    Register arguments with add_argument() and then call read_arguments()
    to parse sys.argv into an {argument_name: value} dict.
    """

    def __init__(self):
        # Per-instance state.  The original kept these as *class*
        # attributes, so every instance shared (and accumulated) the
        # registrations of every other instance.
        self.args = []
        self.args_cmd = {}
        self.args_option = {}
        self.args_has_value = {}

    def add_argument(self, argument_name, argument_cmd, argument_option, has_value):
        """Register one argument: short flag, long option and value flag."""
        self.args.append(argument_name)
        self.args_cmd[argument_name] = argument_cmd
        self.args_option[argument_name] = argument_option
        self.args_has_value[argument_name] = has_value

    def gen_help_message(self):
        """Build a usage string such as ' -q <query>' for all arguments."""
        help_message = ''
        for arg in self.args:
            help_message = help_message + ' -' + self.args_cmd[arg] + ' ' + '<' + arg + '>'
        return help_message

    def gen_cmds(self):
        """Build the getopt short-options string (always includes 'h')."""
        cmds = 'h'
        for arg in self.args:
            cmds = cmds + self.args_cmd[arg]
            if self.args_has_value[arg]:
                # a trailing ':' tells getopt this flag takes a value
                cmds = cmds + ':'
        return cmds

    def gen_options(self):
        """Build the getopt long-options list for value-taking arguments."""
        options = []
        for arg in self.args:
            if self.args_has_value[arg]:
                options.append(self.args_option[arg] + '=')
        return options

    def _read_arguments(self, argv):
        """Parse argv into {argument_name: value}; exit(2) on bad options."""
        import sys, getopt
        help_message = self.gen_help_message()
        try:
            opts, args = getopt.getopt(argv, self.gen_cmds(), self.gen_options())
        except getopt.GetoptError:
            # narrow except instead of the previous bare 'except:', which
            # also swallowed KeyboardInterrupt/SystemExit
            print(help_message)
            sys.exit(2)
        ret = {}
        for opt, arg_value in opts:
            for arg_name in self.args:
                if opt in ('-' + self.args_cmd[arg_name], '--' + self.args_option[arg_name]):
                    ret[arg_name] = arg_value
        return ret

    def read_arguments(self):
        """Parse sys.argv (minus the program name) into a dict."""
        import sys
        return self._read_arguments(sys.argv[1:])
|
pywinauto/pywinauto | pywinauto/findbestmatch.py | Python | bsd-3-clause | 20,676 | 0.004159 | # GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to find the closest match of a string in a list"""
from __future__ import unicode_literals
import re
import difflib
import six
#import ctypes
#import ldistance
#levenshtein_distance = ctypes.cdll.levenshtein.levenshtein_distance
#levenshtein_distance = ldistance.distance
find_best_control_match_cutoff = .6
#====================================================================
class MatchError(IndexError):
    """A suitable match could not be found"""

    def __init__(self, items=None, tofind=''):
        """Remember what was searched for and where, then init the parent."""
        self.tofind = tofind
        self.items = [] if items is None else items
        message = "Could not find '{0}' in '{1}'".format(tofind, self.items)
        IndexError.__init__(self, message)
_cache = {}
# given a list of texts return the match score for each
# and the best score and text with best score
#====================================================================
def _get_match_ratios(texts, match_against):
"""Get the match ratio of how each item in texts compared to match_against"""
# now time to figure out the matching
ratio_calc = difflib.SequenceMatcher()
ratio_calc.set_seq1(match_against)
ratios = {}
best_ratio = 0
best_text = ''
for text in texts:
if 0:
pass
if (text, match_against) in _cache:
ratios[text] = _cache[(text, match_against)]
elif(match_against, text) in _cache:
ratios[text] = _cache[(match_against, text)]
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(six.text_type(match_against), six.text_type(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text] = ratio
# calculate ratio and store it
ratios[text] = ratio_calc.ratio()
_cache[(match_against, text)] = ratios[text]
# if this is the best so far then update best stats
if ratios[text] > best_ratio:
best_ratio = ratios[text]
best_text = text
return ratios, best_ratio, best_text
#====================================================================
def find_best_match(search_text, item_texts, items, limit_ratio=.5):
    """Return the item that best matches the search_text

    * **search_text** The text to search for
    * **item_texts** The list of texts to search through

    * **items** The list of items corresponding (1 to 1)
      to the list of texts to search through.

    * **limit_ratio** How well the text has to match the best match.
      If the best match matches lower then this then it is not
      considered a match and a MatchError is raised, (default = .5)
    """
    search_text = _cut_at_eol(_cut_at_tab(search_text))

    # Map each cleaned, de-duplicated item text to its item
    text_item_map = UniqueDict()
    for text, item in zip(item_texts, items):
        cleaned = _cut_at_eol(_cut_at_tab(text))
        text_item_map[cleaned] = item

    ratios, best_ratio, best_text = _get_match_ratios(
        text_item_map.keys(), search_text)

    if best_ratio < limit_ratio:
        raise MatchError(items=text_item_map.keys(), tofind=search_text)

    return text_item_map[best_text]
#====================================================================
_after_tab = re.compile(r"\t.*", re.UNICODE)
_after_eol = re.compile(r"\n.*", re.UNICODE)
_non_word_chars = re.compile(r"\W", re.UNICODE)
def _cut_at_tab(text):
"""Clean out non characters from the string and return it"""
# remove anything after the first tab
return _after_tab.sub("", text)
def _cut_at_eol(text):
"""Clean out non characters from the string and return it"""
# remove anything after the first EOL
return _after_eol.sub("", text)
def _clean_non_chars(text):
"""Remove non word characters"""
# should this also remove everything after the first tab?
# remove non alphanumeric characters
return _non_word_chars.sub("", text)
def is_above_or_to_left(ref_control, other_ctrl):
    """Return true if the other_ctrl is above or to the left of ref_control"""
    other_rect = other_ctrl.rectangle()
    ref_rect = ref_control.rectangle()

    # Disqualified when it starts at or past the reference's right edge,
    # at or below its bottom edge, or with its top-left corner inside or
    # past the reference's top-left corner.
    to_the_right = other_rect.left >= ref_rect.right
    below = other_rect.top >= ref_rect.bottom
    inside = (other_rect.top >= ref_rect.top and
              other_rect.left >= ref_rect.left)

    return not (to_the_right or below or inside)
#====================================================================
distance_cuttoff = 999
def get_non_text_control_name(ctrl, controls, text_ctrls):
"""
return the name for this control by finding the closest
text control above and to its left
"""
names = []
# simply look for an instance of the control in the list,
# we don't use list.index() method as it invokes __eq__
ctrl_index = 0
for i, c in enumerate(controls):
if c is ctrl:
ctrl_index = i
break
ctrl_friendly_class_name = ctrl.friendly_class_name()
if | ctrl_index != 0:
pr | ev_ctrl = controls[ctrl_index-1]
prev_ctrl_text = prev_ctrl.window_text()
if prev_ctrl.friendly_class_name() == "Static" and \
prev_ctrl.is_visible() and prev_ctrl_text and \
is_above_or_to_left(ctrl, prev_ctrl):
names.append(
prev_ctrl_text +
ctrl_friendly_class_name)
best_name = ''
closest = distance_cuttoff
# now for each of the visible text controls
for text_ctrl in text_ctrls:
# get aliases to the control rectangles
text_r = text_ctrl.rectangle()
ctrl_r = ctrl.rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
continue
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
continu |
sergey-tomin/workshop | 2_tracking.py | Python | mit | 2,857 | 0.00175 | """
This script was created by Sergey Tomin for Workshop: Designing future X-ray FELs. Source and license info is on GitHub.
August 2016.
"""
# this python library provides generic shallow (copy) and deep copy (deepcopy) operations
from copy import deepcopy
# import from Ocelot main modules and functions
from ocelot import *
# import from Ocelot graphical modules
from ocelot.gui.accelerator import *
from ocelot.adaptors.astra2ocelot import *
# import injector lattice (garbled in the original source; reconstructed)
from ocelot.test.workshop.injector_lattice import *

lat = MagneticLattice(cell)

# initialization of Twiss object
tws0 = Twiss()
# defining initial twiss parameters
tws0.beta_x = 29.171
tws0.beta_y = 29.171
tws0.alpha_x = 10.955
tws0.alpha_y = 10.955
# defining initial electron energy in GeV
tws0.E = 0.005
# calculate twiss functions with initial twiss parameters
tws = twiss(lat, tws0, nPoints=None)
tws1 = tws[-1]
print(tws[-1])

# plotting twiss parameters
plot_opt_func(lat, tws, top_plot=["Dx"], fig_name="i1", legend=False)

# Loading of beam distribution
p_array_init = astraBeam2particleArray(filename='beam_130MeV.ast')

# initialization of tracking method
method = MethodTM()
# for second order tracking we have to choose SecondTM
method.global_method = SecondTM
# for first order tracking uncomment next line
# method.global_method = TransferMap

# we will start simulation from the first quadrupole (QI.46.I1) after RF section.
# you can change stop element (and the start element, as well)
# START_73_I1 - marker before Dog leg
# START_96_I1 - marker before Bunch Compression
lat = MagneticLattice(cell, start=QI_46_I1, stop=None, method=method)

navi = Navigator(lat)
p_array = deepcopy(p_array_init)
tws_track, p_array = track(lat, p_array, navi)

# you can change top_plot argument, for example top_plot=["alpha_x", "alpha_y"]
plot_opt_func(lat, tws_track, top_plot=["E"], fig_name=0, legend=False)
plt.show()

# Current profile
bins_start, hist_start = get_current(p_array, charge=p_array.q_array[0], num_bins=200)

plt.figure(4)
plt.title("current: end")
plt.plot(bins_start*1000, hist_start)
plt.xlabel("s, mm")
plt.ylabel("I, A")
plt.grid(True)
plt.show()

# Beam distribution: longitudinal position vs. transverse coordinates / energy
tau = np.array([p.tau for p in p_array])
dp = np.array([p.p for p in p_array])
x = np.array([p.x for p in p_array])
y = np.array([p.y for p in p_array])

ax1 = plt.subplot(311)
ax1.plot(-tau*1000, x*1000, 'r.')
plt.setp(ax1.get_xticklabels(), visible=False)
plt.ylabel("x, mm")
plt.grid(True)

ax2 = plt.subplot(312, sharex=ax1)
ax2.plot(-tau*1000, y*1000, 'r.')
plt.setp(ax2.get_xticklabels(), visible=False)
plt.ylabel("y, mm")
plt.grid(True)

ax3 = plt.subplot(313, sharex=ax1)
ax3.plot(-tau*1000, dp, 'r.')
plt.ylabel("dp/p")
plt.xlabel("s, mm")
plt.grid(True)
plt.show()
vjorlikowski/plexus | plexus/tables.py | Python | apache-2.0 | 11,545 | 0.000866 | # Copyright (c) 2015 Duke University.
# This software is distributed under the terms of the MIT License,
# the text of which is included in this distribution within the file
# named LICENSE.
#
# Portions of this software are derived from the "rest_router" controller
# application included with Ryu (http://osrg.github.io/ryu/), which is:
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Modifications and additions were made to the original content by the
# following authors:
# Author: Victor J. Orlikowski <vjo@duke.edu>
import time
from plexus import *
from plexus.util import *
class PortData(dict):
    """Mapping of port number -> port object."""

    def __init__(self, ports):
        # Seed the dict from (port_no, port) pairs in one shot.
        super(PortData, self).__init__(
            (entry.port_no, entry) for entry in ports)

    def update(self, port):
        # Deliberately narrows dict.update() to a single port object.
        self[port.port_no] = port

    def delete(self, port):
        del self[port.port_no]
class AddressData(dict):
    """Mapping of 'a.b.c.d/len' keys to Address entries.

    Hands out sequential 16-bit address ids, skipping the reserved
    cookie default id.
    """

    def __init__(self):
        super(AddressData, self).__init__()
        # Next address id to assign; wraps at 16 bits.
        self.address_id = 1

    def add(self, address):
        """Parse and register *address* ('a.b.c.d/len'); return the entry.

        Raises CommandFailure when the new network overlaps an already
        registered one.
        """
        err_msg = 'Invalid [%s] value.' % REST_ADDRESS
        nw_addr, mask, default_gw = nw_addr_aton(address, err_msg=err_msg)

        # Check overlaps
        for other in six.itervalues(self):
            # NOTE(review): other_mask/add_mask are never used below;
            # mask_ntob() may be called for validation only -- confirm.
            other_mask = mask_ntob(other.netmask)
            add_mask = mask_ntob(mask, err_msg=err_msg)
            # Overlap when either default gateway lies inside the other network.
            if (other.nw_addr == ipv4_apply_mask(default_gw, other.netmask) or
                    nw_addr == ipv4_apply_mask(other.default_gw, mask,
                                               err_msg)):
                msg = 'Address overlaps [address_id=%d]' % other.address_id
                raise CommandFailure(msg=msg)

        address = Address(self.address_id, nw_addr, mask, default_gw)
        ip_str = ip_addr_ntoa(nw_addr)
        key = '%s/%d' % (ip_str, mask)
        self[key] = address

        # Advance the id, wrapping at 16 bits and skipping the reserved value.
        self.address_id += 1
        self.address_id &= UINT16_MAX
        if self.address_id == COOKIE_DEFAULT_ID:
            self.address_id = 1

        return address

    def delete(self, address_id):
        """Remove the entry with *address_id*, if present."""
        for key, value in self.items():
            if value.address_id == address_id:
                del self[key]
                return

    def get_default_gw(self):
        """Return the default gateway of every registered address."""
        return [address.default_gw for address in six.itervalues(self)]

    def get_data(self, addr_id=None, ip=None):
        """Find an entry by id (*addr_id*) or by containing network (*ip*).

        Returns None when nothing matches or when neither argument is given.
        """
        if addr_id is not None:
            def find_address(address):
                return bool(addr_id == address.address_id)
        elif ip is not None:
            def find_address(address):
                return bool(ipv4_apply_mask(ip, address.netmask) ==
                            address.nw_addr)

        try:
            for address in six.itervalues(self):
                if find_address(address):
                    return address
        except NameError:
            # We only get here if find_address() was undefined.
            # That only happens, if both addr_id and ip were None.
            # In that case, bail out and return None.
            pass

        return None
class Address(object):
    """One configured IPv4 network: id, network address, mask and gateway."""

    def __init__(self, address_id, nw_addr, netmask, default_gw):
        super(Address, self).__init__()
        # Stored verbatim; validation happens in AddressData.add().
        self.address_id = address_id
        self.nw_addr = nw_addr
        self.netmask = netmask
        self.default_gw = default_gw

    def __contains__(self, ip):
        """True when *ip* falls inside this network."""
        masked = ipv4_apply_mask(ip, self.netmask)
        return masked == self.nw_addr
class PolicyRoutingTable(dict):
    """Collection of RoutingTables keyed by source network.

    The table under INADDR_ANY is the default routing table; additional
    tables are created per source address for policy routing.
    """

    def __init__(self):
        super(PolicyRoutingTable, self).__init__()
        self[INADDR_ANY] = RoutingTable()
        # Next route id to hand out; wraps at 16 bits and skips the
        # reserved cookie default id.
        self.route_id = 1
        self.dhcp_servers = []

    def add(self, dst_nw_addr, dst_vlan, gateway_ip, src_address=None):
        """Add a route, optionally scoped to *src_address*, and return it."""
        key = INADDR_ANY
        if src_address is not None:
            ip_str = ip_addr_ntoa(src_address.nw_addr)
            key = '%s/%d' % (ip_str, src_address.netmask)
            if key not in self:
                self.add_table(key, src_address)
        table = self[key]
        added_route = table.add(dst_nw_addr, dst_vlan, gateway_ip, self.route_id)
        if added_route is not None:
            # Only consume an id when a route was actually inserted.
            self.route_id += 1
            self.route_id &= UINT16_MAX
            if self.route_id == COOKIE_DEFAULT_ID:
                self.route_id = 1
        return added_route

    def delete(self, route_id):
        """Remove the route with *route_id* from every table."""
        for table in six.itervalues(self):
            table.delete(route_id)
        return

    def add_table(self, key, address):
        """Create (and return) a new per-source routing table under *key*."""
        self[key] = RoutingTable(address)
        return self[key]

    def get_all_gateway_info(self):
        """Return (gateway_ip, gateway_mac) pairs from every table."""
        all_gateway_info = []
        for table in six.itervalues(self):
            # Reconstructed from a garbled span in the original source.
            all_gateway_info += table.get_all_gateway_info()
        return all_gateway_info

    def get_data(self, gw_mac=None, dst_ip=None, src_ip=None):
        """Look up a route, preferring the table whose source network
        contains *src_ip*; fall back to the default table."""
        desired_table = self[INADDR_ANY]
        if src_ip is not None:
            for table in six.itervalues(self):
                if table.src_address is not None:
                    # Reconstructed from a garbled span in the original source.
                    if (table.src_address.nw_addr ==
                            ipv4_apply_mask(src_ip, table.src_address.netmask)):
                        desired_table = table
                        break
        route = desired_table.get_data(gw_mac, dst_ip)
        if ((route is None) and (desired_table != self[INADDR_ANY])):
            route = self[INADDR_ANY].get_data(gw_mac, dst_ip)
        return route
class RoutingTable(dict):
    """Routes keyed by 'dst_ip/netmask', optionally bound to a source net."""

    def __init__(self, address=None):
        super(RoutingTable, self).__init__()
        # Source Address this table applies to; None for the default table.
        self.src_address = address

    def add(self, dst_nw_addr, dst_vlan, gateway_ip, route_id):
        """Insert a route to *dst_nw_addr* via *gateway_ip*; return it.

        Raises CommandFailure when a route for the same destination
        already exists.
        """
        err_msg = 'Invalid [%s] value.'
        if dst_nw_addr == INADDR_ANY:
            # Default route: network 0/0.
            dst_ip = 0
            dst_netmask = 0
        else:
            dst_ip, dst_netmask, dst_dummy = nw_addr_aton(
                dst_nw_addr, err_msg=err_msg % REST_DESTINATION)
        gateway_ip = ip_addr_aton(gateway_ip, err_msg=err_msg % REST_GATEWAY)

        dst_ip_str = ip_addr_ntoa(dst_ip)
        key = '%s/%d' % (dst_ip_str, dst_netmask)

        # Check overlaps
        overlap_route = None
        if key in self:
            overlap_route = self[key].route_id
        if overlap_route is not None:
            msg = 'Destination overlaps [route_id=%d]' % overlap_route
            raise CommandFailure(msg=msg)

        routing_data = Route(route_id, dst_ip, dst_netmask, dst_vlan, gateway_ip, self.src_address)
        self[key] = routing_data
        return routing_data

    def delete(self, route_id):
        """Remove the route with *route_id*, if present."""
        for key, value in self.items():
            if value.route_id == route_id:
                del self[key]
                return

    def get_all_gateway_info(self):
        """Return (gateway_ip, gateway_mac) for every route in this table."""
        all_gateway_info = []
        for route in six.itervalues(self):
            gateway_info = (route.gateway_ip, route.gateway_mac)
            all_gateway_info.append(gateway_info)
        return all_gateway_info

    def get_data(self, gw_mac=None, dst_ip=None):
        """Look up a route by gateway MAC or by destination IP.

        Destination lookups use longest-prefix match, falling back to the
        default (INADDR_ANY) entry.  Returns None when neither argument is
        supplied or nothing matches.
        """
        if gw_mac is not None:
            for route in six.itervalues(self):
                if gw_mac == route.gateway_mac:
                    return route
            return None

        elif dst_ip is not None:
            get_route = None
            mask = 0
            for route in six.itervalues(self):
                if ipv4_apply_mask(dst_ip, route.dst_netmask) == route.dst_ip:
                    # For longest match
                    if mask < route.dst_netmask:
                        get_route = route
                        mask = route.dst_netmask

            if get_route is None:
                get_route = self.get(INADDR_ANY, None)
            return get_route
        else:
            return None
class Route(object):
def __init__(self, route_id, dst_ip, dst_netmask, dst_vlan, gateway_ip, src_address=None):
super(Route, self).__init__()
self.route_id = route_id
self.dst_ip = dst_ip
self.dst_netmask = dst_netmask
self.dst_vlan = dst_vlan
self.gateway_ip = gateway_ip
self.gateway_mac = None
if src_address is None:
self.src_ip = 0
self.src_netmask = 0
else:
|
gltn/stdm | stdm/third_party/sqlalchemy/testing/suite/test_select.py | Python | gpl-2.0 | 24,377 | 0 | from .. import config
from .. import fixtures
from ..assertions import eq_
from ..assertions import in_
from ..schema import Column
from ..schema import Table
from ... import bindparam
from ... import case
from ... import Computed
from ... import exists
from ... import false
from ... import func
from ... import Integer
from ... import literal
from ... import literal_column
from ... import null
from ... import select
from ... import String
from ... import testing
from ... import text
from ... import true
from ... import tuple_
from ... import union
from ... import util
class CollateTest(fixtures.TablesTest):
    """Round-trip test of ORDER BY with an explicit COLLATE clause."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(100)),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "data": "collate data1"},
                {"id": 2, "data": "collate data2"},
            ],
        )

    def _assert_result(self, select, result):
        # Execute against the configured backend and compare full result sets.
        eq_(config.db.execute(select).fetchall(), result)

    @testing.requires.order_by_collation
    def test_collate_order_by(self):
        # The requirements system supplies a collation name valid for the
        # backend under test.
        collation = testing.requires.get_order_by_collation(testing.config)

        self._assert_result(
            select([self.tables.some_table]).order_by(
                self.tables.some_table.c.data.collate(collation).asc()
            ),
            [(1, "collate data1"), (2, "collate data2")],
        )
class OrderByLabelTest(fixtures.TablesTest):
    """Test the dialect sends appropriate ORDER BY expressions when
    labels are used.

    This essentially exercises the "supports_simple_order_by_label"
    setting.
    """

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
            Column("q", String(50)),
            Column("p", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2, "q": "q1", "p": "p3"},
                {"id": 2, "x": 2, "y": 3, "q": "q2", "p": "p2"},
                {"id": 3, "x": 3, "y": 4, "q": "q3", "p": "p1"},
            ],
        )

    def _assert_result(self, select, result):
        # Execute against the configured backend and compare full result sets.
        eq_(config.db.execute(select).fetchall(), result)

    def test_plain(self):
        # ORDER BY a plainly labeled column.
        table = self.tables.some_table
        lx = table.c.x.label("lx")
        self._assert_result(select([lx]).order_by(lx), [(1,), (2,), (3,)])

    def test_composed_int(self):
        # ORDER BY a labeled arithmetic expression.
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label("lx")
        self._assert_result(select([lx]).order_by(lx), [(3,), (5,), (7,)])

    def test_composed_multiple(self):
        # Two labeled expressions, one ascending and one descending.
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label("lx")
        ly = (func.lower(table.c.q) + table.c.p).label("ly")
        self._assert_result(
            select([lx, ly]).order_by(lx, ly.desc()),
            [(3, util.u("q1p3")), (5, util.u("q2p2")), (7, util.u("q3p1"))],
        )

    def test_plain_desc(self):
        table = self.tables.some_table
        lx = table.c.x.label("lx")
        self._assert_result(
            select([lx]).order_by(lx.desc()), [(3,), (2,), (1,)]
        )

    def test_composed_int_desc(self):
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label("lx")
        self._assert_result(
            select([lx]).order_by(lx.desc()), [(7,), (5,), (3,)]
        )

    @testing.requires.group_by_complex_expression
    def test_group_by_composed(self):
        # GROUP BY the same labeled expression that is used in ORDER BY.
        table = self.tables.some_table
        expr = (table.c.x + table.c.y).label("lx")
        stmt = (
            select([func.count(table.c.id), expr])
            .group_by(expr)
            .order_by(expr)
        )
        self._assert_result(stmt, [(1, 3), (1, 5), (1, 7)])
class LimitOffsetTest(fixtures.TablesTest):
    """Round-trip tests for LIMIT / OFFSET rendering, with literal,
    pre-rendered ("literal binds") and bound parameters."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2},
                {"id": 2, "x": 2, "y": 3},
                {"id": 3, "x": 3, "y": 4},
                {"id": 4, "x": 4, "y": 5},
            ],
        )

    def _assert_result(self, select, result, params=()):
        # Execute against the configured backend and compare full result sets.
        eq_(config.db.execute(select, params).fetchall(), result)

    def test_simple_limit(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(2),
            [(1, 1, 2), (2, 2, 3)],
        )

    @testing.requires.offset
    def test_simple_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).offset(2),
            [(3, 3, 4), (4, 4, 5)],
        )

    @testing.requires.offset
    def test_simple_limit_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(2).offset(1),
            [(2, 2, 3), (3, 3, 4)],
        )

    @testing.requires.offset
    def test_limit_offset_nobinds(self):
        """test that 'literal binds' mode works - no bound params."""

        table = self.tables.some_table
        stmt = select([table]).order_by(table.c.id).limit(2).offset(1)
        sql = stmt.compile(
            dialect=config.db.dialect, compile_kwargs={"literal_binds": True}
        )
        sql = str(sql)

        self._assert_result(sql, [(2, 2, 3), (3, 3, 4)])

    @testing.requires.bound_limit_offset
    def test_bound_limit(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(bindparam("l")),
            [(1, 1, 2), (2, 2, 3)],
            params={"l": 2},
        )

    @testing.requires.bound_limit_offset
    def test_bound_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).offset(bindparam("o")),
            [(3, 3, 4), (4, 4, 5)],
            params={"o": 2},
        )

    @testing.requires.bound_limit_offset
    def test_bound_limit_offset(self):
        # Reconstructed from a garbled span: LIMIT and OFFSET both bound.
        table = self.tables.some_table
        self._assert_result(
            select([table])
            .order_by(table.c.id)
            .limit(bindparam("l"))
            .offset(bindparam("o")),
            [(2, 2, 3), (3, 3, 4)],
            params={"l": 2, "o": 1},
        )
class CompoundSelectTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2},
{"id": 2, "x": 2, "y": 3},
{"id": 3, "x": 3, "y": 4},
{"id": 4, "x": 4, "y": 5},
],
)
def _assert_result(self, select, result, params=()):
eq_(config.db.execute(select, params).fetchall(), result)
def test_plain_union(self):
table = self.tables.some_table
s1 = select([table]).where(table.c.id == 2)
s2 = select([table]).where(table.c.id == 3)
u1 = union(s1, s2)
self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])
def test_select_f |
amirgeva/commit | utils.py | Python | gpl-2.0 | 3,355 | 0.000894 | import os
import subprocess
import select
from PyQt5 import QtGui, QtWidgets
# Directory that loadIcon() resolves icon names against; set via setIconsDir().
iconsDir = '.'


def setIconsDir(dir):
    """Set the module-wide directory that loadIcon() resolves names against."""
    global iconsDir
    iconsDir = dir


def loadIcon(name):
    """Load *name* (a .png basename, extension optional) from iconsDir as a QIcon."""
    if not name.endswith('.png'):
        name = name + ".png"
    path = os.path.join(iconsDir, name)
    # print "Loading '{}'".format(path)
    return QtGui.QIcon(path)
def message(msg):
    """Show *msg* to the user in a blocking modal QMessageBox."""
    m = QtWidgets.QMessageBox()
    m.setText(msg)
    m.exec_()


def errorMessage(msg):
    """Show an error to the user (currently identical to message())."""
    message(msg)
def checkFor(cmd):
    """Return True if *cmd* is found on PATH (via `which`), False otherwise.

    Also returns False when `which` itself cannot be executed.
    """
    try:
        # DEVNULL avoids the leaked file handle of open('/dev/null', 'w')
        # that the original never closed.
        res = subprocess.call(['which', cmd], stdout=subprocess.DEVNULL)
        return res == 0
    except OSError:
        return False
def appendOutput(output, text):
    """Append *text* as a new line at the end of the plain-text widget."""
    # QPlainTextEdit.appendPlainText already places the text at the end;
    # no manual cursor juggling is required.
    output.appendPlainText(text)
def appendColorLine(output, line, color):
    """Append *line* to *output*, rendered in *color* (a '#rrggbb' string).

    NOTE(review): assumes *line* is UTF-8 bytes (subprocess output);
    passing a str would raise on .decode() -- confirm all callers.
    """
    line = line.decode('utf8')
    # Move to the end and switch the char format before appending so the
    # new text picks up the requested foreground color.
    c = output.textCursor()
    c.movePosition(QtGui.QTextCursor.End)
    f = c.charFormat()
    f.setForeground(QtGui.QBrush(QtGui.QColor(color)))
    c.setCharFormat(f)
    output.setTextCursor(c)
    output.appendPlainText(line)
def appendLine(output, line):
    """Classify a build-output *line* and append it color-coded to *output*.

    Compiler / archiver / linker invocations are rewritten to short
    human-readable summaries (dark blue); lines containing 'error' are
    shown in red; everything else in black.

    NOTE(review): callers in this module pass bytes from subprocess pipes;
    under Python 3 the str comparisons below would raise TypeError --
    confirm the intended Python version.
    """
    # NOTE(review): find(...) > 0 misses a match at column 0; probably
    # should be >= 0 -- confirm before changing.
    if line.find('Nothing to be done for') > 0:
        return
    if line != '':
        parts = line.split(' ')
        color = '#000000'
        # '<cc> -c ... <file>' -> "Compiling <file>"
        if len(parts) > 2 and parts[1] == '-c':
            line = 'Compiling ' + parts[-1]
            color = '#000080'
        elif line.startswith('ar cr '):
            # Static library creation: "ar cr <path/libname> ..."
            libname = (parts[2].split('/'))[-1]
            line = 'Creating library {}'.format(libname)
            color = '#000080'
        elif line.startswith('g++ -o'):
            # Link step: "g++ -o <path/appname> ..."
            appname = (parts[2].split('/'))[-1]
            line = 'Linking {}'.format(appname)
            color = '#000080'
        lower = line.lower()
        if lower.find('error') > 0:
            appendColorLine(output, line, '#ff0000')
        else:
            appendColorLine(output, line, color)
def runcmd(dir, cmdlist):
    """Start *cmdlist* in directory *dir*; stdout and stderr are piped."""
    return subprocess.Popen(
        cmdlist,
        shell=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=dir,
    )


def run(dir, cmd, *args):
    """Start `cmd args...` in *dir* and return the Popen object."""
    return runcmd(dir, [cmd] + list(args))


def call(dir, cmd, *args):
    """Run `cmd args...` in *dir* to completion; return (stdout, stderr)."""
    proc = run(dir, cmd, *args)
    return proc.communicate()
def execute(output, dir, cmd, *args):
    """Run *cmd* in *dir*, streaming stdout/stderr lines into *output*.

    Each line is classified and colorized via appendLine(); the raw lines
    are also collected and returned as a list.
    """
    output.clear()
    cmdlist = [cmd] + list(args)
    text = []
    p = subprocess.Popen(cmdlist, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dir)
    while True:
        # Multiplex the two pipes with select() so neither blocks the other.
        reads = [p.stdout.fileno(), p.stderr.fileno()]
        rc = select.select(reads, [], [])
        for fd in rc[0]:
            if fd == p.stdout.fileno():
                line = p.stdout.readline().strip()
                appendLine(output, line)
                text.append(line)
            if fd == p.stderr.fileno():
                line = p.stderr.readline().strip()
                appendLine(output, line)
                text.append(line)
        # NOTE(review): data still buffered in the pipes when the process
        # exits may be dropped by this break -- confirm acceptable.
        if p.poll() != None:
            break
    return text
def findLine(path, prefix, removePrefix=False):
    """Return the first line of *path* that starts with *prefix*, stripped.

    With removePrefix=True the prefix itself is cut off the returned line.
    Returns '' when no line matches.
    """
    # 'with' closes the file even on early return (the original leaked the
    # file handle).
    with open(path, "r") as f:
        for line in f:
            if line.startswith(prefix):
                line = line.strip()
                if not removePrefix:
                    return line
                return line[len(prefix):]
    return ''
|
open-power-ref-design-toolkit/cluster-genesis | scripts/python/inv_add_switches.py | Python | apache-2.0 | 1,219 | 0.00082 | #!/usr/bin/env python3
# Copyright 2019 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissio | ns and
# limitations under the License.
import sys
fro | m lib.inventory import Inventory
from lib.logger import Logger
class InventoryAddSwitches(object):
    """Thin wrapper: load the Inventory from *inv_file* and register its switches."""

    def __init__(self, log, inv_file):
        inv = Inventory(log, inv_file)
        inv.add_switches()
if __name__ == '__main__':
    # Arg1: inventory file
    # Arg2: log level
    LOG = Logger(__file__)
    if len(sys.argv) != 3:
        # No need to raise and catch a dummy exception just to log and exit.
        LOG.error('Invalid argument count')
        sys.exit(1)
    INV_FILE = sys.argv[1]
    LOG.set_level(sys.argv[2])
    InventoryAddSwitches(LOG, INV_FILE)
|
obi-two/Rebelion | data/scripts/templates/object/tangible/ship/crafted/weapon/shared_wpn_heavy_blaster.py | Python | mit | 474 | 0.046414 | #### NOT | ICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the heavy-blaster ship-weapon tangible template."""
    result = Tangible()

    result.template = "object/tangible/ship/crafted/weapon/shared_wpn_heavy_blaster.iff"
    result.attribute_template_id = 8
    result.stfName("space_crafting_n","wpn_heavy_blaster")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
ivotkv/redmsg-logger | redmsg_logger/logger.py | Python | mit | 2,648 | 0.005665 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Ivo Tzvetkov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function, unicode_literals, absolute_import
import sys
from redmsg import Subscriber
from .handlers.sqlalchemy import SQLAlchemyHandler
# Registry mapping config 'handler' names to their handler classes.
handlers = {
    'sqlalchemy': SQLAlchemyHandler
}
class Logger(object):
    """Subscribes to a RedMsg channel and feeds every message to a handler."""

    def __init__(self, config):
        # config keys: 'channel', 'redmsg' (Subscriber kwargs), 'handler'
        # (a name in `handlers`) and a matching per-handler config section.
        self.channel = config['channel']
        self.subscriber = Subscriber(**config['redmsg'])
        self.handler = handlers[config['handler']](config[config['handler']])

    def start(self):
        """Consume messages forever, resuming after the last stored txid."""
        self.subscriber.subscribe(self.channel)
        latest_txid = self.handler.get_latest_txid(self.channel)
        # Resume one past the stored transaction id when history exists.
        generator = self.subscriber.listen() if latest_txid is None else \
                    self.subscriber.listen_from(latest_txid + 1, ignore_missing=True)
        for message in generator:
            try:
                self.handler.handle(message)
            except Exception as e:
                # Log and keep consuming; one bad message must not stop the loop.
                # NOTE(review): writing bytes to sys.stderr fails on Python 3;
                # confirm this runs under Python 2 or drop the .encode().
                sys.stderr.write('{0}: {1}: {2}\n'.format(e.__class__.__name__, e, message).encode('utf-8'))
def main():
    """CLI entry point: load the YAML config file and run the logger forever."""
    import yaml
    from argparse import ArgumentParser
    arg_parser = ArgumentParser()
    arg_parser.description = 'RedMsg logging service.'
    arg_parser.add_argument('--config', metavar='FILE', default='config.yaml',
                            help='path to config file (default: %(default)s)')
    args = arg_parser.parse_args()
    with open(args.config, 'r') as file:
        # safe_load avoids arbitrary object construction from the config
        # file (yaml.load without an explicit Loader is unsafe/deprecated).
        config = yaml.safe_load(file)
    logger = Logger(config)
    logger.start()

if __name__ == '__main__':
    main()
|
carlos-lopez-garces/mapnik-trunk | scons/scons-local-1.2.0/SCons/Tool/JavaCommon.py | Python | lgpl-2.1 | 12,539 | 0.001436 | """SCons.Tool.JavaCommon
Stuff for processing Java.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/JavaCommon.py 3842 2008/12/20 22:59:52 scons"
import os
import os.path
im | port re
import string
java_parsing = 1
default_java_version = '1.4'
if java_parsing:
# Parse Java files for class names.
#
# This is a really cool parser from Charles Crain
# that finds appropriate class names in Java source.
# A regular expression that will find, in a java file:
# newlines;
# double-backslashes;
# a single-line comment "//";
# single or double quotes preceeded by a backslash;
# single quotes, double quotes, open or close braces, semi-colons,
# periods, open or close parentheses;
# floating-point numbers;
# any alphanumeric token (keyword, class name, specifier);
# any alphanumeric token surrounded by angle brackets (generics);
# the multi-line comment begin and end tokens /* and */;
# array declarations "[]".
_reToken = re.compile(r'(\n|\\\\|//|\\[\'"]|[\'"\{\}\;\.\(\)]|' +
r'\d*\.\d*|[A-Za-z_][\w\$\.]*|<[A-Za-z_]\w+>|' +
r'/\*|\*/|\[\])')
class OuterState:
"""The initial state for parsing a Java file for classes,
interfaces, and anonymous inner classes."""
def __init__(self, version=default_java_version):
if not version in ('1.1', '1.2', '1.3','1.4', '1.5', '1.6'):
msg = "Java version %s not supported" % version
raise NotImplementedError, msg
self.version = version
self.listClasses = []
self.listOutputs = []
self.stackBrackets = []
self.brackets = 0
self.nextAnon = 1
self.localClasses = []
self.stackAnonClassBrackets = []
self.anonStacksStack = [[0]]
self.package = None
def trace(self):
pass
def __getClassState(self):
try:
return self.classState
except AttributeError:
ret = ClassState(self)
self.classState = ret
return ret
def __getPackageState(self):
try:
return self.packageState
except AttributeError:
ret = PackageState(self)
self.packageState = ret
return ret
def __getAnonClassState(self):
try:
return self.anonState
except AttributeError:
self.outer_state = self
ret = SkipState(1, AnonClassState(self))
self.anonState = ret
return ret
def __getSkipState(self):
try:
return self.skipState
except AttributeError:
ret = SkipState(1, self)
self.skipState = ret
return ret
def __getAnonStack(self):
return self.anonStacksStack[-1]
def openBracket(self):
self.brackets = self.brackets + 1
def closeBracket(self):
self.brackets = self.brackets - 1
if len(self.stackBrackets) and \
self.brackets == self.stackBrackets[-1]:
self.listOutputs.append(string.join(self.listClasses, '$'))
self.localClasses.pop()
self.listClasses.pop()
self.anonStacksStack.pop()
self.stackBrackets.pop()
if len(self.stackAnonClassBrackets) and \
self.brackets == self.stackAnonClassBrackets[-1]:
self.__getAnonStack().pop()
self.stackAnonClassBrackets.pop()
def parseToken(self, token):
if token[:2] == '//':
return IgnoreState('\n', self)
elif token == '/*':
return IgnoreState('*/', self)
elif token == '{':
self.openBracket()
elif token == '}':
self.closeBracket()
elif token in [ '"', "'" ]:
return IgnoreState(token, self)
elif token == "new":
# anonymous inner class
if len(self.listClasses) > 0:
return self.__getAnonClassState()
return self.__getSkipState() # Skip the class name
elif token in ['class', 'interface', 'enum']:
if len(self.listClasses) == 0:
self.nextAnon = 1
self.stackBrackets.append(self.brackets)
return self.__getClassState()
elif token == 'package':
return self.__getPackageState()
elif token == '.':
# Skip the attribute, it might be named "class", in which
# case we don't want to treat the following token as
# an inner class name...
return self.__getSkipState()
return self
def addAnonClass(self):
"""Add an anonymous inner class"""
if self.version in ('1.1', '1.2', '1.3', '1.4'):
clazz = self.listClasses[0]
self.listOutputs.append('%s$%d' % (clazz, self.nextAnon))
elif self.version in ('1.5', '1.6'):
self.stackAnonClassBrackets.append(self.brackets)
className = []
className.extend(self.listClasses)
self.__getAnonStack()[-1] = self.__getAnonStack()[-1] + 1
for anon in self.__getAnonStack():
className.append(str(anon))
self.listOutputs.append(string.join(className, '$'))
self.nextAnon = self.nextAnon + 1
self.__getAnonStack().append(0)
def setPackage(self, package):
self.package = package
class AnonClassState:
"""A state that looks for anonymous inner classes."""
def __init__(self, old_state):
# outer_state is always an instance of OuterState
self.outer_state = old_state.outer_state
self.old_state = old_state
self.brace_level = 0
def parseToken(self, token):
# This is an anonymous class if and only if the next
# non-whitespace token is a bracket. Everything between
# braces should be parsed as normal java code.
if token[:2] == '//':
return IgnoreState('\n', self)
elif token == '/*':
return IgnoreState('*/', self)
elif token == '\n':
return self
elif token[0] == '<' and token[-1] == '>':
return self
elif token == '(':
self.brace_level = self.brace_level + 1
return self
if self.brace_level > 0:
if token == 'new':
# look further for anonymous inner class
|
marios-zindilis/musicbrainz-django-models | musicbrainz_django_models/models/recording_rating_raw.py | Python | gpl-2.0 | 1,133 | 0.000883 | """
.. module:: recording_rating_raw
The **Recording Rating Raw** Model.
PostgreSQL Definition
---------------------
The :code:`recording_rating_raw` table is defined in the MusicBrainz Server as:
.. code-block:: sql
CREATE TABLE recording_rating_raw
(
recording INTEGER NOT NULL, -- PK, references recording.id
editor INTEGER NOT NULL, -- PK, references editor.id
            rating              SMALLINT NOT NULL CHECK (rating >= 0 AND rating <= 100)
);
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from .abstract__model_rating_raw import abstract__model_rating_raw
@python_2_unicode_compatible
class recording_rating_raw(abstract__model_rating_raw):
    """
    Not all parameters are listed here, only those that present some interest
    in their Django implementation.

    :param recording: References :class:`recording`.
    """

    # NOTE(review): the SQL schema above declares a composite PK of
    # (recording, editor); here `recording` alone is the primary key --
    # confirm this is the intended Django mapping.
    recording = models.OneToOneField('recording', primary_key=True)

    def __str__(self):
        return 'Recording Rating Raw'

    class Meta:
        db_table = 'recording_rating_raw'
|
jestlabs/polyglot | polyglot/heuristics.py | Python | mit | 1,381 | 0.029689 | import polyglot
import yaml
class Heuristics(object):
    """Disambiguate a source file among several candidate languages.

    Loads per-language marker strings from syntax.yml.  disambiguate()
    reports the first candidate language one of whose markers appears in
    the file, or None when no marker matches (the file could not be
    determined to be of a particular language).
    """

    def __init__(self, path, possibleLanguages):
        self.syntaxFile = polyglot.Polyglot.tryOpenFile('syntax.yml')
        # {language1: [bits_of_syntax1, ...], language2: [...], ...}
        self.syntaxBits = yaml.safe_load(self.syntaxFile)
        # Keep the verdict on the instance; the original discarded the
        # return value of disambiguate(), making it unreachable to callers.
        self.language = self.disambiguate(path, possibleLanguages)

    def disambiguate(self, path, possibleLanguages):
        """Return the first language in *possibleLanguages* with a marker
        string present in the file at *path*, or None."""
        # Read the file once.  The original re-read an already-exhausted
        # file handle per marker, so every marker after the first within a
        # language was compared against the empty string.
        with open(path) as sourceCode:
            contents = sourceCode.read()
        for lang in possibleLanguages:
            # Languages without stored markers cannot be matched.
            for marker in self.syntaxBits.get(lang, ()):
                if marker in contents:
                    return lang
        return None
|
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/PyAMF-0.7.2/pyamf/sol.py | Python | mit | 5,052 | 0.000198 | # Copyright (c) The PyAMF Project.
# See LICENSE | .txt for details.
"""
Local Shared Object implementation.
Local Shared Object (LSO), sometimes | known as Adobe Flash cookies, is a
cookie-like data entity used by the Adobe Flash Player and Gnash. The players
allow web content to read and write LSO data to the computer's local drive on
a per-domain basis.
@see: U{Local Shared Object on WikiPedia
<http://en.wikipedia.org/wiki/Local_Shared_Object>}
@since: 0.1
"""
import pyamf
from pyamf import util
#: Magic Number - 2 bytes
HEADER_VERSION = '\x00\xbf'
#: Marker - 10 bytes
HEADER_SIGNATURE = 'TCSO\x00\x04\x00\x00\x00\x00'
#: Padding - 4 bytes
PADDING_BYTE = '\x00'
def decode(stream, strict=True):
    """
    Parse a SOL byte stream into its root name and stored values.

    ``strict`` mode additionally verifies that the body length recorded in
    the header matches the actual stream length.

    @param stream: The SOL data, either a
        L{BufferedByteStream<pyamf.util.BufferedByteStream>} or anything it
        can wrap.
    @param strict: Enforce spec-level header consistency checks.
    @return: A C{tuple} of (C{root_name}, C{dict} of name/value pairs).
    @raise pyamf.DecodeError: On any malformed header, signature or padding.
    """
    if not isinstance(stream, util.BufferedByteStream):
        stream = util.BufferedByteStream(stream)

    # Magic number (2 bytes).
    if stream.read(2) != HEADER_VERSION:
        raise pyamf.DecodeError('Unknown SOL version in header')

    # Declared body length (4 bytes) -- must match what is actually left.
    body_length = stream.read_ulong()

    if strict and stream.remaining() != body_length:
        raise pyamf.DecodeError('Inconsistent stream header length')

    # Marker (10 bytes).
    if stream.read(10) != HEADER_SIGNATURE:
        raise pyamf.DecodeError('Invalid signature')

    # Length-prefixed UTF-8 root name.
    root_name = stream.read_utf8_string(stream.read_ushort())

    # Three padding bytes precede the encoding marker.
    if stream.read(3) != PADDING_BYTE * 3:
        raise pyamf.DecodeError('Invalid padding read')

    # The next byte selects the AMF encoding used for the body.
    decoder = pyamf.get_decoder(stream.read_uchar())
    decoder.stream = stream

    values = {}

    while not stream.at_eof():
        name = decoder.readString()
        value = decoder.readElement()

        # Every name/value pair is terminated by one padding byte.
        if stream.read(1) != PADDING_BYTE:
            raise pyamf.DecodeError('Missing padding byte')

        values[name] = value

    return (root_name, values)
def encode(name, values, strict=True, encoding=pyamf.AMF0):
    """
    Build a SharedObject byte stream from a root name and a value mapping.

    @param name: The root name of the SharedObject.
    @param values: A C{dict} of name/value pairs to serialise into the body.
    @param strict: Back-patch the header with an accurate body length so the
        output is as spec compatible as possible.
    @param encoding: AMF encoding to use for the body.
    @return: A SharedObject encoded stream.
    @rtype: L{BufferedByteStream<pyamf.util.BufferedByteStream>}, a file like
        object.
    """
    encoder = pyamf.get_encoder(encoding)
    stream = encoder.stream

    # Magic number first.
    stream.write(HEADER_VERSION)

    if strict:
        # Reserve 4 bytes for the body length; patched once the body exists.
        length_pos = stream.tell()
        stream.write_ulong(0)

    stream.write(HEADER_SIGNATURE)

    # Length-prefixed UTF-8 root name.
    encoded_name = name.encode('utf-8')
    stream.write_ushort(len(encoded_name))
    stream.write(encoded_name)

    # Three padding bytes, then the encoding marker byte.
    stream.write(PADDING_BYTE * 3)
    stream.write_uchar(encoding)

    for key, value in values.iteritems():
        encoder.serialiseString(key)
        encoder.writeElement(value)

        # One padding byte terminates each pair.
        stream.write(PADDING_BYTE)

    if strict:
        # Go back and record the true body length (everything after the
        # 4-byte length field itself).
        stream.seek(length_pos)
        stream.write_ulong(stream.remaining() - 4)

    stream.seek(0)

    return stream
def load(name_or_file):
    """
    Loads a sol file and returns a L{SOL} object.

    @param name_or_file: Name of file, or a readable file-object.
    @type name_or_file: C{string} or file-like
    @raise ValueError: C{name_or_file} is neither a name nor readable.
    """
    f = name_or_file
    opened = False

    if isinstance(name_or_file, basestring):
        f = open(name_or_file, 'rb')
        opened = True
    elif not hasattr(f, 'read'):
        raise ValueError('Readable stream expected')

    try:
        # decode may raise pyamf.DecodeError; the finally guarantees a file
        # we opened ourselves is closed either way (the original leaked it).
        name, values = decode(f.read())
    finally:
        if opened:
            f.close()

    s = SOL(name)

    for n, v in values.iteritems():
        s[n] = v

    return s
def save(sol, name_or_file, encoding=pyamf.AMF0):
    """
    Writes a L{SOL} object to C{name_or_file}.

    @param sol: The L{SOL} object providing the root name and values.
    @param name_or_file: Name of file or writable file-object to write to.
    @param encoding: AMF encoding type.
    @raise ValueError: C{name_or_file} is neither a name nor writable.
    """
    f = name_or_file
    opened = False

    if isinstance(name_or_file, basestring):
        f = open(name_or_file, 'wb+')
        opened = True
    elif not hasattr(f, 'write'):
        raise ValueError('Writable stream expected')

    try:
        # encode may raise; the finally guarantees a file we opened ourselves
        # is closed either way (the original leaked it on error).
        f.write(encode(sol.name, sol, encoding=encoding).getvalue())
    finally:
        if opened:
            f.close()
class SOL(dict):
    """
    Convenience wrapper for the contents of a C{sol} file.

    Behaves as a plain C{dict} of the stored name/value pairs while keeping
    the shared object's root name in C{name}.
    """

    def __init__(self, name):
        # Root name of the shared object, written into the SOL header.
        self.name = name

    def save(self, name_or_file, encoding=pyamf.AMF0):
        """Persist this object via the module-level L{save} function."""
        save(self, name_or_file, encoding)

    def __repr__(self):
        class_name = self.__class__.__name__
        contents = dict.__repr__(self)

        return '<%s %s %s at 0x%x>' % (class_name, self.name, contents,
            id(self))


LSO = SOL
|
ESS-LLP/erpnext-medical | erpnext/stock/doctype/delivery_note/delivery_note_dashboard.py | Python | gpl-3.0 | 674 | 0.045994 | from frappe import _
def get_data():
    """Dashboard configuration for the Delivery Note doctype.

    Returns the mapping frappe uses to render linked-document groups on the
    Delivery Note form: which fieldname links back to this document, per
    doctype overrides for that fieldname, and the grouped transaction lists.
    """
    transaction_groups = [
        {
            'label': _('Related'),
            'items': ['Sales Invoice', 'Packing Slip', 'Delivery Trip']
        },
        {
            'label': _('Reference'),
            'items': ['Sales Order', 'Quality Inspection']
        },
        {
            'label': _('Returns'),
            'items': ['Stock Entry']
        },
        {
            'label': _('Subscription'),
            'items': ['Auto Repeat']
        },
    ]

    return {
        # Field on linked doctypes that points back at this Delivery Note.
        'fieldname': 'delivery_note',
        # Doctypes whose link field uses a different name.
        'non_standard_fieldnames': {
            'Stock Entry': 'delivery_note_no',
            'Quality Inspection': 'reference_name',
            'Auto Repeat': 'reference_document',
        },
        # Links resolved through child-table rows rather than a direct field.
        'internal_links': {
            'Sales Order': ['items', 'against_sales_order'],
        },
        'transactions': transaction_groups,
    }
olduvaihand/ProjectEuler | src/python/problem388.py | Python | mit | 583 | 0.003442 | # -*- coding: utf-8 -*-
# ProjectEuler/src | /python/problem388.py
#
# Distinct Lines
# ==============
# Published on Saturday, 9th June 2012, 02:00 pm
#
# Consider all lattice points (a,b,c) with 0 <= a,b,c <= N. From the origin
# O(0,0,0) all lines are drawn to the other lattice points. Let D(N) be the
# number of distinct such lines. You are given that D(1 000 000) =
# 831909254469114121. Find D(10^10). Give as your answer the first nine
# digits followed by the last nine digits.
import projecteuler as pe
def main():
    # TODO: solution for Project Euler problem 388 ("Distinct Lines") is not
    # yet implemented; this is a placeholder entry point.
    pass

if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.