repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
TwolDE2/enigma2 | lib/python/Components/Element.py | Python | gpl-2.0 | 2,950 | 0.028136 | from Tools.CList import CList
# down up
# Render Converter Converter Source
# a bidirectional connection
def cached(f):
    """Decorator memoising a nullary method's result in ``self.cache``.

    When ``self.cache`` is None (i.e. outside a change-notification
    cycle) the wrapped method is evaluated directly; otherwise the
    result is stored under the method name so repeated reads during one
    notification cycle hit the cache.
    """
    import functools

    name = f.__name__

    @functools.wraps(f)  # preserve __name__/__doc__ of the wrapped method
    def wrapper(self):
        cache = self.cache
        if cache is None:
            # Caching disabled: evaluate every time.
            return f(self)
        if name not in cache:
            # The True flag only marks the slot as filled; the value
            # itself is kept at index 1.
            cache[name] = (True, f(self))
        return cache[name][1]
    return wrapper
class ElementError(Exception):
    """Error raised for invalid Element operations.

    The message is kept in ``msg`` for backward compatibility and is
    also passed to ``Exception.__init__`` so ``args``, ``repr()`` and
    pickling behave like a standard exception.
    """

    def __init__(self, message):
        super(ElementError, self).__init__(message)
        self.msg = message

    def __str__(self):
        return self.msg
class Element(object):
 """Base node of the Render/Converter/Source pipeline.

 Each element sits in a bidirectional chain: "upstream" sources feed
 data down to "downstream" renderers.  Change notifications are pushed
 downstream; suspend state is propagated back upstream.
 """
 # Change-notification reasons passed as the first tuple entry to changed().
 CHANGED_DEFAULT = 0 # initial "pull" state
 CHANGED_ALL = 1 # really everything changed
 CHANGED_CLEAR = 2 # we're expecting a real update soon. don't bother polling NOW, but clear data.
 CHANGED_SPECIFIC = 3 # second tuple will specify what exactly changed
 CHANGED_POLL = 4 # a timer expired
 # When True, at most one upstream source may be connected at a time.
 SINGLE_SOURCE = True
 def __init__(self):
  self.downstream_elements = CList()
  self.master = None
  self.sources = []
  self.source = None
  self.__suspended = True
  self.cache = None
 def connectDownstream(self, downstream):
  # The first downstream element connected becomes the "master".
  self.downstream_elements.append(downstream)
  if self.master is None:
   self.master = downstream
 def connectUpstream(self, upstream):
  assert not self.SINGLE_SOURCE or self.source is None
  self.sources.append(upstream)
  # self.source always refers to the most recently added source.
  self.source = upstream
  # Pull initial state from the newly connected source.
  self.changed((self.CHANGED_DEFAULT,))
 def connect(self, upstream):
  # Establish both directions of the up/down link.
  self.connectUpstream(upstream)
  upstream.connectDownstream(self)
 # we disconnect from down to up
 def disconnectAll(self):
  # we should not disconnect from upstream if
  # there are still elements depending on us.
  assert len(self.downstream_elements) == 0, "there are still downstream elements left"
  # Sources don't have a source themselves. don't do anything here.
  for s in self.sources:
   s.disconnectDownstream(self)
  if self.source:
   # sources are owned by the Screen, so don't destroy them here.
   self.destroy()
  self.source = None
  self.sources = []
 def disconnectDownstream(self, downstream):
  self.downstream_elements.remove(downstream)
  if self.master == downstream:
   self.master = None
  # Once nothing depends on us any more, detach from our sources too.
  if len(self.downstream_elements) == 0:
   self.disconnectAll()
 # default action: push downstream
 def changed(self, *args, **kwargs):
  # A fresh cache only lives for the duration of this notification;
  # methods decorated with cached() memoise into it.
  self.cache = {}
  self.downstream_elements.changed(*args, **kwargs)
  self.cache = None
 def setSuspend(self, suspended):
  changed = self.__suspended != suspended
  if not self.__suspended and suspended:
   self.doSuspend(1)
  elif self.__suspended and not suspended:
   self.doSuspend(0)
  self.__suspended = suspended
  # Propagate the new suspend state upstream so sources can re-evaluate.
  if changed:
   for s in self.sources:
    s.checkSuspend()
 suspended = property(lambda self: self.__suspended, setSuspend)
 def checkSuspend(self):
  # Suspended iff there is at least one downstream element and all of
  # them are suspended.  NOTE(review): relies on the Python 2 builtin
  # reduce(); Python 3 would need functools.reduce -- confirm target.
  self.suspended = self.downstream_elements and reduce(lambda x, y: x and y.__suspended, self.downstream_elements, True)
 def doSuspend(self, suspend):
  # Hook for subclasses: 1 = suspend, 0 = resume.
  pass
 def destroy(self):
  # Hook for subclasses: release resources on disconnect.
  pass
|
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/lsa/ForestTrustInformation.py | Python | gpl-2.0 | 1,274 | 0.007064 | # encoding: utf-8
# module samba.dcerpc.lsa
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/lsa.so
# by generator 1.135
""" lsa DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class ForestTrustInformation(__talloc.Object):
    # Generated binding stub for samba.dcerpc.lsa.ForestTrustInformation;
    # the real implementations live in the compiled lsa.so module.
    # no doc
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    def __ndr_pack__(self, *args, **kwargs): # real signature unknown
        """
        S.ndr_pack(object) -> blob
        NDR pack
        """
        pass
    def __ndr_print__(self, *args, **kwargs): # real signature unknown
        """
        S.ndr_print(object) -> None
        NDR print
        """
        pass
    def __ndr_unpack__(self, *args, **kwargs): # real signature unknown
        """
        S.ndr_unpack(class, blob, allow_remaining=False) -> None
        NDR unpack
        """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    # Stub accessors; the generator emits placeholder get/set/del lambdas.
    count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    entries = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
SlashDK/Memify | Main/Memify.py | Python | mit | 4,703 | 0.0202 | import cv2
import sys
import datetime
import numpy
import subprocess
def makeThug(frame,faces,mouths,eyes):
 """Overlay "thug life" glasses on detected eyes and a joint on detected
 mouths, save the result under Saved/<timestamp>.jpg, and return the
 frame converted to grayscale.

 NOTE(review): the slice arithmetic (h2/5 etc.) relies on Python 2
 integer division -- confirm before running under Python 3.
 """
 glasses = cv2.imread("glasses.png",-1) # -1 keeps the alpha channel
 joint = cv2.imread("joint.png",-1)
 t=datetime.datetime.now().time()
 filename="Saved/"+str(t.hour)+str(t.minute)+str(t.second)+".jpg"
 #work only inside faces
 for (x, y, w, h) in faces:
  for (x2, y2, w2, h2) in eyes:
   # Accept only eye boxes inside the upper half of the face and
   # roughly centred horizontally.
   if(x2>x and x2+w2<x+w and y2>y and y2+h2<y+h and y2+h2/2<y+h/2 and x2+w2/2>x+h/3 and x2+w2/2<x+2*h/3):
    glasses=cv2.resize(glasses,(w2,h2))
    # Alpha-blend the glasses slightly below the top of the eye box.
    for c in range(0,3):
     frame[y2+h2/5:y2+6*h2/5, x2:x2+w2, c] = glasses[:,:,c] * (glasses[:,:,3]/255.0) + frame[y2+h2/5:y2+6*h2/5, x2:x2+w2, c] * (1.0 - glasses[:,:,3]/255.0)
    break
  for (x2, y2, w2, h2) in mouths:
   # Accept only mouth boxes in the lower half of the face.
   if(x2>x and x2<x+w and y2>y+h/2 and y2+h2<y+5*h/4 and x2+w2/2>x+h/3 and x2+w2/2<x+2*h/3):
    joint=cv2.resize(joint,(w2,h2))
    # Alpha-blend the joint offset to the right of the mouth box.
    for c in range(0,3):
     frame[y2+h2/4:y2+5*h2/4, x2+w2/2:x2+3*w2/2, c] = joint[:,:,c] * (joint[:,:,3]/255.0) + frame[y2+h2/4:y2+5*h2/4, x2+w2/2:x2+3*w2/2, c] * (1.0 - joint[:,:,3]/255.0)
    break
 cv2.imwrite(filename,frame)
 return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
def makeVideo(frame,w,h,mkey):
 """Write a zoom-in animation of *frame* to output.mov, mux it with the
 Final.mp4 audio track via ffmpeg into result.mkv, and play it in VLC.

 NOTE(review): frame indices use Python 2 integer division (i/2) and
 the legacy cv2.cv API -- confirm the OpenCV 2.x / Python 2 environment.
 """
 fps=25
 capSize = (int(w),int(h))
 fourcc=cv2.cv.CV_FOURCC('m','p','4','v')
 out = cv2.VideoWriter()
 success = out.open('output.mov',fourcc,fps,capSize,True)
 # The processed frame is grayscale; the writer expects 3 channels.
 frame = cv2.cvtColor(frame,cv2.COLOR_GRAY2RGB)
 # for i in range(100):
 # temp=cv2.resize(frame,(int(w)+int(w*i/100),int(h)+int(h*i/100)))
 # #tempFrame = temp[i/2:int(h)+i/2,i/2:int(w)+i/2]
 # tempFrame = temp[int((h+h*i/100)/2 - h/2):int((h+h*i/100)/2 + h/2) + 10,
 # int((w+w*w/100)/2 - w/2):int((w+w*i/100)/2 + w/2) +10]
 # tempFrame=tempFrame[0:int(h),0:int(w)]
 # out.write(tempFrame)
 # for i in range (65):
 # out.write(tempFrame)
 if mkey == 'p':
  # Zoom effect: progressively upscale, then crop back to original size.
  for i in range(162):
   temp=cv2.resize(frame,(int(w)+2*i,int(h)+2*i))
   tempFrame = temp[i/2:int(h)+i/2,i/2:int(w)+i/2]
   out.write(tempFrame)
 out.release()
 out=None
 #add audio and make final video
 cmd = 'ffmpeg -y -i Final.mp4 -r 30 -i output.mov -filter:a aresample=async=1 -c:a flac -c:v copy -shortest result.mkv'
 subprocess.call(cmd, shell=True) # muxing done
 print('Muxing Done')
 cmd = '~/../../Applications/VLC.app/Contents/MacOS/VLC "result.mkv" -f --play-and-stop'
 subprocess.call(cmd, shell=True)
#realpython.com
def capVideo():
    """Run the webcam loop.

    Shows the camera feed with rectangles around detected faces, mouths
    and eyes.  Pressing 'p' freezes the current detections, builds the
    meme image/video via makeThug()/makeVideo() and exits; pressing 'q'
    quits without producing anything.

    Fix: removed dataset-join '|' artifacts that had split the
    cv2.cvtColor call and the mouths assignment, restoring valid syntax.
    """
    # initialize cascades
    faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    mouthCascade = cv2.CascadeClassifier("Mouth.xml")
    eyesCascade = cv2.CascadeClassifier("frontalEyes35x16.xml")

    video_capture = cv2.VideoCapture(0)

    while True:
        # Capture frame-by-frame: one copy to draw on, one clean copy
        # kept for the final composited image.
        ret, frame = video_capture.read()
        ret, orig = video_capture.read()

        # Grayscale copy for the Haar cascades.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=10,
            minSize=(30, 30),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )
        mouths = mouthCascade.detectMultiScale(
            gray,
            scaleFactor=1.7,
            minNeighbors=10,
            minSize=(30, 30),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )
        eyes = eyesCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=10,
            minSize=(30, 30),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )

        # Draw rectangles around the detections (green=face, cyan=mouth,
        # red=eyes) for live feedback.
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        for (x, y, w, h) in mouths:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 1)
        for (x, y, w, h) in eyes:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 1)

        # Display the resulting frame
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('p'):
            w, h = video_capture.get(3), video_capture.get(4)
            finalImage = makeThug(orig, faces, mouths, eyes)
            makeVideo(finalImage, w, h, 'p')
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()


capVideo()
|
Pyrrvs/txdbus | txdbus/test/test_message.py | Python | mit | 815 | 0.006135 | import os
import unittest
from txdbus import error, message
class MessageTester(unittest.TestCase):
    """Unit tests for txdbus message marshalling edge cases."""

    def test_too_long(self):
        # A message whose serialized size exceeds _maxMsgLen must fail
        # to marshal.
        class E(message.ErrorMessage):
            _maxMsgLen = 1

        def c():
            E('foo.bar', 5)
        self.assertRaises(error.MarshallingError, c)

    def test_reserved_path(self):
        # /org/freedesktop/DBus/Local is reserved by the D-Bus spec and
        # may not be used as a method-call path.
        def c():
            message.MethodCallMessage('/org/freedesktop/DBus/Local', 'foo')
        self.assertRaises(error.MarshallingError, c)

    def test_invalid_message_type(self):
        class E(message.ErrorMessage):
            _messageType = 99
        # Use assertRaises so a silently-passing parseMessage fails the
        # test instead of the old assertTrue(False) being swallowed by
        # the surrounding except clause.
        with self.assertRaises(Exception) as ctx:
            message.parseMessage(E('foo.bar', 5).rawMessage)
        self.assertEqual(str(ctx.exception), 'Unknown Message Type: 99')
|
sklam/numba | numba/core/typing/templates.py | Python | bsd-2-clause | 43,345 | 0.000323 | """
Define typing templates
"""
from abc import ABC, abstractmethod
import functools
import sys
import inspect
import os.path
from collections import namedtuple
from collections.abc import Sequence
from types import MethodType, FunctionType
import numba
from numba.core import types, utils
from numba.core.errors import TypingError, InternalError
from numba.core.cpu_options import InlineOptions
# info store for inliner callback functions e.g. cost model
_inline_info = namedtuple('inline_info',
'func_ir typemap calltypes signature')
class Signature(object):
    """
    The signature of a function call or operation, i.e. its argument types
    and return type.
    """

    # XXX Perhaps the signature should be a BoundArguments, instead
    # of separate args and pysig...
    __slots__ = '_return_type', '_args', '_recvr', '_pysig'

    def __init__(self, return_type, args, recvr, pysig=None):
        if isinstance(args, list):
            args = tuple(args)
        self._return_type = return_type
        self._args = args
        self._recvr = recvr
        self._pysig = pysig

    @property
    def return_type(self):
        return self._return_type

    @property
    def args(self):
        return self._args

    @property
    def recvr(self):
        return self._recvr

    @property
    def pysig(self):
        return self._pysig

    def replace(self, **kwargs):
        """Copy and replace the given attributes provided as keyword arguments.
        Returns an updated copy.
        """
        curstate = dict(return_type=self.return_type,
                        args=self.args,
                        recvr=self.recvr,
                        pysig=self.pysig)
        curstate.update(kwargs)
        return Signature(**curstate)

    def __getstate__(self):
        """
        Needed because of __slots__.
        """
        return self._return_type, self._args, self._recvr, self._pysig

    def __setstate__(self, state):
        """
        Needed because of __slots__.
        """
        self._return_type, self._args, self._recvr, self._pysig = state

    def __hash__(self):
        # recvr/pysig deliberately excluded; equal args + return type is
        # sufficient for hash bucketing.
        return hash((self.args, self.return_type))

    def __eq__(self, other):
        if isinstance(other, Signature):
            return (self.args == other.args and
                    self.return_type == other.return_type and
                    self.recvr == other.recvr and
                    self.pysig == other.pysig)
        # BUGFIX: previously fell through and returned None; returning
        # NotImplemented lets Python try the reflected comparison.
        return NotImplemented

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s -> %s" % (self.args, self.return_type)

    @property
    def is_method(self):
        """
        Whether this signature represents a bound method or a regular
        function.
        """
        return self.recvr is not None

    def as_method(self):
        """
        Convert this signature to a bound method signature.
        """
        if self.recvr is not None:
            return self
        sig = signature(self.return_type, *self.args[1:],
                        recvr=self.args[0])

        # Adjust the python signature
        params = list(self.pysig.parameters.values())[1:]
        sig = sig.replace(
            pysig=utils.pySignature(
                parameters=params,
                return_annotation=self.pysig.return_annotation,
            ),
        )
        return sig

    def as_function(self):
        """
        Convert this signature to a regular function signature.
        """
        if self.recvr is None:
            return self
        sig = signature(self.return_type, *((self.recvr,) + self.args))
        return sig

    def as_type(self):
        """
        Convert this signature to a first-class function type.
        """
        return types.FunctionType(self)

    def __unliteral__(self):
        return signature(types.unliteral(self.return_type),
                         *map(types.unliteral, self.args))

    def dump(self, tab=''):
        """Print a human-readable description of this signature."""
        c = self.as_type()._code
        print(f'{tab}DUMP {type(self).__name__} [type code: {c}]')
        print(f'{tab}  Argument types:')
        for a in self.args:
            a.dump(tab=tab + ' | ')
        print(f'{tab}  Return type:')
        self.return_type.dump(tab=tab + ' | ')
        print(f'{tab}END DUMP')

    def is_precise(self):
        # A signature is precise iff all argument types and the return
        # type are precise.
        for atype in self.args:
            if not atype.is_precise():
                return False
        return self.return_type.is_precise()
def make_concrete_template(name, key, signatures):
    """Dynamically build a ConcreteTemplate subclass named *name* that
    types *key* against the given list of *signatures*.
    """
    attrs = {'key': key, 'cases': list(signatures)}
    return type(name, (ConcreteTemplate,), attrs)
def make_callable_template(key, typer, recvr=None):
    """
    Create a callable template with the given key and typer function.
    """
    members = {
        'key': key,
        # generic() simply hands back the user-provided typer function.
        'generic': lambda self: typer,
        'recvr': recvr,
    }
    return type("%s_CallableTemplate" % (key,), (CallableTemplate,), members)
def signature(return_type, *args, **kws):
    """Convenience constructor for a Signature; ``recvr`` is the only
    accepted keyword argument.
    """
    receiver = kws.pop('recvr', None)
    assert not kws
    return Signature(return_type, args, recvr=receiver)
def fold_arguments(pysig, args, kws, normal_handler, default_handler,
                   stararg_handler):
    """
    Given the signature *pysig*, explicit *args* and *kws*, resolve
    omitted arguments and keyword arguments. A tuple of positional
    arguments is returned.
    Various handlers allow to process arguments:
    - normal_handler(index, param, value) is called for normal arguments
    - default_handler(index, param, default) is called for omitted arguments
    - stararg_handler(index, param, values) is called for a "*args" argument
    """
    if isinstance(kws, Sequence):
        # Normalize sequence-of-pairs kws into a dict
        kws = dict(kws)

    # Collect the names of keyword-only parameters; their values arrive
    # appended at the END of *args*.
    params = pysig.parameters
    kwonly = []
    for name, p in params.items():
        if p.kind == p.KEYWORD_ONLY:
            kwonly.append(name)

    if kwonly:
        bind_args = args[:-len(kwonly)]
    else:
        bind_args = args
    bind_kws = kws.copy()
    if kwonly:
        for idx, n in enumerate(kwonly):
            # BUGFIX: keyword-only values occupy the trailing slots of
            # *args*; index from the end (len(args) - len(kwonly) + idx),
            # not from position len(kwonly) + idx, which picked the wrong
            # slot whenever len(args) != 2 * len(kwonly).
            bind_kws[n] = args[len(args) - len(kwonly) + idx]

    # now bind
    ba = pysig.bind(*bind_args, **bind_kws)
    for i, param in enumerate(pysig.parameters.values()):
        name = param.name
        default = param.default
        if param.kind == param.VAR_POSITIONAL:
            # stararg may be omitted, in which case its "default" value
            # is simply the empty tuple
            if name in ba.arguments:
                argval = ba.arguments[name]
                # NOTE: avoid wrapping the tuple type for stararg in another
                # tuple.
                if (len(argval) == 1 and
                        isinstance(argval[0], (types.StarArgTuple,
                                               types.StarArgUniTuple))):
                    argval = tuple(argval[0])
            else:
                argval = ()
            out = stararg_handler(i, param, argval)

            ba.arguments[name] = out
        elif name in ba.arguments:
            # Non-stararg, present
            ba.arguments[name] = normal_handler(i, param, ba.arguments[name])
        else:
            # Non-stararg, omitted
            assert default is not param.empty
            ba.arguments[name] = default_handler(i, param, default)
    # Collect args in the right order
    args = tuple(ba.arguments[param.name]
                 for param in pysig.parameters.values())
    return args
class FunctionTemplate(ABC):
# Set to true t | o disable unsafe cast.
# subclass overide-able
unsafe_casting = True
# Set to true to require exact match without casting.
# subclass overide-able
exact_match_required = False
# Set to true to prefer literal arguments.
# Useful for definitions | that specialize on literal but also support
# non-literals.
# subclass overide-able
prefer_literal = False
def __init__(self, context):
self.context = context
def _select(self, cases, args, kws):
options = {
'unsafe_casting': self.unsafe_ |
rrrrrr8/vnpy | vnpy/api/fcoin/vnfcoin.py | Python | mit | 8,705 | 0.015528 | # encoding: UTF-8
from __future__ import print_function
import hashlib
import hmac
import json
import ssl
import traceback
import base64
from queue import Queue, Empty
from multiprocessing.dummy import Pool
from time import time
from urlparse import urlparse
from copy import copy
from urllib import urlencode
from threading import Thread
import requests
import websocket
from six.moves import input
REST_HOST = 'https://api.fcoin.com/v2'
WEBSOCKET_HOST = 'wss://api.fcoin.com/v2/ws'
########################################################################
class FcoinRestApi(object):
    """REST API client for FCoin.

    Requests are queued via addReq() and executed asynchronously by a
    pool of worker threads; each worker keeps its own long-lived HTTP
    session.  (Fix: repaired dataset-join '|' artifacts that had split
    the processReq name and the generateSignature call; comments
    translated to English; dead debug prints removed.)
    """

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.apiKey = ''
        self.apiSecret = ''

        self.active = False       # worker-loop flag
        self.reqid = 0            # monotonically increasing request id
        self.queue = Queue()      # pending request tuples
        self.pool = None          # worker thread pool
        self.sessionDict = {}     # per-worker requests.Session objects

    #----------------------------------------------------------------------
    def init(self, apiKey, apiSecret):
        """Store the API credentials."""
        self.apiKey = str(apiKey)
        self.apiSecret = str(apiSecret)

    #----------------------------------------------------------------------
    def start(self, n=10):
        """Start *n* worker threads (idempotent)."""
        if self.active:
            return
        self.active = True

        self.pool = Pool(n)
        self.pool.map_async(self.run, range(n))

    #----------------------------------------------------------------------
    def close(self):
        """Stop the workers and wait for them to finish."""
        self.active = False

        if self.pool:
            self.pool.close()
            self.pool.join()

    #----------------------------------------------------------------------
    def addReq(self, method, path, callback, params=None, postdict=None):
        """Queue a request and return its request id."""
        self.reqid += 1
        req = (method, path, callback, params, postdict, self.reqid)
        self.queue.put(req)
        return self.reqid

    #----------------------------------------------------------------------
    def processReq(self, req, i):
        """Execute one queued request on worker *i*."""
        method, path, callback, params, postdict, reqid = req
        url = REST_HOST + path

        timestamp = str(int(time()) * 1000)

        header = {}
        header['FC-ACCESS-TIMESTAMP'] = timestamp
        header['FC-ACCESS-KEY'] = self.apiKey
        header['FC-ACCESS-SIGNATURE'] = self.generateSignature(
            method, url, timestamp, params, postdict)

        try:
            # Reusing a long-lived session is much faster than opening a
            # fresh connection per request.
            session = self.sessionDict[i]
            resp = session.request(method, url, headers=header,
                                   params=params, json=postdict)

            code = resp.status_code
            d = resp.json()

            if code == 200:
                callback(d, reqid)
            else:
                self.onError(code, d)
        except Exception as e:
            # NOTE(review): e.message exists on Python 2 only.
            self.onError(type(e), e.message)

    #----------------------------------------------------------------------
    def run(self, i):
        """Worker loop: drain the queue until the API is closed."""
        self.sessionDict[i] = requests.Session()

        while self.active:
            try:
                req = self.queue.get(timeout=1)
                self.processReq(req, i)
            except Empty:
                pass

    #----------------------------------------------------------------------
    def generateSignature(self, method, path, timestamp, params=None, postdict=None):
        """Build the FC-ACCESS-SIGNATURE header value."""
        # Query parameters are serialized into the request path.
        if params:
            query = urlencode(sorted(params.items()))
            path = path + '?' + query

        if postdict:
            post = urlencode(sorted(postdict.items()))
        else:
            post = ''

        msg = method + path + timestamp + post
        msg = base64.b64encode(msg)  # NOTE(review): str input is Python 2 only

        signature = hmac.new(self.apiSecret, msg, digestmod=hashlib.sha1).digest()
        signature = base64.b64encode(signature)

        return signature

    #----------------------------------------------------------------------
    def onError(self, code, error):
        """Error callback."""
        print('on error')
        print(code, error)

    #----------------------------------------------------------------------
    def onData(self, data, reqid):
        """Generic data callback."""
        print('on data')
        print(data, reqid)
########################################################################
class FcoinWebsocketApi(object):
    """Websocket API"""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor: no connection is opened until start() is called."""
        self.ws = None
        self.thread = None
        self.active = False

    #----------------------------------------------------------------------
    def start(self):
        """Open the websocket and launch the background receive loop."""
        self.ws = websocket.create_connection(WEBSOCKET_HOST,
                                              sslopt={'cert_reqs': ssl.CERT_NONE})

        self.active = True
        self.thread = Thread(target=self.run)
        self.thread.start()

        self.onConnect()

    #----------------------------------------------------------------------
    def reconnect(self):
        """Re-establish the websocket connection after a failure."""
        self.ws = websocket.create_connection(WEBSOCKET_HOST,
                                              sslopt={'cert_reqs': ssl.CERT_NONE})

        self.onConnect()

    #----------------------------------------------------------------------
    def run(self):
        """Receive loop: parse each frame as JSON and hand it to onData."""
        while self.active:
            try:
                self.onData(json.loads(self.ws.recv()))
            except:
                self.onError(traceback.format_exc())
                self.reconnect()

    #----------------------------------------------------------------------
    def close(self):
        """Stop the receive loop and wait for the thread to exit."""
        self.active = False

        if self.thread:
            self.thread.join()

    #----------------------------------------------------------------------
    def onConnect(self):
        """Connection-established callback."""
        print('connected')

    #----------------------------------------------------------------------
    def onData(self, data):
        """Data callback: dump the received fields sorted by key."""
        print('-' * 30)
        shown = data.keys()
        shown.sort()
        for field in shown:
            print(field, data[field])

    #----------------------------------------------------------------------
    def onError(self, msg):
        """Error callback."""
        print(msg)

    #----------------------------------------------------------------------
    def sendReq(self, req):
        """Serialize *req* to JSON and send it over the websocket."""
        self.ws.send(json.dumps(req))
if __name__ == '__main__':
from datetime import datetime
from time import sleep
API_KEY = '88893f839fbd49f4b5fcb03e7c15c015'
API_SECRET = 'ef383295cf4e4c128e6d18d7e9564b12'
# REST测试
rest = FcoinRestApi()
rest.init(API_KEY, API_SECRET)
rest.start(3)
#rest.addReq('GET', '/accounts/balance', rest.onData)
# 查委托
#states = ['submitted', 'partial_filled', 'partial_canceled',
#'filled', 'canceled', 'pending_cancel']
#req = {
#'symbol': 'ethusdt',
#'start': datetime.now().strftime('%Y%m%d'),
#'states': 'submitted',
#'limit': 500
#}
#for i in range(10):
#rest.addReq('GET', '/orders', rest.onData, params=req)
#sleep(2)
req = {
'symbol': 'ethusdt',
'side': 'buy',
'type': 'limit',
'price': 300,
'amount': 0.01
}
rest.addReq('POST', '/ord |
jonparrott/gcloud-python | bigquery/google/cloud/bigquery/table.py | Python | apache-2.0 | 48,513 | 0 | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Tables."""
from __future__ import absolute_import
import copy
import datetime
import operator
import warnings
import six
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
from google.api_core.page_iterator import HTTPIterator
import google.cloud._helpers
from google.cloud.bigquery import _helpers
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.schema import _build_schema_resource
from google.cloud.bigquery.schema import _parse_schema_resource
from google.cloud.bigquery.external_config import ExternalConfig
_NO_PANDAS_ERROR = (
'The pandas library is not installed, please install '
'pandas to use the to_dataframe() function.'
)
_TABLE_HAS_NO_SCHEMA = 'Table has no schema: call "client.get_table()"'
_MARKER = object()
def _reference_getter(table):
    """A :class:`~google.cloud.bigquery.table.TableReference` pointing to
    this table.

    Returns:
        google.cloud.bigquery.table.TableReference: pointer to this table.
    """
    from google.cloud.bigquery import dataset

    return TableReference(
        dataset.DatasetReference(table.project, table.dataset_id),
        table.table_id,
    )
def _view_use_legacy_sql_getter(table):
"""bool: Specifies whether to execute the view with Legacy or Standard SQL.
This boolean specifies whether to execute the view with Legacy SQL
(:data:`True`) or Standard SQL (:data:`False`). The client side default is
:data:`False`. The server-side default is :data:`True`. If this table is
not a view, :data:`None` is returned.
Raises:
ValueError: For invalid value types.
"""
view = table._properties.get('view')
if view is not None:
# The server-side default for useLegacySql is True.
return view.get('useLegacySql', True)
# In some cases, such as in a table list no view object is present, but the
# resource still represents a view. Use the type as a fallback.
if table.table_type == 'VIEW':
# The server-side default for useLegacySql is True.
return True
class EncryptionConfiguration(object):
    """Custom encryption configuration (e.g., Cloud KMS keys).

    Args:
        kms_key_name (str): resource ID of Cloud KMS key used for encryption
    """

    def __init__(self, kms_key_name=None):
        if kms_key_name is None:
            self._properties = {}
        else:
            self._properties = {'kmsKeyName': kms_key_name}

    @property
    def kms_key_name(self):
        """str: Resource ID of Cloud KMS key

        Resource ID of Cloud KMS key or :data:`None` if using default
        encryption.
        """
        return self._properties.get('kmsKeyName')

    @kms_key_name.setter
    def kms_key_name(self, value):
        self._properties['kmsKeyName'] = value

    @classmethod
    def from_api_repr(cls, resource):
        """Construct an encryption configuration from its API representation

        Args:
            resource (Dict[str, object]):
                An encryption configuration representation as returned from
                the API.

        Returns:
            google.cloud.bigquery.table.EncryptionConfiguration:
                An encryption configuration parsed from ``resource``.
        """
        config = cls()
        config._properties = copy.deepcopy(resource)
        return config

    def to_api_repr(self):
        """Construct the API resource representation of this encryption
        configuration.

        Returns:
            Dict[str, object]:
                Encryption configuration as represented as an API resource
        """
        return copy.deepcopy(self._properties)

    def __eq__(self, other):
        if not isinstance(other, EncryptionConfiguration):
            return NotImplemented
        return self.kms_key_name == other.kms_key_name

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.kms_key_name)

    def __repr__(self):
        return 'EncryptionConfiguration({})'.format(self.kms_key_name)
class TableReference(object):
"""TableReferences are pointers to tables.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables
Args:
dataset_ref (google.cloud.bigquery.dataset.DatasetReference):
A pointer to the dataset
table_id (str): The ID of the table
"""
    def __init__(self, dataset_ref, table_id):
        # Copy the identifying fields out of dataset_ref so this
        # reference does not keep the DatasetReference object alive.
        self._project = dataset_ref.project
        self._dataset_id = dataset_ref.dataset_id
        self._table_id = table_id
    @property
    def project(self):
        """str: ID of the project the table is bound to."""
        return self._project
    @property
    def dataset_id(self):
        """str: ID of the dataset containing the table."""
        return self._dataset_id
    @property
    def table_id(self):
        """str: The table ID (final component of the reference)."""
        return self._table_id
    @property
    def path(self):
        """str: URL path for the table's APIs, relative to the API root."""
        return '/projects/%s/datasets/%s/tables/%s' % (
            self._project, self._dataset_id, self._table_id)
@classmethod
def from_string(cls, table_id, default_project=None):
"""Construct a table reference from table ID string.
Args:
table_id (str):
A table ID in standard SQL format. If ``default_project``
is not specified, this must included a project ID, dataset
ID, and table ID, each separated by ``.``.
default_project (str):
Optional. The project ID to use when ``table_id`` does not
include a project ID.
Returns:
TableReference: Table reference parsed from ``table_id``.
Examples:
>>> TableReference.from_string('my-project.mydataset.mytable')
TableRef...(DatasetRef...('my-project', 'mydataset'), 'mytable')
Raises:
ValueError:
If ``table_id`` is not a fully-qualified table ID in
standard SQL format.
"""
from google.cloud.bigquery.dataset import DatasetReference
output_project_id = default_project
output_dataset_id = None
output_table_id = None
parts = table_id.split('.')
if len(parts) < 2:
raise ValueError(
'table_id must be a fully-qualified table ID in '
'standard SQL format. e.g. "project.dataset.table", got '
'{}'.format(table_id))
elif len(parts) == 2:
if not default_project:
raise ValueError(
'When default_project is not set, table_id must be a '
'fully-qualified table ID in standard SQL format. '
'e.g. "project.dataset_id.table_id", got {}'.format(
table_id))
output_dataset_id, output_table_id = parts
elif len(parts) == 3:
output_project_id, output_dataset_id, output_table_id = parts
if len(parts) > 3:
raise ValueError(
'Too many parts in table_id. Must be a fully-qualified table '
'ID in standard SQL format. e.g. "project.dataset.table", '
'got {}'.format(table_id))
return cls(
DatasetReference(output_project_id, output_dataset_id),
output_table_id,
)
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct a table reference given its API representation
Args:
resource |
the-blue-alliance/the-blue-alliance | src/backend/web/handlers/tests/error_test.py | Python | mit | 933 | 0 | from typing import Tuple
import bs4
from werkzeug.test import Client
def get_error_title(resp_data: str) -> Tuple[str, str]:
    """Extract the (h1, h2) heading strings from an error page body."""
    parsed = bs4.BeautifulSoup(resp_data, "html.parser")
    return parsed.find("h1").string, parsed.find("h2").string
def test_handle_404(web_client: Client) -> None:
    """A request for an unknown route renders the custom 404 page.

    Fix: removed dataset-join '|' artifacts that had split the
    ``web_client`` parameter name and the ``resp.data`` attribute access.
    """
    resp = web_client.get("/asdf")
    assert resp.status_code == 404

    error_header, error_type = get_error_title(resp.data)
    assert error_header == "Oh Noes!1!!"
    assert error_type == "Error 404"
def test_handle_500() -> None:
    """An exception raised inside a view renders the custom 500 page."""
    from backend.web.main import app

    def always_throw() -> str:
        raise Exception("welp")

    # Register a route that always fails, then hit it.
    app.add_url_rule("/throw_500", view_func=always_throw)
    resp = app.test_client().get("/throw_500")
    assert resp.status_code == 500

    error_header, error_type = get_error_title(resp.data)
    assert error_header == "Oh Noes!1!!"
    assert error_type == "Error 500"
AlienCowEatCake/ImageViewer | src/ThirdParty/Exiv2/exiv2-0.27.5-Source/tests/bugfixes/github/test_issue_867.py | Python | gpl-3.0 | 656 | 0.003049 | # -*- coding: utf-8 -*-
from system_tests import CaseMeta, path
class OutOfBoundsReadInIptcParserDecode(metaclass=CaseMeta):
    """
    Regression test for the bug described in:
    https://github.com/Exiv2/exiv2/issues/867
    """
    # Fix: repaired dataset-join '|' artifacts that had split the
    # `commands` assignment and the "image/x-photoshop" MIME type inside
    # the expected-stdout string.
    url = "https://github.com/Exiv2/exiv2/issues/867"
    filename = path("$data_path/issue_867_poc.psd")
    commands = ["$exiv2 $filename"]
    stdout = ["""File name : $filename
File size : 9830 Bytes
MIME type : image/x-photoshop
Image size : 150 x 91
"""
    ]
    stderr = [
        """Warning: Failed to decode IPTC metadata.
$filename: No Exif data found in the file
"""
    ]
    retval = [253]
|
cellnopt/cellnopt | cno/misc/dependencies.py | Python | bsd-2-clause | 1,784 | 0.007848 | import easydev
from cno import CNOGraph
def plot_dependencies(package='cno', show=False, filename=None):
    """Plot the dependency graph of *package* as a CNOGraph.

    Fixes: repaired a dataset-join '|' artifact that had split
    ``easydev.dependencies``; removed the unused ``main`` local (the
    dependencies are re-fetched just below).
    """
    # First, fetch all direct dependencies of the package.
    c = CNOGraph()
    deps = easydev.dependencies.get_dependencies(package)
    package_version = [dep.version for dep in deps if dep.project_name == package][0]
    for dep in deps:
        version = dep.version
        name = dep.project_name
        c.add_reaction(package + '-' + package_version + "=" + name + "-" + version)

    # Dependencies themselves depend on other packages that are all
    # present in `deps`, but the overall DAG is missing, so walk up to
    # two further levels.
    deps = [dep for dep in deps if
            len(easydev.dependencies.get_dependencies(dep.project_name)) > 1]
    newdeps = []
    count = 0
    # NOTE(review): `newdeps` is never reset between iterations, so the
    # second pass re-adds first-pass entries -- confirm this is intended.
    while len(deps) > 1 and count < 2:
        deps = [dep for dep in deps if
                len(easydev.dependencies.get_dependencies(dep.project_name)) > 1]
        for dep in deps:
            for this in easydev.dependencies.get_dependencies(dep.project_name):
                if this.project_name != dep.project_name:
                    newdeps.append(this)
                version = dep.version
                name = dep.project_name
                c.add_reaction(this.project_name + "-" + this.version + "=" + name + "-" + version)
        deps = newdeps
        count += 1
    c.remove_self_loops()

    # Keep only longest path. Not sure this is the good way of doing it...
    c2 = CNOGraph()
    import networkx as nx
    for clique in list(nx.find_cliques(c.to_undirected())):
        for i in range(0, len(clique) - 1):
            c2.add_edge(clique[i], clique[i + 1])
    c2.plot(filename=filename, show=show)
|
deepmind/alphafold | alphafold/data/parsers.py | Python | apache-2.0 | 21,397 | 0.012058 | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for parsing various file formats."""
import collections
import dataclasses
import itertools
import re
import string
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Set
# Internal import (7716).
DeletionMatrix = Sequence[Sequence[int]]
@dataclasses.dataclass(frozen=True)
class Msa:
"""Class representing a parsed MSA file."""
sequences: Sequence[str]
deletion_matrix: DeletionMatrix
descriptions: Sequence[str]
def __post_init__(self):
if not (len(self.sequences) ==
len(self.deletion_matrix) ==
len(self.descriptions)):
raise ValueError(
'All fields for an MSA must have the same length. '
f'Got {len(self.sequences)} sequences, '
f'{len(self.deletion_matrix)} rows in the deletion matrix and '
f'{len(self.descriptions)} descriptions.')
def __len__(self):
return len(self.sequences)
def truncate(self, max_seqs: int):
return Msa(sequences=self.sequences[:max_seqs],
deletion_matrix=self.deletion_matrix[:max_seqs],
descriptions=self.descriptions[:max_seqs])
@dataclasses.dataclass(frozen=True)
class TemplateHit:
"""Class representing a template hit."""
index: int
name: str
aligned_cols: int
sum_probs: Optional[float]
query: str
hit_sequence: str
indices_query: List[int]
indices_hit: List[int]
def parse_fasta(fasta_string: str) -> Tuple[Sequence[str], Sequence[str]]:
"""Parses FASTA string and returns list of strings with amino-acid sequences.
Arguments:
fasta_string: The string contents of a FASTA file.
Returns:
A tuple of two lists:
* A list of sequences.
* A list of sequence descriptions taken from the comment lines. In the
same order as the sequences.
"""
sequences = []
descriptions = []
index = -1
for line in fasta_string.splitlines():
line = line.strip()
if line.startswith('>'):
index += 1
descriptions.append(line[1:]) # Remove the '>' at the beginning.
sequences.append('')
continue
elif not line:
continue # Skip blank lines.
sequences[index] += line
return sequences, descripti | ons
def parse_stockholm(stockholm_string: str) -> Msa:
"""Parses sequences and deletion matrix from stockholm format alignment.
Args:
stockholm_string: The string contents of a stockholm file. The first
sequence in the file should be the query sequence.
Returns:
A tuple of:
* A list of | sequences that have been aligned to the query. These
might contain duplicates.
* The deletion matrix for the alignment as a list of lists. The element
at `deletion_matrix[i][j]` is the number of residues deleted from
the aligned sequence i at residue position j.
* The names of the targets matched, including the jackhmmer subsequence
suffix.
"""
name_to_sequence = collections.OrderedDict()
for line in stockholm_string.splitlines():
line = line.strip()
if not line or line.startswith(('#', '//')):
continue
name, sequence = line.split()
if name not in name_to_sequence:
name_to_sequence[name] = ''
name_to_sequence[name] += sequence
msa = []
deletion_matrix = []
query = ''
keep_columns = []
for seq_index, sequence in enumerate(name_to_sequence.values()):
if seq_index == 0:
# Gather the columns with gaps from the query
query = sequence
keep_columns = [i for i, res in enumerate(query) if res != '-']
# Remove the columns with gaps in the query from all sequences.
aligned_sequence = ''.join([sequence[c] for c in keep_columns])
msa.append(aligned_sequence)
# Count the number of deletions w.r.t. query.
deletion_vec = []
deletion_count = 0
for seq_res, query_res in zip(sequence, query):
if seq_res != '-' or query_res != '-':
if query_res == '-':
deletion_count += 1
else:
deletion_vec.append(deletion_count)
deletion_count = 0
deletion_matrix.append(deletion_vec)
return Msa(sequences=msa,
deletion_matrix=deletion_matrix,
descriptions=list(name_to_sequence.keys()))
def parse_a3m(a3m_string: str) -> Msa:
"""Parses sequences and deletion matrix from a3m format alignment.
Args:
a3m_string: The string contents of a a3m file. The first sequence in the
file should be the query sequence.
Returns:
A tuple of:
* A list of sequences that have been aligned to the query. These
might contain duplicates.
* The deletion matrix for the alignment as a list of lists. The element
at `deletion_matrix[i][j]` is the number of residues deleted from
the aligned sequence i at residue position j.
* A list of descriptions, one per sequence, from the a3m file.
"""
sequences, descriptions = parse_fasta(a3m_string)
deletion_matrix = []
for msa_sequence in sequences:
deletion_vec = []
deletion_count = 0
for j in msa_sequence:
if j.islower():
deletion_count += 1
else:
deletion_vec.append(deletion_count)
deletion_count = 0
deletion_matrix.append(deletion_vec)
# Make the MSA matrix out of aligned (deletion-free) sequences.
deletion_table = str.maketrans('', '', string.ascii_lowercase)
aligned_sequences = [s.translate(deletion_table) for s in sequences]
return Msa(sequences=aligned_sequences,
deletion_matrix=deletion_matrix,
descriptions=descriptions)
def _convert_sto_seq_to_a3m(
query_non_gaps: Sequence[bool], sto_seq: str) -> Iterable[str]:
for is_query_res_non_gap, sequence_res in zip(query_non_gaps, sto_seq):
if is_query_res_non_gap:
yield sequence_res
elif sequence_res != '-':
yield sequence_res.lower()
def convert_stockholm_to_a3m(stockholm_format: str,
max_sequences: Optional[int] = None,
remove_first_row_gaps: bool = True) -> str:
"""Converts MSA in Stockholm format to the A3M format."""
descriptions = {}
sequences = {}
reached_max_sequences = False
for line in stockholm_format.splitlines():
reached_max_sequences = max_sequences and len(sequences) >= max_sequences
if line.strip() and not line.startswith(('#', '//')):
# Ignore blank lines, markup and end symbols - remainder are alignment
# sequence parts.
seqname, aligned_seq = line.split(maxsplit=1)
if seqname not in sequences:
if reached_max_sequences:
continue
sequences[seqname] = ''
sequences[seqname] += aligned_seq
for line in stockholm_format.splitlines():
if line[:4] == '#=GS':
# Description row - example format is:
# #=GS UniRef90_Q9H5Z4/4-78 DE [subseq from] cDNA: FLJ22755 ...
columns = line.split(maxsplit=3)
seqname, feature = columns[1:3]
value = columns[3] if len(columns) == 4 else ''
if feature != 'DE':
continue
if reached_max_sequences and seqname not in sequences:
continue
descriptions[seqname] = value
if len(descriptions) == len(sequences):
break
# Convert sto format to a3m line by line
a3m_sequences = {}
if remove_first_row_gaps:
# query_sequence is assumed to be the first sequence
query_sequence = next(iter(sequences.values()))
query_non_gaps = [res != '-' for res in query_sequence]
for seqname, sto_sequence in sequences.items():
# Dots are optional in a3m format and are commonly removed.
out_sequen |
ghchinoy/tensorflow | tensorflow/python/tools/optimize_for_inference_test.py | Python | apache-2.0 | 13,544 | 0.005833 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# | http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================== | ======================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
class OptimizeForInferenceTest(test.TestCase):
def create_node_def(self, op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node_def(self, name, value, dtype, shape=None):
node = self.create_node_def("Const", name, [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
def testOptimizeForInference(self):
self.maxDiff = 1000
unused_constant_name = "unused_constant"
unconnected_add_name = "unconnected_add"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
unused_output_add_name = "unused_output_add"
graph_def = graph_pb2.GraphDef()
unused_constant = self.create_constant_node_def(
unused_constant_name, value=0, dtype=dtypes.float32, shape=[])
graph_def.node.extend([unused_constant])
unconnected_add_node = self.create_node_def(
"Add", unconnected_add_name,
[unused_constant_name, unused_constant_name])
self.set_attr_dtype(unconnected_add_node, "T", dtypes.float32)
graph_def.node.extend([unconnected_add_node])
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def(
"Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name, b_identity_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
unused_output_add_node = self.create_node_def("Add", unused_output_add_name,
[add_name, b_constant_name])
self.set_attr_dtype(unused_output_add_node, "T", dtypes.float32)
graph_def.node.extend([unused_output_add_node])
expected_output = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name, b_constant_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
output = optimize_for_inference_lib.optimize_for_inference(
graph_def, [], [add_name], dtypes.float32.as_datatype_enum)
self.assertProtoEquals(expected_output, output)
@test_util.run_deprecated_v1
def testFoldBatchNorms(self):
with self.cached_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
conv_op = nn_ops.conv2d(
input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
mean_op = constant_op.constant(
np.array([10, 20]), shape=[2], dtype=dtypes.float32)
variance_op = constant_op.constant(
np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
beta_op = constant_op.constant(
np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
gamma_op = constant_op.constant(
np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
test_util.set_producer_version(ops.get_default_graph(), 8)
gen_nn_ops._batch_norm_with_global_normalization(
conv_op,
mean_op,
variance_op,
beta_op,
gamma_op,
0.00001,
False,
name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
original_graph_def)
with self.cached_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(original_result, optimized_result)
for node in optimized_graph_def.node:
self.assertNotEqual("BatchNormWithGlobalNormalization", node.op)
@test_util.run_deprecated_v1
def testFoldFusedBatchNorms(self):
for data_format, use_gpu in [("NHWC", False), ("NCHW", True)]:
with self.cached_session(use_gpu=use_gpu) as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs),
shape=[1, 1, 6, 2] if data_format == "NHWC" else [1, 2, 1, 6],
dtype=dtypes.float32)
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
conv_op = nn_ops.conv2d(
input_op,
|
catalyst/l3overlay | src/l3overlay/util/exception.py | Python | gpl-3.0 | 914 | 0 | #
# IPsec overlay network manager (l3overlay)
# l3overlay/util/exception.py - exception base class
#
# Copyright | (c) 2017 Catalyst.net Ltd
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# | This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
l3overlay exception base class.
'''
class L3overlayError(Exception):
'''
l3overlay exception base class.
'''
pass
|
infinit/couchdb-python | couchdb/client.py | Python | bsd-3-clause | 45,376 | 0.000375 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Python client API for CouchDB.
>>> server = Server()
>>> db = server.create('python-tests')
>>> doc_id, doc_rev = db.save({'type': 'Person', 'name': 'John Doe'})
>>> doc = db[doc_id]
>>> doc['type']
u'Person'
>>> doc['name']
u'John Doe'
>>> del db[doc.id]
>>> doc.id in db
False
>>> del server['python-tests']
"""
import itertools
import mimetypes
import os
from types import FunctionType
from inspect import getsource
from textwrap import dedent
import warnings
from couchdb import http, json, util
__all__ = ['Server', 'Database', 'Document', 'ViewResults', 'Row']
__docformat__ = 'restructuredtext en'
DEFAULT_BASE_URL = os.environ.get('COUCHDB_URL', 'http://localhost:5984/')
class Server(object):
"""Representation of a CouchDB server.
>>> server = Server() # connects to the local_server
>>> remote_server = Server('http://example.com:5984/')
>>> secure_remote_server = Server('https://username:password@example.com:5984/')
This class behaves like a dictionary of databases. For example, to get a
list of database names on the server, you can simply iterate over the
server object.
New databases can be created using the `create` method:
>>> db = server.create('python-tests')
>>> db
<Database 'python-tests'>
You can access existing databases using item access, specifying the database
name as the key:
>>> db = server['python-tests']
>>> db.name
'python-tests'
Databases can be deleted using a ``del`` statement:
>>> del server['python-tests']
"""
def __init__(self, url=DEFAULT_BASE_URL, full_commit=True, session=None):
"""Initialize the server object.
:param url: the URI of the server (for example
``http://localhost:5984/``)
:param full_commit: turn on the X-Couch-Full-Commit header
:param session: an http.Session instance or None for a default session
"""
if isinstance(url, util.strbase):
self.resource = http.Resource(url, session or http.Session())
else:
self.resource = url # treat as a Resource object
if not full_commit:
self.resource.headers['X-Couch-Full-Commit'] = 'false'
def __contains__(self, name):
"""Return whether the server contains a database with the specified
name.
:param name: the database name
:return: `True` if a database with the name exists, `False` otherwise
"""
try:
self.resource.head(name)
return True
except http.ResourceNotFound:
return False
def __iter__(self):
"""Iterate over the names of all databases."""
status, headers, data = self.resource.get_json('_all_dbs')
return iter(data)
def __len__(self):
"""Return the number of databases."""
status, headers, data = self.resource.get_json('_all_dbs')
return len(data)
def __nonzero__(self):
"""Return whether the server is available."""
try:
self.resource.head()
return True
except:
return False
def __bool__(self):
return self.__nonzero__()
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.resource.url)
def __delitem__(self, name):
"""Remove the database with the specified name.
:param name: the name of the database
:raise ResourceNotFound: if no database with that name exists
"""
self.resource.delete_json(name)
def __getitem__(self, name):
"""Return a `Database` object representing the database with the
specified name.
:param name: the name of the database
:return: a `Database` object representing the database
:rtype: `Database`
:raise ResourceNotFound: if no database with that name exists
"""
db = Database(self.resource(name), name)
db.resource.head() # actually make a request to the database
return db
def config(self):
"""The configuration of the CouchDB server.
The configuration is represented as a nested dictionary of sections and
options from the configuration files of the server, or the default
values for options that are not explicitly configured.
:rtype: `dict`
"""
status, headers, data = self.resource.get_json('_config')
return data
def version(self):
"""The version string of the CouchDB server.
Note that this results in a request being made, and can also be used
to check for the availability of the server.
:rtype: `unicode`"""
status, headers, data = self.resource.get_json()
return data['version']
def stats(self, name=None):
"""Server statistics.
:param name: name of single statistic, e.g. httpd/requests
(None -- return all statistics)
"""
if not name:
resource = self.resource('_stats')
else:
resource = self.resource('_stats', *name.split('/'))
status, headers, data = resource.get_json()
return data
def tasks(self):
"""A list of tasks currently active on the server."""
status, headers, data = self.resource.get_json('_active_tasks')
return data
def uuids(self, count=None):
"""Retrieve a batch of uuids
:param count: a number of uuids to fetch
(None -- get as many as the server sends)
:return: a list of uuids
"""
if count is None:
_, _, data = self.resource.get_json('_uuids')
else:
_, _, data = self.resource.get_json('_uuids', count=count)
return data['uuids']
def create(self, name):
"""Create a new database with the given name.
:param name: the name of the database
:return: a `Database` object representing the created database
:rtype: `Database`
:raise PreconditionFailed: if a database with that name already exists
"""
self.resource.put_json(name)
return self[name]
def delete(self, name):
"""Delete the database with the specified name.
:param name: the name of the database
:raise ResourceNotFound: if a database with that name does not exist
:since: 0.6
"""
del self[name]
def replicate(self, source, target, **options):
"""Replicate changes from the source database to the target database.
:param source: URL of the source database
:param target: URL of the target database
:param options: optional replication args, e.g. continuous=True
"""
data = {'sou | rce': source, 'target': target}
data.update(options)
status, headers, data = self.resource.post_json('_replicate', data)
return data
class Database(object):
"""Re | presentation of a database on a CouchDB server.
>>> server = Server()
>>> db = server.create('python-tests')
New documents can be added to the database using the `save()` method:
>>> doc_id, doc_rev = db.save({'type': 'Person', 'name': 'John Doe'})
This class provides a dictionary-like interface to databases: documents are
retrieved by their ID using item access
>>> doc = db[doc_id]
>>> doc #doctest: +ELLIPSIS
<Document u'...'@... {...}>
Documents are represented as instances of the `Row` class, which is
basically just a normal dictionary with the additional attributes ``id`` and
``rev``:
>>> doc.id, doc.rev #doctest: +ELLIPSIS
(u'...', ...)
>>> doc['type']
u'Person'
>>> doc['name']
u'John Doe'
To update an existing document, you use item access, too:
>>> doc['name'] = 'Mary Jane'
>>> db[doc.id] = doc
The `save()` method creates a document with a random ID g |
ComprasTransparentes/api | endpoints/ministerio.py | Python | gpl-3.0 | 9,007 | 0.001665 | import json
import falcon
import peewee
from models import models_api
from utils.myjson import JSONEncoderPlus
class MinisterioId(object):
"""Endpoint para un ministerio en particular, identificado por id"""
@models_api.database.atomic()
def on_get(self, req, resp, ministerio_id):
"""Obtiene la informacion sobre un ministerio en particular
:param req: Falcon request object
:param resp: Falcon response object
:param ministerio_id: ID de ministerio
:return:
"""
# Convertir ministerio_id a int
try:
ministerio_id = int(ministerio_id)
except ValueError:
raise falcon.HTTPNotFound()
# Obtener el ministerio
try:
ministerio = models_api.Comparador.select(
models_api.Comparador.id_ministerio,
models_api.Comparador.nombre_ministerio
).where(
models_api.Comparador.id_ministerio == ministerio_id
).get()
except models_api.Comparador.DoesNotExist:
raise falcon.HTTPNotFound()
# Construir la respuesta
response = {
'id': ministerio.id_ministerio,
'nombre': ministerio.nombre_ministerio
}
# Codificar la respuesta en JSON
resp.body = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)
class Ministerio(object):
"""Endpoint para todos los ministerios"""
@models_api.database.atomic()
def on_get(self, req, resp):
"""Obtiene informacion de todos los ministerios.
:param req: Falcon request object
:param resp: Falcon response object
"""
# Obtener todos los ministerios
ministerios = models_api.Comparador.select(
models_api.Comparador.id_ministerio,
models_api.Comparador.nombre_ministerio
).distinct().order_by(
models_api.Comparador.id_ministerio
)
# Construir respuesta
response = {
'n_ministerios': ministerios.count(),
'ministerios': [
{
'id': ministerio['id_ministerio'],
'nombre': ministerio['nombre_ministerio']
}
for ministerio in ministerios.dicts().iterator()]
}
# Codificar la respuesta en JSON
resp.body = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)
class MinisterioCategoria(object):
"""Endpoint para las categorias de productos licitados por los ministerios"""
@models_api.database.atomic()
def on_get(self, req, resp):
"""Obtiene las categorias de productos licitados por los ministerios.
Puede ser filtrado por ministerios con el parametro **minsterio**. Para filtrar por varios ministerios a la
vez, se debe incluir el parametro **ministerio** varias veces.
***ministerio** ID de ministerio oara filtrar
:param req: Falcon request object
:param resp: Falcon response object
:return:
"""
# Preparar la lista de filtros que se van a aplicar
filters = []
# Filtrar por ministerio
q_ministerio = req.params.get('ministerio', [])
if q_ministerio:
if isinstance(q_ministerio, basestring):
q_ministerio = [q_ministerio]
try:
q_ministerio = map(lambda x: int(x), q_ministerio)
except ValueError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "ministerio debe ser un entero")
filters.extend([models_api.Comparador.id_ministerio << q_ministerio])
# Obtener las categorias
categorias = models_api.Comparador.select(
models_api.Comparador.id_categoria_nivel1,
models_api.Comparador.categoria_nivel1,
peewee.fn.count(models_api.Comparador.id_categoria_nivel1)
).where(
models_api.Comparador.categoria_nivel1.is_null(False),
*filters
).group_by(
models_api.Comparador.id_categoria_nivel1,
models_api.Comparador.categoria_nivel1
).having(
peewee.fn.count(models_api.Comparador.id_categoria_nivel1) >= len(q_ministerio)
).order_by(
models_api.Comparador.id_categoria_nivel1
).distinct()
# Construir la respuesta
response = {
'n_categorias': categorias.count(),
'categorias': [
{
'id': categoria['id_categoria_nivel1'],
'nombre': categoria['categoria_nivel1']
}
for categoria in categorias.dicts().iterator()]
}
# Codificar la respuesta en JSON
resp.body = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)
class MinisterioIdCategoria(object):
"""Endpoint para las categorias de productos de un ministerio"""
@models_api.database.atomic()
def on_get(self, req, resp, ministerio_id):
"""Obtiene las categorias licitadas por un ministerio en particular.
:param req: Falcon request object
:param resp: Falcon response object
:param ministerio_id: ID de ministerio
:return:
"""
# Obtener categorias del ministerio
categorias = models_api.Comparador.select(
models_api.Comparador.id_categoria_nivel1,
models_api.Comparador.categoria_nivel1
).where(
models_api.Comparador.id_ministerio == ministerio_id,
models_api.Comparador.categoria_nivel1.is_null(False)
).order_by(
models_api.Comparador.id_categoria_nivel1
)
# Construir la respuesta
response = {
'n_categorias': categorias.count(),
'categorias': [
{
'id': categoria['id_categoria_nivel1'],
'nombre': categoria['categoria_nivel1']
}
for categoria in categorias.dicts().iterator()]
}
# Codificar la respuesta en JSON
resp.body = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)
class MinisterioIdCategoriaIdStats(object):
"""Endpoint de las estadisticas de las licitaciones emitidas por un ministerio en cierta categoria de producto"""
@models_api.database.atomic()
def on_get(self, req, resp, ministerio_id, categoria_id):
"""Obtiene las estadisticas de las licitaciones emitidas por el ministerio **minist | erio_id** en la categoria
de producto **categoria_id**
:param req: Falcon request object
:param resp: Falcon response object
:param ministerio_id: ID de minsiterio
:param categoria_id: ID de categoria de producto
:return:
"""
# Validar que ministerio_id y categoria_id son ints
try:
ministerio_id = int(ministerio_id)
c | ategoria_id = int(categoria_id)
except ValueError:
raise falcon.HTTPNotFound()
# Obtener las estadisticas
try:
stats = models_api.Comparador.get(
models_api.Comparador.id_ministerio == ministerio_id,
models_api.Comparador.id_categoria_nivel1 == categoria_id
)
except models_api.Comparador.DoesNotExist:
stats = None
# Constrir la respuesta
if stats:
# Si se obtuvo un resultado de la BD, rellenar la respuesta con esa informacion
response = {
'categoria': {
"id": stats.id_categoria_nivel1,
"nombre": stats.categoria_nivel1,
},
'ministerio': {
'id': stats.id_ministerio,
'nombre': stats.nombre_ministerio,
},
'monto_promedio': int(stats.monto_promedio),
'monto_total': int(stats.monto),
'n_licitaciones_adjudicadas': stats.licit_adjudicadas,
'n_proveedores': stats.proveed_favorecidos
}
else:
# Si no se obtuvo un resultad |
horpto/peewee-async | peewee_async.py | Python | mit | 16,722 | 0.000897 | """
peewee-async
============
Asynchronous interface for `peewee`_ ORM powered by `asyncio`_:
https://github.com/05bit/peewee-async
.. _peewee: https://github.com/coleifer/peewee
.. _asyncio: https://docs.python.org/3/library/asyncio.html
Licensed under The MIT License (MIT)
Copyright (c) 2014, Alexey Kinev <rudy@05bit.com>
"""
import asyncio
import aiopg
import peewee
import contextlib
__all__ = [
# Queries
'execute',
# Object actions
'get_object',
'create_object',
'delete_object',
'update_object',
# Database backends
'PostgresqlDatabase',
'PooledPostgresqlDatabase',
# Sync calls helpers
'sync_unwanted',
'UnwantedSyncQueryError',
# Aggregation:
'count',
'scalar',
]
@asyncio.coroutine
def execute(query):
"""Execute *SELECT*, *INSERT*, *UPDATE* or *DELETE* query asyncronously.
:param query: peewee query instance created with ``Model.select()``,
``Model.update()`` etc.
:return: result depends on query type, it's the same as for sync ``query.execute()``
"""
if isinstance(query, peewee.UpdateQuery):
coroutine = update
elif isinstance(query, peewee.InsertQuery):
coroutine = insert
elif isinstance(query, peewee.DeleteQuery):
coroutine = delete
else:
coroutine = select
return (yield from coroutine(query))
@asyncio.coroutine
def create_object(model, **data):
"""Create object asynchronously.
:param model: mode class
:param data: data for initializing object
:return: new object saved to database
"""
obj = model(**data)
# NOTE! Here are private calls involved:
#
# - obj._data
# - obj._get_pk_value()
# - obj._set_pk_value()
# - obj._prepare_instance()
#
field_dict = dict(obj._data)
pk = obj._get_pk_value()
pk_from_cursor = yield from insert(obj.insert(**field_dict))
if pk_from_cursor is not None:
pk = pk_from_cursor
obj._set_pk_value(pk) # Do not overwrite current ID with None.
# obj._prepare_instance()
obj._dirty.clear()
obj.prepared()
return obj
@asyncio.coroutine
def get_object(source, *args):
"""Get object asynchronously.
:param source: mode class or query to get object from
:param args: lookup parameters
:return: model instance or raises ``peewee.DoesNotExist`` if object not found
"""
if isinstance(source, peewee.Query):
base_query = source
model = base_query.model_class
else:
base_query = source.select()
model = source
# Return first object from query
for obj in (yield from select(base_query.where(*args).limit(1))):
return obj
# No objects found
raise model.DoesNotExist
@asyncio.coroutine
def delete_object(obj, recursive=False, delete_nullable=False):
    """Delete object asynchronously.
    :param obj: object to delete
    :param recursive: if ``True`` also delete all other objects that depend on this object
    :param delete_nullable: if ``True`` and delete is recursive then delete even 'nullable' dependencies
    For details please check out `Model.delete_instance()`_ in peewee docs.
    .. _Model.delete_instance(): http://peewee.readthedocs.org/en/latest/peewee/api.html#Model.delete_instance
    """
    # Here are private calls involved:
    # - obj._pk_expr()
    if recursive:
        # Walk dependent rows deepest-first so foreign keys stay valid.
        dependencies = obj.dependencies(delete_nullable)
        for query, fk in reversed(list(dependencies)):
            model = fk.model_class
            if fk.null and not delete_nullable:
                # Nullable FK: detach the dependents instead of deleting them.
                yield from update(model.update(**{fk.name: None}).where(query))
            else:
                yield from delete(model.delete().where(query))
    # Finally remove the object itself, matched by its primary key.
    result = yield from delete(obj.delete().where(obj._pk_expr()))
    return result
@asyncio.coroutine
def update_object(obj, only=None):
    """Update object asynchronously.
    :param obj: object to update
    :param only: list or tuple of fields to update; if `None` then all dirty fields are updated
    This function does the same as `Model.save()`_ for already saved object, but it
    doesn't invoke ``save()`` method on model class. That is important to know if you
    overrode the save method for your model.
    .. _Model.save(): http://peewee.readthedocs.org/en/latest/peewee/api.html#Model.save
    """
    # Here are private calls involved:
    #
    # - obj._data
    # - obj._meta
    # - obj._prune_fields()
    # - obj._pk_expr()
    # - obj._dirty.clear()
    #
    field_dict = dict(obj._data)
    pk_field = obj._meta.primary_key
    if only:
        # Caller picked explicit fields; never rewrite a simple primary key.
        field_dict = obj._prune_fields(field_dict, only)
        if not isinstance(pk_field, peewee.CompositeKey):
            field_dict.pop(pk_field.name, None)
    else:
        # Default: persist only the fields that changed since the last save.
        field_dict = obj._prune_fields(field_dict, obj.dirty_fields)
    rows = yield from update(obj.update(**field_dict).where(obj._pk_expr()))
    obj._dirty.clear()
    return rows
@asyncio.coroutine
def select(query):
    """Perform SELECT query asynchronously.
    NOTE! It relies on internal peewee logic for generating
    results from queries and well, a bit hacky.
    """
    assert isinstance(query, peewee.SelectQuery),\
        ("Error, trying to run select coroutine"
         "with wrong query class %s" % str(query))
    # Perform *real* async query
    query = query.clone()
    cursor = yield from cursor_with_query(query)
    # Perform *fake* query: we only need a result wrapper
    # here, not the query result itself:
    query._execute = lambda: None
    result_wrapper = query.execute()
    # Fetch result
    result = AsyncQueryResult(result_wrapper=result_wrapper, cursor=cursor)
    try:
        # Drain the cursor row by row; presumably fetchone() raises
        # GeneratorExit when exhausted — TODO confirm against AsyncQueryResult.
        while True:
            yield from result.fetchone()
    except GeneratorExit:
        pass
    # Release cursor and return
    cursor.release()
    return result
@asyncio.coroutine
def insert(query):
    """Run an INSERT query asynchronously.

    :return: last insert ID reported by the database backend
    """
    assert isinstance(query, peewee.InsertQuery),\
        ("Error, trying to run insert coroutine"
         "with wrong query class %s" % str(query))
    cursor = yield from cursor_with_query(query)
    # Ask the backend for the generated primary key before releasing.
    last_id = yield from query.database.last_insert_id_async(
        cursor, query.model_class)
    cursor.release()
    return last_id
@asyncio.coroutine
def update(query):
    """Run an UPDATE query asynchronously.

    :return: number of rows updated
    """
    assert isinstance(query, peewee.UpdateQuery),\
        ("Error, trying to run update coroutine"
         "with wrong query class %s" % str(query))
    cursor = yield from cursor_with_query(query)
    affected = cursor.rowcount
    cursor.release()
    return affected
@asyncio.coroutine
def delete(query):
    """Run a DELETE query asynchronously.

    :return: number of rows deleted
    """
    assert isinstance(query, peewee.DeleteQuery),\
        ("Error, trying to run delete coroutine"
         "with wrong query class %s" % str(query))
    cursor = yield from cursor_with_query(query)
    affected = cursor.rowcount
    cursor.release()
    return affected
@asyncio.coroutine
def count(query, clear_limit=False):
    """Perform *COUNT* aggregated query asynchronously.
    :param query: ``select()`` query to count rows for
    :param clear_limit: if ``True``, strip LIMIT/OFFSET before counting
    :return: number of objects in ``select()`` query
    """
    if query._distinct or query._group_by or query._limit or query._offset:
        # wrapped_count(): these clauses change row multiplicity, so the
        # query must be counted as a sub-select.
        clone = query.order_by()
        if clear_limit:
            clone._limit = clone._offset = None
        sql, params = clone.sql()
        wrapped = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
        raw_query = query.model_class.raw(wrapped, *params)
        return (yield from scalar(raw_query)) or 0
    else:
        # simple count(): replace the select list with COUNT(*) directly.
        query = query.order_by()
        query._select = [peewee.fn.Count(peewee.SQL('*'))]
        return (yield from scalar(query)) or 0
@asyncio.coroutine
def scalar(query, as_tuple=False):
"""Get single value from ``select()`` query, i.e. for aggregation.
:return: result is the same as after sync ``query.scalar()`` call
"""
cursor = yield from cursor_with_query(query)
row = yield from cursor.fetchone()
|
Ashkeelun/GitHealth | Testing/UnitTest.py | Python | mit | 4,788 | 0.003133 | import requests
import unittest
class APITestCase(unittest.TestCase):
    """Integration test for the local /health/api/ endpoint.

    Posts a known GitHub repository URL and verifies the documentation
    statistics the service computes for it.  Requires the server to be
    running on 127.0.0.1:8000.
    """

    # Expected per-file statistics, in the order the API returns sub_files.
    EXPECTED_FILES = [
        {"mlc_size": 72, "mlc_num": 1, "slc_size": 16, "slc_num": 1,
         "comt_size": 88, "code_size": 125},
        {"mlc_size": 97, "mlc_num": 1, "slc_size": 27, "slc_num": 2,
         "comt_size": 124, "code_size": 148},
        {"mlc_size": 0, "mlc_num": 0, "slc_size": 0, "slc_num": 0,
         "comt_size": 0, "code_size": 0},
        {"mlc_size": 0, "mlc_num": 0, "slc_size": 50, "slc_num": 3,
         "comt_size": 50, "code_size": 2},
        {"mlc_size": 102, "mlc_num": 1, "slc_size": 0, "slc_num": 0,
         "comt_size": 102, "code_size": 0},
        {"mlc_size": 0, "mlc_num": 0, "slc_size": 0, "slc_num": 0,
         "comt_size": 0, "code_size": 276},
    ]
    # Aggregate totals over the whole repository, keyed as the API names them.
    # The same numbers appear under root/total_doc_info and document_stats.
    EXPECTED_TOTALS = {"slcSize": 93, "mlcNum": 3, "mlcSize": 271,
                       "codeSize": 551, "comtSize": 364, "slcNum": 6}

    def setUp(self):
        self.url = 'http://127.0.0.1:8000/health/api/'
        self.payload = {"url": "https://github.com/celalgorgun/TestFile"}

    def testResponse(self):
        r = requests.post(self.url, self.payload)
        # Test Request
        self.assertEqual(r.status_code, 200)
        body = r.json()  # parse the response once instead of per assertion
        # Repository-wide totals reported under the root node.
        for key, value in self.EXPECTED_TOTALS.items():
            self.assertEqual(body["root"]["total_doc_info"][key], value)
        # Per-file statistics and extensions (all fixture files are Python).
        sub_files = body["root"]["sub_files"]
        self.assertEqual(len(sub_files), len(self.EXPECTED_FILES))
        for index, expected in enumerate(self.EXPECTED_FILES):
            for key, value in expected.items():
                self.assertEqual(sub_files[index][key], value)
            self.assertEqual(sub_files[index]["extension"], ".py")
        # document_stats mirrors the root totals.
        for key, value in self.EXPECTED_TOTALS.items():
            self.assertEqual(body["document_stats"][key], value)
if __name__ == '__main__':
    # Standalone execution: run the API integration test via unittest.
    unittest.main()
|
google/rappor | tests/fastrand.py | Python | apache-2.0 | 1,198 | 0.001669 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License fo | r the specific language governing permissions and
# limitations under the License.
"""fastrand.py - Python wrapper for _fastrand."""
# NOTE: We could retire this module in favor of the C++ client? One reason to
# keep it is if it supports a wider range of params (e.g. more than 32 or 64
# bits.)
import random
import _fastrand
class FastIrrRand(object):
  """Fast insecure version of rappor.SecureIrrRand."""

  def __init__(self, params):
    # Bind the accelerated C helper once so each generator call is cheap.
    fast_randbits = _fastrand.randbits
    bits = params.num_bloombits
    # IRR probabilities: each call draws a fresh bit mask of num_bloombits.
    self.p_gen = lambda: fast_randbits(params.prob_p, bits)
    self.q_gen = lambda: fast_randbits(params.prob_q, bits)
|
def __bootstrap__():
    """Swap this pure-Python stub for the platform-specific C extension."""
    global __bootstrap__, __loader__, __file__
    import imp
    import pkg_resources
    import sys
    # Locate the compiled extension that ships next to this stub module.
    __file__ = pkg_resources.resource_filename(
        __name__, 'tslib.cpython-35m-darwin.so')
    __loader__ = None
    del __bootstrap__, __loader__
    # Replace this module's contents with the loaded C extension.
    imp.load_dynamic(__name__, __file__)
__bootstrap__()
|
Vagab0nd/SiCKRAGE | lib3/bencode/__init__.py | Python | gpl-3.0 | 1,155 | 0 | # The contents of this file are subject to the BitTorren | t Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the Lice | nse. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Petru Paler
"""bencode.py - bencode encoder + decoder."""
from bencode.BTL import BTFailure
from bencode.exceptions import BencodeDecodeError
from bencodepy import Bencached, Bencode
# Public API of the bencode compatibility package.
__all__ = (
    'BTFailure',
    'Bencached',
    'BencodeDecodeError',
    'bencode',
    'bdecode',
    'bread',
    'bwrite',
    'encode',
    'decode'
)
# Shared codec instance used by all module-level helpers below.
DEFAULT = Bencode(
    encoding='utf-8',
    encoding_fallback='value',
    dict_ordered=True,
    dict_ordered_sort=True
)
# Module-level convenience aliases bound to the shared codec.
bdecode = DEFAULT.decode
bencode = DEFAULT.encode
bread = DEFAULT.read
bwrite = DEFAULT.write
# Backwards-compatible names for the same operations.
decode = bdecode
encode = bencode
|
"""Module to store chemical elements related data."""

C12 = 1.99264648e-26  # mass of one C12 atom (kg/atom)
amu = 1.66053904e-27  # atomic mass unit (kg)

# Atomic number and standard atomic mass, keyed by element symbol.
chem_elements = {
    'H': dict(index=1, mass=1.00794),
    'C': dict(index=6, mass=12.0107),
    'N': dict(index=7, mass=14.0067),
    'O': dict(index=8, mass=15.9994),
    'F': dict(index=9, mass=18.9984032),
    'Ne': dict(index=10, mass=20.1797),
    'S': dict(index=16, mass=32.065),
    'Cl': dict(index=17, mass=35.453),
    'Ni': dict(index=28, mass=58.693),
}
LondoMundo/githubFrontendPy | gitGUI.py | Python | gpl-3.0 | 1,265 | 0.029249 | import wx
import os
class bucky(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'Git GUI', size =(500,500))
panel = wx.Panel(self)
gitAddB = wx.Button(panel, label = "add", pos = (0,0), size =(60,60))
gitCommitB = wx.Button(panel, label="commit", pos = (60,0), size=(70,60))
gitPushB=wx.Button(panel, label="push", pos = (130,0), size=(60,60))
gitCloneB=wx.Button(panel, label="clone", pos=(190, 0), size=(60,60))
self.Bind(wx.EVT_BUTTON, self.clone, gitCloneB)
def clone(self, event):
print "hello clone"
box=wx.TextEntryDialog(None, "What is the address of the repo you want to clone?", "Git GUI", "")
box.ShowModal()
repo = | box.GetValue()
print repo
secondBox=wx.DirDialog(None)
secondBox.ShowModal()
path = secondBox.GetPath()
print path
| os.system("mkdir /Users/colin/Desktop/helloworld")
#os.system("git clone " + repo)
#use os.system(mkdir) with the git clone command so that we navigate to the directory THEN clone the file
if __name__ =='__main__':
    # Standalone launch: build the frame and enter the wx event loop.
    app=wx.PySimpleApp()
    frame = bucky(parent = None, id = -1)
    frame.Show()
    app.MainLoop()
|
mvaled/sentry | src/sentry/models/monitor.py | Python | bsd-3-clause | 5,443 | 0.00147 | from __future__ import absolute_import, print_function
import pytz
import six
from croniter import croniter
from datetime import datetime, timedelta
from dateutil import rrule
from django.db import models
from django.db.models import Q
from django.utils import timezone
from uuid import uuid4
from sentry.constants import ObjectStatus
from sentry.db.models import (
Model,
BoundedPositiveIntegerField,
EncryptedJsonField,
UUIDField,
sane_repr,
)
# Maps Monitor schedule unit names to dateutil rrule frequency constants.
SCHEDULE_INTERVAL_MAP = {
    "year": rrule.YEARLY,
    "month": rrule.MONTHLY,
    "week": rrule.WEEKLY,
    "day": rrule.DAILY,
    "hour": rrule.HOURLY,
    "minute": rrule.MINUTELY,
}
def generate_secret():
    """Return a 64-character hex secret built from two random UUIDs."""
    return "%s%s" % (uuid4().hex, uuid4().hex)
def get_next_schedule(base_datetime, schedule_type, schedule):
    """Compute the next occurrence strictly after ``base_datetime``.

    ``schedule`` is a crontab expression string for CRONTAB schedules, or a
    ``(count, unit_name)`` pair for INTERVAL schedules.
    """
    if schedule_type == ScheduleType.CRONTAB:
        return croniter(schedule, base_datetime).get_next(datetime)
    if schedule_type == ScheduleType.INTERVAL:
        count, unit_name = schedule
        # count is the "number of units" and unit_name is the "unit name of
        # interval", which is inverse from what rrule calls them.
        rule = rrule.rrule(
            freq=SCHEDULE_INTERVAL_MAP[unit_name], interval=count,
            dtstart=base_datetime, count=2
        )
        # rrule may emit dtstart itself as the first occurrence; skip it.
        return rule[0] if rule[0] > base_datetime else rule[1]
    raise NotImplementedError("unknown schedule_type")
class MonitorStatus(ObjectStatus):
    """ObjectStatus extended with monitor health outcomes."""

    OK = 4
    ERROR = 5

    @classmethod
    def as_choices(cls):
        # Inherited lifecycle states first, then the health states.
        choices = [
            (cls.ACTIVE, "active"),
            (cls.DISABLED, "disabled"),
            (cls.PENDING_DELETION, "pending_deletion"),
            (cls.DELETION_IN_PROGRESS, "deletion_in_progress"),
            (cls.OK, "ok"),
            (cls.ERROR, "error"),
        ]
        return tuple(choices)
class MonitorType(object):
    """Kind of check a Monitor performs."""

    UNKNOWN = 0
    HEALTH_CHECK = 1
    HEARTBEAT = 2
    CRON_JOB = 3

    @classmethod
    def as_choices(cls):
        # Choice labels are the lowercase attribute names.
        names = ("unknown", "health_check", "heartbeat", "cron_job")
        return tuple((getattr(cls, name.upper()), name) for name in names)
class ScheduleType(object):
    """How a monitor's schedule is expressed."""

    UNKNOWN = 0
    CRONTAB = 1
    INTERVAL = 2

    @classmethod
    def as_choices(cls):
        return (
            (cls.UNKNOWN, "unknown"),
            (cls.CRONTAB, "crontab"),
            (cls.INTERVAL, "interval"),
        )
class Monitor(Model):
    """Scheduled check-in monitor (cron job / heartbeat / health check).

    Tracks when the next check-in is expected and flips to ERROR (creating a
    Sentry event) when a check-in is missed.
    """

    __core__ = True

    # Stable external identifier used by check-in clients.
    guid = UUIDField(unique=True, auto_add=True)
    organization_id = BoundedPositiveIntegerField(db_index=True)
    project_id = BoundedPositiveIntegerField(db_index=True)
    name = models.CharField(max_length=128)
    status = BoundedPositiveIntegerField(
        default=MonitorStatus.ACTIVE, choices=MonitorStatus.as_choices()
    )
    type = BoundedPositiveIntegerField(
        default=MonitorType.UNKNOWN, choices=MonitorType.as_choices()
    )
    # Schedule/options blob: schedule, schedule_type, timezone, checkin_margin.
    config = EncryptedJsonField(default=dict)
    next_checkin = models.DateTimeField(null=True)
    last_checkin = models.DateTimeField(null=True)
    date_added = models.DateTimeField(default=timezone.now)

    class Meta:
        app_label = "sentry"
        db_table = "sentry_monitor"
        index_together = (("type", "next_checkin"),)

    __repr__ = sane_repr("guid", "project_id", "name")

    def get_audit_log_data(self):
        """Return the fields recorded in audit log entries for this monitor."""
        return {"name": self.name, "type": self.type, "status": self.status, "config": self.config}

    def get_next_scheduled_checkin(self, last_checkin=None):
        """Compute when the next check-in is due, including the grace margin.

        :param last_checkin: base time; defaults to ``self.last_checkin``
        """
        if last_checkin is None:
            last_checkin = self.last_checkin
        tz = pytz.timezone(self.config.get("timezone") or "UTC")
        schedule_type = self.config.get("schedule_type", ScheduleType.CRONTAB)
        base_datetime = last_checkin.astimezone(tz)
        next_checkin = get_next_schedule(base_datetime, schedule_type, self.config["schedule"])
        # checkin_margin is the allowed lateness in minutes before failure.
        return next_checkin + timedelta(minutes=int(self.config.get("checkin_margin") or 0))

    def mark_failed(self, last_checkin=None):
        """Mark the monitor as failed and emit a Sentry event + signal.

        Returns ``False`` when a newer check-in already superseded
        ``last_checkin`` (nothing updated), ``True`` otherwise.
        """
        from sentry.coreapi import ClientApiHelper
        from sentry.event_manager import EventManager
        from sentry.models import Project
        from sentry.signals import monitor_failed

        if last_checkin is None:
            next_checkin_base = timezone.now()
            last_checkin = self.last_checkin or timezone.now()
        else:
            next_checkin_base = last_checkin

        # Guarded update: only transition if no later check-in has been
        # recorded, avoiding races with concurrent check-in processing.
        affected = (
            type(self)
            .objects.filter(
                Q(last_checkin__lte=last_checkin) | Q(last_checkin__isnull=True), id=self.id
            )
            .update(
                next_checkin=self.get_next_scheduled_checkin(next_checkin_base),
                status=MonitorStatus.ERROR,
                last_checkin=last_checkin,
            )
        )
        if not affected:
            return False

        # Surface the failure as a Sentry event in the monitor's project.
        event_manager = EventManager(
            {
                "logentry": {"message": "Monitor failure: %s" % (self.name,)},
                "contexts": {"monitor": {"id": six.text_type(self.guid)}},
            },
            project=Project(id=self.project_id),
        )
        event_manager.normalize()
        data = event_manager.get_data()
        helper = ClientApiHelper(project_id=self.project_id)
        helper.insert_data_to_database(data)
        monitor_failed.send(monitor=self, sender=type(self))
        return True
|
rudhir-upretee/Sumo17_With_Netsim | tools/visualization/mpl_dump_onNet.py | Python | gpl-3.0 | 17,970 | 0.010128 | #!/usr/bin/env python
"""
@file mpl_dump_onNet.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id: mpl_dump_onNet.py 13811 2013-05-01 20:31:43Z behrisch $
This script reads a network and a dump file and
draws the network, coloring it by the values
found within the dump-file.
matplotlib has to be installed for this purpose
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from matplotlib import rcParams
from pylab import *
import os, string, sys, StringIO
import math
from optparse import OptionParser
from xml.sax import saxutils, make_parser, handler
def toHex(val):
    """Return the two-digit hex string for a value in the range 0-255."""
    digits = "0123456789abcdef"
    high = int(val / 16)
    low = int(val - high * 16)
    return digits[high] + digits[low]
def toFloat(val):
    """Convert a two-character lowercase hex string into a float (0-255)."""
    digits = "0123456789abcdef"
    high = digits.find(val[0])
    low = digits.find(val[1])
    return float(high * 16 + low)
def toColor(val, colormap):
    """Convert a value in [0, 1] into a matplotlib color string "#rrggbb".

    ``colormap`` is a list of ``(threshold, (r, g, b))`` stops, sorted by
    threshold; channels are interpolated linearly between adjacent stops.
    """
    for lower, upper in zip(colormap[:-1], colormap[1:]):
        if upper[0] > val:
            scale = (val - lower[0]) / (upper[0] - lower[0])
            channels = [lower[1][c] + (upper[1][c] - lower[1][c]) * scale
                        for c in range(3)]
            return "#" + "".join(toHex(ch) for ch in channels)
    # Past the last stop: clamp to the final color.
    return "#" + "".join(toHex(c) for c in colormap[-1][1])
def parseColorMap(mapDef):
    """Parse "value:#rrggbb,..." definitions into [(value, (r, g, b)), ...]."""
    entries = []
    for definition in mapDef.split(","):
        value, color = definition.split(":")
        # Hex pairs start at offsets 1/3/5 (offset 0 is the '#').
        rgb = tuple(toFloat(color[i:i + 2]) for i in (1, 3, 5))
        entries.append((float(value), rgb))
    return entries
class NetReader(handler.ContentHandler):
"""Reads a ne | twork, storing the edge geometries, lane numbers and max. speeds"""
def __init__(self):
self._id = ''
self._edge2lanes = {}
self._edge2speed = {}
self._edge2shape = {}
self._edge2from = {}
self._edge2to = {}
self._node2x = {}
self._node2y = {}
self._currentShapes = []
self._parseLane = False
def startElement(self, name, attrs):
self._parseLane = False
if name == 'edge':
if not attrs.has_key('function') or attrs['function'] != 'internal':
self._id = attrs['id']
self._edge2from[attrs['id']] = attrs['from']
self._edge2to[attrs['id']] = attrs['to']
self._edge2lanes[attrs['id']] = 0
self._currentShapes = []
else:
self._id = ""
if name == 'lane' and self._id!="":
self._edge2speed[self._id] = float(attrs['speed'])
self._edge2lanes[self._id] = self._edge2lanes[self._id] + 1
self._parseLane = True
self._currentShapes.append(attrs["shape"])
if name == 'junction':
self._id = attrs['id']
if self._id[0]!=':':
self._node2x[attrs['id']] = attrs['x']
self._node2y[attrs['id']] = attrs['y']
else:
self._id = ""
def endElement(self, name):
if self._parseLane:
self._parseLane = False
if name == 'edge' and self._id!="":
noShapes = len(self._currentShapes)
if noShapes%2 == 1 and noShapes>0:
self._edge2shape[self._id] = self._currentShapes[int(noShapes/2)]
elif noShapes%2 == 0 and len(self._currentShapes[0])!=2:
cshapes = []
minLen = -1
for i in self._currentShapes:
cshape = []
es = i.split(" ")
for e in es:
p = e.split(",")
cshape.append((float(p[0]), float(p[1])))
cshapes.append(cshape)
if minLen==-1 or minLen>len(cshape):
minLen = len(cshape)
self._edge2shape[self._id] = ""
if minLen>2:
for i in range(0, minLen):
x = 0.
y = 0.
for j in range(0, noShapes):
x = x + cshapes[j][i][0]
y = y + cshapes[j][i][1]
x = x / float(noShapes)
y = y / float(noShapes)
if self._edge2shape[self._id] != "":
self._edge2shape[self._id] = self._edge2shape[self._id] + " "
self._edge2shape[self._id] = self._edge2shape[self._id] + str(x) + "," + str(y)
def plotData(self, weights, options, values1, values2, saveName, colorMap):
edge2plotLines = {}
edge2plotColors = {}
edge2plotWidth = {}
xmin = 10000000.
xmax = -10000000.
ymin = 10000000.
ymax = -10000000.
min_width = 0
if options.min_width:
min_width = options.min_width
for edge in self._edge2from:
# compute shape
xs = []
ys = []
if edge not in self._edge2shape or self._edge2shape[edge]=="":
xs.append(float(self._node2x[self._edge2from[edge]]))
xs.append(float(self._node2x[self._edge2to[edge]]))
ys.append(float(self._node2y[self._edge2from[edge]]))
ys.append(float(self._node2y[self._edge2to[edge]]))
else:
shape = self._edge2shape[edge].split(" ")
l = []
for s in shape:
p = s.split(",")
xs.append(float(p[0]))
ys.append(float(p[1]))
for x in xs:
if x<xmin:
xmin = x
if x>xmax:
xmax = x
for y in ys:
if y<ymin:
ymin = y
if y>ymax:
ymax = y
# save shape
edge2plotLines[edge] = (xs, ys)
# compute color
if edge in values2:
c = values2[edge]
else:
c = 0
edge2plotColors[edge] = toColor(c, colorMap)
# compute width
if edge in values1:
w = values1[edge]
if w>0:
w = 10. * math.log(1 + values1[edge]) + min_width
else:
w = min_width
if options.max_width and w>options.max_width:
w = options.max_width
if w<min_width:
w = min_width
edge2plotWidth[edge] = w
else:
edge2plotWidth[edge] = min_width
if options.verbose:
print "x-limits: " + str(xmin) + " - " + str(xmax)
print "y-limits: " + str(ymin) + " - " + str(ymax)
if not options.show:
rcParams['backend'] = 'Agg'
# set figure size
if options.size and not options.show:
f = figure(figsize=(options.size.split(",")))
else:
f = figure()
for edge in edge2plotLines:
plot(edge2plotLines[edge][0], edge2plotLines[edge][1], color=edge2plotColors[edge], linewidth=edge2plotWidth[edge])
# set axes
if options.xticks!="":
(xb, xe, xd, xs) = options.xticks.split(",")
xticks(arange(xb, xe, xd), size = xs)
if options.yticks!="":
(yb, ye, yd, ys) = options.yticks.split(",")
yticks(arange(yb, ye, yd), size = ys)
if options.xlim!="":
(xb, xe) = options.xlim.split(",")
xlim(int(xb), int(xe))
else:
xlim(xmin, xmax)
if options.ylim!="":
(yb, |
annarev/tensorflow | tensorflow/python/data/experimental/kernel_tests/assert_cardinality_test.py | Python | apache-2.0 | 3,962 | 0.002019 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.assert_cardinality()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class AssertCardinalityTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for `tf.data.experimental.assert_cardinality()`."""

  @combinations.generate(test_base.default_test_combinations())
  def testCorrectCardinality(self):
    # filter() hides the length, so cardinality starts out unknown.
    dataset = dataset_ops.Dataset.range(10).filter(lambda x: True)
    self.assertEqual(
        self.evaluate(cardinality.cardinality(dataset)), cardinality.UNKNOWN)
    self.assertDatasetProduces(dataset, expected_output=range(10))
    # Asserting the true cardinality records it without altering elements.
    dataset = dataset.apply(cardinality.assert_cardinality(10))
    self.assertEqual(self.evaluate(cardinality.cardinality(dataset)), 10)
    self.assertDatasetProduces(dataset, expected_output=range(10))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              num_elements=10,
              asserted_cardinality=20,
              expected_error="Input dataset was expected to contain 20 "
              "elements but contained only 10 elements.") +
          combinations.combine(
              num_elements=1,
              asserted_cardinality=20,
              expected_error="Input dataset was expected to contain 20 "
              "elements but contained only 1 element.") +
          combinations.combine(
              num_elements=10,
              asserted_cardinality=cardinality.INFINITE,
              expected_error="Input dataset was expected to contain an "
              "infinite number of elements but contained only 10 elements.") +
          combinations.combine(
              num_elements=1,
              asserted_cardinality=cardinality.INFINITE,
              expected_error="Input dataset was expected to contain an "
              "infinite number of elements but contained only 1 element.") +
          combinations.combine(
              num_elements=10,
              asserted_cardinality=5,
              expected_error="Input dataset was expected to contain 5 "
              "elements but contained at least 6 elements.") +
          combinations.combine(
              num_elements=10,
              asserted_cardinality=1,
              expected_error="Input dataset was expected to contain 1 "
              "element but contained at least 2 elements.")))
  def testIncorrectCardinality(self, num_elements, asserted_cardinality,
                               expected_error):
    dataset = dataset_ops.Dataset.range(num_elements)
    dataset = dataset.apply(
        cardinality.assert_cardinality(asserted_cardinality))
    next_element = self.getNext(dataset)
    # Consuming the dataset must eventually fail with the expected message.
    with self.assertRaisesRegex(errors.FailedPreconditionError, expected_error):
      while True:
        self.evaluate(next_element())
if __name__ == "__main__":
    # Run through the TensorFlow test runner when executed as a script.
    test.main()
|
it-projects-llc/odoo-saas-tools | saas_portal_subscription/models/subscription_log.py | Python | lgpl-3.0 | 352 | 0 | from odoo import fields, models
class SaasSubscriptionLog(models.Model):
    """History of expiration-date changes for a SaaS portal client."""

    _name = 'saas_portal.subscription_log'
    _order = 'id desc'

    client_id = fields.Many2one('saas_portal.client', 'Client')
    # Expiration before and after the change being logged.
    expiration = fields.Datetime('Previous expiration')
    expiration_new = fields.Datetime('New expiration')
    reason = fields.Text('Reason')
|
jtrebosc/JTutils | TSpy/norm_noise_per_scan.py | Python | bsd-3-clause | 1,676 | 0.011337 | # -*- coding: utf- | 8 -*-
## normalize the noise level to one scan : divide intensity by sqrt(NS)
def norm(ns=None, dataset=None):
    """Normalize the noise level to one scan: divide intensity by sqrt(NS).

    :param ns: number of scans used for normalization; read from the dataset's
        NS status parameter when not given
    :param dataset: TOPSPIN dataset descriptor; current dataset when not given
    """
    import sys
    import os
    import os.path
    import subprocess
    import JTutils
    # When this function is called from an imported module, the TOPSPIN
    # builtins are not in the caller's namespace, so import them explicitly.
    from TopCmds import CURDATA, GETPAR, GETPARSTAT, PUTPAR, RE, INPUT_DIALOG, MSG
    if dataset is None:
        dataset = CURDATA()
    # Fill in ns from the dataset's status parameters when not supplied.
    if ns is None:
        ns = GETPARSTAT("NS")
    fulldataPATH = JTutils.fullpath(dataset)
    opt_args = ["--ns", str(ns)]
    # Delegate the actual processing to the external CPython helper script.
    JTutils.run_CpyBin_script('norm_noise_per_scan_.py', opt_args + [fulldataPATH])
if __name__ == '__main__':
    # Fallback argument holder for Jython installs that lack argparse.
    class dummy():
        def __init__(self):
            self.ns = 0
    try :
        import argparse
        parser = argparse.ArgumentParser(description='normalize the noise level to one scan : divide intensity by sqrt(NS).')
        parser.add_argument('--ns', help='Number of scan used for normalization. Could be float in special cases', default=None)
        args = parser.parse_args(sys.argv[1:])
    except ImportError :
        # No argparse available: warn only if arguments were actually passed.
        if len(sys.argv) > 1:
            MSG("Argparse module not found!\n Arguments won't be processed")
        args = dummy()
    except SystemExit:
        # argparse exits on --help or bad arguments; show its help text instead.
        MSG(""" Script is exiting : either you asked for help or there is an argument error.
        Check console for additional information
        """ + parser.format_help() )
        EXIT()
    dataset = CURDATA()
    norm(ns=args.ns, dataset=dataset)
    # Refresh the display on the processed dataset.
    RE(dataset)
|
SylvainCecchetto/plugin.video.catchuptvandmore | plugin.video.catchuptvandmore/resources/lib/channels/fr/alsace20.py | Python | gpl-2.0 | 7,749 | 0.000516 | # -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2019 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib import web_utils
from resources.lib.menu_utils import item_post_treatment
from resources.lib.kodi_utils import get_selected_item_art, get_selected_item_label, get_selected_item_info
import inputstreamhelper
import json
import re
import urlquick
# TODO
# Find a way for mpd inputstream not protected by DRM to be downloadable by youtube-dl
# Add date info to catch-up tv video
# Base address of the Alsace20 website; all scraped URLs are relative to it.
URL_ROOT = "https://www.alsace20.tv"
# Embedded live-stream player page.
URL_LIVE = URL_ROOT + "/emb/live1"
@Route.register
def list_categories(plugin, item_id, **kwargs):
    """Build the category listing from the site's VOD navigation menu."""
    resp = urlquick.get(URL_ROOT)
    menu = resp.parse("ul", attrs={"class": "menu-vod hidden-xs"})
    for entry in menu.iterfind(".//li"):
        link = entry.find('.//a')
        category_name = link.text
        href = link.get('href')
        # Anchor-only links ('#...') just point back at the home page.
        if '#' in href:
            category_url = URL_ROOT
        else:
            category_url = URL_ROOT + href
        item = Listitem()
        item.label = category_name
        item.set_callback(
            list_programs, item_id=item_id, category_url=category_url)
        item_post_treatment(item)
        yield item
@Route.register
def list_programs(plugin, item_id, category_url, **kwargs):
    """Yield one entry per program listed on a category page.

    The original implementation duplicated the item-building code three
    times (one copy per filter branch); the filtering is now separated
    from the item construction.
    """
    resp = urlquick.get(category_url)
    root = resp.parse("div", attrs={"class": "emissions hidden-xs"})
    style_url_re = re.compile(r'url\((.*?)\)')
    for program_datas in root.iterfind(".//a"):
        href = program_datas.get('href')
        if 'VOD/est' in category_url:
            # Keep only programs from the "Est" sub-section.
            if 'Est' not in href.split('/')[2]:
                continue
        elif 'VOD' in category_url:
            # Keep only programs belonging to the current VOD category.
            if href.split('/')[2] not in category_url:
                continue
        program_name = program_datas.find(".//div[@class='title']").text
        # The thumbnail URL is embedded in the inline CSS background style.
        program_image = URL_ROOT + style_url_re.findall(
            program_datas.find(".//div[@class='bg']").get('style'))[0]
        program_url = URL_ROOT + href
        item = Listitem()
        item.label = program_name
        item.art['thumb'] = item.art['landscape'] = program_image
        item.set_callback(
            list_videos, item_id=item_id, program_url=program_url)
        item_post_treatment(item)
        yield item
@Route.register
def list_videos(plugin, item_id, program_url, **kwargs):
    """Yield a playable entry for every video listed on a program page."""
    page = urlquick.get(program_url)
    vid_list = page.parse("ul", attrs={"class": "list-vids"})
    for vid in vid_list.iterfind(".//li"):
        title = vid.find('.//h2').text
        thumb = URL_ROOT + '/videoimages/' + vid.find(
            ".//div[@class='img']").get('data-img')
        # The summary node may be empty; fall back to an empty plot.
        summary = vid.find(".//div[@class='resume']").text
        plot = summary.strip() if summary is not None else ''
        url = URL_ROOT + vid.find('.//a').get('href')
        item = Listitem()
        item.label = title
        item.art['thumb'] = item.art['landscape'] = thumb
        item.info['plot'] = plot
        item.set_callback(
            get_video_url,
            item_id=item_id,
            video_url=url)
        item_post_treatment(item, is_playable=True, is_downloadable=False)
        yield item
@Resolver.register
def get_video_url(plugin,
                  item_id,
                  video_url,
                  download_mode=False,
                  **kwargs):
    """Resolve the MPEG-DASH stream of a catch-up video.

    Fetches the video page, follows the HDR_VISIO data-url to a JSON
    stream descriptor and returns a Listitem configured for
    inputstream.adaptive, or False when no DASH-capable player is set up.
    """
    is_helper = inputstreamhelper.Helper('mpd')
    if not is_helper.check_inputstream():
        return False
    resp = urlquick.get(
        video_url, headers={"User-Agent": web_utils.get_random_ua()}, max_age=-1)
    root = resp.parse()
    url_stream_datas = URL_ROOT + root.find(".//div[@class='HDR_VISIO']").get(
        "data-url") + "&mode=html"
    resp2 = urlquick.get(
        url_stream_datas,
        headers={"User-Agent": web_utils.get_random_ua()},
        max_age=-1)
    json_parser = json.loads(resp2.text)
    item = Listitem()
    item.path = json_parser["files"]["auto"]
    # Fixed garbled property name (was "inputstreama | ddon"); must match
    # the Kodi inputstream.adaptive property key used in get_live_url.
    item.property["inputstreamaddon"] = "inputstream.adaptive"
    item.property["inputstream.adaptive.manifest_type"] = "mpd"
    item.label = get_selected_item_label()
    item.art.update(get_selected_item_art())
    item.info.update(get_selected_item_info())
    return item
@Resolver.register
def get_live_url(plugin, item_id, **kwargs):
    """Resolve the MPEG-DASH live stream (function name was garbled: 'g | et_live_url').

    Same resolution flow as get_video_url, but against the fixed live page.
    Returns False when no DASH-capable player is available.
    """
    is_helper = inputstreamhelper.Helper('mpd')
    if not is_helper.check_inputstream():
        return False
    resp = urlquick.get(
        URL_LIVE, headers={"User-Agent": web_utils.get_random_ua()}, max_age=-1)
    root = resp.parse()
    url_live_datas = URL_ROOT + root.find(".//div[@class='HDR_VISIO']").get(
        "data-url") + "&mode=html"
    resp2 = urlquick.get(
        url_live_datas,
        headers={"User-Agent": web_utils.get_random_ua()},
        max_age=-1)
    json_parser = json.loads(resp2.text)
    item = Listitem()
    item.path = json_parser["files"]["auto"]
    item.property["inputstreamaddon"] = "inputstream.adaptive"
    item.property["inputstream.adaptive.manifest_type"] = "mpd"
    item.label = get_selected_item_label()
    item.art.update(get_selected_item_art())
    item.info.update(get_selected_item_info())
    return item
|
radicalbit/ambari | ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/hbase_master.py | Python | apache-2.0 | 1,892 | 0.014271 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from hbase import hbase
from hbase_service import hbase_service
from hbase_decommission import hbase_decommission
from resource_management.libraries.functions.check_process_status import check_process_status
class HbaseMaster(Script):
  """Ambari lifecycle handler for the AMS HBase master component.

  NOTE(review): ``Script`` and ``format`` are not imported in this excerpt;
  upstream this module does ``from resource_management import *`` — confirm
  that import is present in the full file.
  """
  def install(self, env):
    # Install the OS packages declared in the service metainfo.
    self.install_packages(env)
  def configure(self, env, action = None):
    import params
    env.set_params(params)
    # Write out HBase configuration files for the master role.
    hbase('master', action)
  def start(self, env):
    import params
    env.set_params(params)
    self.configure(env, action = 'start') # for security
    hbase_service( 'master',
      action = 'start'
    )
  def stop(self, env):
    import params
    env.set_params(params)
    hbase_service( 'master',
      action = 'stop'
    )
  def status(self, env):
    import status_params
    env.set_params(status_params)
    # Alive check: verify the PID recorded at start is still running.
    pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
    check_process_status(pid_file)
  def decommission(self, env):
    import params
    env.set_params(params)
    hbase_decommission(env)
if __name__ == "__main__":
  HbaseMaster().execute()
|
phracek/rebase-helper | rebasehelper/tests/test_archive.py | Python | gpl-2.0 | 2,830 | 0.000353 | # -*- coding: utf-8 -*-
#
# This tool helps you to rebase package to the latest version
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hracek <phracek@redhat.com>
# Tomas Hozza <thozza@redhat.com> |
import os
import pytest
from rebasehelper.archive import Archive
class TestArchive(object):
    """Extraction tests for rebasehelper's Archive across archive formats."""
    TAR_GZ = 'archive.tar.gz'
    TGZ = 'archive.tgz'
    TAR_XZ = 'archive.tar.xz'
    TAR_BZ2 = 'archive.tar.bz2'
    ZIP = 'archive.zip'
    BZ2 = 'file.txt.bz2'
    # Deliberately corrupted archives used for the failure-path tests.
    INVALID_TAR_BZ2 = 'archive-invalid.tar.bz2'
    INVALID_TAR_XZ = 'archive-invalid.tar.xz'
    # Payload common to every valid archive: one file with known content.
    ARCHIVED_FILE = 'file.txt'
    ARCHIVED_FILE_CONTENT = 'simple testing file'
    # These files located in TEST_FILES_DIR will be copied into the testing environment
    TEST_FILES = [
        TAR_GZ,
        TGZ,
        TAR_XZ,
        TAR_BZ2,
        BZ2,
        ZIP,
        INVALID_TAR_BZ2,
        INVALID_TAR_XZ,
    ]
    @pytest.fixture
    def extracted_archive(self, archive, workdir):
        # 'archive' comes from each test's parametrize list and 'workdir' is
        # presumably a per-test scratch directory fixture — confirm in conftest.
        a = Archive(archive)
        d = os.path.join(workdir, 'dir')
        a.extract_archive(d)
        return d
    @pytest.mark.parametrize('archive', [
        TAR_GZ,
        TGZ,
        TAR_XZ,
        TAR_BZ2,
        BZ2,
        ZIP,
    ], ids=[
        'tar.gz',
        'tgz',
        'tar.xz',
        'tar.bz2',
        'bz2',
        'zip',
    ])
    def test_archive(self, extracted_archive):
        """A valid archive extracts to a dir containing the expected file."""
        extracted_file = os.path.join(extracted_archive, self.ARCHIVED_FILE)
        # check if the dir was created
        assert os.path.isdir(extracted_archive)
        # check if the file was extracted
        assert os.path.isfile(extracted_file)
        # check the content
        with open(extracted_file) as f:
            assert f.read().strip() == self.ARCHIVED_FILE_CONTENT
    @pytest.mark.parametrize('archive', [
        INVALID_TAR_BZ2,
        INVALID_TAR_XZ,
    ], ids=[
        'tar.bz2',
        'tar.xz',
    ])
    def test_invalid_archive(self, archive, workdir):
        """Extracting a corrupted archive must raise IOError."""
        a = Archive(archive)
        d = os.path.join(workdir, 'dir')
        with pytest.raises(IOError):
            a.extract_archive(d)
|
alfasin/st2 | st2common/tests/unit/test_policies.py | Python | apache-2.0 | 4,902 | 0.000816 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import st2tests
from st2common.bootstrap.policiesregistrar import register_policy_types, register_policies
from st2common.models.api.action import ActionAPI, RunnerTypeAPI
from st2common.models.api.policy import PolicyTypeAPI, PolicyAPI
from st2common.persistence.action import Action
from st2common.persistence.policy import PolicyType, Policy
from st2common.persistence.runner import RunnerType
from st2common.policies import ResourcePolicyApplicator, get_driver
from st2tests import DbTestCase, fixturesloader
TEST_FIXTURES = {
'runners': [
'testrunner1.yaml'
],
'actions': [
'action1.yaml'
],
'policytypes': [
'fake_policy_type_1.yaml',
'fake_policy_type_2.yaml'
],
'policies': [
'policy_1.yaml',
'policy_2.yaml'
]
}
PACK = 'generic'
LOADER = fixturesloader.FixturesLoader()
FIXTURES = LOADER.load_fixtures(fixtures_pack=PACK, fixtures_dict=TEST_FIXTURES)
class PolicyTest(DbTestCase):
    """Tests for policy persistence lookups and policy driver instantiation.

    Fixes two extraction-garbled lines: a stray '|' before the first fixture
    loop and the broken fixture key 'policytype | s' (must match the
    TEST_FIXTURES key 'policytypes').
    """

    @classmethod
    def setUpClass(cls):
        super(PolicyTest, cls).setUpClass()
        # Seed the test database with runner, action, policy-type and
        # policy fixtures before any test runs.
        for _, fixture in six.iteritems(FIXTURES['runners']):
            instance = RunnerTypeAPI(**fixture)
            RunnerType.add_or_update(RunnerTypeAPI.to_model(instance))
        for _, fixture in six.iteritems(FIXTURES['actions']):
            instance = ActionAPI(**fixture)
            Action.add_or_update(ActionAPI.to_model(instance))
        for _, fixture in six.iteritems(FIXTURES['policytypes']):
            instance = PolicyTypeAPI(**fixture)
            PolicyType.add_or_update(PolicyTypeAPI.to_model(instance))
        for _, fixture in six.iteritems(FIXTURES['policies']):
            instance = PolicyAPI(**fixture)
            Policy.add_or_update(PolicyAPI.to_model(instance))

    def test_get_by_ref(self):
        policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
        self.assertIsNotNone(policy_db)
        self.assertEqual(policy_db.pack, 'wolfpack')
        self.assertEqual(policy_db.name, 'action-1.concurrency')
        policy_type_db = PolicyType.get_by_ref(policy_db.policy_type)
        self.assertIsNotNone(policy_type_db)
        self.assertEqual(policy_type_db.resource_type, 'action')
        self.assertEqual(policy_type_db.name, 'concurrency')

    def test_get_driver(self):
        policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
        policy = get_driver(policy_db.ref, policy_db.policy_type, **policy_db.parameters)
        self.assertIsInstance(policy, ResourcePolicyApplicator)
        self.assertEqual(policy._policy_ref, policy_db.ref)
        self.assertEqual(policy._policy_type, policy_db.policy_type)
        self.assertTrue(hasattr(policy, 'threshold'))
        self.assertEqual(policy.threshold, 3)
class PolicyBootstrapTest(DbTestCase):
    """Tests for registering policy types and policies from pack content.

    Removes one exactly duplicated `p2.resource_ref` assertion present in
    the original.
    """

    def test_register_policy_types(self):
        self.assertEqual(register_policy_types(st2tests), 2)
        type1 = PolicyType.get_by_ref('action.concurrency')
        self.assertEqual(type1.name, 'concurrency')
        self.assertEqual(type1.resource_type, 'action')
        type2 = PolicyType.get_by_ref('action.mock_policy_error')
        self.assertEqual(type2.name, 'mock_policy_error')
        self.assertEqual(type2.resource_type, 'action')

    def test_register_policies(self):
        pack_dir = os.path.join(fixturesloader.get_fixtures_base_path(), 'dummy_pack_1')
        self.assertEqual(register_policies(pack_dir=pack_dir), 2)
        p1 = Policy.get_by_ref('dummy_pack_1.test_policy_1')
        self.assertEqual(p1.name, 'test_policy_1')
        self.assertEqual(p1.pack, 'dummy_pack_1')
        self.assertEqual(p1.resource_ref, 'dummy_pack_1.local')
        self.assertEqual(p1.policy_type, 'action.concurrency')
        p2 = Policy.get_by_ref('dummy_pack_1.test_policy_2')
        self.assertEqual(p2.name, 'test_policy_2')
        self.assertEqual(p2.pack, 'dummy_pack_1')
        self.assertEqual(p2.resource_ref, 'dummy_pack_1.local')
        self.assertEqual(p2.policy_type, 'action.mock_policy_error')
|
mahaveerverma/hand-gesture-recognition-opencv | GestureAPI.py | Python | mit | 3,579 | 0.021794 | #!/usr/bin/python
import math
import numpy as np
class Gesture(object):
    """A named hand pose: a palm circle plus fingertip positions/angles."""

    def __init__(self, name):
        self.name = name

    def getName(self):
        """Return the gesture's identifier string."""
        return self.name

    def set_palm(self, hand_center, hand_radius):
        """Record the palm as a circle: (x, y) center and radius."""
        self.hand_center = hand_center
        self.hand_radius = hand_radius

    def set_finger_pos(self, finger_pos):
        """Record fingertip coordinates; finger_count is derived from them."""
        self.finger_pos = finger_pos
        self.finger_count = len(finger_pos)

    def calc_angles(self):
        """Compute each fingertip's absolute angle (degrees) about the palm.

        Angles are truncated to integers (int dtype array). Image y grows
        downward, hence the (center_y - tip_y) inversion.
        """
        cx, cy = self.hand_center
        self.angle = np.zeros(self.finger_count, dtype=int)
        for idx in range(self.finger_count):
            tip_x = self.finger_pos[idx][0]
            tip_y = self.finger_pos[idx][1]
            self.angle[idx] = abs(
                math.atan2(cy - tip_y, tip_x - cx) * 180 / math.pi)
def DefineGestures():
    """Build the dictionary of reference gestures keyed by gesture name."""
    templates = {}

    def register(name, palm_center, palm_radius, fingertips):
        # Build one template gesture and index it by its name.
        gesture = Gesture(name)
        gesture.set_palm(palm_center, palm_radius)
        gesture.set_finger_pos(fingertips)
        gesture.calc_angles()
        templates[gesture.getName()] = gesture

    register("V", (475, 225), 45, [(490, 90), (415, 105)])
    register("L_right", (475, 225), 50, [(450, 62), (345, 200)])
    register("Index_Pointing", (480, 230), 43, [(475, 102)])
    return templates
def CompareGestures(src1, src2):
    """Compare a detected gesture against a template gesture.

    Returns the template's name (``src2.getName()``) when both the finger
    angles and the palm-relative finger lengths match within fixed
    tolerances, otherwise 0. Fixes two extraction-garbled identifiers
    ('length_dif | f' and 'src1.fin | ger_count').
    """
    if src1.finger_count != src2.finger_count:
        return 0
    if src1.finger_count == 1:
        # NOTE(review): the single-finger test only rejects when the signed
        # difference exceeds +20 — a large negative difference still passes;
        # preserved as-is, confirm whether abs() was intended.
        angle_diff = src1.angle[0] - src2.angle[0]
        if angle_diff > 20:
            return 0
        len1 = np.sqrt((src1.finger_pos[0][0] - src1.hand_center[0]) ** 2 +
                       (src1.finger_pos[0][1] - src1.hand_center[1]) ** 2)
        len2 = np.sqrt((src2.finger_pos[0][0] - src2.hand_center[0]) ** 2 +
                       (src2.finger_pos[0][1] - src2.hand_center[1]) ** 2)
        # Compare finger-length ratio against palm-radius ratio so the
        # match is scale invariant.
        length_diff = len1 / len2
        radius_diff = src1.hand_radius / src2.hand_radius
        length_score = abs(length_diff - radius_diff)
        if length_score < 0.09:
            return src2.getName()
        return 0
    # Multi-finger: the spread of per-finger differences must be small.
    angle_diff = []
    for i in range(src1.finger_count):
        angle_diff.append(src1.angle[i] - src2.angle[i])
    angle_score = max(angle_diff) - min(angle_diff)
    if angle_score >= 15:
        return 0
    length_diff = []
    for i in range(src1.finger_count):
        len1 = np.sqrt((src1.finger_pos[i][0] - src1.hand_center[0]) ** 2 +
                       (src1.finger_pos[i][1] - src1.hand_center[1]) ** 2)
        len2 = np.sqrt((src2.finger_pos[i][0] - src2.hand_center[0]) ** 2 +
                       (src2.finger_pos[i][1] - src2.hand_center[1]) ** 2)
        length_diff.append(len1 / len2)
    length_score = max(length_diff) - min(length_diff)
    if length_score < 0.06:
        return src2.getName()
    return 0
def DecideGesture(src, GestureDictionary):
    """Return the name of the first template that matches src, else "NONE".

    Removes two unused locals from the original (``result_list`` was never
    read and ``src2`` was computed but never used).
    """
    for name in GestureDictionary:
        match = CompareGestures(src, GestureDictionary[name])
        if match != 0:
            return match
    return "NONE"
|
BreakVoid/DL_Project | data_utils.py | Python | mit | 12,608 | 0.003252 | import os
import copy
import scipy.interpolate as spi
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
data_root = 'toneclassifier'
train_data_path = "%s/train" % data_root
val_data_path = "%s/test" % data_root
test_data_path = "%s/test_new" % data_root
def SetPath(root):
    """Point the loader at a different dataset root directory.

    Rebinds the module-level ``data_root`` and the derived train/val/test
    sub-directory paths. (Fixes a garbled '| global' line.)
    """
    global data_root, train_data_path, val_data_path, test_data_path
    data_root = root
    train_data_path = "%s/train" % data_root
    val_data_path = "%s/test" % data_root
    test_data_path = "%s/test_new" % data_root
# Mapping from tone-class directory name to the integer label used downstream.
labels = {
    'one': 0,
    'two': 1,
    'three': 2,
    'four': 3
}
def LoadData(mode='train'):
    """Load energy/F0 curves and labels from the dataset directory.

    mode: 'train', 'val' or 'test', selecting the corresponding path.
    Returns (Engy, F0, y): per-sample energy lists, F0 lists and labels.
    (Fixes a garbled 'tr | ain_data_path' token.)
    """
    data_path = train_data_path
    if mode == 'val':
        data_path = val_data_path
    elif mode == 'test':
        data_path = test_data_path
    Engy = []
    F0 = []
    y = []
    for labelName, label in labels.iteritems():
        data_subset_path = "%s/%s" % (data_path, labelName)
        # Each sample is a pair of files: <name>.engy and <name>.f0.
        data_names = set()
        for filename in os.listdir(data_subset_path):
            if filename[0] == ".":
                continue
            if ".engy" in filename:
                data_names.add(filename[0:-5])
            elif ".f0" in filename:
                data_names.add(filename[0:-3])
        for data_name in data_names:
            engy = map(float, open("%s/%s.engy" % (data_subset_path, data_name)).readlines())
            f0 = map(float, open("%s/%s.f0" % (data_subset_path, data_name)).readlines())
            Engy.append(engy)
            F0.append(f0)
            y.append(label)
    return Engy, F0, y
def IgnoreLowEnergyFrequence(Engy, F0):
    """Zero out F0 frames whose energy is at or below the noise floor.

    The noise floor is the mean sqrt-energy over frames that already have
    zero F0 (unvoiced frames), clamped below by 1.0. Returns new
    (Engy, F0) lists; the inputs are not modified.

    Fixes: a ZeroDivisionError when a sample contains no unvoiced frame
    (zero_freq_count == 0); uses range for Py2/3 compatibility.

    Raises ValueError when list lengths are inconsistent.
    """
    data_num = len(Engy)
    if data_num != len(F0):
        raise ValueError("the number of input data mismatched. len(Engy)==%d and len(F0)==%d" % (len(Engy), len(F0)))
    resEngy = []
    resF0 = []
    for i in range(data_num):
        engy = copy.copy(Engy[i])
        f0 = copy.copy(F0[i])
        data_len = len(engy)
        if data_len != len(f0):
            raise ValueError("the length of %d-th data mismatched. len(engy)==%d and len(f0)==%d" % (i, len(engy), len(f0)))
        zero_freq_engy_sum = 0.0
        zero_freq_count = 0.0
        for j in range(data_len):
            if f0[j] < 1e-4:
                zero_freq_count += 1
                zero_freq_engy_sum += math.sqrt(engy[j])
        # Guard against division by zero when every frame is voiced
        # (the original code crashed in that case).
        mean_engy = (zero_freq_engy_sum / zero_freq_count
                     if zero_freq_count > 0 else 0.0)
        for j in range(data_len):
            if math.sqrt(engy[j]) <= max(mean_engy, 1.0):
                f0[j] = 0.0
        resEngy.append(engy)
        resF0.append(f0)
    return resEngy, resF0
def TrimData(Engy, F0):
    """Strip leading/trailing unvoiced (F0 == 0) frames from every sample.

    Returns new (Engy, F0) lists trimmed to the first..last voiced frame.
    When a sample has no voiced frame it is returned unchanged (slicing
    with None keeps the whole list).

    Fixes: the inner search loops reused the outer loop variable ``i``
    (shadowing); uses range/enumerate for Py2/3 compatibility.

    Raises ValueError when list lengths are inconsistent.
    """
    data_num = len(Engy)
    if data_num != len(F0):
        raise ValueError("the number of input data mismatched. len(Engy)==%d and len(F0)==%d" % (len(Engy), len(F0)))
    resEngy = []
    resF0 = []
    for idx in range(data_num):
        engy = copy.copy(Engy[idx])
        f0 = copy.copy(F0[idx])
        data_len = len(engy)
        if data_len != len(f0):
            raise ValueError("the length of %d-th data mismatched. len(engy)==%d and len(f0)==%d" % (idx, len(engy), len(f0)))
        start = None
        end = None
        for pos, value in enumerate(f0):
            if value > 1e-5:
                start = pos
                break
        for pos in range(len(f0) - 1, -1, -1):
            if f0[pos] > 1e-5:
                end = pos + 1
                break
        resEngy.append(copy.copy(engy[start:end]))
        resF0.append(copy.copy(f0[start:end]))
    return resEngy, resF0
def TransformToMelFrequencyScale(F0):
    """Convert every F0 curve from Hz to the mel scale.

    Uses the standard formula mel = 1127 * ln(1 + hz / 700).
    Returns new lists; the input is left untouched.
    (Replaces the Py2-only xrange loop with a comprehension.)
    """
    resF0 = []
    for f0 in F0:
        resF0.append([1127 * math.log(1 + value / 700) for value in f0])
    return resF0
def DivSingleDataStd(F0):
    """Normalize each curve by its own (population, ddof=0) standard deviation.

    Returns new lists; the input is left untouched.
    (Removes the redundant array round-trip and the Py2-only xrange loop.)
    """
    resF0 = []
    for f0 in F0:
        std = np.asarray(f0).std()
        resF0.append([value / std for value in f0])
    return resF0
def DivDataStd(F0):
    """Normalize every curve by one std computed over ALL samples' values.

    Returns new lists; the input is left untouched.
    (Replaces the Py2-only xrange loops with comprehensions.)
    """
    flat = [value for f0 in F0 for value in f0]
    std = np.asarray(flat).std()
    resF0 = []
    for f0 in F0:
        resF0.append([value / std for value in f0])
    return resF0
def SmoothRawF0(F0):
    """Correct octave errors in raw F0 curves.

    When consecutive samples differ by more than C1 (15 Hz), the later
    sample is halved/doubled if that brings it within C1 of its
    predecessor (classic pitch-halving/doubling repair).

    NOTE(review): the outer loop over ``k`` re-runs the inner repair pass
    up to data_len times (O(n^2)); and when ``j`` is 0, ``f0[j - 1]``
    wraps around to the LAST sample — confirm both are intended.
    """
    C1 = 15
    data_num = len(F0)
    resF0 = []
    for i in xrange(data_num):
        f0 = copy.copy(F0[i])
        data_len = len(f0)
        for k in xrange(data_len - 1, -1, -1):
            for j in xrange(k, data_len):
                if abs(f0[j] - f0[j - 1]) < C1:
                    continue
                if abs(f0[j] / 2 - f0[j - 1]) < C1:
                    f0[j] /= 2
                elif abs(2 * f0[j] - f0[j - 1]) < C1:
                    f0[j] *= 2
        resF0.append(f0)
    return resF0
def SmoothF0(F0):
    """Smooth normalized F0 curves: octave repair, outlier patching, 5-tap mean.

    C1/C2 are jump tolerances in the normalized (std-divided) domain.
    A forward pass patches discontinuities by linear extrapolation or
    neighbor averaging; when its last sample disagrees with the original,
    a backward pass is run and the two are spliced at the rightmost index
    where they agree. Each curve is padded by repeating its end samples so
    the final 5-point moving average keeps the original length.
    """
    C1 = 0.16
    C2 = 0.4
    data_num = len(F0)
    resF0 = []
    for i in xrange(data_num):
        f0 = copy.copy(F0[i])
        data_len = len(f0)
        # Octave repair, as in SmoothRawF0 but with normalized thresholds.
        for j in xrange(1, data_len):
            if abs(f0[j] - f0[j - 1]) < C1:
                continue
            if abs(f0[j] / 2 - f0[j - 1]) < C1:
                f0[j] /= 2
            elif abs(2 * f0[j] - f0[j - 1]) < C1:
                f0[j] *= 2
        # Pad with duplicated end samples; fff0 keeps an untouched copy.
        ff0 = copy.copy([f0[0]] + f0 + [f0[-1]])
        fff0 = copy.copy(ff0)
        data_len = len(ff0)
        # f0_2 holds the previous two (possibly patched) samples.
        f0_2 = (ff0[0], ff0[0])
        for j in xrange(1, data_len - 1):
            if abs(ff0[j] - ff0[j - 1]) > C1 and abs(ff0[j + 1] - ff0[j - 1]) > C2:
                # Isolated outlier: extrapolate linearly from history.
                ff0[j] = 2 * f0_2[1] - f0_2[0]
            elif abs(ff0[j] - ff0[j - 1]) > C1 and abs(ff0[j + 1] - ff0[j - 1]) <= C2:
                # Single bad sample between good neighbors: average them.
                ff0[j] = (ff0[j - 1] + ff0[j + 1]) / 2
            f0_2 = (f0_2[1], ff0[j])
        res_f0 = None
        if abs(ff0[-1] - fff0[-1]) <= C1:
            res_f0 = ff0
        else:
            # Forward pass drifted at the tail: repeat the repair backward.
            f0_2 = (fff0[-1], fff0[-1])
            for j in xrange(data_len - 2, 0, -1):
                if abs(fff0[j] - fff0[j + 1]) > C1 and abs(fff0[j - 1] - fff0[j + 1]) > C2:
                    fff0[j] = 2 * f0_2[1] - f0_2[0]
                elif abs(fff0[j] - fff0[j + 1]) > C1 and abs(fff0[j - 1] - fff0[j + 1]) <= C2:
                    fff0[j] = (fff0[j - 1] + fff0[j + 1]) / 2
                f0_2 = (f0_2[1], fff0[j])
            # Splice forward and backward results where they agree.
            s = 0
            for j in xrange(data_len - 2, 0, -1):
                if abs(fff0[j] - ff0[j]) < C1:
                    s = j
                    break
            res_f0 = ff0[: s + 1] + fff0[s + 1: ]
        # Pad again, apply a centered 5-point moving average, then unpad.
        res_f0 = [res_f0[0]] + res_f0 + [res_f0[-1]]
        data_len = len(res_f0)
        for j in xrange(2, data_len - 2):
            res_f0[j] = (res_f0[j - 2] + res_f0[j - 1] + res_f0[j] + res_f0[j + 1] + res_f0[j + 2]) / 5.0
        resF0.append(res_f0[2:-2])
    return resF0
def NormalizeDataLengthWithInterpolation(Engy, F0, result_len=200):
    """Resample every (engy, f0) pair to result_len points via cubic interpolation.

    Returns new (Engy, F0) lists of numpy arrays of length ``result_len``.
    (Replaces Py2-only xrange and the list-comprehension variable that
    shadowed the outer loop index.)

    Raises ValueError when list lengths are inconsistent.
    """
    data_num = len(Engy)
    if data_num != len(F0):
        raise ValueError("the number of input data mismatched. len(Engy)==%d and len(F0)==%d" % (len(Engy), len(F0)))
    resEngy = []
    resF0 = []
    for i in range(data_num):
        engy = copy.copy(Engy[i])
        f0 = copy.copy(F0[i])
        data_len = len(engy)
        if data_len != len(f0):
            raise ValueError(
                "the length of %d-th data mismatched. len(engy)==%d and len(f0)==%d" % (i, len(engy), len(f0)))
        # Map the original sample positions onto [0, result_len - 1].
        scale = float(result_len - 1) / float(data_len - 1)
        x = [j * scale for j in range(data_len)]
        new_x = [j * 1.0 for j in range(result_len)]
        # Clamp the last query point onto the last support point to avoid
        # floating-point drift past the interpolation domain.
        new_x[-1] = x[-1]
        resEngy.append(spi.interp1d(x, engy, kind='cubic')(new_x))
        resF0.append(spi.interp1d(x, f0, kind='cubic')(new_x))
    return resEngy, resF0
def SingleDataDivideMax(data):
    """Scale ``data`` in place so its maximum becomes 1.0; returns the same list.

    (The original named the maximum ``mean``, which was misleading, and
    used the Py2-only xrange.)
    """
    peak = np.asarray(data).max()
    for i in range(len(data)):
        data[i] /= peak
    return data
def DataSetDi |
rainysia/dotfiles | doc/python/test/dict_clear.py | Python | mit | 172 | 0 | #!/usr/bin/env python
# coding=utf-8
x = {}
y = x
x['key'] = 'value'
print y
x = {}
print y
print x
print '-'*50
a = {}
b = a
a['key'] = 'value'
print b
a.clear | ()
print b
| |
stackforge/tacker | tacker/db/migration/alembic_migrations/versions/d6ae359ab0d6_add_tenant_id_to_lcm_subscriptions_and_.py | Python | apache-2.0 | 1,237 | 0.001617 | # Copyright 2022 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add tenant_id to lcm_subscriptions and lcm_op_occs
Revision ID: d6ae359ab0d6
Revises: 3ff50553e9d3
Create Date: 2022-01-06 13:35:53.868106
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd6ae359ab0d6'
down_revision = '3ff50553e9d3'
def upgrade(active_plugins=None, options=None):
    """Add a mandatory tenant_id column to both VNF LCM tables."""
    for table_name in ('vnf_lcm_subscriptions', 'vnf_lcm_op_occs'):
        # A fresh Column object per table: SQLAlchemy columns must not be
        # shared between table definitions.
        op.add_column(
            table_name,
            sa.Column('tenant_id', sa.String(length=64), nullable=False))
|
HydrelioxGitHub/home-assistant | homeassistant/components/rpi_gpio/cover.py | Python | apache-2.0 | 3,595 | 0 | """Support for controlling a Raspberry Pi cover."""
import logging
from time import sleep
import voluptuous as vol
from homeassistant.components.cover import CoverDevice, PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
from homeassistant.components import rpi_gpio
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_COVERS = 'covers'
CONF_RELAY_PIN = 'relay_pin'
CONF_RELAY_TIME = 'relay_time'
CONF_STATE_PIN = 'state_pin'
CONF_STATE_PULL_MODE = 'state_pull_mode'
CONF_INVERT_STATE = 'invert_state'
CONF_INVERT_RELAY = 'invert_relay'
DEFAULT_RELAY_TIME = .2
DEFAULT_STATE_PULL_MODE = 'UP'
DEFAULT_INVERT_STATE = False
DEFAULT_INVERT_RELAY = False
DEPENDENCIES = ['rpi_gpio']
_COVERS_SCHEMA = vol.All(
cv.ensure_list,
[
vol.Schema({
CONF_NAME: cv.string,
CONF_RELAY_PIN: cv.positive_int,
CONF_STATE_PIN: cv.positive_int,
})
]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COVERS): _COVERS_SCHEMA,
vol.Optional(CONF_STATE_PULL_MODE, default=DEFAULT_STATE_PULL_MODE):
cv.string,
vol.Optional(CONF_RELAY_TIME, default=DEFAULT_RELAY_TIME): cv.positive_int,
vol.Optional(CONF_INVERT_STATE, default=DEFAULT_INVERT_STATE): cv.boolean,
vol.Optional(CONF_INVERT_RELAY, default=DEFAULT_INVERT_RELAY): cv.boolean,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the RPi cover platform."""
    # These settings are shared by every configured cover.
    shared_relay_time = config.get(CONF_RELAY_TIME)
    shared_pull_mode = config.get(CONF_STATE_PULL_MODE)
    shared_invert_state = config.get(CONF_INVERT_STATE)
    shared_invert_relay = config.get(CONF_INVERT_RELAY)
    entities = [
        RPiGPIOCover(
            cover_conf[CONF_NAME], cover_conf[CONF_RELAY_PIN],
            cover_conf[CONF_STATE_PIN], shared_pull_mode, shared_relay_time,
            shared_invert_state, shared_invert_relay)
        for cover_conf in config.get(CONF_COVERS)
    ]
    add_entities(entities)
class RPiGPIOCover(CoverDevice):
    """Representation of a Raspberry GPIO cover.

    Fixes two extraction-garbled lines in __init__ (the 'invert_state'
    parameter and the first attribute assignment).
    """

    def __init__(self, name, relay_pin, state_pin, state_pull_mode,
                 relay_time, invert_state, invert_relay):
        """Initialize the cover."""
        self._name = name
        self._state = False
        self._relay_pin = relay_pin
        self._state_pin = state_pin
        self._state_pull_mode = state_pull_mode
        self._relay_time = relay_time
        self._invert_state = invert_state
        self._invert_relay = invert_relay
        rpi_gpio.setup_output(self._relay_pin)
        rpi_gpio.setup_input(self._state_pin, self._state_pull_mode)
        # Park the relay in its inactive level.
        rpi_gpio.write_output(self._relay_pin, 0 if self._invert_relay else 1)

    @property
    def name(self):
        """Return the name of the cover if any."""
        return self._name

    def update(self):
        """Update the state of the cover."""
        self._state = rpi_gpio.read_input(self._state_pin)

    @property
    def is_closed(self):
        """Return true if cover is closed."""
        return self._state != self._invert_state

    def _trigger(self):
        """Trigger the cover by pulsing the relay for relay_time seconds."""
        rpi_gpio.write_output(self._relay_pin, 1 if self._invert_relay else 0)
        sleep(self._relay_time)
        rpi_gpio.write_output(self._relay_pin, 0 if self._invert_relay else 1)

    def close_cover(self, **kwargs):
        """Close the cover."""
        if not self.is_closed:
            self._trigger()

    def open_cover(self, **kwargs):
        """Open the cover."""
        if self.is_closed:
            self._trigger()
|
janjaapbos/nanotest | example1/example1/example1svc.py | Python | mit | 3,143 | 0.001273 | import oi
import os
import sys
import logging
from logging.handlers import SysLogHandler
import time
import service
try:
import config
except ImportError:
import example1.config as config
def stop_function():
    """Send a 'stop' command to the running service over the ctl socket."""
    controller = oi.CtlProgram('ctl program', config.ctl_url)
    controller.call('stop')
    controller.client.close()
class Service(service.Service):
    """Daemonized example1 service: runs the oi control program in a loop.

    Fixes two extraction-garbled lines in the register_hook call
    ('global | s=globals(),' and a stray '| )').
    """

    def __init__(self, *args, **kwargs):
        super(Service, self).__init__(*args, **kwargs)
        # Mirror all logging to the local syslog daemon.
        self.syslog_handler = SysLogHandler(
            address=service.find_syslog(),
            facility=SysLogHandler.LOG_DAEMON
        )
        formatter = logging.Formatter(
            '%(name)s - %(levelname)s - %(message)s')
        self.syslog_handler.setFormatter(formatter)
        logging.getLogger().addHandler(self.syslog_handler)

    def run(self):
        # The scheduler module lives either at top level (script use) or
        # inside the example1 package (installed use).
        try:
            from scheduler import setup_scheduler, scheduler
        except ImportError:
            from example1.scheduler import setup_scheduler, scheduler
        while not self.got_sigterm():
            logging.info("Starting")
            self.program = oi.Program('example1', config.ctl_url)
            self.program.logger = self.logger
            self.program.add_command('ping', lambda: 'pong')
            self.program.add_command('state', lambda: self.program.state)

            def restart():
                # A restart request simply re-arms the run loop.
                logging.warning('Restarting')
                self.program.continue_event.set()
            self.program.restart = restart
            setup_scheduler(self.program)
            if hasattr(config, 'register_hook'):
                # Hand the current execution context to the optional hook.
                config.register_hook(
                    ctx=dict(
                        locals=locals(),
                        globals=globals(),
                        program=self.program
                    )
                )
            self.program.run()
            logging.warning("Stopping")
            scheduler.shutdown()
            if not self.program.continue_event.wait(0.1):
                break
        self.stop()
        os.unlink('/tmp/demo.pid')
        # Re-exec ourselves to implement a full restart.
        os.execl(sys.executable, sys.argv[0], 'start')
        # NOTE(review): os.execl never returns, so the branch below is
        # unreachable as written — confirm intended placement.
        if self.got_sigterm():
            self.program.stop_function()
def main():
    """Command-line entry point: start/stop/restart/status the service."""
    # NOTE(review): this local import shadows the module-level sys import.
    import sys
    if len(sys.argv) < 2:
        sys.exit('Syntax: %s COMMAND' % sys.argv[0])
    cmd = sys.argv[1]
    # Drop the command so the daemonizer doesn't see it in argv.
    sys.argv.remove(cmd)
    service = Service('example1', pid_dir='/tmp')
    if cmd == 'start':
        service.start()
    elif cmd == 'stop':
        service.stop()
        stop_function()
    elif cmd == 'restart':
        service.stop()
        stop_function()
        # Wait for the old instance to exit before starting a new one.
        while service.is_running():
            time.sleep(0.1)
        service.start()
    elif cmd == 'status':
        if service.is_running():
            print "Service is running."
        else:
            print "Service is not running."
    else:
        sys.exit('Unknown command "%s".' % cmd)
if __name__ == '__main__':
    # Give config.main_hook first crack at startup; fall back to main()
    # when the hook is absent or returns a falsy value.
    if hasattr(config, 'main_hook'):
        if not config.main_hook(
            ctx=dict(
                locals=locals(),
                globals=globals()
            )
        ):
            main()
    else:
        main()
|
TomAugspurger/pandas | pandas/tests/arithmetic/test_interval.py | Python | bsd-3-clause | 10,306 | 0.000776 | import operator
import numpy as np
import pytest
from pandas.core.dtypes.common import is_list_like
import pandas as pd
from pandas import (
Categorical,
Index,
Interval,
IntervalIndex,
Period,
Series,
Timedelta,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
    params=[
        # (left edges, right edges) pairs covering int64, float64,
        # timedelta64, datetime64 and tz-aware datetime dtypes; every
        # non-integer variant embeds a missing value (NaN / NaT).
        (Index([0, 2, 4, 4]), Index([1, 3, 5, 8])),
        (Index([0.0, 1.0, 2.0, np.nan]), Index([1.0, 2.0, 3.0, np.nan])),
        (
            timedelta_range("0 days", periods=3).insert(4, pd.NaT),
            timedelta_range("1 day", periods=3).insert(4, pd.NaT),
        ),
        (
            date_range("20170101", periods=3).insert(4, pd.NaT),
            date_range("20170102", periods=3).insert(4, pd.NaT),
        ),
        (
            date_range("20170101", periods=3, tz="US/Eastern").insert(4, pd.NaT),
            date_range("20170102", periods=3, tz="US/Eastern").insert(4, pd.NaT),
        ),
    ],
    # Use the left index's dtype as the test ID for readable reports.
    ids=lambda x: str(x[0].dtype),
)
def left_right_dtypes(request):
    """
    Fixture for building an IntervalArray from various dtypes
    """
    return request.param
@pytest.fixture
def array(left_right_dtypes):
    """
    Fixture to generate an IntervalArray of various dtypes containing NA if possible
    """
    # Pairs the parametrized left/right edge Indexes into intervals.
    left, right = left_right_dtypes
    return IntervalArray.from_arrays(left, right)
def create_categorical_intervals(left, right, closed="right"):
    """Wrap an IntervalIndex built from edge arrays in a Categorical."""
    index = IntervalIndex.from_arrays(left, right, closed)
    return Categorical(index)
def create_series_intervals(left, right, closed="right"):
    """Build a Series backed by an IntervalArray from edge arrays."""
    intervals = IntervalArray.from_arrays(left, right, closed)
    return Series(intervals)
def create_series_categorical_intervals(left, right, closed="right"):
    """Build a Series of categorical intervals from edge arrays."""
    index = IntervalIndex.from_arrays(left, right, closed)
    return Series(Categorical(index))
class TestComparison:
@pytest.fixture(params=[operator.eq, operator.ne])
def op(self, request):
return request.param
@pytest.fixture(
params=[
IntervalArray.from_arrays,
IntervalIndex.from_arrays,
create_categorical_intervals,
create_series_intervals,
create_series_categorical_intervals,
],
ids=[
"IntervalArray",
"IntervalIndex",
"Categorical[Interval]",
"Series[Interval]",
"Series[Categorical[Interval]]",
],
)
def interval_constructor(self, request):
"""
Fixture for all pandas native interval constructors.
To be used as the LHS of IntervalArray comparisons.
"""
return request.param
def elementwise_comparison(self, op, array, other):
"""
Helper that performs elementwise comparisons between `array` and `other`
"""
other = other if is_list_like(other) else [other] * len(array)
return np.array([op(x, y) for x, y in zip(array, other)])
def test_compare_scalar_interval(self, op, array):
# matches first interval
other = array[0]
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
# matches on a single endpoint but not both
other = Interval(array.left[0], array.right[1])
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed):
array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)
other = Interval(0, 1, closed=other_closed)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_scalar_na(self, op, array, nulls_fixture, request):
result = op(array, nulls_fixture)
expected = self.elementwise_comparison(op, array, nulls_fixture)
if nulls_fixture is pd.NA and array.dtype != pd.IntervalDtype("int64"):
mark = pytest.mark.xfail(
reason="broken for non-integer IntervalArray; see GH 31882"
)
request.node.add_marker(mark)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize | (
"other",
[
0,
1.0,
True,
"foo",
Timestamp("2017-01-01") | ,
Timestamp("2017-01-01", tz="US/Eastern"),
Timedelta("0 days"),
Period("2017-01-01", "D"),
],
)
def test_compare_scalar_other(self, op, array, other):
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_list_like_interval(
self, op, array, interval_constructor,
):
# same endpoints
other = interval_constructor(array.left, array.right)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
# different endpoints
other = interval_constructor(array.left[::-1], array.right[::-1])
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
# all nan endpoints
other = interval_constructor([np.nan] * 4, [np.nan] * 4)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_list_like_interval_mixed_closed(
self, op, interval_constructor, closed, other_closed
):
array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)
other = interval_constructor(range(2), range(1, 3), closed=other_closed)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
(
Interval(0, 1),
Interval(Timedelta("1 day"), Timedelta("2 days")),
Interval(4, 5, "both"),
Interval(10, 20, "neither"),
),
(0, 1.5, Timestamp("20170103"), np.nan),
(
Timestamp("20170102", tz="US/Eastern"),
Timedelta("2 days"),
"baz",
pd.NaT,
),
],
)
def test_compare_list_like_object(self, op, array, other):
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_list_like_nan(self, op, array, nulls_fixture, request):
other = [nulls_fixture] * 4
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
if nulls_fixture is pd.NA and array.dtype.subtype != "i8":
reason = "broken for non-integer IntervalArray; see GH 31882"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
np.arange(4, dtype="int64"),
np.arange(4, dtype="float64"),
date_range("2017-01-01", periods=4),
date_range("2017-01-01", periods=4, tz="US/Eastern"),
timedelta_range("0 days", periods=4),
period_range("2017-01-01", periods=4, freq="D"),
Categorical(list("abab")),
Categorical(date_range("2017-01-01", periods=4)),
pd.array(list("abcd")),
pd.array(["foo", 3.14, None, object()]),
],
ids=lambda x: str(x.dtype),
)
def test_compare_list_like_other(self, op, array, other):
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
|
kenorb-contrib/BitTorrent | icmp/test.py | Python | gpl-3.0 | 1,236 | 0.005663 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import socket
from socket import AF_UNIX, SOCK_STREAM
import struct
from struct import pack, unpack
def toint(s):
return unpack("!i", s)[0]
path = "/home/dave/.bittorrent/xicmp-unix-socket"
if os.path.exists(path):
print "unix socket exists at ", path
else:
print "unix socket does not exist. | path=", path
exit -1
sock = socket.socket( AF_UNIX, SOC | K_STREAM, 0 );
sock.connect(path)
print "connected to ", path
while 1:
buf = sock.recv(4)
len = toint(buf)
print "lenbuf=", buf
print "len=", len
buf = sock.recv(len)
print "buf=", buf.encode("hex")
|
michalkaptur/buildbot_indicator | state.py | Python | gpl-3.0 | 479 | 0.002088 | class State:
def __init__(se | lf, state_checker):
self.state_checker = state_checker
self.state = None
self.previous_state = None
def get_current_state(self):
return self.state
def update_state(self):
self.state_checker.update_builders_status()
self.previous_state = self.state
self.state = self.state_checker.all_builds_succedded()
def state_changed(self):
return self.previou | s_state != self.state |
ewiger/tree_output | tree_output/log_handler.py | Python | mit | 958 | 0.001044 | from logging import Handler, NOTSET
class HierarchicalOutputHandler(Handler):
def __init__(self, level=NOTSET, houtput=None):
Handler.__init__(self, level)
self.houtput = houtput
def emit(self, record):
try:
msg | = self.format(record)
add_hlevel = record.__dict__.get('add_hl | evel', False)
remove_hlevel = record.__dict__.get('remove_hlevel', False)
hclosed = record.__dict__.get('hclosed', False)
# Do output with thread-safe locking.
self.acquire()
try:
if add_hlevel:
self.houtput.add_level()
self.houtput.emit(msg, closed=hclosed)
if remove_hlevel:
self.houtput.remove_level()
finally:
self.release()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
|
jhogsett/linkit | python/pinwheel.py | Python | mit | 6,131 | 0.022998 | #!/usr/bin/python
import serial
import time
import random
import sys
s = None
num_leds = 93
play_time = 0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass |
s.read(s.inWaiting()) |
def command(cmd_text):
s.write((cmd_text + ':').encode())
wait_for_ack()
def setup():
global s, ticks, play_time
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
choose_colors()
command(":::pau:clr")
if len(sys.argv) > 1:
command(sys.argv[1])
if len(sys.argv) > 2:
play_time = float(sys.argv[2])
command("6:zon:red:7:rep:grn:7:rep:org:7:rep:blu:7:rep")
command("5:zon:red:5:rep:grn:5:rep:org:5:rep:blu:5:rep")
command("4:zon:red:3:rep:grn:3:rep:org:3:rep:blu:3:rep")
command("3:zon:red:2:rep:grn:2:rep:org:2:rep:blu:2:rep")
command("2:zon:red:1:rep:grn:1:rep:org:1:rep:blu:1:rep")
command("1:zon:gry")
num_colors = 12
colors = [ "red", "orange", "yellow", "ltgreen", "green", "seafoam", "cyan", "ltblue", "blue", "purple", "magenta", "pink", "black", "random" ]
effects = ['blink1','blink2','blink3','blink4','blink5','blink6']
effect_index = 0
chosen_colors = [0,1,2,3,4,5]
def random_color():
r = random.randrange(0, num_colors)
return colors[r]
def choose_colors():
global chosen_colors
for i in range(0, 6):
chosen_colors[i] = random_color()
def shift_colors():
global chosen_colors
for i in xrange(5, 0, -1):
chosen_colors[i] = chosen_colors[i-1]
def clear_colors():
for j in range(0,6):
chosen_colors[j] = "black"
def place_color(zone, color):
command(str(zone) + ":zone:" + color + ":blink" + str(zone) + ":flood")
def place_colors():
place_color(6, chosen_colors[0])
place_color(5, chosen_colors[1])
place_color(4, chosen_colors[2])
place_color(3, chosen_colors[3])
place_color(2, chosen_colors[4])
place_color(1, chosen_colors[5])
def display():
place_colors()
command("flu")
def do_zone(zone):
command(str(zone) + ":zon:rot")
def do_zones():
for i in range(2, 7):
do_zone(i)
command("flu")
global idx
idx = -1
def do_zone(zone):
command(str(zone) + ":zon:rot")
def linear_rotate():
for i in range(2, 7):
do_zone(i)
command("flu")
def angular_rotate():
global idx
do_flush = False
idx = idx + 1
if (idx % 15 == 0):
command("6:zon:rot")
do_flush = True
if (idx % 20 == 0):
command("5:zon:rot")
do_flush = True
if (idx % 30 == 0):
command("4:zon:rot")
do_flush = True
if (idx % 40 == 0):
command("3:zon:rot")
do_flush = True
if (idx % 60 == 0):
command("2:zon:rot")
do_flush = True
if do_flush == True:
command("flu")
def loop():
for i in range(0,4):
linear_rotate()
while True:
angular_rotate()
if __name__ == '__main__':
setup()
while True:
loop()
|
timberline-secondary/hackerspace | src/manage.py | Python | gpl-3.0 | 262 | 0.003817 | #!/usr/bin/e | nv python
import sys
import os
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hackerspace_online.settings")
from django.core.management import execute | _from_command_line
execute_from_command_line(sys.argv)
|
kparal/anaconda | pyanaconda/kickstart.py | Python | gpl-2.0 | 89,420 | 0.003825 | #
# kickstart.py: kickstart install support
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from pyanaconda.errors import ScriptError, errorHandler
from blivet.deviceaction import ActionCreateFormat, ActionDestroyFormat, ActionResizeDevice, ActionResizeFormat
from blivet.devices import LUKSDevice
from blivet.devices.lvm import LVMVolumeGroupDevice, LVMCacheRequest
from blivet.devicelibs.lvm import LVM_PE_SIZE, KNOWN_THPOOL_PROFILES
from blivet.devicelibs.crypto import MIN_CREATE_ENTROPY
from blivet.formats import getFormat
from blivet.partitioning import doPartitioning
from blivet.partitioning import growLVM
from blivet.errors import PartitioningError, StorageError, BTRFSValueError
from blivet.size import Size, KiB
from blivet import udev
from blivet import autopart
from blivet.platform import platform
import blivet.iscsi
import blivet.fcoe
import blivet.zfcp
import blivet.arch
import glob
from pyanaconda import iutil
from pyanaconda.iutil import open # pylint: disable=redefined-builtin
import os
import os.path
import tempfile
from pyanaconda.flags import flags, can_touch_runtime_system
from pyanaconda.constants import ADDON_PATHS, IPMI_ABORTED
import shlex
import requests
import sys
import pykickstart.commands as commands
from pyanaconda import keyboard
from pyanaconda import ntp
from pyanaconda import timezone
from pyanaconda.timezone import NTP_PACKAGE, NTP_SERVICE
from pyanaconda import localization
from pyanaconda import network
from pyanaconda import nm
from pyanaconda.simpleconfig import SimpleConfigFile
from pyanaconda.users import getPassAlgo
from pyanaconda.desktop import Desktop
from pyanaconda.i18n import _
from pyanaconda.ui.common import collect
from pyanaconda.addons import AddonSection, AddonData, AddonRegistry, collect_addon_paths
from pyanaconda.bootloader import GRUB2, get_bootloader
from pyanaconda.pwpolicy import F22_PwPolicy, F22_PwPolicyData
from pykickstart.constants import CLEARPART_TYPE_NONE, FIRSTBOOT_SKIP, FIRSTBOOT_RECONFIG, KS_SCRIPT_POST, KS_SCRIPT_PRE, \
KS_SCRIPT_TRACEBACK, KS_SCRIPT_PREINSTALL, SELINUX_DISABLED, SELINUX_ENFORCING, SELINUX_PERMISSIVE
from pykickstart.base import BaseHandler
from pykickstart.errors import formatErrorMsg, KickstartError, KickstartValueError
from pykickstart.parser import KickstartParser
from pykickstart.parser import Script as KSScript
from pykickstart.sections import Section
from pykickstart.sections import NullSection, PackageSection, PostScriptSection, PreScriptSection, PreInstallScriptSection, TracebackScriptSection
from pykickstart.version import returnClassForVersion
import logging
log = logging.getLogger("anaconda")
stderrLog = logging.getLogger("anaconda.stderr")
storage_log = logging.getLogger("blivet")
stdoutLog = logging.getLogger("anaconda.stdout")
from pyanaconda.anaconda_log import logger, logLevelMap, setHandlersLevel, DEFAULT_LEVEL
class AnacondaKSScript(KSScript):
""" Execute a kickstart script
This will write the script to a file named /tmp/ks-script- before
execution.
Output is logged by the program logger, the path specified by --log
or to /tmp/ks-script-\\*.log
"""
def run(self, chroot):
""" Run the kickstart script
@param chroot directory path to chroot into before execution
"""
if self.inChroot:
scriptRoot = chroot
else:
scriptRoot = "/"
# Environment variables that cause problems for %post scripts
env_prune = ["LIBUSER_CONF"]
(fd, path) = tempfile.mkstemp("", "ks-script-", scriptRoot + "/tmp")
iutil.eintr_retry_call(os.write, fd, self.script.encode("utf-8"))
iutil.eintr_ignore(os.close, fd)
iutil.eintr_retry_call(os.chmod, path, 0o700)
# Always log stdout/stderr from scripts. Using --log just lets you
# pick where it goes. The script will also be logged to program.log
# because of execWithRedirect.
if self.logfile:
if self.inChroot:
messages = "%s/%s" % (scriptRoot, self.logfile)
else:
messages = self.logfile
d = os.path.dirname(messages)
if not os.path.exists(d):
os.makedirs(d)
else:
# Always log outside the chroot, we copy those logs into the
# chroot later.
messages = "/tmp/%s.log" % os.path.basename(path)
with open(messages, "w") as fp:
rc = iutil.execWithRedirect(self.interp, ["/tmp/%s" % os.path.basename(path)],
stdout=fp,
root=scriptRoot,
env_prune=env_prune)
if rc != 0:
log.error("Error code %s running the kickstart script at line %s", rc, self.lineno)
if self.errorOnFail:
err = ""
with open(messages, "r") as fp:
err = "".join(fp.readlines())
errorHandler.cb(ScriptError(self.lineno, err))
iutil.ipmi_report(IPMI_ABORTED)
sys.exit(0)
class AnacondaInternalScript(AnacondaKSScript):
def __init__(self, *args, **kwargs):
AnacondaKSScript.__init__(self, *args, **kwargs)
self._hidden = True
def __str__(self):
# Scripts that implement portions of anaconda (copying screenshots and
# log files, setfilecons, etc.) should not be written to the output
# kickstart file.
return ""
def getEscrowCertificate(escrowCerts, url):
if not url:
return None
if url in escrowCerts:
return escrowCerts[url]
needs_net = not url.startswith("/") and not url.startswith("file:")
if needs_net and not nm.nm_is_connected():
msg = _("Escrow certificate %s requires the network.") % url
raise KickstartError(msg)
log.info("escrow: downloading %s", url)
try:
request = iutil.requests_session().get(url, verify=True)
except requests.exceptions.SSLError as e:
msg = _("SSL error while downloading the escrow certificate:\n\n%s") % e
raise KickstartError(msg)
except requests.exceptions.RequestException as e:
msg = _("The following error was encountered while downloading the escrow certificate:\n\n%s") % e
raise KickstartError(msg)
try:
escrowCerts[url] = request.content
finally:
request.close()
return escrowCerts[url]
def deviceMatches(spec, devicetree=None):
""" Return names of block devices matching the provided specification.
:param str spec: a device identifier (name, UUID=<uuid>, &c)
:keyword devicetree: device tree to look up devices in (optional)
:type devicetree: :class:`blivet.DeviceTree`
| :returns: names of matching devices
:rtype: list of str
parse methods will not have access to a devicetree, while execute
methods will. The devicetree is superior in that it can resolve md
array names and in that it reflects scheduled device removals, but for
normal local disks | udev.resolve_devspec should suffice.
"""
full_spec = spec
if not full_spec.startswith("/dev/"):
full_spec = os.path.normpath("/dev/" + full_spec)
# the regular case
matches = udev.resolve_glob(full_spec)
# Use spec here instead of full_spec to preserve the spec and let the
# |
itmard/Simple-blog-with-flask | project/apps/main/__init__.py | Python | lgpl-3.0 | 149 | 0.006711 | # -*- coding: utf-8 -*-
| __author__ = 'itmard'
# flask imports
from flask import Blueprint
main = Blueprint('main', __name__) |
from . import views
|
efforia/eos-dashboard | pandora-ckz/pandora/correios/scraping.py | Python | lgpl-3.0 | 1,673 | 0.013748 | import re
import urllib2
from BeautifulSoup import BeautifulSoup
from encomenda import Encomenda, Status
class CorreiosWebsiteScraper(object):
def __init__(self, http_client=urllib2):
self.url = 'http://websro.correios.com.br/sro_bin/txect01$.QueryList?P_ITEMCODE=&P_LINGUA=001&P_TESTE=&P_TIPO=001&P_COD_UNI='
self.http_client = http_client
def get_encomenda_info(self, numero):
request = self.http_client.urlopen('%s%s' % (self.url, numero))
html = request.read()
request.close()
| if html:
encomenda = Encomenda(numero)
[encom | enda.adicionar_status(status) for status in self._get_all_status_from_html(html)]
return encomenda
def _get_all_status_from_html(self, html):
html_info = re.search('.*(<table.*</TABLE>).*', html, re.S)
try:
table = html_info.group(1)
except AttributeError,e:
return [-1]
soup = BeautifulSoup(table)
status = []
count = 0
for tr in soup.table:
if count > 4 and str(tr).strip() != '':
if re.match(r'\d{2}\/\d{2}\/\d{4} \d{2}:\d{2}', tr.contents[0].string):
status.append(
Status(data=unicode(tr.contents[0].string),
local=unicode(tr.contents[1].string),
situacao=unicode(tr.contents[2].font.string))
)
else:
status[len(status) - 1].detalhes = unicode(tr.contents[0].string)
count = count + 1
return status
|
aficiomaquinas/ega | encuentro-gob/ega.py | Python | apache-2.0 | 2,649 | 0.01661 | import webapp2
import jinja2
import sys
sys.path.insert(0, 'lib')
import httplib2
import os
from google.appengine.ext.webapp import template
from apiclient.discovery import build
from oauth2client.appengine import AppAssertionCredentials
class ShowHome(webapp2.RequestHandler):
def get(self):
temp_data = {}
temp_path = 'Templates/index.html'
self.response.out.write(template.render(temp_path,temp_data))
class ShowDash(webapp2.RequestHandler):
def get(self):
temp_data = {}
temp_path = 'Templates/dash.html'
self.response.out.write(template.render(temp_path,temp_data))
class ShowDashHijos(webapp2.RequestHandler):
def get(self):
temp_data = {}
temp_path = 'Templates/dashHijos.html'
self.response.out.write(template.render(temp_path,temp_data))
class ShowDashEdades(webapp2.RequestHandler):
def get(self):
temp_data = {}
temp_path = 'Templates/dashEdades.html'
self.response.out.write(template.render(temp_path,temp_data))
class ShowDashOficina(webapp2.RequestHandler):
def get(self):
temp_data = {}
temp_path = 'Templates/dashOficina.html'
| self.response.out.write(template.render(temp_path,temp_data))
class ShowDashInteres(webapp2.RequestHandler):
def get(self):
temp_data = {}
temp_path = 'Templates/dashInteres.html'
self.response.out. | write(template.render(temp_path,temp_data))
class ShowDashPorcentaje(webapp2.RequestHandler):
def get(self):
temp_data = {}
temp_path = 'Templates/dashPorcentaje.html'
self.response.out.write(template.render(temp_path,temp_data))
class ShowMapa(webapp2.RequestHandler):
def get(self):
temp_data = {}
temp_path = 'Templates/mapa.html'
self.response.out.write(template.render(temp_path,temp_data))
class ShowCustom(webapp2.RequestHandler):
def get(self):
temp_data = {}
temp_path = 'Templates/custom.html'
self.response.out.write(template.render(temp_path,temp_data))
class ShowTwitter(webapp2.RequestHandler):
def get(self):
temp_data = {}
temp_path = 'Templates/twitter.html'
self.response.out.write(template.render(temp_path,temp_data))
application = webapp2.WSGIApplication([
('/dash',ShowDash),
('/dashEdades',ShowDashEdades),
('/dashHijos',ShowDashHijos),
('/dashOficina',ShowDashOficina),
('/dashInteres',ShowDashInteres),
('/dashPorcentaje',ShowDashPorcentaje),
('/mapa',ShowMapa),
('/twitter',ShowTwitter),
('/custom',ShowCustom),
('/', ShowHome),
], debug=True) |
qedsoftware/commcare-hq | corehq/messaging/smsbackends/push/models.py | Python | bsd-3-clause | 3,673 | 0.000545 | import requests
from corehq.apps.sms.models import SQLSMSBackend
from corehq.messaging.smsbackends.push.forms import PushBackendForm
from django.conf import settings
from lxml import etree
from xml.sax.saxutils import escape
OUTBOUND_REQUEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<methodCall>
<methodName>EAPIGateway.SendSMS</methodName>
<params>
<param>
<value>
<struct>
<member>
<name>Password</name>
<value>{password}</value>
</member>
<member>
<name>Channel</name>
<value><int>{channel}</int></value>
</member>
<member>
<name>Service</name>
<value><int>{service}</int></value>
</member>
<member>
<name>SMSText</name>
<value>{text}</value>
</member>
<member>
<name>Numbers</name>
<value>{number}</value>
</member>
</struct>
</value>
</param>
</params>
</methodCall>
"""
class PushException(Exception):
pass
class PushBackend(SQLSMSBackend):
class Meta:
app_label = 'sms'
proxy = True
@classmethod
def get_available_extra_fields(cls):
return [
'channel',
'service',
'password',
]
@classmethod
def get_url(cls):
return 'http://41.77.230.124:9080'
@classmethod
def get_api_id(cls):
return 'PUSH'
@classmethod
def get_generic_name(cls):
return "Push"
@classmethod
def get_form_class(cls):
return PushBackendForm
def re | sponse_is_error(self, response):
return response.status_code != 200
def handle_error(self, response, msg):
raise PushException("Received HTTP response %s from push backend" % response.status_code)
def handle_success(self, response, msg):
response.encoding = 'utf-8'
if not response.text:
return
try:
xml = etree.fromstring(response.text.encode('utf-8'))
except etree.XMLSyntaxError:
return
data_poi | nts = xml.xpath('/methodResponse/params/param/value/struct/member')
for data_point in data_points:
name = data_point.xpath('name/text()')
name = name[0] if name else None
if name == 'Identifier':
value = data_point.xpath('value/string/text()')
value = value[0] if value else None
msg.backend_message_id = value
break
def get_outbound_payload(self, msg):
config = self.config
return OUTBOUND_REQUEST_XML.format(
channel=escape(config.channel),
service=escape(config.service),
password=escape(config.password),
number=escape(msg.phone_number),
text=escape(msg.text.encode('utf-8')),
)
def send(self, msg, *args, **kwargs):
headers = {'Content-Type': 'application/xml'}
payload = self.get_outbound_payload(msg)
response = requests.post(
self.get_url(),
data=payload,
headers=headers,
timeout=settings.SMS_GATEWAY_TIMEOUT
)
if self.response_is_error(response):
self.handle_error(response, msg)
else:
self.handle_success(response, msg)
|
Johnetordoff/osf.io | osf/exceptions.py | Python | apache-2.0 | 7,309 | 0.003284 | import contextlib
from django.core.exceptions import ValidationError as DjangoValidationError
# Remants from MODM days
# TODO: Remove usages of aliased Exceptions
ValidationError = DjangoValidationError
ValidationValueError = DjangoValidationError
ValidationTypeError = DjangoValidationError
class TokenError(Exception):
pass
class TokenHandlerNotFound(TokenError):
def __init__(self, action, *args, **kwar | gs):
super(TokenHandlerNotFound, self).__init__(*args, **kwargs)
self.action = action
class UnsupportedSanctionHan | dlerKind(Exception):
pass
class OSFError(Exception):
"""Base class for exceptions raised by the Osf application"""
pass
class NodeError(OSFError):
"""Raised when an action cannot be performed on a Node model"""
pass
class NodeStateError(NodeError):
"""Raised when the Node's state is not suitable for the requested action
Example: Node.remove_node() is called, but the node has non-deleted children
"""
pass
class UserStateError(OSFError):
"""Raised when the user's state is not suitable for the requested action
Example: user.gdpr_delete() is called, but the user has resources that cannot be deleted.
"""
pass
class SanctionTokenError(TokenError):
"""Base class for errors arising from the user of a sanction token."""
pass
class MaxRetriesError(OSFError):
"""Raised when an operation has been attempted a pre-determined number of times"""
pass
class InvalidSanctionRejectionToken(TokenError):
"""Raised if a Sanction subclass disapproval token submitted is invalid
or associated with another admin authorizer
"""
message_short = 'Invalid Token'
message_long = 'This disapproval link is invalid. Are you logged into the correct account?'
class InvalidSanctionApprovalToken(TokenError):
"""Raised if a Sanction subclass approval token submitted is invalid
or associated with another admin authorizer
"""
message_short = 'Invalid Token'
message_long = 'This approval link is invalid. Are you logged into the correct account?'
class InvalidTagError(OSFError):
"""Raised when attempting to perform an invalid operation on a tag"""
pass
class TagNotFoundError(OSFError):
"""Raised when attempting to perform an operation on an absent tag"""
pass
class UserNotAffiliatedError(OSFError):
"""Raised if a user attempts to add an institution that is not currently
one of its affiliations.
"""
message_short = 'User not affiliated'
message_long = 'This user is not affiliated with this institution.'
@contextlib.contextmanager
def reraise_django_validation_errors():
"""Context manager to reraise DjangoValidationErrors as `osf.exceptions.ValidationErrors` (for
MODM compat).
"""
try:
yield
except DjangoValidationError as err:
raise ValidationError(*err.args)
class NaiveDatetimeException(Exception):
pass
class InvalidTriggerError(Exception):
def __init__(self, trigger, state, valid_triggers):
self.trigger = trigger
self.state = state
self.valid_triggers = valid_triggers
self.message = 'Cannot trigger "{}" from state "{}". Valid triggers: {}'.format(trigger, state, valid_triggers)
super(Exception, self).__init__(self.message)
class InvalidTransitionError(Exception):
def __init__(self, machine, transition):
self.message = 'Machine "{}" received invalid transitions: "{}" expected but not defined'.format(machine, transition)
class PreprintError(OSFError):
"""Raised when an action cannot be performed on a Preprint model"""
pass
class PreprintStateError(PreprintError):
"""Raised when the Preprint's state is not suitable for the requested action"""
pass
class DraftRegistrationStateError(OSFError):
"""Raised when an action cannot be performed on a Draft Registration model"""
pass
class PreprintProviderError(PreprintError):
"""Raised when there is an error with the preprint provider"""
pass
class BlockedEmailError(OSFError):
"""Raised if a user tries to register an email that is included
in the blocked domains list
"""
pass
class SchemaBlockConversionError(OSFError):
"""Raised if unexpected data breaks the conversion between the legacy
nested registration schema/metadata format and the new, flattened,
'schema block' format.
"""
pass
class SchemaResponseError(OSFError):
"""Superclass for errors ariseing from unexpected SchemaResponse behavior."""
pass
class SchemaResponseStateError(SchemaResponseError):
"""Raised when attempting to perform an operation against a
SchemaResponse with an invalid state.
"""
pass
class PreviousSchemaResponseError(SchemaResponseError):
"""Raised when attempting to create a new SchemaResponse for a parent that
already has a SchemaResponse in an unsupported state
"""
pass
class RegistrationBulkCreationContributorError(OSFError):
"""Raised if contributor preparation has failed"""
def __init__(self, error=None):
self.error = error if error else 'Contributor preparation error'
class RegistrationBulkCreationRowError(OSFError):
"""Raised if a draft registration failed creation during bulk upload"""
def __init__(self, upload_id, row_id, title, external_id, draft_id=None, error=None, approval_failure=False):
# `draft_id` is provided when the draft is created but not related to the row object
self.draft_id = draft_id
# `approval_failure` determines whether the error happens during the approval process
self.approval_failure = approval_failure
# The error information for logging, sentry and email
self.error = error if error else 'Draft registration creation error'
# The short error message to be added to the error list that will be returned to the initiator via email
self.short_message = 'Title: {}, External ID: {}, Error: {}'.format(title, external_id, self.error)
# The long error message for logging and sentry
self.long_message = 'Draft registration creation failed: [upload_id="{}", row_id="{}", title="{}", ' \
'external_id="{}", error="{}"]'.format(upload_id, row_id, title, external_id, self.error)
class SchemaResponseUpdateError(SchemaResponseError):
"""Raised when assigning an invalid value (or key) to a SchemaResponseBlock."""
def __init__(self, response, invalid_responses=None, unsupported_keys=None):
self.invalid_responses = invalid_responses
self.unsupported_keys = unsupported_keys
invalid_response_message = ''
unsupported_keys_message = ''
if invalid_responses:
invalid_response_message = (
f'\nThe following responses had invalid values: {invalid_responses}'
)
if unsupported_keys:
unsupported_keys_message = (
f'\nReceived the following resposnes had invalid keys: {unsupported_keys}'
)
error_message = (
f'Error update SchemaResponse with id [{response._id}]:'
f'{invalid_response_message}{unsupported_keys_message}'
)
super().__init__(error_message)
|
ibis-inria/wellFARE | wellfare/json_api.py | Python | lgpl-3.0 | 16,896 | 0.000533 | """
This module contains helper functions to communicate with wellfare
using JSON dictionnaries. The main function is json_process, which
will call one of the subsequent functions depending on the TASK
to be performed.
"""
import numpy as np
from .curves import Curve
from .ILM import (infer_growth_rate,
infer_synthesis_rate_onestep,
infer_synthesis_rate_multistep,
infer_prot_conc_onestep,
infer_prot_conc_multistep)
from .preprocessing import filter_outliers, filter_outliersnew, calibration_curve
from .parsing import parse_tecan, merge_wells_dicts
DEFAULTS = {
'n_control_points': 100,
'dRNA': 1.0,
'eps_L': 0.000001,
'alphalow': -10,
'alphahigh': 10,
'nalphastep': 1000,
}
def get_var_with_default(data, var):
if var in data:
return data.get(var)
elif var in DEFAULTS:
return DEFAULTS[var]
else:
raise ValueError("Variable %s was not provided and no default value" % var
+ " is known for this variable (check spelling ?)")
def check_noNaN(array, name, fun, additional_message=''):
if np.isnan(np.sum(array)):
raise AssertionError("Error: Array '%s' in function %s has NaNs ! %s" % (
name, fun, additional_message))
# THE MAIN FUNCTION, CALLED BY THE PYTHON/JS PROCESS:
def json_process(command, input_data):
    """Dispatch ``input_data`` to the handler implementing ``command``.

    This function is a 'hub': it decides which function to apply to the
    data depending on the command. For inputs and outputs, see the doc
    of the individual ``wellfare_*`` functions below.
    """
    handlers = {
        'growth': wellfare_growth,
        'activity': wellfare_activity,
        'concentration': wellfare_concentration,
        'outliers': wellfare_outliers,
        'outliersnew': wellfare_outliersnew,
        'synchronize': wellfare_synchronize,
        'subtract': wellfare_subtract,
        'calibrationcurve': wellfare_calibrationcurve,
        'parsetecan': wellfare_parsetecan,
    }
    return handlers[command](input_data)
# THE SPECIFIC FUNCTIONS, ONE FOR EACH TASK:
# === INFERENCE ============================================
def wellfare_growth(data):
    """Infer the growth rate from volume data.

    Command : 'growth'

    Input keys: 'times_volume', 'values_volume'; optional: 'positive'
    (bool), 'n_control_points' (default 100), 'alphalow', 'alphahigh',
    'nalphastep', 'eps_L'.

    Returns ``{'times_growth_rate': [...], 'values_growth_rate': [...]}``.
    """
    volume_curve = Curve(data['times_volume'], data['values_volume'])
    positive = data['positive'] if 'positive' in data else False

    check_noNaN(volume_curve.y, "curve_v.y", "wellfare_growth")

    num_points = get_var_with_default(data, 'n_control_points')
    control_times = np.linspace(volume_curve.x.min(), volume_curve.x.max(),
                                num_points + 3)[:-3]

    # Regularization parameters explored by the inference routine.
    regularizations = np.logspace(get_var_with_default(data, 'alphalow'),
                                  get_var_with_default(data, 'alphahigh'),
                                  get_var_with_default(data, 'nalphastep'))

    growth, _, _, _, _ = infer_growth_rate(
        volume_curve, control_times,
        alphas=regularizations,
        eps_L=get_var_with_default(data, 'eps_L'),
        positive=positive)

    check_noNaN(growth.y, "growth.y", "wellfare_growth")

    return {'times_growth_rate': list(growth.x.astype(float)),
            'values_growth_rate': list(growth.y.astype(float))}
def wellfare_activity(data):
    """Compute protein synthesis rate (promoter activity) from volume and
    fluorescence curves.

    Command : 'activity'

    Input keys: 'times_volume', 'values_volume', 'times_fluo',
    'values_fluo', 'dR' (degradation constant of the reporter); optional:
    'kR' (folding constant — switches to a two-step model), 'dRNA' (RNA
    degradation constant), 'n_control_points' (default 100), 'alphalow',
    'alphahigh', 'nalphastep', 'eps_L'.

    Returns ``{'times_activity': [...], 'values_activity': [...]}``.
    """
    volume_curve = Curve(data['times_volume'], data['values_volume'])
    fluo_curve = Curve(data['times_fluo'], data['values_fluo'])
    reporter_degr = data['dR']

    num_points = get_var_with_default(data, 'n_control_points')
    control_times = np.linspace(volume_curve.x.min(), volume_curve.x.max(),
                                num_points + 3)[:-3]

    # Regularization parameters explored by the inference routine.
    regularizations = np.logspace(get_var_with_default(data, 'alphalow'),
                                  get_var_with_default(data, 'alphahigh'),
                                  get_var_with_default(data, 'nalphastep'))
    smoothing = get_var_with_default(data, 'eps_L')

    if 'kR' in data:
        # Two-step model of reporter expression. If no dRNA is provided,
        # the RNA is assumed very short-lived so that the transcription
        # step does not impact the dynamics of gene expression.
        synth_rate, _, _, _, _ = infer_synthesis_rate_multistep(
            curve_v=volume_curve,
            curve_f=fluo_curve,
            ttu=control_times,
            drna=get_var_with_default(data, 'dRNA'),
            kr=data['kR'],
            dR=reporter_degr,
            alphas=regularizations,
            eps_L=smoothing)
    else:
        # One-step model of reporter expression.
        synth_rate, _, _, _, _ = infer_synthesis_rate_onestep(
            curve_v=volume_curve,
            curve_f=fluo_curve,
            ttu=control_times,
            degr=reporter_degr,
            alphas=regularizations,
            eps_L=smoothing)

    return {'times_activity': list(synth_rate.x.astype(float)),
            'values_activity': list(synth_rate.y.astype(float))}
def wellfare_concentration(data):
""" Computes the concentration of a protein from
a fluorescence curve and an absorbance curve.
Command: 'concentration'
Input:
{ 'times_volume': [...] ,
'values_volume': [...],
'times_fluo: [...],
'values_fluo: [...],
'dR': float,
'dP': float,
'n_control_points': 100, // optional, 100 is default
'alphalow' : -10,
'alphahigh' : 10,
'nalphastep' : 1000,
'eps_L' : 0.000001
}
Output:
{ 'times_concentration': [...],
'values_concentration': [...]
}
"""
curve_v = Curve(data['times_volume'],
data['values_volume'])
curve_f = Curve(data['times_fluo'],
data['values_fluo'])
dR = data['dR']
dP = data['dP']
n_control_points = get_var_with_default(data, 'n_control_points')
ttu = np.linspace(curve_v.x.min(), curve_v.x.max(),
n_control_points + 3)[:-3]
eps_L = get_var_with_default(data, 'eps_L')
alphalow = get_var_with_default(data, 'alphalow')
alphahigh = get_var_with_default(data, 'alphahigh')
nalphastep = get_var_with_default(data, 'nalphastep')
alphas = np.logspace(alphalow, alphahigh, nalphastep)
if 'kR' in data:
# use a two-step model of reporter expression
# if no dRNA provided it is supposed to be very short-lived so that
# the transcription step won't impact the dynamics of gene expression
dRNA = get_var_with_default(data, 'dRNA')
kR = data['kR']
concentration, _, _, _, _ = infer_prot_conc_multistep(
curve_v=curve_v,
curve_f=curve_f,
ttu=ttu,
drna=dRNA,
kr=kR,
|
stackforge/solum | solum/common/policies/service.py | Python | apache-2.0 | 1,970 | 0 | # Copyright 2018 ZTE Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from solum.common.policies import base
# Policy rules for the service resource, generated from
# (name, description, path, method) tuples. All rules share RULE_DEFAULT.
service_policies = [
    policy.DocumentedRuleDefault(
        name=rule_name,
        check_str=base.RULE_DEFAULT,
        description=description,
        operations=[{'path': path, 'method': method}])
    for rule_name, description, path, method in (
        ('get_services',
         'Return all services, based on the query provided.',
         '/v1/services', 'GET'),
        ('show_service',
         'Return a service.',
         '/v1/services/{service_id}', 'GET'),
        ('update_service',
         'Modify this service.',
         '/v1/services/{service_id}', 'PUT'),
        ('create_service',
         'Create a new service.',
         '/v1/services', 'POST'),
        ('delete_service',
         'Delete a service.',
         '/v1/services/{service_id}', 'DELETE'),
    )
]
def list_rules():
    """Return the service policy rules for registration with oslo.policy."""
    return service_policies
|
xianjunzhengbackup/code | data science/machine_learning_for_the_web/chapter_6/test_server/addressesapp/migrations/0002_remove_person_mobilephone.py | Python | mit | 351 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drops the obsolete ``mobilephone`` field from the Person model."""

    # Applies on top of the app's initial schema migration.
    dependencies = [
        ('addressesapp', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='person',
            name='mobilephone',
        ),
    ]
|
aeklant/scipy | scipy/_lib/tests/test_import_cycles.py | Python | bsd-3-clause | 1,210 | 0.000826 | import sys
import subprocess
# Public scipy submodules whose import must succeed in a fresh interpreter.
MODULES = [
    "scipy.cluster",
    "scipy.cluster.vq",
    "scipy.cluster.hierarchy",
    "scipy.constants",
    "scipy.fft",
    "scipy.fftpack",
    "scipy.integrate",
    "scipy.interpolate",
    "scipy.io",
    "scipy.io.arff",
    "scipy.io.harwell_boeing",
    "scipy.io.idl",
    "scipy.io.matlab",
    "scipy.io.netcdf",
    "scipy.io.wavfile",
    "scipy.linalg",
    "scipy.linalg.blas",
    "scipy.linalg.cython_blas",
    "scipy.linalg.lapack",
    "scipy.linalg.cython_lapack",
    "scipy.linalg.interpolative",
    "scipy.misc",
    "scipy.ndimage",
    "scipy.odr",
    "scipy.optimize",
    "scipy.signal",
    "scipy.signal.windows",
    "scipy.sparse",
    "scipy.sparse.linalg",
    "scipy.sparse.csgraph",
    "scipy.spatial",
    "scipy.spatial.distance",
    "scipy.special",
    "scipy.stats",
    "scipy.stats.distributions",
    "scipy.stats.mstats",
]


def test_modules_importable():
    """Import every module in a clean subprocess.

    This is not necessarily possible if there are import cycles present.
    """
    for module_name in MODULES:
        subprocess.check_call(
            [sys.executable, '-c', 'import {}'.format(module_name)])
|
bolt-project/bolt | bolt/spark/construct.py | Python | apache-2.0 | 7,798 | 0.001411 | from numpy import unravel_index, prod, arange, asarray, float64
from itertools import product
from bolt.construct import ConstructBase
from bolt.spark.array import BoltArraySpark
from bolt.spark.utils import get_kv_shape, get_kv_axes
class ConstructSpark(ConstructBase):
    """Constructors for Spark-backed bolt arrays (``BoltArraySpark``)."""

    @staticmethod
    def array(a, context=None, axis=(0,), dtype=None, npartitions=None):
        """
        Create a spark bolt array from a local array.

        Parameters
        ----------
        a : array-like
            An array, any object exposing the array interface, an
            object whose __array__ method returns an array, or any
            (nested) sequence.

        context : SparkContext
            A context running Spark. (see pyspark)

        axis : tuple, optional, default=(0,)
            Which axes to distribute the array along. The resulting
            distributed object will use keys to represent these axes,
            with the remaining axes represented by values.

        dtype : data-type, optional, default=None
            The desired data-type for the array. If None, will
            be determined from the data. (see numpy)

        npartitions : int
            Number of partitions for parallelization.

        Returns
        -------
        BoltArraySpark
        """
        if dtype is None:
            arry = asarray(a)
            dtype = arry.dtype
        else:
            arry = asarray(a, dtype)
        shape = arry.shape
        ndim = len(shape)

        # handle the axes specification and transpose if necessary so the
        # key axes come first, followed by the value axes
        axes = ConstructSpark._format_axes(axis, arry.shape)
        key_axes, value_axes = get_kv_axes(arry.shape, axes)
        permutation = key_axes + value_axes
        arry = arry.transpose(*permutation)
        split = len(axes)

        if split < 1:
            raise ValueError("split axis must be greater than 0, got %g" % split)
        if split > len(shape):
            raise ValueError("split axis must not exceed number of axes %g, got %g" % (ndim, split))

        key_shape = shape[:split]
        val_shape = shape[split:]

        # enumerate every key tuple and pair it with its value sub-array
        keys = zip(*unravel_index(arange(0, int(prod(key_shape))), key_shape))
        vals = arry.reshape((prod(key_shape),) + val_shape)

        rdd = context.parallelize(zip(keys, vals), npartitions)
        return BoltArraySpark(rdd, shape=shape, split=split, dtype=dtype)

    @staticmethod
    def ones(shape, context=None, axis=(0,), dtype=float64, npartitions=None):
        """
        Create a spark bolt array of ones.

        Parameters
        ----------
        shape : tuple
            The desired shape of the array.

        context : SparkContext
            A context running Spark. (see pyspark)

        axis : tuple, optional, default=(0,)
            Which axes to distribute the array along. The resulting
            distributed object will use keys to represent these axes,
            with the remaining axes represented by values.

        dtype : data-type, optional, default=float64
            The desired data-type for the array. If None, will
            be determined from the data. (see numpy)

        npartitions : int
            Number of partitions for parallelization.

        Returns
        -------
        BoltArraySpark
        """
        from numpy import ones
        return ConstructSpark._wrap(ones, shape, context, axis, dtype, npartitions)

    @staticmethod
    def zeros(shape, context=None, axis=(0,), dtype=float64, npartitions=None):
        """
        Create a spark bolt array of zeros.

        Parameters
        ----------
        shape : tuple
            The desired shape of the array.

        context : SparkContext
            A context running Spark. (see pyspark)

        axis : tuple, optional, default=(0,)
            Which axes to distribute the array along. The resulting
            distributed object will use keys to represent these axes,
            with the remaining axes represented by values.

        dtype : data-type, optional, default=float64
            The desired data-type for the array. If None, will
            be determined from the data. (see numpy)

        npartitions : int
            Number of partitions for parallelization.

        Returns
        -------
        BoltArraySpark
        """
        from numpy import zeros
        return ConstructSpark._wrap(zeros, shape, context, axis, dtype, npartitions)

    @staticmethod
    def concatenate(arrays, axis=0):
        """
        Join two bolt arrays together, at least one of which is in spark.

        Parameters
        ----------
        arrays : tuple
            A pair of arrays. At least one must be a spark array,
            the other can be a local bolt array, a local numpy array,
            or an array-like.

        axis : int, optional, default=0
            The axis along which the arrays will be joined.

        Returns
        -------
        BoltArraySpark
        """
        if not isinstance(arrays, tuple):
            raise ValueError("data type not understood")
        if not len(arrays) == 2:
            raise NotImplementedError("spark concatenation only supports two arrays")

        first, second = arrays
        if isinstance(first, BoltArraySpark):
            return first.concatenate(second, axis)
        elif isinstance(second, BoltArraySpark):
            # lift the local array into spark using the second array's context
            first = ConstructSpark.array(first, second._rdd.context)
            return first.concatenate(second, axis)
        else:
            raise ValueError("at least one array must be a spark bolt array")

    @staticmethod
    def _argcheck(*args, **kwargs):
        """
        Check that arguments are consistent with spark array construction.

        Conditions are:
        (1) a positional argument is a SparkContext
        (2) keyword arg 'context' is a SparkContext
        (3) an argument is a BoltArraySpark, or
        (4) an argument is a nested list containing a BoltArraySpark
        """
        try:
            from pyspark import SparkContext
        except ImportError:
            # pyspark is unavailable, so spark construction cannot apply
            return False

        cond1 = any([isinstance(arg, SparkContext) for arg in args])
        cond2 = isinstance(kwargs.get('context', None), SparkContext)
        cond3 = any([isinstance(arg, BoltArraySpark) for arg in args])
        cond4 = any([any([isinstance(sub, BoltArraySpark) for sub in arg])
                     if isinstance(arg, (tuple, list)) else False for arg in args])
        return cond1 or cond2 or cond3 or cond4

    @staticmethod
    def _format_axes(axes, shape):
        """
        Format target axes given an array shape.

        Normalizes ints and iterables to a tuple and validates that every
        axis index is in range for ``shape``.
        """
        if isinstance(axes, int):
            axes = (axes,)
        elif isinstance(axes, list) or hasattr(axes, '__iter__'):
            axes = tuple(axes)
        if not isinstance(axes, tuple):
            raise ValueError("axes argument %s in the constructor not specified correctly" % str(axes))
        if min(axes) < 0 or max(axes) > len(shape) - 1:
            raise ValueError("invalid key axes %s given shape %s" % (str(axes), str(shape)))
        return axes

    @staticmethod
    def _wrap(func, shape, context=None, axis=(0,), dtype=None, npartitions=None):
        """
        Wrap an existing numpy constructor in a parallelized construction.

        ``func`` is a numpy-style constructor (e.g. ``ones``/``zeros``)
        called once per key to build each value block.
        """
        if isinstance(shape, int):
            shape = (shape,)
        key_shape, value_shape = get_kv_shape(shape, ConstructSpark._format_axes(axis, shape))
        split = len(key_shape)

        # make the keys: the cartesian product of all key-axis indices
        rdd = context.parallelize(list(product(*[arange(x) for x in key_shape])), npartitions)

        # use a map to make the arrays in parallel
        rdd = rdd.map(lambda x: (x, func(value_shape, dtype, order='C')))
        return BoltArraySpark(rdd, shape=shape, split=split, dtype=dtype)
|
dikaiosune/whether | py_whether/processor.py | Python | mit | 2,959 | 0.002028 | __author__ = 'adam'
import configparser
import os
import os.path
from sys import argv
import logging
import postgresql
import db
import parsing
# read configuration
# read configuration from the ini file(s) given on the command line
config = configparser.ConfigParser()
config.read(argv[1:])

gsod_conf = config['GSOD FILES']
pg_conf = config['POSTGRES']

gsod_root = gsod_conf['base_folder']
station_inventory = gsod_conf['station_inventory']
gsod_unzipped = gsod_conf['unpacked_folder']

pg_host = pg_conf['host']
pg_port = pg_conf['port']
pg_db = pg_conf['db']
pg_user = pg_conf['user']
pg_pass = pg_conf['pass']

log_file = config['LOG']['log_file']

# logger: everything to the log file, INFO and above to the console
log = logging.getLogger('whether')
log.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(name)s^%(levelname)s: %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(ch)

# start of main execution
log.info("Starting whether parser...")

# instantiate database connection
connection = postgresql.open(user=pg_user, password=pg_pass, host=pg_host, port=pg_port, database=pg_db)
log.info("Connected to database!")

# init db schema (idempotence depends on the statements in db module)
connection.execute(db.station_create_table_statement)
connection.execute(db.summary_create_table_statement)
log.info("Database schema initialized.")

# find and parse the station inventory file
stations_dict = parsing.parse_stations(gsod_root + os.sep + station_inventory)

log.info("Preparing and persisting stations to database.")

# de-duplicate stations, then bulk-insert them
unique_stations = {s for s in stations_dict.values()}
station_statement = connection.prepare(db.station_insert_statement)
station_statement.load_rows(unique_stations)

# find the folder of unpacked GSOD files
gsod_folder = gsod_root + os.sep + gsod_unzipped

# find all summary files (they end in .op)
op_files = [gsod_folder + os.sep + f for f in os.listdir(gsod_folder) if f.endswith('.op')]
log.info('Found %d summary files.', len(op_files))

# process files in chunks to bound the number of rows held in memory
chunk_size = 40000
op_chunks = [op_files[i:i + chunk_size] for i in range(0, len(op_files), chunk_size)]
number_files_persisted = 0
total_files = len(op_files)
summary_statement = connection.prepare(db.summary_copy_statement)
for chunk in op_chunks:
    summaries = []
    for file in chunk:
        # parse each file, accumulating rows for a single bulk load
        summaries.extend(parsing.parse_summary(file, stations_dict))
    summary_statement.load_rows(summaries)
    number_files_persisted += len(chunk)
    # report on progress
    log.info('%d/%d files parsed and persisted.', number_files_persisted, total_files)

log.info('All files parsed, cleaning up and adding indexes to database...')
connection.execute(db.index_statement)
connection.execute(db.analyze_statement)
log.info('All done!')

# clean up, close out any unused resources
connection.close()
log.info("Database connection closed!")
log.info("Exiting...\n\n")
|
mpetyx/palmdrop | venv/lib/python2.7/site-packages/cms/middleware/language.py | Python | apache-2.0 | 722 | 0.00277 | # -*- coding: utf-8 -*-
import datetime
from django.utils.translation import get_language
from django.conf import settings
class LanguageCookieMiddleware(object):
    """Persists the currently active language in a cookie on each response."""

    def process_response(self, request, response):
        language = get_language()
        # Skip rewriting the cookie when it already holds the active language.
        if settings.LANGUAGE_COOKIE_NAME in request.COOKIES and \
                request.COOKIES[settings.LANGUAGE_COOKIE_NAME] == language:
            return response
        max_age = 365 * 24 * 60 * 60  # 1 year, in seconds (old comment wrongly said "10 years")
        expires = datetime.datetime.now() + datetime.timedelta(seconds=max_age)
        response.set_cookie(settings.LANGUAGE_COOKIE_NAME, language, expires=expires.utctimetuple(),
                            max_age=max_age)
        return response
|
catapult-project/catapult | telemetry/telemetry/timeline/model_unittest.py | Python | bsd-3-clause | 3,395 | 0.002356 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import unittest
from telemetry import decorators
from telemetry.testing import tab_test_case
from telemetry.timeline import model as timeline_model
from telemetry.timeline import tracing_config
from tracing.trace_data import trace_data
class TimelineModelUnittest(unittest.TestCase):
  """Tests building TimelineModel directly from raw trace events."""

  def testEmptyImport(self):
    # Constructing a model from an empty event list must not raise.
    timeline_model.TimelineModel(trace_data.CreateFromRawChromeEvents([]))

  def testBrowserProcess(self):
    # Metadata events name pid 5 as the "Browser" process with a
    # CrBrowserMain thread; the model should expose it as browser_process.
    trace = trace_data.CreateFromRawChromeEvents([
        {
            "name": "process_name",
            "args": {"name": "Browser"},
            "pid": 5,
            "ph": "M"
        }, {
            "name": "thread_name",
            "args": {"name": "CrBrowserMain"},
            "pid": 5,
            "tid": 32578,
            "ph": "M"
        }])
    model = timeline_model.TimelineModel(trace)
    self.assertEqual(5, model.browser_process.pid)
class TimelineModelIntegrationTests(tab_test_case.TabTestCase):
  """End-to-end tests that record a real trace and inspect the model."""

  def setUp(self):
    super(TimelineModelIntegrationTests, self).setUp()
    # Low-overhead chrome tracing config shared by all tests below.
    self.tracing_controller = self._browser.platform.tracing_controller
    self.config = tracing_config.TracingConfig()
    self.config.chrome_trace_config.SetLowOverheadFilter()
    self.config.enable_chrome_trace = True

  def testGetTrace(self):
    self.tracing_controller.StartTracing(self.config)
    self.tabs[0].AddTimelineMarker('trace-event')
    trace = self.tracing_controller.StopTracing()
    model = timeline_model.TimelineModel(trace)

    # The injected marker must be retrievable from the model.
    markers = model.FindTimelineMarkers('trace-event')
    self.assertEqual(len(markers), 1)

  def testGetFirstRendererThread_singleTab(self):
    self.assertEqual(len(self.tabs), 1)  # We have a single tab/page.
    self.tracing_controller.StartTracing(self.config)
    self.tabs[0].AddTimelineMarker('single-tab-marker')
    trace = self.tracing_controller.StopTracing()
    model = timeline_model.TimelineModel(trace)

    # Check that we can find the marker injected into the trace.
    renderer_thread = model.GetFirstRendererThread(self.tabs[0].id)
    markers = list(renderer_thread.IterTimelineMarkers('single-tab-marker'))
    self.assertEqual(len(markers), 1)

  @decorators.Enabled('has tabs')
  def testGetFirstRendererThread_multipleTabs(self):
    # Make sure a couple of tabs exist.
    first_tab = self.tabs[0]
    second_tab = self.tabs.New()
    second_tab.Navigate('about:blank')
    second_tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()

    self.tracing_controller.StartTracing(self.config)
    first_tab.AddTimelineMarker('background-tab')
    second_tab.AddTimelineMarker('foreground-tab')
    trace = self.tracing_controller.StopTracing()
    model = timeline_model.TimelineModel(trace)

    # Check that we can find the marker injected into the foreground tab.
    renderer_thread = model.GetFirstRendererThread(second_tab.id)
    markers = list(renderer_thread.IterTimelineMarkers([
        'foreground-tab', 'background-tab']))
    self.assertEqual(len(markers), 1)
    self.assertEqual(markers[0].name, 'foreground-tab')

    # Check that trying to find the background tab raises an error.
    with self.assertRaises(AssertionError):
      model.GetFirstRendererThread(first_tab.id)
|
xk0/SmartRPyC | smartrpyc/client/exceptions.py | Python | apache-2.0 | 120 | 0 | cl | ass ServerUnavailable(Exception):
"""
Raised when server is not available
for consuming a request
"" | "
|
pernici/sympy | sympy/simplify/tests/test_simplify.py | Python | bsd-3-clause | 24,474 | 0.006292 | from sympy import Symbol, symbols, hypersimp, factorial, binomial, \
collect, Function, powsimp, separate, sin, exp, Rational, fraction, \
simplify, trigsimp, cos, tan, cot, log, ratsimp, Matrix, pi, integrate, \
solve, nsimplify, GoldenRatio, sqrt, E, I, sympify, atan, Derivative, \
S, diff, oo, Eq, Integer, gamma, acos, Integral, logcombine, Wild, \
separatevars, erf, rcollect, count_ops
from sympy.utilities import all
from sympy.utilities.pytest import XFAIL
from sympy.abc import x, y, z, t, a, b, c, d, e
def test_ratsimp():
    """ratsimp should put rational expressions over a common denominator."""
    f, g = 1/x + 1/y, (x + y)/(x*y)
    assert f != g and ratsimp(f) == g

    f, g = 1/(1 + 1/x), 1 - 1/(x + 1)
    assert f != g and ratsimp(f) == g

    f, g = x/(x + y) + y/(x + y), 1
    assert f != g and ratsimp(f) == g

    f, g = -x - y - y**2/(x + y) + x**2/(x + y), -2*y
    assert f != g and ratsimp(f) == g

    f = (a*c*x*y + a*c*z - b*d*x*y - b*d*z - b*t*x*y - b*t*x - b*t*z + e*x)/(x*y + z)

    # two algebraically equivalent acceptable forms of the result
    G = [a*c - b*d - b*t + (-b*t*x + e*x)/(x*y + z),
         a*c - b*d - b*t - ( b*t*x - e*x)/(x*y + z)]

    assert f != g and ratsimp(f) in G

    A = sqrt(pi)
    B = log(erf(x) - 1)
    C = log(erf(x) + 1)
    D = 8 - 8*erf(x)

    f = A*B/D - A*C/D + A*C*erf(x)/D - A*B*erf(x)/D + 2*A/D

    assert ratsimp(f) == A*B/8 - A*C/8 + A/(4 - 4*erf(x))
def test_trigsimp1():
    """Basic Pythagorean and quotient trig identities."""
    x, y = symbols('x,y')

    assert trigsimp(1 - sin(x)**2) == cos(x)**2
    assert trigsimp(1 - cos(x)**2) == sin(x)**2
    assert trigsimp(sin(x)**2 + cos(x)**2) == 1
    assert trigsimp(1 + tan(x)**2) == 1/cos(x)**2
    assert trigsimp(1/cos(x)**2 - 1) == tan(x)**2
    assert trigsimp(1/cos(x)**2 - tan(x)**2) == 1
    assert trigsimp(1 + cot(x)**2) == 1/sin(x)**2
    assert trigsimp(1/sin(x)**2 - 1) == cot(x)**2
    assert trigsimp(1/sin(x)**2 - cot(x)**2) == 1

    assert trigsimp(5*cos(x)**2 + 5*sin(x)**2) == 5
    # either equivalent reduced form is acceptable
    assert trigsimp(5*cos(x/2)**2 + 2*sin(x/2)**2) in \
        [2 + 3*cos(x/2)**2, 5 - 3*sin(x/2)**2]

    assert trigsimp(sin(x)/cos(x)) == tan(x)
    assert trigsimp(2*tan(x)*cos(x)) == 2*sin(x)
    assert trigsimp(cot(x)**3*sin(x)**3) == cos(x)**3
    assert trigsimp(y*tan(x)**2/sin(x)**2) == y/cos(x)**2
    assert trigsimp(cot(x)/cos(x)) == 1/sin(x)

    # identities must also hold with float coefficients/arguments
    assert trigsimp(cos(0.12345)**2 + sin(0.12345)**2) == 1
    e = 2*sin(x)**2 + 2*cos(x)**2
    assert trigsimp(log(e), deep=True) == log(2)
def test_trigsimp2():
    """Recursive trigsimp reduces nested two-variable identities to 1."""
    x, y = symbols('x,y')
    assert trigsimp(cos(x)**2*sin(y)**2 + cos(x)**2*cos(y)**2 + sin(x)**2,
            recursive=True) == 1
    assert trigsimp(sin(x)**2*sin(y)**2 + sin(x)**2*cos(y)**2 + cos(x)**2,
            recursive=True) == 1
def test_issue1274():
    """Regression test: trigsimp works with float coefficients (issue 1274)."""
    x = Symbol("x")
    assert abs(trigsimp(2.0*sin(x)**2+2.0*cos(x)**2)-2.0) < 1e-10
def test_trigsimp3():
    """sin/cos power quotients collapse to tan (and cos/sin to 1/tan)."""
    x, y = symbols('x,y')
    assert trigsimp(sin(x)/cos(x)) == tan(x)
    assert trigsimp(sin(x)**2/cos(x)**2) == tan(x)**2
    assert trigsimp(sin(x)**3/cos(x)**3) == tan(x)**3
    assert trigsimp(sin(x)**10/cos(x)**10) == tan(x)**10

    assert trigsimp(cos(x)/sin(x)) == 1/tan(x)
    assert trigsimp(cos(x)**2/sin(x)**2) == 1/tan(x)**2
    assert trigsimp(cos(x)**10/sin(x)**10) == 1/tan(x)**10

    assert trigsimp(tan(x)) == trigsimp(sin(x)/cos(x))
@XFAIL
def test_factorial_simplify():
    """Expected-failure check that simplify() delegates to factorial_simplify."""
    # There are more tests in test_factorials.py. These are just to
    # ensure that simplify() calls factorial_simplify correctly
    from sympy.specfun.factorials import factorial
    x = Symbol('x')
    assert simplify(factorial(x)/x) == factorial(x-1)
    assert simplify(factorial(factorial(x))) == factorial(factorial(x))
def test_simplify():
    """General simplify() behavior across rational, trig and matrix cases."""
    x, y, z, k, n, m, w, f, s, A = symbols('x,y,z,k,n,m,w,f,s,A')

    # atoms and simple symbols must pass through unchanged
    assert all(simplify(tmp) == tmp for tmp in [I, E, oo, x, -x, -oo, -E, -I])

    e = 1/x + 1/y
    assert e != (x+y)/(x*y)
    assert simplify(e) == (x+y)/(x*y)

    e = A**2*s**4/(4*pi*k*m**3)
    assert simplify(e) == e

    e = (4+4*x-2*(2+2*x))/(2+2*x)
    assert simplify(e) == 0

    e = (-4*x*y**2-2*y**3-2*x**2*y)/(x+y)**2
    assert simplify(e) == -2*y

    e = -x-y-(x+y)**(-1)*y**2+(x+y)**(-1)*x**2
    assert simplify(e) == -2*y

    e = (x+x*y)/x
    assert simplify(e) == 1 + y

    e = (f(x)+y*f(x))/f(x)
    assert simplify(e) == 1 + y

    e = (2 * (1/n - cos(n * pi)/n))/pi
    assert simplify(e) == 2*((1 - 1*cos(pi*n))/(pi*n))

    # simplify must undo differentiation of these antiderivatives
    e = integrate(1/(x**3+1), x).diff(x)
    assert simplify(e) == 1/(x**3+1)

    e = integrate(x/(x**2+3*x+1), x).diff(x)
    assert simplify(e) == x/(x**2+3*x+1)

    A = Matrix([[2*k-m*w**2, -k], [-k, k-m*w**2]]).inv()

    assert simplify((A*Matrix([0,f]))[1]) == \
        (f*(2*k - m*w**2))/(k**2 - 3*k*m*w**2 + m**2*w**4)

    a, b, c, d, e, f, g, h, i = symbols('a,b,c,d,e,f,g,h,i')

    # a 3x3 linear system; the solved expression should simplify to the
    # closed form below
    f_1 = x*a + y*b + z*c - 1
    f_2 = x*d + y*e + z*f - 1
    f_3 = x*g + y*h + z*i - 1

    solutions = solve([f_1, f_2, f_3], x, y, z, simplified=False)

    assert simplify(solutions[y]) == \
        (a*i+c*d+f*g-a*f-c*g-d*i)/(a*e*i+b*f*g+c*d*h-a*f*h-b*d*i-c*e*g)

    f = -x + y/(z + t) + z*x/(z + t) + z*a/(z + t) + t*x/(z + t)

    assert simplify(f) == (y + a*z)/(z + t)

    # noncommutative symbols: the commutator must not be collapsed
    A, B = symbols('A,B', commutative=False)

    assert simplify(A*B - B*A) == A*B - B*A
def test_simplify_ratio():
    """The ratio parameter bounds growth of the simplified expression."""
    # roots of x**3-3*x+5
    roots = ['(5/2 + 21**(1/2)/2)**(1/3)*(1/2 - I*3**(1/2)/2)'
             ' + 1/((1/2 - I*3**(1/2)/2)*(5/2 + 21**(1/2)/2)**(1/3))',
             '(5/2 + 21**(1/2)/2)**(1/3)*(1/2 + I*3**(1/2)/2)'
             ' + 1/((1/2 + I*3**(1/2)/2)*(5/2 + 21**(1/2)/2)**(1/3))',
             '-1/(5/2 + 21**(1/2)/2)**(1/3) - (5/2 + 21**(1/2)/2)**(1/3)']

    for r in roots:
        r = S(r)
        # with ratio=1, the result may never have more operations than input
        assert count_ops(simplify(r, ratio=1)) <= count_ops(r)
        # If ratio=oo, simplify() is always applied:
        assert simplify(r, ratio=oo) is not r
def test_simplify_issue_1308():
    """Regression test (issue 1308): exp sums factor over common exponents."""
    assert simplify(exp(-Rational(1, 2)) + exp(-Rational(3, 2))) == \
        (1 + E)*exp(-Rational(3, 2))
    assert simplify(exp(1)+exp(-exp(1))) == (1 + exp(1 + E))*exp(-E)
def test_simplify_fail1():
    """Known hard case: rational expression should reduce to 1/(-2*y)."""
    x = Symbol('x')
    y = Symbol('y')
    e = (x+y)**2/(-4*x*y**2-2*y**3-2*x**2*y)
    assert simplify(e) == 1 / (-2*y)
def test_fraction():
    """fraction() splits an expression into (numerator, denominator)."""
    x, y, z = map(Symbol, 'xyz')

    assert fraction(Rational(1, 2)) == (1, 2)

    assert fraction(x) == (x, 1)
    assert fraction(1/x) == (1, x)
    assert fraction(x/y) == (x, y)
    assert fraction(x/2) == (x, 2)

    assert fraction(x*y/z) == (x*y, z)
    assert fraction(x/(y*z)) == (x, y*z)

    assert fraction(1/y**2) == (1, y**2)
    assert fraction(x/y**2) == (x, y**2)

    assert fraction((x**2+1)/y) == (x**2+1, y)
    assert fraction(x*(y+1)/y**7) == (x*(y+1), y**7)

    # with exact=True, negative exponentials stay in the numerator
    assert fraction(exp(-x), exact=True) == (exp(-x), 1)
def test_separate():
    """separate() distributes powers over products (deep=True recurses)."""
    x, y, z = symbols('x,y,z')

    assert separate((x*y*z)**4) == x**4*y**4*z**4
    assert separate((x*y*z)**x) == x**x*y**x*z**x
    assert separate((x*(y*z)**2)**3) == x**3*y**6*z**6

    assert separate((sin((x*y)**2)*y)**z) == sin((x*y)**2)**z*y**z
    assert separate((sin((x*y)**2)*y)**z, deep=True) == sin(x**2*y**2)**z*y**z

    assert separate(exp(x)**2) == exp(2*x)
    assert separate((exp(x)*exp(y))**2) == exp(2*x)*exp(2*y)

    assert separate((exp((x*y)**z)*exp(y))**2) == exp(2*(x*y)**z)*exp(2*y)
    assert separate((exp((x*y)**z)*exp(y))**2, deep=True) == exp(2*x**z*y**z)*exp(2*y)
def test_separate_X1():
    """separate() should also distribute a symbolic power over a product of exps."""
    x, y, z = map(Symbol, 'xyz')
    assert separate((exp(x)*exp(y))**z) == exp(x*z)*exp(y*z)
def test_powsimp():
x, y, z, n = symbols('x,y,z,n')
f = Function('f')
assert powsimp( 4**x * 2**(-x) * 2**(-x) ) == 1
assert powsimp( (-4)**x * (-2)**(-x) * 2**(-x) ) == 1
assert powsimp( f(4**x * 2**(-x) * 2**(-x)) ) == f(4**x * 2**(-x) * 2**(-x))
assert powsimp( f(4**x * 2**(-x) * 2**(-x)), deep = True ) == f(1)
assert exp(x)*exp(y) == exp(x)*exp(y)
assert powsimp(exp(x)*exp(y)) == exp(x+y)
assert powsimp(exp(x)*exp(y)*2**x*2**y) == (2*E)**(x + y)
assert powsimp(exp(x)*exp(y)*2**x*2**y, combine='exp') == exp(x+y)*2**(x+y)
assert powsimp(exp(x)*exp(y)*exp(2)*sin(x)+sin(y)+2**x*2**y) == exp(2+x+y)*sin(x)+sin(y)+2**(x+y)
assert powsimp( |
kjedruczyk/phabricator-tools | py/phl/phlmail_sender.py | Python | apache-2.0 | 2,042 | 0 | """A mail sender that sends mail via a configured sendmail."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlmail_sender
#
# Public Classes:
# MailSender
# .send
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import phlmail_format
class MailSender(object):

    """A mail sender that sends mail via a configured sendmail."""

    def __init__(self, sendmail, from_email):
        """Setup to send mail with 'sendmail' from 'from_email'.

        :sendmail: the sendmail instance to send with, e.g. a phlsys_sendmail
        :from_email: the address to send from

        """
        self._sendmail = sendmail
        self._from_email = from_email

    def send(self, subject, message, to_addresses, cc_addresses=None):
        """Format a plain-text mail and send it via the configured sendmail.

        :subject: the subject line of the mail
        :message: the plain-text body of the mail
        :to_addresses: list of recipient addresses
        :cc_addresses: optional list of cc addresses
        :returns: None

        """
        mime = phlmail_format.text(
            subject, message, self._from_email, to_addresses, cc_addresses)
        self._sendmail.send(mime)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# | http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
terna/SLAPP3 | 6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/oligopoly/partial_corr.py | Python | cc0-1.0 | 2,311 | 0.007789 | # from https://gist.github.com/fabianp/9396204419c7b638d38f
"""
Partial Correlation in Python (clone of Matlab's partialcorr)
This uses the linear regression approach to compute the partial
correlation (might be slow for a huge number of variables). The
algorithm is detailed here:
http://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression
Taking X and Y two variables of interest and Z the matrix with all the variable minus {X, Y},
the algorithm can be summarized as
1) perform a normal linear least-squares regression with X as the | target and Z as the predictor
2) calculate the residuals in Step #1
3) perform a normal linear least-squares regression with Y as the ta | rget and Z as the predictor
4) calculate the residuals in Step #3
5) calculate the correlation coefficient between the residuals from Steps #2 and #4;
The result is the partial correlation between X and Y while controlling for the effect of Z
Date: Nov 2014
Author: Fabian Pedregosa-Izquierdo, f@bianp.net
Testing: Valentina Borghesani, valentinaborghesani@gmail.com
"""
import numpy as np
from scipy import stats, linalg
def partial_corr(C):
    """
    Return the sample linear partial correlation coefficients between pairs of
    variables in C, controlling for the remaining variables in C.

    Parameters
    ----------
    C : array-like, shape (n, p)
        Array with the different variables. Each column of C is taken as a variable.

    Returns
    -------
    P : array-like, shape (p, p)
        P[i, j] contains the partial correlation of C[:, i] and C[:, j]
        controlling for the remaining variables in C.
    """
    C = np.asarray(C)
    p = C.shape[1]
    # np.float / np.bool were deprecated aliases removed in NumPy 1.24;
    # use the builtin types instead.
    P_corr = np.zeros((p, p), dtype=float)
    for i in range(p):
        P_corr[i, i] = 1
        for j in range(i + 1, p):
            # Select every column except i and j as the control variables Z.
            idx = np.ones(p, dtype=bool)
            idx[i] = False
            idx[j] = False
            beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
            beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
            # Residuals of each variable after removing the linear effect of Z.
            res_j = C[:, j] - C[:, idx].dot(beta_i)
            res_i = C[:, i] - C[:, idx].dot(beta_j)
            # The partial correlation is the plain correlation of the residuals.
            corr = stats.pearsonr(res_i, res_j)[0]
            P_corr[i, j] = corr
            P_corr[j, i] = corr
    return P_corr
jgravois/ArcREST | samples/create_replica_portal_item.py | Python | apache-2.0 | 2,205 | 0.005896 | """
This sample shows how to create a
replica from portal of a feature service
"""
import arcrest
from arcrest.security import AGOLTokenSecurityHandler
def trace():
    """
    Return information about the exception currently being handled.
    Must be called from inside an ``except`` block.

    Returns a 3-tuple:
      line     -- "line N" fragment extracted from the traceback
      filename -- file containing this function
      synerror -- the final line of the formatted traceback (the error message)
    """
    # `sys` was missing from the original imports, so every call raised a
    # NameError instead of reporting the real error.
    import traceback, inspect, sys
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    filename = inspect.getfile(inspect.currentframe())
    # script name + line number
    line = tbinfo.split(", ")[1]
    # The last line of the formatted exception is the error message itself.
    synerror = traceback.format_exc().splitlines()[-1]
    return line, filename, synerror
if __name__ == "__main__":
username = "<username>"
password = "<password>"
url = "<portal or AGOL url>"
itemId = "<Id of feature service item>"
savePath = "<Path to save replica>"
try:
agolSH = AGOLTokenSecurityHandler(username=username,
password=password)
portalAdmin = arcrest.manageorg.Administration(securityHandler=agolSH)
content = portalAdmin.content
item = content.item(itemId)
uc = content.usercontent(username=item.owner)
res = uc.exportItem(title="TestExport",
itemId=itemId,
exportFormat="File Geodatabase")
exportItemId = res['exportItemId']
jobId = res['jobId']
serviceItemId = res['serviceItemId']
status = uc.status(itemId=exportItemId, jobId=jobId, jobType="export")
while status['status'].lower() != 'completed':
status = uc.status(itemId=exportItemId, jobId=jobId, job | Type="export")
if status['status'].lower() == 'failed':
print status
break
del status
exportItem = content.item(exportItemId)
itemDataPath = exportItem.itemData(f=None, savePath=savePath)
uc.deleteItem(item_id=exportItemId)
print itemDataPath
except:
line, filename, synerror | = trace()
print("error on line: %s" % line)
print("error in file name: %s" % filename)
print("with error message: %s" % synerror) |
natb1/query-tools | tests/examples/fixture_model.py | Python | mit | 554 | 0.005415 | class Penguin | (object):
def __init__(self, name, mood, id=None):
self.name = name
self.mood = mood
self.id = id
def __repr__(self):
return '< %s the %s penguin >' % (self.name, self.mood)
class Goose(object):
    """A goose that keeps track of its favorite penguin."""

    def __init__(self, name, favorite_penguin, id=None):
        self.name = name
        self.favorite_penguin = favorite_penguin
        self.id = id

    def __repr__(self):
        # Embeds the repr() of the favorite penguin, matching the original format.
        return '< {0}, the goose that likes {1} >'.format(
            self.name, repr(self.favorite_penguin))
|
lduarte1991/edx-platform | openedx/core/djangoapps/schedules/migrations/0006_scheduleexperience.py | Python | agpl-3.0 | 750 | 0.004 | # -*- coding: utf-8 -*-
| from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the ScheduleExperience model: a one-to-one extension of
    # schedules.Schedule carrying the experience type for that schedule.
    # (Fixed a garbled token in the original: models.AutoField.)

    dependencies = [
        ('schedules', '0005_auto_20171010_1722'),
    ]

    operations = [
        migrations.CreateModel(
            name='ScheduleExperience',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('experience_type', models.PositiveSmallIntegerField(default=0, choices=[(0, b'Recurring Nudge and Upgrade Reminder'), (1, b'Course Updates')])),
                ('schedule', models.OneToOneField(related_name='experience', to='schedules.Schedule')),
            ],
        ),
    ]
|
RobinQuetin/CAIRIS-web | cairis/cairis/TCNarrativeTextCtrl.py | Python | apache-2.0 | 3,495 | 0.021745 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
from ARM import *
from BehaviouralCharacteristicsDialog import Beha | viouralCharacteristicsDialog
from Borg import Borg
from AssumptionTaskModel import AssumptionTaskModel
from ATModelViewer import ATModelViewer
class TCNarrativeTextCtrl(wx.TextCtrl):
def __init__(self, parent, winId):
wx.TextCtrl.__init__(self,parent,winId,size=(150,100),style=wx.TE_MULTILINE)
self.ctrlMenu = wx.Menu()
self.cmItem = self.ctrlMenu.Append(armid.TCNTC_LISTCHARACTERISTICS_ID,'Characteristics')
self.viItem = self.ctrlMenu.Append(armid.TCNTC_VISCHARACTERISTICS_ID,'Visualise')
wx.EVT_MENU(self,armid.TCNTC_LISTCHARACTERISTICS_ID,self.onListCharacteristics)
wx.EVT_MENU(self,armid.TCNTC_VISCHARACTERISTICS_ID,self.onVisualiseCharacteristics)
self.Bind(wx.EVT_RIGHT_DOWN, self.onRightClick)
self.theTaskName = ''
self.cmItem.Enable(False)
self.viItem.Enable(False)
def onRightClick(self,evt):
self.PopupMenu(self.ctrlMenu)
def onListCharacteristics(self,evt):
try:
dialog = BehaviouralCharacteristicsDialog(self,self.theTaskName)
dialog.ShowModal()
dialog.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit Task Characteristics',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def Set(self,tName,ctrlVal):
self.theTaskName = tName
self.SetValue(ctrlVal)
if (tName == ''):
self.cmItem.Enable(False)
self.viItem.Enable(False)
else:
self.cmItem.Enable(True)
self.viItem.Enable(True)
def onListCharacteristics(self,evt):
try:
dialog = BehaviouralCharacteristicsDialog(self,self.theTaskName)
dialog.ShowModal()
dialog.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit Task Characteristics',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onVisualiseCharacteristics(self,evt):
dialog = None
try:
b = Borg()
modelAssocs = b.dbProxy.assumptionTaskModel(self.theTaskName)
if (len(modelAssocs) > 0):
associations = AssumptionTaskModel(modelAssocs)
dialog = ATModelViewer(self.theTaskName)
dialog.ShowModal(associations)
else:
errorTxt = 'No assumption task associations defined'
dlg = wx.MessageDialog(self,errorTxt,'View Assumption Task Model',wx.OK | wx.ICON_EXCLAMATION)
dlg.ShowModal()
dlg.Destroy()
except ARMException,errorText:
dialog.destroy()
dlg = wx.MessageDialog(self,str(errorText),'Edit Task Characteristics',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
|
ColumbiaCMB/kid_readout | apps/data_taking_scripts/2015-10-jpl-park/sweep_and_stream_at_min_s21_two_groups_cw_noise_power.py | Python | bsd-2-clause | 4,432 | 0.008348 | __author__ = 'gjones'
import time
import sys
import numpy as np
from kid_readout | .roach import heterodyne
from kid_readout.utils import data_file, sweeps
from kid_readout.equipment import hittite_controller, lockin_controller
# --- Instrument setup -------------------------------------------------------
# Millimeter-wave source synthesizer (Hittite), lock-in amplifier, and ROACH
# heterodyne readout.  NOTE(review): hardware addresses are hard-coded.
hittite = hittite_controller.hittiteController(addr='192.168.0.200')
lockin = lockin_controller.lockinController()
print lockin.get_idn()
ri = heterodyne.RoachHeterodyne(adc_valon='/dev/ttyUSB0')
ri.iq_delay = 0
ri.set_lo(1410.0)
# Older resonance lists kept for reference:
#group_1_lo = 1020.0
#group_2_lo = 1410.0
#all_f0s = np.load('/data/readout/resonances/2016-01-13-jpl-2015-10-park-dark-32-resonances-split-at-1300.npy') -0.5
#group_1_f0 = all_f0s[all_f0s < 1300]
#group_2_f0 = all_f0s[all_f0s > 1300]
"""
all_f0s = np.load('/data/readout/resonances/2016-02-12-jpl-park-100nm-32-resonances.npy')
group_1_f0 = all_f0s[all_f0s<1500]
group_2_f0 = all_f0s[all_f0s>1800]
group_1_lo = 1220.0
group_2_lo = 1810.0
"""
# Current resonance list, split into two LO groups around 1300 MHz.
all_f0s = np.load('/data/readout/resonances/2016-02-29-jpl-park-2015-10-40nm-al-niobium-gp-two-groups.npy')
group_1_f0 = all_f0s[all_f0s<1300]*0.9997 - 0.5
group_2_f0 = all_f0s[all_f0s>1300] - 0.5
group_1_lo = 1030.0
group_2_lo = 1420.0
#responsive_resonances = np.load('/data/readout/resonances/2015-11-26-jpl-nevins-responsive-resonances.npy')
suffix = "cw_noise_test"
mmw_source_modulation_freq = ri.set_modulation_output(rate=7)
mmw_source_frequency = 148e9
# The synthesizer is set to 1/12 of the mm-wave output frequency.
hittite.set_freq(mmw_source_frequency/12.0)
mmw_atten_turns = (5.0, 5.0)
#print "modulating at: {}".format(mmw_source_modulation_freq),
atonce = 16
# --- Main acquisition loop: one pass per LO group, sweeping source power ----
for group_num,(lo,f0s) in enumerate(zip([group_1_lo,group_2_lo],[group_1_f0,group_2_f0])):
    print "group",group_num,"lo",lo,"min f0",f0s.min()
    ri.set_lo(lo)
    # Prepare a frequency sweep around the binned resonance frequencies.
    nsamp = 2**16
    step = 1
    nstep = 128
    f0binned = np.round(f0s * nsamp / 512.0) * 512.0 / nsamp
    offset_bins = np.arange(-(nstep + 1), (nstep + 1)) * step
    offsets = offset_bins * 512.0 / nsamp
    measured_freqs = sweeps.prepare_sweep(ri, f0binned, offsets, nsamp=nsamp)
    for hittite_power in np.arange(-3.4,1.1,0.2):
        hittite.set_power(hittite_power)
        df = data_file.DataFile(suffix=suffix)
        df.nc.mmw_atten_turns = mmw_atten_turns
        for atten_index,dac_atten in enumerate([2.]):
            print "at dac atten", dac_atten
            ri.set_dac_atten(dac_atten)
            ri.set_modulation_output('low')
            df.log_hw_state(ri)
            df.log_adc_snap(ri)
            # Sweep, then retune every channel to its measured |S21| minimum.
            sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=2)
            df.add_sweep(sweep_data)
            fmins = []
            for k in range(len(f0s)):
                fr, s21, errors = sweep_data.select_index(k)
                fmins.append(fr[np.abs(s21).argmin()])
            fmins.sort()
            ri.add_tone_freqs(np.array(fmins),overwrite_last=True)
            ri.select_bank(ri.tone_bins.shape[0] - 1)
            # ri.set_tone_freqs(responsive_resonances[:32],nsamp=2**15)
            ri.select_fft_bins(range(len(f0s)))
            ri._sync()
            time.sleep(0.5)
            print "taking data with source on"
            # raw_input("press enter to start")
            # First pass: source unmodulated ('low'), long timestreams.
            ri.set_modulation_output('low')
            df.log_hw_state(ri)
            nsets = len(f0s) / atonce
            tsg = None
            for iset in range(nsets):
                selection = range(len(f0s))[iset::nsets]
                ri.select_fft_bins(selection)
                ri._sync()
                time.sleep(0.4)
                t0 = time.time()
                dmod, addr = ri.get_data(256) # about 30 seconds of data
                # x, y, r, theta = lockin.get_data()
                tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg)
                df.sync()
            # Second pass: source modulated; record the lock-in reading too.
            print "taking data with source modulated"
            ri.set_modulation_output(7)
            df.log_hw_state(ri)
            nsets = len(f0s) / atonce
            tsg = None
            for iset in range(nsets):
                selection = range(len(f0s))[iset::nsets]
                ri.select_fft_bins(selection)
                ri._sync()
                time.sleep(0.4)
                t0 = time.time()
                dmod, addr = ri.get_data(16) # about 2 seconds of data
                x, y, r, theta = lockin.get_data()
                tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg,zbd_voltage=r,mmw_source_freq=mmw_source_frequency)
                df.sync()
        df.close()
kizniche/Mycodo | mycodo/inputs/ads1256_analog_ph_ec.py | Python | gpl-3.0 | 31,269 | 0.001664 | # coding=utf-8
import traceback
from flask_babel import lazy_gettext
from mycodo.config import SQL_DATABASE_MYCODO
from mycodo.databases.models import Conversion
from mycodo.databases.models import DeviceMeasurements
from mycodo.databases.utils import session_scope
from mycodo.inputs.base_input import AbstractInput
from mycodo.inputs.sensorutils import convert_from_x_to_y_unit
from mycodo.utils.database import db_retrieve_table_daemon
from mycodo.utils.system_pi import get_measurement
from mycodo.utils.system_pi import return_measurement_info
MYCODO_DB_PATH = 'sqlite:///' + SQL_DATABASE_MYCODO
def constraints_pass_positive_value(mod_input, value):
    """
    Check that a user-supplied option value is strictly positive.

    :param mod_input: SQL object with user-saved Input options
    :param value: float or int to validate
    :return: tuple: (all_passed (bool), errors (list of str), mod_input)
    """
    errors = []
    all_passed = True
    # Zero is rejected as well as negative values.
    if value <= 0:
        all_passed = False
        errors.append("Must be a positive value")
    return all_passed, errors, mod_input
def execute_at_modification(
        messages,
        mod_input,
        request_form,
        custom_options_dict_presave,
        custom_options_channels_dict_presave,
        custom_options_dict_postsave,
        custom_options_channels_dict_postsave):
    """
    Input save hook: after the user changes which ADC channel carries pH and
    which carries EC, remap the measurement type/unit of every channel of
    this input (pH, EC, or raw voltage for the rest).

    Returns (messages, mod_input, custom_options_dict_postsave,
    custom_options_channels_dict_postsave) as required by the hook protocol.
    """
    try:
        if (custom_options_dict_postsave['adc_channel_ph'] ==
                custom_options_dict_postsave['adc_channel_ec']):
            messages["error"].append("Cannot set pH and EC to be measured from the same channel.")
        else:
            with session_scope(MYCODO_DB_PATH) as new_session:
                measurements = new_session.query(DeviceMeasurements).filter(
                    DeviceMeasurements.device_id == mod_input.unique_id).all()
                for each_measure in measurements:
                    if each_measure.channel == int(custom_options_dict_postsave['adc_channel_ph']):
                        if each_measure.measurement != 'ion_concentration':
                            messages["page_refresh"] = True
                            each_measure.conversion_id = ''
                            each_measure.measurement = 'ion_concentration'
                            each_measure.unit = 'pH'
                    elif each_measure.channel == int(custom_options_dict_postsave['adc_channel_ec']):
                        if each_measure.measurement != 'electrical_conductivity':
                            messages["page_refresh"] = True
                            each_measure.conversion_id = ''
                            each_measure.measurement = 'electrical_conductivity'
                            each_measure.unit = 'uS_cm'
                    else:
                        # Any other channel reverts to a raw voltage reading.
                        if each_measure.measurement != 'electrical_potential':
                            messages["page_refresh"] = True
                            each_measure.conversion_id = ''
                            each_measure.measurement = 'electrical_potential'
                            each_measure.unit = 'V'
                new_session.commit()
    except Exception:
        # Use format_exc(): the original passed traceback.print_exc(), which
        # prints to stderr and returns None, so the stored message was
        # always "Error: None".
        messages["error"].append("execute_at_modification() Error: {}".format(traceback.format_exc()))
    return (messages,
            mod_input,
            custom_options_dict_postsave,
            custom_options_channels_dict_postsave)
# Measurements
# Default channel layout for this input: ADC channel 0 -> pH, channel 1 -> EC,
# channels 2-7 -> raw voltage.  execute_at_modification() (defined above)
# rewrites this mapping in the database when the user selects different
# channels for the pH and EC sensors.
measurements_dict = {
    0: {
        'measurement': 'ion_concentration',
        'unit': 'pH'
    },
    1: {
        'measurement': 'electrical_conductivity',
        'unit': 'uS_cm'
    },
    2: {
        'measurement': 'electrical_potential',
        'unit': 'V'
    },
    3: {
        'measurement': 'electrical_potential',
        'unit': 'V'
    },
    4: {
        'measurement': 'electrical_potential',
        'unit': 'V'
    },
    5: {
        'measurement': 'electrical_potential',
        'unit': 'V'
    },
    6: {
        'measurement': 'electrical_potential',
        'unit': 'V'
    },
    7: {
        'measurement': 'electrical_potential',
        'unit': 'V'
    }
}
# Input information
INPUT_INFORMATION = {
'input_name_unique': 'ADS1256_ANALOG_PH_EC',
'input_manufacturer': 'Texas Instruments',
'input_name': 'ADS1256: Generic Analog pH/EC',
'input_library': 'wiringpi, kizniche/PiPyADC-py3',
'measurements_name': 'Ion Concentration/Electrical Conductivity',
'measurements_dict': measurements_dict,
'execute_at_modification': execute_at_modification,
'message': 'This input relies on an ADS1256 analog-to-digital converter (ADC) to measure pH and/or electrical conductivity (EC) from analog sensors. You can enable or disable either measurement if you want to only connect a pH sensor or an EC sensor by selecting which measurements you want to under Measurements Enabled. Select which channel each sensor is connected to on the ADC. There are default calibration values initially set for the Input. There are also functions to allow you to easily calibrate your sensors with calibration solutions. If you use the Calibrate Slot actions, these values will be calculated and will replace the currently-set values. You can use the Clear Calibration action to delete the database values and return to using the default values. If you delete the Input or create a new Input to use your ADC/sensors with, you will need to recalibrate in order to store new calibration data.',
'options_enabled': [
'measurements_select',
'adc_gain',
'adc_sample_speed',
'period',
'pre_output'
],
'options_disabled': ['interface'],
'dependencies_module': [
('pip-pypi', 'wiringpi', 'wiringpi'),
('pip-pypi', 'pipyadc_py3', 'git+https://github.com/kizniche/PiPyADC-py3.git') # PiPyADC ported to Python3
],
'interfaces': ['UART'],
# TODO: Next major revision, move settings such as these to custom_options
'adc_gain': [
(1, '1 (±5 V)'),
(2, '2 (±2.5 V)'),
(4, '4 (±1.25 V)'),
(8, '8 (±0.5 V)'),
(16, '16 (±0.25 V)'),
(32, '32 (±0.125 V)'),
(64, '64 (±0.0625 V)')
],
'adc_sample_speed': [
('30000', '30,000'),
('15000', '15,000'),
('7500', '7,500'),
('3750', '3,750'),
('2000', '2,000'),
('1000', '1,000'),
('500', '500'),
('100', '100'),
('60', '60'),
('50', '50'),
('30', '30'),
('25', '25'),
('15', '15'),
('10', '10'),
('5', '5'),
('2d5', '2.5')
],
'custom_options': [
{
'id': 'adc_channel_ph',
'type': 'select',
'default_value': '0',
'options_select': [
('-1', 'Not Connected'),
('0', 'Channel 0'),
('1', 'Channel 1'),
('2', 'Channel 2'),
('3', 'Channel 3'),
('4', 'Channel 4'),
('5', 'Channel 5'),
('6', 'Channel 6'),
('7', 'Channel 7'),
],
'name': 'ADC Channel: pH',
'phrase': 'The ADC channel the pH sensor is connected'
},
{
'id': 'adc_channel_ec',
'type': 'select',
'default_value': '1',
'options_select': [
('-1', 'Not Connected'),
('0', 'Channel 0'),
('1', 'Channel 1'),
('2', 'Channel 2'),
('3', 'Channel 3'),
('4', 'Channel 4'),
('5', 'Channel 5'),
('6', 'Channel 6'),
('7', 'Channel 7'),
],
'name': 'ADC Channel: EC',
'phrase': 'The ADC channel the EC sensor is connected'
},
{
'type': 'message',
'default_value': 'Temperature Compensation',
},
{
'id': 'temperature_comp_meas',
'type': 'select_measurement',
'default_value': '',
'options_select': [
'Input',
'Function',
'Math'
],
'name': "{}: {}".format(lazy_gettext('Tempe |
xdevelsistemas/taiga-back-community | taiga/timeline/management/commands/_update_timeline_for_updated_tasks.py | Python | agpl-3.0 | 3,369 | 0.000594 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from django.db.models import Prefetch, F
from django.test.utils import override_settings
from taiga.timeline.models import Timeline
from taiga.timeline.timeline_implementations import userstory_timeline
from optparse import make_option
from taiga.projects.tasks.models import Task
from taiga.projects.userstories.models import UserStory
def update_timeline(initial_date, final_date):
    """
    Refresh the denormalized user-story snapshot stored in the ``data`` field
    of every 'tasks.task.change' timeline entry.

    :param initial_date: only update entries created on/after this date (optional)
    :param final_date: only update entries created before this date (optional)
    """
    timelines = Timeline.objects.all()
    if initial_date:
        timelines = timelines.filter(created__gte=initial_date)
    if final_date:
        timelines = timelines.filter(created__lt=final_date)
    timelines = timelines.filter(event_type="tasks.task.change")

    print("Generating tasks indexed by id dict")
    # Map task id -> Task (with its user story pre-fetched) so the loop below
    # avoids one query per timeline entry.
    task_ids = timelines.values_list("object_id", flat=True)
    tasks_iterator = Task.objects.filter(id__in=task_ids).select_related("user_story").iterator()
    tasks_per_id = {task.id: task for task in tasks_iterator}
    del task_ids

    total = timelines.count()
    print("Updating timelines")
    for counter, timeline in enumerate(timelines.iterator(), start=1):
        print("%s/%s" % (counter, total))
        task = tasks_per_id.get(timeline.object_id)
        # Skip entries whose task no longer exists or has no user story.
        if not task or not task.user_story:
            continue
        timeline.data["task"]["userstory"] = userstory_timeline(task.user_story)
        timeline.save(update_fields=["data"])
class Command(BaseCommand):
    """Management command wrapping update_timeline() with optional date bounds."""
    help = 'Regenerate project timeline'
    # NOTE(review): BaseCommand.option_list + optparse make_option is the
    # pre-Django-1.8 option style (removed in Django 1.10); presumably this
    # project pins an older Django -- confirm before upgrading.
    option_list = BaseCommand.option_list + (
        make_option('--initial_date',
            action='store',
            dest='initial_date',
            default=None,
            help='Initial date for timeline update'),
    ) + (
        make_option('--final_date',
            action='store',
            dest='final_date',
            default=None,
            help='Final date for timeline update'),
    )
    # DEBUG is forced off so Django does not accumulate query logs while
    # iterating over every timeline entry.
    @override_settings(DEBUG=False)
    def handle(self, *args, **options):
        update_timeline(options["initial_date"], options["final_date"])
|
hamtamtots/website | matt_site/blog/views/articlelistview.py | Python | mit | 240 | 0.012605 | from django.views.generic import ListView
fr | om blog.models.article import Article
class ArticleListView(ListView):
    """List view rendering every Article, newest first."""
    model = Article
    template_name = 'blog/articlelist.html'
    # Fixed a garbled token in the original ("Article.objec ts").
    queryset = Article.objects.order_by('-created')
sumanthjamadagni/OZ | SinglePointCalculation.py | Python | gpl-3.0 | 1,299 | 0.018476 | import OZ.Potentials as Potentials
import OZ.OZ_Functions as OZF
import OZ.PP_Functions as PP_Functions
#import FigFuncs
#import HNC
import OZ.RHNC as RHNC
import numpy as np
def SinglePointCalculation(r, k, Ur, Ur_ref, rho, T=1.0, OutFile=None, cr_guess=None, tol=1e-8):
#RHNC Solver
print "Function = SinglePointCalculation: rho = ", rho, "T = ", T
Flag, hr, cr, er, hk, Sk = RHNC.OZSolver_RHNC_Iterative(r, | k, Ur, Ur_ref, rho, T = T, maxiter=10000, w_old_start=0.50,w_old_max=0.99,tol=tol, cr_guess=cr_guess)
B2 = Potentials.CalcB2(r,Ur,T=T)
mu_ex, mu = PP_Functions.ExcessChemPot(r, hr, cr, rho, T=T)
rmin = 0.50
if Flag == 0:
gr = hr + 1.0
|
P_virial = PP_Functions.Calc_Pvirial(r, gr, Ur, rho, T=T, rmin=0.50)
s2 = PP_Functions.s2(r, gr, rho)
ListHeader=['T', 'rho', 'B2', 'P_virial' ,'Sk0', 'kappa', 'mu', 's2','Sk_max']
ListValues = [T, rho, B2, P_virial, Sk[0], Sk[0]/(rho*T), mu, s2, np.max(Sk)]
if OutFile != None:
OutFileH = open(OutFile, 'w')
OutFileH.write(OZF.ListToTabbedStr(ListHeader))
OutFileH.write(OZF.ListToTabbedStr(ListValues))
return ListHeader, ListValues, gr, cr, Sk
else:
print "Not converged"
return 0
|
PnEcrins/GeoNature-atlas | atlas/configuration/config_schema.py | Python | gpl-3.0 | 7,525 | 0.001464 | from marshmallow import (
Schema,
fields,
validates_schema,
ValidationError,
validates_schema,
EXCLUDE,
)
from marshmallow.validate import Regexp
# Default basemap definitions, used as defaults for MapConfig.FIRST_MAP and
# MapConfig.SECOND_MAP below.  Each entry is a Leaflet tile-layer spec.
MAP_1 = {
    "name": "OpenStreetMap",
    "layer": "//{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png",
    "attribution": "© OpenStreetMap",
}

MAP_2 = {
    "name": "OpenTopoMap",
    "layer": "//a.tile.opentopomap.org/{z}/{x}/{y}.png",
    "attribution": "© OpenStreetMap-contributors, SRTM | Style: © OpenTopoMap (CC-BY-SA)",
}

# Supported interface languages: ISO code -> display name, flag-icon CSS
# class, and localized month names (used as the default for
# AtlasConfig.AVAILABLE_LANGUAGES).
LANGUAGES = {
    "en": {
        "name": "English",
        "flag_icon": "flag-icon-gb",
        "months": [
            "January",
            "February",
            "March",
            "April",
            "May",
            "June",
            "July",
            "August",
            "September",
            "October",
            "November",
            "December",
        ],
    },
    "fr": {
        "name": "Français",
        "flag_icon": "flag-icon-fr",
        "months": [
            "Janvier",
            "Février",
            "Mars",
            "Avril",
            "Mai",
            "Juin",
            "Juillet",
            "Août",
            "Septembre",
            "Octobre",
            "Novembre",
            "Decembre",
        ],
    },
    "it": {
        "name": "Italiano",
        "flag_icon": "flag-icon-it",
        "months": [
            "Gennaio",
            "Febbraio",
            "Marzo",
            "Aprile",
            "Maggio",
            "Giugno",
            "Luglio",
            "Agosto",
            "Settembre",
            "Ottobre",
            "Novembre",
            "Dicembre",
        ],
    },
}
class SecretSchemaConf(Schema):
    """Schema validating the private (secret) part of the atlas configuration."""
    class Meta:
        # Ignore configuration keys this schema does not declare.
        unknown = EXCLUDE
    # PostgreSQL connection URI; must match postgresql://user:pass@host:port/db.
    database_connection = fields.String(
        required=True,
        validate=Regexp(
            "^postgresql:\/\/.*:.*@[^:]+:\w+\/\w+$",
            error="Database uri is invalid ex: postgresql://monuser:monpass@server:port/db_name",
        ),
    )
    GUNICORN_PORT = fields.Integer(load_default=8080)
    modeDebug = fields.Boolean(load_default=False)
    SECRET_KEY = fields.String(required=True)
class MapConfig(Schema):
    """Schema for the Leaflet map settings of the atlas (all fields have defaults)."""
    # Initial map center (latitude, longitude).
    LAT_LONG = fields.List(fields.Float(), load_default=[44.7952, 6.2287])
    MIN_ZOOM = fields.Integer(load_default=1)
    # Panning bounds as [[west, south], [east, north]].
    MAX_BOUNDS = fields.List(
        fields.List(fields.Float()), load_default=[[-180, -90], [180, 90]]
    )
    # Two selectable basemaps (tile-layer specs defined above).
    FIRST_MAP = fields.Dict(load_default=MAP_1)
    SECOND_MAP = fields.Dict(load_default=MAP_2)
    ZOOM = fields.Integer(load_default=10)
    STEP = fields.Integer(load_default=1)
    BORDERS_COLOR = fields.String(load_default="#000000")
    BORDERS_WEIGHT = fields.Integer(load_default=3)
    ENABLE_SLIDER = fields.Boolean(load_default=True)
    ENABLE_SCALE = fields.Boolean(load_default=True)
    # Style applied to the mask outside the territory borders.
    MASK_STYLE = fields.Dict(
        load_default={"fill": False, "fillColor": "#020202", "fillOpacity": 0.3}
    )
class AtlasConfig(Schema):
    """Schema for the public configuration of the atlas application.

    Every field has a default so an empty configuration validates.  Fixed two
    garbled tokens in the original (PROTECTION / DISPLAY_PATRIMONIALITE
    ``load_default=False``) and the ValidationError payload in
    validate_url_taxhub, which was a set literal instead of a message.
    """
    class Meta:
        # Ignore configuration keys this schema does not declare.
        unknown = EXCLUDE
    STRUCTURE = fields.String(load_default="Nom de la structure")
    NOM_APPLICATION = fields.String(load_default="Nom de l'application")
    CUSTOM_LOGO_LINK = fields.String(load_default="")
    URL_APPLICATION = fields.String(load_default="")
    DEFAULT_LANGUAGE = fields.String(load_default="fr")
    MULTILINGUAL = fields.Boolean(load_default=False)
    ID_GOOGLE_ANALYTICS = fields.String(load_default="UA-xxxxxxx-xx")
    # NOTE(review): the default is the *string* "False" (truthy), unlike the
    # other booleans -- confirm whether False was intended before changing.
    ORGANISM_MODULE = fields.Boolean(load_default="False")
    GLOSSAIRE = fields.Boolean(load_default=False)
    IGNAPIKEY = fields.String(load_default="")
    AFFICHAGE_INTRODUCTION = fields.Boolean(load_default=True)
    AFFICHAGE_LOGOS_HOME = fields.Boolean(load_default=True)
    AFFICHAGE_FOOTER = fields.Boolean(load_default=True)
    AFFICHAGE_STAT_GLOBALES = fields.Boolean(load_default=True)
    AFFICHAGE_DERNIERES_OBS = fields.Boolean(load_default=True)
    AFFICHAGE_EN_CE_MOMENT = fields.Boolean(load_default=True)
    AFFICHAGE_RANG_STAT = fields.Boolean(load_default=True)
    AFFICHAGE_NOUVELLES_ESPECES = fields.Boolean(load_default=True)
    AFFICHAGE_RECHERCHE_AVANCEE = fields.Boolean(load_default=False)
    # Taxonomic groupings used for the home-page statistics panels, with
    # their French display labels in RANG_STAT_FR (same order).
    RANG_STAT = fields.List(
        fields.Dict,
        load_default=[
            {"phylum": ["Arthropoda", "Mollusca"]},
            {"phylum": ["Chordata"]},
            {"regne": ["Plantae"]},
        ],
    )
    RANG_STAT_FR = fields.List(
        fields.String, load_default=["Faune invertébrée", "Faune vertébrée", "Flore"]
    )
    LIMIT_RANG_TAXONOMIQUE_HIERARCHIE = fields.Integer(load_default=13)
    LIMIT_FICHE_LISTE_HIERARCHY = fields.Integer(load_default=28)
    # Location of the TaxHub media files serving species pictures etc.
    REMOTE_MEDIAS_URL = fields.String(load_default="http://mondomaine.fr/taxhub/")
    REMOTE_MEDIAS_PATH = fields.String(load_default="static/medias/")
    REDIMENSIONNEMENT_IMAGE = fields.Boolean(load_default=False)
    TAXHUB_URL = fields.String(required=False, load_default=None)
    # TaxHub attribute ids for species texts and media types.
    ATTR_DESC = fields.Integer(load_default=100)
    ATTR_COMMENTAIRE = fields.Integer(load_default=101)
    ATTR_MILIEU = fields.Integer(load_default=102)
    ATTR_CHOROLOGIE = fields.Integer(load_default=103)
    ATTR_MAIN_PHOTO = fields.Integer(load_default=1)
    ATTR_OTHER_PHOTO = fields.Integer(load_default=2)
    ATTR_LIEN = fields.Integer(load_default=3)
    ATTR_PDF = fields.Integer(load_default=4)
    ATTR_AUDIO = fields.Integer(load_default=5)
    ATTR_VIDEO_HEBERGEE = fields.Integer(load_default=6)
    ATTR_YOUTUBE = fields.Integer(load_default=7)
    ATTR_DAILYMOTION = fields.Integer(load_default=8)
    ATTR_VIMEO = fields.Integer(load_default=9)
    PROTECTION = fields.Boolean(load_default=False)
    DISPLAY_PATRIMONIALITE = fields.Boolean(load_default=False)
    PATRIMONIALITE = fields.Dict(
        load_default={
            "label": "Patrimonial",
            "config": {
                "oui": {
                    "icon": "custom/images/logo_patrimonial.png",
                    "text": "Ce taxon est patrimonial",
                }
            },
        }
    )
    STATIC_PAGES = fields.Dict(
        load_default={
            "presentation": {
                "title": "Présentation de l'atlas",
                "picto": "fa-question-circle",
                "order": 0,
                "template": "static/custom/templates/presentation.html",
            }
        }
    )
    AFFICHAGE_MAILLE = fields.Boolean(load_default=False)
    ZOOM_LEVEL_POINT = fields.Integer(load_default=11)
    LIMIT_CLUSTER_POINT = fields.Integer(load_default=1000)
    NB_DAY_LAST_OBS = fields.String(load_default="7")
    NB_LAST_OBS = fields.Integer(load_default=100)
    TEXT_LAST_OBS = fields.String(
        load_default="Les observations des agents ces 7 derniers jours |"
    )
    ANONYMIZE = fields.Boolean(load_default=False)
    MAP = fields.Nested(MapConfig, load_default=dict())
    # coupe le nom_vernaculaire à la 1ere virgule sur les fiches espèces
    SPLIT_NOM_VERN = fields.Integer(load_default=True)
    INTERACTIVE_MAP_LIST = fields.Boolean(load_default=True)
    AVAILABLE_LANGUAGES = fields.Dict(load_default=LANGUAGES)

    @validates_schema
    def validate_url_taxhub(self, data, **kwargs):
        """
        TAXHUB_URL doit être rempli si REDIMENSIONNEMENT_IMAGE = True
        """
        if data["REDIMENSIONNEMENT_IMAGE"] and data["TAXHUB_URL"] is None:
            # The original wrapped this message in a set literal ({...}),
            # which is not a valid ValidationError message payload.
            raise ValidationError(
                "Le champ TAXHUB_URL doit être rempli si REDIMENSIONNEMENT_IMAGE = True"
            )
|
aeklant/scipy | scipy/stats/_distn_infrastructure.py | Python | bsd-3-clause | 127,400 | 0.000314 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
import sys
import keyword
import re
import types
import warnings
from itertools import zip_longest
from scipy._lib import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy._lib._util import _valarray as valarray
from scipy.special import (comb, chndtr, entr, rel_entr, xlogy, ive)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative | distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
| """
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
# Discrete distributions use pmf/logpmf in place of pdf/logpdf, and a
# discrete-specific expect() signature (no ``scale`` argument).
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
                     'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
                     'mean', 'var', 'std', 'interval']
# Discrete distributions take no ``scale`` parameter; strip it from the
# shared method-signature templates.
for obj in _doc_disc_methods:
    docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].r |
cliftonmcintosh/openstates | openstates/id/__init__.py | Python | gpl-3.0 | 4,242 | 0 | from pupa.scrape import Jurisdiction, Organization
from .people import IDPersonScraper
from .committees import IDCommitteeScraper
from .bills import IDBillScraper
class Idaho(Jurisdiction):
    """
    IDAHO Scraper

    Pupa jurisdiction definition for the Idaho State Legislature:
    declares the available scrapers, the known legislative sessions,
    and the chamber/post structure.
    """
    division_id = "ocd-division/country:us/state:id"
    classification = "government"
    name = "Idaho"
    url = "http://www.legislature.idaho.gov"
    scrapers = {
        'people': IDPersonScraper,
        'committees': IDCommitteeScraper,
        'bills': IDBillScraper
    }
    parties = [
        {'name': 'Republican'},
        {'name': 'Democratic'}
    ]
    # NOTE(review): the display names below contain upstream oddities
    # ("63nd" for 2014, and 2016 labelled "63rd" between a 64th and 65th) —
    # kept verbatim because ``_scraped_name`` must match the source site.
    legislative_sessions = [
        {
            "_scraped_name": "2011 Session",
            "classification": "primary",
            "end_date": "2011-04-07",
            "identifier": "2011",
            "name": "61st Legislature, 1st Regular Session (2011)",
            "start_date": "2011-01-10"
        },
        {
            "_scraped_name": "2012 Session",
            "classification": "primary",
            "identifier": "2012",
            "name": "61st Legislature, 2nd Regular Session (2012)"
        },
        {
            "_scraped_name": "2013 Session",
            "classification": "primary",
            "identifier": "2013",
            "name": "62nd Legislature, 1st Regular Session (2013)"
        },
        {
            "_scraped_name": "2014 Session",
            "classification": "primary",
            "identifier": "2014",
            "name": "63nd Legislature, 1st Regular Session (2014)"
        },
        {
            "_scraped_name": "2015 Session",
            "classification": "primary",
            "end_date": "2015-04-10",
            "identifier": "2015",
            "name": "64th Legislature, 1st Regular Session (2015)",
            "start_date": "2015-01-12"
        },
        {
            "_scraped_name": "2015 Extraordinary Session",
            "classification": "special",
            "end_date": "2015-05-18",
            "identifier": "2015spcl",
            "name": "65th Legislature, 1st Extraordinary Session (2015)",
            "start_date": "2015-05-18"
        },
        {
            "_scraped_name": "2016 Session",
            "classification": "primary",
            "end_date": "2016-03-25",
            "identifier": "2016",
            "name": "63rd Legislature, 2nd Regular Session (2016)",
            "start_date": "2016-01-11"
        },
        {
            "_scraped_name": "2017 Session",
            "classification": "primary",
            "end_date": "2017-04-07",
            "identifier": "2017",
            "name": "64th Legislature, 1st Regular Session (2017)",
            "start_date": "2017-01-09"
        }
    ]
    # Sessions the site lists but that this scraper deliberately skips.
    ignored_scraped_sessions = [
        "2010 Session",
        "2009 Session",
        "2008 Session",
        "2007 Session",
        "2006 Extraordinary Session",
        "2006 Session",
        "2005 Session",
        "2004 Session",
        "2003 Session",
        "2002 Session",
        "2001 Session",
        "2000 Extraordinary Session",
        "2000 Session",
        "1999 Session",
        "1998 Session"
    ]

    def get_organizations(self):
        """Yield the legislature and its two chambers with one post per district.

        Idaho has 35 legislative districts; each elects one Senator and
        (in reality) two Representatives, but upstream modelled one post
        per district for both chambers.
        """
        legislature_name = "Idaho State Legislature"
        lower_chamber_name = "House"
        lower_seats = 35
        lower_title = "Representative"
        upper_chamber_name = "Senate"
        upper_seats = 35
        upper_title = "Senator"

        legislature = Organization(name=legislature_name,
                                   classification="legislature")
        upper = Organization(upper_chamber_name, classification='upper',
                             parent_id=legislature._id)
        lower = Organization(lower_chamber_name, classification='lower',
                             parent_id=legislature._id)

        for n in range(1, upper_seats + 1):
            upper.add_post(
                label=str(n), role=upper_title,
                division_id='{}/sldu:{}'.format(self.division_id, n))
        for n in range(1, lower_seats + 1):
            lower.add_post(
                label=str(n), role=lower_title,
                division_id='{}/sldl:{}'.format(self.division_id, n))

        yield legislature
        yield upper
        yield lower
|
unicefuganda/edtrac | edtrac_project/rapidsms_xforms_src/setup.py | Python | bsd-3-clause | 1,235 | 0.004858 | from setuptools import setup, find_packages
# Package metadata for rapidsms-xforms.  The pinned versions target the
# RapidSMS 0.9.6a stack this app was written against.
setup(
    name='rapidsms-xforms',
    version=__import__('rapidsms_xforms').__version__,
    license="BSD",
    install_requires = [
        "django>=1.3",
        "rapidsms==0.9.6a",
        "django-uni-form==0.8.0",
        "django-eav==0.9.2",
        "python-digest==1.7",
        "django-digest==1.13"
    ],
    # django-eav is not published on PyPI; fetch it straight from GitHub.
    dependency_links = [
        "https://github.com/mvpdev/django-eav/tarball/master#egg=django-eav-0.9.2"
    ],
    description='Interactive form builder for both XForms and SMS submissions into RapidSMS',
    long_description=open('README.rst').read(),
    author='Nicolas Pottier, Eric Newcomer',
    author_email='code@nyaruka.com',
    url='http://github.com/nyaruka/rapidsms-xforms',
    download_url='http://github.com/nyaruka/rapidsms-xforms/downloads',
    include_package_data=True,
    packages=find_packages(),
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ]
)
|
ralphbean/moksha | moksha/api/widgets/amqp/amqp.py | Python | apache-2.0 | 7,523 | 0.000798 | # This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`moksha.api.widgets.amqp` - An AMQP driven live Moksha socket
==================================================================
.. moduleauthor:: Luke Macken <lmacken@redhat.com>
"""
import moksha
import moksha.utils
from tg import config
from paste.deploy.converters import asbool
import tw.api
import tw2.core as twc
from moksha.api.widgets.orbited import orbited_host, orbited_port, orbited_url
from moksha.api.widgets.orbited import orbited_js
from moksha.lib.helpers import defaultdict, listify
from moksha.widgets.notify import moksha_notify
from moksha.widgets.moksha_js import tw1_moksha_js, tw2_moksha_js
from moksha.widgets.json import tw1_jquery_json_js, tw2_jquery_json_js
from widgets import tw1_amqp_resources, tw2_amqp_resources
from widgets import tw1_jsio_js, tw2_jsio_js
def amqp_subscribe(topic):
    """ Return a javascript callback that subscribes to a given topic,
    or a list of topics.
    """
    # The template is expanded once per topic; each receiver drains the
    # broker queue into the page-level ``raw_msg_callback`` handler.
    # capacity(0xFFFFFFFF) effectively disables flow control.
    sub = """
    moksha.debug("Subscribing to the '%(topic)s' topic");
    var receiver = moksha_amqp_session.receiver('amq.topic/%(topic)s')
    receiver.onReady = raw_msg_callback;
    receiver.capacity(0xFFFFFFFF);
    """
    # listify() lets callers pass either one topic string or a list.
    return ''.join([sub % {'topic': t} for t in listify(topic)])
def amqp_unsubscribe(topic):
    """Return javascript that unsubscribes from *topic* (or a list of topics).

    Unsubscription is not implemented yet, so the returned script is
    always empty.
    """
    # TODO: emit real unsubscribe javascript once supported, e.g.
    #   ''.join("stomp.unsubscribe('%s');" % t for t in listify(topic))
    return ""
class TW1AMQPSocket(tw.api.Widget):
    """ToscaWidgets1 implementation of the live AMQP socket widget.

    Connects the browser to the AMQP broker through Orbited and wires up
    the javascript callbacks registered by live widgets.
    """
    callbacks = ['onconnectedframe', 'onmessageframe']
    javascript = [tw1_jquery_json_js, tw1_moksha_js,
                  tw1_amqp_resources, tw1_jsio_js]
    # NOTE(review): 'recieve_hook' is misspelled but kept as-is because the
    # mako template refers to the parameter by this name.
    params = callbacks[:] + [
        'topics', 'notify', 'orbited_host', 'orbited_scheme',
        'orbited_port', 'orbited_url', 'orbited_js', 'amqp_broker_host',
        'amqp_broker_port', 'amqp_broker_user', 'amqp_broker_pass',
        'send_hook', 'recieve_hook', 'moksha_domain']
    onconnectedframe = ''
    onmessageframe = ''
    send_hook = ''
    recieve_hook = ''
    template = "mako:moksha.api.widgets.amqp.templates.amqp"
    hidden = True

    def __init__(self, *args, **kw):
        # Pull all connection settings from the application configuration,
        # falling back to sensible localhost defaults.
        self.notify = asbool(config.get('moksha.socket.notify', False))
        self.orbited_host = config.get('orbited_host', 'localhost')
        self.orbited_port = str(config.get('orbited_port', 9000))
        self.orbited_scheme = config.get('orbited_scheme', 'http')
        self.orbited_url = '%s://%s:%s' % (
            self.orbited_scheme, self.orbited_host, self.orbited_port)
        self.orbited_js = tw.api.JSLink(
            link=self.orbited_url + '/static/Orbited.js')
        self.moksha_domain = config.get('moksha.domain', 'localhost')
        self.amqp_broker_host = config.get('amqp_broker_host', 'localhost')
        self.amqp_broker_port = str(config.get('amqp_broker_port', 5672))
        self.amqp_broker_user = config.get('amqp_broker_user', 'guest')
        self.amqp_broker_pass = config.get('amqp_broker_pass', 'guest')
        super(TW1AMQPSocket, self).__init__(*args, **kw)

    def update_params(self, d):
        """Collect the registered live-widget callbacks into template params."""
        super(TW1AMQPSocket, self).update_params(d)
        d.topics = []
        d.onmessageframe = defaultdict(str)  # {topic: 'js callbacks'}
        for callback in self.callbacks:
            if len(moksha.utils.livewidgets[callback]):
                cbs = ''
                if callback == 'onmessageframe':
                    # Message callbacks are keyed by topic.
                    for topic in moksha.utils.livewidgets[callback]:
                        d.topics.append(topic)
                        for cb in moksha.utils.livewidgets[callback][topic]:
                            d.onmessageframe[topic] += '%s;' % str(cb)
                else:
                    # Connection-level callbacks are a flat list.
                    for cb in moksha.utils.livewidgets[callback]:
                        if isinstance(cb, (tw.api.js_callback,
                                           tw.api.js_function)):
                            cbs += '$(%s);' % str(cb)
                        else:
                            cbs += str(cb)
                if cbs:
                    d[callback] = cbs
# TODO -- AMQPSocket and StompSocket have a *lot* in common.
# They should both inherit from an abstract CometSocket! -- threebean
class TW2AMQPSocket(twc.Widget):
    """ToscaWidgets2 implementation of the live AMQP socket widget.

    Mirrors TW1AMQPSocket: connects the browser to the AMQP broker through
    Orbited and wires up the javascript callbacks registered by live widgets.
    """
    callbacks = ['onconnectedframe', 'onmessageframe']
    resources = [tw2_jquery_json_js, tw2_moksha_js,
                 tw2_amqp_resources, tw2_jsio_js]
    topics = twc.Variable()
    # Connection settings come from the application configuration, with
    # localhost defaults, evaluated once at class-definition time.
    notify = twc.Param(
        default=asbool(config.get('moksha.socket.notify', False)))
    hidden = twc.Param(default=True)
    orbited_host = twc.Param(
        default=config.get('orbited_host', 'localhost'))
    orbited_port = twc.Param(
        default=str(config.get('orbited_port', 9000)))
    orbited_scheme = twc.Param(
        default=config.get('orbited_scheme', 'http'))
    orbited_js = twc.Param(default=orbited_js)
    moksha_domain = twc.Param(
        default=config.get('moksha.domain', 'localhost'))
    amqp_broker_host = twc.Param(
        default=config.get('amqp_broker_host', 'localhost'))
    amqp_broker_port = twc.Param(
        default=str(config.get('amqp_broker_port', 5672)))
    amqp_broker_user = twc.Param(
        default=config.get('amqp_broker_user', 'guest'))
    amqp_broker_pass = twc.Param(
        default=config.get('amqp_broker_pass', 'guest'))
    onconnectedframe = twc.Param(default='')
    onmessageframe = twc.Param(default='')
    send_hook = twc.Param(default='')
    receive_hook = twc.Param(default='')
    template = "mako:moksha.api.widgets.amqp.templates.amqp"
    def prepare(self):
        # Collect the registered live-widget callbacks into widget params
        # before rendering.
        super(TW2AMQPSocket, self).prepare()
        self.orbited_url = '%s://%s:%s' % (self.orbited_scheme,
            self.orbited_host, self.orbited_port)
        self.topics = []
        self.onmessageframe = defaultdict(str) # {topic: 'js callbacks'}
        for callback in self.callbacks:
            if len(moksha.utils.livewidgets[callback]):
                cbs = ''
                if callback == 'onmessageframe':
                    # Message callbacks are keyed by topic.
                    for topic in moksha.utils.livewidgets[callback]:
                        self.topics.append(topic)
                        for cb in moksha.utils.livewidgets[callback][topic]:
                            self.onmessageframe[topic] += '%s;' % str(cb)
                else:
                    # Connection-level callbacks are a flat list.
                    for cb in moksha.utils.livewidgets[callback]:
                        if isinstance(cb, (twc.js_callback, twc.js_function)):
                            cbs += '$(%s);' % str(cb)
                        else:
                            cbs += str(cb)
                if cbs:
                    setattr(self, callback, cbs)
# Pick the ToscaWidgets implementation at import time based on the
# ``moksha.use_tw2`` configuration flag.
if asbool(config.get('moksha.use_tw2', False)):
    AMQPSocket = TW2AMQPSocket
else:
    AMQPSocket = TW1AMQPSocket
|
pombredanne/drf-toolbox | tests/models.py | Python | bsd-3-clause | 2,563 | 0.00039 | from __future__ import absolute_import, unicode_literals
from drf_toolbox.compat import models, django_pgfields_installed
from drf_toolbox import serializers
from tests.compat import mock
class ExplicitAPIEndpointsModel(models.Model):
    # Field deliberately named ``api_endpoints`` — presumably to exercise
    # clashes with the serializer key of the same name; confirm against tests.
    api_endpoints = models.IntegerField()
    something = models.CharField(max_length=50)
    class Meta:
        app_label = 'tests'
class NormalModel(models.Model):
    # Plain model exercising a single-column unique constraint (``bacon``)
    # and a composite ``unique_together`` pair (``bar``/``baz``).
    foo = models.IntegerField()
    bar = models.IntegerField()
    baz = models.IntegerField()
    bacon = models.IntegerField(unique=True)
    def get_absolute_url(self):
        # Canonical URL for one instance.
        return '/normal/%s/' % self.id
    class Meta:
        app_label = 'tests'
        unique_together = ('bar', 'baz')
class ChildModel(models.Model):
    # One level of foreign-key nesting under NormalModel.
    normal = models.ForeignKey(NormalModel)
    class Meta:
        app_label = 'tests'
class GrandchildModel(models.Model):
    # Two levels of foreign-key nesting: Grandchild -> Child -> Normal.
    child = models.ForeignKey(ChildModel)
    class Meta:
        app_label = 'tests'
class RelatedModel(models.Model):
    # Reverse relation reachable from NormalModel as ``related_model``.
    baz = models.IntegerField()
    normal = models.ForeignKey(NormalModel, related_name='related_model')
    class Meta:
        app_label = 'tests'
class CreatedModel(models.Model):
    # Timestamp set automatically when the row is first saved.
    created = models.DateTimeField(auto_now_add=True)

    class Meta:
        app_label = 'tests'
# These models are only defined when django-pgfields is installed.  The
# CompositeField type-registration machinery is patched out so the classes
# can be declared without a live PostgreSQL connection.
if django_pgfields_installed:
    with mock.patch.multiple(models.CompositeField,
                             create_type=mock.DEFAULT,
                             create_type_sql=mock.DEFAULT,
                             register_composite=mock.DEFAULT):
        class CoordsField(models.CompositeField):
            # Composite (x, y) integer pair.
            x = models.IntegerField()
            y = models.IntegerField()

        class SizeField(models.CompositeField):
            # Composite (width, height) pair with a custom DRF serializer.
            width = models.IntegerField()
            height = models.IntegerField()

            def get_drf_serializer_field(self):
                # Hook used by drf_toolbox to map this model field onto a
                # serializer field.
                return SizeSerializerField(
                    fields={
                        'width': serializers.IntegerField(),
                        'height': serializers.IntegerField(),
                    },
                    instance_class=self.instance_class,
                )

        class SizeSerializerField(serializers.CompositeField):
            pass

        class PGFieldsModel(models.Model):
            # Exercises the django-pgfields column types end to end.
            id = models.UUIDField(auto_add=True, primary_key=True)
            uuid = models.UUIDField()
            array = models.ArrayField(of=models.IntegerField)
            extra = models.JSONField()
            coords = CoordsField()
            size = SizeField()

            class Meta:
                app_label = 'tests'
|
amarandon/pinax | pinax/apps/account/tests/test_password_reset.py | Python | mit | 7,013 | 0.007557 | import os
import re
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth.models import User
import pinax
from emailconfirmation.models import EmailAddress, EmailConfirmation
class PasswordResetTest(TestCase):
    """End-to-end tests of the password-reset flow.

    Based on the django.contrib.auth password-reset tests, adapted for
    pinax's requirement that the e-mail address be verified.
    """
    urls = "pinax.apps.account.tests.account_urls"

    def setUp(self):
        self.old_installed_apps = settings.INSTALLED_APPS
        # remove django-mailer to properly test for outbound e-mail
        if "mailer" in settings.INSTALLED_APPS:
            settings.INSTALLED_APPS.remove("mailer")

    def tearDown(self):
        settings.INSTALLED_APPS = self.old_installed_apps

    def context_lookup(self, response, key):
        # used for debugging: find *key* in any of the response subcontexts
        for subcontext in response.context:
            if key in subcontext:
                return subcontext[key]
        raise KeyError

    def test_password_reset_view(self):
        """
        Test GET on /password_reset/
        """
        response = self.client.get(reverse("acct_passwd_reset"))
        self.assertEquals(response.status_code, 200)

    def test_email_not_found(self):
        """
        Error is raised if the provided e-mail address isn't verified to an
        existing user account
        """
        data = {
            "email": "nothing@example.com",
        }
        response = self.client.post(reverse("acct_passwd_reset"), data)
        self.assertEquals(response.status_code, 200)
        # @@@ instead of hard-coding this error message rely on a error key
        # defined in the form where the site developer would override this
        # error message.
        self.assertContains(response, "E-mail address not verified for any user account")
        self.assertEquals(len(mail.outbox), 0)

    def test_email_not_verified(self):
        """
        Error is raised if the provided e-mail address isn't verified to an
        existing user account
        """
        bob = User.objects.create_user("bob", "bob@example.com", "abc123")
        EmailAddress.objects.create(
            user = bob,
            email = "bob@example.com",
            verified = False,
            primary = True,
        )
        data = {
            "email": "bob@example.com",
        }
        response = self.client.post(reverse("acct_passwd_reset"), data)
        self.assertEquals(response.status_code, 200)
        # @@@ instead of hard-coding this error message rely on a error key
        # defined in the form where the site developer would override this
        # error message.
        self.assertContains(response, "E-mail address not verified for any user account")
        self.assertEquals(len(mail.outbox), 0)

    def test_email_found(self):
        """
        E-mail is sent if a valid e-mail address is provided for password reset
        """
        bob = User.objects.create_user("bob", "bob@example.com", "abc123")
        EmailAddress.objects.create(
            user = bob,
            email = "bob@example.com",
            verified = True,
            primary = True,
        )
        data = {
            "email": "bob@example.com",
        }
        response = self.client.post(reverse("acct_passwd_reset"), data)
        self.assertEquals(response.status_code, 302)
        self.assertEquals(len(mail.outbox), 1)

    def _read_reset_email(self, email):
        # Extract the reset URL (and its path component) from the e-mail body.
        match = re.search(r"https?://[^/]*(/.*reset_key/\S*)", email.body)
        self.assert_(match is not None, "No URL found in sent e-mail")
        return match.group(), match.groups()[0]

    def _test_confirm_start(self):
        # Shared fixture: create a verified user, trigger a reset e-mail,
        # and return (full_url, path) of the confirmation link.
        bob = User.objects.create_user("bob", "bob@example.com", "abc123")
        EmailAddress.objects.create(
            user = bob,
            email = "bob@example.com",
            verified = True,
            primary = True,
        )
        data = {
            "email": "bob@example.com",
        }
        response = self.client.post(reverse("acct_passwd_reset"), data)
        self.assertEquals(response.status_code, 302)
        self.assertEquals(len(mail.outbox), 1)
        return self._read_reset_email(mail.outbox[0])

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # munge the token in the path, but keep the same length, in case the
        # URLconf will reject a different length.
        path = path[:-5] + ("0"*4) + path[-1]
        response = self.client.get(path)
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, "New Password (again)")

    def test_confirm_invalid_post(self):
        url, path = self._test_confirm_start()
        # munge the token in the path, but keep the same length, in case the
        # URLconf will reject a different length.
        path = path[:-5] + ("0"*4) + path[-1]
        data = {
            "password1": "newpassword",
            "password2": "newpassword",
        }
        response = self.client.post(path, data)
        user = User.objects.get(email="bob@example.com")
        self.assert_(not user.check_password("newpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        data = {
            "password1": "newpassword",
            "password2": "newpassword",
        }
        response = self.client.post(path, data)
        self.assertEquals(response.status_code, 200)
        # check the password has been changed
        user = User.objects.get(email="bob@example.com")
        self.assert_(user.check_password("newpassword"))
        # check we can't GET with same path
        response = self.client.get(path)
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, "The password reset link was invalid")
        # check we can't POST with same path
        data = {
            "password1": "anothernewpassword",
            "password2": "anothernewpassword",
        }
        # bug fix: previously posted without ``data``, so the "used link
        # rejects a new password" case was never actually exercised.
        response = self.client.post(path, data)
        self.assertEquals(response.status_code, 200)
        user = User.objects.get(email="bob@example.com")
        self.assert_(not user.check_password("anothernewpassword"))

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        data = {
            "password1": "newpassword",
            "password2": "anothernewpassword",
        }
        response = self.client.post(path, data)
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, "You must type the same password each time.")
|
pattisdr/lookit-api | accounts/migrations/0014_auto_20170726_1403.py | Python | mit | 702 | 0.001425 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-26 14:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.2: sets Organization's default ordering
    # and its custom object permissions.  Avoid hand-editing beyond fixes.

    dependencies = [
        ('accounts', '0013_auto_20170718_2137'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='organization',
            options={'ordering': ['name'], 'permissions': (('can_view_organization', 'Can View Organization'), ('can_edit_organization', 'Can Edit Organization'), ('can_create_organization', 'Can Create Organization'), ('can_remove_organization', 'Can Remove Organization'), ('can_view_experimenter', 'Can View Experimenter'))},
        ),
    ]
|
aroth-arsoft/arsoft-web-crashupload | app/manage.py | Python | gpl-3.0 | 266 | 0.003759 | #!/usr/bi | n/env python3
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before dispatching the
    # requested management command (runserver, migrate, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "arsoft.web.crashupload.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
pktrigg/pygsf | pygsf.py | Python | apache-2.0 | 38,850 | 0.028603 | #name: pygsf
#created: July 2017
#by: p.kennedy@fugro.com
#description: python module to read and write a Generic Sensor Formaty (GSF) file natively
#notes: See main at end of script for example how to use this
#based on GSF Version 3.05
# See readme.md for more details
import sys
from glob import glob
import argparse
import os.path
import struct
import pprint
import time
import datetime
import math
import random
from datetime import datetime
from datetime import timedelta
from statistics import mean
import mmap
# for testing only...
# import matplotlib.pyplot as plt
import numpy as np
#/* The high order 4 bits are used to define the field size for this array */
GSF_FIELD_SIZE_DEFAULT = 0x00 #/* Default values for field size are used used for all beam arrays */
GSF_FIELD_SIZE_ONE = 0x10 #/* value saved as a one byte value after applying scale and offset */
GSF_FIELD_SIZE_TWO = 0x20 #/* value saved as a two byte value after applying scale and offset */
GSF_FIELD_SIZE_FOUR = 0x40 #/* value saved as a four byte value after applying scale and offset */
GSF_MAX_PING_ARRAY_SUBRECORDS = 26
# Record Decriptions (See page 82)
HEADER = 1
SWATH_BATHYMETRY = 2
SOUND_VELOCITY_PROFILE = 3
PROCESSING_PARAMETERS = 4
SENSOR_PARAMETERS = 5
COMMENT = 6
HISTORY = 7
NAVIGATION_ERROR = 8
SWATH_BATHY_SUMMARY = 9
SINGLE_BEAM_SOUNDING = 10
HV_NAVIGATION_ERROR = 11
ATTITUDE = 12
SNIPPET_NONE = 0 # extract the mean value from the snippet array
SNIPPET_MEAN = 1 # extract the mean value from the snippet array
SNIPPET_MAX = 2 # extract the maximum value from the snippet array
SNIPPET_DETECT = 3 # extract the bottom detect snippet value from the snippet array
SNIPPET_MEAN5DB = 4 # extract the mean of all snippets within 5dB of the mean
# the various frequencies we support in the R2Sonic multispectral files
ARCIdx = {100000: 0, 200000: 1, 400000: 2}
# the rejection flags used by this software
REJECT_CLIP = -1
REJECT_RANGE= -2
REJECT_INTENSITY= -4
###############################################################################
def main():
	'''Command-line entry point: run testreader() over every matching GSF file.'''
	parser = argparse.ArgumentParser(description='Read GSF file and create a reflectivity image.')
	# bug fix: the help text and skip message previously said "ALL file",
	# a leftover from a .all reader, although the code only accepts .gsf.
	parser.add_argument('-i', dest='inputFile', action='store', help='Input GSF filename to image. It can also be a wildcard, e.g. *.gsf')
	if len(sys.argv)==1:
		parser.print_help()
		sys.exit(1)
	args = parser.parse_args()
	print ("processing with settings: ", args)
	for filename in glob(args.inputFile):
		if not filename.endswith('.gsf'):
			print ("File %s is not a .gsf file, skipping..." % (filename))
			continue
		if not os.path.isfile(filename):
			print ("file not found:", filename)
			sys.exit(1)
		testreader(filename)
###############################################################################
def testreader(filename):
	'''
	sample read script so we can see how to use the code

	Opens *filename* with GSFREADER and walks every datagram, decoding
	only SWATH_BATHYMETRY records and accumulating per-frequency mean
	backscatter.  The commented-out lines are plotting/logging experiments
	kept for reference.
	'''
	start_time = time.time() # time the process so we can keep it quick
	# filename = "C:/projects/multispectral/PatriciaBasin/20161130-1907 - 0001-2026_1.gsf"
	# filename = "C:/development/python/sample_subset.gsf"
	# filename = "F:/Projects/multispectral/_BedfordBasin2016/20160331 - 125110 - 0001-2026_1.gsf"
	# filename = "F:/Projects/multispectral/_Newbex/20170524-134208 - 0001-2026_1.gsf"
	# filename = "F:/Projects/multispectral/_BedfordBasin2017/20170502 - 131750 - 0001-2026_1.gsf"
	# filename = "C:/projects/multispectral/_BedfordBasin2017/20170502 - 150058 - 0001-2026_1.gsf"
	print (filename)
	pingcount = 0
	# create a GSFREADER class and pass the filename
	r = GSFREADER(filename)
	# r.loadnavigation()
	# f1 = plt.figure()
	# # f2 = plt.figure()
	# # f3 = plt.figure()
	# ax1 = f1.add_subplot(111)
	# # ax2 = f2.add_subplot(111)
	# # ax3 = f3.add_subplot(111)
	print ("pingcount, pingnumber, 100kHz, 200kHz, 400kHz")
	while r.moreData():
		# read a datagram. If we support it, return the datagram type and aclass for that datagram
		# The user then needs to call the read() method for the class to undertake a fileread and binary decode. This keeps the read super quick.
		numberofbytes, recordidentifier, datagram = r.readDatagram()
		# print(datagram)
		if recordidentifier == SWATH_BATHYMETRY:
			print(recordidentifier, end=',')
			datagram.read()
			datagram.snippettype = SNIPPET_NONE
			# print ("%s Lat:%.3f Lon:%.3f Ping:%d Freq:%d Serial %s" % (datagram.currentRecordDateTime(), datagram.latitude, datagram.longitude, datagram.pingnumber, datagram.frequency, datagram.serialnumber))
			# for cross profile plotting
			# bs = []
			# for s in datagram.MEAN_REL_AMPLITUDE_ARRAY:
			# 	if s != 0:
			# 		bs.append(20 * math.log10(s) - 100)
			# 	else:
			# 		bs.append(0)
			# bs = [20 * math.log10(s) - 100 for s in datagram.MEAN_REL_AMPLITUDE_ARRAY]
			samplearray = datagram.R2Soniccorrection()
			# NOTE(review): mean() raises on an empty samplearray, and the
			# freq100/200/400 values are only consumed by the commented-out
			# print below — confirm whether they are still needed.
			if datagram.frequency == 100000:
				freq100 = mean(samplearray)
			if datagram.frequency == 200000:
				freq200 = mean(samplearray)
			if datagram.frequency == 400000:
				freq400 = mean(samplearray)
			# print ("%d,%d,%.3f,%.3f,%.3f" %(pingcount, datagram.pingnumber, freq100, freq200, freq400))
			# print ("%d" %(pingcount))
			pingcount += 1
		# if len(bs) > 0:
		# plt.plot(datagram.BEAM_ANGLE_ARRAY, bs, linewidth=0.25, color='blue')
		# plt.ylim([-60,-5])
		# plt.xlim([-60,60])
		# # ax3.plot(datagram.BEAM_ANGLE_ARRAY, datagram.ALONG_TRACK_ARRAY)
		# plt.pause(0.001)
	# datagram.clippolar(-60, 60)
	# print("Duration %.3fs" % (time.time() - start_time )) # time the process
	# print ("PingCount:", pingcount)
	return
###############################################################################
class UNKNOWN_RECORD:
	'''Convenience holder for GSF datagrams that have no bespoke class.

	The constructor remembers where the record payload starts and skips
	the file pointer past it; call read() to load the raw payload bytes.
	'''
	def __init__(self, fileptr, numbytes, recordidentifier, hdrlen):
		self.recordidentifier = recordidentifier
		self.offset = fileptr.tell()	# start of the record payload
		self.hdrlen = hdrlen
		self.numbytes = numbytes
		self.fileptr = fileptr
		self.fileptr.seek(numbytes, 1)	# set the file ptr to the end of the record
		self.data = ""
		self.name = "unknown"

	def read(self):
		# bug fix: the original referenced the non-existent attribute
		# ``self.numberofbytes`` (AttributeError) and read from the current
		# position, i.e. past the record.  Seek back to the remembered
		# offset and read exactly this record's payload.
		self.fileptr.seek(self.offset)
		self.data = self.fileptr.read(self.numbytes)

	def __str__(self):
		'''
		pretty print this class
		'''
		return pprint.pformat(vars(self))
class SCALEFACTOR:
    '''Scale factor attached to a GSF ping subrecord: a multiplier/offset
    pair plus the packed compression flag for that subrecord.'''
    def __init__(self):
        # compressionFlag packs two nibbles: high = bytes of storage,
        # low = compression type.
        (self.subrecordID, self.compressionFlag,
         self.multiplier, self.offset) = (0, 0, 0.0, 0)
        self.name = "scaleFactor"
    def __str__(self):
        '''Readable dump of every attribute for debugging.'''
        return pprint.pformat(vars(self))
class SWATH_BATHYMETRY_PING :
def __init__(self, fileptr, numbytes, recordidentifier, hdrlen):
self.recordidentifier = recordidentifier # assign the GSF code for this datagram type
self.offset = fileptr.tell() # remember where this packet resides in the file so we can return if needed
self.hdrlen = hdrlen # remember the header length. it should be 8 bytes, bout if checksum then it is 12
self.numbytes = numbytes # remember how many bytes this packet contains
self.fileptr = fileptr # remember the file pointer so we do not need to pass from the host process
self.fileptr.seek(numbytes, 1) # move the file pointer to the end of the record so we can skip as the default actions
self.scalefactors = []
self.DEPTH_ARRAY = []
self.ACROSS_TRACK_ARRAY = []
self.ALONG_TRACK_ARRAY = []
self.TRAVEL_TIME_ARRAY = []
self.BEAM_ANGLE_ARRAY = []
self.MEAN_CAL_AMPLITUDE_ARRAY = []
self.MEAN_REL_AMPLITUDE_ARRAY = []
self.QUALITY_FACTOR_ARRAY = []
self.BEAM_FLAGS_ARRAY = []
self.BEAM_ANGLE_FORWARD_ARRAY = []
self.VERTICAL_ERROR_ARRAY = []
self.HORIZONTAL_ERROR_ARRAY = []
self.SECTOR_NUMBER_ARRAY = []
# self.INTENSITY_SERIES_ARRAY = []
self.SNIPPET_SERIES_ARRAY = []
self.perbeam = True
self.snippettype = SNIPPET_MAX
self.numbeams = 0
self.time = 0
self.pingnanotime = 0
self.name = "swath bathy ping"
######################################## |
kiniou/blender-smooth-slides | tools/lpod/test/test_content.py | Python | gpl-3.0 | 4,392 | 0.000683 | # -*- coding: UTF-8 -*-
#
# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: Romain Gauthier <romain@itaapy.com>
# Hervé Cauwelier <herve@itaapy.com>
# David Versmisse <david.versmisse@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You | may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
|
# Import from the Standard Library
from unittest import TestCase, main
# Import from lpod
from lpod.document import odf_get_document
class ContentTestCase(TestCase):
    """Checks that the body of a sample ODF text document serializes to the
    exact expected XML."""
    def setUp(self):
        # Load the sample document fresh for every test.
        self.document = document = odf_get_document('samples/base_text.odt')
    def tearDown(self):
        del self.document
    def test_get_body(self):
        """The pretty-printed serialization must match the sample byte for byte."""
        body = self.document.get_body()
        expected = ('<office:text>\n'
                    '  <text:sequence-decls>\n'
                    '    <text:sequence-decl text:display-outline-level="0" '
                      'text:name="Illustration"/>\n'
                    '    <text:sequence-decl text:display-outline-level="0" '
                      'text:name="Table"/>\n'
                    '    <text:sequence-decl text:display-outline-level="0" '
                      'text:name="Text"/>\n'
                    '    <text:sequence-decl text:display-outline-level="0" '
                      'text:name="Drawing"/>\n'
                    '  </text:sequence-decls>\n'
                    '  <text:section text:style-name="Sect1" '
                      'text:name="Section1">\n'
                    '    <text:h text:style-name="Heading_20_1" '
                      'text:outline-level="1">LpOD Test Case Document</text:h>\n'
                    '    <text:p text:style-name="Text_20_body">This is the '
                      'first paragraph.</text:p>\n'
                    '    <text:p text:style-name="Text_20_body">This is the '
                      'second paragraph.</text:p>\n'
                    '    <text:p text:style-name="Hanging_20_indent">This is '
                      'a paragraph with a named style.</text:p>\n'
                    '    <text:h text:style-name="Heading_20_2" '
                      'text:outline-level="2">Level 2 Title</text:h>\n'
                    '    <text:p text:style-name="Text_20_body">This is the '
                      'first paragraph of the second title.</text:p>\n'
                    '    <text:p text:style-name="Text_20_body">This is the '
                      'last paragraph with diacritical signs: '
                      'éè</text:p>\n'
                    '  </text:section>\n'
                    '  <text:section text:style-name="Sect1" '
                      'text:name="Section2">\n'
                    '    <text:h text:style-name="Heading_20_1" '
                      'text:outline-level="1" text:restart-numbering="true" '
                      'text:start-value="-1">First Title of the '
                      'Second Section</text:h>\n'
                    '    <text:p text:style-name="Text_20_body">First '
                      'paragraph of the second section.</text:p>\n'
                    '    <text:p text:style-name="Text_20_body">This is '
                      'the second paragraph with <text:a xlink:type="simple" '
                      'xlink:href="http://lpod-project.org/" office:name="Link '
                      'to the lpod project">an external link</text:a> inside.'
                      '</text:p>\n'
                    '  </text:section>\n'
                    '</office:text>\n')
        self.assertEqual(body.serialize(pretty=True), expected)
# Allow running this test module directly, e.g. `python test_content.py`.
if __name__ == '__main__':
    main()
|
GPflow/GPflow | gpflow/conditionals/dispatch.py | Python | apache-2.0 | 1,281 | 0.000781 | # Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required | by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from deprecated import deprecated
from ..utilities import Dispatcher
# Multiple-dispatch table: conditional() implementations are selected by the
# argument types they were registered for.
conditional = Dispatcher("conditional")
# Keep a private handle on the raw register function for internal use, then
# replace the public register with a deprecation-warning wrapper around it.
conditional._gpflow_internal_register = conditional.register
# type-ignore below is because mypy doesn't like it when we assign to a function.
conditional.register = deprecated( # type: ignore
    reason="Registering new implementations of conditional() is deprecated. "
    "Instead, create your own subclass of gpflow.posteriors.AbstractPosterior "
    "and register an implementation of gpflow.posteriors.get_posterior_class "
    "that returns your class."
)(conditional._gpflow_internal_register)
# sample_conditional keeps its plain register (not deprecated here).
sample_conditional = Dispatcher("sample_conditional")
grahame/ealgis | django/ealgis/ealauth/migrations/0004_auto_20161217_0949.py | Python | gpl-3.0 | 1,175 | 0.002553 | # -*- coding: utf-8 -*-
# Generated by Django | 1.10.4 on 2016-12-17 09:49
from __future__ import unicode_literals
from django.conf import settings
import d | jango.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (see the header above): creates the
    # MapDefinition model and makes (name, owner_user_id) unique together.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('ealauth', '0003_auto_20161217_0940'),
    ]
    operations = [
        migrations.CreateModel(
            name='MapDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
                ('description', models.TextField()),
                ('json', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
                ('owner_user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Each owner may only have one map with a given name.
        migrations.AlterUniqueTogether(
            name='mapdefinition',
            unique_together=set([('name', 'owner_user_id')]),
        ),
    ]
|
googleapis/python-monitoring | google/cloud/monitoring_v3/services/service_monitoring_service/transports/grpc.py | Python | apache-2.0 | 22,884 | 0.002054 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.monitoring_v3.types import service
from google.cloud.monitoring_v3.types import service as gm_service
from google.cloud.monitoring_v3.types import service_service
from google.protobuf import empty_pb2 # type: ignore
from .base import ServiceMonitoringServiceTransport, DEFAULT_CLIENT_INFO
class ServiceMonitoringServiceGrpcTransport(ServiceMonitoringServiceTransport):
"""gRPC backend transport for ServiceMonitoringService.
The Cloud Monitoring Service-Oriented Monitoring API has endpoints
for managing and querying aspects of a workspace's services. These
include the ``Service``'s monitored resources, its Service-Level
Objectives, and a taxonomy of categorized Health Metrics.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "monitoring.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
| else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and | not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quo |
AlexBoogaard/Sick-Beard-Torrent-Edition | sickbeard/metadata/generic.py | Python | gpl-3.0 | 35,514 | 0.003097 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
try:
import xml.etree.cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
import re
import sickbeard
from sickbeard import exceptions, helpers
from sickbeard.metadata import helpers as metadata_helpers
from sickbeard import logger
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from lib.tvdb_api import tvdb_api, tvdb_exceptions
class GenericMetadata():
"""
Base class for all metadata providers. Def | ault behavior is m | eant to mostly
follow XBMC 12+ metadata standards. Has support for:
- show metadata file
- episode metadata file
- episode thumbnail
- show fanart
- show poster
- show banner
- season thumbnails (poster)
- season thumbnails (banner)
- season all poster
- season all banner
"""
def __init__(self,
show_metadata=False,
episode_metadata=False,
fanart=False,
poster=False,
banner=False,
episode_thumbnails=False,
season_posters=False,
season_banners=False,
season_all_poster=False,
season_all_banner=False):
self.name = "Generic"
self._ep_nfo_extension = "nfo"
self._show_metadata_filename = "tvshow.nfo"
self.fanart_name = "fanart.jpg"
self.poster_name = "poster.jpg"
self.banner_name = "banner.jpg"
self.season_all_poster_name = "season-all-poster.jpg"
self.season_all_banner_name = "season-all-banner.jpg"
self.show_metadata = show_metadata
self.episode_metadata = episode_metadata
self.fanart = fanart
self.poster = poster
self.banner = banner
self.episode_thumbnails = episode_thumbnails
self.season_posters = season_posters
self.season_banners = season_banners
self.season_all_poster = season_all_poster
self.season_all_banner = season_all_banner
def get_config(self):
config_list = [self.show_metadata, self.episode_metadata, self.fanart, self.poster, self.banner, self.episode_thumbnails, self.season_posters, self.season_banners, self.season_all_poster, self.season_all_banner]
return '|'.join([str(int(x)) for x in config_list])
    def get_id(self):
        """Return a normalized identifier derived from this provider's name."""
        return GenericMetadata.makeID(self.name)
@staticmethod
def makeID(name):
name_id = re.sub("[+]", "plus", name)
name_id = re.sub("[^\w\d_]", "_", name_id).lower()
return name_id
def set_config(self, string):
config_list = []
for x in string.split('|'):
config_list.append(bool(int(x)))
self.show_metadata = config_list[0]
self.episode_metadata = config_list[1]
self.fanart = config_list[2]
self.poster = config_list[3]
self.banner = config_list[4]
self.episode_thumbnails = config_list[5]
self.season_posters = config_list[6]
self.season_banners = config_list[7]
self.season_all_poster = config_list[8]
self.season_all_banner = config_list[9]
def _has_show_metadata(self, show_obj):
result = ek.ek(os.path.isfile, self.get_show_file_path(show_obj))
logger.log(u"Checking if " + self.get_show_file_path(show_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_episode_metadata(self, ep_obj):
result = ek.ek(os.path.isfile, self.get_episode_file_path(ep_obj))
logger.log(u"Checking if " + self.get_episode_file_path(ep_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_fanart(self, show_obj):
result = ek.ek(os.path.isfile, self.get_fanart_path(show_obj))
logger.log(u"Checking if " + self.get_fanart_path(show_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_poster(self, show_obj):
result = ek.ek(os.path.isfile, self.get_poster_path(show_obj))
logger.log(u"Checking if " + self.get_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_banner(self, show_obj):
result = ek.ek(os.path.isfile, self.get_banner_path(show_obj))
logger.log(u"Checking if " + self.get_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_episode_thumb(self, ep_obj):
location = self.get_episode_thumb_path(ep_obj)
result = location is not None and ek.ek(os.path.isfile, location)
if location:
logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
return result
def _has_season_poster(self, show_obj, season):
location = self.get_season_poster_path(show_obj, season)
result = location is not None and ek.ek(os.path.isfile, location)
if location:
logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
return result
def _has_season_banner(self, show_obj, season):
location = self.get_season_banner_path(show_obj, season)
result = location is not None and ek.ek(os.path.isfile, location)
if location:
logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
return result
def _has_season_all_poster(self, show_obj):
result = ek.ek(os.path.isfile, self.get_season_all_poster_path(show_obj))
logger.log(u"Checking if " + self.get_season_all_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_season_all_banner(self, show_obj):
result = ek.ek(os.path.isfile, self.get_season_all_banner_path(show_obj))
logger.log(u"Checking if " + self.get_season_all_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG)
return result
    def get_show_file_path(self, show_obj):
        """Return the full path of the show-level metadata file (tvshow.nfo)."""
        return ek.ek(os.path.join, show_obj.location, self._show_metadata_filename)
    def get_episode_file_path(self, ep_obj):
        """Return the episode metadata path: the episode file with a .nfo extension."""
        return helpers.replaceExtension(ep_obj.location, self._ep_nfo_extension)
    def get_fanart_path(self, show_obj):
        """Return the full path of the show's fanart image (fanart.jpg)."""
        return ek.ek(os.path.join, show_obj.location, self.fanart_name)
    def get_poster_path(self, show_obj):
        """Return the full path of the show's poster image (poster.jpg)."""
        return ek.ek(os.path.join, show_obj.location, self.poster_name)
    def get_banner_path(self, show_obj):
        """Return the full path of the show's banner image (banner.jpg)."""
        return ek.ek(os.path.join, show_obj.location, self.banner_name)
def get_episode_thumb_path(self, ep_obj):
"""
Returns the path where the episode thumbnail should be stored.
ep_obj: a TVEpisode instance for which to create the thumbnail
"""
if ek.ek(os.path.isfile, ep_obj.location):
tbn_filename = ep_obj.location.rpartition(".")
if tbn_filename[0] == "":
tbn_filename = ep_obj.location + "-thumb.jpg"
else:
tbn_filename = tbn_filename[0] + "-thumb.jpg"
else:
return None
return tbn_filename
def get_season_poster_path(self, show_obj, season):
"""
Returns the full |
chfoo/wpull | wpull/util_test.py | Python | gpl-3.0 | 4,235 | 0.000236 | # encoding=utf-8
import os
import sys
import tempfile
import unittest
from dns.resolver import NoNameservers
from wpull.util import (datetime_str, python_version, filter_pem,
parse_iso8601_str, is_ascii, close_on_error,
get_exception_message, GzipPickleStream)
DEFAULT_TIMEOUT = 30
class TestUtil(unittest.TestCase):
    """Unit tests for the helper functions and classes in wpull.util."""
    def test_datetime_str(self):
        # The default timestamp format is a fixed 20 characters wide.
        self.assertEqual(20, len(datetime_str()))
    def test_parse_iso8601_str(self):
        # Epoch + 10 seconds parses to the integer 10.
        self.assertEqual(10, parse_iso8601_str('1970-01-01T00:00:10Z'))
    def test_python_version(self):
        # python_version() must mirror sys.version_info exactly.
        version_string = python_version()
        nums = tuple([int(n) for n in version_string.split('.')])
        self.assertEqual(3, len(nums))
        self.assertEqual(nums, sys.version_info[0:3])
    def test_filter_pem(self):
        # filter_pem() should extract each certificate block and drop the
        # surrounding junk text.
        unclean = (b'Kitten\n'
                   b'-----BEGIN CERTIFICATE-----\n'
                   b'ABCDEFG\n'
                   b'-----END CERTIFICATE-----\n'
                   b'Puppy\n'
                   b'-----BEGIN CERTIFICATE-----\n'
                   b'QWERTY\n'
                   b'-----END CERTIFICATE-----\n'
                   b'Kit\n')
        clean = {
            (
                b'-----BEGIN CERTIFICATE-----\n'
                b'ABCDEFG\n'
                b'-----END CERTIFICATE-----\n'
            ),
            (
                b'-----BEGIN CERTIFICATE-----\n'
                b'QWERTY\n'
                b'-----END CERTIFICATE-----\n'
            )
        }
        self.assertEqual(clean, filter_pem(unclean))
    def test_is_acsii(self):
        # (method name typo kept for compatibility with existing test IDs)
        self.assertTrue(is_ascii('abc'))
        self.assertFalse(is_ascii('😤'))
    def test_close_on_error(self):
        # close_on_error() must call the close callback when the wrapped
        # body raises, and let the exception propagate.
        class MyObject(object):
            def __init__(self):
                self.closed = False
            def close(self):
                self.closed = True
            def oops(self):
                with close_on_error(self.close):
                    raise ValueError()
        my_object = MyObject()
        self.assertRaises(ValueError, my_object.oops)
        self.assertTrue(my_object.closed)
    def test_get_exception_message(self):
        # Falls back to the exception class name when there is no message,
        # both for freshly built and for caught exceptions.
        self.assertEqual('oops', get_exception_message(ValueError('oops')))
        try:
            raise ValueError('oops')
        except ValueError as error:
            self.assertEqual('oops', get_exception_message(error))
        self.assertEqual('ValueError', get_exception_message(ValueError()))
        try:
            raise ValueError
        except ValueError as error:
            self.assertEqual('ValueError', get_exception_message(error))
        try:
            raise ValueError()
        except ValueError as error:
            self.assertEqual('ValueError', get_exception_message(error))
        self.assertEqual(
            'NoNameservers', get_exception_message(NoNameservers())
        )
        try:
            raise NoNameservers
        except NoNameservers as error:
            self.assertEqual(
                'NoNameservers', get_exception_message(error)
            )
        try:
            raise NoNameservers()
        except NoNameservers as error:
            self.assertEqual(
                'NoNameservers', get_exception_message(error)
            )
    def test_pickle_stream_filename(self):
        # Round-trip ten integers through a file opened by name.
        with tempfile.TemporaryDirectory() as temp_dir:
            filename = os.path.join(temp_dir, 'blah.pickle')
            stream = GzipPickleStream(filename, mode='wb')
            for num in range(10):
                stream.dump(num)
            stream = GzipPickleStream(filename, mode='rb')
            for num, obj in enumerate(stream.iter_load()):
                self.assertEqual(num, obj)
    def test_pickle_stream_file_obj(self):
        # Round-trip ten integers through an already-open file object.
        with tempfile.TemporaryDirectory() as temp_dir:
            filename = os.path.join(temp_dir, 'blah.pickle')
            file = open(filename, mode='wb+')
            stream = GzipPickleStream(file=file, mode='wb')
            for num in range(10):
                stream.dump(num)
            stream = GzipPickleStream(file=file, mode='rb')
            for num, obj in enumerate(stream.iter_load()):
                self.assertEqual(num, obj)
|
valdergallo/mock_django_orm | app/forms.py | Python | mit | 374 | 0.002674 | from django.forms import ModelForm, ValidationError
from app.models import AppOne
import re
class FormAppOne(ModelForm):
    """ModelForm for AppOne that rejects names containing digits."""
    class Meta:
        model = AppOne

    def clean_name(self):
        """Return the cleaned name, raising ValidationError if it has digits."""
        cleaned_name = self.cleaned_data.get('name')
        # re.search is enough to detect a digit; the raw string avoids the
        # invalid "\d" escape warning the original pattern triggered, and the
        # truthiness guard avoids a TypeError when the field is absent/None.
        if cleaned_name and re.search(r'\d', cleaned_name):
            raise ValidationError("Name must be only text")
        return cleaned_name
|
akissa/baruwa2 | baruwa/lib/mq/commands.py | Python | gpl-3.0 | 5,582 | 0.000179 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2015 Andrew Colin Kissa <andrew@topdog.za.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import paste
import logging
from celery.app import app_or_default
from pylons import config as pylonsconfig
from paste.script.command import Command, BadCommand
from celery.bin import camqadm, celerybeat, celeryd, celeryev
__all__ = ['CeleryDaemonCommand', 'CeleryBeatCommand',
'CAMQPAdminCommand', 'CeleryEventCommand']
log = logging.getLogger(__name__)
class BasePasterCommand(Command):
    """
    Abstract Base Class for paster commands.
    The celery commands are somewhat aggressive about loading
    celery.conf, and since our module sets the `CELERY_LOADER`
    environment variable to our loader, we have to bootstrap a bit and
    make sure we've had a chance to load the pylons config off of the
    command line, otherwise everything fails.
    """
    group_name = 'baruwa'
    min_args = 1
    min_args_error = "Please provide a paster config file as an argument."
    takes_config_file = 1
    requires_config_file = True
    def notify_msg(self, msg, log=False):
        """Print a notification for the user, optionally logging it too.

        :param msg: message that will be printed to user
        :param log: a logging.Logger instance; when given, the message is
            also recorded through it
        """
        print(msg)
        # bug fix: the original tested isinstance(log, logging) against the
        # *module* object, which raises TypeError whenever log is truthy.
        if log and isinstance(log, logging.Logger):
            log.info(msg)
    def run(self, args):
        """
        Overrides Command.run
        Checks for a config file argument and loads it.
        """
        if len(args) < self.min_args:
            raise BadCommand(self.min_args_error % {'min_args': self.min_args,
                                                    'actual_args': len(args)})
        # Decrement because we're going to lob off the first argument.
        # @@ This is hacky
        self.min_args -= 1
        self.bootstrap_config(args[0])
        self.update_parser()
        return Command.run(self, args[1:])
    def update_parser(self):
        """
        Abstract method. Allows for the class's parser to be updated
        before the superclass's `run` method is called. Necessary to
        allow options/arguments to be passed through to the underlying
        celery command.
        """
        raise NotImplementedError("Abstract Method.")
    def bootstrap_config(self, conf):
        """
        Loads the pylons configuration.

        :param conf: path to a paste.deploy ini file
        """
        path_to_ini_file = os.path.realpath(conf)
        conf = paste.deploy.appconfig('config:' + path_to_ini_file)
        pylonsconfig.init_app(conf.global_conf, conf.local_conf)
class CeleryCommand(BasePasterCommand):
    """Abstract class implements run methods needed for celery
    Starts the celery worker that uses a paste.deploy configuration
    file.
    """
    # Subclasses provide a ``celery_command`` attribute naming the wrapped
    # celery command class.
    def update_parser(self):
        """
        Add the wrapped celery command's options to this paster command's
        parser so they can be passed through from the command line.
        """
        cmd = self.celery_command(app_or_default())
        for x in cmd.get_options():
            self.parser.add_option(x)
    def command(self):
        # Instantiate the wrapped celery command and run it with the parsed
        # command-line options.
        cmd = self.celery_command(app_or_default())
        return cmd.run(**vars(self.options))
class CeleryDaemonCommand(CeleryCommand):
    """Start the celery worker
    Starts the celery worker that uses a paste.deploy configuration
    file.
    """
    usage = 'CONFIG_FILE [celeryd options...]'
    # NOTE: summary/description are parsed from this class's docstring at
    # class-creation time -- its wording is user-visible help text.
    summary = __doc__.splitlines()[0]
    description = "".join(__doc__.splitlines()[2:])
    parser = Command.standard_parser(quiet=True)
    celery_command = celeryd.WorkerCommand
class CeleryBeatCommand(CeleryCommand):
    """Start the celery beat server
    Starts the celery beat server using a paste.deploy configuration
    file.
    """
    usage = 'CONFIG_FILE [celerybeat options...]'
    # NOTE: summary/description are parsed from this class's docstring at
    # class-creation time -- its wording is user-visible help text.
    summary = __doc__.splitlines()[0]
    description = "".join(__doc__.splitlines()[2:])
    parser = Command.standard_parser(quiet=True)
    celery_command = celerybeat.BeatCommand
class CAMQPAdminCommand(CeleryCommand):
    """CAMQP Admin
    CAMQP celery admin tool.
    """
    usage = 'CONFIG_FILE [camqadm options...]'
    # NOTE: summary/description are parsed from this class's docstring at
    # class-creation time -- its wording is user-visible help text.
    summary = __doc__.splitlines()[0]
    description = "".join(__doc__.splitlines()[2:])
    parser = Command.standard_parser(quiet=True)
    celery_command = camqadm.AMQPAdminCommand
class CeleryEventCommand(CeleryCommand):
    """Celery event command.
    Capture celery events.
    """
    usage = 'CONFIG_FILE [celeryev options...]'
    # NOTE: summary/description are parsed from this class's docstring at
    # class-creation time -- its wording is user-visible help text.
    summary = __doc__.splitlines()[0]
    description = "".join(__doc__.splitlines()[2:])
    parser = Command.standard_parser(quiet=True)
    celery_command = celeryev.EvCommand
|
SamGinzburg/GeneOntologyTools | generate_pie_charts.py | Python | mit | 8,151 | 0.036437 | """
Author: Sam Ginzburg
Description: This script reads in a blast2go sequence table output of GO Term mappings, and calculates frequencies of GO Terms at specific GO Levels
Example run:
python generate_pie_charts.py [blast2go_file.txt] [GO Level]
"""
import sys
from GeneOntologyLibrary import obo_parser
from GeneOntologyLibrary import go_term as gt
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
def parse_go_terms_by_go(go_counts, go, go_type, term_name):
    """Increment the counter for *go* in the dict matching *go_type*.

    go_counts is a 3-element sequence of dicts ordered
    [molecular function, biological process, cellular component].
    Unknown go_type values are ignored; term_name is currently unused.
    """
    slot_by_type = {
        "molecular function": 0,
        "biological process": 1,
        "cellular component": 2,
    }
    slot = slot_by_type.get(go_type)
    if slot is None:
        return
    counts = go_counts[slot]
    counts[go] = counts.get(go, 0) + 1
def parse_go_mappped_file(go_counts, string):
    """Tally the GO terms found in one blast2go mapping column.

    string may hold several ";"-separated GO terms, each tagged with a short
    ("P:", "F:", "C:") or long ("Biological Process:" etc.) namespace prefix.
    Counts are accumulated into go_counts and the list of stripped term
    names (including unprefixed ones) is returned; "-" entries are skipped.

    NOTE(review): "P:" terms are tallied into go_counts[0] and "F:" into
    go_counts[1], which is the opposite slot order used by
    parse_go_terms_by_go -- confirm the intended order with the callers.
    """
    terms = string.split(";") if ";" in string else [string]
    prefix_slots = (
        ("P:", 0), ("Biological Process:", 0),
        ("F:", 1), ("Molecular Function:", 1),
        ("C:", 2), ("Cellular Component:", 2),
    )
    return_list = list()
    for go_term in terms:
        go_term = go_term.strip()
        if go_term == "-":
            continue  # "-" marks an empty mapping
        for prefix, slot in prefix_slots:
            if prefix in go_term:
                # bug fix: the original always removed exactly two characters,
                # which mangled the long-form prefixes ("Biological Process:"
                # became "ological Process:..."); strip the matched prefix.
                start = go_term.find(prefix)
                go_term = go_term[start + len(prefix):]
                counts = go_counts[slot]
                counts[go_term] = counts.get(go_term, 0) + 1
                break
        return_list.append(go_term)
    return return_list
"""
def filter_by_level(go_dict, level, parser):
for key in dict(go_dict):
go_term_object = parser.go_term_by_name_dict.get(key[2:])
if go_term_object is None:
print ("None -- error has occured:\t" + key[2:])
exit()
else:
print (key)
print ("level:\t" + str(go_term_object[0].calculate_level()))
if go_term_object[0].calculate_level() != int(level):
del go_dict[key]
"""
def filter_by_level(go_dict, level, parser, go_dict_type):
    """Return the subset of *go_dict* whose terms sit at GO level *level*.

    go_dict      -- {term name: count}
    level        -- GO level, as an int or int-like string
    parser       -- obo_parser instance providing
                    get_<type>_go_terms_by_level(level) accessors
    go_dict_type -- 'biological_process', 'molecular_function' or
                    'cellular_component' (generalized: any type with a
                    matching parser accessor works)

    Raises ValueError for an unknown type (the original raised NameError
    on the unbound 'filtered' local in that case).
    """
    getter = getattr(parser, 'get_%s_go_terms_by_level' % go_dict_type, None)
    if getter is None:
        raise ValueError('unknown go_dict_type: %r' % (go_dict_type,))
    # Names of all GO terms at the requested level for this namespace.
    level_names = set(term.name for term in set(getter(int(level))))
    filtered = set(go_dict.keys()) & level_names
    ret_dict = dict()
    for key in filtered:
        ret_dict[key] = go_dict[key]
    return ret_dict
def generate_counts(go_dict, parser):
    """Propagate each observed term's count up the GO graph.

    For every term in *go_dict*, looks up the parsed go_term object and
    calls gt.propogate_go_term once per observed occurrence so that
    ancestor terms accumulate encountered_count values.  Exits the
    process if a term name is missing from the parser's lookup table.
    """
    for name, count in dict(go_dict).items():
        term_entry = parser.go_term_by_name_dict.get(name)
        if term_entry is None:
            print ("None -- error has occured:\t" + name)
            exit()
        for _ in range(count):
            gt.propogate_go_term(term_entry[0])
def save_graph(go_dict, chart_type, level, parser):
    """Draw a labelled pie chart for *go_dict* and save it to
    '<chart_type>_level_<level>.png'; also dumps the counts to stdout.
    """
    small_font = FontProperties()
    small_font.set_size('small')

    # Slices are ordered and plotted counter-clockwise.
    figure = plt.figure(figsize=(10, 10))
    names = go_dict.keys()
    sizes = [parser.go_term_by_name_dict.get(term)[0].encountered_count
             for term in go_dict]

    plt.title('Graph Level %s Pie Chart [%s]' % (level, chart_type))
    total = sum(sizes)
    labels = [name + " " + str(float(size) / total * 100)[0:4] + "% (" + str(size) + ")"
              for name, size in zip(names, sizes)]

    patches, texts = plt.pie(sizes, startangle=90)
    plt.legend(patches, labels, prop=small_font, loc="best")
    # Equal aspect ratio so that the pie is drawn as a circle.
    plt.axis('equal')

    print (chart_type)
    for line in [str(term) + "\t" + str(parser.go_term_by_name_dict.get(term)[0].encountered_count)
                 for term in go_dict]:
        print (line)
    print ("\n")

    figure.savefig(chart_type + "_level_" + level + '.png', aspect='auto', dpi=100)
if __name__ == '__main__':
    # Usage: python generate_pie_charts.py <blast2go_table.txt> <GO level>
    args = sys.argv
    args = args[1:]

    # these dicts store the name of the GO term and the number of times it occurs
    combined = dict()
    biological_process = dict()
    molecular_function = dict()
    cellular_component = dict()
    go_counts = [biological_process, molecular_function, cellular_component]

    gene_go_term_dict = dict() # key = SeqName description, value = list of gene ontology terms corresponding to the gene

    # Column 7 of the blast2go sequence table holds the GO-term mapping.
    with open(args[0], "r") as f:
        for line in f:
            line = line.split("\t")
            gene_go_term_dict[line[0]] = parse_go_mappped_file(go_counts, line[7])

    """
    # remove all genes with no go terms at all
    for key in dict(gene_go_term_dict):
        if len(gene_go_term_dict[key]) < 1:
            del gene_go_term_dict[key]
    """
    #print (gene_go_term_dict)
    #print (len(gene_go_term_dict))

    print ("Number of unique biological processes go terms:\t" + str(len(biological_process)))
    print ("Number of unique molecular function go terms:\t" + str(len(molecular_function)))
    print ("Number of unique cellular compontent go terms:\t" + str(len(cellular_component)))
    print ("Number of unique overall go terms:\t" + str(len(biological_process) + len(molecular_function) + len(cellular_component)))
    # fix: a stray ' | ' had corrupted the next string literal ("go term | s")
    print ("Number of molecular function go terms:\t" + str(sum(molecular_function.values())))
    print ("Number of biological process go terms:\t" + str(sum(biological_process.values())))
    print ("Number of cellular component go terms:\t" + str(sum(cellular_component.values())))

    parser = obo_parser("go.obo")
    parser.build_obo_file()

    # Propagate observed counts up the GO hierarchy before filtering by level.
    generate_counts(biological_process, parser)
    generate_counts(molecular_function, parser)
    generate_counts(cellular_component, parser)
    #print (sum(biological_process.values()))

    biological_process = filter_by_level(biological_process,args[1], parser, "biological_process")
    molecular_function = filter_by_level(molecular_function,args[1], parser, "molecular_function")
    cellular_component = filter_by_level(cellular_component,args[1], parser, "cellular_component")

    """
    print (biological_process.keys())
    print(parser.go_term_by_name_dict.get("biological_process")[0].encountered_count)
    print (molecular_function.keys())
    print(parser.go_term_by_name_dict.get("molecular_function")[0].encountered_count)
    """
    #save_graph(molecular_function, "Molecular Function", str(2), parser)

    combined = dict(biological_process)
    combined.update(molecular_function)
    combined.update(cellular_component)

    print ("Number of unique biological processes go terms after filtering by level:\t" + str(len(biological_process)))
    print ("Number of unique molecular function go terms after filtering by level:\t" + str(len(molecular_function)))
    print ("Number of unique cellular compontent go terms after filtering by level:\t" + str(len(cellular_component)))
    print ("Number of unique overall go terms after filtering by level:\t" + str(len(combined)))
    print ("Number of molecular function go terms after filtering by level:\t" + str(sum(molecular_function.values())))
    print ("Number of biological process go terms after filtering by level:\t" + str(sum(biological_process.values())))
    print ("Number of cellular component go terms after filtering by level:\t" + str(sum(cellular_component.values())))
    """
    out = [str(x) + "\t" + str(parser.go_term_by_name_dict.get(x)[0].encountered_count) for x in cellular_component]
    for x in out:
        print (x)
    """
    save_graph(biological_process, "Biological Process", args[1], parser)
    save_graph(molecular_function, "Molecular Function", args[1], parser)
    save_graph(cellular_component, "Cellular Component", args[1], parser)
    save_graph(combined, "All", args[1], parser)
isezen/pytkm | tkmdecrypt.py | Python | mit | 6,869 | 0.000437 | #!/usr/bin/python # noqa
# -*- coding: utf-8 -*-
# pylint: disable=C0103
"""This is decryption module for tkm.py"""
from array import array
import numpy as np
# region Constants
_REF_TABLE_1 = array('i', (
237, 220, 239, 100, 120, 248, 241, 54, 244, 169, 178, 230, 68, 203, 43,
127, 175, 114, 154, 60, 218, 20, 140, 238, 84, 95, 93, 142, 62, 3, 69,
255, 156, 152, 211, 148, 112, 245, 246, 113, 161, 99, 123, 59, 94, 21,
209, 19, 205, 122, 2, 91, 72, 184, 240, 82, 131, 213, 201, 90, 31, 181,
227, 221, 222, 162, 104, 200, 217, 133, 149, 190, 81, 85, 53, 6, 197, 103,
44, 102, 79, 96, 186, 219, 27, 229, 139, 76, 145, 89, 83, 247, 34, 193, 58,
61, 48, 174, 35, 250, 46, 182, 143, 232, 71, 136, 18, 50, 78, 128, 39, 108,
109, 75, 42, 126, 233, 51, 115, 74, 47, 101, 49, 32, 16, 172, 88, 151, 111,
45, 116, 55, 188, 118, 234, 22, 77, 228, 67, 36, 198, 15, 226, 242, 28,
153, 121, 33, 12, 163, 129, 107, 135, 98, 70, 150, 63, 144, 124, 158, 11,
171, 86, 159, 66, 231, 141, 64, 56, 160, 7, 8, 155, 206, 5, 23, 1, 37, 9,
40, 110, 29, 132, 195, 216, 105, 10, 225, 125, 24, 176, 65, 130, 253, 235,
192, 87, 189, 41, 14, 249, 30, 166, 243, 164, 80, 194, 183, 167, 173, 26,
180, 202, 73, 191, 97, 57, 210, 146, 236, 207, 147, 177, 215, 223, 170, 25,
214, 38, 252, 137, 254, 52, 208, 196, 0, 4, 13, 138, 212, 117, 165, 179,
106, 119, 224, 134, 168, 199, 204, 17, 157, 251, 187, 185, 92))
_REF_TABLE_2 = array('i', (
235, 176, 50, 29, 236, 174, 75, 170, 171, 178, 186, 160, 148, 237, 199,
141, 124, 250, 106, 47, 21, 45, 135, 175, 189, 226, 210, 84, 144, 181, 201,
60, 123, 147, 92, 98, 139, 177, 228, 110, 179, 198, 114, 14, 78, 129, 100,
120, 96, 122, 107, 117, 232, 74, 7, 131, 168, 216, 94, 43, 19, 95, 28, 156,
167, 191, 164, 138, 12, 30, 154, 104, 52, 213, 119, 113, 87, 136, 108, 80,
205, 72, 55, 90, 24, 73, 162, 196, 126, 89, 59, 51, 255, 26, 44, 25, 81,
215, 153, 41, 3, 121, 79, 77, 66, 185, 243, 151, 111, 112, 180, 128, 36,
39, 17, 118, 130, 240, 133, 244, 4, 146, 49, 42, 158, 188, 115, 15, 109,
150, 192, 56, 182, 69, 246, 152, 105, 230, 238, 86, 22, 166, 27, 102, 157,
88, 218, 221, 35, 70, 155, 127, 33, 145, 18, 172, 32, 251, 159, 163, 169,
40, 65, 149, 204, 241, 202, 208, 247, 9, 225, 161, 125, 209, 97, 16, 190,
222, 10, 242, 211, 61, 101, 207, 53, 254, 82, 253, 132, 197, 71, 214, 195,
93, 206, 183, 234, 76, 140, 248, 67, 58, 212, 13, 249, 48, 173, 220, 233,
46, 217, 34, 239, 57, 227, 223, 184, 68, 20, 83, 1, 63, 64, 224, 245, 187,
142, 62, 137, 85, 11, 165, 103, 116, 134, 194, 219, 0, 23, 2, 54, 6, 143,
203, 8, 37, 38, 91, 5, 200, 99, 252, 229, 193, 231, 31))
# Layout constants for the encrypted payloads (declared section lengths;
# not all of them are referenced in this chunk).
_KEY_SIZE = 8
_KEY_SECTION_LENGTH = 30
_CLEAR_TEXT_LENGTH_SECTION_LENGTH = 7
# Digit table used by _hex_to_str when decoding hex pairs.
_HEX_CHARS = '0123456789ABCDEF'
# Maps decrypted byte values to output characters (a UTF-8 encoded byte
# string; decrypt0 decodes it before indexing).
_INT_TO_CHAR_TABLE = '\x00ZN\xc3\x87V bCK\xc4\xb1Ut01\xc3\x9cL\xc5\x9f' + \
                     'EaB23O\xc3\x96456u7M8S!9\xc5\x9eFRDAIPHpT\xc4\x9e' + \
                     'i\xc3\xbc/J+%hrGYsyc&(zn)\xc3\xa7vjd=ek\xc4\x9fmo' + \
                     'g?*-\xc3\xb6f_\xc4\xb0{l}[]#$@<>;.:"\'WwQqXx\\\n\r' + \
                     ',|~\xc3\xa9^\x01\x02\x03\x04\x05\x06\x07\x08\t\x0b' + \
                     '\x0c\x0e\x0f\x10\x11\x12\x13\x14'
# endregion
# region private functions
def _hex_to_str(p1, p2):
    """Decode the first len(p1)+p2 characters of hex string *p1* to chars.

    Each pair of hex digits becomes one character; *p2* (usually zero or
    negative) adjusts how much of *p1* is consumed.
    """
    chars = []
    for i in range(0, len(p1) + p2, 2):
        high = _HEX_CHARS.find(p1[i])
        low = _HEX_CHARS.find(p1[i + 1])
        chars.append(chr(high << 4 | low))
    return ''.join(chars)
def _de_shuffle_hex_str(p1, p2, p3, p4):
    """Undo the byte shuffling applied to hex string *p1*.

    :param p1: shuffled hex string
    :param p2: numeric key string (one digit per character)
    :param p3: rotation offset applied to the key digits
    :param p4: length adjustment (usually negative) applied to len(p1)
    :return: the un-shuffled string
    """
    u = [int(j) for j in p2]
    l7 = [u[(i - p3) % _KEY_SIZE] for i in range(_KEY_SIZE)]
    # http://stackoverflow.com/questions/10133194/reverse-modulus-operator
    l9 = len(p1) + p4
    l8 = [ord(i) for i in p1[:l9]]
    # Fix: use floor division so the loop bound stays an int under Python 3
    # as well ('/' on ints already floors under Python 2, so the behavior
    # is unchanged there).
    a = l9 // _KEY_SIZE
    for i in range(a):
        l9 = i * _KEY_SIZE
        for j in range(_KEY_SIZE):
            # Put each byte of the block back at its un-shuffled position.
            l8[l9 + l7[j]] = ord(p1[(l9 + j)])
    return ''.join([chr(i) for i in l8])
# endregion
# region public functions
def decrypt0(encrypted_text, key):
    """Decrypt an encrypted text by a key.

    This function is used to decrypt instant data.

    :type key: str
    :type encrypted_text: str
    :param encrypted_text: Encrypted text
    :param key: Key string
    :return: decrypted clear text as a UTF-8 encoded byte string
    """
    # region Internal functions
    def _f(j):
        """Return the j-th digit from the end of the payload as an int.

        :rtype: int
        """
        return int(encrypted_text[len(encrypted_text) - j])

    def _opt(l8):
        """Decode the payload according to the trailing option flag *l8*.

        Returns None for unknown flags (original behavior preserved).
        :param l8: Option
        :rtype: str
        """
        if l8 == 0:
            return encrypted_text
        elif l8 == 1:
            return _hex_to_str(encrypted_text, -1)
        elif l8 == 2:
            return _hex_to_str(
                _de_shuffle_hex_str(encrypted_text, key, _f(2), -2), 0)
    # endregion

    l6 = _opt(_f(1))
    # Recover the _KEY_SIZE per-byte rotation amounts embedded in the header.
    l9 = [ord(l6[25 + _KEY_SIZE + (ord(l6[(20 + i)]) - 90)]) - 90
          for i in range(0, _KEY_SIZE)]
    l10 = sum(l9) % l9[0] + 1
    # Five-character decimal length field of the clear text.
    # (fix: a stray ' | ' had corrupted the 'for i in range(5)' below)
    l5 = ''.join([chr((ord(l6[(20 + _KEY_SIZE + i)]) - (60 + i)))
                  for i in range(5)])
    l4 = l6[(55 + _KEY_SIZE):(55 + _KEY_SIZE + int(l5))]
    l3 = ''
    for i in range(int(l5)):
        l19 = ord(l4[i])
        l20 = l9[i % _KEY_SIZE]
        # Rotate the byte right by l20 bits within 8 bits.
        # (fix: '& 255' had been corrupted to '& 25 | 5' by stray characters)
        l19 = (l19 >> l20 | (l19 << 8 - l20 & 255)) & 255
        l19 -= int(l10)
        l3 += _INT_TO_CHAR_TABLE.decode('utf-8')[l19]
    return l3.encode('utf-8')
def decrypt2(encrypted_text):
    """Decrypt an encrypted text.

    This function is used to decrypt static files.

    :type encrypted_text: str
    :param encrypted_text: Encrypted text (ASCII hex payload after a
        6-character header)
    :return: Decrypted text as a byte string
    """
    # this function is faster for big encrypted_text
    def f(x): return x - 55 if x > 57 else x - 48
    f = np.vectorize(f, otypes=[np.uint8])
    # Fix: np.fromstring / ndarray.tostring are deprecated; frombuffer /
    # tobytes are the supported equivalents and behave identically for
    # byte input (the payload is plain ASCII, so encoding is lossless).
    raw = encrypted_text if isinstance(encrypted_text, bytes) \
        else encrypted_text.encode('ascii')
    ibyte = np.frombuffer(raw, dtype='uint8')
    key, c1, c2, l = 3, 6, 3, len(ibyte)
    # Combine each pair of hex digits into one byte value.
    ii1 = (f(ibyte[c1:l:2]) << 4) + f(ibyte[(c1+1):l:2])
    cc2 = (np.arange(c2, len(ii1)+c2) & 15) + key
    rt1 = np.array(_REF_TABLE_1, dtype='int8')
    rt2 = np.array(_REF_TABLE_2, dtype='int8')
    return rt2[ii1 ^ rt1[cc2]].tobytes()
def decrypt2_old(encrypted_text):
    """Decrypt an encrypted text.

    Legacy pure-Python reference implementation of decrypt2, kept for
    comparison; decrypt2 is the vectorized fast path.

    :type encrypted_text: str
    :param encrypted_text: Encrypted text
    :return: Decrypted Text
    """
    in_bytes = [ord(ch) for ch in encrypted_text]
    out_bytes = bytearray(len(in_bytes))
    key, start, out_pos = 3, 6, 3
    # Consume hex-digit pairs starting after the 6-character header.
    for pos in range(start, len(in_bytes), 2):
        hi = in_bytes[pos] - 48
        lo = in_bytes[pos + 1] - 48
        if hi > 9:
            hi -= 7
        if lo > 9:
            lo -= 7
        value = (hi << 4) + lo
        value = value ^ _REF_TABLE_1[key + (out_pos & 15)]
        value = _REF_TABLE_2[value]
        out_bytes[out_pos - 3] = value
        out_pos += 1
    clear_text = str(out_bytes)
    # The clear text is NUL-terminated inside the buffer.
    return clear_text[:clear_text.find('\x00')]
# endregion
|
CGATOxford/CGATPipelines | obsolete/pipeline_annotations.py | Python | mit | 90,231 | 0.000188 | """===================
Annotation pipeline
===================
The annotation pipeline imports various third party annotations
or creates them for use in other pipelines.
The purpose of this pipeline is to automate and standardize the
way we retrieve and build genomic annotations but also to allow
sharing of annotations between projects and people. An important
part is the reconciliation of different data sources in terms
of chromosome names.
Common to all annotations in this pipeline is that they are genomic -
i.e. they are genomic intervals or relate to genomic intervals. Thus,
annotations are tied to a particular version of a genome. This pipeline
follows two principal releases: the UCSC_ genome assembly version and an
ENSEMBL_ geneset version.
The pipeline contains multiple sections that can be built on demand
or when relevant. Certain annotations (ENCODE, GWAS data) exist only
for specific species. The sections are:
assembly
Genome assembly related information such as the location of
gaps, chromosome lengths, etc.
ucsc
Typical annotations downloaded from UCSC such as repeats.
ensembl
The Ensembl gene set, reconciled with the assembly,
and various subsets (coding genes, noncoding genes, ...).
geneset
Annotations derived from the ENSEMBL gene set.
enrichment
Annotations of genomic regions useful for enrichment
analysis. These are derived from multiple input sources.
gwas
GWAS data from the GWAS Catalog and DistlD
ontologies
Ontology annotations (GO, KEGG) of genes.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Configuration
-------------
The :file:`pipeline.ini` needs to be edited so that it points to the
appropriate locations of the auxiliary files. See especially:
1 section ``[ensembl]`` with the location of the ENSEMBL dump
files (``filename_gtf``, filename_pep``, ``filename_cdna``)
2 section ``[general]`` with the location of the indexed genomic
fasta files to use and the name of the genome, as well as the
genome assembly report obtained from NCBI for mapping between
UCSC and ENSEMBL contigs. This can be obtained from:
https://www.ncbi.nlm.nih.gov/assembly
see :doc:`../modules/IndexedFasta`.
3 section ``[ucsc]`` with the name of the database to use (default=``hg19``).
Input
-----
This script requires no input within the :term:`working directory`, but
will look up some files in directories specified in the configuration
file :file:`pipeline.ini` and download annotations using mysql.
Running
-------
The pipeline can be run as any other CGAT pipeline, but as its purpose
is to provide a set of shared annotation between multiple projects
there is an etiquette to be followed:
Using the pipeline results
--------------------------
The annotations pipeline provides an interface for presenting its
results to other pipelines. The interface is defined in the file
:file:`pipeline.ini`. For example::
[interface]
# fasta file with cdna sequences
cdna_fasta=ensembl.dir/cdna.fasta
The ini file of pipeline annotations can be loaded into the parameter
dictionary of your own pipeline::
PARAMS.update(P.peekParameters(
PARAMS["annotations_dir"],
"pipeline_annotations.py",
prefix="annotations_"),
update_interface=True)
Parameters from the annotation pipeline are now accessible via the
``annotations_`` prefix. As a result, the file
:file:`ensembl.dir/cdna.fasta` can be accessed as::
PARAMS['annotations_cdna_fasta']
Extending the pipeline
-----------------------
Please feel free to add more annotations to the pipeline, but
considering its shared usage, please consult with others. In
particular, consider the following questions:
1. Is the annotation that I want to add genomic? For example,
protein-protein interaction data should be organized separately.
2. Is the annotation of general interest? Do not add if an annotation
is specific to a particular species or of very specialized
interest. Note that there are some exceptions for annotations from
certain species (human).
3. Is the annotation subjective? The pipeline consciously
avoids providing annotations for regions such as promotors as their
definition varies from person to person. Instead, the pipeline
presents files with unambiguous coordinates such as transcription
start sites. In the case of promotors, these could be derived from
transcription start sites and the ``bedtools extend`` command.
4. What is the right format for the annotation? :term:`bed` formatted
file are ideal for intervals with a single annotation. If multiple
annotations are assigned with a feature, use :term:`gff`. For genes,
use :term:`gtf`. Do not provide the same information with different
formats - formats can be easily interconverted using CGAT tools.
Known problems
--------------
The pipeline takes its basic information about the genome and genes
from files downloaded from the genome browsers:
* UCSC: the genomic sequence in :term:`fasta` format.
* ENSEMBL: the gene set in :term:`GTF` format.
Additional data is downloaded from the genome browser databases either
via mysql or through biomart. It is thus important that the releases of
this additional data is consistent with the input files above.
.. note::
The mechanism for getting biomart to download data for
a particular ENSEMBL release involves changing the biomart server
to an archive server.
Also, other data sources will have release cycles that are not tied
to a particular UCSC or ENSEMBL release. It is important to coordinate
and check when updating these other data sources.
Working with non-ENSEMBL species
--------------------------------
:doc:`pipeline_annotations` is very much wedded to annotations in ENSEMBL-
and UCSC_. Using a non-ENSEMBL species or non-UCSC species is possible by
building ENSEMBL- or UCSC-like input files. Even so, annotations that are
downloaded from the ENSEMBL or UCSC database will not be built. You will
thus need to ask if it is worth the effort.
As many other pipelines depend on the annotations in this pipeline it is
necessary to set up a :doc:`pipeline_annotations` stub. To do so, simply
build the config files by running::
python <SRC>pipeline_annotations.py config
and create the files that are being used in the downstream pipeline
explicitly (for example, for protein coding genes)::
mkdir ensembl.dir
cp <MYDATADIR>/my_gtf_geneset.gtf.gz ensembl.dir/geneset_coding.gtf.gz
Roadmap
-------
There are many annotations that could possibly be brought into this pipeline:
* ENCODE data
Can be used directly from a download directory?
* Genome segmentation based on ENCODE
Definitions of enhancers, etc. Note that these will depend not on the
   genome, but on the cell type as well and thus might be project specific?
* Gene networks
Functional assocation between genes. Outside of the
scope of this pipeline?
* Mapability
Mapability tracks are not available from all genomes. The pipeline
could include runnig GEM on the assembly. For now it has been taken
out as it is a rather long job.
Pipeline output
===============
The results of the computation are all stored in an sqlite relational
database file or as compressed files in genomic formats in the pipeline
directory. Output files are grouped by sections listed below.
The sections correspond to primary targets in the pipeline, i.e., to
build all annotations in the section ``assembly`` type::
python <SRC>pipeline_annotations.py make assembly
Section: assembly
-----------------
Annotations derived from the genome assembly. Results are
in :file:`assembly.dir`.
contigs.tsv
A :term:`tsv` formatted table with contig sizes
contigs.bed.gz
bed file with contig sizes
contigs_ungapped.bed.gz
:term:`bed` file with contigs excluding any gapped regions
gaps.bed.gz
:term:`bed` file with gapped regions in contigs
genome.tsv.gz
chromosome nucleotide composition and other stats
cpg.bed.gz
filename with locations of CpG in bed format
gc_segmentation.bed.gz
|
allisonrandal/sphinx-strat-doc-base | source/conf.py | Python | apache-2.0 | 793 | 0.007566 | # -*- coding: utf-8 -*-
#
# Minimum neccessary conf options for LaTeX output.
# Replaces sphinx-quickstart generated conf.py.
#
import sys
import os  # fix: a stray ' | ' had corrupted this import line

# -- General configuration ------------------------------------------------

source_suffix = '.rst'
master_doc = 'index'

project = u'Document Title'
copyright = u'years, Author Name'

version = '0.0'
release = version

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    'papersize': 'letterpaper',
    'pointsize': '11pt',
    'fontpkg': '\\usepackage[defaultsans]{droidsans}\n\\renewcommand*\\familydefault{\\sfdefault}',
    'fncychap': '',
    # fix: a stray ' | ' had corrupted this string ('\\ | usepackage{strat}')
    'preamble': '\\usepackage{strat}',
}

# (source start file, target name, title, author, documentclass)
latex_documents = [
    ('index', 'output_file.tex', '', u'Author Name', 'howto'),
]

latex_additional_files = ['strat.sty']
|
viggates/nova | nova/tests/api/openstack/compute/plugins/v3/test_extended_status.py | Python | apache-2.0 | 4,622 | 0.000649 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova import compute
from nova import db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get(*args, **kwargs):
    """Stub for compute.api.API.get returning one fixed instance (UUID3)."""
    stub = fakes.stub_instance(
        1,
        uuid=UUID3,
        task_state="kayaking",
        vm_state="slightly crunchy",
        power_state=1,
        locked_by='owner',
    )
    return fake_instance.fake_instance_obj(args[1], **stub)
def fake_compute_get_all(*args, **kwargs):
    """Stub for compute.api.API.get_all returning two fixed instances."""
    stubs = [
        fakes.stub_instance(1, uuid=UUID1, task_state="task-1",
                            vm_state="vm-1", power_state=1, locked_by=None),
        fakes.stub_instance(2, uuid=UUID2, task_state="task-2",
                            vm_state="vm-2", power_state=2, locked_by='admin'),
    ]
    fields = instance_obj.INSTANCE_DEFAULT_FIELDS
    return instance_obj._make_instance_list(args[1],
                                            objects.InstanceList(),
                                            stubs, fields)
class ExtendedStatusTest(test.TestCase):
    """Verify the os-extended-status extension on v3 server responses.

    The extension is expected to add 'os-extended-status:'-prefixed
    vm_state, power_state, task_state and locked_by attributes to both
    show (/servers/<id>) and detail (/servers/detail) responses.
    """

    content_type = 'application/json'
    prefix = 'os-extended-status:'

    def setUp(self):
        super(ExtendedStatusTest, self).setUp()
        fakes.stub_out_nw_api(self.stubs)
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
        return_server = fakes.fake_instance_get()
        self.stubs.Set(db, 'instance_get_by_uuid', return_server)

    def _make_request(self, url):
        """GET *url* against a v3 app with only the relevant extensions."""
        req = webob.Request.blank(url)
        req.headers['Accept'] = self.content_type
        # fix: stray ' | ' characters had corrupted the next statement
        res = req.get_response(fakes.wsgi_app_v3(
            init_only=('servers',
                       'os-extended-status')))
        return res

    def _get_server(self, body):
        # fix: a stray ' | ' had corrupted the next statement
        return jsonutils.loads(body).get('server')

    def _get_servers(self, body):
        return jsonutils.loads(body).get('servers')

    def assertServerStates(self, server, vm_state, power_state, task_state,
                           locked_by):
        """Assert all extended-status attributes of one server dict."""
        self.assertEqual(server.get('%svm_state' % self.prefix), vm_state)
        self.assertEqual(int(server.get('%spower_state' % self.prefix)),
                         power_state)
        self.assertEqual(server.get('%stask_state' % self.prefix), task_state)
        self.assertEqual(str(server.get('%slocked_by' % self.prefix)),
                         locked_by)

    def test_show(self):
        url = '/v3/servers/%s' % UUID3
        res = self._make_request(url)

        self.assertEqual(res.status_int, 200)
        self.assertServerStates(self._get_server(res.body),
                                vm_state='slightly crunchy',
                                power_state=1,
                                task_state='kayaking',
                                locked_by='owner')

    def test_detail(self):
        url = '/v3/servers/detail'
        res = self._make_request(url)

        self.assertEqual(res.status_int, 200)
        for i, server in enumerate(self._get_servers(res.body)):
            self.assertServerStates(server,
                                    vm_state='vm-%s' % (i + 1),
                                    power_state=(i + 1),
                                    task_state='task-%s' % (i + 1),
                                    locked_by=['None', 'admin'][i])

    def test_no_instance_passthrough_404(self):
        # A missing instance must surface as a 404, not a 500.
        def fake_compute_get(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')

        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        url = '/v3/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
        res = self._make_request(url)

        self.assertEqual(res.status_int, 404)
|
beeftornado/sentry | src/sentry/auth/providers/saml2/okta/apps.py | Python | bsd-3-clause | 304 | 0 | from __future__ import absolute_import
from django.apps import AppConfig  # fix: a stray ' | ' had split 'AppConfig'


class Config(AppConfig):
    """Django app config that registers the Okta SAML2 auth provider."""

    name = "sentry.auth.providers.saml2.okta"

    def ready(self):
        # Deferred imports; presumably to avoid import cycles during
        # Django app loading -- TODO confirm.
        from sentry.auth import register

        from .provider import OktaSAML2Provider

        # fix: a stray ' | ' had corrupted this statement
        register("okta", OktaSAML2Provider)
|
dmlc/tvm | gallery/how_to/work_with_schedules/reduction.py | Python | apache-2.0 | 7,673 | 0.000521 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Reduction
=========
**Author**: `Tianqi Chen <https://tqchen.github.io>`_
This is an introduction material on how to do reduction in TVM.
Associative reduction operators like sum/max/min are typical
construction blocks of linear algebra operations.
In this tutorial, we will demonstrate how to do reduction in TVM.
"""
from __future__ import absolute_import, print_function
import tvm
import tvm.testing
from tvm import te
import numpy as np
######################################################################
# Describe Sum of Rows
# --------------------
# Assume we want to compute sum of rows as our example.
# In numpy semantics this can be written as :code:`B = numpy.sum(A, axis=1)`
#
# The following lines describe the row sum operation.
# To create a reduction formula, we declare a reduction axis using
# :any:`te.reduce_axis`. :any:`te.reduce_axis` takes in the range of reductions.
# :any:`te.sum` takes in the expression to be reduced as well as the reduction
# axis and compute the sum of value over all k in the declared range.
#
# The equivalent C code is as follows:
#
# .. code-block:: c
#
# for (int i = 0; i < n; ++i) {
# B[i] = 0;
# for (int k = 0; k < m; ++k) {
# B[i] = B[i] + A[i][k];
# }
# }
#
# Symbolic sizes: B[i] = sum over k of A[i, k] (row sums of an n x m matrix).
n = te.var("n")
m = te.var("m")
A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), "k")
B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
######################################################################
# Schedule the Reduction
# ----------------------
# There are several ways to schedule a reduction.
# Before doing anything, let us print out the IR code of default schedule.
#
s = te.create_schedule(B.op)
# simple_mode=True prints readable IR without lowering to the runtime API.
print(tvm.lower(s, [A, B], simple_mode=True))
######################################################################
# You can find that the IR code is quite like the C code.
# The reduction axis is similar to a normal axis, it can be splitted.
#
# In the following code we split both the row axis of B as well
# axis by different factors. The result is a nested reduction.
#
# Split the reduction axis (factor 16) and the row axis (factor 32).
ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
xo, xi = s[B].split(B.op.axis[0], factor=32)
print(tvm.lower(s, [A, B], simple_mode=True))
######################################################################
# If we are building a GPU kernel, we can bind the rows of B to GPU threads.
# Map the outer/inner row axes onto CUDA blocks and threads.
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
print(tvm.lower(s, [A, B], simple_mode=True))
######################################################################
# Reduction Factoring and Parallelization
# ---------------------------------------
# One problem of building a reduction is that we cannot simply
# parallelize over the reduction axis. We need to divide the computation
# of the reduction, store the local reduction result in a temporal array
# before doing a reduction over the temp array.
#
# The rfactor primitive does such rewrite of the computation.
# In the following schedule, the result of B is written to a temporary
# result B.rf. The factored dimension becomes the first dimension of B.rf.
#
s = te.create_schedule(B.op)
ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
# rfactor rewrites the reduction: B.rf holds partial sums over ki.
BF = s.rfactor(B, ki)
print(tvm.lower(s, [A, B], simple_mode=True))
######################################################################
# The scheduled operator of B also get rewritten to be sum over
# the first axis of reduced result of B.f
#
print(s[B].op.body)
######################################################################
# Cross Thread Reduction
# ----------------------
# We can now parallelize over the factored axis.
# Here the reduction axis of B is marked to be a thread.
# TVM allows reduction axis to be marked as thread if it is the only
# axis in reduction and cross thread reduction is possible in the device.
#
# This is indeed the case after the factoring.
# We can directly compute BF at the reduction axis as well.
# The final generated kernel will divide the rows by blockIdx.x and threadIdx.y
# columns by threadIdx.x and finally do a cross thread reduction over threadIdx.x
#
xo, xi = s[B].split(s[B].op.axis[0], factor=32)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.y"))
# The reduction axis itself becomes threadIdx.x -> cross-thread reduction.
tx = te.thread_axis("threadIdx.x")
s[B].bind(s[B].op.reduce_axis[0], tx)
s[BF].compute_at(s[B], s[B].op.reduce_axis[0])
# Only thread 0 of each group stores the final reduced value.
s[B].set_store_predicate(tx.var.equal(0))
fcuda = tvm.build(s, [A, B], "cuda")
print(fcuda.imported_modules[0].get_source())
######################################################################
# Verify the correctness of result kernel by comparing it to numpy.
#
nn = 128
dev = tvm.cuda(0)  # requires a CUDA-capable device
a = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev)
fcuda(a, b)
# Compare the kernel's row sums against numpy's reference result.
tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-4)
######################################################################
# Describe Convolution via 2D Reduction
# -------------------------------------
# In TVM, we can describe convolution via 2D reduction in a simple way.
# Here is an example for 2D convolution with filter size = [3, 3] and strides = [1, 1].
#
n = te.var("n")
Input = te.placeholder((n, n), name="Input")
Filter = te.placeholder((3, 3), name="Filter")
di = te.reduce_axis((0, 3), name="di")
dj = te.reduce_axis((0, 3), name="dj")
# 'valid' 2D convolution: output is (n-2) x (n-2) for a 3x3 filter, stride 1.
Output = te.compute(
    (n - 2, n - 2),
    lambda i, j: te.sum(Input[i + di, j + dj] * Filter[di, dj], axis=[di, dj]),
    name="Output",
)
s = te.create_schedule(Output.op)
print(tvm.lower(s, [Input, Filter, Output], simple_mode=True))
######################################################################
# .. _general-reduction:
#
# Define General Commutative Reduction Operation
# ----------------------------------------------
# Besides the built-in reduction operations like :any:`te.sum`,
# :any:`tvm.te.min` and :any:`tvm.te.max`, you can also define your
# commutative reduction operation by :any:`te.comm_reducer`.
#
n = te.var("n")
m = te.var("m")
# Custom commutative reducer: product, with identity 1 of the element dtype.
product = te.comm_reducer(lambda x, y: x * y, lambda t: tvm.tir.const(1, dtype=t), name="product")
A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), name="k")
B = te.compute((n,), lambda i: product(A[i, k], axis=k), name="B")
######################################################################
# .. note::
#
# Sometimes we would like to perform reduction that involves multiple
# values like :code:`argmax`, which can be done by tuple inputs.
# See :ref:`reduction-with-tuple-inputs` for more detail.
######################################################################
# Summary
# -------
# This tutorial provides a walk through of reduction schedule.
#
# - Describe reduction with reduce_axis.
# - Use rfactor to factor out axis if we need parallelism.
# - Define new reduction operation by :any:`te.comm_reducer`
|
boisvert42/npr-puzzle-python | 2016/0424_drugstore_section.py | Python | cc0-1.0 | 1,003 | 0.011964 | '''
NPR Puzzle 2016-04-24
http://www.npr.org/2016/04/24/475299329/finish-this-puzzle-and-youll-be-just-like-overachievers-you-both-exceed
Name a famous singer -- first and last names.
The last four letters of the first name spelled
backward plus the first four letters of the last name
spelled forward, read together, in order, name a
section of products in a drugstore. What is it?
'''
import sys
sys.path.append('..')
from nprcommontools import get_famous_names
from nltk.corpus import wordnet as wn
import re
#%%
lemmas = wn.all_lemma_names()
lemma_dict = dict([(re.sub(r'[^a-z]+','',x),x) for x in lemmas if x == x.low | er()])
names = get_famous_names()
for name in names.iterkeys():
if name.count(' ') == 1:
first_name, last_name = name.lower().split(' ')
if len(first_name) >= 4 and len(last_name) >= 4:
word = first_name[-1:-5:-1] + last_name[:4]
try:
print lemma_dict[word], name
| except KeyError:
pass
|
karlinjf/ChromiumXRefs | lib/chromium_code_search.py | Python | apache-2.0 | 7,067 | 0.018537 | # Copyright 2017 Josh Karlin. All rights reserved.
# Use of this source code is governed by the Apache license found | in the LICENSE
# file.
import argparse
import datetime
import getopt
import json
import sys
import tempfile
import threading
import time
import urllib.request
import urllib.parse
gFileCache = None;
# A key/value store that stores objects to disk in temporary objects
# for 30 minutes.
class FileCache:
def __init__(self):
self.store = {}
threading.Timer(15 * 60, self.gc).start();
def put(self, url, data):
f = tempfile. | TemporaryFile();
f.write(data);
self.store[url] = (f, datetime.datetime.now());
def get(self, url):
if not url in self.store:
return ''
(f, timestamp) = self.store[url]
f.seek(0);
return f.read();
def gc(self):
threading.Timer(15 * 60, self.gc).start();
expired = datetime.datetime.now() - datetime.timedelta(minutes=30);
remove = []
for url, (f, timestamp) in self.store.items():
if timestamp < expired:
remove.append(url)
for url in remove:
self.store.pop(url);
def cacheResponses(should_cache):
global gFileCache
if not should_cache:
gFileCache = None;
return
if gFileCache:
return
gFileCache = FileCache();
# Retrieve the url by first trying to cache and falling back to the network.
def retrieve(url):
global gFileCache
if gFileCache:
cached_response = gFileCache.get(url);
if (cached_response):
return cached_response.decode('utf8');
response = None
try:
if len(url) > 1500:
short_url = url.split('?')[0]
data = url.split('?')[1]
response = urllib.request.urlopen(short_url, data=data.encode('utf-8'), timeout=3)
else:
response = urllib.request.urlopen(url, timeout=3)
except error:
return ''
result = response.read()
if gFileCache:
gFileCache.put(url, result);
return result.decode('utf8');
def getSignatureFor(src_file, method):
url = ('https://cs.chromium.org/codesearch/json'
'?annotation_request=b'
'&file_spec=b'
'&package_name=chromium'
'&name={file_name}'
'&file_spec=e'
'&type=b'
'&id=1'
'&type=e'
'&label='
'&follow_branches=false'
'&annotation_request=e')
url = url.format(file_name=urllib.parse.quote(src_file, safe=''))
result = retrieve(url);
if not result:
return ''
result = json.loads(result)['annotation_response'][0]
for snippet in result.get('annotation', []):
if not 'type' in snippet:
continue
if 'xref_signature' in snippet:
signature = snippet['xref_signature']['signature']
if '%s(' % method in signature:
return signature
elif 'internal_link' in snippet:
signature = snippet['internal_link']['signature']
if '::%s' % method in signature or 'class-%s' % method in signature:
return signature
return ''
def getCallGraphFor(signature):
url = ('https://cs.chromium.org/codesearch/json'
'?call_graph_request=b'
'&signature={signature}'
'&file_spec=b'
'&package_name=chromium'
'&name=.'
'&file_spec=e'
'&max_num_results=500'
'&call_graph_request=e')
url = url.format(signature=urllib.parse.quote(signature, safe=''))
result = retrieve(url);
if not result:
return {}
result = json.loads(result)['call_graph_response'][0];
node = result['node'];
callers = [];
last_signature = ''
if not 'children' in node:
return callers
for child in node['children']:
if child['signature'] == last_signature:
continue
if not 'snippet_file_path' in child:
continue
caller = {}
caller['filename'] = child['snippet_file_path'];
caller['line'] = child['call_site_range']['start_line']
caller['col'] = child['call_site_range']['start_column']
caller['text'] = child['snippet']['text']['text']
caller['calling_method'] = child['identifier']
caller['calling_signature'] = child['signature']
last_signature = child['signature']
caller['display_name'] = child['display_name']
callers.append(caller)
return callers
def getRefForMatch(filename, match):
ref = {'filename': filename, 'line': match['line_number'], 'signature': match['signature']}
if 'line_text' in match:
ref['line_text'] = match['line_text']
return ref;
def getXrefsFor(signature):
url = ('https://cs.chromium.org/codesearch/json'
'?xref_search_request=b'
'&query={signature}'
'&file_spec=b'
'&name=.'
'&package_name=chromium'
'&file_spec=e'
'&max_num_results=500'
'&xref_search_request=e')
url = url.format(signature=urllib.parse.quote(signature, safe=''))
result = retrieve(url);
if not result:
return {}
result = json.loads(result)['xref_search_response'][0]
status = result['status']
if not 'search_result' in result:
return {}
search_results = result['search_result']
xrefs = {}
for file_result in search_results:
filename = file_result['file']['name']
for match in file_result['match']:
if match['type'] == 'HAS_DEFINITION':
xrefs['definition'] = getRefForMatch(filename, match);
elif match['type'] == 'HAS_DECLARATION':
xrefs['declaration'] = getRefForMatch(filename, match);
elif match['type'] == 'OVERRIDDEN_BY':
xrefs.setdefault('overrides', []);
xrefs['overrides'].append(getRefForMatch(filename, match));
elif match['type'] == 'REFERENCED_AT':
xrefs.setdefault('references', []);
xrefs['references'].append(getRefForMatch(filename, match));
return xrefs
def logAndExit(msg):
print(msg);
sys.exit(2);
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Searches Chromium Code Search for X-Refs.')
parser.add_argument('-p', '--path',
help='The path to this file starting with src/')
parser.add_argument('-w', '--word',
help='The word to search for in the file denoted by the path argument. You must also specify -p')
parser.add_argument('-s', '--signature',
help='A signature provided from a previous search. No -p or -w arguments required.')
args = parser.parse_args()
signature = args.signature;
results = {}
if not signature:
if bool(args.path) ^ bool(args.word):
print("Both path and word must be supplied if one is supplied");
sys.exit(2);
signature = getSignatureFor(args.path, args.word);
results['signature'] = signature
if not signature:
logAndExit("Could not find signature for %s" % (args.word))
results['xrefs'] = getXrefsFor(signature);
results['callers'] = getCallGraphFor(signature);
print(json.dumps(results))
|
devurandom/portage | pym/portage/tests/emerge/test_emerge_slot_abi.py | Python | gpl-2.0 | 5,878 | 0.032154 | # Copyright 2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
import sys
import portage
from portage import os
from portage import _unicode_decode
from portage.const import (BASH_BINARY, PORTAGE_BIN_PATH,
PORTAGE_PYM_PATH, USER_CONFIG_PATH)
from portage.process import find_binary
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
from portage.util import ensure_dirs
class SlotAbiEmergeTestCase(TestCase):
def testSlotAbiEmerge(self):
debug = False
ebuilds = {
"dev-libs/glib-1.2.10" : {
"SLOT": "1"
},
"dev-libs/glib-2.30.2" : {
"EAPI": "4-slot-abi",
"SLOT": "2/2.30"
},
"dev-libs/glib-2.32.3" : {
"EAPI": "4-slot-abi",
"SLOT": "2/2.32"
},
"dev-libs/dbus-glib-0.98" : {
"EAPI": "4-slot-abi",
"DEPEND": "dev-libs/glib:2=",
"RDEPEND": "dev-libs/glib:2="
},
}
installed = {
"dev-libs/glib-1.2.10" : {
"EAPI": "4-slot-abi",
"SLOT": "1"
},
"dev-libs/glib-2.30.2" : {
"EAPI": "4-slot-abi",
"SLOT": "2/2.30"
},
"dev-libs/dbus-glib-0.98" : {
"EAPI": "4-slot-abi",
"DEPEND": "dev-libs/glib:2/2.30=",
"RDEPEND": "dev-libs/glib:2/2.30="
},
}
world = ["dev-libs/glib:1", "dev-libs/dbus-glib"]
playground = ResolverPlayground(ebuilds=ebuilds,
installed=installed, world=world, debug=debug)
settings = playground.settings
eprefix = settings["EPREFIX"]
eroot = settings["EROOT"]
trees = playground.trees
portdb = trees[eroot]["porttree"].dbapi
vardb = trees[eroot]["vartree"].dbapi
portdir = settings["PORTDIR"]
var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
package_mask_path = os.path.join(user_config_dir, "package.mask")
portage_python = portage._python_interpreter
ebuild_cmd = (portage_python, "-Wd",
os.path.join(PORTAGE_BIN_PATH, "ebuild"))
emerge_cmd = (portage_python, "-Wd",
os.path.join(PORTAGE_BIN_PATH, "emerge"))
test_ebuild = portdb.findname("dev-libs/dbus-glib-0.98")
self.assertFalse(test_ebuild is None)
test_commands = (
emerge_cmd + ("--oneshot", "dev-libs/glib",),
(lambda: "dev-libs/glib:2/2.32=" in vardb.aux_get("dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],),
(BASH_BINARY, "-c", "echo %s >> %s" %
tuple(map(portage._shell_quote,
(">=dev-libs/glib-2.32", package_mask_path,)))),
emerge_cmd + ("--oneshot", "dev-libs/glib",),
(lambda: "dev-libs/glib:2/2.30=" in vardb.aux_get("dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],),
)
distdir = playground.distdir
pkgdir = playground.pkgdir
fake_bin = os.path.join(eprefix, "bin")
portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
profile_path = settings.profile_path
features = []
if not portage.process.sandbox_capable or \
os.environ.get("SANDBOX_ON") == "1":
features.append("-sandbox")
make_conf = (
"FEATURES=\"%s\"\n" % (" ".join(features),),
"PORTDIR=\"%s\"\n" % (portdir,),
"PORTAGE_GRPNAME=\"%s\"\n | " % (os.environ["PORTAGE_GRPNAME"],),
"PORTAGE_USERNAME=\"%s\"\n" % (os.environ["PORTAGE_USERNAME"],),
"PKGDIR=\"%s\"\n" % (pkgdir,),
"PORTAGE_INST_GID=%s\n" % (portage.data.portage_gid,),
"PORTAGE_INST_UID=%s\n" % (portage.data.portage_uid,),
"PORTAGE_TMPDIR=\"%s\"\n" % (portage_tmpdir,),
"CLEAN_DELAY=0\n",
"DISTDIR=\"%s\"\n" % (distdir,),
"EMERGE_WARNING_DELAY=0\n",
)
path = os.environ.get("PATH")
if path is not None and | not path.strip():
path = None
if path is None:
path = ""
else:
path = ":" + path
path = fake_bin + path
pythonpath = os.environ.get("PYTHONPATH")
if pythonpath is not None and not pythonpath.strip():
pythonpath = None
if pythonpath is not None and \
pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
pass
else:
if pythonpath is None:
pythonpath = ""
else:
pythonpath = ":" + pythonpath
pythonpath = PORTAGE_PYM_PATH + pythonpath
env = {
"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
"PATH" : path,
"PORTAGE_PYTHON" : portage_python,
"PYTHONPATH" : pythonpath,
}
if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
dirs = [distdir, fake_bin, portage_tmpdir,
user_config_dir, var_cache_edb]
true_symlinks = ["chown", "chgrp"]
true_binary = find_binary("true")
self.assertEqual(true_binary is None, False,
"true command not found")
try:
for d in dirs:
ensure_dirs(d)
with open(os.path.join(user_config_dir, "make.conf"), 'w') as f:
for line in make_conf:
f.write(line)
for x in true_symlinks:
os.symlink(true_binary, os.path.join(fake_bin, x))
with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
f.write(b"100")
# non-empty system set keeps --depclean quiet
with open(os.path.join(profile_path, "packages"), 'w') as f:
f.write("*dev-libs/token-system-pkg")
if debug:
# The subprocess inherits both stdout and stderr, for
# debugging purposes.
stdout = None
else:
# The subprocess inherits stderr so that any warnings
# triggered by python -Wd will be visible.
stdout = subprocess.PIPE
for i, args in enumerate(test_commands):
if hasattr(args[0], '__call__'):
self.assertTrue(args[0](),
"callable at index %s failed" % (i,))
continue
proc = subprocess.Popen(args,
env=env, stdout=stdout)
if debug:
proc.wait()
else:
output = proc.stdout.readlines()
proc.wait()
proc.stdout.close()
if proc.returncode != os.EX_OK:
for line in output:
sys.stderr.write(_unicode_decode(line))
self.assertEqual(os.EX_OK, proc.returncode,
"emerge failed with args %s" % (args,))
finally:
playground.cleanup()
|
ceph/autotest | frontend/shared/rest_client.py | Python | gpl-2.0 | 8,328 | 0.003002 | import copy, getpass, logging, pprint, re, urllib, urlparse
import httplib2
from django.utils import datastructures, simplejson
from autotest_lib.frontend.afe import rpc_client_lib
from autotest_lib.client.common_lib import utils
_request_headers = {}
def _get_request_headers(uri):
server = urlparse.urlparse(uri)[0:2]
if server in _request_headers:
return _request_headers[server]
headers = rpc_client_lib.authorization_headers(getpass.getuser(), uri)
headers['Content-Type'] = 'application/json'
_request_headers[server] = headers
return headers
def _clear_request_headers(uri):
server = urlparse.urlparse(uri)[0:2]
if server in _request_headers:
del _request_headers[server]
def _site_verify_response_default(headers, response_body):
return headers['status'] != '401'
class RestClientError(Exception):
pass
class ClientError(Exception):
pass
class ServerError(Exception):
pass
class Response(object):
def __init__(self, httplib_response, httplib_content):
self.status = int(httplib_response['status'])
self.headers = httplib_response
self.entity_body = httplib_content
def decoded_body(self):
return simplejson.loads(self.entity_body)
def __str__(self):
return '\n'.join([str(self.status), self.entity_body])
class Resource(object):
def __init__(self, representation_dict, http | ):
self._http = http
assert 'href' in representation_dict
for key, value in representation_dict.iteritems():
setattr(self, str(key), value)
def __repr__(self):
return 'Resource(%r)' % self._representation()
def pprint(self):
# pretty-print support for debugging/interactive use
pprint.pprint(self._representation())
@classmethod
def load(cls, | uri, http=None):
if not http:
http = httplib2.Http()
directory = cls({'href': uri}, http)
return directory.get()
def _read_representation(self, value):
# recursively convert representation dicts to Resource objects
if isinstance(value, list):
return [self._read_representation(element) for element in value]
if isinstance(value, dict):
converted_dict = dict((key, self._read_representation(sub_value))
for key, sub_value in value.iteritems())
if 'href' in converted_dict:
return type(self)(converted_dict, http=self._http)
return converted_dict
return value
def _write_representation(self, value):
# recursively convert Resource objects to representation dicts
if isinstance(value, list):
return [self._write_representation(element) for element in value]
if isinstance(value, dict):
return dict((key, self._write_representation(sub_value))
for key, sub_value in value.iteritems())
if isinstance(value, Resource):
return value._representation()
return value
def _representation(self):
return dict((key, self._write_representation(value))
for key, value in self.__dict__.iteritems()
if not key.startswith('_')
and not callable(value))
def _do_request(self, method, uri, query_parameters, encoded_body):
uri_parts = [uri]
if query_parameters:
if '?' in uri:
uri_parts += '&'
else:
uri_parts += '?'
uri_parts += urllib.urlencode(query_parameters, doseq=True)
full_uri = ''.join(uri_parts)
if encoded_body:
entity_body = simplejson.dumps(encoded_body)
else:
entity_body = None
logging.debug('%s %s', method, full_uri)
if entity_body:
logging.debug(entity_body)
site_verify = utils.import_site_function(
__file__, 'autotest_lib.frontend.shared.site_rest_client',
'site_verify_response', _site_verify_response_default)
headers, response_body = self._http.request(
full_uri, method, body=entity_body,
headers=_get_request_headers(uri))
if not site_verify(headers, response_body):
logging.debug('Response verification failed, clearing headers and '
'trying again:\n%s', response_body)
_clear_request_headers(uri)
headers, response_body = _http.request(
full_uri, method, body=entity_body,
headers=_get_request_headers(uri))
logging.debug('Response: %s', headers['status'])
return Response(headers, response_body)
def _request(self, method, query_parameters=None, encoded_body=None):
if query_parameters is None:
query_parameters = {}
response = self._do_request(method, self.href, query_parameters,
encoded_body)
if 300 <= response.status < 400: # redirection
return self._do_request(method, response.headers['location'],
query_parameters, encoded_body)
if 400 <= response.status < 500:
raise ClientError(str(response))
if 500 <= response.status < 600:
raise ServerError(str(response))
return response
def _stringify_query_parameter(self, value):
if isinstance(value, (list, tuple)):
return ','.join(self._stringify_query_parameter(item)
for item in value)
return str(value)
def _iterlists(self, mapping):
"""This effectively lets us treat dicts as MultiValueDicts."""
if hasattr(mapping, 'iterlists'): # mapping is already a MultiValueDict
return mapping.iterlists()
return ((key, (value,)) for key, value in mapping.iteritems())
def get(self, query_parameters=None, **kwarg_query_parameters):
"""
@param query_parameters: a dict or MultiValueDict
"""
query_parameters = copy.copy(query_parameters) # avoid mutating original
if query_parameters is None:
query_parameters = {}
query_parameters.update(kwarg_query_parameters)
string_parameters = datastructures.MultiValueDict()
for key, values in self._iterlists(query_parameters):
string_parameters.setlist(
key, [self._stringify_query_parameter(value)
for value in values])
response = self._request('GET',
query_parameters=string_parameters.lists())
assert response.status == 200
return self._read_representation(response.decoded_body())
def get_full(self, results_limit, query_parameters=None,
**kwarg_query_parameters):
"""
Like get() for collections, when the full collection is expected.
@param results_limit: maxmimum number of results to allow
@raises ClientError if there are more than results_limit results.
"""
result = self.get(query_parameters=query_parameters,
items_per_page=results_limit,
**kwarg_query_parameters)
if result.total_results > results_limit:
raise ClientError(
'Too many results (%s > %s) for request %s (%s %s)'
% (result.total_results, results_limit, self.href,
query_parameters, kwarg_query_parameters))
return result
def put(self):
response = self._request('PUT', encoded_body=self._representation())
assert response.status == 200
return self._read_representation(response.decoded_body())
def delete(self):
response = self._request('DELETE')
assert response.status == 204 # no content
def post(self, request_dict):
# request_dict may still have resources in it
request_dict = self._write_representation(request_dict)
response = self._request('POST', encoded_ |
titipata/pubmed_parser | docs/conf.py | Python | mit | 2,161 | 0 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sys
import os
import sphinx
import pubmed_parser
import sphinx_gallery
# -- Project information ------------------------ | ----------------------- | ------
project = 'Pubmed Parser'
copyright = '2020, Titipat Achakulvisut'
author = 'Titipat Achakulvisut'
version = pubmed_parser.__version__
release = pubmed_parser.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.doctest'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.