| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
zseder/hunmisc
|
hunmisc/corpustools/20ng_to_conll.py
|
1
|
4033
|
"""
Copyright 2011-13 Attila Zseder
Email: zseder@gmail.com
This file is part of hunmisc project
url: https://github.com/zseder/hunmisc
hunmisc is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
"""Converts a newsgroup file (as in the 20 Newsgroups collection) to the conll2
format."""
import os.path
import re
from langtools.nltk.nltktools import NltkTools
from langtools.utils import cmd_utils
from langtools.utils.file_utils import *
from langtools.io.conll2.conll_iter import FieldedDocument
re_pat = re.compile(r"^[\s>]+", re.UNICODE)
# Decoding is not required as NltkTools.tag_raw() handles that for utf-8.
def read_stream(ins):
"""Reads a stream. Returns a {field:raw text} map, with a Body field. The
title is the content of the subject header field."""
fields = {}
for line in ins:
line = line.strip()
if len(line) == 0:
break
if line.startswith("Subject:"):
fields['Title'] = line[8:]
fields['Body'] = u' '.join(re_pat.sub("", line.strip().replace(u'\ufffd', ' ')) for line in ins)
return fields
def read_file(infile):
"""Reads a file. Returns a {field:raw text} map, with a Body field. If title
is true, a Title field will be added too."""
with FileReader(infile, replace=True).open() as ins:
return read_stream(ins)
def write_doc(doc, outs):
"""Writes the document to outs. A header line is written, then the
Title field (if any), then the body."""
outs.write(u"%%#PAGE\t{0}\n".format(doc.title))
if 'Title' in doc.fields:
outs.write(u"%%#Field\tTitle\n")
write_text(doc.fields['Title'], outs)
outs.write(u"%%#Field\tBody\n")
write_text(doc.fields['Body'], outs)
def write_text(text, outs):
for token in text:
outs.write(u"\t".join(token))
outs.write("\n")
if __name__ == '__main__':
import sys
try:
params, args = cmd_utils.get_params_sing(sys.argv[1:], 'i:o:m:ta', 'i', 0)
if not os.path.isdir(params['i']):
raise ValueError('Input must be a directory of files.')
except ValueError as err:
print('Error: {0}'.format(err))
print(('Usage: {0} -i input_dir [-o output_file] -m [hunpos_model] ' +
'[-a]').format(sys.argv[0]))
print(' input_dir: the directory with the input text files.')
print(' hunpos_model: the hunpos model file.')
print(' output_file: the conll2 output file. If omitted, the result will')
print(' be written to stdout.')
print(' -a: the output is appended to output_file, instead of overwriting it.')
sys.exit()
if 'o' in params:
output_mode = 'a' if 'a' in params else 'w'
out = FileWriter(params['o'], output_mode).open()
else:
out = StreamWriter(sys.stdout)
nt = NltkTools(pos=True, stem=True, tok=True, pos_model=params.get('m'))
for infile in (os.path.join(d, f) for d, _, fs in os.walk(params['i']) for f in fs):
print "File " + infile
doc = FieldedDocument(infile)
doc.fields = {}
for field, raw_text in read_file(infile).iteritems():
doc.fields[field] = nt.tag_raw(raw_text)
write_doc(doc, out)
if 'o' in params:
out.close()
|
gpl-3.0
| 898,430,724,181,993,000
| 36.691589
| 100
| 0.634763
| false
| 3.516129
| false
| false
| false
|
OneDrive/onedrive-sdk-python
|
src/python2/request/item_copy.py
|
1
|
3729
|
# -*- coding: utf-8 -*-
'''
# Copyright (c) 2015 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# This file was generated and any changes will be overwritten.
'''
from ..model.item import Item
from ..request_base import RequestBase
from ..request_builder_base import RequestBuilderBase
from ..async_operation_monitor import AsyncOperationMonitor
from ..options import *
import json
class ItemCopyRequest(RequestBase):
def __init__(self, request_url, client, options, name=None, parent_reference=None):
super(ItemCopyRequest, self).__init__(request_url, client, options)
self.method = "POST"
self.body_options={}
if name:
self.body_options["name"] = name
if parent_reference:
self.body_options["parentReference"] = parent_reference
@property
def body_options(self):
return self._body_options
@body_options.setter
def body_options(self, value):
self._body_options=value
def post(self):
"""Sends the POST request
Returns:
:class:`AsyncOperationMonitor<onedrivesdk.async_operation_monitor.AsyncOperationMonitor>`:
The resulting entity from the operation
"""
self.content_type = "application/json"
self.append_option(HeaderOption("Prefer", "respond-async"))
response = self.send(self.body_options)
entity = AsyncOperationMonitor(response.headers["Location"], self._client, None)
return entity
class ItemCopyRequestBuilder(RequestBuilderBase):
def __init__(self, request_url, client, name=None, parent_reference=None):
super(ItemCopyRequestBuilder, self).__init__(request_url, client)
self._method_options = {}
self._method_options["name"] = name
self._method_options["parentReference"] = parent_reference._prop_dict
def request(self, options=None):
"""Builds the request for the ItemCopy
Args:
options (list of :class:`Option<onedrivesdk.options.Option>`):
Default to None, list of options to include in the request
Returns:
:class:`ItemCopyRequest<onedrivesdk.request.item_copy.ItemCopyRequest>`:
The request
"""
req = ItemCopyRequest(self._request_url, self._client, options, name=self._method_options["name"], parent_reference=self._method_options["parentReference"])
return req
def post(self):
"""Sends the POST request
Returns:
:class:`Item<onedrivesdk.model.item.Item>`:
The resulting Item from the operation
"""
return self.request().post()
|
mit
| -2,817,140,909,154,993,000
| 36.29
| 164
| 0.679807
| false
| 4.361404
| false
| false
| false
|
thehackercat/aha-memo
|
serverApp/common/aha.py
|
1
|
13263
|
# -*- coding:utf-8 -*-
__author__ = 'LexusLee'
import time
import json
import tornado
import tornado.gen
from tornado.web import HTTPError
from tornado.escape import json_decode
from foundation.log import logger
from foundation import const
from serverAppConfig import DEVICE_TYPE
from serverAppConfig import TOKEN_ROLE, TOKEN_DEADLINE_TIME, TOKEN_USER_ID
from cacheTool import _get_redis_connection
class ArgumentTypeError(HTTPError):
"""Exception raised by `IntLongRequestHandler.add_query_argument`.
This is a subclass of `HTTPError`, so if it is uncaught a 400 response
code will be used instead of 500 (and a stack trace will not be logged).
"""
def __init__(self, arg_name):
super(ArgumentTypeError, self).__init__(
400, 'Type of argument %s must be string type' % arg_name)
self.arg_name = arg_name
class RequestHandlerAha(tornado.web.RequestHandler):
"""
根据需要,定制tornado.web.RequestHandler
"""
def __init__(self, application, request, auto_init=True, **kwargs):
"""
构造函数
:param write_to_client: 如果是后台调用,该值必须是True,否则是False,默认为False
"""
super(RequestHandlerAha, self).__init__(application, request, **kwargs)
self.auto_init = auto_init
self.decoded_secure_cookie = {}
self.redis_client = None
def on_finish(self):
if self.redis_client:
            logger.debug('Open redis connection found, closing it')
self.redis_client.disconnect()
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
"""
重写重写tornado.web.RequestHandler中的get_secure_cookie方法,用于在多次调用get_secure_cookie
不重复去解密
:param name:
:return:
"""
if name in self.decoded_secure_cookie.keys():
return self.decoded_secure_cookie[name]
else:
value = super(RequestHandlerAha, self).get_secure_cookie(name, value, max_age_days, min_version)
self.decoded_secure_cookie[name] = value
return value
def get_current_user_role(self):
"""
返回当前用户的角色名字,角色名字参考configueFiles文件夹下面的authority文件注释
:return: 角色名字
"""
tokenstr = self.get_secure_cookie("token")
if not tokenstr:
logger.debug("cookie中没有token,因此是游客访问")
return "visitor"
token = json_decode(tokenstr)
role = token.get(TOKEN_ROLE, "visitor")
return role
def get_current_user(self):
"""
重写tornado.web.RequestHandler中的get_current_user方法,用于在调用self.current_user能正确返回
直接调用该函数可以返回相应的用户id,也可以使用self.current_user来返回用户的id。
:return: 如果有相应的token,则返回对应的id,否则返回None
"""
tokenstr = self.get_secure_cookie("token")
if not tokenstr:
logger.debug("cookie中没有token,因此是游客访问,因此没有用户的id")
return None
token = json_decode(tokenstr)
user_id = token.get(TOKEN_USER_ID)
return user_id
def write(self, chunk):
"""
向调用者返回数据,如果是客户端直接请求的,则向客户端返回对应写的数据,函数返回None;如果是后台自己调用,
则返回相应的python对象数据,函数返回对应python对象数据
:param chunk: 待返回的数据
:return: 如果是后台自己调用,返回对应数据的python对象;否则返回None
"""
# self.set_header("Content-Type", "application/json; charset=UTF-8")
if self.auto_init:
super(RequestHandlerAha, self).write(chunk)
else:
return chunk
def __add_arg(self, source, name, *args):
"""
用来底层实现增加请求参数
:param source: 增加参数到指定的source上
:param name: 参数的名字,必须是字符串
:param args: 参数的值,可以是多个参数,但是必须是字符串
:return:None
:exception ArgumentTypeError
"""
if not isinstance(name, basestring):
raise ArgumentTypeError(name)
for v in args:
if not isinstance(v, basestring):
raise ArgumentTypeError(name)
addvalue = list(args)
        if name in source:
            addvalue.extend(source.get(name, []))
        source[name] = addvalue
def add_query_argument(self, name, *args):
"""
增加query的参数,形如URL后面的参数
:param name: 参数的名字,必须是字符串
:param args: 参数的值,可以是多个参数,但是必须是字符串
:return:None
"""
self.__add_arg(self.request.query_arguments, name, *args)
def add_body_argument(self, name, *args):
"""
增加body的参数,形如提交表单里面的数据
:param name: 参数的名字,必须是字符串
:param args: 参数的值,可以是多个参数,但是必须是字符串
:return:None
"""
self.__add_arg(self.request.body_arguments, name, *args)
def add_argument(self, name, *args):
"""
增加全局参数
:param name: 参数的名字,必须是字符串
:param args: 参数的值,可以是多个参数,但是必须是字符串
:return:None
"""
self.__add_arg(self.request.arguments, name, *args)
def get_redis_conn(self):
"""
得到一个redis的连接
"""
if not self.redis_client:
self.redis_client = _get_redis_connection()
return self.redis_client
@property
def device_type(self):
"""
得到设备类型,返回的模拟枚举类型: DEVICE_TYPE
:return:
"""
if not hasattr(self, "_device_type"):
userAgent = self.request.headers.get('User-Agent', "")
via = self.request.headers.get("Via", "")
self._device_type = self._distinguishDevice(via, userAgent)
return self._device_type
def _distinguishDevice(self, via, userAgent):
"""
验证设备是什么类型设备
:param via:
:param userAgent:
:return: 0代表手机,1表示pc
"""
pcHeaders = ["Windows 98",
"Windows ME",
"Windows 2000",
"Windows XP",
"Windows NT",
"Ubuntu"]
mobileGateWayHeaders = [ "ZXWAP",
"chinamobile.com",
"monternet.com",
"infoX",
"wap.lizongbo.com","Bytemobile"]
mobileUserAgents = [ "Nokia", "SAMSUNG", "MIDP-2", "CLDC1.1", "SymbianOS", "MAUI", "UNTRUSTED/1.0", "Windows CE",
"iPhone", "iPad", "Android", "BlackBerry", "UCWEB", "ucweb", "BREW", "J2ME", "YULONG",
"YuLong", "COOLPAD","TIANYU","TY-", "K-Touch", "Haier", "DOPOD","Lenovo","LENOVO", "HUAQIN",
"AIGO-", "CTC/1.0", "CTC/2.0","CMCC","DAXIAN","MOT-","SonyEricsson","GIONEE","HTC","ZTE",
"HUAWEI", "webOS","GoBrowser","IEMobile", "WAP2.0"]
pcFlag = False
mobileFlag = False
for pcHeader in pcHeaders:
if pcFlag:
break
if userAgent.find(pcHeader) != -1:
pcFlag = True
break
for mobileGateWayHeader in mobileGateWayHeaders:
if mobileFlag:
break
if via.find(mobileGateWayHeader) != -1:
mobileFlag = True
break
for mobileUserAgent in mobileUserAgents:
if mobileFlag:
break
if userAgent.find(mobileUserAgent) != -1:
mobileFlag = True
break
        if mobileFlag and not pcFlag:
return DEVICE_TYPE.MOBILE
else:
return DEVICE_TYPE.PC
class ResponseJSON:
"""
处理返回给客户端的json对象
"""
def __init__(self, code, data=None, description=None, status=None):
"""
:param code: 返回的code,数字类型
:param description: code相关描述
:param data: 具体的data数据
"""
self.code = code
self.description = description
self.data = data
self.status = status
def resultDict(self):
"""
返回一个dict对象。如果code不是数字,则认为系统内部错误,code置为500。如果
description为空,则没有description在dict中。如果data为一个json对象字符串,则会把对应
的字符串转换成dict
:return:返回一个dict对象
"""
if isinstance(self.code, int):
meta = {"code": self.code}
else:
meta = {"code": 500}
if const.basic.get('send_description') == 'True' and self.description:
meta["description"] = self.description
if self.status:
if isinstance(self.status, int):
meta['status'] = self.status
else:
meta['status'] = -9999
rdict = {"meta": meta}
if isinstance(self.data, basestring):
try:
rdict["data"] = json.loads(self.data, encoding="utf-8")
except ValueError:
logger.warning("ResponseJSON:data数据格式错误")
elif isinstance(self.data, dict) or isinstance(self.data, list):
rdict["data"] = self.data
return rdict
def resultStr(self):
"""
返回的是结果json字符串
"""
return json.dumps(self.resultDict(), ensure_ascii=False)
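# Illustrative sketch (an addition, not part of the original file): the
# envelope shape ResponseJSON produces for a successful call with a dict payload.
#
#   >>> ResponseJSON(200, data={'id': 1}).resultDict()
#   {'meta': {'code': 200}, 'data': {'id': 1}}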
def _auth_user_token(token):
"""
通过token去验证用户是否已经登陆成功
:param token:字典格式,token:
CT: create_time,该token创建时间
DT: deadline_time,该token的有效日期
:return: 验证成功返回True,验证失败返回False
"""
if token is None:
return False
else:
token = json_decode(token)
deadline_time = token[TOKEN_DEADLINE_TIME]
now_time = get_system_time(pretty=False)
        return now_time < deadline_time
def authenticated(method):
"""
Decorate methods with this to require that the user be logged in.
"""
def wrapper(self, *args, **kwargs):
try:
            if not self.request.loginSuccess:  # the first visit raises AttributeError; if none is raised, the login check already ran
return self.write(ResponseJSON(401, description="not login.").resultDict())
                # return 'login already verified, but verification failed'
except AttributeError:
resp = _auth_user_token(self.get_secure_cookie("token"))
if resp:
self.request.loginSuccess = True
return method(self, *args, **kwargs)
else:
self.request.loginSuccess = False
return self.write(ResponseJSON(401, description="not login").resultDict())
                # return 'verification failed'
else:
return method(self, *args, **kwargs)
return wrapper
def _auth_user_authority(code, role):
"""
通过code去验证用户是否有该权限
:param code: 功能标识码
:return: 如果验证成功,返回True,否则返回False
"""
logger.debug(role)
rolelist = const.authority.get(str(code))
logger.debug(rolelist)
    return role in rolelist
def authorized(code):
"""
一个装饰器,用来验证该用户是否有权限使用该功能,如果有使用该模块的权限,则
返回对应的函数,如果没有,则函数不继续往下执行
:param code: 该模块的标识
"""
def _deco(method):
def wrappers(self, *args, **kwargs):
role = self.get_current_user_role()
resp = _auth_user_authority(code, role)
if resp:
return method(self, *args, **kwargs)
else:
logger.debug("该用户没有此功能的权限")
return self.write(ResponseJSON(403, description="No authority for the function").resultDict()) # 该用户没有该权限
return wrappers
return _deco
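# Usage sketch (illustrative, not from the original file): stacking both
# decorators on a handler method; the authority code 1001 is hypothetical.
#
#   class DemoHandler(RequestHandlerAha):
#       @authenticated
#       @authorized(1001)
#       def get(self):
#           self.write(ResponseJSON(200, data={'ok': True}).resultDict())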
def get_system_time(pretty=True):
"""
该函数用于返回系统当前时间
:return:当前系统时间
"""
if pretty:
ISOTIMEFORMAT = "%Y-%m-%d-%X"
current_time = time.strftime(ISOTIMEFORMAT, time.localtime(time.time()))
else:
current_time = time.time()
return current_time
|
gpl-3.0
| 1,503,458,384,952,282,600
| 30.134771
| 122
| 0.565492
| false
| 2.960277
| false
| false
| false
|
par2/lamana
|
lamana/models/fixtures/fixture_model_class.py
|
1
|
10351
|
#------------------------------------------------------------------------------
'''Class-style Model
This fixture is used to test the importing of models, handled by the
`theories.handshake()` module. As of 0.4.11, models can:
- be located in the `lamana.models` folder
- module and classes can have any pythonic name; hard-coding removed
- any sub-package can be accessed by the "model" keyword in `Case.apply()`
- search for the hook-containing class and its hook method
This module is here to test these aspects as the module is imported. The Wilson_LT
model was adapted. No functions are expected in this module; there are tests
against this.
'''
import math
import collections as ct
import pandas as pd
from lamana.input_ import BaseDefaults
from lamana.theories import BaseModel
from lamana.lt_exceptions import IndeterminateError
# This class lacks a hook method; theories should skip it.
class DummyModel():
pass
# The class containing the hook method can have any name.
class RandomName(BaseModel):
'''A modified laminate theory for circular biaxial flexure disks,
loaded with a flat piston punch on 3-ball support having two distinct
materials (polymer and ceramic).'''
'''Accept extra args and kwds here'''
def __init__(self):
self.Laminate = None
self.FeatureInput = None
self.LaminateModel = None
# TODO: eventually abstract into BaseModel and deprecate direct coding
# TODO: accept kwargs from Case -> handshake
def _use_model_(self, Laminate, adjusted_z=False):
        '''Return updated DataFrame and FeatureInput; return None if exceptions are raised.
Parameters
----------
df : DataFrame
LaminateModel with IDs and Dimensional Variables.
        FeatureInput : dict
Geometry, laminate parameters and more. Updates Globals dict for
parameters in the dashboard output.
adjusted_z: bool; default=False
If True, uses z(m)* values instead; different assumption for internal calc.
Raises
------
ZeroDivisionError
            If `r` or `a` in the log term is zero.
ValueError
If negative numbers are in the log term or the support radius exceeds
the sample radius.
Returns
-------
tuple
The updated calculations and parameters stored in a tuple
            `(LaminateModel, FeatureInput)`.
'''
self.Laminate = Laminate
df = Laminate.LFrame.copy()
FeatureInput = Laminate.FeatureInput
# Author-defined Exception Handling
if (FeatureInput['Parameters']['r'] == 0):
raise ZeroDivisionError('r=0 is invalid for the log term in the moment eqn.')
elif (FeatureInput['Parameters']['a'] == 0):
raise ZeroDivisionError('a=0 is invalid for the log term in the moment eqn.')
elif (FeatureInput['Parameters']['r'] < 0) | (FeatureInput['Parameters']['a'] < 0):
raise ValueError('Negative numbers are invalid for the log term '
'in moment eqn.')
elif FeatureInput['Parameters']['a'] > FeatureInput['Parameters']['R']:
raise ValueError('Support radius is larger than sample radius.')
elif df['side'].str.contains('INDET').any():
print('INDET value found. Rolling back...')
raise IndeterminateError('INDET value found. Unable to accurately calculate stress.')
#raise AssertionError('Indeterminate value found. Unable to accurately calculate stress.')
# Calling functions to calculate Qs and Ds
df.loc[:, 'Q_11'] = self.calc_stiffness(df, FeatureInput['Properties']).q_11
df.loc[:, 'Q_12'] = self.calc_stiffness(df, FeatureInput['Properties']).q_12
df.loc[:, 'D_11'] = self.calc_bending(df, adj_z=adjusted_z).d_11
df.loc[:, 'D_12'] = self.calc_bending(df, adj_z=adjusted_z).d_12
# Global Variable Update
if (FeatureInput['Parameters']['p'] == 1) & (Laminate.nplies%2 == 0):
D_11T = sum(df['D_11'])
D_12T = sum(df['D_12'])
else:
D_11T = sum(df.loc[df['label'] == 'interface', 'D_11']) # total D11
D_12T = sum(df.loc[df['label'] == 'interface', 'D_12'])
#print(FeatureInput['Geometric']['p'])
D_11p = (1./((D_11T**2 - D_12T**2)) * D_11T) #
D_12n = -(1./((D_11T**2 - D_12T**2)) *D_12T) #
v_eq = D_12T/D_11T # equiv. Poisson's ratio
M_r = self.calc_moment(df, FeatureInput['Parameters'], v_eq).m_r
M_t = self.calc_moment(df, FeatureInput['Parameters'], v_eq).m_t
K_r = (D_11p*M_r) + (D_12n*M_t) # curvatures
K_t = (D_12n*M_r) + (D_11p*M_t)
# Update FeatureInput
        global_params = {
            'D_11T': D_11T,
            'D_12T': D_12T,
            'D_11p': D_11p,
            'D_12n': D_12n,
            'v_eq': v_eq,
            'M_r': M_r,
            'M_t': M_t,
            'K_r': K_r,
            'K_t': K_t,
        }
FeatureInput['Globals'] = global_params
self.FeatureInput = FeatureInput # update with Globals
#print(FeatureInput)
# Calculate Strains and Stresses and Update DataFrame
df.loc[:,'strain_r'] = K_r * df.loc[:, 'Z(m)']
df.loc[:,'strain_t'] = K_t * df.loc[:, 'Z(m)']
df.loc[:, 'stress_r (Pa/N)'] = (df.loc[:, 'strain_r'] * df.loc[:, 'Q_11']
) + (df.loc[:, 'strain_t'] * df.loc[:, 'Q_12'])
df.loc[:,'stress_t (Pa/N)'] = (df.loc[:, 'strain_t'] * df.loc[:, 'Q_11']
) + (df.loc[:, 'strain_r'] * df.loc[:, 'Q_12'])
df.loc[:,'stress_f (MPa/N)'] = df.loc[:, 'stress_t (Pa/N)']/1e6
del df['Modulus']
del df['Poissons']
self.LaminateModel = df
return (df, FeatureInput)
#------------------------------------------------------------------------------
'''Prefer staticmethods here. Add formulas to doc strings.'''
def calc_stiffness(self, df, mat_props):
        '''Return tuple of Series of (Q11, Q12) floats per lamina.
        Q11 = E/(1 - v^2);  Q12 = v*E/(1 - v^2)
        '''
# Iterate to Apply Modulus and Poisson's to correct Material
# TODO: Prefer cleaner ways to parse materials from mat_props
df_mat_props = pd.DataFrame(mat_props) # df easier to munge
df_mat_props.index.name = 'materials'
##for material in mat_props.index:
for material in df_mat_props.index:
mat_idx = df['matl'] == material
df.loc[mat_idx, 'Modulus'] = df_mat_props.loc[material, 'Modulus']
df.loc[mat_idx, 'Poissons'] = df_mat_props.loc[material, 'Poissons']
E = df['Modulus'] # series of moduli
v = df['Poissons']
stiffness = ct.namedtuple('stiffness', ['q_11', 'q_12'])
q_11 = E / (1 - (v**2))
q_12 = (v*E) / (1 - (v**2))
return stiffness(q_11, q_12)
def calc_bending(self, df, adj_z=False):
        '''Return tuple of Series of (D11, D12) floats.
        D11 = Q11*h^3/12 + Q11*h*z^2;  D12 = Q12*h^3/12 + Q12*h*z^2
        '''
q_11 = df['Q_11']
q_12 = df['Q_12']
h = df['h(m)']
# TODO: need to fix kwargs passing first; tabled since affects many modules.
if not adj_z:
z = df['z(m)']
else:
z = df['z(m)*']
bending = ct.namedtuple('bending', ['d_11', 'd_12'])
d_11 = ((q_11*(h**3)) / 12.) + (q_11*h*(z**2))
d_12 = ((q_12*(h**3)) / 12.) + (q_12*h*(z**2))
return bending(d_11, d_12)
def calc_moment(self, df, load_params, v_eq):
'''Return tuple of moments (radial and tangential); floats.
        See Timoshenko & Woinowsky-Krieger, Eq. 91; default'''
P_a = load_params['P_a']
a = load_params['a']
r = load_params['r']
moments = ct.namedtuple('moments', ['m_r', 'm_t'])
m_r = ((P_a/(4*math.pi)) * ((1 + v_eq)*math.log10(a/r)))
m_t = ((P_a/(4*math.pi)) * (((1 + v_eq)*math.log10(a/r)) + (1 - v_eq)))
return moments(m_r, m_t)
class Defaults(BaseDefaults):
'''Return parameters for building distributions cases. Useful for consistent
testing.
Dimensional defaults are inherited from utils.BaseDefaults().
    Material-specific parameters are defined here by the user.
- Default geometric parameters
- Default material properties
- Default FeatureInput
Examples
--------
>>> dft = Defaults()
>>> dft.load_params
{'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,}
>>> dft.mat_props
{'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
'Poissons': {'HA': 0.25, 'PSu': 0.33}}
>>> dft.FeatureInput
{'Geometry' : '400-[200]-800',
'Geometric' : {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,},
'Materials' : {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],},
'Custom' : None,
'Model' : Wilson_LT}
Returns
-------
class
Updated attributes inherited from the `BaseDefaults` class.
'''
def __init__(self):
BaseDefaults.__init__(self)
'''DEV: Add defaults first. Then adjust attributes.'''
# DEFAULTS ------------------------------------------------------------
# Build dicts of geometric and material parameters
self.load_params = {
'R': 12e-3, # specimen radius
'a': 7.5e-3, # support ring radius
'p': 5, # points/layer
'P_a': 1, # applied load
'r': 2e-4, # radial distance from center loading
}
self.mat_props = {
'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
'Poissons': {'HA': 0.25, 'PSu': 0.33}
}
# ATTRIBUTES ----------------------------------------------------------
# FeatureInput
self.FeatureInput = self.get_FeatureInput(
self.Geo_objects['standard'][0],
load_params=self.load_params,
mat_props=self.mat_props,
##custom_matls=None,
model='Wilson_LT',
global_vars=None
)
|
bsd-3-clause
| 8,944,103,067,520,001,000
| 39.120155
| 103
| 0.526906
| false
| 3.524345
| false
| false
| false
|
mglidden/git-analysis
|
analysis/word_frequencies.py
|
1
|
1325
|
import fix_paths
import common
import config
from load_samples import load_samples_from_file
from models.commit import Commit
from collections import Counter
import json
import string
def get_words_from_message(commit_message):
#TODO: clean up this method
cleaned_message = str(commit_message.encode('ascii', 'ignore').replace('\n', ' ')).translate(string.maketrans('', ''), string.punctuation + '\t').lower()
return set(cleaned_message.split(' '))
def create_word_frequencies():
session = common.Session()
training_samples = load_samples_from_file(config.TRAINING_DATA_PATH)
word_frequencies = Counter()
for _, commit_id in training_samples:
commit = session.query(Commit).filter(Commit.id == commit_id).first()
for word in get_words_from_message(commit.message):
word_frequencies[word] += 1
all_words = [word for word, _ in word_frequencies.most_common(800)]
word_frequency_file = open(config.WORD_FREQUENCY_PATH, 'w')
word_frequency_file.write(json.dumps(all_words))
word_frequency_file.close()
def load_word_frequencies():
# TODO: Cache this file
word_frequency_file = open(config.WORD_FREQUENCY_PATH, 'r')
word_frequency = json.loads(word_frequency_file.read())
word_frequency_file.close()
return word_frequency
if __name__ == '__main__':
create_word_frequencies()
|
mit
| -2,114,682,968,862,255,400
| 32.974359
| 155
| 0.723019
| false
| 3.450521
| false
| false
| false
|
chebee7i/dit
|
dit/algorithms/optutil.py
|
1
|
8288
|
"""
Various utilities that can be helpful for optimization problems.
"""
from __future__ import division, print_function
from collections import defaultdict
import itertools
import numpy as np
import dit
from .frankwolfe import frank_wolfe
def as_full_rank(A, b):
"""
From a linear system Ax = b, return Bx = c such that B has full rank.
In CVXOPT, linear constraints are denoted as: Ax = b. A has shape (p, n)
and must have full rank. x has shape (n, 1), and so b has shape (p, 1).
Let's assume that we have:
rank(A) = q <= n
This is a typical situation if you are doing optimization, where you have
an under-determined system and are using some criterion for selecting out
a particular solution. Now, it may happen that q < p, which means that some
of your constraint equations are not independent. Since CVXOPT requires
that A have full rank, we must isolate an equivalent system Bx = c which
does have full rank. We use SVD for this. So A = U \Sigma V^*, where
U is (p, p), \Sigma is (p, n) and V^* is (n, n). Then:
\Sigma V^* x = U^{-1} b
We take B = \Sigma V^* and c = U^T b, where we use U^T instead of U^{-1}
for computational efficiency (and since U is orthogonal). But note, we
take only the cols of U (which are rows in U^{-1}) and rows of \Sigma that
have nonzero singular values.
Parameters
----------
A : array-like, shape (p, n)
The LHS for the linear constraints.
b : array-like, shape (p,) or (p, 1)
The RHS for the linear constraints.
Returns
-------
B : array-like, shape (q, n)
The LHS for the linear constraints.
c : array-like, shape (q,) or (q, 1)
The RHS for the linear constraints.
rank : int
The rank of B.
"""
    try:
        from scipy.linalg import svd
    except ImportError:
        from numpy.linalg import svd
    # scipy is still required below for diagsvd; the numpy fallback only
    # covers the svd call itself.
    import scipy.linalg as splinalg
A = np.atleast_2d(A)
b = np.asarray(b)
U, S, Vh = svd(A)
Smat = splinalg.diagsvd(S, A.shape[0], A.shape[1])
# See np.linalg.matrix_rank
tol = S.max() * max(A.shape) * np.finfo(S.dtype).eps
rank = np.sum(S > tol)
B = np.dot(Smat, Vh)[:rank]
c = np.dot(U.transpose(), b)[:rank]
return B, c, rank
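# Worked sketch (illustrative addition, not part of the original module): a
# system with one redundant constraint row reduces from three equations to rank 2.
#
#   >>> A = np.array([[1., 0., 1.],
#   ...               [0., 1., 1.],
#   ...               [1., 1., 2.]])   # row 3 = row 1 + row 2
#   >>> b = np.array([1., 2., 3.])
#   >>> B, c, rank = as_full_rank(A, b)
#   >>> rank
#   2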
class CVXOPT_Template(object):
"""
Template for convex minimization on probability distributions.
"""
def __init__(self, dist, tol=None, prng=None):
"""
Initialize optimizer.
Parameters
----------
dist : distribution
The distribution that is used during optimization.
tol : float | None
The desired convergence tolerance.
prng : RandomState
A NumPy-compatible pseudorandom number generator.
"""
dist = prepare_dist(dist)
self.dist = dist
self.pmf = dist.pmf
self.n_variables = dist.outcome_length()
self.n_symbols = len(dist.alphabet[0])
self.n_elements = len(self.pmf)
if prng is None:
prng = np.random.RandomState()
self.prng = prng
if tol is None:
tol = {}
self.tol = tol
self.init()
def init(self):
# Dimension of optimization variable
self.n = len(self.pmf)
# Number of nonlinear constraints
self.m = 0
self.prep()
self.build_function()
self.build_gradient_hessian()
self.build_linear_inequality_constraints()
self.build_linear_equality_constraints()
self.build_F()
def prep(self):
pass
def build_function(self):
self.func = lambda x: x.sum()
def build_gradient_hessian(self):
import numdifftools
self.gradient = numdifftools.Gradient(self.func)
self.hessian = numdifftools.Hessian(self.func)
def build_linear_inequality_constraints(self):
from cvxopt import matrix
# Dimension of optimization variable
n = self.n
# Nonnegativity constraint
#
# We have M = N = 0 (no 2nd order cones or positive semidefinite cones)
# So, K = l where l is the dimension of the nonnegative orthant. Thus,
# we have l = n.
G = matrix(-1 * np.eye(n)) # G should have shape: (K,n) = (n,n)
h = matrix(np.zeros((n,1))) # h should have shape: (K,1) = (n,1)
self.G = G
self.h = h
def build_linear_equality_constraints(self):
from cvxopt import matrix
# Normalization constraint only
A = [np.ones(self.n_elements)]
b = [1]
A = np.asarray(A, dtype=float)
b = np.asarray(b, dtype=float)
self.A = matrix(A)
self.b = matrix(b) # now a column vector
def initial_dist(self):
return self.prng.dirichlet([1] * self.n)
def build_F(self):
from cvxopt import matrix
n = self.n
m = self.m
def F(x=None, z=None):
# x has shape: (n,1) and is the distribution
# z has shape: (m+1,1) and is the Hessian of f_0
if x is None and z is None:
d = self.initial_dist()
return (m, matrix(d))
xarr = np.array(x)[:, 0]
# Verify that x is in domain.
# Does G,h and A,b take care of this?
#
if np.any(xarr > 1) or np.any(xarr < 0):
return None
if not np.allclose(np.sum(xarr), 1, **self.tol):
return None
# Derivatives
f = self.func(xarr)
Df = self.gradient(xarr)
Df = matrix(Df.reshape((1, n)))
if z is None:
return (f, Df)
else:
# Hessian
H = self.hessian(xarr)
H = matrix(H)
return (f, Df, z[0] * H)
self.F = F
def optimize(self, **kwargs):
"""
Options:
show_progress=False,
maxiters=100,
abstol=1e-7,
reltol=1e-6,
feastol=1e-7,
refinement=0 if m=0 else 1
"""
from cvxopt.solvers import cp, options
old_options = options.copy()
out = None
try:
options.clear()
options.update(kwargs)
with np.errstate(divide='ignore', invalid='ignore'):
result = cp(F=self.F,
G=self.G,
h=self.h,
dims={'l':self.G.size[0], 'q':[], 's':[]},
A=self.A,
b=self.b)
except:
raise
else:
self.result = result
out = np.asarray(result['x'])
finally:
options.clear()
options.update(old_options)
return out
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def prepare_dist(dist):
if not isinstance(dist._sample_space, dit.samplespace.CartesianProduct):
dist = dit.expanded_samplespace(dist, union=True)
if not dist.is_dense():
if len(dist._sample_space) > 1e4:
import warnings
msg = "Sample space has more than 10k elements."
msg += " This could be slow."
warnings.warn(msg)
dist.make_dense()
# We also need linear probabilities.
dist.set_base('linear')
return dist
def op_runner(objective, constraints, **kwargs):
"""
Minimize the objective specified by the constraints.
    This safely lets you pass options to the solver and restores their values
once the optimization process has completed.
The objective must be linear in the variables.
This uses cvxopt.modeling.
"""
from cvxopt.solvers import options
from cvxopt.modeling import variable, op
old_options = options.copy()
opt = op(objective, constraints)
try:
options.clear()
options.update(kwargs)
# Ignore 0 log 0 warnings.
with np.errstate(divide='ignore', invalid='ignore'):
opt.solve()
except:
raise
finally:
options.clear()
options.update(old_options)
return opt
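# Usage sketch (illustrative addition, not part of the original module;
# assumes the cvxopt.modeling API): minimize a scalar variable above a lower
# bound while silencing solver output.
#
#   >>> from cvxopt.modeling import variable
#   >>> x = variable(1, 'x')
#   >>> opt = op_runner(x, [x >= 1], show_progress=False)
#   >>> abs(x.value[0] - 1.0) < 1e-6
#   True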
|
bsd-3-clause
| 3,453,613,929,888,165,000
| 25.14511
| 79
| 0.552847
| false
| 3.781022
| false
| false
| false
|
monetaproject/moneta
|
qa/rpc-tests/util.py
|
1
|
5291
|
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Moneta developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal
import json
import shutil
import subprocess
import time
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
START_P2P_PORT=11000
START_RPC_PORT=11100
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = []
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
monetad and moneta-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run monetads:
for i in range(4):
datadir = os.path.join("cache", "node"+str(i))
os.makedirs(datadir)
with open(os.path.join(datadir, "moneta.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(START_P2P_PORT+i)+"\n");
f.write("rpcport="+str(START_RPC_PORT+i)+"\n");
args = [ "monetad", "-keypool=1", "-datadir="+datadir ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(START_P2P_PORT))
bitcoind_processes.append(subprocess.Popen(args))
subprocess.check_call([ "moneta-cli", "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(START_RPC_PORT+i,)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
for i in range(4):
rpcs[i].setgenerate(True, 25)
sync_blocks(rpcs)
for i in range(4):
rpcs[i].setgenerate(True, 25)
sync_blocks(rpcs)
# Shut them down, and remove debug.logs:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(debug_log("cache", i))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
def start_nodes(num_nodes, dir):
# Start monetads, and wait for RPC interface to be up and running:
devnull = open("/dev/null", "w+")
for i in range(num_nodes):
datadir = os.path.join(dir, "node"+str(i))
args = [ "monetad", "-datadir="+datadir ]
bitcoind_processes.append(subprocess.Popen(args))
subprocess.check_call([ "moneta-cli", "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
# Create&return JSON-RPC connections
rpc_connections = []
for i in range(num_nodes):
url = "http://rt:rt@127.0.0.1:%d"%(START_RPC_PORT+i,)
rpc_connections.append(AuthServiceProxy(url))
return rpc_connections
def debug_log(dir, n_node):
return os.path.join(dir, "node"+str(n_node), "regtest", "debug.log")
def stop_nodes(nodes):
for i in range(len(nodes)):
nodes[i].stop()
del nodes[:] # Emptying array closes connections as a side effect
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes:
bitcoind.wait()
del bitcoind_processes[:]
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(START_P2P_PORT+node_num)
from_connection.addnode(ip_port, "onetry")
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
|
mit
| -734,973,261,075,377,800
| 33.581699
| 94
| 0.600832
| false
| 3.499339
| false
| false
| false
|
wright-group/WrightData
|
2015-12 Czech/workup.py
|
1
|
3041
|
'''
First Created 2016/05/05 by Blaise Thompson
Last Edited 2016/08/08 by Blaise Thompson
Contributors: Blaise Thompson, Kyle Czech
'''
### import ####################################################################
import os
import sys
import importlib
import collections
import WrightTools as wt
### define ####################################################################
# paths
directory = os.path.dirname(__file__)
key = os.path.basename(directory)
package_folder = os.path.dirname(directory)
# shared module
spec = importlib.util.spec_from_file_location('shared', os.path.join(package_folder, 'shared.py'))
shared_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(shared_module)
# dictionaries to fill
raw_dictionary = collections.OrderedDict()
processed_dictionary = collections.OrderedDict()
### download ##################################################################
bypass_download = False
if __name__ == '__main__' and not bypass_download:
shared_module.download(key, directory)
### movie #####################################################################
raw_pickle_path = os.path.join(directory, 'raw_movie.p')
processed_pickle_path = os.path.join(directory, 'processed_movie.p')
def workup():
# raw
data_paths = wt.kit.glob_handler('.dat', folder=os.path.join(directory, 'movie'))
raw_movie = wt.data.from_COLORS(data_paths, name='MoS2 TrEE Movie')
raw_movie.save(raw_pickle_path)
# processed
processed_movie = raw_movie.copy()
processed_movie.level('ai0', 'd2', -3)
processed_movie.smooth([2, 2, 0], channel='ai0')
processed_movie.scale(channel='ai0', kind='amplitude')
processed_movie.normalize(channel='ai0')
processed_movie.save(processed_pickle_path)
# finish
return raw_movie, processed_movie
# force workup
if False:
workup()
# automatically process
shared_module.process(key='movie',
workup_method=workup, raw_pickle_path=raw_pickle_path,
processed_pickle_path=processed_pickle_path,
raw_dictionary=raw_dictionary,
processed_dictionary=processed_dictionary)
### absorbance ################################################################
raw_pickle_path = os.path.join(directory, 'absorbance_data.p')
processed_pickle_path = raw_pickle_path
def workup():
absorbance_path = os.path.join(directory, 'MoS2_TF_III_ebeam_1nm_Mo_onQuartz_T=300K__corrected.txt')
absorbance_data = wt.data.from_shimadzu(absorbance_path, name='MoS2 thin film absorbance')
absorbance_data.save(raw_pickle_path)
return absorbance_data, absorbance_data
# force workup
if False:
workup()
# automatically process
shared_module.process(key='absorbance',
workup_method=workup, raw_pickle_path=raw_pickle_path,
processed_pickle_path=processed_pickle_path,
raw_dictionary=raw_dictionary,
processed_dictionary=processed_dictionary)
|
cc0-1.0
| 7,105,389,251,518,506,000
| 28.813725
| 104
| 0.610654
| false
| 3.695018
| false
| false
| false
|
lacion/forge
|
forge/forge.py
|
1
|
5804
|
#!/usr/bin/env python
"""
forge.forge
~~~~~
:copyright: (c) 2010-2013 by Luis Morales
:license: BSD, see LICENSE for more details.
"""
#heavely based on diamond https://github.com/BrightcoveOS/Diamond
import os
import sys
import argparse
import logging
import traceback
import inspect
from util import load_class_from_name
from module import Module
class Forge(object):
"""
Forge class loads and starts modules
"""
def __init__(self, user, path, modules):
# Initialize Logging
self.log = logging.getLogger('forge')
# Initialize Members
self.modules = modules
self.user = user
self.path = path
def load_include_path(self, path):
"""
Scan for and add paths to the include path
"""
# Verify the path is valid
if not os.path.isdir(path):
return
# Add path to the system path
sys.path.append(path)
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
self.load_include_path(fpath)
def load_module(self, fqcn):
"""
Load Module class named fqcn
"""
# Load class
cls = load_class_from_name(fqcn)
# Check if cls is subclass of Module
if cls == Module or not issubclass(cls, Module):
raise TypeError("%s is not a valid Module" % fqcn)
# Log
self.log.debug("Loaded Module: %s", fqcn)
return cls
def load_modules(self, path):
"""
Scan for collectors to load from path
"""
# Initialize return value
modules = {}
# Get a list of files in the directory, if the directory exists
if not os.path.exists(path):
raise OSError("Directory does not exist: %s" % path)
if path.endswith('tests') or path.endswith('fixtures'):
return modules
# Log
self.log.debug("Loading Modules from: %s", path)
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
submodules = self.load_modules(fpath)
for key in submodules:
modules[key] = submodules[key]
# Ignore anything that isn't a .py file
elif (os.path.isfile(fpath)
and len(f) > 3
and f[-3:] == '.py'
and f[0:4] != 'test'
and f[0] != '.'):
modname = f[:-3]
try:
# Import the module
mod = __import__(modname, globals(), locals(), ['*'])
except ImportError:
# Log error
self.log.error("Failed to import module: %s. %s", modname, traceback.format_exc())
continue
# Log
self.log.debug("Loaded Module: %s", modname)
# Find all classes defined in the module
for attrname in dir(mod):
attr = getattr(mod, attrname)
                    # Only attempt to load attributes that are in fact classes,
                    # are Modules, and are not the base Module class
if (inspect.isclass(attr) and issubclass(attr, Module) and attr != Module):
# Get class name
fqcn = '.'.join([modname, attrname])
try:
                            # Load Module class
cls = self.load_module(fqcn)
                            # Add Module class
modules[cls.__name__] = cls
except Exception:
# Log error
self.log.error("Failed to load Module: %s. %s", fqcn, traceback.format_exc())
continue
        # Return Module classes
return modules
def init_module(self, cls):
"""
Initialize module
"""
module = None
try:
# Initialize module
module = cls(self.user)
# Log
self.log.debug("Initialized Module: %s", cls.__name__)
except Exception:
# Log error
self.log.error("Failed to initialize Module: %s. %s", cls.__name__, traceback.format_exc())
# Return module
return module
def run(self):
"""
Load module classes and run them
"""
# Load collectors
modules_path = self.path
self.load_include_path(modules_path)
modules = self.load_modules(modules_path)
for module in self.modules:
c = self.init_module(modules[module.capitalize()])
c.execute()
def run():
"""
    Executes the requested modules to configure the system.
"""
parser = argparse.ArgumentParser(
prog='forge',
description='forge is a command line tool that allows to execute modules to configure a linux system.',
epilog='this epilog whose whitespace will be cleaned up and whose words will be wrapped across a couple lines'
)
parser.add_argument('-u', '--user', help='Destination user', type=str, required=True)
parser.add_argument('-m', '--modules', help='List of modules to execute', nargs='+', type=str, required=True)
parser.add_argument('-p', '--path', help='path to find modules', type=str, required=True)
args = parser.parse_args()
init = Forge(args.user, args.path, args.modules)
init.run()
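# Invocation sketch (illustrative addition, not from the original file; the
# module names are hypothetical):
#
#   python forge.py -u deploy -p ./modules -m nginx postgres
#
# scans ./modules recursively for Module subclasses, then initializes and
# executes the Nginx and Postgres modules for the "deploy" user.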
|
bsd-3-clause
| -454,126,331,821,404,100
| 30.372973
| 118
| 0.52929
| false
| 4.506211
| false
| false
| false
|
edgedb/edgedb
|
edb/testbase/protocol/render_utils.py
|
1
|
1595
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2020-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import contextlib
import textwrap
class RenderBuffer:
ilevel: int
buf: List[str]
def __init__(self):
self.ilevel = 0
self.buf = []
def write(self, line: str) -> None:
self.buf.append(' ' * (self.ilevel * 2) + line)
def newline(self) -> None:
self.buf.append('')
def lastline(self) -> Optional[str]:
return self.buf[-1] if len(self.buf) else None
def popline(self) -> str:
return self.buf.pop()
def write_comment(self, comment: str) -> None:
lines = textwrap.wrap(comment, width=40)
for line in lines:
self.write(f'// {line}')
def __str__(self):
return '\n'.join(self.buf)
@contextlib.contextmanager
def indent(self):
self.ilevel += 1
try:
yield
finally:
self.ilevel -= 1
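# Minimal usage sketch (illustrative addition, not part of the original
# module): build a small nested block with indent() and write_comment().
if __name__ == '__main__':
    buf = RenderBuffer()
    buf.write('message Foo {')
    with buf.indent():
        buf.write_comment('a comment that is wrapped to the forty-char width')
        buf.write('bar: int32')
    buf.write('}')
    print(buf)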
|
apache-2.0
| -8,708,220,317,205,776
| 25.147541
| 74
| 0.641379
| false
| 3.815789
| false
| false
| false
|
AnhellO/DAS_Sistemas
|
Ene-Jun-2019/juanalmaguer/Extraordinario/Ejercicio 4.py
|
1
|
1725
|
import peewee
import sqlite3
file = 'countries.db'
db = peewee.SqliteDatabase(file)
class Pais(peewee.Model):
nombre = peewee.TextField()
lenguajes = peewee.TextField()
continente = peewee.TextField()
capital = peewee.TextField()
zona = peewee.TextField()
class Meta:
database = db
db_table = 'Country'
def count_paises():
db.connect()
total = Pais.select().count()
db.close()
return total
def data_countries(pais = 'Mexico'):
conexion = sqlite3.connect(file)
cursor = conexion.cursor()
datos = cursor.execute('select * from Paises where Nombre = "{}"'.format(pais)).fetchall()
conexion.close()
return datos[0]
def latinos():
conexion = sqlite3.connect(file)
cursor = conexion.cursor()
paises = cursor.execute('select Nombre, Lenguajes from Paises').fetchall()
hispanohablantes = []
for pais in paises:
        # pais[1] may be NULL in the database; split() itself never returns
        # None, so the old type-check against the string 'NoneType' was a no-op.
        if pais[1] is not None:
            lenguajes = pais[1].split(',')
            if 'spa' in lenguajes:
                hispanohablantes.append(pais[0])
return hispanohablantes
def europeos():
conexion = sqlite3.connect(file)
cursor = conexion.cursor()
paises = cursor.execute('select Nombre from Paises where Continente = "Europe"').fetchall()
conexion.close()
return paises
def main():
print('Total de países: {}'.format(count_paises()))
print('\nDatos de México: {}'.format(data_countries()))
paises = latinos()
print('\nPaíses hispanohablantes: ')
for pais in paises:
print(pais)
paises_europeos = europeos()
print('\nPaíses de Europa: ')
for pais in paises_europeos:
print(pais[0])
if __name__ == '__main__':
main()
|
mit
| 7,194,455,465,097,568,000
| 24.323529
| 95
| 0.632772
| false
| 3.056838
| false
| false
| false
|
TuftsBCB/Walker
|
run_walker.py
|
1
|
2820
|
"""
Main script for running tissue-specific graph walk experiments, to convergence.
"""
import sys
import argparse
from walker import Walker
def generate_seed_list(seed_file):
""" Read seed file into a list. """
seed_list = []
try:
fp = open(seed_file, "r")
except IOError:
sys.exit("Error opening file {}".format(seed_file))
for line in fp.readlines():
info = line.rstrip().split()
if len(info) > 1:
seed_list.append(info[1])
else:
seed_list.append(info[0])
fp.close()
return seed_list
def get_node_list(node_file):
node_list = []
try:
fp = open(node_file, 'r')
except IOError:
sys.exit('Could not open file: {}'.format(node_file))
# read the first (i.e. largest) connected component
cur_line = fp.readline()
while cur_line and not cur_line.isspace():
if cur_line:
node_list.append(cur_line.rstrip())
cur_line = fp.readline()
fp.close()
return node_list
def main(argv):
# set up argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('input_graph', help='Original graph input file, in\
edge list format')
parser.add_argument('seed', help='Seed file, to pull start nodes from')
parser.add_argument('-e', '--restart_prob', type=float, default=0.7,
help='Restart probability for random walk')
parser.add_argument('-l', '--low_list', nargs='?', default=None,
help='<Optional> List of genes expressed and\
unexpressed in the current tissue, if applicable')
parser.add_argument('-n', '--node_list', nargs='?', default=None,
help='<Optional> Order of output probs')
parser.add_argument('-o', '--original_graph_prob', type=float, default=0.1,
help='Probability of walking on the original (non-\
tissue specific) graph, if applicable')
parser.add_argument('-r', '--remove', nargs='+',
help='<Optional> Nodes to remove from the graph, if any')
opts = parser.parse_args()
seed_list = generate_seed_list(opts.seed)
node_list = get_node_list(opts.node_list) if opts.node_list else []
# filter nodes we want to remove out of the starting seed, if any
remove_list = opts.remove if opts.remove else []
if remove_list:
seed_list = [s for s in seed_list if s not in remove_list]
# run the experiments, and write a rank list to stdout
wk = Walker(opts.input_graph, opts.low_list, remove_list)
wk.run_exp(seed_list, opts.restart_prob,
opts.original_graph_prob, node_list)
if __name__ == '__main__':
main(sys.argv)
|
mit
| -6,129,807,627,736,509,000
| 33.390244
| 81
| 0.588652
| false
| 3.852459
| false
| false
| false
|
brunoabud/ic
|
ic/queue.py
|
1
|
4887
|
# coding: utf-8
# Copyright (C) 2016 Bruno Abude Cardoso
#
# Imagem Cinemática is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Imagem Cinemática is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import deque
from PyQt4.QtCore import QMutex, QThread, QWaitCondition, QElapsedTimer
__all__ = ['Empty', 'Full', 'Queue']
class Empty(Exception):
pass
class Full(Exception):
pass
class Locked(Exception):
pass
class Queue(object):
"""Create a queue object with a given maximum size.
"""
def __init__(self, maxsize=0):
self.maxsize = maxsize
self.queue = deque()
# Mutex using for accessing the deque
self.mutex = QMutex()
# Condition that will be held when the queue is empty and the consumer
# needs to wait for a new item
self.item_added = QWaitCondition()
# Condition that will be held when the queue is full and the producer
# needs to wait for a new place to insert the item
self.item_removed = QWaitCondition()
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
Parameters
----------
block : bool
            If True (default), the caller thread will block until the queue has
            a free space available for putting a new item. If False, the `Full`
            exception will be raised if there is no free space in the queue.
        timeout : int
            The max time to wait for a space to become available, in milliseconds.
"""
self.mutex.lock()
try:
# Check if the queue has a limit (0 means not)
if self.maxsize > 0:
# Raise Full if block is False and the queue is at max cap.
if not block:
if self._qsize() == self.maxsize:
raise Full
# If a timeout is not provided, wait indefinitely
elif timeout is None:
while self._qsize() == self.maxsize:
self.item_removed.wait(self.mutex)
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
timer = QElapsedTimer()
timer.start()
while self._qsize() == self.maxsize:
remaining = timeout - timer.elapsed()
if remaining <= 0.0:
raise Full
self.item_removed.wait(self.mutex, remaining)
self._put(item)
self.item_added.wakeOne()
finally:
self.mutex.unlock()
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
Parameters
----------
block : bool
            If True (default), the caller thread will block until an item is
            available in the queue. If False, the `Empty` exception will be
            raised if there is no item in the queue.
        timeout : int
            The max time to wait for a new item to become available, in milliseconds.
"""
self.mutex.lock()
try:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.item_added.wait(self.mutex)
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
timer = QElapsedTimer()
timer.start()
while not self._qsize():
remaining = timeout - timer.elapsed()
if remaining <= 0.0:
raise Empty
self.item_added.wait(self.mutex, remaining)
item = self._get()
self.item_removed.wakeOne()
return item
finally:
self.mutex.unlock()
def _qsize(self, len=len):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
def _clear(self):
self.queue.clear()
def clear(self):
self._clear()
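# Minimal usage sketch (illustrative addition, not part of the original
# module): non-blocking put/get on a bounded queue raises Full/Empty.
if __name__ == '__main__':
    q = Queue(maxsize=2)
    q.put('a')
    q.put('b')
    try:
        q.put('c', block=False)   # queue is at capacity
    except Full:
        print('Full raised, as expected')
    assert q.get() == 'a' and q.get() == 'b'
    try:
        q.get(block=False)        # queue is drained
    except Empty:
        print('Empty raised, as expected')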
|
gpl-3.0
| 6,604,661,603,224,259,000
| 32.689655
| 80
| 0.561515
| false
| 4.518964
| false
| false
| false
|
WPI-ARC/constrained_path_generator
|
scripts/demo.py
|
1
|
2723
|
#!/usr/bin/python
import math
import rospy
import random
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from constrained_path_generator.msg import *
from constrained_path_generator.srv import *
def make_pose((px, py, pz), (rx, ry, rz, rw)):
new_pose = Pose()
new_pose.position.x = px
new_pose.position.y = py
new_pose.position.z = pz
new_pose.orientation.x = rx
new_pose.orientation.y = ry
new_pose.orientation.z = rz
new_pose.orientation.w = rw
return new_pose
def make_pose_stamped((px, py, pz), (rx, ry, rz, rw), frame):
pose_stamped = PoseStamped()
pose_stamped.pose = make_pose((px, py, pz), (rx, ry, rz, rw))
pose_stamped.header.frame_id = frame
return pose_stamped
def make_quaternion(w, x, y, z):
new_quat = Quaternion()
new_quat.w = w
new_quat.x = x
new_quat.y = y
new_quat.z = z
return new_quat
def make_vector(x, y, z):
new_vector = Vector3()
new_vector.x = x
new_vector.y = y
new_vector.z = z
return new_vector
_joint_state = None
def joint_state_cb(msg):
global _joint_state
_joint_state = msg
def test():
test_node = rospy.init_node("test_planner")
js_sub = rospy.Subscriber("joint_states", JointState, joint_state_cb)
planner_client = rospy.ServiceProxy("plan_constrained_path", PlanConstrainedPath)
# Wait for a joint state
while _joint_state is None and not rospy.is_shutdown():
rospy.sleep(0.1)
print "got robot state"
# Make the waypoints
pose_1 = make_pose_stamped((0.585, 0.15, 1.250), (0.0, 0.888, 0.0, -0.460), "base_link")
waypoints = [pose_1]
# Make the request
query = PlanConstrainedPathQuery()
query.path_type = PlanConstrainedPathQuery.CHECK_ENVIRONMENT_COLLISIONS | PlanConstrainedPathQuery.CARTESIAN_IK | PlanConstrainedPathQuery.PLAN
query.waypoints = waypoints
query.group_name = "left_arm"
query.target_link = "l_wrist_roll_link"
query.planning_time = 5.0
query.max_cspace_jump = 0.05
query.task_space_step_size = 0.025
query.initial_state.joint_state = _joint_state
query.path_orientation_constraint = make_quaternion(0.0, 0.888, 0.0, -0.460)
query.path_angle_tolerance = make_vector(0.01, 0.01, 0.01)
query.path_position_tolerance = make_vector(0.02, 0.02, 0.02)
query.goal_angle_tolerance = make_vector(0.01, 0.01, 0.01)
query.goal_position_tolerance = make_vector(0.01, 0.01, 0.01)
full_req = PlanConstrainedPathRequest()
full_req.query = query
full_res = planner_client.call(full_req)
print full_res
# Make some collision_planes
raw_input("Press ENTER to close...")
print "Done"
if __name__ == '__main__':
test()
|
bsd-2-clause
| -177,377,274,693,844,160
| 28.608696
| 147
| 0.658832
| false
| 2.921674
| false
| false
| false
|
roderickmackenzie/gpvdm
|
gpvdm_gui/gui/license_key.py
|
1
|
2732
|
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package register
# Registration window
#
import os
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QLineEdit,QComboBox,QHBoxLayout,QPushButton,QLabel,QDialog,QVBoxLayout,QSizePolicy
from PyQt5.QtGui import QPainter,QIcon,QImage
from PyQt5.QtGui import QFont
from icon_lib import icon_get
from inp import inp_load_file
import re
from error_dlg import error_dlg
from lock import get_lock
class license_key(QDialog):
	def callback_ok(self):
		# Registration is currently stubbed out; the real calls are disabled below.
		print("boom")
#get_lock().register(email=self.email0.text(),name=self.name.text())
#get_lock().get_license()
self.accept()
def __init__(self):
QWidget.__init__(self)
self.setWindowIcon(icon_get("icon"))
self.setWindowTitle(_("Registration window (www.gpvdm.com)"))
self.setWindowFlags(Qt.WindowStaysOnTopHint)
vbox=QVBoxLayout()
l=QLabel(_("Enter the license key below:"))
l.setFont(QFont('SansSerif', 14))
vbox.addWidget(l)
hbox_widget=QWidget()
hbox=QHBoxLayout()
hbox_widget.setLayout(hbox)
l=QLabel("<b>"+_("Key")+"</b>:")
l.setFont(QFont('SansSerif', 14))
hbox.addWidget(l)
self.name = QLineEdit()
hbox.addWidget(self.name)
vbox.addWidget(hbox_widget)
button_box=QHBoxLayout()
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
button_box.addWidget(spacer)
self.register=QPushButton("Register", self)
self.register.clicked.connect(self.callback_ok)
button_box.addWidget(self.register)
button_box_widget=QWidget()
button_box_widget.setLayout(button_box)
vbox.addWidget(button_box_widget)
self.setLayout(vbox)
self.setMinimumWidth(400)
self.name.setText("key")
def run(self):
return self.exec_()
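# Usage sketch (illustrative, not part of the original module). The dialog
# relies on a gettext-style _() being installed in builtins by the main
# application; a standalone run needs a stand-in and a QApplication:
#
#   import sys, builtins
#   from PyQt5.QtWidgets import QApplication
#   builtins._ = lambda s: s          # no-op translation stand-in
#   app = QApplication(sys.argv)
#   result = license_key().run()      # run() wraps QDialog.exec_()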
|
gpl-2.0
| -5,252,635,026,861,043,000
| 25.524272
| 118
| 0.732064
| false
| 3.151096
| false
| false
| false
|
lneuhaus/pyrpl
|
pyrpl/widgets/module_widgets/pid_widget.py
|
1
|
1109
|
"""
A widget for pid modules.
"""
from .base_module_widget import ModuleWidget
from qtpy import QtCore, QtWidgets
class PidWidget(ModuleWidget):
"""
Widget for a single PID.
"""
def init_gui(self):
self.init_main_layout(orientation="vertical")
#self.main_layout = QtWidgets.QVBoxLayout()
#self.setLayout(self.main_layout)
self.init_attribute_layout()
input_filter_widget = self.attribute_widgets["inputfilter"]
self.attribute_layout.removeWidget(input_filter_widget)
self.main_layout.addWidget(input_filter_widget)
for prop in ['p', 'i']: #, 'd']:
self.attribute_widgets[prop].widget.set_log_increment()
# can't avoid timer to update ival
# self.timer_ival = QtCore.QTimer()
# self.timer_ival.setInterval(1000)
# self.timer_ival.timeout.connect(self.update_ival)
# self.timer_ival.start()
def update_ival(self):
widget = self.attribute_widgets['ival']
if self.isVisible() and not widget.editing():
widget.write_attribute_value_to_widget()
|
gpl-3.0
| 1,500,945,310,355,024,100
| 31.617647
| 67
| 0.64202
| false
| 3.746622
| false
| false
| false
|
mozillazg/bild.me-cli
|
setup.py
|
1
|
1880
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from codecs import open
import sys
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import bild
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
requirements = [
'requests>=2.0.1',
'argparse',
]
packages = [
'bild',
]
def long_description():
readme = open('README.rst', encoding='utf8').read()
text = readme + '\n\n' + open('CHANGELOG.rst', encoding='utf8').read()
return text
setup(
name='bild.me-cli',
version=bild.__version__,
description=bild.__doc__,
long_description=long_description(),
url='https://github.com/mozillazg/bild.me-cli',
download_url='https://github.com/mozillazg/bild.me-cli/archive/master.zip',
author=bild.__author__,
author_email='mozillazg101@gmail.com',
license=bild.__license__,
packages=packages,
package_data={'': ['LICENSE.txt']},
package_dir={'bild': 'bild'},
entry_points={
'console_scripts': [
'bild = bild.bild:main',
],
},
include_package_data=True,
install_requires=requirements,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Environment :: Console',
'Topic :: Utilities',
'Topic :: Terminals',
],
keywords='bild.me, CLI',
)
|
mit
| 1,008,756,644,684,967,000
| 25.478873
| 79
| 0.598936
| false
| 3.686275
| false
| false
| false
|
kamailio/kamcli
|
kamcli/commands/cmd_shv.py
|
1
|
1435
|
import click
from kamcli.cli import pass_context
from kamcli.iorpc import command_ctl
@click.group(
"shv",
help="Manage $shv(name) variables",
short_help="Manage $shv(name) variables",
)
@pass_context
def cli(ctx):
pass
@cli.command("get", short_help="Get the value for $shv(name)")
@click.argument("name", nargs=-1, metavar="<name>")
@pass_context
def shv_get(ctx, name):
"""Get the value for $shv(name)
\b
Parameters:
<name> - the name of shv variable
"""
if not name:
command_ctl(ctx, "pv.shvGet")
else:
for n in name:
command_ctl(ctx, "pv.shvGet", [n])
@cli.command("sets", short_help="Set $shv(name) to string value")
@click.argument("name", metavar="<name>")
@click.argument("sval", metavar="<sval>")
@pass_context
def shv_sets(ctx, name, sval):
"""Set $shv(name) to string value
\b
Parameters:
<name> - the name of shv variable
<sval> - the string value
"""
command_ctl(ctx, "pv.shvSet", [name, "str", sval])
@cli.command("seti", short_help="Set $shv(name) to int value")
@click.argument("name", metavar="<name>")
@click.argument("ival", metavar="<ival>", type=int)
@pass_context
def shv_seti(ctx, name, ival):
"""Set $shv(name) to int value
\b
Parameters:
<name> - the name of shv variable
<ival> - the int value
"""
command_ctl(ctx, "pv.shvSet", [name, "int", ival])
|
gpl-2.0
| -3,036,620,697,451,798,500
| 22.916667
| 65
| 0.609059
| false
| 3.027426
| false
| false
| false
|
piotroxp/scibibscan
|
scib/lib/python3.5/site-packages/astropy/io/fits/hdu/nonstandard.py
|
1
|
4066
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import io
from ..file import _File
from .base import NonstandardExtHDU
from .hdulist import HDUList
from ..header import Header, _pad_length
from ..util import fileobj_name
from ....extern.six import string_types
from ....utils import lazyproperty
class FitsHDU(NonstandardExtHDU):
"""
A non-standard extension HDU for encapsulating entire FITS files within a
single HDU of a container FITS file. These HDUs have an extension (that is
an XTENSION keyword) of FITS.
The FITS file contained in the HDU's data can be accessed by the `hdulist`
attribute which returns the contained FITS file as an `HDUList` object.
"""
_extension = 'FITS'
@lazyproperty
def hdulist(self):
self._file.seek(self._data_offset)
fileobj = io.BytesIO()
# Read the data into a BytesIO--reading directly from the file
# won't work (at least for gzipped files) due to problems deep
# within the gzip module that make it difficult to read gzip files
# embedded in another file
fileobj.write(self._file.read(self.size))
fileobj.seek(0)
if self._header['COMPRESS']:
fileobj = gzip.GzipFile(fileobj=fileobj)
return HDUList.fromfile(fileobj, mode='readonly')
@classmethod
def fromfile(cls, filename, compress=False):
"""
Like `FitsHDU.fromhdulist()`, but creates a FitsHDU from a file on
disk.
Parameters
----------
filename : str
The path to the file to read into a FitsHDU
compress : bool, optional
Gzip compress the FITS file
"""
return cls.fromhdulist(HDUList.fromfile(filename), compress=compress)
@classmethod
def fromhdulist(cls, hdulist, compress=False):
"""
Creates a new FitsHDU from a given HDUList object.
Parameters
----------
hdulist : HDUList
            A valid HDUList object.
compress : bool, optional
Gzip compress the FITS file
"""
fileobj = bs = io.BytesIO()
if compress:
if hasattr(hdulist, '_file'):
name = fileobj_name(hdulist._file)
else:
name = None
fileobj = gzip.GzipFile(name, mode='wb', fileobj=bs)
hdulist.writeto(fileobj)
if compress:
fileobj.close()
        # Technically speaking, a proper FITS file should still be padded out
        # to a multiple of 2880 bytes
padding = (_pad_length(bs.tell()) * cls._padding_byte).encode('ascii')
bs.write(padding)
bs.seek(0)
cards = [
('XTENSION', cls._extension, 'FITS extension'),
('BITPIX', 8, 'array data type'),
('NAXIS', 1, 'number of array dimensions'),
('NAXIS1', len(bs.getvalue()), 'Axis length'),
('PCOUNT', 0, 'number of parameters'),
('GCOUNT', 1, 'number of groups'),
]
# Add the XINDn keywords proposed by Perry, though nothing is done with
# these at the moment
if len(hdulist) > 1:
for idx, hdu in enumerate(hdulist[1:]):
cards.append(('XIND' + str(idx + 1), hdu._header_offset,
'byte offset of extension %d' % (idx + 1)))
cards.append(('COMPRESS', compress, 'Uses gzip compression'))
header = Header(cards)
return cls._readfrom_internal(_File(bs), header=header)
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != 'XTENSION':
return False
xtension = card.value
if isinstance(xtension, string_types):
xtension = xtension.rstrip()
return xtension == cls._extension
# TODO: Add header verification
def _summary(self):
# TODO: Perhaps make this more descriptive...
return (self.name, self.__class__.__name__, len(self._header))
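# Usage sketch (illustrative, not part of the original module):
#
#   from astropy.io import fits
#   inner = fits.open('example.fits')                 # any existing FITS file
#   fits_hdu = FitsHDU.fromhdulist(inner, compress=True)
#   container = fits.HDUList([fits.PrimaryHDU(), fits_hdu])
#   container.writeto('container.fits')
#   embedded = container[1].hdulist                   # lazily unpacks the file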
|
mit
| 3,870,601,165,106,004,500
| 31.528
| 79
| 0.591982
| false
| 4.029732
| false
| false
| false
|
sh-ft/mudwyrm_users
|
mudwyrm_users/admin/achaea/scripts/brain/combat.py
|
1
|
8663
|
from mudwyrm_users.admin.achaea import ScriptState
from mudwyrm_users.admin.achaea.action import Action, Outcome, EventOutcome
from mudwyrm_users.admin.achaea.trigger import Trigger, Alias, OnEvent, TriggerPack
from mudwyrm_users.admin.achaea.common import not_, traverse_scripts, AttrDict, partition_action
from mudwyrm_users.admin.achaea.database import Base
from mudwyrm_users.admin.achaea.scripts import char
from mudwyrm_users.admin.achaea.scripts.actions import all_actions as actions
import sqlalchemy as sa
p = None
s = ScriptState()
def init(processor):
assert processor is not None
global p
p = processor
s.loot = []
s.info_here = {}
s.state = 'inactive'
s.target = None
def think():
if s.state == 'inactive':
return
if not s.target:
return combat_echo("No target to fight.")
if isinstance(s.target, int) and s.target not in room['objects']:
s.state = 'inactive'
return combat_echo("Target has been lost. Given up on fighting.")
cure()
if s.state == 'attacking':
attack()
elif s.state == 'defending':
defend()
elif s.state == 'looting':
loot()
##################
def combat_echo(text):
p.echo("[Combat] %s" % text)
def choose_offensive_action(target):
if char.race == 'Dragon':
return (actions.gut, target)
elif char.class_ == 'Sylvan':
return (actions.thornrend if char.status('viridian')
else actions.firelash, target)
elif char.class_ == 'Serpent':
if char.skill_available('garrote'):
return (actions.garrote, target)
else:
assert char.skill_available('bite')
venom = 'sumac' if not char.skill_available('camus', 'venom') else 'camus'
return (actions.venom_bite, venom, target)
elif char.class_ == 'Shaman':
return (actions.curse, 'bleed', target)
elif char.class_ == 'Blademaster':
return (actions.drawslash, target)
elif char.class_ == 'Alchemist':
return (actions.educe_iron, target)
return None
def choose_defensive_action():
if char.skill_available('reflection'):
return (actions.reflection, 'me')
return None
def offensive_mode():
if s.state == 'defending':
s.state = 'attacking'
combat_echo("Switched to offensive mode.")
def defensive_mode():
if s.state == 'attacking':
s.state = 'defending'
combat_echo("Switched to defensive mode.")
def attack():
if char.health < char.defensive_health_level:
defensive_mode()
return
action, args = partition_action(choose_offensive_action(s.target))
if not action:
return combat_echo("No offensive action was set for this character, not attacking.")
if action.possible(*args) and not p.action_already_active(action, *args):
p.act(action, *args)
def defend():
action, args = partition_action(choose_defensive_action())
if not action:
offensive_mode()
return combat_echo("No defensive action was set for this character, not defending.")
if action.possible(*args) and not p.action_already_active(action, *args):
p.act(action, *args)
if char.health > char.offensive_health_level:
offensive_mode()
def loot():
if char.balance('balance') and char.balance('equilibrium'):
for item in s.loot:
p.send("get %s" % item)
s.loot = []
s.state = 'inactive'
combat_echo("Finished fighting.")
def cure():
if char.status('loki'):
if actions.diagnose.possible():
p.act(actions.diagnose)
##########################
@Alias(r'^(?:kill|k) (.+)$')
def combat_start(match):
target = match.group(1)
if s.state not in ['inactive', 'looting']:
return combat_echo("Already fighting someone.")
s.target = target
s.state = 'attacking'
combat_echo("Fighting %s" % s.target)
think()
@Alias(r'^(?:autokill|ak|k)$')
def autotarget_combat_start(match):
if s.state not in ['inactive', 'looting']:
return combat_echo("Already fighting someone.")
def find_target():
target_list = p.db.query(Target).all()
for obj in char.room_objects.itervalues():
for t in target_list:
if obj['name'].find(t.name) >= 0:
return obj
return None
target = find_target()
if not target:
return combat_echo("No target found.")
s.target = target['id']
s.state = 'attacking'
combat_echo("Target found: %s" % target['name'])
think()
@Alias(r'^(?:stopkill|sk)$')
def combat_stop(match):
if s.state not in ['inactive', 'looting']:
s.state = 'inactive'
combat_echo("Given up on fighting.")
else:
combat_echo("Already not fighting")
########################
@OnEvent('TargetNotFound')
def on_target_not_found():
if s.state in ['attacking', 'defending']:
s.state = 'inactive'
combat_echo("Target has been lost. Given up on fighting.")
p.notification("Combat", "Target has been lost.")
@OnEvent('CreatureSlain')
def on_creature_slain(name):
# TODO: check if a creature was the target.
if s.state in ['attacking', 'defending']:
combat_echo("Target has been slain.")
s.state = 'looting'
p.notification("Combat", "Target has been slain.")
@OnEvent('LootDropped')
def on_loot_dropped(name):
s.loot.append(name)
@Trigger(r'^You have slain (.+), retrieving the corpse\.$')
def creature_slain(match):
p.raise_event('CreatureSlain', name=match.group(1))
@Trigger(r'^A few golden sovereigns spill from the corpse\.$',
r'^A small quantity of sovereigns spills from the corpse\.$',
r'^A (?:tiny|small|large) pile of sovereigns spills from the corpse\.$',
r'^(?:Many|Numerous) golden sovereigns spill from the corpse\.$')
def gold_loot(match):
p.raise_event('LootDropped', name='money')
@Trigger(r'^A glistening iridescent pearl tumbles out of the corpse of a barnacle encrusted oyster\.$',
r'^A gleaming black pearl tumbles out of the corpse of a marsh ooze\.$')
def pearl_loot(match):
p.raise_event('LootDropped', name='pearl')
@Trigger(r'^A (?:chipped|jagged|smooth) iconic shard appears and clatters to the ground\.$')
def shard_loot(match):
p.raise_event('LootDropped', name='shard')
@Trigger(r'^The Mask of the Beast tumbles out of the corpse of a mysterious cloaked figure\.$')
def mask_of_the_beast_loot(match):
p.raise_event('LootDropped', name='mask')
class info_here(Action):
def start(action):
s.info_here.clear()
p.send("info here")
@Alias(r'^(ih|info here)$')
def aliases(match):
p.act(info_here)
@Trigger(r'^(\D+\d+)\s+(.*)$')
def info_here_line(match, action):
s.info_here[match.group(1)] = match.group(2)
@Outcome(r'^Number of objects: (\d+)$')
def info_here_end_line(match, action):
n = int(match.group(1))
p.raise_event('InfoHereUpdated')
if len(s.info_here) != n:
raise ScriptError("Warning: Number of objects captured from "
"'info here' doesn't match the actual number of objects.")
@OnEvent('InfoHereUpdated')
def ih_updated():
pass
class Target(Base):
__tablename__ = 'targets'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String, nullable=False, unique=True)
def __init__(self, name):
self.name = name
@Alias(r'^target_list$')
def target_list(match):
targets = p.db.query(Target).all()
if not targets:
p.echo("Target list is empty.")
else:
p.echo("Target list: %s." % ", ".join(t.name for t in targets))
@Alias(r'^target_add (.*)$')
def target_add(match):
target = Target(match.group(1))
p.db.add(target)
p.db.commit()
p.echo("%s has been added to the target list." % target.name)
@Alias(r'^target_remove (.*)$')
def target_remove(match):
name = match.group(1)
target = p.db.query(Target).filter(Target.name == name).first()
if not target:
return p.echo("Target list doesn't contain %s." % name)
p.db.delete(target)
p.db.commit()
p.echo("%s has been removed from the target list." % name)
|
mit
| -8,037,533,459,721,834,000
| 31.430769
| 103
| 0.594482
| false
| 3.370817
| false
| false
| false
|
GoogleCloudPlatform/PerfKitBenchmarker
|
perfkitbenchmarker/windows_benchmarks/diskspd_benchmark.py
|
1
|
1623
|
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run DiskSpd in a single VM."""
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker.windows_packages import diskspd
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'diskspd'
BENCHMARK_CONFIG = """
diskspd:
description: Run diskspd on a single machine
vm_groups:
default:
vm_spec: *default_single_core
vm_count: 1
disk_spec: *default_500_gb
"""
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
vm = benchmark_spec.vms[0]
vm.Install('diskspd')
def Run(benchmark_spec):
"""Measure the disk performance in one VM.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects with the benchmark results.
"""
vm = benchmark_spec.vms[0]
results = []
results.extend(diskspd.RunDiskSpd(vm))
return results
def Cleanup(unused_benchmark_spec):
pass
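# Example invocation (illustrative; the flags are assumptions -- DiskSpd only
# runs on Windows VMs, so a Windows --os_type is required, and PerfKit
# Benchmarker must be installed and configured for the chosen cloud):
#
#   ./pkb.py --cloud=GCP --benchmarks=diskspd --os_type=windows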
|
apache-2.0
| -5,473,095,602,450,988,000
| 25.177419
| 74
| 0.736907
| false
| 3.663657
| true
| false
| false
|
garnertb/fire-risk
|
fire_risk/backends/__init__.py
|
1
|
2548
|
import psycopg2
from .queries import ALL_RESIDENTIAL_FIRES
from psycopg2.extras import DictCursor
class Backend(object):
"""
Backend mixin that should be used to implement APIs to read data.
"""
def connect(self):
"""
Connect to the backend.
"""
raise NotImplementedError
def close_connection(self):
"""
Close the connection to the backend.
"""
raise NotImplementedError
def query(self):
"""
Query the backend.
"""
raise NotImplementedError
class FileBackend(Backend):
"""
Parse a set of NFIRS incident flat files for structure fires.
Args:
flatfiles (list): a list of file pathnames for files to be parsed.
Returns:
changes the values of the firespread_count attributes to calculated
values
"""
pass
class PostgresBackend(Backend):
"""
The Postgres Backend.
"""
def __init__(self, connection_params):
self.connection_params = connection_params
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close_connection()
def connect(self):
self.connection = psycopg2.connect(**self.connection_params)
return self.connection
def get_cursor(self):
return self.connection.cursor(cursor_factory=DictCursor)
def close_connection(self):
self.connection.close()
def query(self, query, query_params=()):
cursor = self.get_cursor()
cursor.execute(query, query_params)
return cursor
def get_firespread_counts(self, query=ALL_RESIDENTIAL_FIRES, query_params=()):
results = self.query(query=query, query_params=query_params).fetchall()
counts = dict(object_of_origin=0, room_of_origin=0, floor_of_origin=0, building_of_origin=0, beyond=0)
for result in results:
if result['fire_sprd'] == '1':
counts['object_of_origin'] += result['count']
if result['fire_sprd'] == '2':
counts['room_of_origin'] += result['count']
if result['fire_sprd'] == '3':
counts['floor_of_origin'] += result['count']
if result['fire_sprd'] == '4':
counts['building_of_origin'] += result['count']
if result['fire_sprd'] == '5':
counts['beyond'] += result['count']
return counts
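# Usage sketch (illustrative; the connection parameters below are
# placeholders):
#
#   params = {'dbname': 'nfirs', 'user': 'postgres', 'host': 'localhost'}
#   with PostgresBackend(connection_params=params) as backend:
#       counts = backend.get_firespread_counts()
#       print(counts['room_of_origin'])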
if __name__ == '__main__':
import doctest
doctest.testmod()
|
mit
| -6,737,203,091,711,348,000
| 24.737374
| 110
| 0.588305
| false
| 4.177049
| false
| false
| false
|
gardir/Devilry_sort
|
sort_deliveries.py
|
1
|
18439
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import time
import shutil
import glob
from rettescript import print_failed
class Devilry_Sort:
def __init__(self,
rootDir,
execute=True,
delete=False,
log=False,
rename=True,
unzip="false",
javacFlag=False,
verbose=False):
"""
Initializes the class
Parameters
----------
self : this
This class
rootDir : String
A string describing the path to root directory
execute : boolean
Execute means the primary function will be executed (default=True)
delete : boolean
If true it will delete all older deliveries (default=False)
log : boolean
            If True, a separate log file recording what was done is created (default=False)
rename : boolean
If renaming is false, the user-id directories will not be renamed to
contain only user-id (default=True)
        unzip : str
            Either 'false' (default, no unzipping), 'true' to locate and
            extract a single .zip file found in the root directory, or the
            path to a specific .zip file to extract before sorting
verbose : boolean
Be loud about what to do
"""
self.rootDir = rootDir
self.execute = execute
self.delete = delete
self.log = log
self.rename = rename
self.unzip = unzip
self.javacFlag = javacFlag
self.verbose = verbose
self.failed_javac = []
self.my_out = sys.stdout
self.my_err = sys.stderr
if log:
log_filename = os.path.join(rootDir, "log.txt")
self.log_file = open(log_filename, 'w')
self.log_file.close()
self.log_file = open(log_filename, 'a')
self.write_to_log("Log created")
self.my_out = self.log_file
self.my_err = self.log_file
elif not verbose:
self.null_out = open(os.devnull, 'w')
self.my_out = self.null_out
self.my_err = subprocess.STDOUT
def attempt_javac(self, path):
"""
Function inspired by rettescript.py written by Henrik Hillestad Løvold
"""
command = format("javac %s" % os.path.join(path, "*.java"))
if self.verbose:
print("%s:" % (command))
elif self.log:
self.write_to_log(format("%s:" % command))
try:
subprocess.check_call(command, shell=True, stdout=self.my_out, stderr=self.my_err)
except subprocess.CalledProcessError:
return 1
# No problem
return 0
def dive_delete(self, root_depth):
"""
"""
        for dirpath, subdirList, fileList in os.walk(self.rootDir, topdown=False):
depthList = dirpath.split(os.path.sep)
depth = len(depthList) - root_depth
if depth == 1:
for subdir in subdirList:
                    # shutil.rmtree takes the raw path, so no shell escaping is needed
                    path = os.path.join(dirpath, subdir)
command = ["rm", "-r", path]
if self.verbose:
print("Recursive removing '%s'" % path)
elif self.log:
self.write_to_log(format("Recursive removing '%s'" % path))
#subprocess.call(command, stdout = self.my_out, stderr = self.my_err)
shutil.rmtree(path)
def dive_delete_dir(self, root_depth):
        for dirpath, subdirList, fileList in os.walk(self.rootDir, topdown=False):
            depth = len(dirpath.split(os.path.sep)) - root_depth
for subdir in subdirList:
folder = os.path.join(dirpath, subdir)
command = ['rm', '-d', folder]
try:
if self.verbose:
print("Trying to remove empty folder: %s" % folder)
elif self.log:
self.write_to_log(format("Trying to remove empty folder: %s" % folder))
#subprocess.check_call(command, stdout = self.my_out, stderr = self.my_err)
os.rmdir(folder)
#except subprocess.CalledProcessError:
except OSError:
if self.verbose:
print("Removing empty folder failed: %s" % folder)
elif self.log:
self.write_to_log(format("Removing empty folder failed: %s" % folder))
if depth == 1:
self.move(dirpath, subdir)
java_files_present = len(glob.glob(dirpath+os.path.sep+'*.java')) > 0
if java_files_present and self.attempt_javac(dirpath) != 0:
if self.verbose:
print("%s failed javac" % dirpath)
elif self.log:
self.write_to_log(format("%s failed javac" % dirpath))
self.failed_javac.append(dirpath)
def dive_move(self, root_depth):
        for dirpath, subdirList, fileList in os.walk(self.rootDir, topdown=True):
depthList = dirpath.split(os.path.sep)
depth = len(depthList) - root_depth
# We only want last deadline and last delivery
if depth == 1 or depth == 2:
if (len(subdirList) > 1):
last = sorted(subdirList)[-1]
i = 0
max = len(subdirList)
while (i < max):
if (last != subdirList[i]):
del subdirList[i]
i-=1
max-=1
i+=1
#subdirList = sorted(subdirList)[-1:]
elif depth == 3:
from_path = dirpath
to_path = os.path.join(*from_path.split(os.path.sep)[:-2])
if self.verbose:
print("Moving all files in '%s' to '%s'" % (from_path, to_path))
elif self.log:
self.write_to_log(format(
"Moving all files in '%s' to '%s'" % (from_path, to_path)))
for work_file in fileList:
file_path = os.path.join(from_path, work_file)
new_file_path = os.path.join(to_path, work_file)
if self.verbose:
print("Renaming '%s' to '%s'" % (file_path, new_file_path))
elif self.log:
self.write_to_log(format("Moved '%s' to '%s'" % (file_path, new_file_path)))
#shutil.move(file_path, new_file_path)
os.rename(file_path, new_file_path)
def move(self, root_path, folder):
from_path = os.path.join(root_path, folder)
to_path = os.path.join(root_path, "older")
command = ['mv', from_path, to_path]
if self.verbose:
print("Moving older files '%s' into '%s'" % (from_path, to_path))
elif self.log:
self.write_to_log(format("Moving older files '%s' into '%s'" % (from_path, to_path)))
#subprocess.call(command, stdout = self.my_out, stderr = self.my_err)
try:
shutil.move(from_path, to_path)
except IOError as e:
if self.verbose:
print("ERROR: Could not move '%s' to '%s'" % (from_path, to_path))
print(e)
elif self.log:
self.write_to_log("ERROR: Could not move '%s' to '%s'\n%s" % (from_path, to_path, e))
def run(self):
root_depth = len(self.rootDir.split(os.path.sep))
if self.unzip != "false":
self.execute = self.unzip_execute(root_depth)
if self.execute:
if self.rename:
self.user_rename()
self.dive_move(root_depth)
self.dive_delete_dir(root_depth)
if self.delete:
self.dive_delete(root_depth)
if self.log:
self.log_file.close()
        elif not self.verbose:
self.null_out.close()
def unzip_execute(self, root_depth):
zipfile = self.unzip
if self.unzip == "true":
zipfile = self.find_zip_file(root_depth)
# Return if _one_ zip file only not found.
if self.execute:
self.unzip_file(zipfile)
self.unzip_clean(root_depth, zipfile)
        return self.execute
def find_zip_file(self, root_depth):
files = ""
zipfiles = []
for dirpath, subdirs, filenames in os.walk(self.rootDir):
depth = len(dirpath.split(os.path.sep)) - root_depth
if depth == 0:
if self.verbose:
print("Looking for zip files.")
files = filenames;
for afile in files:
if afile[-4:] == ".zip":
if self.verbose:
print("Found zip-file: %s" % afile)
elif self.log:
self.write_to_log(format("Found zip-file: %s" % afile))
zipfiles.append(afile)
if len(zipfiles) > 1:
print("Please have only the zipfile from Devilry in folder")
self.execute = False
elif len(zipfiles) == 0:
print("No zipfiles were found in '%s%s'" % (rootDir, os.path.sep))
self.execute = False
break # out from os.walk() as only files from root needed
if len(zipfiles) > 0:
return zipfiles[0]
return ""
def unzip_file(self, zipfile):
# Unzip command
from_path = format("%s" % (zipfile))
to_path = self.rootDir
command = ['unzip',
from_path,
"-d",
to_path]
if self.verbose:
print("Unzipping file: %s" % from_path)
elif self.log:
self.write_to_log(format("Unzipping file '%s'" % (from_path)))
subprocess.call(command, stdout = self.my_out, stderr = self.my_err)
def unzip_clean(self, root_depth, unzip_file):
for dirpath, subdirs, filenames in os.walk(self.rootDir):
# Finding current depth
if (dirpath[-1] == os.path.sep):
depth = len(dirpath[:-1].split(os.path.sep)) - root_depth
else:
depth = len(dirpath.split(os.path.sep)) - root_depth
# After unzipping, depth 1 is inside unzipped folder (based on Devilry)
if depth == 1:
if self.verbose:
print("Going through folders within '%s'" % dirpath)
elif self.log:
self.write_to_log(format("Going through folders within '%s'" % (dirpath)))
# Move all users/groups one directory down/back
for subdir in subdirs:
from_path = os.path.join(dirpath, subdir)
to_path = os.path.join(*dirpath.split(os.path.sep)[:-1])
if self.verbose:
print("Moving '%s' down to '%s'" % (from_path, to_path))
elif self.log:
self.write_to_log(format("Moving '%s' down to '%s'" % (from_path, to_path)))
shutil.move(from_path, to_path)
break # out from sub-folder created after zip. only these files needed moving
# Remove the now empty folder
unzipped_folder = unzip_file[unzip_file.rfind("/")+1:-4]
from_path = os.path.join(self.rootDir, unzipped_folder)
command = ["rm", "-d", from_path]
if self.verbose:
print("Removing empty folder: %s" % from_path)
elif self.log:
self.write_to_log(format("Removing empty folder: %s" % (from_path)))
#subprocess.call(command, stdout = self.my_out, stderr = self.my_err)
shutil.rmtree(from_path)
def user_rename(self):
        for dirpath, subdirList, fileList in os.walk(self.rootDir):
for subdir in subdirList:
filepath = os.path.join(dirpath, subdir)
new_filepath = os.path.join(dirpath, (subdir[0:subdir.find('(')]).replace(" ", ""))
if self.verbose:
print("Renaming '%s' to '%s'" % (filepath, new_filepath))
elif self.log:
self.write_to_log(format("Renaming '%s' to '%s'" % (filepath, new_filepath)))
os.rename(filepath, new_filepath)
break
def write_to_log(self, text):
self.log_file.write(
format("%s-%s: %s\n" %
(time.strftime("%H:%M"),
time.strftime("%d/%m/%Y"),
text)))
def print_usage():
print("Usage: python sort_deliveries.py [options] path")
print("Mandatory: path")
print("%10s -- %-s" % ("path", "the mandatory argument which is the output folder to have all user directories within when script is done"))
print("Options: -b -c -d -D -h -l -v -z [zipfile]")
print("%10s -- %-s" % ("-b", "bare move, no rename of user folder"))
print("%10s -- %-s" % ("-c", "runs javac on each user, and prints those that fail"))
print("%10s -- %-s" % ("-d", "delete the other files and folders"))
print("%10s -- %-s" % ("-D", "DEBUG mode, program will not execute"))
print("%10s -- %-s" % ("-h", "shows this menu"))
print("%10s -- %-s" % ("-l", "creates a log file for what happens"))
print("%10s -- %-s" % ("-v", "loud about what happens"))
print("%10s -- %-s" % ("-z", "unzips the .zip file in path first (if only 1 is present)"))
print("%10s -- %-s" % ("-z zipfile", "unzipz the specified zip file in path first"))
print("Example usages")
print("python sort_deliveries -z ~/Downloads/deliveries.zip .")
print("Above command will first unzip the 'deliveries.zip' into current folder, and then sort all files")
print("--")
print("python sort_deliveries -z ~/Downloads/deliveries.zip ~/assignments/assignment1")
print("Above command will first unzip the 'deliveries.zip' into the folder at '$HOME/assignments/assignment1/' before sorting said directory")
print("--")
print("python sort_deliveries .")
print("Above command will sort deliveries from current directory - it should contain ALL the users folders - so it is NOT enough to just unzip the zip file and then run the sort script on subdirectory. It should be run on directory.")
print("Command executions example")
print("unzip ~/Downloads/deliveries.zip ## This will create a folder with the same name as zip-file in current working directory")
print("python sort_deliveries deliveries ## Assuming the name of folder is equal to the zip file, it should be included as 'path'")
if __name__=='__main__':
"""
TO BE DONE
# Argument Parser
parser = argparse.ArgumentParser(description="Usage:\npython sort_deliveries.py [options] pathProgram preprocesses a latex-file ('infile') and produces a new latex-file ('outfile') with additional functionality")
parser.add_argument("infile", help="Name of the latex-file you want preprocessed")
parser.add_argument("-o", "--outfile", nargs=1, help="Name of the new file (cannot be equal to infile)")
parser.add_argument("-f", "--fancy_verbatim", help="produces more fancy verbatim", action="store_true")
parser.add_argument("-v", "--verbosity", help="increase output verbosity", action="store_true")
args = parser.parse_args()
verbose = args.verbosity
fancy = args.fancy_verbatim
if len(sys.argv) < 2 or sys.argv[-1][0] == '-':
print_usage()
sys.exit()
# Quits
"""
rootDir = "."
execute = True
delete = False
rename = True
log = False
unzip = "false"
verbose = False
javacFlag = False
# Find correct path according to arguments
argc = 1 # 0 would be programname
argl = len(sys.argv)-1
# .py -> program not the only argument
# '-' -> last argument not an option
# .zip -> last argument not the zip-file
if argl < 1 or \
sys.argv[argl].find(".py") >= 0 or \
sys.argv[argl][0] == '-' or \
sys.argv[argl].find(".zip") >= 0:
print_usage()
sys.exit()
rootDir = os.path.join(rootDir, sys.argv[-1])[2:]
if (rootDir[-1] == os.path.sep):
rootDir = rootDir[:-1]
# Handle arguments
while argc < argl:
arg = sys.argv[argc]
        # Iterate over every option letter after the leading '-'
        for letter in arg[1:]:
if letter == 'z':
unzip = "true"
if argc+1 < argl and sys.argv[argc+1].find(".zip", len(sys.argv[argc+1])-4) != -1:
argc += 1
unzip = sys.argv[argc]
elif letter == "h":
print_usage()
execute = False
break
elif letter == "l":
log = True
elif letter == "v":
verbose = True
elif letter == "d":
delete = True
elif letter == "b":
rename = False
elif letter == "D":
execute = False
elif letter == "c":
javacFlag = True
argc += 1
# Execute if executable
if execute:
sorter = Devilry_Sort(rootDir, execute, delete, log, rename, unzip, javacFlag, verbose)
sorter.run()
if javacFlag and len(sorter.failed_javac) > 0:
print_failed(sorter.failed_javac)
elif javacFlag:
print("All students compiled")
|
gpl-2.0
| 3,814,187,668,424,409,600
| 39.169935
| 238
| 0.507485
| false
| 4.13965
| false
| false
| false
|
gurnec/HashCheck
|
UnitTests/get-sha-test-vectors.py
|
1
|
3573
|
#!/usr/bin/python3
#
# SHA test vector downloader & builder
# Copyright (C) 2016 Christopher Gurnee. All rights reserved.
#
# Please refer to readme.md for information about this source code.
# Please refer to license.txt for details about distribution and modification.
#
# Downloads/builds SHA1-3 test vectors from the NIST Cryptographic Algorithm Validation Program
import os, os.path, urllib.request, io, zipfile, glob, re
# Determine and if necessary create the output directory
test_vectors_dir = os.path.join(os.path.dirname(__file__), 'vectors\\')
if not os.path.isdir(test_vectors_dir):
os.mkdir(test_vectors_dir)
# Download and unzip the two NIST test vector "response" files
for sha_url in ('http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip',
'http://csrc.nist.gov/groups/STM/cavp/documents/sha3/sha-3bytetestvectors.zip'):
print('downloading and extracting', sha_url)
with urllib.request.urlopen(sha_url) as sha_downloading: # open connection to the download url;
with io.BytesIO(sha_downloading.read()) as sha_downloaded_zip: # download entirely into ram;
with zipfile.ZipFile(sha_downloaded_zip) as sha_zipcontents: # open the zip file from ram;
sha_zipcontents.extractall(test_vectors_dir) # extract the zip file into the output dir
# Convert each response file into a set of test vector files and a single expected .sha* file
print('creating test vector files and expected .sha* files from NIST response files')
rsp_filename_re = re.compile(r'\bSHA([\d_]+)(?:Short|Long)Msg.rsp$', re.IGNORECASE)
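# Each .rsp file is a sequence of stanzas like the following (abridged
# illustration; "Len" is the message length in bits, "Msg" the hex-encoded
# input, and "MD" the expected digest):
#
#   Len = 16
#   Msg = 41fb
#   MD = <expected digest in hex>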
for rsp_filename in glob.iglob(test_vectors_dir + '*.rsp'):
rsp_filename_match = rsp_filename_re.search(rsp_filename)
if not rsp_filename_match: # ignore the Monte Carlo simulation files
continue
print(' processing', rsp_filename_match.group(0))
with open(rsp_filename) as rsp_file:
# Create the expected .sha file which covers this set of test vector files
with open(rsp_filename + '.sha' + rsp_filename_match.group(1).replace('_', '-'), 'w', encoding='utf8') as sha_file:
dat_filenum = 0
for line in rsp_file:
# The "Len" line, specifies the length of the following test vector in bits
if line.startswith('Len ='):
dat_filelen = int(line[5:].strip())
dat_filelen, dat_filelenmod = divmod(dat_filelen, 8)
if dat_filelenmod != 0:
raise ValueError('unexpected bit length encountered (not divisible by 8)')
# The "Msg" line, specifies the test vector encoded in hex
elif line.startswith('Msg ='):
dat_filename = rsp_filename + '-{:04}.dat'.format(dat_filenum)
dat_filenum += 1
# Create the test vector file
with open(dat_filename, 'wb') as dat_file:
dat_file.write(bytes.fromhex(line[5:].strip()[:2*dat_filelen]))
del dat_filelen
# The "MD" line, specifies the expected hash encoded in hex
elif line.startswith('MD ='):
# Write the expected hash to the .sha file which covers this test vector file
print(line[4:].strip(), '*' + os.path.basename(dat_filename), file=sha_file)
del dat_filename
print("done")
|
bsd-3-clause
| -4,253,490,971,588,409,000
| 46.283784
| 123
| 0.617128
| false
| 3.965594
| true
| false
| false
|
NINAnor/QGIS
|
python/plugins/processing/gui/AlgorithmDialogBase.py
|
1
|
6211
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
AlgorithmDialogBase.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import webbrowser
from PyQt4 import uic
from PyQt4.QtCore import QCoreApplication, QSettings, QByteArray, SIGNAL, QUrl
from PyQt4.QtGui import QApplication, QDialogButtonBox, QDesktopWidget
from qgis.utils import iface
from qgis.core import *
from processing.core.ProcessingConfig import ProcessingConfig
from processing.gui import AlgorithmClassification
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgAlgorithmBase.ui'))
class AlgorithmDialogBase(BASE, WIDGET):
class InvalidParameterValue(Exception):
def __init__(self, param, widget):
(self.parameter, self.widget) = (param, widget)
def __init__(self, alg):
super(AlgorithmDialogBase, self).__init__(iface.mainWindow())
self.setupUi(self)
self.settings = QSettings()
self.restoreGeometry(self.settings.value("/Processing/dialogBase", QByteArray()))
self.executed = False
self.mainWidget = None
self.alg = alg
# Rename OK button to Run
self.btnRun = self.buttonBox.button(QDialogButtonBox.Ok)
self.btnRun.setText(self.tr('Run'))
self.btnClose = self.buttonBox.button(QDialogButtonBox.Close)
self.setWindowTitle(AlgorithmClassification.getDisplayName(self.alg))
desktop = QDesktopWidget()
if desktop.physicalDpiX() > 96:
self.textHelp.setZoomFactor(desktop.physicalDpiX() / 96)
algHelp = self.alg.shortHelp()
if algHelp is None:
self.textShortHelp.setVisible(False)
else:
self.textShortHelp.document().setDefaultStyleSheet('''.summary { margin-left: 10px; margin-right: 10px; }
h2 { color: #555555; padding-bottom: 15px; }
a { text-decoration: none; color: #3498db; font-weight: bold; }
p { color: #666666; }
b { color: #333333; }
dl dd { margin-bottom: 5px; }''')
self.textShortHelp.setHtml(algHelp)
self.textShortHelp.setOpenLinks(False)
def linkClicked(url):
webbrowser.open(url.toString())
self.textShortHelp.connect(self.textShortHelp, SIGNAL("anchorClicked(const QUrl&)"), linkClicked)
self.textHelp.page().setNetworkAccessManager(QgsNetworkAccessManager.instance())
isText, algHelp = self.alg.help()
if algHelp is not None:
algHelp = algHelp if isText else QUrl(algHelp)
try:
if isText:
self.textHelp.setHtml(algHelp)
else:
self.textHelp.settings().clearMemoryCaches()
self.textHelp.load(algHelp)
except:
self.tabWidget.removeTab(2)
else:
self.tabWidget.removeTab(2)
self.showDebug = ProcessingConfig.getSetting(
ProcessingConfig.SHOW_DEBUG_IN_DIALOG)
def closeEvent(self, evt):
self.settings.setValue("/Processing/dialogBase", self.saveGeometry())
super(AlgorithmDialogBase, self).closeEvent(evt)
def setMainWidget(self):
self.tabWidget.widget(0).layout().addWidget(self.mainWidget)
def error(self, msg):
QApplication.restoreOverrideCursor()
self.setInfo(msg, True)
self.resetGUI()
self.tabWidget.setCurrentIndex(1)
def resetGUI(self):
QApplication.restoreOverrideCursor()
self.lblProgress.setText('')
self.progressBar.setMaximum(100)
self.progressBar.setValue(0)
self.btnRun.setEnabled(True)
self.btnClose.setEnabled(True)
def setInfo(self, msg, error=False):
if error:
self.txtLog.append('<span style="color:red"><br>%s<br></span>' % msg)
else:
self.txtLog.append(msg)
QCoreApplication.processEvents()
def setCommand(self, cmd):
if self.showDebug:
self.setInfo('<code>%s<code>' % cmd)
QCoreApplication.processEvents()
def setDebugInfo(self, msg):
if self.showDebug:
self.setInfo('<span style="color:blue">%s</span>' % msg)
QCoreApplication.processEvents()
def setConsoleInfo(self, msg):
if self.showDebug:
self.setCommand('<span style="color:darkgray">%s</span>' % msg)
QCoreApplication.processEvents()
def setPercentage(self, value):
if self.progressBar.maximum() == 0:
self.progressBar.setMaximum(100)
self.progressBar.setValue(value)
QCoreApplication.processEvents()
def setText(self, text):
self.lblProgress.setText(text)
self.setInfo(text, False)
QCoreApplication.processEvents()
    def setParamValues(self):
        # Hook: subclasses populate the parameter widgets here.
        pass
    def setParamValue(self, param, widget, alg=None):
        # Hook: subclasses read a single parameter value from its widget.
        pass
    def accept(self):
        # Hook: subclasses validate parameters and run the algorithm.
        pass
    def finish(self):
        # Hook: subclasses load and present the algorithm's results.
        pass
|
gpl-2.0
| -3,842,399,775,241,946,000
| 34.090395
| 117
| 0.566736
| false
| 4.292329
| false
| false
| false
|
nathanielvarona/airflow
|
airflow/providers/apache/sqoop/hooks/sqoop.py
|
1
|
15515
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains a sqoop 1.x hook"""
import subprocess
from copy import deepcopy
from typing import Any, Dict, List, Optional
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
class SqoopHook(BaseHook):
"""
This hook is a wrapper around the sqoop 1 binary. To be able to use the hook
it is required that "sqoop" is in the PATH.
Additional arguments that can be passed via the 'extra' JSON field of the
sqoop connection:
* ``job_tracker``: Job tracker local|jobtracker:port.
* ``namenode``: Namenode.
* ``lib_jars``: Comma separated jar files to include in the classpath.
* ``files``: Comma separated files to be copied to the map reduce cluster.
* ``archives``: Comma separated archives to be unarchived on the compute
machines.
* ``password_file``: Path to file containing the password.
:param conn_id: Reference to the sqoop connection.
:type conn_id: str
:param verbose: Set sqoop to verbose.
:type verbose: bool
:param num_mappers: Number of map tasks to import in parallel.
:type num_mappers: int
:param properties: Properties to set via the -D argument
:type properties: dict
"""
conn_name_attr = 'conn_id'
default_conn_name = 'sqoop_default'
conn_type = 'sqoop'
hook_name = 'Sqoop'
def __init__(
self,
conn_id: str = default_conn_name,
verbose: bool = False,
num_mappers: Optional[int] = None,
hcatalog_database: Optional[str] = None,
hcatalog_table: Optional[str] = None,
properties: Optional[Dict[str, Any]] = None,
) -> None:
# No mutable types in the default parameters
super().__init__()
self.conn = self.get_connection(conn_id)
connection_parameters = self.conn.extra_dejson
self.job_tracker = connection_parameters.get('job_tracker', None)
self.namenode = connection_parameters.get('namenode', None)
self.libjars = connection_parameters.get('libjars', None)
self.files = connection_parameters.get('files', None)
self.archives = connection_parameters.get('archives', None)
self.password_file = connection_parameters.get('password_file', None)
self.hcatalog_database = hcatalog_database
self.hcatalog_table = hcatalog_table
self.verbose = verbose
self.num_mappers = num_mappers
self.properties = properties or {}
self.log.info("Using connection to: %s:%s/%s", self.conn.host, self.conn.port, self.conn.schema)
def get_conn(self) -> Any:
return self.conn
def cmd_mask_password(self, cmd_orig: List[str]) -> List[str]:
"""Mask command password for safety"""
cmd = deepcopy(cmd_orig)
try:
password_index = cmd.index('--password')
cmd[password_index + 1] = 'MASKED'
except ValueError:
self.log.debug("No password in sqoop cmd")
return cmd
def popen(self, cmd: List[str], **kwargs: Any) -> None:
"""
Remote Popen
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
        :raises AirflowException: if the sqoop command exits with a non-zero code
"""
masked_cmd = ' '.join(self.cmd_mask_password(cmd))
self.log.info("Executing command: %s", masked_cmd)
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs) as sub_process:
for line in iter(sub_process.stdout): # type: ignore
self.log.info(line.strip())
sub_process.wait()
self.log.info("Command exited with return code %s", sub_process.returncode)
if sub_process.returncode:
raise AirflowException(f"Sqoop command failed: {masked_cmd}")
def _prepare_command(self, export: bool = False) -> List[str]:
sqoop_cmd_type = "export" if export else "import"
connection_cmd = ["sqoop", sqoop_cmd_type]
for key, value in self.properties.items():
connection_cmd += ["-D", f"{key}={value}"]
if self.namenode:
connection_cmd += ["-fs", self.namenode]
if self.job_tracker:
connection_cmd += ["-jt", self.job_tracker]
if self.libjars:
connection_cmd += ["-libjars", self.libjars]
if self.files:
connection_cmd += ["-files", self.files]
if self.archives:
connection_cmd += ["-archives", self.archives]
if self.conn.login:
connection_cmd += ["--username", self.conn.login]
if self.conn.password:
connection_cmd += ["--password", self.conn.password]
if self.password_file:
connection_cmd += ["--password-file", self.password_file]
if self.verbose:
connection_cmd += ["--verbose"]
if self.num_mappers:
connection_cmd += ["--num-mappers", str(self.num_mappers)]
if self.hcatalog_database:
connection_cmd += ["--hcatalog-database", self.hcatalog_database]
if self.hcatalog_table:
connection_cmd += ["--hcatalog-table", self.hcatalog_table]
connect_str = self.conn.host
if self.conn.port:
connect_str += f":{self.conn.port}"
if self.conn.schema:
connect_str += f"/{self.conn.schema}"
connection_cmd += ["--connect", connect_str]
return connection_cmd
@staticmethod
def _get_export_format_argument(file_type: str = 'text') -> List[str]:
if file_type == "avro":
return ["--as-avrodatafile"]
elif file_type == "sequence":
return ["--as-sequencefile"]
elif file_type == "parquet":
return ["--as-parquetfile"]
elif file_type == "text":
return ["--as-textfile"]
else:
raise AirflowException("Argument file_type should be 'avro', 'sequence', 'parquet' or 'text'.")
def _import_cmd(
self,
target_dir: Optional[str],
append: bool,
file_type: str,
split_by: Optional[str],
direct: Optional[bool],
driver: Any,
extra_import_options: Any,
) -> List[str]:
cmd = self._prepare_command(export=False)
if target_dir:
cmd += ["--target-dir", target_dir]
if append:
cmd += ["--append"]
cmd += self._get_export_format_argument(file_type)
if split_by:
cmd += ["--split-by", split_by]
if direct:
cmd += ["--direct"]
if driver:
cmd += ["--driver", driver]
if extra_import_options:
for key, value in extra_import_options.items():
cmd += [f'--{key}']
if value:
cmd += [str(value)]
return cmd
# pylint: disable=too-many-arguments
def import_table(
self,
table: str,
target_dir: Optional[str] = None,
append: bool = False,
file_type: str = "text",
columns: Optional[str] = None,
split_by: Optional[str] = None,
where: Optional[str] = None,
direct: bool = False,
driver: Any = None,
extra_import_options: Optional[Dict[str, Any]] = None,
) -> Any:
"""
Imports table from remote location to target dir. Arguments are
copies of direct sqoop command line arguments
:param table: Table to read
:param target_dir: HDFS destination dir
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" or "parquet".
            Imports data into the specified format. Defaults to text.
:param columns: <col,col,col…> Columns to import from table
:param split_by: Column of the table used to split work units
:param where: WHERE clause to use during import
:param direct: Use direct connector if exists for the database
:param driver: Manually specify JDBC driver class to use
:param extra_import_options: Extra import options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options)
cmd += ["--table", table]
if columns:
cmd += ["--columns", columns]
if where:
cmd += ["--where", where]
self.popen(cmd)
def import_query(
self,
query: str,
target_dir: Optional[str] = None,
append: bool = False,
file_type: str = "text",
split_by: Optional[str] = None,
direct: Optional[bool] = None,
driver: Optional[Any] = None,
extra_import_options: Optional[Dict[str, Any]] = None,
) -> Any:
"""
Imports a specific query from the rdbms to hdfs
:param query: Free format query to run
:param target_dir: HDFS destination dir
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" or "parquet"
Imports data to hdfs into the specified format. Defaults to text.
:param split_by: Column of the table used to split work units
:param direct: Use direct import fast path
:param driver: Manually specify JDBC driver class to use
:param extra_import_options: Extra import options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options)
cmd += ["--query", query]
self.popen(cmd)
# pylint: disable=too-many-arguments
def _export_cmd(
self,
table: str,
export_dir: Optional[str] = None,
input_null_string: Optional[str] = None,
input_null_non_string: Optional[str] = None,
staging_table: Optional[str] = None,
clear_staging_table: bool = False,
enclosed_by: Optional[str] = None,
escaped_by: Optional[str] = None,
input_fields_terminated_by: Optional[str] = None,
input_lines_terminated_by: Optional[str] = None,
input_optionally_enclosed_by: Optional[str] = None,
batch: bool = False,
relaxed_isolation: bool = False,
extra_export_options: Optional[Dict[str, Any]] = None,
) -> List[str]:
cmd = self._prepare_command(export=True)
if input_null_string:
cmd += ["--input-null-string", input_null_string]
if input_null_non_string:
cmd += ["--input-null-non-string", input_null_non_string]
if staging_table:
cmd += ["--staging-table", staging_table]
if clear_staging_table:
cmd += ["--clear-staging-table"]
if enclosed_by:
cmd += ["--enclosed-by", enclosed_by]
if escaped_by:
cmd += ["--escaped-by", escaped_by]
if input_fields_terminated_by:
cmd += ["--input-fields-terminated-by", input_fields_terminated_by]
if input_lines_terminated_by:
cmd += ["--input-lines-terminated-by", input_lines_terminated_by]
if input_optionally_enclosed_by:
cmd += ["--input-optionally-enclosed-by", input_optionally_enclosed_by]
if batch:
cmd += ["--batch"]
if relaxed_isolation:
cmd += ["--relaxed-isolation"]
if export_dir:
cmd += ["--export-dir", export_dir]
if extra_export_options:
for key, value in extra_export_options.items():
cmd += [f'--{key}']
if value:
cmd += [str(value)]
# The required option
cmd += ["--table", table]
return cmd
# pylint: disable=too-many-arguments
def export_table(
self,
table: str,
export_dir: Optional[str] = None,
input_null_string: Optional[str] = None,
input_null_non_string: Optional[str] = None,
staging_table: Optional[str] = None,
clear_staging_table: bool = False,
enclosed_by: Optional[str] = None,
escaped_by: Optional[str] = None,
input_fields_terminated_by: Optional[str] = None,
input_lines_terminated_by: Optional[str] = None,
input_optionally_enclosed_by: Optional[str] = None,
batch: bool = False,
relaxed_isolation: bool = False,
extra_export_options: Optional[Dict[str, Any]] = None,
) -> None:
"""
Exports Hive table to remote location. Arguments are copies of direct
        sqoop command line arguments.
:param table: Table remote destination
:param export_dir: Hive table to export
:param input_null_string: The string to be interpreted as null for
string columns
:param input_null_non_string: The string to be interpreted as null
for non-string columns
:param staging_table: The table in which data will be staged before
being inserted into the destination table
:param clear_staging_table: Indicate that any data present in the
staging table can be deleted
:param enclosed_by: Sets a required field enclosing character
:param escaped_by: Sets the escape character
:param input_fields_terminated_by: Sets the field separator character
:param input_lines_terminated_by: Sets the end-of-line character
:param input_optionally_enclosed_by: Sets a field enclosing character
:param batch: Use batch mode for underlying statement execution
:param relaxed_isolation: Transaction isolation to read uncommitted
for the mappers
:param extra_export_options: Extra export options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._export_cmd(
table,
export_dir,
input_null_string,
input_null_non_string,
staging_table,
clear_staging_table,
enclosed_by,
escaped_by,
input_fields_terminated_by,
input_lines_terminated_by,
input_optionally_enclosed_by,
batch,
relaxed_isolation,
extra_export_options,
)
self.popen(cmd)
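# Usage sketch (illustrative; assumes a configured 'sqoop_default' Airflow
# connection and the sqoop binary on the PATH):
#
#   hook = SqoopHook(conn_id='sqoop_default', num_mappers=4)
#   hook.import_table(table='users', target_dir='/data/users',
#                     file_type='avro')
#   hook.export_table(table='users_out', export_dir='/data/users')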
|
apache-2.0
| 389,434,711,630,519,940
| 36.652913
| 110
| 0.597499
| false
| 4.073792
| false
| false
| false
|
andywalz/PyFileMaker
|
setup.py
|
1
|
1145
|
#!/usr/bin/env python
from setuptools import setup
from PyFileMaker import __version__
setup(
name='PyFileMaker',
version=__version__,
description='Python Object Wrapper for FileMaker Server XML Interface',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords=['FileMaker'],
author='Klokan Petr Pridal, Pieter Claerhout, Marcin Kawa',
author_email='klokan@klokan.cz, pieter@yellowduck.be, kawa.macin@gmail.com',
url='https://github.com/aeguana/PyFileMaker',
download_url='https://github.com/aeguana/PyFileMaker/releases',
license='http://www.opensource.org/licenses/bsd-license.php',
    platforms=['any'],
packages=['PyFileMaker'],
install_requires=['requests'],
)
|
bsd-3-clause
| -9,189,683,703,606,496,000
| 37.166667
| 80
| 0.655022
| false
| 3.934708
| false
| false
| false
|
jpvanhal/cloudsizzle
|
cloudsizzle/scrapers/oodi/items.py
|
1
|
1589
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009-2010 CloudSizzle Team
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
The models for scraped items.
See documentation in:
http://doc.scrapy.org/topics/items.html
"""
from scrapy.item import Item, Field
from cloudsizzle.scrapers.items import DateField
class CompletedCourseItem(Item):
name = Field()
code = Field()
cr = Field()
ocr = Field()
grade = Field()
date = DateField('%d.%m.%Y')
teacher = Field()
module = Field()
class ModuleItem(Item):
name = Field()
code = Field()
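# Hedged usage sketch: the items are filled in a spider like any Scrapy
# Item; the values below are made-up placeholders.
#   item = CompletedCourseItem()
#   item['name'] = 'Data Structures and Algorithms'
#   item['code'] = 'T-106.1220'
#   item['date'] = '18.12.2009'  # parsed by DateField('%d.%m.%Y')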
|
mit
| 7,193,697,621,543,712,000
| 30.156863
| 67
| 0.730648
| false
| 4.063939
| false
| false
| false
|
Jigsaw-Code/net-analysis
|
netanalysis/traffic/data/api_repository.py
|
1
|
3175
|
#!/usr/bin/python
#
# Copyright 2019 Jigsaw Operations LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Library to access Google's traffic data from its Transparency Report
"""
import datetime
import json
import ssl
import time
from urllib.request import urlopen, Request
from urllib.parse import urlencode, quote
import certifi
import pandas as pd
from netanalysis.traffic.data import model
def _to_timestamp(time_point: datetime.datetime):
return time.mktime(time_point.timetuple())
_SSL_CONTEXT = ssl.create_default_context(cafile=certifi.where())
class ApiTrafficRepository(model.TrafficRepository):
"""TrafficRepository that reads the traffic data from Google's Transparency Report."""
def _query_api(self, endpoint, params=None):
query_url = "https://www.google.com/transparencyreport/api/v3/traffic/" + \
quote(endpoint)
if params:
query_url = query_url + "?" + urlencode(params)
try:
request = Request(query_url)
request.add_header("User-Agent", "Jigsaw-Code/netanalysis")
with urlopen(request, context=_SSL_CONTEXT) as response:
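                # the response starts with a 6-byte non-JSON prefix,
                # which the slice below skips before parsing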
return json.loads(response.read()[6:].decode("utf8"))
except Exception as error:
raise Exception("Failed to query url %s" % query_url, error)
def list_regions(self):
response_proto = self._query_api("regionlist")
return sorted([e[0] for e in response_proto[0][1]])
def get_traffic(self, region_code: str, product_id: model.ProductId,
start: datetime.datetime = None, end: datetime.datetime = None):
DEFAULT_INTERVAL_DAYS = 2 * 365
POINTS_PER_DAY = 48
if not end:
end = datetime.datetime.now()
if not start:
start = end - datetime.timedelta(days=DEFAULT_INTERVAL_DAYS)
number_of_days = (end - start).days
total_points = int(number_of_days * POINTS_PER_DAY)
entries = []
params = [
("start", int(_to_timestamp(start) * 1000)),
("end", int(_to_timestamp(end) * 1000)),
("width", total_points),
("product", product_id.value),
("region", region_code)]
response_proto = self._query_api("fraction", params)
entry_list_proto = response_proto[0][1]
for entry_proto in entry_list_proto:
timestamp = datetime.datetime.utcfromtimestamp(
entry_proto[0] / 1000)
value = entry_proto[1][0][1]
entries.append((timestamp, value / POINTS_PER_DAY / 2))
dates, traffic = zip(*entries)
return pd.Series(traffic, index=dates)
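    # Hedged usage sketch (network access required); the region code is
    # illustrative and the ProductId member is an assumed example from the
    # accompanying model module:
    #   repo = ApiTrafficRepository()
    #   regions = repo.list_regions()
    #   series = repo.get_traffic("EG", model.ProductId.WEB_SEARCH)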
|
apache-2.0
| -1,836,266,151,497,937,000
| 36.352941
| 90
| 0.647874
| false
| 3.953923
| false
| false
| false
|
openstate/yournextrepresentative
|
candidates/diffs.py
|
1
|
8701
|
# The functions in this file are to help produce human readable diffs
# between our JSON representation of candidates.
import re
from django.conf import settings
from django.utils.translation import ugettext as _
import jsonpatch
import jsonpointer
def get_descriptive_value(election, attribute, value, leaf):
"""Get a sentence fragment describing someone's status in a particular year
'attribute' is either "standing_in" or "party_membership", 'election'
is one of the keys from settings.ELECTIONS, and 'value' is what would
be under that year in the 'standing_in' or 'party_memberships'
dictionary (see the comment at the top of update.py)."""
election_data = settings.ELECTIONS[election]
current_election = election_data.get('current')
election_name = election_data['name']
if attribute == 'party_memberships':
if leaf:
# In that case, there's only a particular value in the
# dictionary that's changed:
if leaf == 'name':
if current_election:
message = _(u"is known to be standing for the party '{party}' in the {election}")
else:
message = _(u"was known to be standing for the party '{party}' in the {election}")
return message.format(party=value, election=election_name)
elif leaf == 'id':
if current_election:
message = _(u'is known to be standing for the party with ID {party} in the {election}')
else:
message = _(u'was known to be standing for the party with ID {party} in the {election}')
return message.format(party=value, election=election_name)
else:
message = _(u"Unexpected leaf {0} (attribute: {1}, election: {2}")
raise Exception, message.format(
leaf, attribute, election
)
else:
if current_election:
message = _(u'is known to be standing for the party "{party}" in the {election}')
else:
message = _(u'was known to be standing for the party "{party}" in the {election}')
return message.format(party=value['name'], election=election_name)
elif attribute == 'standing_in':
if value is None:
if current_election:
message = _(u'is known not to be standing in the {election}')
else:
message = _(u'was known not to be standing in the {election}')
return message.format(election=election_name)
else:
if leaf:
if leaf == 'post_id':
if current_election:
message = _("is known to be standing for the post with ID {party} in the {election}")
else:
message = _("was known to be standing for the post with ID {party} in the {election}")
return message.format(party=value, election=election_name)
elif leaf == 'mapit_url':
if current_election:
message = _("is known to be standing in the constituency with MapIt URL {party} in the {election}")
else:
message = _("was known to be standing in the constituency with MapIt URL {party} in the {election}")
return message.format(party=value, election=election_name)
elif leaf == 'name':
if current_election:
message = _("is known to be standing in {party} in the {election}")
else:
message = _("was known to be standing in {party} in the {election}")
return message.format(party=value, election=election_name)
elif leaf == 'elected':
if value:
return _("was elected in the {election}").format(election=election_name)
else:
return _("was not elected in the {election}").format(election=election_name)
else:
message = _(u"Unexpected leaf {0} (attribute: {1}, election: {2}")
raise Exception, message.format(
leaf, attribute, election
)
else:
if current_election:
message = _(u'is known to be standing in {party} in the {election}')
else:
message = _(u'was known to be standing in {party} in the {election}')
return message.format(party=value['name'], election=election_name)
def explain_standing_in_and_party_memberships(operation, attribute, election, leaf):
"""Set 'value' and 'previous_value' in operation to a readable explanation
'attribute' is one of 'standing_in' or 'party_memberships'."""
for key in ('previous_value', 'value'):
if key not in operation:
continue
if election:
operation[key] = get_descriptive_value(
election,
attribute,
operation[key],
leaf,
)
else:
clauses = []
for election, value in (operation[key] or {}).items():
clauses.append(get_descriptive_value(
election,
attribute,
value,
leaf,
))
operation[key] = _(u' and ').join(clauses)
def get_version_diff(from_data, to_data):
"""Calculate the diff (a mangled JSON patch) between from_data and to_data"""
basic_patch = jsonpatch.make_patch(from_data, to_data)
result = []
for operation in basic_patch:
op = operation['op']
# We deal with standing_in and party_memberships slightly
# differently so they can be presented in human-readable form,
# so match those cases first:
m = re.search(
r'(standing_in|party_memberships)(?:/([-_A-Za-z0-9]+))?(?:/(\w+))?',
operation['path'],
)
if op in ('replace', 'remove'):
operation['previous_value'] = \
jsonpointer.resolve_pointer(
from_data,
operation['path']
)
attribute, election, leaf = m.groups() if m else (None, None, None)
if attribute:
explain_standing_in_and_party_memberships(operation, attribute, election, leaf)
if op in ('replace', 'remove'):
# Ignore replacing no data with no data:
if op == 'replace' and \
not operation['previous_value'] and \
not operation['value']:
continue
if op == 'replace' and not operation['previous_value']:
operation['op'] = 'add'
elif op == 'add':
# It's important that we don't skip the case where a
# standing_in value is being set to None, because that's
# saying 'we *know* they're not standing then'
if (not operation['value']) and (attribute != 'standing_in'):
continue
operation['path'] = re.sub(r'^/', '', operation['path'])
result.append(operation)
result.sort(key=lambda o: (o['op'], o['path']))
return result
def clean_version_data(data):
# We're not interested in changes of these IDs:
for i in data.get('identifiers', []):
i.pop('id', None)
for on in data.get('other_names', []):
on.pop('id', None)
data.pop('last_party', None)
data.pop('proxy_image', None)
data.pop('date_of_birth', None)
def get_version_diffs(versions):
"""Add a diff to each of an array of version dicts
The first version is the most recent; the last is the original
version."""
result = []
n = len(versions)
for i, v in enumerate(versions):
# to_version_data = replace_empty_with_none(
# versions[i]['data']
# )
to_version_data = versions[i]['data']
if i == (n - 1):
from_version_data = {}
else:
# from_version_data = replace_empty_with_none(
# versions[i + 1]['data']
# )
from_version_data = versions[i + 1]['data']
clean_version_data(to_version_data)
clean_version_data(from_version_data)
version_with_diff = versions[i].copy()
version_with_diff['diff'] = \
get_version_diff(from_version_data, to_version_data)
result.append(version_with_diff)
return result
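# Hedged worked example of get_version_diff on a plain field (no
# standing_in or party_memberships data, so settings.ELECTIONS is not
# consulted); the names are made up:
#   get_version_diff({'name': 'Jo Smith'}, {'name': 'Joanne Smith'})
#   => [{'op': 'replace', 'path': 'name',
#        'previous_value': 'Jo Smith', 'value': 'Joanne Smith'}]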
|
agpl-3.0
| -1,124,514,147,237,027,800
| 42.288557
| 124
| 0.54488
| false
| 4.378963
| false
| false
| false
|
iwm911/plaso
|
plaso/classifier/scanner.py
|
1
|
24473
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the classes for a scan tree-based format scanner."""
import logging
import os
from plaso.classifier import patterns
from plaso.classifier import range_list
from plaso.classifier import scan_tree
class _ScanMatch(object):
"""Class that implements a scan match."""
def __init__(self, total_data_offset, pattern):
"""Initializes the scan result.
Args:
total_data_offset: the offset of the resulting match relative
to the start of the total data scanned.
pattern: the pattern matched.
"""
super(_ScanMatch, self).__init__()
self.total_data_offset = total_data_offset
self.pattern = pattern
@property
def specification(self):
"""The specification."""
return self.pattern.specification
class _ScanResult(object):
"""Class that implements a scan result."""
def __init__(self, specification):
"""Initializes the scan result.
Args:
      specification: the corresponding format specification.
"""
super(_ScanResult, self).__init__()
self.specification = specification
self.scan_matches = []
@property
def identifier(self):
"""The specification identifier."""
return self.specification.identifier
class ScanState(object):
"""Class that implements a scan state."""
# The state definitions.
_SCAN_STATE_START = 1
_SCAN_STATE_SCANNING = 2
_SCAN_STATE_STOP = 3
def __init__(self, scan_tree_node, total_data_size=None):
"""Initializes the scan state.
Args:
scan_tree_node: the corresponding scan tree node or None.
total_data_size: optional value to indicate the total data size.
The default is None.
"""
super(ScanState, self).__init__()
self._matches = []
self.remaining_data = None
self.remaining_data_size = 0
self.scan_tree_node = scan_tree_node
self.state = self._SCAN_STATE_START
self.total_data_offset = 0
self.total_data_size = total_data_size
def AddMatch(self, total_data_offset, pattern):
"""Adds a result to the state to scanning.
Args:
total_data_offset: the offset of the resulting match relative
to the start total data scanned.
pattern: the pattern matched.
Raises:
      RuntimeError: when an unsupported state is encountered.
"""
if (self.state != self._SCAN_STATE_START and
self.state != self._SCAN_STATE_SCANNING):
raise RuntimeError(u'Unsupported scan state.')
self._matches.append(_ScanMatch(total_data_offset, pattern))
def GetMatches(self):
"""Retrieves a list containing the results.
Returns:
A list of scan matches (instances of _ScanMatch).
Raises:
      RuntimeError: when an unsupported state is encountered.
"""
if self.state != self._SCAN_STATE_STOP:
raise RuntimeError(u'Unsupported scan state.')
return self._matches
def Reset(self, scan_tree_node):
"""Resets the state to start.
This function will clear the remaining data.
Args:
scan_tree_node: the corresponding scan tree node or None.
Raises:
      RuntimeError: when an unsupported state is encountered.
"""
if self.state != self._SCAN_STATE_STOP:
raise RuntimeError(u'Unsupported scan state.')
self.remaining_data = None
self.remaining_data_size = 0
self.scan_tree_node = scan_tree_node
self.state = self._SCAN_STATE_START
def Scanning(self, scan_tree_node, total_data_offset):
"""Sets the state to scanning.
Args:
scan_tree_node: the active scan tree node.
total_data_offset: the offset of the resulting match relative
to the start of the total data scanned.
Raises:
      RuntimeError: when an unsupported state is encountered.
"""
if (self.state != self._SCAN_STATE_START and
self.state != self._SCAN_STATE_SCANNING):
raise RuntimeError(u'Unsupported scan state.')
self.scan_tree_node = scan_tree_node
self.state = self._SCAN_STATE_SCANNING
self.total_data_offset = total_data_offset
def Stop(self):
"""Sets the state to stop.
Raises:
      RuntimeError: when an unsupported state is encountered.
"""
if (self.state != self._SCAN_STATE_START and
self.state != self._SCAN_STATE_SCANNING):
raise RuntimeError(u'Unsupported scan state.')
self.scan_tree_node = None
self.state = self._SCAN_STATE_STOP
class ScanTreeScannerBase(object):
"""Class that implements a scan tree-based scanner base."""
def __init__(self, specification_store):
"""Initializes the scanner.
Args:
specification_store: the specification store (instance of
SpecificationStore) that contains the format
specifications.
"""
super(ScanTreeScannerBase, self).__init__()
self._scan_tree = None
self._specification_store = specification_store
def _ScanBufferScanState(
self, scan_tree_object, scan_state, data, data_size, total_data_offset,
total_data_size=None):
"""Scans a buffer using the scan tree.
This function implements a Boyer–Moore–Horspool equivalent approach
in combination with the scan tree.
Args:
scan_tree_object: the scan tree (instance of ScanTree).
scan_state: the scan state (instance of ScanState).
data: a buffer containing raw data.
data_size: the size of the raw data in the buffer.
total_data_offset: the offset of the data relative to the start of
the total data scanned.
total_data_size: optional value to indicate the total data size.
The default is None.
Raises:
RuntimeError: if the total data offset, total data size or the last
pattern offset value is out of bounds
"""
if total_data_size is not None and total_data_size < 0:
raise RuntimeError(u'Invalid total data size, value out of bounds.')
if total_data_offset < 0 or (
total_data_size is not None and total_data_offset >= total_data_size):
raise RuntimeError(u'Invalid total data offset, value out of bounds.')
data_offset = 0
scan_tree_node = scan_state.scan_tree_node
if scan_state.remaining_data:
      # str.join() should be more efficient than concatenation with +.
data = ''.join([scan_state.remaining_data, data])
data_size += scan_state.remaining_data_size
scan_state.remaining_data = None
scan_state.remaining_data_size = 0
if (total_data_size is not None and
total_data_offset + data_size >= total_data_size):
match_on_boundary = True
else:
match_on_boundary = False
while data_offset < data_size:
if (not match_on_boundary and
data_offset + scan_tree_object.largest_length >= data_size):
break
found_match = False
scan_done = False
while not scan_done:
scan_object = scan_tree_node.CompareByteValue(
data, data_offset, data_size, total_data_offset,
total_data_size=total_data_size)
if isinstance(scan_object, scan_tree.ScanTreeNode):
scan_tree_node = scan_object
else:
scan_done = True
if isinstance(scan_object, patterns.Pattern):
pattern_length = len(scan_object.signature.expression)
data_last_offset = data_offset + pattern_length
          if (scan_object.signature.expression ==
              data[data_offset:data_last_offset]):
if (not scan_object.signature.is_bound or
scan_object.signature.offset == data_offset):
found_match = True
logging.debug(
u'Signature match at data offset: 0x{0:08x}.'.format(
data_offset))
scan_state.AddMatch(total_data_offset + data_offset, scan_object)
if found_match:
skip_value = len(scan_object.signature.expression)
scan_tree_node = scan_tree_object.root_node
else:
last_pattern_offset = (
scan_tree_object.skip_table.skip_pattern_length - 1)
if data_offset + last_pattern_offset >= data_size:
raise RuntimeError(
u'Invalid last pattern offset, value out of bounds.')
skip_value = 0
while last_pattern_offset >= 0 and not skip_value:
last_data_offset = data_offset + last_pattern_offset
byte_value = ord(data[last_data_offset])
skip_value = scan_tree_object.skip_table[byte_value]
last_pattern_offset -= 1
if not skip_value:
skip_value = 1
scan_tree_node = scan_tree_object.root_node
data_offset += skip_value
if not match_on_boundary and data_offset < data_size:
scan_state.remaining_data = data[data_offset:data_size]
scan_state.remaining_data_size = data_size - data_offset
scan_state.Scanning(scan_tree_node, total_data_offset + data_offset)
def _ScanBufferScanStateFinal(self, scan_tree_object, scan_state):
"""Scans the remaining data in the scan state using the scan tree.
Args:
scan_tree_object: the scan tree (instance of ScanTree).
scan_state: the scan state (instance of ScanState).
"""
if scan_state.remaining_data:
data = scan_state.remaining_data
data_size = scan_state.remaining_data_size
scan_state.remaining_data = None
scan_state.remaining_data_size = 0
      # Setting the total data size ensures that boundary matches are
      # returned in this scanning pass.
total_data_size = scan_state.total_data_size
if total_data_size is None:
total_data_size = scan_state.total_data_offset + data_size
self._ScanBufferScanState(
scan_tree_object, scan_state, data, data_size,
scan_state.total_data_offset, total_data_size=total_data_size)
scan_state.Stop()
def GetScanResults(self, scan_state):
"""Retrieves the scan results.
Args:
scan_state: the scan state (instance of ScanState).
    Returns:
A list of scan results (instances of _ScanResult).
"""
scan_results = {}
for scan_match in scan_state.GetMatches():
specification = scan_match.specification
identifier = specification.identifier
logging.debug(
u'Scan match at offset: 0x{0:08x} specification: {1:s}'.format(
scan_match.total_data_offset, identifier))
if identifier not in scan_results:
scan_results[identifier] = _ScanResult(specification)
scan_results[identifier].scan_matches.append(scan_match)
return scan_results.values()
class Scanner(ScanTreeScannerBase):
"""Class that implements a scan tree-based scanner."""
_READ_BUFFER_SIZE = 512
def __init__(self, specification_store):
"""Initializes the scanner.
Args:
specification_store: the specification store (instance of
SpecificationStore) that contains the format
specifications.
"""
super(Scanner, self).__init__(specification_store)
def ScanBuffer(self, scan_state, data, data_size):
"""Scans a buffer.
Args:
scan_state: the scan state (instance of ScanState).
data: a buffer containing raw data.
data_size: the size of the raw data in the buffer.
"""
self._ScanBufferScanState(
self._scan_tree, scan_state, data, data_size,
scan_state.total_data_offset,
total_data_size=scan_state.total_data_size)
def ScanFileObject(self, file_object):
"""Scans a file-like object.
Args:
file_object: a file-like object.
Returns:
A list of scan results (instances of ScanResult).
"""
file_offset = 0
if hasattr(file_object, 'get_size'):
file_size = file_object.get_size()
else:
file_object.seek(0, os.SEEK_END)
file_size = file_object.tell()
scan_state = self.StartScan(total_data_size=file_size)
file_object.seek(file_offset, os.SEEK_SET)
while file_offset < file_size:
data = file_object.read(self._READ_BUFFER_SIZE)
data_size = len(data)
if data_size == 0:
break
self._ScanBufferScanState(
self._scan_tree, scan_state, data, data_size, file_offset,
total_data_size=file_size)
file_offset += data_size
self.StopScan(scan_state)
return self.GetScanResults(scan_state)
def StartScan(self, total_data_size=None):
"""Starts a scan.
The function sets up the scanning related structures if necessary.
Args:
total_data_size: optional value to indicate the total data size.
The default is None.
Returns:
A scan state (instance of ScanState).
Raises:
RuntimeError: when total data size is invalid.
"""
if total_data_size is not None and total_data_size < 0:
raise RuntimeError(u'Invalid total data size.')
if self._scan_tree is None:
self._scan_tree = scan_tree.ScanTree(
self._specification_store, None)
return ScanState(self._scan_tree.root_node, total_data_size=total_data_size)
def StopScan(self, scan_state):
"""Stops a scan.
Args:
scan_state: the scan state (instance of ScanState).
"""
self._ScanBufferScanStateFinal(self._scan_tree, scan_state)
class OffsetBoundScanner(ScanTreeScannerBase):
"""Class that implements an offset-bound scan tree-based scanner."""
_READ_BUFFER_SIZE = 512
def __init__(self, specification_store):
"""Initializes the scanner.
Args:
specification_store: the specification store (instance of
SpecificationStore) that contains the format
specifications.
"""
super(OffsetBoundScanner, self).__init__(specification_store)
self._footer_scan_tree = None
self._footer_spanning_range = None
self._header_scan_tree = None
self._header_spanning_range = None
def _GetFooterRange(self, total_data_size):
"""Retrieves the read buffer aligned footer range.
Args:
      total_data_size: the total data size.
Returns:
A range (instance of Range).
"""
# The actual footer range is in reverse since the spanning footer range
# is based on positive offsets, where 0 is the end of file.
if self._footer_spanning_range.end_offset < total_data_size:
footer_range_start_offset = (
total_data_size - self._footer_spanning_range.end_offset)
else:
footer_range_start_offset = 0
# Calculate the lower bound modulus of the footer range start offset
# in increments of the read buffer size.
footer_range_start_offset /= self._READ_BUFFER_SIZE
footer_range_start_offset *= self._READ_BUFFER_SIZE
# Calculate the upper bound modulus of the footer range size
# in increments of the read buffer size.
footer_range_size = self._footer_spanning_range.size
remainder = footer_range_size % self._READ_BUFFER_SIZE
footer_range_size /= self._READ_BUFFER_SIZE
if remainder > 0:
footer_range_size += 1
footer_range_size *= self._READ_BUFFER_SIZE
return range_list.Range(footer_range_start_offset, footer_range_size)
def _GetHeaderRange(self):
"""Retrieves the read buffer aligned header range.
Returns:
A range (instance of Range).
"""
# Calculate the lower bound modulus of the header range start offset
# in increments of the read buffer size.
header_range_start_offset = self._header_spanning_range.start_offset
header_range_start_offset /= self._READ_BUFFER_SIZE
header_range_start_offset *= self._READ_BUFFER_SIZE
# Calculate the upper bound modulus of the header range size
# in increments of the read buffer size.
header_range_size = self._header_spanning_range.size
remainder = header_range_size % self._READ_BUFFER_SIZE
header_range_size /= self._READ_BUFFER_SIZE
if remainder > 0:
header_range_size += 1
header_range_size *= self._READ_BUFFER_SIZE
return range_list.Range(header_range_start_offset, header_range_size)
def _ScanBufferScanState(
self, scan_tree_object, scan_state, data, data_size, total_data_offset,
total_data_size=None):
"""Scans a buffer using the scan tree.
This function implements a Boyer–Moore–Horspool equivalent approach
in combination with the scan tree.
Args:
scan_tree_object: the scan tree (instance of ScanTree).
scan_state: the scan state (instance of ScanState).
data: a buffer containing raw data.
data_size: the size of the raw data in the buffer.
total_data_offset: the offset of the data relative to the start of
the total data scanned.
total_data_size: optional value to indicate the total data size.
The default is None.
"""
scan_done = False
scan_tree_node = scan_tree_object.root_node
while not scan_done:
data_offset = 0
scan_object = scan_tree_node.CompareByteValue(
data, data_offset, data_size, total_data_offset,
total_data_size=total_data_size)
if isinstance(scan_object, scan_tree.ScanTreeNode):
scan_tree_node = scan_object
else:
scan_done = True
if isinstance(scan_object, patterns.Pattern):
pattern_length = len(scan_object.signature.expression)
pattern_start_offset = scan_object.signature.offset
pattern_end_offset = pattern_start_offset + pattern_length
        if (scan_object.signature.expression ==
            data[pattern_start_offset:pattern_end_offset]):
scan_state.AddMatch(
total_data_offset + scan_object.signature.offset, scan_object)
logging.debug(
            u'Signature match at data offset: 0x{0:08x}.'.format(pattern_start_offset))
# TODO: implement.
# def ScanBuffer(self, scan_state, data, data_size):
# """Scans a buffer.
# Args:
# scan_state: the scan state (instance of ScanState).
# data: a buffer containing raw data.
# data_size: the size of the raw data in the buffer.
# """
# # TODO: fix footer scanning logic.
# # need to know the file size here for the footers.
# # TODO: check for clashing ranges?
# header_range = self._GetHeaderRange()
# footer_range = self._GetFooterRange(scan_state.total_data_size)
# if self._scan_tree == self._header_scan_tree:
# if (scan_state.total_data_offset >= header_range.start_offset and
# scan_state.total_data_offset < header_range.end_offset):
# self._ScanBufferScanState(
# self._scan_tree, scan_state, data, data_size,
# scan_state.total_data_offset,
# total_data_size=scan_state.total_data_size)
# elif scan_state.total_data_offset > header_range.end_offset:
# # TODO: implement.
# pass
# if self._scan_tree == self._footer_scan_tree:
# if (scan_state.total_data_offset >= footer_range.start_offset and
# scan_state.total_data_offset < footer_range.end_offset):
# self._ScanBufferScanState(
# self._scan_tree, scan_state, data, data_size,
# scan_state.total_data_offset,
# total_data_size=scan_state.total_data_size)
def ScanFileObject(self, file_object):
"""Scans a file-like object.
Args:
file_object: a file-like object.
Returns:
      A list of scan results (instances of _ScanResult).
"""
# TODO: add support for fixed size block-based reads.
if hasattr(file_object, 'get_size'):
file_size = file_object.get_size()
else:
file_object.seek(0, os.SEEK_END)
file_size = file_object.tell()
file_offset = 0
scan_state = self.StartScan(total_data_size=file_size)
if self._header_scan_tree.root_node is not None:
header_range = self._GetHeaderRange()
# TODO: optimize the read by supporting fixed size block-based reads.
# if file_offset < header_range.start_offset:
# file_offset = header_range.start_offset
file_object.seek(file_offset, os.SEEK_SET)
# TODO: optimize the read by supporting fixed size block-based reads.
# data = file_object.read(header_range.size)
data = file_object.read(header_range.end_offset)
data_size = len(data)
if data_size > 0:
self._ScanBufferScanState(
self._scan_tree, scan_state, data, data_size, file_offset,
total_data_size=file_size)
file_offset += data_size
if self._footer_scan_tree.root_node is not None:
self.StopScan(scan_state)
self._scan_tree = self._footer_scan_tree
scan_state.Reset(self._scan_tree.root_node)
if self._footer_scan_tree.root_node is not None:
footer_range = self._GetFooterRange(file_size)
      # Note that the offset in the footer scan tree starts at 0. Make sure
# the data offset of the data being scanned is aligned with the offset
# in the scan tree.
if footer_range.start_offset < self._footer_spanning_range.end_offset:
data_offset = (
self._footer_spanning_range.end_offset - footer_range.start_offset)
else:
data_offset = 0
if file_offset < footer_range.start_offset:
file_offset = footer_range.start_offset
file_object.seek(file_offset, os.SEEK_SET)
data = file_object.read(self._READ_BUFFER_SIZE)
data_size = len(data)
if data_size > 0:
self._ScanBufferScanState(
self._scan_tree, scan_state, data[data_offset:],
data_size - data_offset, file_offset + data_offset,
total_data_size=file_size)
file_offset += data_size
self.StopScan(scan_state)
return self.GetScanResults(scan_state)
def StartScan(self, total_data_size=None):
"""Starts a scan.
The function sets up the scanning related structures if necessary.
Args:
      total_data_size: the total data size; this scanner requires it to be
                       set and non-negative.
    Returns:
      A scan state (instance of ScanState).
Raises:
RuntimeError: when total data size is invalid.
"""
if total_data_size is None or total_data_size < 0:
raise RuntimeError(u'Invalid total data size.')
if self._header_scan_tree is None:
self._header_scan_tree = scan_tree.ScanTree(
self._specification_store, True,
offset_mode=scan_tree.ScanTree.OFFSET_MODE_POSITIVE)
if self._header_spanning_range is None:
spanning_range = self._header_scan_tree.range_list.GetSpanningRange()
self._header_spanning_range = spanning_range
if self._footer_scan_tree is None:
self._footer_scan_tree = scan_tree.ScanTree(
self._specification_store, True,
offset_mode=scan_tree.ScanTree.OFFSET_MODE_NEGATIVE)
if self._footer_spanning_range is None:
spanning_range = self._footer_scan_tree.range_list.GetSpanningRange()
self._footer_spanning_range = spanning_range
if self._header_scan_tree.root_node is not None:
self._scan_tree = self._header_scan_tree
elif self._footer_scan_tree.root_node is not None:
self._scan_tree = self._footer_scan_tree
else:
self._scan_tree = None
if self._scan_tree is not None:
root_node = self._scan_tree.root_node
else:
root_node = None
return ScanState(root_node, total_data_size=total_data_size)
def StopScan(self, scan_state):
"""Stops a scan.
Args:
scan_state: the scan state (instance of ScanState).
"""
self._ScanBufferScanStateFinal(self._scan_tree, scan_state)
self._scan_tree = None
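  # Hedged usage sketch: building and populating the specification store
  # happens outside this module, so the store below is assumed to exist.
  #   scanner = Scanner(specification_store)
  #   with open('some.file', 'rb') as file_object:
  #     scan_results = scanner.ScanFileObject(file_object)
  #   for scan_result in scan_results:
  #     print scan_result.identifier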
|
apache-2.0
| -7,426,356,127,187,481,000
| 31.576565
| 80
| 0.651707
| false
| 3.871657
| false
| false
| false
|
dims/neutron
|
neutron/common/config.py
|
1
|
13000
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import sys
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log as logging
import oslo_messaging
from oslo_service import wsgi
from neutron._i18n import _, _LI
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron import policy
from neutron import version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.PortOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions. "
"Note that this can be a colon-separated list of paths. "
"For example: api_extensions_path = "
"extensions:/path/to/more/exts:/even/more/exts. "
"The __path__ of neutron.extensions is appended to "
"this, so if your extensions are in there you don't "
"need to specify them here.")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs. "
"The first 3 octets will remain unchanged. If the 4th "
"octet is not 00, it will also be used. The others "
"will be randomly generated.")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Neutron will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
    cfg.BoolOpt('allow_pagination', default=False,
                help=_("Allow the usage of pagination")),
    cfg.BoolOpt('allow_sorting', default=False,
                help=_("Allow the usage of sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.ListOpt('default_availability_zones', default=[],
help=_("Default value of availability zone hints. The "
"availability zone aware schedulers use this when "
"the resources availability_zone_hints is empty. "
"Multiple availability zones can be specified by a "
"comma separated string. This value can be empty. "
"In this case, even if availability_zone_hints for "
"a resource is empty, availability zone is "
"considered for high availability while scheduling "
"the resource.")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers per subnet")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
deprecated_for_removal=True,
help=_("Maximum number of fixed ips per port. This option "
"is deprecated and will be removed in the N "
"release.")),
cfg.StrOpt('default_ipv4_subnet_pool', deprecated_for_removal=True,
help=_("Default IPv4 subnet pool to be used for automatic "
"subnet CIDR allocation. "
"Specifies by UUID the pool to be used in case where "
"creation of a subnet is being called without a "
"subnet pool ID. If not set then no pool "
"will be used unless passed explicitly to the subnet "
"create. If no pool is used, then a CIDR must be passed "
"to create a subnet and that subnet will not be "
"allocated from any pool; it will be considered part of "
"the tenant's private address space. This option is "
"deprecated for removal in the N release.")),
cfg.StrOpt('default_ipv6_subnet_pool', deprecated_for_removal=True,
help=_("Default IPv6 subnet pool to be used for automatic "
"subnet CIDR allocation. "
"Specifies by UUID the pool to be used in case where "
"creation of a subnet is being called without a "
"subnet pool ID. See the description for "
"default_ipv4_subnet_pool for more information. This "
"option is deprecated for removal in the N release.")),
cfg.BoolOpt('ipv6_pd_enabled', default=False,
help=_("Enables IPv6 Prefix Delegation for automatic subnet "
"CIDR allocation. "
"Set to True to enable IPv6 Prefix Delegation for "
"subnet allocation in a PD-capable environment. Users "
"making subnet creation requests for IPv6 subnets "
"without providing a CIDR or subnetpool ID will be "
"given a CIDR via the Prefix Delegation mechanism. "
"Note that enabling PD will override the behavior of "
"the default IPv6 subnetpool.")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
deprecated_name='dhcp_lease_time',
help=_("DHCP lease duration (in seconds). Use -1 to tell "
"dnsmasq to use infinite lease times.")),
cfg.StrOpt('dns_domain',
default='openstacklocal',
help=_('Domain to use for building the hostnames')),
cfg.StrOpt('external_dns_driver',
help=_('Driver for external DNS integration.')),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron. "
"Attention: the following parameter MUST be set to "
"False if Neutron is being used in conjunction with "
"Nova security groups.")),
cfg.StrOpt('host', default=utils.get_hostname(),
sample_default='example.domain',
help=_("Hostname to be used by the Neutron server, agents and "
"services running on this machine. All the agents and "
"services running on this machine must use the same "
"host value.")),
cfg.BoolOpt('force_gateway_on_subnet', default=True,
deprecated_for_removal=True,
help=_("Ensure that configured gateway is on subnet. "
"For IPv6, validate only if gateway is not a link "
"local address. Deprecated, to be removed during the "
"Newton release, at which point the gateway will not "
"be forced on to subnet.")),
cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
help=_("Send notification to nova when port status changes")),
cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
help=_("Send notification to nova when port data (fixed_ips/"
"floatingip) changes so nova can update its cache.")),
cfg.IntOpt('send_events_interval', default=2,
help=_('Number of seconds between sending events to nova if '
'there are any events to send.')),
cfg.BoolOpt('advertise_mtu', default=True,
help=_('If True, advertise network MTU values if core plugin '
'calculates them. MTU is advertised to running '
'instances via DHCP and RA MTU options.')),
cfg.StrOpt('ipam_driver',
help=_("Neutron IPAM (IP address management) driver to use. "
"If ipam_driver is not set (default behavior), no IPAM "
"driver is used. In order to use the reference "
"implementation of Neutron IPAM driver, "
"use 'internal'.")),
cfg.BoolOpt('vlan_transparent', default=False,
help=_('If True, then allow plugins that support it to '
'create VLAN transparent networks.')),
cfg.StrOpt('web_framework', default='legacy',
choices=('legacy', 'pecan'),
help=_("This will choose the web framework in which to run "
"the Neutron API server. 'pecan' is a new experiemental "
"rewrite of the API server."))
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
wsgi.register_opts(cfg.CONF)
# Ensure that the control exchange is set correctly
oslo_messaging.set_transport_defaults(control_exchange='neutron')
def set_db_defaults():
# Update the default QueuePool parameters. These can be tweaked by the
# conf variables - max_pool_size, max_overflow and pool_timeout
db_options.set_defaults(
cfg.CONF,
connection='sqlite://',
sqlite_db='', max_pool_size=10,
max_overflow=20, pool_timeout=10)
set_db_defaults()
NOVA_CONF_SECTION = 'nova'
ks_loading.register_auth_conf_options(cfg.CONF, NOVA_CONF_SECTION)
ks_loading.register_session_conf_options(cfg.CONF, NOVA_CONF_SECTION)
nova_opts = [
cfg.StrOpt('region_name',
help=_('Name of nova region to use. Useful if keystone manages'
' more than one region.')),
cfg.StrOpt('endpoint_type',
default='public',
choices=['public', 'admin', 'internal'],
help=_('Type of the nova endpoint to use. This endpoint will'
' be looked up in the keystone catalog and should be'
' one of public, internal or admin.')),
]
cfg.CONF.register_opts(nova_opts, group=NOVA_CONF_SECTION)
logging.register_options(cfg.CONF)
def init(args, **kwargs):
cfg.CONF(args=args, project='neutron',
version='%%(prog)s %s' % version.version_info.release_string(),
**kwargs)
# FIXME(ihrachys): if import is put in global, circular import
# failure occurs
from neutron.common import rpc as n_rpc
n_rpc.init(cfg.CONF)
# Validate that the base_mac is of the correct format
msg = attributes._validate_regex(cfg.CONF.base_mac,
attributes.MAC_PATTERN)
if msg:
msg = _("Base MAC: %s") % msg
raise Exception(msg)
def setup_logging():
"""Sets up the logging options for a log with supplied name."""
product_name = "neutron"
logging.setup(cfg.CONF, product_name)
LOG.info(_LI("Logging enabled!"))
LOG.info(_LI("%(prog)s version %(version)s"),
{'prog': sys.argv[0],
'version': version.version_info.release_string()})
LOG.debug("command line: %s", " ".join(sys.argv))
def reset_service():
# Reset worker in case SIGHUP is called.
# Note that this is called only in case a service is running in
# daemon mode.
setup_logging()
policy.refresh()
def load_paste_app(app_name):
"""Builds and returns a WSGI app from a paste config file.
:param app_name: Name of the application to load
"""
loader = wsgi.Loader(cfg.CONF)
app = loader.load_app(app_name)
return app
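# Hedged usage sketch of the usual bootstrap order; 'neutron' is shown only
# as an illustrative paste app name:
#   init(sys.argv[1:])
#   setup_logging()
#   app = load_paste_app('neutron')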
|
apache-2.0
| -2,916,700,827,220,835,000
| 46.619048
| 79
| 0.585846
| false
| 4.506066
| false
| false
| false
|
nginxinc/kubernetes-ingress
|
tests/suite/grpc/helloworld_pb2.py
|
1
|
3911
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: helloworld.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='helloworld.proto',
package='helloworld',
syntax='proto3',
serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3')
)
_HELLOREQUEST = _descriptor.Descriptor(
name='HelloRequest',
full_name='helloworld.HelloRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='helloworld.HelloRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=60,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='helloworld.HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='helloworld.HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=91,
)
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
DESCRIPTOR = _HELLOREQUEST,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
))
_sym_db.RegisterMessage(HelloRequest)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
_GREETER = _descriptor.ServiceDescriptor(
name='Greeter',
full_name='helloworld.Greeter',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=93,
serialized_end=166,
methods=[
_descriptor.MethodDescriptor(
name='SayHello',
full_name='helloworld.Greeter.SayHello',
index=0,
containing_service=None,
input_type=_HELLOREQUEST,
output_type=_HELLOREPLY,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_GREETER)
DESCRIPTOR.services_by_name['Greeter'] = _GREETER
# @@protoc_insertion_point(module_scope)
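# Hedged usage sketch (this module is generated, so the lines below are
# illustrative only and rely on the standard protobuf message API):
#   request = HelloRequest(name='world')
#   payload = request.SerializeToString()
#   assert HelloRequest.FromString(payload).name == 'world'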
|
apache-2.0
| -1,847,906,107,374,825,000
| 28.19403
| 369
| 0.722833
| false
| 3.156578
| false
| true
| false
|
ercanezin/ce888labs
|
lab8/imdb.py
|
1
|
2192
|
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Model
from keras.layers import Dense, Activation, Embedding, GlobalMaxPooling1D,Convolution1D, Input,LSTM,merge
from keras.datasets import imdb
max_features = 20000
maxlen = 80 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
### PREPROCESSING
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print (X_train[0])
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
### PREPROCESSING ENDS
inputs = Input(shape=(maxlen,))
m = inputs
m = Embedding(max_features, 128, dropout=0.2)(m)
x = Convolution1D(nb_filter=32, filter_length=4, border_mode='valid',activation='relu', subsample_length=1)(m)
x = GlobalMaxPooling1D()(x)
y = LSTM(70)(m)
z = merge([x, y], mode='concat', concat_axis=1)
z = Dense(1)(z)
predictions = Activation("sigmoid")(z)
model = Model(input=inputs, output=predictions)
#
# model = Sequential()
# model.add(Embedding(max_features, embedding_size, input_length=maxlen))
# model.add(Dropout(0.25))
# model.add(Convolution1D(nb_filter=nb_filter,
# filter_length=filter_length,
# border_mode='valid',
# activation='relu',
# subsample_length=1))
# model.add(MaxPooling1D(pool_length=pool_length))
# model.add(LSTM(lstm_output_size))
# model.add(Dense(1))
# model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
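# A minimal follow-up sketch: inspect a few predictions. The 0.5 threshold
# for turning sigmoid outputs into class labels is an assumption.
probs = model.predict(X_test[:5], batch_size=batch_size)
print('Predicted labels:', (probs.ravel() > 0.5).astype(int))
print('True labels:     ', y_test[:5])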
|
gpl-3.0
| 8,493,642,314,764,031,000
| 24.206897
| 110
| 0.69115
| false
| 3.100424
| true
| false
| false
|
zergov/flashcards
|
flashcards/sets.py
|
1
|
3877
|
"""
flashcards.sets
~~~~~~~~~~~~~~~~~~~
Contain the StudySet object and logic related to it.
"""
from collections import OrderedDict
from flashcards import cards
from flashcards.cards import StudyCard
TITLE_KEY = 'title'
DESC_KEY = 'description'
CARDS_KEY = 'cards'
def create_from_dict(data):
"""
Construct a StudySet Object from a dictionary object.
:param data: the dictionary object
    :raises KeyError: when the dictionary is missing a field needed to create the object
:raises ValueError: if cards field in data is not of type list
:returns: StudySet object
"""
_assert_data_is_valid(data)
title = data[TITLE_KEY]
description = data[DESC_KEY]
study_cards = [cards.create_from_dict(card) for card in data[CARDS_KEY]]
study_set = StudySet(title, description)
for card in study_cards:
study_set.add(card)
return study_set
def _assert_data_is_valid(data):
""" Check that data received in `create_from_dict` has a valid format """
if TITLE_KEY not in data:
raise KeyError("Invalid data string. %s key is missing" % TITLE_KEY)
if DESC_KEY not in data:
raise KeyError("Invalid data string. %s key is missing" % DESC_KEY)
if CARDS_KEY not in data:
raise KeyError("Invalid data string. %s key is missing" % CARDS_KEY)
if not isinstance(data[CARDS_KEY], list):
raise ValueError("Invalid data type. %s value's should be a list"
% CARDS_KEY)
class StudySet(object):
"""
A StudySet is a container of flash cards.
"""
def __init__(self, title, description=None):
"""
Creates a Study set.
:param title: The title of the study set.
:param description: The description for this study set.
"""
self._title = title
self._description = '' if description is None else description
self._cards = []
def __iter__(self):
"""Iter through the cards of this set."""
return iter(self._cards)
def __len__(self):
"""Return the number of cards in this StudySet."""
return len(self._cards)
@property
def title(self):
"""
Get the title of this set.
:returns: The title of this Study set.
"""
return self._title
@title.setter
def title(self, value):
"""
Set the title of this set.
:param value: The new title for this set
"""
if isinstance(value, basestring):
self._title = value
else:
raise TypeError("StudySet title should be of type str")
@property
def description(self):
"""
Get the description of this set.
"""
return self._description
@description.setter
def description(self, value):
"""
Set the description of this set.
:param value: The new description for this set
"""
if isinstance(value, basestring):
self._description = value
else:
raise TypeError("StudySet description should be of type str")
def add(self, card):
"""
Add a card to the end of this set.
:param card: A subclass of flashcards.cards.StudyCard object.
"""
if isinstance(card, StudyCard):
self._cards.append(card)
else:
raise TypeError("A Set can only contain instances of "
"StudyCard objects.")
def to_dict(self):
"""
Get a dictionary object representing this StudySet.
:returns: a dictionary object representation of this StudySet.
"""
serialized_cards = [c.to_dict() for c in self]
data = ((TITLE_KEY, self.title),
(DESC_KEY, self.description),
(CARDS_KEY, serialized_cards))
return OrderedDict(data)
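# Hedged usage sketch: a serialization round trip with made-up titles.
#   study_set = StudySet('French', 'Basic vocabulary')
#   data = study_set.to_dict()
#   restored = create_from_dict(data)
#   assert restored.title == 'French' and len(restored) == 0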
|
mit
| -994,116,634,628,581,500
| 25.923611
| 77
| 0.594532
| false
| 4.21413
| false
| false
| false
|
googleapis/googleapis-gen
|
google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/resources/types/search_term_view.py
|
1
|
2213
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import search_term_targeting_status
__protobuf__ = proto.module(
package='google.ads.googleads.v6.resources',
marshal='google.ads.googleads.v6',
manifest={
'SearchTermView',
},
)
class SearchTermView(proto.Message):
r"""A search term view with metrics aggregated by search term at
the ad group level.
Attributes:
resource_name (str):
Output only. The resource name of the search term view.
Search term view resource names have the form:
``customers/{customer_id}/searchTermViews/{campaign_id}~{ad_group_id}~{URL-base64_search_term}``
search_term (str):
Output only. The search term.
ad_group (str):
Output only. The ad group the search term
served in.
status (google.ads.googleads.v6.enums.types.SearchTermTargetingStatusEnum.SearchTermTargetingStatus):
Output only. Indicates whether the search
term is currently one of your targeted or
excluded keywords.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
search_term = proto.Field(
proto.STRING,
number=5,
optional=True,
)
ad_group = proto.Field(
proto.STRING,
number=6,
optional=True,
)
status = proto.Field(
proto.ENUM,
number=4,
enum=search_term_targeting_status.SearchTermTargetingStatusEnum.SearchTermTargetingStatus,
)
__all__ = tuple(sorted(__protobuf__.manifest))
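# Hedged usage sketch using the proto-plus message API; the resource name
# below is a made-up placeholder:
#   view = SearchTermView(
#       resource_name='customers/123/searchTermViews/1~2~dGVzdA',
#       search_term='test',
#   )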
|
apache-2.0
| -114,439,431,116,269,020
| 29.736111
| 109
| 0.658834
| false
| 4.023636
| false
| false
| false
|
lpryszcz/REDiscover
|
taxid2sra.py
|
1
|
13105
|
#!/usr/bin/env python
desc="""Fetch all entries from SRA for given taxid.
Save the biggest run per SAMPLE (SRS) from a given date. Paired first, if any.
Note, it runs fastq-dump in the background. Make sure you have enough free cores;)
DEPENDENCIES:
Biopython
"""
epilog="""Author:
l.p.pryszcz+git@gmail.com
Barcelona, 2/10/2012
"""
import argparse, os, re, sys, gzip
from datetime import datetime
from ftplib import FTP
from Bio import Entrez
import xml.etree.ElementTree as ET
def srr2info(srr):
"""Return info for SRR entry
- experiment id
- submission id
- project id
- biosample id
- run date
- bases
- insert size
- insert std
- reads orientation
"""
'''
for child in root[0]: print child.tag, child.attrib
EXPERIMENT {'center_name': 'BI', 'alias': '74116.WR23613.Solexa-42619.62C7UAAXX100916.P', 'accession': 'SRX026545'}
SUBMISSION {'submission_date': '2009-06-01T02:01:25Z', 'lab_name': 'Genome Sequencing', 'submission_comment': 'Produced by user cristyn on Sun May 31 22:01:25 EDT 2009', 'alias': 'BI.Streptococcus_pyogenes_Pathogenomics', 'center_name': 'BI', 'accession': 'SRA008647'}
STUDY {'center_name': 'BI', 'alias': 'Fusarium_oxysporum_Diversity_RNA_Sequencing_multi_isolate', 'accession': 'SRP002351'}
SAMPLE {'center_name': 'BI', 'alias': '74336.0', 'accession': 'SRS190364'}
RUN_SET {}
root[0][0].keys()
['center_name', 'alias', 'accession']
'''
#search NCBI
result = Entrez.read( Entrez.esearch(db="sra",term=srr ) )
if not result['IdList']:
sys.stderr.write( " Entrez Error: No results for %s\n" % srr )
return
elif len(result['IdList'])>1:
sys.stderr.write( " Entrez Warning: Multiple hits for %s: %s\n" % (srr,",".join(result['IdList'])) )
#fetch info from NCBI
xml = Entrez.efetch( db="sra",id=result['IdList'][0] ).read()
root = ET.fromstring(xml)#; print xml
#get experiment
EXPERIMENT = root[0].find("EXPERIMENT")
srx = EXPERIMENT.attrib['accession']
#get submission
s = root[0].find("SUBMISSION")
sra = s.attrib['accession']
#get accession
s = root[0].find("STUDY")
srp = s.attrib['accession']
#get accession
s = root[0].find("SAMPLE")
srs = s.attrib['accession']
s = root[0].find('RUN_SET') #it's within RUN_SET
date = s[0].attrib['run_date']
bases = s[0].attrib['total_bases']
#LIBRARY_LAYOUT - maybe try to simplify it
isize=istdv=orient = 0
DESIGN = EXPERIMENT.find("DESIGN") # [2][2][4][0].attrib#; print layout
LIBRARY_DESCRIPTOR = DESIGN.find("LIBRARY_DESCRIPTOR")
LIBRARY_LAYOUT = LIBRARY_DESCRIPTOR.find("LIBRARY_LAYOUT")
PAIRED = LIBRARY_LAYOUT.find("PAIRED")
if PAIRED is not None:
layout = PAIRED.attrib
isize = layout['NOMINAL_LENGTH'] # NOMINAL_LENGTH="476"
orient = layout['ORIENTATION'] # ORIENTATION="5\'3\'-3\'5\'
istdv = layout['NOMINAL_SDEV'] ## PAIRED NOMINAL_SDEV="149.286"
return ( srx,sra,srp,srs,date,bases,isize,istdv,orient )
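# Illustrative call (needs network access and an Entrez e-mail; the accession
# below is hypothetical):
#   Entrez.email = "you@example.org"
#   info = srr2info("SRR000001")
#   if info:
#       srx, sra, srp, srs, date, bases, isize, istdv, orient = info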
def xml2data(child, taxid2srs, verbose):
""" """
#get experiment
EXPERIMENT = child.find("EXPERIMENT")
srx = EXPERIMENT.attrib['accession']
#get submission
s = child.find("SUBMISSION")
sra = s.attrib['accession']
#get accession
s = child.find("STUDY")
srp = s.attrib['accession']
#get accession
for SAMPLE in child.findall("SAMPLE"):
#if SAMPLE.attrib['accession']!=
srs = SAMPLE.attrib['accession']
#get taxid
SAMPLE_NAME = SAMPLE.find("SAMPLE_NAME")
TAXON_ID = SAMPLE_NAME.find("TAXON_ID")
taxid = int(TAXON_ID.text)
SCIENTIFIC_NAME = SAMPLE_NAME.find("SCIENTIFIC_NAME")
#malformed xml?
if SCIENTIFIC_NAME is None:
return taxid2srs
strain = SCIENTIFIC_NAME.text
strain0 = tissue = stage = ""
#get strain tag - this may cause problems with non-ENA accessions!
SAMPLE_ATTRIBUTES = SAMPLE.find("SAMPLE_ATTRIBUTES")
if SAMPLE_ATTRIBUTES is None:
continue
for SAMPLE_ATTRIBUTE in SAMPLE_ATTRIBUTES.findall("SAMPLE_ATTRIBUTE"):
#print SAMPLE_ATTRIBUTE.find("TAG").text
if SAMPLE_ATTRIBUTE.find("TAG").text == "strain":
#print SAMPLE_ATTRIBUTE.find("VALUE")
strain += " %s" % SAMPLE_ATTRIBUTE.find("VALUE").text
strain0 = SAMPLE_ATTRIBUTE.find("VALUE").text
elif SAMPLE_ATTRIBUTE.find("TAG").text == "ArrayExpress-OrganismPart":
tissue = SAMPLE_ATTRIBUTE.find("VALUE").text
elif SAMPLE_ATTRIBUTE.find("TAG").text == "ArrayExpress-StrainOrLine":
strain0 = SAMPLE_ATTRIBUTE.find("VALUE").text
elif SAMPLE_ATTRIBUTE.find("TAG").text == "ArrayExpress-DevelopmentalStage":
stage = SAMPLE_ATTRIBUTE.find("VALUE").text
if strain!="unidentified organism":
break
# get tissue
#LIBRARY_LAYOUT - maybe try to simplify it
DESIGN = EXPERIMENT.find("DESIGN") # [2][2][4][0].attrib#; print layout
LIBRARY_DESCRIPTOR = DESIGN.find("LIBRARY_DESCRIPTOR")
LIBRARY_LAYOUT = LIBRARY_DESCRIPTOR.find("LIBRARY_LAYOUT")
LIBRARY_CONSTRUCTION_PROTOCOL = LIBRARY_DESCRIPTOR.find("LIBRARY_CONSTRUCTION_PROTOCOL")# RNA-seq dUTP eukaryotic
stranded = ""
if LIBRARY_CONSTRUCTION_PROTOCOL is not None and LIBRARY_CONSTRUCTION_PROTOCOL.text is not None:
stranded = re.sub('[ \t\n\r]+', ' ', LIBRARY_CONSTRUCTION_PROTOCOL.text)
orient = ""
isize = istdv = 0
PAIRED = LIBRARY_LAYOUT.find("PAIRED")
if PAIRED is not None:
layout = PAIRED.attrib
if 'NOMINAL_LENGTH' in layout: isize = float(layout['NOMINAL_LENGTH']) # NOMINAL_LENGTH="476"
if 'NOMINAL_SDEV' in layout: istdv = float(layout['NOMINAL_SDEV']) ##PAIRED NOMINAL_SDEV="149.286"
if 'ORIENTATION' in layout: orient = layout['ORIENTATION'] #ORIENTATION="5\'3\'-3\'5\'
#run data
runs = []
RUN_SET = child.find('RUN_SET') #it's within RUN_SET
for RUN in RUN_SET.findall("RUN"):
srr = RUN.attrib['accession']
date = assembly = ""
bases = size = 0
if 'size' in RUN.attrib: size = RUN.attrib['size']
if 'run_date' in RUN.attrib: date = RUN.attrib['run_date']
if 'total_bases' in RUN.attrib: bases = int(RUN.attrib['total_bases'])
if "assembly" in RUN.attrib: assembly = RUN.attrib["assembly"]
runs.append((srr, assembly, size, bases, date))
#store data
childdata = (strain, strain0, tissue, stage, taxid, srx, srp, isize, istdv, orient, stranded, runs)
if verbose:
sys.stderr.write( " %s: %s: %s\n" % (taxid, srs, str(childdata)))
if not taxid in taxid2srs:
taxid2srs[taxid] = {}
if not srs in taxid2srs[taxid]:
taxid2srs[taxid][srs] = []
taxid2srs[taxid][srs].append(childdata)
return taxid2srs
def taxid2runs(outfn, taxid, verbose, db="sra", retmode="xml", retmax=10**6):
"""Return info from SRA for given taxid. """
taxid2srs = {}
#search NCBI
term = 'txid%s[organism] AND sra_public[filter] AND "biomol rna"[Properties]' % taxid
if verbose:
sys.stderr.write("Query: %s\n" % term)
result = Entrez.read(Entrez.esearch(db=db, term=term, retmax=retmax))#; print result
ids = result['IdList']
if not ids:
sys.stderr.write(" Entrez Error: No results for %s\n" % taxid)
return
if verbose:
sys.stderr.write("Downloading %s entries from NCBI %s database...\n" % (len(ids), db))
#post NCBI query
for id in ids:
xmlfn = os.path.join(".xml", "%s.xml.gz"%id)
if os.path.isfile(xmlfn):
xml = "".join(l for l in gzip.open(xmlfn))
else:
xml = Entrez.efetch(db=db, retmode=retmode, id=id).read()#; print xml
with gzip.open(xmlfn, "w") as out:
out.write(xml)
root = ET.fromstring(xml)
child = root[0]
taxid2srs = xml2data(child, taxid2srs, verbose)
#print output
out = open(outfn, "w")
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
header = "#Strain\tStrain0\tTissue\tStage\tTaxid\tSample\tExperiment\tProject\tRun\tInsert size\tOrientation\tStranded\tAssembly\tSize\tBases\tDate\n"
out.write(header)
info = "%s\t"*15+"%s\n"
sys.stderr.write("Saving SRA info to: %s\n" % outfn)
for taxid in taxid2srs:
for srs in taxid2srs[taxid]:
for strain, strain0, tissue, stage, taxid, srx, srp, isize, istdv, orient, stranded, runs in taxid2srs[taxid][srs]:
for srr, assembly, size, bases, date in runs:
line = info%(strain, strain0, tissue, stage, taxid, srs, srx, srp, srr, isize, orient, stranded, assembly, size, bases, date)
out.write(line.encode('ascii', 'xmlcharrefreplace'))
out.close()
return taxid2srs
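# For example, for taxid 4932 (illustrative) the Entrez query built above is:
#   txid4932[organism] AND sra_public[filter] AND "biomol rna"[Properties]
# and every matching entry is cached under .xml/<id>.xml.gz before parsing.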
def get_runs(taxid2srs, ftpdomain, orientth, maxisize, paired, minbases, verbose):
"""Select the best run for each uniq taxid-srs-date combination
"""
if verbose:
sys.stderr.write( "Fetching best run for each uniq taxid-srs-date combination...\n" )
#select the best run for each uniq taxid-srs-date combination
for taxid in taxid2srs:
for srs in taxid2srs[taxid]:
date2runs={}
            for strain, strain0, tissue, stage, taxid, srx, srp, isize, istdv, orient, stranded, runs in taxid2srs[taxid][srs]:
#check if paired
if paired:
if not isize:
continue
#skip if wrong orientation
if orientth and orientth!=orient:
continue
#skip big insert size or not paired
if maxisize:
if isize>maxisize:
continue
#add runs passed filtering
                for srr, assembly, size, bases, date in runs:
#skip if too small yield
if bases < minbases*10**6:
continue
if date not in date2runs:
date2runs[date]=[]
date2runs[date].append( (srr,srx,srp,isize,bases) )
#process best run for each uniq taxid-srs-date combination
for date in date2runs:
#
fltruns = filter( lambda x: x[3]!=0, date2runs[date] )
if not fltruns:
fltruns = date2runs[date]
#sort by size
bestrun = sorted( fltruns,key=lambda x: x[-1],reverse=True )[0]
#print bestrun,date2runs[date]
                srr,srx,srp,isize,bases = bestrun
                # fetch in the background; the per-taxid output directory
                # name used here is an assumption
                outdir = str(taxid)
                cmd = "fastq-dump --gzip --split-3 -O %s %s &" % (outdir, srr)
                os.system(cmd)
def main():
usage = "%(prog)s -v"
parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)
parser.add_argument("-v", dest="verbose", default=False, action="store_true", help="verbose")
parser.add_argument('--version', action='version', version='1.1')
parser.add_argument("-d", "--download", default=False, action="store_true",
help="download SRA files")
parser.add_argument("-t", "--taxid", type=int, required=True,
help="taxid of interest " )
parser.add_argument("-f", dest="ftp", default="ftp-trace.ncbi.nih.gov",
help="ftp server address [%(default)s]" )
parser.add_argument("-e", "--email", default="lpryszcz@crg.es", type=str,
help="email address [%(default)s]" )
parser.add_argument("-o", dest="orient", default="5'3'-3'5'",
help="orientation [%(default)s]" )
parser.add_argument("-m", dest="maxisize", default=1000, type=int,
help="max allowed insert [%(default)s]" )
parser.add_argument("-b", dest="minbases", default=600, type=int,
help="min Mbases in run [%(default)s Mbases -> 10x for 60Mb genome]" )
parser.add_argument("-p", "--paired", default=False, action="store_true",
help="fetch only paired runs" )
o = parser.parse_args()
if o.verbose:
sys.stderr.write( "Options: %s\n" % str(o) )
Entrez.email = o.email
if not os.path.isdir(".xml"):
os.makedirs(".xml")
#get all runs for taxid
outfn = "sra.tsv"
    taxid2srs = taxid2runs(outfn, o.taxid, o.verbose)
if o.download:
#fetch best srr
get_runs( taxid2srs,o.ftp,o.orient,o.maxisize,o.paired,o.minbases,o.verbose )
if __name__=='__main__':
t0 = datetime.now()
main()
dt = datetime.now()-t0
sys.stderr.write( "#Time elapsed: %s\n" % dt )
|
gpl-2.0
| 5,824,388,399,453,832,000
| 42.111842
| 268
| 0.583441
| false
| 3.286108
| false
| false
| false
|
eeshangarg/zulip
|
zerver/views/realm_icon.py
|
1
|
2428
|
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from zerver.decorator import require_realm_admin
from zerver.lib.actions import do_change_icon_source
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.response import json_error, json_success
from zerver.lib.upload import upload_icon_image
from zerver.lib.url_encoding import add_query_arg_to_redirect_url
from zerver.models import UserProfile
@require_realm_admin
def upload_icon(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
if len(request.FILES) != 1:
return json_error(_("You must upload exactly one icon."))
icon_file = list(request.FILES.values())[0]
if (settings.MAX_ICON_FILE_SIZE_MIB * 1024 * 1024) < icon_file.size:
return json_error(
_("Uploaded file is larger than the allowed limit of {} MiB").format(
settings.MAX_ICON_FILE_SIZE_MIB,
)
)
upload_icon_image(icon_file, user_profile)
do_change_icon_source(
user_profile.realm, user_profile.realm.ICON_UPLOADED, acting_user=user_profile
)
icon_url = realm_icon_url(user_profile.realm)
json_result = dict(
icon_url=icon_url,
)
return json_success(json_result)
@require_realm_admin
def delete_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
# We don't actually delete the icon because it might still
# be needed if the URL was cached and it is rewritten
# in any case after next update.
do_change_icon_source(
user_profile.realm, user_profile.realm.ICON_FROM_GRAVATAR, acting_user=user_profile
)
gravatar_url = realm_icon_url(user_profile.realm)
json_result = dict(
icon_url=gravatar_url,
)
return json_success(json_result)
def get_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
url = realm_icon_url(user_profile.realm)
    # We can rely on the URL already having query parameters: because our
    # templates depend on being able to use a plain ampersand to add query
    # parameters to the URL, the icon URL builder does '?version=<version_number>'
    # itself, sparing us from jumping through decode/encode hoops.
url = add_query_arg_to_redirect_url(url, request.META["QUERY_STRING"])
return redirect(url)
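# Illustrative sketch of exercising the upload view with Django's test client
# (assumes a configured Zulip test environment; the URL and file name are
# illustrative, not taken from this module):
#   from django.test import Client
#   client = Client()
#   with open("icon.png", "rb") as fp:
#       client.post("/json/realm/icon", {"file": fp})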
|
apache-2.0
| -8,473,574,400,917,262,000
| 37.539683
| 91
| 0.714168
| false
| 3.723926
| false
| false
| false
|
huiyiqun/check_mk
|
cmk/regex.py
|
1
|
2740
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2016 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
"""This module wraps some regex handling functions used by Check_MK"""
import re
from .exceptions import MKGeneralException
# TODO: Clean this up one day by using the way recommended by gettext.
# (See https://docs.python.org/2/library/gettext.html). For this we
# need the path to the locale files here.
try:
_
except NameError:
_ = lambda x: x # Fake i18n when not available
g_compiled_regexes = {}
def regex(pattern):
"""Compile regex or look it up in already compiled regexes.
(compiling is a CPU consuming process. We cache compiled regexes)."""
try:
return g_compiled_regexes[pattern]
except KeyError:
pass
try:
reg = re.compile(pattern)
except Exception, e:
raise MKGeneralException(_("Invalid regular expression '%s': %s") % (pattern, e))
g_compiled_regexes[pattern] = reg
return reg
# Checks if a string contains characters that make it necessary
# to use regular expression logic to handle it correctly
def is_regex(pattern):
for c in pattern:
if c in '.?*+^$|[](){}\\':
return True
return False
def escape_regex_chars(match):
r = ""
for c in match:
if c in r"[]\().?{}|*^$+":
r += "\\"
r += c
return r
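# Minimal usage sketch of this module's API:
#   regex(r'^srv\d+$').match('srv42')  # compiled once, then served from cache
#   is_regex('plain-host-name')        # -> False (no regex metacharacters)
#   is_regex('srv[0-9]+')              # -> True
#   escape_regex_chars('a.b*c')        # -> 'a\.b\*c'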
|
gpl-2.0
| -2,221,331,912,281,145,600
| 36.027027
| 89
| 0.531752
| false
| 3.697706
| false
| false
| false
|
SublimeHaskell/SublimeHaskell
|
hsdev/backend.py
|
1
|
36770
|
"""
The `hsdev` backend.
"""
from functools import reduce
import io
import json
import os
import os.path
import pprint
import re
import subprocess
import threading
import sublime
import SublimeHaskell.hsdev.callback as HsCallback
import SublimeHaskell.hsdev.client as HsDevClient
import SublimeHaskell.hsdev.result_parse as ResultParse
import SublimeHaskell.internals.backend as Backend
import SublimeHaskell.internals.logging as Logging
import SublimeHaskell.internals.output_collector as OutputCollector
import SublimeHaskell.internals.proc_helper as ProcHelper
import SublimeHaskell.internals.settings as Settings
import SublimeHaskell.internals.utils as Utils
import SublimeHaskell.sublime_haskell_common as Common
def result_identity(resp):
'''Identity function for results
'''
return resp
class HsDevBackend(Backend.HaskellBackend):
"""This class encapsulates all of the functions that interact with the `hsdev` backend.
"""
HSDEV_DEFAULT_PORT = 4567
HSDEV_DEFAULT_HOST = 'localhost'
HSDEV_NOT_FOUND = [0, 0, 0, 0]
HSDEV_MIN_VER = [0, 3, 3, 0] # minimum hsdev version
HSDEV_MAX_VER = [0, 3, 4, 0] # maximum hsdev version
    HSDEV_CALL_TIMEOUT = 300.0 # timeout in seconds for synchronous requests (5 minutes should be enough, no?)
def __init__(self, backend_mgr, local=True, port=HSDEV_DEFAULT_PORT, host=HSDEV_DEFAULT_HOST, **kwargs):
super().__init__(backend_mgr)
Logging.log('{0}.__init__({1}, {2})'.format(type(self).__name__, host, port), Logging.LOG_INFO)
# Sanity checking:
exec_with = kwargs.get('exec-with')
install_dir = kwargs.get('install-dir')
if bool(exec_with) ^ bool(install_dir):
if install_dir is None:
sublime.error_message('\n'.join(['\'exec_with\' requires an \'install_dir\'.',
'',
'Please check your \'backends\' configuration and retry.']))
raise RuntimeError('\'exec_with\' requires an \'install_dir\'.')
else:
sublime.error_message('\n'.join(['\'install_dir\' requires an \'exec_with\'.',
'',
'Please check your \'backends\' configuration and retry.']))
raise RuntimeError('\'install_dir\' requires an \'exec_with\'.')
elif exec_with and exec_with not in ['stack', 'cabal', 'cabal-new-build']:
sublime.error_message('\n'.join(['Invalid backend \'exec_with\': {0}'.format(exec_with),
'',
'Valid values are "cabal", "cabal-new-build" or "stack".',
'Please check your \'backends\' configuration and retry.']))
raise RuntimeError('Invalid backend \'exec_with\': {0}'.format(exec_with))
# Local hsdev server process and params
self.is_local_hsdev = local
self.hsdev_process = None
self.cache = os.path.join(Common.sublime_haskell_cache_path(), 'hsdev', 'hsdev.db')
self.log_file = os.path.join(Common.sublime_haskell_cache_path(), 'hsdev', 'hsdev.log')
self.exec_with = exec_with
self.install_dir = Utils.normalize_path(install_dir) if install_dir is not None else None
# Keep track of the hsdev version early. Needed to patch command line arguments later.
self.version = HsDevBackend.hsdev_version(self.exec_with, self.install_dir)
self.drain_stdout = None
self.drain_stderr = None
# Connection params
self.port = port
self.hostname = host
if self.is_local_hsdev:
self.hostname = self.HSDEV_DEFAULT_HOST
self.client = None
self.serial_lock = threading.RLock()
self.request_serial = 1
@staticmethod
def backend_name():
return 'hsdev'
@staticmethod
def is_available(**kwargs):
# Yes, this is slightly redundant because eventually __init__ does the same thing for a class
# instance.
exec_with = kwargs.get('exec-with')
install_dir = kwargs.get('install-dir')
local = kwargs.get('local', False)
exec_install_set = not bool(exec_with) ^ bool(install_dir)
backend_name = kwargs.get('backend_name', 'not specified.')
if exec_install_set or local:
if not exec_install_set:
# Either exec-with or install-dir isn't set, so the corresponding configuration target is unavailable.
return False
hsdev_ver = HsDevBackend.hsdev_version(exec_with, install_dir)
str_version = '.'.join([str(v) for v in hsdev_ver])
Logging.log('hsdev version: {0}'.format(str_version), Logging.LOG_INFO)
retval = hsdev_ver >= HsDevBackend.HSDEV_MIN_VER and hsdev_ver < HsDevBackend.HSDEV_MAX_VER
if not retval:
if retval != HsDevBackend.HSDEV_NOT_FOUND:
min_version = '.'.join([str(v) for v in HsDevBackend.HSDEV_MIN_VER])
max_version = '.'.join([str(v) for v in HsDevBackend.HSDEV_MAX_VER])
msg = '\n'.join(['Backend configuration: "{0}"'.format(backend_name),
'',
'Incompatible hsdev, detected version ' + str_version,
'Version should be \u2265 ' + min_version + ' and < ' + max_version])
else:
msg = '\n'.join(['Backend configuration: "{0}"'.format(backend_name),
'',
'Tried executing hsdev to get a version number, not successful.',
'Is hsdev installed (or built, if using stack or cabal exec wrappers)?'])
sublime.message_dialog(msg)
return retval
# Assume that a remote backend is actually available. Ultimately, we might not connect to it, but
# it is available to us as a backend.
return True
def start_backend(self):
retval = True
if self.is_local_hsdev:
Logging.log('Starting local \'hsdev\' server', Logging.LOG_INFO)
log_level = Settings.PLUGIN.hsdev_log_level
cmd = self.concat_args([(True, ["hsdev"]),
(True, ["run"]),
(self.port, ["--port", str(self.port)]),
(self.cache, ["--db", self.cache]),
(self.log_file, ["--log", self.log_file]),
(True, ["--log-level", log_level]),
(True, ["--no-color"])])
hsdev_proc = ProcHelper.exec_with_wrapper(self.exec_with, self.install_dir, cmd)
if hsdev_proc.process is not None:
# Use TextIOWrapper here because it combines decoding with newline handling,
# which means less to maintain.
hsdev_proc.process.stdout = io.TextIOWrapper(hsdev_proc.process.stdout, 'utf-8')
hsdev_proc.process.stderr = io.TextIOWrapper(hsdev_proc.process.stderr, 'utf-8')
                # Read and wait for hsdev's startup message. 15 seconds should be enough time for the message to appear.
# Otherwise, kill the thread because we don't want to get stuck waiting forever.
startup_reader = HsDevStartupReader(hsdev_proc.process.stdout)
startup_reader.start()
startup_reader.wait_startup(15.0)
if startup_reader.successful():
port = startup_reader.port()
if port != self.port:
Logging.log('hsdev: server port changed, was {0}, now {1}'.format(self.port, port), Logging.LOG_WARNING)
self.port = port
self.drain_stdout = OutputCollector.DescriptorDrain('hsdev stdout', hsdev_proc.process.stdout)
self.drain_stderr = OutputCollector.DescriptorDrain('hsdev stderr', hsdev_proc.process.stderr)
self.drain_stdout.start()
self.drain_stderr.start()
self.hsdev_process = hsdev_proc
Logging.log('Local \'hsdev\' server started successfully.', Logging.LOG_INFO)
else:
# This is a bit of a "Hail Mary!" because readline() could just hang forever. Just to make sure,
# kill the process too!
startup_reader.stop()
hsdev_proc.process.kill()
if hsdev_proc.process_err is not None:
Logging.log('Possible reason for timeout: {0}'.format(hsdev_proc.process_err))
self.hsdev_process = None
retval = False
sublime.error_message('Timed out waiting for \'hsdev\' to start up.')
else:
errmsg = 'Could not start local \'hsdev\' server because:\n\n' + hsdev_proc.process_err
sublime.error_message(errmsg)
self.hsdev_process = None
retval = False
return retval
def connect_backend(self):
Logging.log('Connecting to \'hsdev\' server at {0}:{1}'.format(self.hostname, self.port), Logging.LOG_INFO)
retval = True
self.client = HsDevClient.HsDevClient(self.backend_mgr)
if self.client.connect(self.hostname, self.port):
# For a local hsdev server that we started, send the link command so that it exits when we exit.
if self.is_local_hsdev:
self.link()
else:
            Logging.log('Connection to \'hsdev\' server unsuccessful, see tracebacks to diagnose.', Logging.LOG_ERROR)
retval = False
return retval
def disconnect_backend(self):
self.exit()
self.client.close()
def stop_backend(self):
if self.is_local_hsdev:
try:
self.hsdev_process.process.wait(90.0)
except subprocess.TimeoutExpired:
                sublime.message_dialog('\n'.join(['Timed out waiting for \'hsdev\' process to terminate.',
'',
'You may have to kill this process manually from a terminal or',
'console window\'s command line.']))
def is_live_backend(self):
return self.client.is_connected()
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# File/project tracking functions:
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    ## Pylint deems these two methods unnecessary since all they do is call the superclass. However, I'm
## leaving them here just in case something more interesting has to be done in addition to calling
## the superclass.
# def add_project_file(self, filename, project, project_dir):
# super().add_project_file(filename, project, project_dir)
# def remove_project_file(self, filename):
# super().remove_project_file(filename)
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# Features
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def auto_rescan(self):
return True
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# Utility functions used to implement the API:
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
@staticmethod
def hsdev_version(exec_with, install_dir, output_compiler_version=False):
retval = [0, 0, 0, 0]
compiler_version = None
cmd = ['hsdev', 'version']
if output_compiler_version:
cmd.append('-c')
hsdev_proc = ProcHelper.exec_with_wrapper(exec_with, install_dir, cmd)
if hsdev_proc.process is not None:
exit_code, out, _ = hsdev_proc.wait()
if exit_code == 0:
## 'cabal new-run' can spit out multiple lines of status before executing the task:
for line in out.splitlines():
hsver = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<revision>\d+)\.(?P<build>\d+)', line)
if hsver:
major = int(hsver.group('major'))
minor = int(hsver.group('minor'))
revision = int(hsver.group('revision'))
build = int(hsver.group('build'))
retval = [major, minor, revision, build]
compiler_version = line.split()[1] if output_compiler_version else None
break
return (retval, compiler_version) if output_compiler_version else retval
@staticmethod
def concat_args(args):
def inner_concat(left, right):
(left_pred, left_expr) = left
(right_pred, right_expr) = right
return (left_pred or right_pred, (left_expr if left_pred else []) + (right_expr if right_pred else []))
return reduce(inner_concat, args, (True, []))[1]
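    # For example, concat_args([(True, ['hsdev']), (False, ['--port', '4567']), (True, ['run'])])
    # evaluates to ['hsdev', 'run']: each predicate gates whether its argument list is kept.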
def files_and_contents(self, files, contents):
contents = contents or {}
retval = [{'file': f, 'contents': contents.get(f)} for f in files] if files else []
return retval
def make_callbacks(self, name, on_response=None, result_convert=result_identity, on_notify=None, on_error=None,
**backend_args):
with self.serial_lock:
req_serial = str(self.request_serial)
self.request_serial += 1
# Clean up backend arguments:
for param in ['on_response', 'result_convert', 'on_notify', 'on_error']:
if param in backend_args:
del backend_args[param]
return (HsCallback.HsDevCallbacks(req_serial, name, on_response, result_convert, on_notify, on_error), backend_args)
def hsdev_command(self, name, opts, callbacks, async_cmd=False, timeout=HSDEV_CALL_TIMEOUT, is_list=False,
on_result_part=None, split_result=None):
        if split_result is None:
            split_res = on_result_part is not None
        else:
            split_res = split_result
        if is_list and split_res:
result = []
def hsdev_command_notify(reply):
if 'result-part' in reply:
notify_result = callbacks.call_result_convert([reply['result-part']])[0]
on_result_part(notify_result)
result.append(notify_result)
else:
callbacks.call_notify(reply)
# FIXME: Is this option still used?
opts.update({'split-result': None})
callbacks.add_notify(hsdev_command_notify)
resp = self.client.call(name, opts, callbacks, wait=not async_cmd, timeout=timeout)
return resp
def command(self, name, opts, callbacks, timeout=HSDEV_CALL_TIMEOUT, on_result_part=None, split_result=None):
return self.hsdev_command(name, opts, callbacks, async_cmd=False, timeout=timeout, is_list=False,
on_result_part=on_result_part, split_result=split_result)
def async_command(self, name, opts, callbacks, on_result_part=None, split_result=None):
return self.hsdev_command(name, opts, callbacks, async_cmd=True, timeout=None, is_list=False,
on_result_part=on_result_part, split_result=split_result)
def list_command(self, name, opts, callbacks, timeout=HSDEV_CALL_TIMEOUT, on_result_part=None, split_result=None):
return self.hsdev_command(name, opts, callbacks, async_cmd=False, timeout=timeout, is_list=True,
on_result_part=on_result_part, split_result=split_result)
def async_list_command(self, name, opts, callbacks, on_result_part=None, split_result=None):
return self.hsdev_command(name, opts, callbacks, async_cmd=True, timeout=None, is_list=True,
on_result_part=on_result_part, split_result=split_result)
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# API implementation:
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def link(self, hold=False):
return self.command('link', {'hold': hold}, self.make_callbacks('link')[0])
    def ping(self):
        # result_convert turns the raw reply into a boolean 'pong' check
        callbacks = self.make_callbacks('ping', result_convert=lambda r: r and ('message' in r) and (r['message'] == 'pong'))[0]
        return self.command('ping', {}, callbacks)
def scan(self, cabal=False, sandboxes=None, projects=None, files=None, paths=None, ghc=None, contents=None,
docs=False, infer=False, wait_complete=False, **backend_args):
action = self.command if wait_complete else self.async_command
callbacks, backend_args = self.make_callbacks('scan', **backend_args)
return action('scan', {'projects': projects or [],
'cabal': cabal,
'sandboxes': sandboxes or [],
'files': self.files_and_contents(files, contents),
'paths': paths or [],
'ghc-opts': ghc or [],
'docs': docs,
'infer': infer},
callbacks, **backend_args)
def scan_project(self, project, build_tool=None, no_deps=False, wait_complete=False, **backend_args):
action = self.command if wait_complete else self.async_command
callbacks, backend_args = self.make_callbacks('scan project', **backend_args)
return action(
'scan project',
{
'project': project,
'build-tool': build_tool,
'scan-deps': not no_deps,
},
callbacks,
**backend_args
)
def scan_file(self, file, build_tool=None, no_project=False, no_deps=False, wait_complete=False, **backend_args):
action = self.command if wait_complete else self.async_command
callbacks, backend_args = self.make_callbacks('scan file', **backend_args)
return action(
'scan file',
{
'file': file,
'build-tool': build_tool,
'scan-project': not no_project,
'scan-deps': not no_deps,
},
callbacks,
**backend_args
)
def scan_package_dbs(self, package_dbs, wait_complete=False, **backend_args):
action = self.command if wait_complete else self.async_command
callbacks, backend_args = self.make_callbacks('scan package-dbs', **backend_args)
return action(
'scan package-dbs',
{'package-db-stack': [{'package-db': p} if p not in ['user-db', 'global-db'] else p for p in package_dbs]},
callbacks,
**backend_args
)
def set_file_contents(self, file, contents=None, **backend_args):
callbacks, backend_args = self.make_callbacks('set-file-contents', **backend_args)
return self.command('set-file-contents', {'file': file, 'contents': contents}, callbacks, **backend_args)
def docs(self, projects=None, files=None, **backend_args):
callbacks, backend_args = self.make_callbacks('docs', **backend_args)
return self.async_command('docs', {'projects': projects or [],
'files': files or []},
callbacks, **backend_args)
def infer(self, projects=None, files=None, **backend_args):
callbacks, backend_args = self.make_callbacks('infer', **backend_args)
return self.async_command('infer', {'projects': projects or [],
'files': files or []},
callbacks, **backend_args)
def remove(self, cabal=False, sandboxes=None, projects=None, files=None, **backend_args):
callbacks, backend_args = self.make_callbacks('remove', **backend_args)
return self.async_list_command('remove', {'projects': projects or [],
'cabal': cabal,
'sandboxes': sandboxes or [],
'files': files or []},
callbacks, **backend_args)
def remove_all(self, **backend_args):
callbacks, backend_args = self.make_callbacks('remove-all', **backend_args)
return self.command('remove-all', {}, callbacks, **backend_args)
def list_packages(self, **backend_args):
callbacks, backend_args = self.make_callbacks('packages', **backend_args)
return self.list_command('packages', {}, callbacks, **backend_args)
def list_projects(self, **backend_args):
callbacks, backend_args = self.make_callbacks('projects', **backend_args)
return self.list_command('projects', {}, callbacks, **backend_args)
def list_sandboxes(self, **backend_args):
return self.list_command('sandboxes', {}, **backend_args)
def symbol(self, lookup="", search_type='prefix', project=None, file=None, module=None, package=None, installed=False, source=False, standalone=False, local_names=False, header=False, **backend_args):
# search_type is one of: exact, prefix, infix, suffix
query = {'input': lookup, 'type': search_type}
filters = []
if project:
filters.append({'project': project})
if file:
filters.append({'file': file})
if module:
filters.append({'module': module})
if package:
filters.append({'package': package})
if installed:
filters.append('installed')
if source:
filters.append('sourced')
if standalone:
filters.append('standalone')
callbacks, backend_args = self.make_callbacks('symbol', result_convert=ResultParse.parse_symbol_ids if header else ResultParse.parse_symbols, **backend_args)
return self.list_command('symbol', {'query': query, 'filters': filters, 'locals': local_names, 'header': header},
callbacks, **backend_args)
def module(self, _projectname, lookup="", search_type='prefix', project=None, file=None, module=None, package=None, installed=False, source=False, standalone=False, header=False, **backend_args):
query = {'input': lookup, 'type': search_type}
filters = []
if project:
filters.append({'project': project})
if file:
filters.append({'file': file})
if module:
filters.append({'module': module})
if package:
filters.append({'package': package})
if installed:
filters.append('installed')
if source:
filters.append('sourced')
if standalone:
filters.append('standalone')
callbacks, backend_args = self.make_callbacks('module', result_convert=ResultParse.parse_module_ids if header else ResultParse.parse_modules, **backend_args)
return self.command('module', {'query': query, 'filters': filters, 'header': header, 'inspection': False},
callbacks, **backend_args)
def project(self, project=None, path=None, **backend_args):
callbacks, backend_args = self.make_callbacks('project', **backend_args)
return self.command('project', {'name': project} if project else {'path': path}, callbacks, **backend_args)
def sandbox(self, path, **backend_args):
callbacks, backend_args = self.make_callbacks('sandbox', **backend_args)
return self.command('sandbox', {'path': path}, callbacks, **backend_args)
def lookup(self, name, file, **backend_args):
callbacks, backend_args = self.make_callbacks('lookup', result_convert=ResultParse.parse_symbols, **backend_args)
return self.list_command('lookup', {'name': name, 'file': file}, callbacks, **backend_args)
def whois(self, name, file, **backend_args):
callbacks, backend_args = self.make_callbacks('whois', result_convert=ResultParse.parse_symbols, **backend_args)
return self.list_command('whois', {'name': name, 'file': file}, callbacks, **backend_args)
def whoat(self, line, column, file, **backend_args):
callbacks, backend_args = self.make_callbacks('whoat', result_convert=ResultParse.parse_symbols, **backend_args)
return self.list_command('whoat', {'line': line, 'column': column, 'file': file}, callbacks, **backend_args)
def scope_modules(self, _projcname, file, lookup='', search_type='prefix', **backend_args):
callbacks, backend_args = self.make_callbacks('scope_modules', result_convert=ResultParse.parse_module_ids,
**backend_args)
return self.list_command('scope modules', {'query': {'input': lookup, 'type': search_type}, 'file': file},
callbacks, **backend_args)
def scope(self, file, lookup='', search_type='prefix', global_scope=False, **backend_args):
callbacks, backend_args = self.make_callbacks('scope', result_convert=ResultParse.parse_symbol_ids, **backend_args)
return self.list_command('scope',
{'query': {'input': lookup,
'type': search_type
},
'file': file
}, callbacks, **backend_args)
def usages(self, line, column, file, **backend_args):
callbacks, backend_args = self.make_callbacks('usages', result_convert=ResultParse.parse_symbol_usages, **backend_args)
return self.list_command('usages', {'line': line, 'column': column, 'file': file}, callbacks, **backend_args)
def complete(self, sym, file, wide=False, **backend_args):
qname = sym.qualified_name() if sym.name is not None else sym.module + '.'
callbacks, backend_args = self.make_callbacks('complete', result_convert=ResultParse.parse_symbols, **backend_args)
return self.list_command('complete', {'prefix': qname, 'wide': wide, 'file': file},
callbacks, **backend_args)
def hayoo(self, query, page=None, pages=None, **backend_args):
callbacks, backend_args = self.make_callbacks('hayoo', result_convert=ResultParse.parse_symbols, **backend_args)
return self.list_command('hayoo', {'query': query, 'page': page or 0, 'pages': pages or 1},
callbacks, **backend_args)
def cabal_list(self, packages, **backend_args):
def convert_to_cabal_packages(pkg_list):
return [ResultParse.parse_cabal_package(pkg) for pkg in pkg_list] if pkg_list else None
callbacks, backend_args = self.make_callbacks('cabal list', result_convert=convert_to_cabal_packages, **backend_args)
return self.list_command('cabal list', {'packages': packages}, callbacks, **backend_args)
def unresolveds(self, files, **backend_args):
callbacks, backend_args = self.make_callbacks('unresolveds', **backend_args)
return self.list_command('unresolveds', {'files': files}, callbacks, **backend_args)
def lint(self, files=None, contents=None, hlint=None, wait_complete=False, **backend_args):
action = self.list_command if wait_complete else self.async_list_command
result_convert = backend_args.pop('result_convert', [])
if result_convert and not isinstance(result_convert, list):
result_convert = [result_convert]
result_convert.append(self.convert_warnings)
callbacks, backend_args = self.make_callbacks('lint', result_convert=result_convert, **backend_args)
return action('lint', {'files': self.files_and_contents(files, contents),
'lint-opts': hlint or []},
callbacks, **backend_args)
def check(self, files=None, contents=None, ghc=None, wait_complete=False, **backend_args):
action = self.list_command if wait_complete else self.async_list_command
callbacks, backend_args = self.make_callbacks('check', **backend_args)
return action('check', {'files': self.files_and_contents(files, contents),
'ghc-opts': ghc or []},
callbacks, **backend_args)
def check_lint(self, files=None, contents=None, ghc=None, hlint=None, wait_complete=False, **backend_args):
action = self.list_command if wait_complete else self.async_list_command
result_convert = backend_args.pop('result_convert', [])
if result_convert and not isinstance(result_convert, list):
result_convert = [result_convert]
result_convert.append(self.convert_warnings)
callbacks, backend_args = self.make_callbacks('check-lint', result_convert=result_convert, **backend_args)
return action('check-lint', {'files': self.files_and_contents(files, contents),
'ghc-opts': ghc or [],
'lint-opts': hlint or []},
callbacks, **backend_args)
def types(self, _projectname, file, _modulename, _line, _column, ghc_flags=None, contents=None, **backend_args):
callbacks, backend_args = self.make_callbacks('types', **backend_args)
        return self.list_command('types', {'files': self.files_and_contents([file], contents),
'ghc-opts': ghc_flags or []},
callbacks, **backend_args)
def autofixes(self, messages, wait_complete=False, **backend_args):
callbacks, backend_args = self.make_callbacks('autofixes', result_convert=ResultParse.parse_corrections, **backend_args)
action = self.list_command if wait_complete else self.async_list_command
return action('autofixes', {'messages': messages}, callbacks, **backend_args)
def refactor(self, messages, rest=[], pure=True, wait_complete=False, **backend_args):
callbacks, backend_args = self.make_callbacks('refactor', result_convert=ResultParse.parse_corrections, **backend_args)
action = self.list_command if wait_complete else self.async_list_command
return action('refactor', {'messages': messages, 'rest': rest, 'pure': pure}, callbacks, **backend_args)
def rename(self, name, new_name, file, wait_complete=False, **backend_args):
callbacks, backend_args = self.make_callbacks('rename', result_convert=ResultParse.parse_corrections, **backend_args)
action = self.list_command if wait_complete else self.async_list_command
return action('rename', {'name': name, 'new-name': new_name, 'file': file}, callbacks, **backend_args)
def langs(self, _projectname, **backend_args):
callbacks, backend_args = self.make_callbacks('langs', **backend_args)
return self.command('langs', {}, callbacks, **backend_args)
def flags(self, _projectname, **backend_args):
callbacks, backend_args = self.make_callbacks('flags', **backend_args)
return self.command('flags', {}, callbacks, **backend_args)
def ghc_eval(self, exprs, file=None, source=None, wait_complete=False, **backend_args):
the_file = None
if file is not None:
the_file = {'file': file, 'contents': source}
callbacks, backend_args = self.make_callbacks('ghc eval', result_convert=ResultParse.parse_repl_results, **backend_args)
action = self.list_command if wait_complete else self.async_list_command
return action('ghc eval', {'exprs': exprs, 'file': the_file}, callbacks, **backend_args)
def ghc_type(self, exprs, file=None, source=None, wait_complete=False, **backend_args):
the_file = None
if file is not None:
the_file = {'file': file, 'contents': source}
callbacks, backend_args = self.make_callbacks('ghc type', result_convert=ResultParse.parse_repl_results, **backend_args)
action = self.list_command if wait_complete else self.async_list_command
return action('ghc type', {'exprs': exprs, 'file': the_file}, callbacks, **backend_args)
def stop_ghc(self, **backend_args):
callbacks, backend_args = self.make_callbacks('stop-ghc', **backend_args)
return self.command('stop-ghc', {}, callbacks, **backend_args)
def exit(self):
return self.command('exit', {}, self.make_callbacks('exit')[0])
# old names for compatibility
def autofix_show(self, messages, wait_complete=False, **backend_args):
return self.autofixes(messages, wait_complete=wait_complete, **backend_args)
def autofix_fix(self, messages, rest=[], pure=True, wait_complete=False, **backend_args):
return self.refactor(messages, rest=rest, pure=pure, wait_complete=wait_complete, **backend_args)
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# Advanced features:
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def query_import(self, symname, filename):
if self.whois(symname, filename):
return (False, ['Symbol {0} already in scope'.format(symname)])
candidates = list(filter(
lambda c: c.imported_from is not None,
self.lookup(symname, filename),
))
return (True, candidates) if candidates else (False, ['Symbol {0} not found'.format(symname)])
def contents_to_module(self, file, contents):
self.set_file_contents(file, contents)
        m = self.module(None, file=file, header=True)
proj = self.project(path=m.location.project)
build_tool = proj['build-tool']
self.scan_file(file=file, build_tool=build_tool, wait_complete=True)
return Utils.head_of(self.module(None, file=file))
def clean_imports(self, filename):
cmd = ['hsclearimports', filename, '--max-import-list', '64']
hsclean_proc = ProcHelper.exec_with_wrapper(self.exec_with, self.install_dir, cmd)
if hsclean_proc.process is not None:
exit_code, result, err = hsclean_proc.wait()
if exit_code == 0:
return (True, result.splitlines())
return (False, err)
        return (False, ['\'hsclearimports\' utility not found.'])
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# Utility functions:
# -~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def convert_warnings(self, messages):
for msg in messages:
if msg.get('level', '') == 'warning':
msg['level'] = 'hint'
return messages
class HsDevStartupReader(threading.Thread):
'''Separate thread object that reads the local `hsdev` server's `stdout` looking for the server's startup
message. The server's port number is parsed from the startup message and saved in the object's `hsdev_port`
attribute, just in case this differs from the default or requested port.
'''
def __init__(self, fstdout):
super().__init__(name='hsdev startup reader')
self.stdout = fstdout
self.hsdev_port = -1
self.end_event = threading.Event()
def run(self):
self.end_event.clear()
while not self.end_event.is_set():
srvout = self.stdout.readline().strip()
Logging.log('hsdev initial: {0}'.format(srvout), Logging.LOG_DEBUG)
if srvout != '':
start_confirm = re.search(r'[Ss]erver started at port (?P<port>\d+)$', srvout)
if start_confirm:
self.hsdev_port = int(start_confirm.group('port'))
Logging.log('hsdev initial: \'hsdev\' server started at port {0}'.format(self.hsdev_port))
self.end_event.set()
else:
# Got EOF, stop loop.
self.end_event.set()
def wait_startup(self, tmo):
self.end_event.wait(tmo)
def successful(self):
return self.end_event.is_set()
def stop(self):
self.end_event.clear()
def port(self):
return self.hsdev_port
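# Typical use, mirroring start_backend above:
#   startup_reader = HsDevStartupReader(hsdev_proc.process.stdout)
#   startup_reader.start()
#   startup_reader.wait_startup(15.0)
#   if startup_reader.successful():
#       port = startup_reader.port()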
|
mit
| -7,706,911,259,607,050,000
| 49.438957
| 204
| 0.571716
| false
| 3.794634
| false
| false
| false
|
afsungur/MemWord
|
framefinish.py
|
1
|
2063
|
import wx
from griddict import GridDictionary
import Global
class FrameFinish(wx.Frame):
def __init__(self, parent, true_count, false_count, falses):
FRAME_SIZE_WIDTH = 800
FRAME_SIZE_HEIGHT = 300
FRAME_POS_X = 200
FRAME_POS_Y = 200
wx.Frame.__init__(self, parent, -1,
title=Global.FINISH_TITLE,
size=(FRAME_SIZE_WIDTH, FRAME_SIZE_HEIGHT),
pos=(FRAME_POS_X, FRAME_POS_Y),
style=wx.DEFAULT_FRAME_STYLE)
self.frame = parent
# Text Items
true_count_text = wx.StaticText(self, -1, Global.TRUE_COUNT_TEXT)
false_count_text = wx.StaticText(self, -1, Global.FALSE_COUNT_TEXT)
true_count_value = wx.StaticText(self, -1, str(true_count))
false_count_value = wx.StaticText(self, -1, str(false_count))
        separator = wx.StaticText(self, -1, "-----------------------------")
font = wx.Font(16, wx.MODERN, wx.NORMAL, wx.BOLD)
falses_big_text = wx.StaticText(self, -1, Global.WRONG_ANSWERS_TEXT+":")
falses_big_text.SetFont(font)
# Grid
grid_falses = GridDictionary(self, falses)
print "false count:", len(falses)
# Sizer Set
trueCountSizer = wx.GridBagSizer(2,2)
trueCountSizer.Add(true_count_text,pos=(0,0))
trueCountSizer.Add(true_count_value,pos=(0,1))
trueCountSizer.Add(false_count_text,pos=(1,0))
trueCountSizer.Add(false_count_value,pos=(1,1))
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(trueCountSizer, 0, wx.ALL, 5)
        mainSizer.Add(separator, 0, wx.ALL, 5)
mainSizer.Add(falses_big_text,0, wx.ALL, 5)
mainSizer.Add(grid_falses, 0, wx.ALL, 5)
# Bind
self.Bind(wx.EVT_CLOSE, self.close_event)
# Frame Settings
self.SetSizer(mainSizer)
self.Fit()
self.Show()
def close_event(self, evt):
print "closed..."
self.frame.close()
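# Minimal usage sketch (assumes a running wx.App and a parent frame exposing a
# close() method; the counts and answer list are illustrative):
#   app = wx.App(False)
#   FrameFinish(parent_frame, 8, 2, wrong_answers)
#   app.MainLoop()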
|
gpl-3.0
| -5,161,963,566,746,056,000
| 33.383333
| 80
| 0.56762
| false
| 3.393092
| false
| false
| false
|
davidcdba/oBid
|
oBid/oBid/settings.py
|
1
|
6054
|
#encoding: utf-8
# So that accents and Ñ don't cause trouble
# Django settings for oBid project.
## EXPLANATION ## Import the system 'os' library and set the project PATH to the folder this file lives in
import os
PROJECT_PATH=os.path.dirname(os.path.realpath(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('i12gamad', 'i12gamad@uco.es'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'oBid.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# You can see the available time zones here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
## EXPLANATION ## Set 'Europe/Madrid' as the time zone to avoid clock shifts
TIME_ZONE = 'Europe/Madrid'
## EXPLANATION ## Set Spanish (Spain) as the language we will use
LANGUAGE_CODE = 'es-es'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
## EXPLANATION ## Set the project's 'media' folder as the folder holding the multimedia content
MEDIA_ROOT = os.path.join(PROJECT_PATH,'media')
## EXPLANATION ## Set the path 'localhost:8000/media/' as the access route to the multimedia folder
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
    ## EXPLANATION ## Set the project's 'static' folder as the folder holding the static content
os.path.join(PROJECT_PATH,'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    ## NOTE ## Line uncommented; enables the default-storage static files finder
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '##d-1%bpw32#q*_#q6e)07_n01$qy!s&9mx6_2yh4p6)gv^^p&'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'oBid.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'oBid.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
    ## EXPLANATION ## Set the project's 'templates' folder as the folder holding the templates
os.path.join(PROJECT_PATH,'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
    'django.contrib.admin', ## NOTE ## Line uncommented; enables access to the admin panel
    ## NOTE ## Line uncommented; enables access to the admin panel documentation
    'django.contrib.admindocs',
    # add the 'subasta' (auction) application
'subasta',
'usuarios',
'articulos',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
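# For example, with this file at /srv/oBid/oBid/settings.py (illustrative path),
# PROJECT_PATH is /srv/oBid/oBid, so MEDIA_ROOT resolves to /srv/oBid/oBid/media
# and the templates directory to /srv/oBid/oBid/templates.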
|
gpl-2.0
| 8,352,776,375,811,243,000
| 35.896341
| 129
| 0.691786
| false
| 3.386122
| false
| false
| false
|
yarikoptic/NiPy-OLD
|
examples/interfaces/process_fiac.py
|
1
|
6055
|
''' Single subject analysis script for SPM / FIAC '''
import sys
from os.path import join as pjoin
from glob import glob
import numpy as np
from nipy.interfaces.spm import spm_info, make_job, scans_for_fnames, \
run_jobdef, fnames_presuffix, fname_presuffix, fltcols
def get_data(data_path, subj_id):
data_def = {}
subject_path = pjoin(data_path, 'fiac%s' % subj_id)
data_def['functionals'] = sorted(
glob(pjoin(subject_path, 'functional_*.nii')))
anatomicals = glob(pjoin(subject_path, 'anatomical.nii'))
if len(anatomicals) == 1:
data_def['anatomical'] = anatomicals[0]
elif len(anatomicals) == 0:
data_def['anatomical'] = None
else:
raise ValueError('Too many anatomicals')
return data_def
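# For example (illustrative paths), get_data('/data/fiac', '0') returns roughly:
#   {'functionals': ['/data/fiac/fiac0/functional_01.nii', ...],
#    'anatomical': '/data/fiac/fiac0/anatomical.nii'}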
def slicetime(data_def):
sess_scans = scans_for_fnames(data_def['functionals'])
stinfo = make_job('temporal', 'st', {
'scans': sess_scans,
'so':range(1,31,2) + range(2,31,2),
'tr':2.5,
'ta':2.407,
'nslices':float(30),
'refslice':1
})
run_jobdef(stinfo)
def realign(data_def):
sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a'))
rinfo = make_job('spatial', 'realign', [{
'estimate':{
'data':sess_scans,
'eoptions':{
'quality':0.9,
'sep':4.0,
'fwhm':5.0,
'rtm':True,
'interp':2.0,
'wrap':[0.0,0.0,0.0],
'weight':[]
}
}
}])
run_jobdef(rinfo)
def reslice(data_def):
sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a'))
rsinfo = make_job('spatial', 'realign', [{
'write':{
'data': np.vstack(sess_scans.flat),
'roptions':{
'which':[2, 1],
'interp':4.0,
'wrap':[0.0,0.0,0.0],
'mask':True,
}
}
}])
run_jobdef(rsinfo)
def coregister(data_def):
func1 = data_def['functionals'][0]
mean_fname = fname_presuffix(func1, 'meana')
crinfo = make_job('spatial', 'coreg', [{
'estimate':{
'ref': [mean_fname],
'source': [data_def['anatomical']],
'other': [[]],
'eoptions':{
'cost_fun':'nmi',
'sep':[4.0, 2.0],
'tol':np.array(
[0.02,0.02,0.02,
0.001,0.001,0.001,
0.01,0.01,0.01,
0.001,0.001,0.001]).reshape(1,12),
'fwhm':[7.0, 7.0]
}
}
}])
run_jobdef(crinfo)
def segnorm(data_def):
def_tpms = np.zeros((3,1), dtype=np.object)
spm_path = spm_info.spm_path
def_tpms[0] = pjoin(spm_path, 'tpm', 'grey.nii'),
def_tpms[1] = pjoin(spm_path, 'tpm', 'white.nii'),
def_tpms[2] = pjoin(spm_path, 'tpm', 'csf.nii')
data = np.zeros((1,), dtype=object)
data[0] = data_def['anatomical']
sninfo = make_job('spatial', 'preproc', {
'data': data,
'output':{
'GM':fltcols([0,0,1]),
'WM':fltcols([0,0,1]),
'CSF':fltcols([0,0,0]),
'biascor':1.0,
'cleanup':False,
},
'opts':{
'tpm':def_tpms,
'ngaus':fltcols([2,2,2,4]),
'regtype':'mni',
'warpreg':1.0,
'warpco':25.0,
'biasreg':0.0001,
'biasfwhm':60.0,
'samp':3.0,
'msk':np.array([], dtype=object),
}
})
run_jobdef(sninfo)
def norm_write(data_def):
sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a'))
matname = fname_presuffix(data_def['anatomical'],
suffix='_seg_sn.mat',
use_ext=False)
subj = {
'matname': np.zeros((1,), dtype=object),
'resample': np.vstack(sess_scans.flat),
}
subj['matname'][0] = matname
roptions = {
'preserve':False,
'bb':np.array([[-78,-112, -50],[78,76,85.0]]),
'vox':fltcols([2.0,2.0,2.0]),
'interp':1.0,
'wrap':[0.0,0.0,0.0],
}
nwinfo = make_job('spatial', 'normalise', [{
'write':{
'subj': subj,
'roptions': roptions,
}
}])
run_jobdef(nwinfo)
# knock out the list of images, replacing with only one
subj['resample'] = np.zeros((1,), dtype=object)
subj['resample'][0] = data_def['anatomical']
roptions['interp'] = 4.0
run_jobdef(nwinfo)
def smooth(data_def, fwhm=8.0):
try:
len(fwhm)
except TypeError:
fwhm = [fwhm] * 3
fwhm = np.asarray(fwhm, dtype=np.float).reshape(1,3)
sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'wa'))
sinfo = make_job('spatial', 'smooth',
{'data':np.vstack(sess_scans.flat),
'fwhm':fwhm,
'dtype':0})
run_jobdef(sinfo)
def process_subject(ddef):
if not ddef['anatomical']:
return
slicetime(ddef)
realign(ddef)
reslice(ddef)
coregister(ddef)
segnorm(ddef)
norm_write(ddef)
smooth(ddef)
def process_subjects(data_path, subj_ids):
for subj_id in subj_ids:
ddef = get_data(data_path, subj_id)
process_subject(ddef)
if __name__ == '__main__':
try:
data_path = sys.argv[1]
except IndexError:
raise OSError('Need FIAC data path as input')
try:
subj_ids = sys.argv[2:]
except IndexError:
subj_ids = range(16)
process_subjects(data_path, subj_ids)
|
bsd-3-clause
| -6,172,351,069,423,954,000
| 29.124378
| 82
| 0.471181
| false
| 3.196938
| false
| false
| false
|
xesscorp/skidl
|
skidl/bus.py
|
1
|
16133
|
# -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2018 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Handles buses.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import range, str, super
from future import standard_library
from .alias import Alias
from .common import *
from .defines import *
from .logger import logger
from .net import Net
from .netpinlist import NetPinList
from .pin import Pin
from .skidlbaseobj import SkidlBaseObject
from .utilities import *
standard_library.install_aliases()
class Bus(SkidlBaseObject):
"""
This class collects one or more nets into a group that can be indexed.
Args:
name: A string with the name of the bus.
args: A list of ints, pins, nets, and buses to attach to the bus.
Keyword Args:
attribs: A dictionary of attributes and values to attach to
the Net object.
Example:
::
n = Net()
led1 = Part("Device", 'LED')
b = Bus('B', 8, n, led1['K'])
"""
@classmethod
def get(cls, name, circuit=None):
"""Get the bus with the given name from a circuit, or return None."""
if not circuit:
circuit = builtins.default_circuit
search_params = (
("name", name, True),
("aliases", name, True),
# ('name', ''.join(('.*',name,'.*')), False),
# ('aliases', Alias(''.join(('.*',name,'.*'))), False)
)
for attr, name, do_str_match in search_params:
buses = filter_list(
circuit.buses, do_str_match=do_str_match, **{attr: name}
)
if buses:
return list_or_scalar(buses)
return None
@classmethod
def fetch(cls, name, *args, **attribs):
"""Get the bus with the given name from a circuit, or create it if not found."""
circuit = attribs.get("circuit", builtins.default_circuit)
return cls.get(name, circuit=circuit) or cls(name, *args, **attribs)
def __init__(self, *args, **attribs):
super().__init__()
# Define the member storing the nets so it's present, but it starts empty.
self.nets = []
# For Bus objects, the circuit object the bus is a member of is passed
# in with all the other attributes. If a circuit object isn't provided,
# then the default circuit object is added to the attributes.
attribs["circuit"] = attribs.get("circuit", default_circuit)
# Scan through the kwargs and args to see if there is a name for this bus.
name = attribs.pop("name", None)
if not name:
try:
# The first string found will be the bus name.
name = [a for a in args if isinstance(a, (basestring, type(None)))][0]
# Remove the name from the list of things to be added to the bus.
args = list(args)
args.remove(name)
# args = [a for a in args if a != name]
except IndexError:
# No explicit bus name found, so generate an implicit one.
name = None
# Attach additional attributes to the bus. (The Circuit object also gets
# set here.)
for k, v in list(attribs.items()):
setattr(self, k, v)
# The bus name is set after the circuit is assigned so the name can be
# checked against the other bus names already in that circuit.
self.name = name
# Add the bus to the circuit.
self.circuit = None # Make sure bus isn't seen as part of circuit.
attribs["circuit"] += self # Add bus to circuit (also sets self.circuit).
# Build the bus from net widths, existing nets, nets of pins, other buses.
self.extend(args)
def extend(self, *objects):
"""Extend bus by appending objects to the end (MSB)."""
self.insert(len(self.nets), objects)
def insert(self, index, *objects):
"""Insert objects into bus starting at indexed position."""
for obj in flatten(objects):
if isinstance(obj, int):
# Add a number of new nets to the bus.
for _ in range(obj):
self.nets.insert(index, Net())
index += obj
elif isinstance(obj, Net):
# Add an existing net to the bus.
self.nets.insert(index, obj)
index += 1
elif isinstance(obj, Pin):
# Add a pin to the bus.
try:
# Add the pin's net to the bus.
self.nets.insert(index, obj.get_nets()[0])
except IndexError:
# OK, the pin wasn't already connected to a net,
# so create a new net, add it to the bus, and
# connect the pin to it.
n = Net()
n += obj
self.nets.insert(index, n)
index += 1
elif isinstance(obj, Bus):
# Add an existing bus to this bus.
for n in reversed(obj.nets):
self.nets.insert(index, n)
index += len(obj)
else:
log_and_raise(
logger,
ValueError,
"Adding illegal type of object ({}) to Bus {}.".format(
type(obj), self.name
),
)
# Assign names to all the unnamed nets in the bus.
# Separate index from bus name if name ends with number.
sep = '_' if self.name[-1].isdigit() else ''
for i, net in enumerate(self.nets):
if net.is_implicit():
# Net names are the bus name with the index appended.
net.name = self.name + sep + str(i)
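# Example (hedged sketch of insert/extend semantics):
#
#   b = Bus('B', 2)          # nets B0, B1
#   b.insert(1, Net('MID'))  # nets are now B0, MID, B1
#   b.extend(2)              # appends two implicit nets, named B3 and B4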
def get_nets(self):
"""Return the list of nets contained in this bus."""
return to_list(self.nets)
def get_pins(self):
"""It's an error to get the list of pins attached to all bus lines."""
log_and_raise(logger, TypeError, "Can't get the list of pins on a bus!")
def copy(self, num_copies=None, **attribs):
"""
Make zero or more copies of this bus.
Args:
num_copies: Number of copies to make of this bus.
Keyword Args:
attribs: Name/value pairs for setting attributes for the copy.
Returns:
A list of Bus copies or a Bus if num_copies==1.
Raises:
Exception if the requested number of copies is a non-integer or negative.
Notes:
An instance of a bus can be copied just by calling it like so::
b = Bus('A', 8) # Create a bus.
b_copy = b(2) # Get two copies of the bus.
You can also use the multiplication operator to make copies::
b = 10 * Bus('A', 8) # Create an array of buses.
"""
# If the number of copies is None, then a single copy will be made
# and returned as a scalar (not a list). Otherwise, the number of
# copies will be set by the num_copies parameter or the number of
# values supplied for each part attribute.
num_copies_attribs = find_num_copies(**attribs)
return_list = (num_copies is not None) or (num_copies_attribs > 1)
if num_copies is None:
num_copies = max(1, num_copies_attribs)
# Check that a valid number of copies is requested.
if not isinstance(num_copies, int):
log_and_raise(
logger,
ValueError,
"Can't make a non-integer number ({}) of copies of a bus!".format(
num_copies
),
)
if num_copies < 0:
log_and_raise(
logger,
ValueError,
"Can't make a negative number ({}) of copies of a bus!".format(
num_copies
),
)
copies = []
for i in range(num_copies):
cpy = Bus(self.name, self)
# Attach additional attributes to the bus.
for k, v in list(attribs.items()):
if isinstance(v, (list, tuple)):
try:
v = v[i]
except IndexError:
log_and_raise(
logger,
ValueError,
"{} copies of bus {} were requested, but too few elements in attribute {}!".format(
num_copies, self.name, k
),
)
setattr(cpy, k, v)
copies.append(cpy)
# Return a list of the copies made or just a single copy.
if return_list:
return copies
return copies[0]
# Make copies with the multiplication operator or by calling the object.
__call__ = copy
def __mul__(self, num_copies):
if num_copies is None:
num_copies = 0
return self.copy(num_copies=num_copies)
__rmul__ = __mul__
def __getitem__(self, *ids):
"""
Return a bus made up of the nets at the given indices.
Args:
ids: A list of indices of bus lines. These can be individual
numbers, net names, nested lists, or slices.
Returns:
A bus if the indices are valid, otherwise None.
"""
# Use the indices to get the nets from the bus.
nets = []
for ident in expand_indices(0, len(self) - 1, False, *ids):
if isinstance(ident, int):
nets.append(self.nets[ident])
elif isinstance(ident, basestring):
nets.extend(filter_list(self.nets, name=ident))
else:
log_and_raise(
logger, TypeError, "Can't index bus with a {}.".format(type(ident))
)
if len(nets) == 0:
# No nets were selected from the bus, so return None.
return None
if len(nets) == 1:
# Just one net selected, so return the Net object.
return nets[0]
# Multiple nets selected, so return them as a NetPinList list.
return NetPinList(nets)
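# Example (hedged): with b = Bus('A', 8) holding nets A0..A7,
#
#   b[0]    -> the single Net A0
#   b[7:4]  -> a NetPinList of A7, A6, A5, A4
#   b['A2'] -> the Net named A2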
def __setitem__(self, ids, *pins_nets_buses):
"""
You can't assign to bus lines. You must use the += operator.
This method is a work-around that allows the use of the += for making
connections to bus lines while prohibiting direct assignment. Python
processes something like my_bus[7:0] += 8 * Pin() as follows::
1. Bus.__getitem__ is called with '7:0' as the index. This
returns a NetPinList of eight nets from my_bus.
2. The NetPinList.__iadd__ method is passed the NetPinList and
the thing to connect to it (eight pins in this case). This
method makes the actual connection to the part pin or pins. Then
it creates an iadd_flag attribute in the object it returns.
3. Finally, Bus.__setitem__ is called. If the iadd_flag attribute
is true in the passed argument, then __setitem__ was entered
as part of processing the += operator. If there is no
iadd_flag attribute, then __setitem__ was entered as a result
of using a direct assignment, which is not allowed.
"""
# If the iadd_flag is set, then it's OK that we got
# here and don't issue an error. Also, delete the flag.
if getattr(pins_nets_buses[0], "iadd_flag", False):
del pins_nets_buses[0].iadd_flag
return
# No iadd_flag or it wasn't set. This means a direct assignment
# was made to the pin, which is not allowed.
log_and_raise(logger, TypeError, "Can't assign to a bus! Use the += operator.")
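# Example (hedged): only augmented assignment reaches bus lines.
#
#   b = Bus('D', 8)
#   b[3:0] += 4 * Pin()  # OK: routed through __getitem__ and __iadd__
#   b[3:0] = 4 * Pin()   # TypeError: rejected by __setitem__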
def __iter__(self):
"""
Return an iterator for stepping thru individual lines of the bus.
"""
return (self[l] for l in range(len(self))) # Return generator expr.
def is_movable(self):
"""
Return true if the bus is movable to another circuit.
A bus is movable if all the nets in it are movable.
"""
for n in self.nets:
if not n.is_movable():
# One net not movable means the entire Bus is not movable.
return False
return True # All the nets were movable.
def is_implicit(self):
"""Return true if the bus name is implicit."""
from .defines import NET_PREFIX, BUS_PREFIX
prefix_re = "({}|{})+".format(re.escape(NET_PREFIX), re.escape(BUS_PREFIX))
return re.match(prefix_re, self.name)
def connect(self, *pins_nets_buses):
"""
Return the bus after connecting one or more nets, pins, or buses.
Args:
pins_nets_buses: One or more Pin, Net or Bus objects or
lists/tuples of them.
Returns:
The updated bus with the new connections.
Notes:
You can connect nets or pins to a bus like so::
p = Pin() # Create a pin.
n = Net() # Create a net.
b = Bus('B', 2) # Create a two-wire bus.
b += p,n # Connect pin and net to B[0] and B[1].
"""
nets = NetPinList(self.nets)
nets += pins_nets_buses
return self
__iadd__ = connect
@property
def name(self):
"""
Get, set and delete the name of the bus.
When setting the bus name, if another bus with the same name
is found, the name for this bus is adjusted to make it unique.
"""
return self._name
@name.setter
def name(self, name):
# Remove the existing name so it doesn't cause a collision if the
# object is renamed with its existing name.
self._name = None
# Now name the object with the given name or some variation
# of it that doesn't collide with anything else in the list.
self._name = get_unique_name(self.circuit.buses, "name", BUS_PREFIX, name)
@name.deleter
def name(self):
"""Delete the bus name."""
del self._name
def __str__(self):
"""Return a list of the nets in this bus as a string."""
return self.name + ":\n\t" + "\n\t".join([n.__str__() for n in self.nets])
__repr__ = __str__
def __len__(self):
"""Return the number of nets in this bus."""
return len(self.nets)
@property
def width(self):
"""Return width of a Bus, which is the same as using the len() operator."""
return len(self)
def __bool__(self):
"""Any valid Bus is True"""
return True
__nonzero__ = __bool__ # Python 2 compatibility.
|
mit
| -8,701,624,065,980,637,000
| 34.613687
| 111
| 0.556065
| false
| 4.329844
| false
| false
| false
|
MicheleTobias/CurvilinearAnisotropy
|
Code/WillametteFiles_Centerline.py
|
1
|
1734
|
# import the modules I'm gonna need
import glob, string, csv, os
# input the files to use
inputdirectory = 'C:\Users\Michele\Documents\Research\CurvilinearAnisotropy\WillametteRiver\willamette_elevations\willamette\centerline_elevation\\'
outputfile1 = 'C:\Users\Michele\Documents\Research\CurvilinearAnisotropy\WillametteRiver\willamette_elevations\willamette\PythonOutput\\WillamettePoints_Centerline.txt'
#outputfile2 = 'C:\Documents and Settings\Michele Tobias\My Documents\Davis\Research\GIS Data\DataOutput\\SBV_average.txt'
filemake = open(outputfile1,'w')
filemake.close()
#filemake = open(outputfile2,'w')
#filemake.close()
data = []
fulldata = []
#add *.txt to the end of the inputdirectory
inputdirectory += '*.txt'
#---------Copying the $GPGGA Lines to their own File--------------
# find the text files you need to work with
textfiles = glob.glob(inputdirectory)
#print textfiles
#for writing the column names at the top of the output file
columnnames = ['Easting\tNorthing\tBed_Elevation']
#finding the lines I need and writing them to the output file under the column names
writer = csv.writer(open(outputfile1, 'w+'))
writer.writerow(columnnames)
#print textfiles
for i in textfiles:
#shortdoc = os.path.basename(i)
#point = shortdoc.rstrip(".txt")
#point = shortdoc[shortdoc.find(' ')+1: shortdoc.find('.')]
reader = csv.reader(open(i, "r"))
data = [row for row in reader]
rownum = 0
for j in data:
# skip the two header rows at the top of each input file
if rownum > 1:
writer.writerow(j)
#fulldata.append(j)
rownum += 1
#j.append(point)
#if j[0] != '#':
# writer.writerow(j)
# fulldata.append(j)
# #print j
#rownum += 1
print 'Finished!'
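# Output sketch (illustrative values): the file starts with one tab-separated
# header line, while the data rows are written back comma-separated exactly
# as read from the source files:
#
#   Easting\tNorthing\tBed_Elevation
#   478123.4,4921345.6,57.2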
|
gpl-2.0
| 6,246,451,483,380,688,000
| 30.527273
| 168
| 0.686275
| false
| 3.1875
| false
| false
| false
|
garbas/mozilla-releng-services
|
lib/cli_common/cli_common/log.py
|
1
|
5277
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import os
import structlog
import logbook
import structlog.exceptions
CHANNELS = [
'master',
'staging',
'production',
]
class UnstructuredRenderer(structlog.processors.KeyValueRenderer):
def __call__(self, logger, method_name, event_dict):
event = None
if 'event' in event_dict:
event = event_dict.pop('event')
if event_dict or event is None:
# if there are other keys, use the parent class to render them
# and append to the event
rendered = super(UnstructuredRenderer, self).__call__(
logger, method_name, event_dict)
return '%s (%s)' % (event, rendered)
else:
return event
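# Example (hedged): rendering a structured event into a flat string.
#
#   r = UnstructuredRenderer()
#   r(None, 'info', {'event': 'user login', 'uid': 42})  # "user login (uid=42)"
#   r(None, 'info', {'event': 'user login'})             # "user login"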
def setup_mozdef(project_name, channel, MOZDEF):
'''
Setup mozdef using taskcluster secrets
'''
import mozdef_client
severity_map = {
'critical': mozdef_client.MozDefEvent.SEVERITY_CRITICAL,
'error': mozdef_client.MozDefEvent.SEVERITY_ERROR,
'warning': mozdef_client.MozDefEvent.SEVERITY_WARNING,
'info': mozdef_client.MozDefEvent.SEVERITY_INFO,
'debug': mozdef_client.MozDefEvent.SEVERITY_DEBUG,
}
def send(logger, method_name, event_dict):
# only send to mozdef if `mozdef` is set
if event_dict.pop('mozdef', False):
msg = mozdef_client.MozDefEvent(MOZDEF)
msg.summary = event_dict.get('event', '')
msg.tags = [
'mozilla-releng/services/' + channel,
project_name,
]
if set(event_dict) - {'event'}:
msg.details = event_dict.copy()
msg.details.pop('event', None)
msg.source = logger.name
msg.set_severity(
severity_map.get(
method_name,
mozdef_client.MozDefEvent.SEVERITY_INFO,
),
)
msg.send()
return event_dict
return send
def setup_papertrail(project_name, channel, PAPERTRAIL_HOST, PAPERTRAIL_PORT):
'''
Setup papertrail account using taskcluster secrets
'''
# Setup papertrail
papertrail = logbook.SyslogHandler(
application_name='mozilla-releng/services/{}/{}'.format(channel, project_name),
address=(PAPERTRAIL_HOST, int(PAPERTRAIL_PORT)),
format_string='{record.time} {record.channel}: {record.message}',
bubble=True,
)
papertrail.push_application()
def setup_sentry(project_name, channel, SENTRY_DSN):
'''
Setup sentry account using taskcluster secrets
'''
from raven import Client
from raven.handlers.logbook import SentryHandler
sentry_client = Client(
dsn=SENTRY_DSN,
site=project_name,
name='mozilla-releng/services',
environment=channel,
# TODO:
# release=read(VERSION) we need to promote that as well via secrets
# tags=...
# repos=...
)
sentry = SentryHandler(sentry_client, level=logbook.WARNING, bubble=True)
sentry.push_application()
def init_logger(project_name,
channel=None,
level=logbook.INFO,
handler=None,
PAPERTRAIL_HOST=None,
PAPERTRAIL_PORT=None,
SENTRY_DSN=None,
MOZDEF=None
):
if not channel:
channel = os.environ.get('APP_CHANNEL')
if channel and channel not in CHANNELS:
raise Exception('Initializing logging with channel `{}`. It should be one of: {}'.format(channel, ', '.join(CHANNELS)))
# By default, output logs on stderr
if handler is None:
fmt = '{record.channel}: {record.message}'
handler = logbook.StderrHandler(level=level, format_string=fmt)
handler.push_application()
# Log to papertrail
if channel and PAPERTRAIL_HOST and PAPERTRAIL_PORT:
setup_papertrail(project_name, channel, PAPERTRAIL_HOST, PAPERTRAIL_PORT)
# Log to sentry
if channel and SENTRY_DSN:
setup_sentry(project_name, channel, SENTRY_DSN)
def logbook_factory(*args, **kwargs):
# Logger given to structlog
logbook.compat.redirect_logging()
return logbook.Logger(level=level, *args, **kwargs)
# Setup structlog over logbook
processors = [
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
]
# send to mozdef before formatting into a string
if channel and MOZDEF:
processors.append(setup_mozdef(project_name, channel, MOZDEF))
processors.append(UnstructuredRenderer())
structlog.configure(
context_class=structlog.threadlocal.wrap_dict(dict),
processors=processors,
logger_factory=logbook_factory,
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
def get_logger(*args, **kwargs):
return structlog.get_logger(*args, **kwargs)
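# Usage sketch (hedged; the project name and keyword values are illustrative):
#
#   init_logger('myproject', channel='staging')
#   log = get_logger('myproject')
#   log.info('service started', port=8000)
#   log.error('request failed', mozdef=True)  # also sent to MozDef if configured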
|
mpl-2.0
| -6,548,193,065,109,681,000
| 28.480447
| 126
| 0.617396
| false
| 3.837818
| false
| false
| false
|
zfrenchee/pandas
|
pandas/core/api.py
|
1
|
3146
|
# pylint: disable=W0614,W0401,W0611
# flake8: noqa
import numpy as np
from pandas.core.algorithms import factorize, unique, value_counts
from pandas.core.dtypes.missing import isna, isnull, notna, notnull
from pandas.core.categorical import Categorical
from pandas.core.groupby import Grouper
from pandas.io.formats.format import set_eng_float_format
from pandas.core.index import (Index, CategoricalIndex, Int64Index,
UInt64Index, RangeIndex, Float64Index,
MultiIndex, IntervalIndex,
TimedeltaIndex, DatetimeIndex,
PeriodIndex, NaT)
from pandas.core.indexes.period import Period, period_range, pnow
from pandas.core.indexes.timedeltas import Timedelta, timedelta_range
from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range
from pandas.core.indexes.interval import Interval, interval_range
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel, WidePanel
from pandas.core.panel4d import Panel4D
# TODO: Remove import when statsmodels updates #18264
from pandas.core.reshape.reshape import get_dummies
from pandas.core.indexing import IndexSlice
from pandas.core.tools.numeric import to_numeric
from pandas.tseries.offsets import DateOffset
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta
# see gh-14094.
from pandas.util._depr_module import _DeprecatedModule
_removals = ['day', 'bday', 'businessDay', 'cday', 'customBusinessDay',
'customBusinessMonthEnd', 'customBusinessMonthBegin',
'monthEnd', 'yearEnd', 'yearBegin', 'bmonthEnd', 'bmonthBegin',
'cbmonthEnd', 'cbmonthBegin', 'bquarterEnd', 'quarterEnd',
'byearEnd', 'week']
datetools = _DeprecatedModule(deprmod='pandas.core.datetools',
removals=_removals)
from pandas.core.config import (get_option, set_option, reset_option,
describe_option, option_context, options)
# deprecation, xref #13790
def match(*args, **kwargs):
import warnings
warnings.warn("pd.match() is deprecated and will be removed "
"in a future version",
FutureWarning, stacklevel=2)
from pandas.core.algorithms import match
return match(*args, **kwargs)
def groupby(*args, **kwargs):
import warnings
warnings.warn("pd.groupby() is deprecated and will be removed; "
"Please use the Series.groupby() or "
"DataFrame.groupby() methods",
FutureWarning, stacklevel=2)
return args[0].groupby(*args[1:], **kwargs)
# Deprecation: xref gh-16747
class TimeGrouper(object):
def __new__(cls, *args, **kwargs):
from pandas.core.resample import TimeGrouper
import warnings
warnings.warn("pd.TimeGrouper is deprecated and will be removed; "
"Please use pd.Grouper(freq=...)",
FutureWarning, stacklevel=2)
return TimeGrouper(*args, **kwargs)
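# Example (hedged): both lines build the same resampling grouper, but the
# first goes through the shim above and emits a FutureWarning:
#
#   pd.TimeGrouper(freq='M')  # deprecated spelling
#   pd.Grouper(freq='M')      # preferred spelling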
|
bsd-3-clause
| 2,358,515,945,609,053,000
| 37.839506
| 76
| 0.679275
| false
| 4.048906
| false
| false
| false
|
siddhantgoel/tornado-sqlalchemy
|
tests/test_session_mixin.py
|
1
|
1642
|
from unittest.mock import Mock
from tornado_sqlalchemy import MissingDatabaseSettingError, SessionMixin
from ._common import BaseTestCase, User, db
class SessionMixinTestCase(BaseTestCase):
def test_mixin_ok(self):
class GoodHandler(SessionMixin):
def __init__(h_self):
h_self.application = Mock()
h_self.application.settings = {'db': db}
def run(h_self):
with h_self.make_session() as session:
return session.query(User).count()
self.assertEqual(GoodHandler().run(), 0)
def test_mixin_no_db(self):
class BadHandler(SessionMixin):
def __init__(h_self):
h_self.application = Mock()
h_self.application.settings = {}
def run(h_self):
with h_self.make_session() as session:
return session.query(User).count()
self.assertRaises(MissingDatabaseSettingError, BadHandler().run)
def test_distinct_sessions(self):
sessions = set()
class Handler(SessionMixin):
def __init__(h_self):
db.configure(url=self.db_url)
h_self.application = Mock()
h_self.application.settings = {'db': db}
def run(h_self):
session = h_self.session
sessions.add(id(session))
value = session.query(User).count()
session.commit()
session.close()
return value
Handler().run()
Handler().run()
self.assertEqual(len(sessions), 2)
|
mit
| -7,001,430,187,323,076,000
| 27.807018
| 72
| 0.545676
| false
| 4.486339
| true
| false
| false
|
detrout/pykolab
|
pykolab/cli/telemetry/cmd_examine_session.py
|
1
|
4119
|
# -*- coding: utf-8 -*-
# Copyright 2010-2012 Kolab Systems AG (http://www.kolabsys.com)
#
# Jeroen van Meeuwen (Kolab Systems) <vanmeeuwen a kolabsys.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 or, at your option, any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import sys
import pykolab
from pykolab.translate import _
log = pykolab.getLogger('pykolab.cli')
conf = pykolab.getConf()
from pykolab import telemetry
from pykolab.cli import commands
def __init__():
commands.register('examine_session', execute, group='telemetry', description="Examine a Telemetry session.")
def execute(*args, **kw):
db = telemetry.init_db()
# The session id may arrive as a keyword argument (e.g. when this function
# recurses below); otherwise it is read from the positional CLI arguments.
session_id = kw.get('session_id', None)
wanted = False
if session_id is None:
try:
wanted = conf.cli_args.pop(0)
except IndexError:
log.error(_("Unspecified session identifier"))
sys.exit(1)
if not wanted:
wanted = session_id
session_wanted = None
try:
session_wanted = int(wanted)
except (TypeError, ValueError):
# Not an integer id, so treat it as a username.
user_wanted = wanted
if session_wanted is not None:
session = db.query(
telemetry.TelemetrySession
).filter_by(
id=session_wanted
).first()
if session is None:
log.error(_("Invalid session identifier"))
sys.exit(1)
user = db.query(
telemetry.TelemetryUser
).filter_by(
id=session.user_id
).first()
server = db.query(
telemetry.TelemetryServer
).filter_by(
id=session.server_id
).first()
else:
user = db.query(
telemetry.TelemetryUser
).filter_by(
sasl_username=user_wanted
).first()
sessions = db.query(
telemetry.TelemetrySession
).filter_by(
user_id=user.id
).order_by(
telemetry.telemetry_session_table.c.start
)
for session in sessions:
# Examine each of this user's sessions by recursing with its id.
execute(session_id=session.id)
return
print _("Session by %s on server %s") % (user.sasl_username,server.fqdn)
command_issues = db.query(
telemetry.TelemetryCommandIssue
).filter_by(
session_id=session.id
)
for command_issue in command_issues:
command = db.query(
telemetry.TelemetryCommand
).filter_by(
id=command_issue.command_id
).first()
command_arg = db.query(
telemetry.TelemetryCommandArg
).filter_by(
id=command_issue.command_arg_id
).first()
print "Client(%d): %s %s %s" % (
command_issue.id,
command_issue.command_tag,
command.command,
command_arg.command_arg
)
server_responses = db.query(
telemetry.TelemetryServerResponse
).filter_by(
command_issue_id=command_issue.id
)
for server_response in server_responses:
server_response_lines = server_response.response.split('\n')
for server_response_line in server_response_lines:
print "Server(%d): %s" % (
server_response.id,
server_response_line
)
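# Invocation sketch (hedged; the exact syntax depends on how the pykolab CLI
# dispatcher maps registered command and group names):
#
#   kolab telemetry examine_session 12
#   kolab telemetry examine_session jane.doe@example.org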
|
gpl-3.0
| 592,902,317,208,415,900
| 28.212766
| 112
| 0.554989
| false
| 4.290625
| false
| false
| false
|
gltn/stdm
|
stdm/ui/view_str.py
|
1
|
44716
|
"""
/***************************************************************************
Name : View STR Relationships
Description : Main Window for searching and browsing the social tenure
relationship of the participating entities.
Date : 24/May/2014
copyright : (C) 2014 by UN-Habitat and implementing partners.
See the accompanying file CONTRIBUTORS.txt in the root
email : stdm@unhabitat.org
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import logging
from collections import OrderedDict
from datetime import date
from qgis.PyQt import uic
from qgis.PyQt.QtCore import (
QTimer,
Qt,
QSize,
QObject,
pyqtSignal,
QThread,
QRegExp,
QSortFilterProxyModel,
pyqtSlot
)
from qgis.PyQt.QtWidgets import (
QMainWindow,
QDesktopWidget,
QToolBar,
QAction,
QApplication,
QProgressDialog,
QProgressBar,
QMessageBox,
QVBoxLayout,
QWidget,
QScrollArea,
QFrame,
QCheckBox,
QTabBar,
QCompleter
)
from qgis.core import QgsProject
from qgis.utils import (
iface
)
from sqlalchemy import exc
from sqlalchemy import (
func,
String
)
from stdm.data import globals
from stdm.data.configuration import entity_model
from stdm.data.database import Content
from stdm.data.pg_utils import pg_table_count
from stdm.data.qtmodels import (
BaseSTDMTableModel
)
from stdm.exceptions import DummyException
from stdm.security.authorization import Authorizer
from stdm.settings import current_profile
from stdm.ui.feature_details import DetailsTreeView
from stdm.ui.forms.widgets import ColumnWidgetRegistry
from stdm.ui.gui_utils import GuiUtils
from stdm.ui.notification import (
NotificationBar
)
from stdm.ui.social_tenure.str_editor import STREditor
from stdm.ui.sourcedocument import (
SourceDocumentManager,
DocumentWidget
)
from stdm.ui.spatial_unit_manager import SpatialUnitManagerDockWidget
from stdm.utils.util import (
entity_searchable_columns,
entity_display_columns,
format_name,
lookup_parent_entity
)
LOGGER = logging.getLogger('stdm')
WIDGET, BASE = uic.loadUiType(
GuiUtils.get_ui_file_path('ui_view_str.ui'))
class ViewSTRWidget(WIDGET, BASE):
"""
Search and browse the social tenure relationship
of all participating entities.
"""
def __init__(self, plugin):
QMainWindow.__init__(self, plugin.iface.mainWindow())
self.setupUi(self)
self.btnSearch.setIcon(GuiUtils.get_icon('search.png'))
self.btnClearSearch.setIcon(GuiUtils.get_icon('reset.png'))
self._plugin = plugin
self.search_done = False
# self.tbPropertyPreview.set_iface(self._plugin.iface)
QTimer.singleShot(
100, lambda: self.tbPropertyPreview.set_iface(self._plugin.iface))
self.curr_profile = current_profile()
self.spatial_units = self.curr_profile.social_tenure.spatial_units
# Center me
self.move(QDesktopWidget().availableGeometry().center() -
self.frameGeometry().center())
self.sp_unit_manager = SpatialUnitManagerDockWidget(
self._plugin.iface, self._plugin
)
self.geom_cols = []
for spatial_unit in self.spatial_units:
each_geom_col = self.sp_unit_manager.geom_columns(spatial_unit)
self.geom_cols.extend(each_geom_col)
# Configure notification bar
self._notif_search_config = NotificationBar(
self.vl_notification
)
# set whether currently logged in user has
# permissions to edit existing STR records
self._can_edit = self._plugin.STRCntGroup.canUpdate()
self._can_delete = self._plugin.STRCntGroup.canDelete()
self._can_create = self._plugin.STRCntGroup.canCreate()
# Variable used to store a reference to the
# currently selected social tenure relationship
# when displaying documents in the supporting documents tab window.
# This ensures that there are no duplicates
# when the same item is selected over and over again.
self._strID = None
self.removed_docs = None
# Used to store the root hash of the currently selected node.
self._curr_rootnode_hash = ""
self.str_model, self.str_doc_model = entity_model(
self.curr_profile.social_tenure, False, True
)
self._source_doc_manager = SourceDocumentManager(
self.curr_profile.social_tenure.supporting_doc,
self.str_doc_model,
self
)
self._source_doc_manager.documentRemoved.connect(
self.onSourceDocumentRemoved
)
self._source_doc_manager.setEditPermissions(False)
self.addSTR = None
self.editSTR = None
self.deleteSTR = None
self.initGui()
self.add_spatial_unit_layer()
self.details_tree_view = DetailsTreeView(parent=self, plugin=self._plugin)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.details_tree_view)
self.str_tree_container.setLayout(layout)
# else:
# self.details_tree_view = self._plugin.details_tree_view
self.details_tree_view.activate_feature_details(True)
self.details_tree_view.model.clear()
count = pg_table_count(self.curr_profile.social_tenure.name)
self.setWindowTitle(
self.tr('{}{}'.format(
self.windowTitle(), '- ' + str(count) + ' rows'
))
)
self.active_spu_id = -1
self.toolBox.setStyleSheet(
'''
QToolBox::tab {
background: qlineargradient(
x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #EDEDED, stop: 0.4 #EDEDED,
stop: 0.5 #EDEDED, stop: 1.0 #D3D3D3
);
border-radius: 2px;
border-style: outset;
border-width: 2px;
height: 100px;
border-color: #C3C3C3;
}
QToolBox::tab:selected {
font: italic;
}
'''
)
self.details_tree_view.view.setStyleSheet(
'''
QTreeView:!active {
selection-background-color: #72a6d9;
}
'''
)
def add_tool_buttons(self):
"""
Add toolbar buttons of add, edit and delete buttons.
:return: None
:rtype: NoneType
"""
tool_buttons = QToolBar()
tool_buttons.setObjectName('form_toolbar')
tool_buttons.setIconSize(QSize(16, 16))
self.addSTR = QAction(GuiUtils.get_icon(
'add.png'),
QApplication.translate('ViewSTRWidget', 'Add'),
self
)
self.editSTR = QAction(
GuiUtils.get_icon('edit.png'),
QApplication.translate('ViewSTRWidget', 'Edit'),
self
)
self.deleteSTR = QAction(
GuiUtils.get_icon('remove.png'),
QApplication.translate('ViewSTRWidget', 'Remove'),
self
)
tool_buttons.addAction(self.addSTR)
tool_buttons.addAction(self.editSTR)
tool_buttons.addAction(self.deleteSTR)
self.toolbarVBox.addWidget(tool_buttons)
def initGui(self):
"""
Initialize widget
"""
self.tb_actions.setVisible(False)
self._load_entity_configurations()
self.add_tool_buttons()
# Connect signals
self.tbSTREntity.currentChanged.connect(self.entityTabIndexChanged)
self.btnSearch.clicked.connect(self.searchEntityRelations)
self.btnClearSearch.clicked.connect(self.clearSearch)
# self.tvSTRResults.expanded.connect(self.onTreeViewItemExpanded)
# Set the results treeview to accept requests for context menus
# self.tvSTRResults.setContextMenuPolicy(Qt.CustomContextMenu)
# self.tvSTRResults.customContextMenuRequested.connect(
# self.onResultsContextMenuRequested
# )
if not self._can_create:
self.addSTR.hide()
if not self._can_edit:
self.editSTR.hide()
else:
self.editSTR.setDisabled(True)
if not self._can_delete:
self.deleteSTR.hide()
else:
self.deleteSTR.setDisabled(True)
self.addSTR.triggered.connect(self.load_new_str_editor)
self.deleteSTR.triggered.connect(self.delete_str)
self.editSTR.triggered.connect(self.load_edit_str_editor)
# Load async for the current widget
self.entityTabIndexChanged(0)
def init_progress_dialog(self):
"""
Initializes the progress dialog.
"""
self.progress = QProgressBar(self)
self.progress.resize(self.width(), 10)
self.progress.setTextVisible(False)
def add_spatial_unit_layer(self):
"""
Add the spatial unit layer into the map canvas for later use.
"""
# Used for startup of view STR, just add the first geom layer.
if len(self.geom_cols) > 0:
for spatial_unit in self.spatial_units:
layer_name_item = self.sp_unit_manager.geom_col_layer_name(
spatial_unit.name,
self.geom_cols[0]
)
self.sp_unit_manager.add_layer_by_name(layer_name_item)
def _check_permissions(self):
"""
Enable/disable actions based on the
permissions defined in the content
group.
"""
if self._can_edit:
self.tb_actions.addAction(self._new_str_action)
else:
self.tb_actions.removeAction(self._new_str_action)
if len(self.tb_actions.actions()) == 0:
self.tb_actions.setVisible(False)
else:
self.tb_actions.setVisible(True)
def _load_entity_configurations(self):
"""
Specify the entity configurations.
"""
try:
self.parties = self.curr_profile.social_tenure.parties
tb_str_entities = self.parties + self.spatial_units
for i, t in enumerate(tb_str_entities):
QApplication.processEvents()
entity_cfg = self._entity_config_from_profile(
str(t.name), t.short_name
)
if entity_cfg is not None:
entity_widget = self.add_entity_config(entity_cfg)
# entity_widget.setNodeFormatter(
# EntityNodeFormatter(
# entity_cfg, self.tvSTRResults, self
# )
# )
except DummyException as pe:
self._notif_search_config.clear()
self._notif_search_config.insertErrorNotification(str(pe))
def _entity_config_from_profile(self, table_name, short_name):
"""
Creates an EntityConfig object from the table name.
:param table_name: Name of the database table.
:type table_name: str
:return: Entity configuration object.
:rtype: EntityConfig
"""
table_display_name = format_name(short_name)
entity = self.curr_profile.entity_by_name(table_name)
model = entity_model(entity)
if model is not None:
# Entity configuration
entity_cfg = EntityConfiguration()
entity_cfg.Title = table_display_name
entity_cfg.STRModel = model
entity_cfg.data_source_name = table_name
for col, factory in self._get_widget_factory(entity):
entity_cfg.LookupFormatters[col.name] = factory
# Load filter and display columns
# using only those which are of
# numeric/varchar type
searchable_columns = entity_searchable_columns(entity)
display_columns = entity_display_columns(entity)
for c in searchable_columns:
if c != 'id':
entity_cfg.filterColumns[c] = format_name(c)
for c in display_columns:
if c != 'id':
entity_cfg.displayColumns[c] = format_name(c)
return entity_cfg
else:
return None
def _get_widget_factory(self, entity):
"""
Get widget factory for specific column type
:param entity: Current column entity object
:type entity: Entity
:return c: Column object corresponding to the widget factory
:rtype c: BaseColumn
:return col_factory: Widget factory corresponding to the column type
:rtype col_factory: ColumnWidgetRegistry
"""
for c in entity.columns.values():
col_factory = ColumnWidgetRegistry.factory(c.TYPE_INFO)
if col_factory is not None:
yield c, col_factory(c)
def add_entity_config(self, config):
"""
Set an entity configuration option and
add it to the 'Search Entity' tab.
"""
entityWidg = STRViewEntityWidget(config)
entityWidg.asyncStarted.connect(self._progressStart)
entityWidg.asyncFinished.connect(self._progressFinish)
tabIndex = self.tbSTREntity.addTab(entityWidg, config.Title)
return entityWidg
def entityTabIndexChanged(self, index):
"""
Raised when the tab index of the entity search tab widget changes.
"""
# Get the current widget in the tab container
entityWidget = self.tbSTREntity.currentWidget()
if isinstance(entityWidget, EntitySearchItem):
entityWidget.loadAsync()
def searchEntityRelations(self):
"""
Slot that searches for matching items for
the specified entity and corresponding STR entities.
"""
entityWidget = self.tbSTREntity.currentWidget()
entity_name = entityWidget.config.data_source_name
self._reset_controls()
if isinstance(entityWidget, EntitySearchItem):
valid, msg = entityWidget.validate()
if not valid:
self._notif_search_config.clear()
self._notif_search_config.insertErrorNotification(msg)
return
results, searchWord = entityWidget.executeSearch()
# Show error message
if len(results) == 0:
noResultsMsg = QApplication.translate(
'ViewSTR',
'No results found for "{}"'.format(searchWord)
)
self._notif_search_config.clear()
self._notif_search_config.insertErrorNotification(
noResultsMsg
)
return
party_names = [e.name for e in self.curr_profile.social_tenure.parties]
entity = self.curr_profile.entity_by_name(entity_name)
result_ids = [r.id for r in results]
if entity_name in party_names:
self.active_spu_id = self.details_tree_view.search_party(
entity, result_ids
)
else:
self.details_tree_view.search_spatial_unit(
entity, result_ids
)
# self.tbPropertyPreview._iface.activeLayer().selectByExpression("id={}".format(self.active_spu_id))
# self.details_tree_view._selected_features = self.tbPropertyPreview._iface.activeLayer().selectedFeatures()
# self._load_root_node(entity_name, formattedNode)
def clearSearch(self):
"""
Clear search input parameters (for current widget) and results.
"""
entityWidget = self.tbSTREntity.currentWidget()
if isinstance(entityWidget, EntitySearchItem):
entityWidget.reset()
self._reset_controls()
def _reset_controls(self):
# Clear tree view
self._resetTreeView()
# Clear document listings
self._deleteSourceDocTabs()
# Remove spatial unit memory layer
self.tbPropertyPreview.remove_layer()
def on_select_results(self):
"""
Slot which is raised when the selection
is changed in the tree view
selection model.
"""
if len(self.details_tree_view.view.selectedIndexes()) < 1:
self.disable_buttons()
return
self.search_done = True
index = self.details_tree_view.view.selectedIndexes()[0]
item = self.details_tree_view.model.itemFromIndex(index)
QApplication.processEvents()
# STR node - edit social tenure relationship
if item.text() == self.details_tree_view.str_text:
entity = self.curr_profile.social_tenure
str_model = self.details_tree_view.str_models[item.data()]
documents = self.details_tree_view._supporting_doc_models(
entity.name, str_model
)
self._load_source_documents(documents)
# if there is supporting document,
# expand supporting document tab
if len(documents) > 0:
self.toolBox.setCurrentIndex(1)
self.disable_buttons(False)
# party node - edit party
elif item.data() in self.details_tree_view.spatial_unit_items.keys():
self.toolBox.setCurrentIndex(0)
entity = self.details_tree_view.spatial_unit_items[item.data()]
model = self.details_tree_view.feature_model(entity, item.data())
self.draw_spatial_unit(entity.name, model)
self.disable_buttons()
canvas = iface.mapCanvas()
if canvas:
canvas.zoomToFullExtent()
else:
self.disable_buttons()
def disable_buttons(self, status=True):
if self._can_edit:
self.deleteSTR.setDisabled(status)
if self._can_delete:
self.editSTR.setDisabled(status)
def str_party_column_obj(self, record):
"""
Gets the current party column name in STR
table by finding party column with value
other than None.
:param record: The STR record or result.
:type record: Dictionary
:return: The party column name with value.
:rtype: String
"""
for party in self.parties:
party_name = party.short_name.lower()
party_id = '{}_id'.format(party_name)
if party_id not in record.__dict__:
return None
if record.__dict__[party_id] is not None:
party_id_obj = getattr(self.str_model, party_id)
return party_id_obj
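# Example (hedged; 'person' and 'farm' are illustrative party names): for an
# STR record where person_id=5 and farm_id is None, the loop above returns
# the str_model.person_id attribute, ready for use in a query filter.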
def load_edit_str_editor(self):
self.details_tree_view.edit_selected_node()
self.btnSearch.click()
self.disable_buttons()
def load_new_str_editor(self):
try:
# Check type of node and perform corresponding action
add_str = STREditor()
add_str.exec_()
except DummyException as ex:
QMessageBox.critical(
self._plugin.iface.mainWindow(),
QApplication.translate(
"STDMPlugin",
"Loading Error"
),
str(ex)
)
def delete_str(self):
self.details_tree_view.delete_selected_item()
self.btnSearch.click()
self.disable_buttons()
def onSourceDocumentRemoved(self, container_id, doc_uuid, removed_doc):
"""
Slot raised when a source document is removed from the container.
If there are no documents in the specified container then remove
the tab.
"""
curr_container = self.tbSupportingDocs.currentWidget()
curr_doc_widget = curr_container.findChildren(DocumentWidget)
for doc in curr_doc_widget:
if doc.fileUUID == doc_uuid:
doc.deleteLater()
self.removed_docs = removed_doc
def draw_spatial_unit(self, entity_name, model):
"""
Render the geometry of the given spatial unit in the spatial view.
:param row_id: Sqlalchemy object representing a feature.
"""
entity = self.curr_profile.entity_by_name(entity_name)
self.tbPropertyPreview.draw_spatial_unit(entity, model)
def showEvent(self, event):
"""
(Re)load map layers in the viewer and main canvas.
:param event: Window event
:type event: QShowEvent
"""
self.setEnabled(True)
if QTimer is not None:
QTimer.singleShot(200, self.init_mirror_map)
return QMainWindow.showEvent(self, event)
def init_mirror_map(self):
self._notify_no_base_layers()
# Add spatial unit layer if it doesn't exist
self.tbPropertyPreview.refresh_canvas_layers()
self.tbPropertyPreview.load_web_map()
def _notify_no_base_layers(self):
"""
Checks if there are any base layers that will be used when
visualizing the spatial units. If there are no base layers
then insert warning message.
"""
self._notif_search_config.clear()
num_layers = len(QgsProject.instance().mapLayers())
if num_layers == 0:
msg = QApplication.translate(
"ViewSTR",
"No basemap layers are loaded in the "
"current project. Basemap layers "
"enhance the visualization of spatial units."
)
self._notif_search_config.insertWarningNotification(msg)
def _deleteSourceDocTabs(self):
"""
Removes all source document tabs and deletes their references.
"""
tabCount = self.tbSupportingDocs.count()
while tabCount != 0:
srcDocWidget = self.tbSupportingDocs.widget(tabCount - 1)
self.tbSupportingDocs.removeTab(tabCount - 1)
del srcDocWidget
tabCount -= 1
self._strID = None
self._source_doc_manager.reset()
def _resetTreeView(self):
"""
Clears the results tree view.
"""
# Reset tree view
strModel = self.details_tree_view.view.model()
resultsSelModel = self.details_tree_view.view.selectionModel()
if strModel:
strModel.clear()
if resultsSelModel:
if self.search_done:
resultsSelModel.selectionChanged.disconnect(self.on_select_results)
resultsSelModel.selectionChanged.connect(self.on_select_results)
def _load_source_documents(self, source_docs):
"""
Load source documents into document listing widget.
"""
# Configure progress dialog
progress_msg = QApplication.translate(
"ViewSTR", "Loading supporting documents..."
)
progress_dialog = QProgressDialog(self)
if len(source_docs) > 0:
progress_dialog.setWindowTitle(progress_msg)
progress_dialog.setRange(0, len(source_docs))
progress_dialog.setWindowModality(Qt.WindowModal)
progress_dialog.setFixedWidth(380)
progress_dialog.show()
progress_dialog.setValue(0)
self._notif_search_config.clear()
self.tbSupportingDocs.clear()
self._source_doc_manager.reset()
if len(source_docs) < 1:
empty_msg = QApplication.translate(
'ViewSTR', 'No supporting document is uploaded '
'for this social tenure relationship.'
)
self._notif_search_config.clear()
self._notif_search_config.insertWarningNotification(empty_msg)
for i, (doc_type_id, doc_obj) in enumerate(source_docs.items()):
# add tabs, and container and widget for each tab
tab_title = self._source_doc_manager.doc_type_mapping[doc_type_id]
tab_widget = QWidget()
tab_widget.setObjectName(tab_title)
cont_layout = QVBoxLayout(tab_widget)
cont_layout.setObjectName('widget_layout_' + tab_title)
scrollArea = QScrollArea(tab_widget)
scrollArea.setFrameShape(QFrame.NoFrame)
scrollArea_contents = QWidget()
scrollArea_contents.setObjectName('tab_scroll_area_' + tab_title)
tab_layout = QVBoxLayout(scrollArea_contents)
tab_layout.setObjectName('layout_' + tab_title)
scrollArea.setWidgetResizable(True)
scrollArea.setWidget(scrollArea_contents)
cont_layout.addWidget(scrollArea)
self._source_doc_manager.registerContainer(
tab_layout, doc_type_id
)
for doc in doc_obj:
try:
# add doc widgets
self._source_doc_manager.insertDocFromModel(
doc, doc_type_id
)
except DummyException as ex:
LOGGER.debug(str(ex))
self.tbSupportingDocs.addTab(
tab_widget, tab_title
)
progress_dialog.setValue(i + 1)
progress_dialog.deleteLater()
del progress_dialog
# def _on_node_reference_changed(self, rootHash):
# """
# Method for resetting document listing and map preview
# if another root node and its children
# are selected then the documents are reset as
# well as the map preview control.
# """
# if rootHash != self._curr_rootnode_hash:
# self._deleteSourceDocTabs()
# self._curr_rootnode_hash = rootHash
def _progressStart(self):
"""
Load progress dialog window.
For items whose durations is unknown,
'isindefinite' = True by default.
If 'isindefinite' is False, then
'rangeitems' has to be specified.
"""
pass
def _progressFinish(self):
"""
Hide progress dialog window.
"""
pass
def _edit_permissions(self):
"""
Returns True/False whether the current logged in user
has permissions to create new social tenure relationships.
If true, then the system assumes that
they can also edit STR records.
"""
canEdit = False
userName = globals.APP_DBCONN.User.UserName
authorizer = Authorizer(userName)
newSTRCode = "9576A88D-C434-40A6-A318-F830216CA15A"
# Get the name of the content from the code
cnt = Content()
createSTRCnt = cnt.queryObject().filter(
Content.code == newSTRCode
).first()
if createSTRCnt:
name = createSTRCnt.name
canEdit = authorizer.CheckAccess(name)
return canEdit
class EntitySearchItem(QObject):
"""
Abstract class for implementation by widgets that
enable users to search for entity records.
"""
def __init__(self, formatter=None):
super().__init__()
# Specify the formatter that should be
# applied on the result item. It should
# inherit from 'stdm.navigation.STRNodeFormatter'
self.formatter = formatter
def setNodeFormatter(self, formatter):
"""
Set the formatter that should be
applied on the entity search results.
"""
self.formatter = formatter
def validate(self):
"""
Method for validating the input arguments
before a search is conducted.
Should return bool indicating whether validation
was successful and message (applicable if validation fails).
"""
raise NotImplementedError()
def executeSearch(self):
"""
Implemented when a search operation
is executed. Should return a tuple of formatted
results for rendering in the tree view, raw
object results, and the search word.
"""
raise NotImplementedError(
str(
QApplication.translate(
"ViewSTR",
"Subclass must implement abstract method."
)
)
)
def loadAsync(self):
"""
Any initialization that needs to be carried
out when the parent container is activated.
"""
pass
def errorHandler(self, error):
"""
Generic handler that logs error
messages to the QGIS message log
"""
# QgsMessageLog.logMessage(error,2)
LOGGER.debug(error)
def reset(self):
"""
Clear search results.
"""
pass
WIDGET2, BASE2 = uic.loadUiType(
GuiUtils.get_ui_file_path('ui_str_view_entity.ui'))
class STRViewEntityWidget(WIDGET2, BASE2, EntitySearchItem):
"""
A widget that represents options for searching through an entity.
"""
asyncStarted = pyqtSignal()
asyncFinished = pyqtSignal()
def __init__(self, config, formatter=None, parent=None):
QWidget.__init__(self, parent)
EntitySearchItem.__init__(self, formatter)
self.setupUi(self)
self.tbSTRViewEntity.setTabIcon(0, GuiUtils.get_icon('filter.png'))
self.tbSTRViewEntity.setTabIcon(1, GuiUtils.get_icon('period_blue.png'))
self.config = config
self.setConfigOptions()
self.curr_profile = current_profile()
self.social_tenure = self.curr_profile.social_tenure
self.str_model = entity_model(self.social_tenure)
# Model for storing display and actual mapping values
self._completer_model = None
self._proxy_completer_model = None
# Hook up signals
self.cboFilterCol.currentIndexChanged.connect(
self._on_column_index_changed
)
self.init_validity_dates()
self.validity_from_date.dateChanged.connect(
self.set_minimum_to_date
)
self.validity.setDisabled(True)
self.init_validity_checkbox()
def init_validity_checkbox(self):
self.check_box_list = []
self.validity_checkbox = QCheckBox()
self.check_box_list.append(self.validity_checkbox)
self.tbSTRViewEntity.tabBar().setTabButton(
self.tbSTRViewEntity.tabBar().count() - 1,
QTabBar.LeftSide, self.validity_checkbox
)
self.validity_checkbox.stateChanged.connect(self.toggle_validity_period)
def toggle_validity_period(self, state):
if state == Qt.Checked:
self.validity.setDisabled(False)
else:
self.validity.setDisabled(True)
def set_minimum_to_date(self):
"""
Set the minimum to date based on the
change in value of from date.
:return:
:rtype:
"""
self.validity_to_date.setMinimumDate(
self.validity_from_date.date()
)
def init_validity_dates(self):
"""
Initialize the dates by setting the current date.
:return:
:rtype:
"""
self.validity_from_date.setDate(
date.today()
)
self.validity_to_date.setDate(
date.today()
)
def setConfigOptions(self):
"""
Apply configuration options.
"""
# Set filter columns and remove id column
for col_name, display_name in self.config.filterColumns.items():
if col_name != "id":
self.cboFilterCol.addItem(
display_name, col_name
)
def loadAsync(self):
"""
Asynchronously loads an entity's attribute values.
"""
self.asyncStarted.emit()
# Create model worker
workerThread = QThread(self)
modelWorker = ModelWorker()
modelWorker.moveToThread(workerThread)
# Connect signals
modelWorker.error.connect(self.errorHandler)
workerThread.started.connect(
lambda: modelWorker.fetch(
self.config.STRModel, self.currentFieldName()
)
)
modelWorker.retrieved.connect(self._asyncFinished)
modelWorker.retrieved.connect(workerThread.quit)
workerThread.finished.connect(modelWorker.deleteLater)
workerThread.finished.connect(workerThread.deleteLater)
# Start thread
workerThread.start()
def validate(self):
"""
Validate entity search widget
"""
is_valid = True
message = ""
if self.txtFilterPattern.text() == "":
message = QApplication.translate(
"ViewSTR", "Search word cannot be empty."
)
is_valid = False
return is_valid, message
def executeSearch(self):
"""
Base class override.
Search for matching items for the specified entity and column.
"""
model_root_node = None
prog_dialog = QProgressDialog(self)
prog_dialog.setFixedWidth(380)
prog_dialog.setWindowTitle(
QApplication.translate(
"STRViewEntityWidget",
"Searching for STR..."
)
)
prog_dialog.show()
prog_dialog.setRange(
0, 10
)
search_term = self._searchTerm()
prog_dialog.setValue(2)
# Try to get the corresponding search term value from the completer model
if self._completer_model is not None:
reg_exp = QRegExp("^%s$" % search_term, Qt.CaseInsensitive,
QRegExp.RegExp2)
self._proxy_completer_model.setFilterRegExp(reg_exp)
if self._proxy_completer_model.rowCount() > 0:
# Get corresponding actual value from the first matching item
value_model_idx = self._proxy_completer_model.index(0, 1)
source_model_idx = self._proxy_completer_model.mapToSource(
value_model_idx
)
prog_dialog.setValue(4)
search_term = self._completer_model.data(
source_model_idx, Qt.DisplayRole
)
modelInstance = self.config.STRModel()
modelQueryObj = modelInstance.queryObject()
queryObjProperty = getattr(
self.config.STRModel, self.currentFieldName()
)
entity_name = modelQueryObj._primary_entity._label_name
entity = self.curr_profile.entity_by_name(entity_name)
prog_dialog.setValue(6)
# Get property type so that the filter can
# be applied according to the appropriate type
propType = queryObjProperty.property.columns[0].type
results = []
try:
if not isinstance(propType, String):
col_name = self.currentFieldName()
col = entity.columns[self.currentFieldName()]
if col.TYPE_INFO == 'LOOKUP':
lookup_entity = lookup_parent_entity(
self.curr_profile, col_name
)
lkp_model = entity_model(lookup_entity)
lkp_obj = lkp_model()
value_obj = getattr(
lkp_model, 'value'
)
result = lkp_obj.queryObject().filter(
func.lower(value_obj) == func.lower(search_term)
).first()
if result is None:
result = lkp_obj.queryObject().filter(
func.lower(value_obj).like(search_term + '%')
).first()
if result is not None:
results = modelQueryObj.filter(
queryObjProperty == result.id
).all()
else:
results = []
else:
results = modelQueryObj.filter(
func.lower(queryObjProperty) == func.lower(search_term)
).all()
if self.validity.isEnabled():
valid_str_ids = self.str_validity_period_filter(results)
else:
valid_str_ids = None
prog_dialog.setValue(7)
except exc.StatementError:
prog_dialog.deleteLater()
del prog_dialog
# Keep the return shape consistent with the success path below.
return [], search_term
# if self.formatter is not None:
# self.formatter.setData(results)
# model_root_node = self.formatter.root(valid_str_ids)
prog_dialog.setValue(10)
prog_dialog.hide()
prog_dialog.deleteLater()
del prog_dialog
return results, search_term
def str_validity_period_filter(self, results):
"""
Filter the entity results using validity period in STR table.
:param results: Entity result
:type results: SQLAlchemy result proxy
:return: Valid list of STR ids
:rtype: List
"""
self.str_model_obj = self.str_model()
valid_str_ids = []
for result in results:
from_date = self.validity_from_date.date().toPyDate()
to_date = self.validity_to_date.date().toPyDate()
entity_id = '{}_id'.format(result.__table__.name[3:])
str_column_obj = getattr(self.str_model, entity_id)
str_result = self.str_model_obj.queryObject().filter(
self.str_model.validity_start >= from_date).filter(
self.str_model.validity_end <= to_date
).filter(str_column_obj == result.id).all()
for res in str_result:
valid_str_ids.append(res.id)
return valid_str_ids
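# Example (hedged): with the validity widgets set to 2014-01-01 .. 2014-12-31,
# only STR rows whose validity_start/validity_end interval lies inside that
# window contribute their ids to the returned list.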
def reset(self):
"""
Clear search input parameters.
"""
self.txtFilterPattern.clear()
if self.cboFilterCol.count() > 0:
self.cboFilterCol.setCurrentIndex(0)
def currentFieldName(self):
"""
Returns the name of the database field
from the current item in the combo box.
"""
curr_index = self.cboFilterCol.currentIndex()
field_name = self.cboFilterCol.itemData(curr_index)
if field_name is None:
return
else:
return field_name
def _searchTerm(self):
"""
Returns the search term specified by the user.
"""
return self.txtFilterPattern.text()
def _asyncFinished(self, model_values):
"""
Slot raised when worker has finished retrieving items.
"""
# Create QCompleter and add values to it.
self._update_completer(model_values)
self.asyncFinished.emit()
def _update_completer(self, values):
# Get the items in a tuple and put them in a list
# Store display and actual values in a
# model for easier mapping and
# retrieval when carrying out searches
model_attr_mapping = []
# Check if there are formaters specified
# for the current field name
for mv in values:
f_model_values = []
m_val = mv[0]
if m_val is not None:
col_label = self.currentFieldName()
if col_label in self.config.LookupFormatters:
formatter = self.config.LookupFormatters[col_label]
if formatter.column.TYPE_INFO == 'LOOKUP':
m_val = formatter.code_value(m_val)[0]
else:
m_val = formatter.format_column_value(m_val)
f_model_values.extend([m_val, m_val])
model_attr_mapping.append(f_model_values)
self._completer_model = BaseSTDMTableModel(model_attr_mapping, ["", ""], self)
# We will use the QSortFilterProxyModel for filtering purposes
self._proxy_completer_model = QSortFilterProxyModel()
self._proxy_completer_model.setDynamicSortFilter(True)
self._proxy_completer_model.setSourceModel(self._completer_model)
self._proxy_completer_model.setSortCaseSensitivity(Qt.CaseInsensitive)
self._proxy_completer_model.setFilterKeyColumn(0)
# Configure completer
mod_completer = QCompleter(self._completer_model, self)
mod_completer.setCaseSensitivity(Qt.CaseInsensitive)
mod_completer.setCompletionMode(QCompleter.PopupCompletion)
mod_completer.setCompletionColumn(0)
mod_completer.setCompletionRole(Qt.DisplayRole)
self.txtFilterPattern.setCompleter(mod_completer)
def _on_column_index_changed(self, index):
"""
Slot raised when the user selects a different filter column.
"""
self.txtFilterPattern.clear()
self.loadAsync()
class EntityConfiguration(object):
"""
Specifies the configuration to apply when creating
a new tab widget for performing entity searches.
"""
browseDescription = "Click on the browse button below to load entity " \
"records and their corresponding social tenure " \
"relationship definitions."
defaultFieldName = ""
# Format of each dictionary item:
# property/db column name - display name
filterColumns = OrderedDict()
displayColumns = OrderedDict()
groupBy = ""
STRModel = None
Title = ""
data_source_name = ""
# Functions for formatting values before
# they are loaded into the completer
LookupFormatters = {}
def __init__(self):
# Reset filter and display columns
self.filterColumns = OrderedDict()
self.displayColumns = OrderedDict()
class ModelWorker(QObject):
"""
Worker for retrieving model attribute
values stored in the database.
"""
retrieved = pyqtSignal(object)
error = pyqtSignal(str)
    @pyqtSlot(object, str)
def fetch(self, model, fieldname):
"""
Fetch attribute values from the
database for the specified model
and corresponding column name.
"""
try:
if hasattr(model, fieldname):
modelInstance = model()
obj_property = getattr(model, fieldname)
model_values = modelInstance.queryObject(
[obj_property]
).distinct()
self.retrieved.emit(model_values)
except DummyException as ex:
self.error.emit(str(ex))
|
gpl-2.0
| -8,273,558,001,482,112,000
| 32.030441
| 120
| 0.562371
| false
| 4.507661
| true
| false
| false
|
multikatt/beets
|
beetsplug/permissions.py
|
1
|
3116
|
from __future__ import (division, absolute_import, print_function,
unicode_literals)
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
permissions:
file: 644
dir: 755
"""
import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry
def convert_perm(perm):
"""If the perm is a int it will first convert it to a string and back
to an oct int. Else it just converts it to oct.
"""
if isinstance(perm, int):
return int(bytes(perm), 8)
else:
return int(perm, 8)
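# A quick sketch of the conversion above (Python 2 semantics, where bytes()
# is str()): int and string config values yield the same octal mode.
#
#     >>> convert_perm(644)         # 644 -> '644' -> int('644', 8)
#     420
#     >>> oct(convert_perm('644'))
#     '0644'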
def check_permissions(path, permission):
"""Checks the permissions of a path.
"""
return oct(os.stat(path).st_mode & 0o777) == oct(permission)
def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path.
"""
return [ancestor
for ancestor in ancestry(item)
if ancestor.startswith(library)][1:]
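# An illustrative sketch (hypothetical paths): with library='/music' and
# item='/music/artist/album/track.mp3', ancestry(item) yields
# ['/', '/music', '/music/artist', '/music/artist/album']; keeping only
# ancestors inside the library and dropping the library root itself leaves
# ['/music/artist', '/music/artist/album'].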
class Permissions(BeetsPlugin):
def __init__(self):
super(Permissions, self).__init__()
# Adding defaults.
self.config.add({
u'file': 644,
u'dir': 755
})
self.register_listener('item_imported', permissions)
self.register_listener('album_imported', permissions)
def permissions(lib, item=None, album=None):
"""Running the permission fixer.
"""
# Getting the config.
file_perm = config['permissions']['file'].get()
dir_perm = config['permissions']['dir'].get()
# Converts permissions to oct.
file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm)
# Create chmod_queue.
file_chmod_queue = []
if item:
file_chmod_queue.append(item.path)
elif album:
for album_item in album.items():
file_chmod_queue.append(album_item.path)
# A set of directories to change permissions for.
dir_chmod_queue = set()
for path in file_chmod_queue:
# Changing permissions on the destination file.
os.chmod(util.bytestring_path(path), file_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), file_perm):
message = 'There was a problem setting permission on {}'.format(
path)
print(message)
# Adding directories to the directory chmod queue.
dir_chmod_queue.update(
dirs_in_library(config['directory'].get(),
path))
# Change permissions for the directories.
for path in dir_chmod_queue:
# Chaning permissions on the destination directory.
os.chmod(util.bytestring_path(path), dir_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), dir_perm):
message = 'There was a problem setting permission on {}'.format(
path)
print(message)
|
mit
| 8,238,867,990,123,123,000
| 29.851485
| 78
| 0.626765
| false
| 4.171352
| true
| false
| false
|
gonadarian/kagen
|
kagen/khan.py
|
1
|
1955
|
import os
import csv
import json
import pymongo
from kagen import utils
from kagen.utils import config
from datetime import datetime
logger = utils.get_logger("khan")
def work():
khan = utils.get_conn_khan()
db = utils.get_conn_mongo()
dtf = "%Y-%m-%dT%H:%M:%SZ"
doc = utils.get_response_json(khan, "/api/v1/playlists")
    for playlist in doc:
        playlist["_id"] = playlist["id"]
        playlist["backup_timestamp"] = datetime.strptime(
            playlist["backup_timestamp"], dtf)
db.playlists.drop()
db.playlists.insert(doc)
logger.info("loaded {} items in playlists collection".format(len(doc)))
doc = utils.get_response_json(khan, "/api/v1/playlists/library")
db.playlists_library.drop()
db.playlists_library.insert(doc)
logger.info("loaded {} items in playlists_library collection".format(len(doc)))
doc = utils.get_response_json(khan, "/api/v1/playlists/library/list")
for playlist in doc:
playlist["_id"] = playlist["id"]
playlist["backup_timestamp"] = datetime.strptime(playlist["backup_timestamp"], dtf)
db.playlists_library_list.drop()
db.playlists_library_list.insert(doc)
logger.info("loaded {} items in playlists_library_list collection".format(len(doc)))
videos = []
ids = []
for playlist in doc:
for video in playlist["videos"]:
video_id = video["id"]
if video_id not in ids:
video["_id"] = video_id
videos.append(video)
ids.append(video_id)
video["date_added"] = datetime.strptime(video["date_added"], dtf)
video["backup_timestamp"] = datetime.strptime(video["backup_timestamp"], dtf)
db.video_list.drop()
db.video_list.insert(videos)
logger.info("loaded {} items in video_list collection".format(len(videos)))
@utils.entry_point
def main():
logger.info("START khan")
work()
logger.info("DONE khan")
|
mit
| 5,795,363,018,765,925,000
| 31.583333
| 91
| 0.640409
| false
| 3.528881
| false
| false
| false
|
irvined1982/olweb-clients
|
bin/bkill.py
|
1
|
3674
|
#!/usr/bin/env python
# Copyright 2014 David Irvine
#
# This file is part of olwclients
#
# olwclients is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# olwclients is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with olwclients. If not, see <http://www.gnu.org/licenses/>.
import argparse
from olwclient import *
import getpass
import re
import sys
parser = argparse.ArgumentParser(description='Displays information about hosts')
OpenLavaConnection.configure_argument_list(parser)
parser.add_argument("-J", dest="job_name", default=None,
help="Operates only on jobs with the specified job_name. The -J option is ignored if a job ID \
other than 0 is specified in the job_ID option.")
parser.add_argument("-m", dest="host_name", default=None,
help="Operates only on jobs dispatched to the specified host or host group.")
parser.add_argument("-q", dest="queue_name", default=None,
help="Operates only on jobs in the specified queue.")
parser.add_argument("-u", dest="user_name", default=getpass.getuser(),
help="Operates only on jobs submitted by the specified user or user group (see bugroup(1)), or by \
all users if the reserved user name all is specified.")
parser.add_argument("job_ids", nargs='+', type=str, default=None,
help='Operates only on jobs that are specified by job_ID or "job_ID[index]", where \
"job_ID[index]" specifies selected job array elements (see bjobs(1)). For job arrays, quotation \
marks must enclose the job ID and index, and index must be enclosed in square brackets.')
parser.add_argument("-s", dest="signal", default="kill", choices=["kill", "suspend", "resume", "requeue"],
help="Sends the specified signal to specified jobs. Signals can be one of: kill, suspend, resume, \
requeue,")
args = parser.parse_args()
connection = OpenLavaConnection(args)
if 0 in args.job_ids or "0" in args.job_ids:
jobs = Job.get_job_list(connection,
user_name=args.user_name,
host_name=args.host_name,
queue_name=args.queue_name,
job_name=args.job_name,
)
else:
jobs = []
for job_id in args.job_ids:
try:
jid = int(job_id)
aid = 0
except ValueError:
            match = re.search(r'(\d+)\[(\d+)\]', job_id)
            if match:
                jid = int(match.group(1))
                aid = int(match.group(2))
else:
print "Invalid job id: %s" % job_id
sys.exit(1)
jobs.append(Job(connection, job_id=jid, array_index=aid))
try:
for job in jobs:
try:
print "Sending %s signal to job: %s[%s]" % (args.signal, job.job_id, job.array_index)
getattr(job, args.signal)()
except PermissionDeniedError, e:
print "Unable to perform action on job: %s[%s]: %s" % (job.job_id, job.array_index, e.message)
except RemoteServerError, e:
print "Unable to display job information: %s" % e.message
sys.exit(1)
|
gpl-2.0
| 4,289,308,900,084,577,300
| 42.223529
| 119
| 0.619488
| false
| 3.963323
| false
| false
| false
|
panosl/helios
|
helios/orders/forms.py
|
1
|
1114
|
from django import forms
from helios.shipping.models import ShippingMethodRegions
class ShippingChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return u'%s, %s - %s' % (obj.method.name, obj.method.shipper, obj.cost)
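# e.g. (hypothetical data) a shipping method named 'Express' from shipper
# 'ACME' with a cost of 12.50 would render as u'Express, ACME - 12.50'.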
# todo this needs to be handled either here
# or in the checkout view in the store app
class ShippingOrderForm(forms.Form):
def __init__(self, customer, *args, **kwargs):
super(ShippingOrderForm, self).__init__(*args, **kwargs)
methods = [region.shippingmethodregions_set.all()
for region in customer.country.shippingregion_set.all()]
methods = [method[0] for method in methods]
self.fields['shipping_choice'].queryset = ShippingMethodRegions.objects.filter(id__in=[method.id for method in methods])
shipping_choice = ShippingChoiceField(
queryset=ShippingMethodRegions.objects.all(),
empty_label=None,
widget=forms.RadioSelect(attrs={
'class': 'order',
'onclick': '$("#shipping_choice").submit()',
})
)
class OrderForm(forms.Form):
pass
|
bsd-3-clause
| 1,921,637,111,047,108,400
| 34.935484
| 128
| 0.670557
| false
| 3.776271
| false
| false
| false
|
buaawp/pums
|
mock/migrations/0001_initial.py
|
1
|
3879
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='LtMockModule',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default='module', max_length=128)),
('description', models.CharField(max_length=1024, blank=True)),
],
),
migrations.CreateModel(
name='LtMockProject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default='project', max_length=128)),
('description', models.CharField(max_length=1024, blank=True)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='LtMockRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default='defaultName', max_length=128)),
('method', models.CharField(default='GET', max_length=20)),
('address', models.CharField(default='defaultUrl', max_length=2048)),
('params', models.CharField(max_length=1648, blank=True)),
('module', models.ForeignKey(to='mock.LtMockModule')),
('project', models.ForeignKey(to='mock.LtMockProject')),
],
),
migrations.CreateModel(
name='LtMockRequestHeader',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(default='defaultkey', max_length=128)),
('value', models.CharField(max_length=1024, blank=True)),
],
),
migrations.CreateModel(
name='LtMockRequestParam',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(default='defaultkey', max_length=128)),
('value', models.CharField(max_length=1024, blank=True)),
],
),
migrations.CreateModel(
name='LtMockResponse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default='defaultresponse', max_length=128)),
('template', models.CharField(max_length=2048, blank=True)),
('sample', models.CharField(max_length=2048, blank=True)),
],
),
migrations.AddField(
model_name='ltmockrequest',
name='requestheader',
field=models.ForeignKey(blank=True, to='mock.LtMockRequestHeader', null=True),
),
migrations.AddField(
model_name='ltmockrequest',
name='requestparam',
field=models.ForeignKey(blank=True, to='mock.LtMockRequestParam', null=True),
),
migrations.AddField(
model_name='ltmockrequest',
name='response',
field=models.ForeignKey(blank=True, to='mock.LtMockResponse', null=True),
),
migrations.AddField(
model_name='ltmockmodule',
name='project',
field=models.ForeignKey(to='mock.LtMockProject'),
),
]
|
mit
| 6,251,896,528,812,043,000
| 42.58427
| 114
| 0.564836
| false
| 4.453502
| false
| false
| false
|
cossatot/culpable
|
culpable/magnitudes.py
|
1
|
22751
|
import numpy as np
from .stats import Pdf, pdf_from_samples, multiply_pdfs, divide_pdfs
"""
Scaling relationships and related equations for earthquake magnitude
calculations.
"""
"""
Normalized slip distribution from Biasi and Weldon, 2006
"""
Dn_x = np.array(
[ 0. , 0.03852144, 0.07704287, 0.11556431, 0.15408574,
0.19260718, 0.23112861, 0.26965005, 0.30817149, 0.34669292,
0.38521436, 0.42373579, 0.46225723, 0.50077866, 0.5393001 ,
0.57782153, 0.61634297, 0.65486441, 0.69338584, 0.73190728,
0.77042871, 0.80895015, 0.84747158, 0.88599302, 0.92451446,
0.96303589, 1.00155733, 1.04007876, 1.0786002 , 1.11712163,
1.15564307, 1.19416451, 1.23268594, 1.27120738, 1.30972881,
1.34825025, 1.38677168, 1.42529312, 1.46381456, 1.50233599,
1.54085743, 1.57937886, 1.6179003 , 1.65642173, 1.69494317,
1.7334646 , 1.77198604, 1.81050748, 1.84902891, 1.88755035,
1.92607178, 1.96459322, 2.00311465, 2.04163609, 2.08015753,
2.11867896, 2.1572004 , 2.19572183, 2.23424327, 2.2727647 ,
2.31128614, 2.34980758, 2.38832901, 2.42685045, 2.46537188,
2.50389332, 2.54241475, 2.58093619, 2.61945762, 2.65797906,
2.6965005 , 2.73502193, 2.77354337, 2.8120648 , 2.85058624,
2.88910767, 2.92762911, 2.96615055, 3.00467198, 3.04319342,
3.08171485, 3.12023629, 3.15875772, 3.19727916, 3.2358006 ,
3.27432203, 3.31284347, 3.3513649 , 3.38988634, 3.42840777,
3.46692921, 3.50545064, 3.54397208, 3.58249352, 3.62101495,
3.65953639, 3.69805782, 3.73657926, 3.77510069, 3.81362213])
Dn_y = np.array(
[ 3.56431234e-01, 4.07514412e-01, 4.49469325e-01, 4.80250978e-01,
4.99600050e-01, 5.08967345e-01, 5.11056831e-01, 5.09135209e-01,
5.06305810e-01, 5.04929021e-01, 5.06305202e-01, 5.10647854e-01,
5.17294850e-01, 5.25056042e-01, 5.32585263e-01, 5.38688051e-01,
5.42518154e-01, 5.43657945e-01, 5.42107125e-01, 5.38215229e-01,
5.32589131e-01, 5.25993774e-01, 5.19250549e-01, 5.13129949e-01,
5.08236899e-01, 5.04898081e-01, 5.03074847e-01, 5.02334004e-01,
5.01903866e-01, 5.00822254e-01, 4.98152675e-01, 4.93216557e-01,
4.85776256e-01, 4.76112653e-01, 4.64970884e-01, 4.53387277e-01,
4.42445033e-01, 4.33023117e-01, 4.25598012e-01, 4.20136711e-01,
4.16092401e-01, 4.12492219e-01, 4.08093894e-01, 4.01583982e-01,
3.91790171e-01, 3.77880214e-01, 3.59519131e-01, 3.36956396e-01,
3.11019404e-01, 2.83002312e-01, 2.54461304e-01, 2.26954105e-01,
2.01783046e-01, 1.79805426e-01, 1.61356306e-01, 1.46292387e-01,
1.34126853e-01, 1.24201482e-01, 1.15842979e-01, 1.08470898e-01,
1.01650879e-01, 9.51051805e-02, 8.86970782e-02, 8.24006991e-02,
7.62618151e-02, 7.03540397e-02, 6.47382510e-02, 5.94357659e-02,
5.44230300e-02, 4.96471997e-02, 4.50527124e-02, 4.06047119e-02,
3.62987575e-02, 3.21550847e-02, 2.82040784e-02, 2.44727150e-02,
2.09786579e-02, 1.77325398e-02, 1.47440829e-02, 1.20266593e-02,
9.59725861e-03, 7.47225770e-03, 5.66159378e-03, 4.16411755e-03,
2.96568107e-03, 2.04006393e-03, 1.35194170e-03, 8.60866657e-04,
5.25372416e-04, 3.06545806e-04, 1.70626053e-04, 9.04155999e-05,
4.55329491e-05, 2.17590136e-05, 9.85449333e-06, 4.22528115e-06,
1.71367970e-06, 6.56980895e-07, 2.37946616e-07, 8.13790788e-08])
Dn = Pdf(Dn_x, Dn_y)
Dn_sb = multiply_pdfs(Dn, Pdf([Dn_x.min(), Dn_x.max()],
[Dn_x.min(), Dn_x.max()]))
"""
Probability distribution for an earthquake breaking the surface given
Gutenberg-Richter prior; to be used as a p(M) prior for paleoseismic magnitudes
from Biasi and Weldon 2006
"""
gr_pm_x = [5.000, 5.001, 5.057, 5.097, 5.192, 5.300, 5.392, 5.499, 5.597,
5.753, 5.922, 6.021, 6.211, 6.353, 6.533, 6.604, 6.771, 6.999,
7.280, 7.507, 7.726, 7.953, 8.182]
gr_pm_y = [0.000, 0.030, 0.050, 0.063, 0.081, 0.089, 0.089, 0.085, 0.079,
0.067, 0.054, 0.047, 0.035, 0.027, 0.020, 0.018, 0.013, 0.008,
0.005, 0.003, 0.002, 9.785e-4, 0.00]
"""
Conversion functions
"""
def _exp_10(x):
return 10**x
log_fn = {'e': np.log,
'10': np.log10}
exp_fn = {'e': np.exp,
'10': _exp_10}
M_from_D_coeffs = {'BW_2006': {'a': 6.94,
'b': 1.14,
'log_base': '10'},
# WC_1994 are for Average Displacement, not max.
'WC_1994_all': {'a': 6.93,
'b': 0.82,
'log_base': '10'},
'WC_1994_SS': {'a': 7.04,
'b': 0.89,
'log_base': '10'},
'WC_1994_R': {'a': 6.64,
'b': 0.13,
'log_base': '10'},
'WC_1994_N': {'a': 6.78,
'b': 0.65,
'log_base': '10'},
}
M_from_L_coeffs = {'Stirling_2002_instr': {'a': 5.45,
'a_err': 0.08,
'b': 0.95,
'b_err': 0.06,
'log_base': '10'},
'Stirling_2002_pre_instr': {'a': 5.89,
'a_err': 0.11,
'b': 0.79,
'b_err': 0.06,
'log_base': '10'},
'WC_1994_all': {'a': 5.08,
'a_err': 0.1,
'b': 1.16,
'b_err': 0.07,
'log_base': '10'},
'WC_1994_SS': {'a': 5.16,
'a_err': 0.13,
'b': 1.12,
'b_err': 0.08,
'log_base': '10'},
'WC_1994_R': {'a': 5.00,
'a_err': 0.22,
'b': 1.22,
'b_err': 0.16,
'log_base': '10'},
'WC_1994_N': {'a': 4.86,
'a_err': 0.34,
'b': 1.32,
'b_err': 0.26,
'log_base': '10'},
}
def M_from_D(D, ref='BW_2006', a=None, b=None, base='e'):
"""
    Moment magnitude from displacement, using the specified scaling
    relationship (keyword 'ref', or parameters 'a', 'b' and 'base').
General relationship is M = a + b * log(D).
Parameters
----------
D : Scalar or vector values for displacement (in meters)
ref : string indicating scaling relationship.
'BW_2006' is Biasi and Weldon (2006) (default).
'WC_1994_all' is Wells and Coppersmith (1994) for all events.
'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events.
'WC_1994_R' is Wells and Coppersmith (1994) for reverse events.
'WC_1994_N' is Wells and Coppersmith (1994) for normal events.
`ref=None` will allow you to enter your own coefficients and base.
a : Scalar, or vector of same length as D.
b : Scalar, or vector of same length as D.
base : String, base for logarithm, default 'e'.
'e' is natural log.
'10' is log10.
Returns
-------
M : Scalar or vector of calculated magnitude, with shape of D.
"""
if ref is not None:
# consider warning if ref is not None and a, b, log are inputs
a = M_from_D_coeffs[ref]['a']
b = M_from_D_coeffs[ref]['b']
base = M_from_D_coeffs[ref]['log_base']
else:
pass
return a + b * log_fn[base](D)
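# A worked example of the relationship above (a sketch; coefficients are the
# BW_2006 values from M_from_D_coeffs, base 10):
#
#     >>> M_from_D(1.0)            # 6.94 + 1.14 * log10(1.0)
#     6.94
#     >>> round(M_from_D(2.0), 2)  # 6.94 + 1.14 * log10(2.0)
#     7.28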
def D_from_M(M, ref='BW_2006', a=None, b=None, base='e'):
"""
    Displacement from moment magnitude, using the specified scaling
    relationship (keyword 'ref', or parameters 'a', 'b' and 'base').
General relationship is D = base ** ((M - a) / b)
Parameters
----------
M : Scalar or vector values for moment magnitude
ref : string indicating scaling relationship.
'BW_2006' is Biasi and Weldon (2006) (default).
'WC_1994_all' is Wells and Coppersmith (1994) for all events.
'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events.
'WC_1994_R' is Wells and Coppersmith (1994) for reverse events.
'WC_1994_N' is Wells and Coppersmith (1994) for normal events.
`ref=None` will allow you to enter your own coefficients and base.
a : Scalar, or vector of same length as M.
b : Scalar, or vector of same length as M.
base : String, base for exponent, default 'e'.
'e' is e.
'10' is 10.
Returns
-------
D : Scalar or vector of calculated displacement (in meters),
with shape of M.
"""
if ref is not None:
a = M_from_D_coeffs[ref]['a']
b = M_from_D_coeffs[ref]['b']
base = M_from_D_coeffs[ref]['log_base']
return exp_fn[base]((M - a) / b)
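# The inverse of the example under M_from_D (BW_2006 coefficients, a sketch):
#
#     >>> round(D_from_M(7.28), 2)  # 10 ** ((7.28 - 6.94) / 1.14), in meters
#     1.99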
def M_from_L(L, ref='Stirling_2002_instr', unit='km', a=None, b=None, base='e',
a_err=None, b_err=None, mc=False):
"""
    Moment magnitude from rupture length, using the specified scaling
    relationship (keyword 'ref', or parameters 'a', 'b' and 'base').
    General relationship is M = a + b * log(L).
    Parameters
    ----------
    L : Scalar or vector values for rupture length (in km by default)
ref : string indicating scaling relationship.
'Stirling_2002_instr' is from Stirling et al. 2002, instrumental data.
'WC_1994_all' is Wells and Coppersmith (1994) for all events.
'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events.
'WC_1994_R' is Wells and Coppersmith (1994) for reverse events.
'WC_1994_N' is Wells and Coppersmith (1994) for normal events.
`ref=None` will allow you to enter your own coefficients and base.
unit : Unit of length measure. Default is 'km'. 'm' also works.
    a : Scalar, or vector of same length as L.
    a_err : Standard error of `a`. Scalar.
    b : Scalar, or vector of same length as L.
    b_err : Standard error of `b`. Scalar.
    base : String, base for logarithm, default 'e'.
'e' is natural log.
'10' is log10.
mc : Boolean that indicates whether to sample the coefficents a and b
including uncertainties `a_err` and `b_err` through Monte Carlo
techniques.
Returns
-------
M : Scalar or vector of calculated magnitude, with shape of L.
"""
# unit conversion
if unit == 'm':
L = L * 1000.
if ref is not None:
a = M_from_L_coeffs[ref]['a']
b = M_from_L_coeffs[ref]['b']
base = M_from_L_coeffs[ref]['log_base']
try:
a_err = M_from_L_coeffs[ref]['a_err']
b_err = M_from_L_coeffs[ref]['b_err']
except KeyError:
pass
    if mc:
        A = a if a_err is None else np.random.normal(a, a_err, np.size(L))
        B = b if b_err is None else np.random.normal(b, b_err, np.size(L))
else:
A = a
B = b
return A + B * log_fn[base](L)
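# A worked example (a sketch): with ref='WC_1994_all' (a=5.08, b=1.16, log10)
# and a 100 km long rupture,
#
#     >>> round(M_from_L(100.0, ref='WC_1994_all'), 2)  # 5.08 + 1.16 * log10(100)
#     7.4
#
# With mc=True, each call first draws a and b from normal distributions with
# the tabulated standard errors (a_err=0.1, b_err=0.07) before applying the
# same relationship; pass a vector of lengths to get a sample of magnitudes:
#
#     >>> M_samples = M_from_L(np.full(1000, 100.0), ref='WC_1994_all',
#     ...                      mc=True)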
"""
Estimation functions
"""
def p_D_M(D, M, ref='BW_2006', sample_bias_corr=False):
"""
Likelihood of predicted D given M, as defined by Biasi and Weldon (2006).
Parameters
----------
D : Scalar or array of displacement values (in meters).
M : Scalar or array of magnitudes.
ref: Displacement-magnitude scaling reference (string).
'BW_2006' is Biasi and Weldon (2006).
        'WC_1994_all' is Wells and Coppersmith (1994).
    sample_bias_corr : Boolean; if True, use the slip distribution weighted
        by offset size, to correct for preferential sampling of larger
        offsets (see p_M_D).
Returns
-------
p_D_M : Calculated likelihood. If scalar, simply returns the likelihood.
If not, returns an improper pdf (a `culpable.stats.Pdf`) which
is an interpolation class. Actual likelihoods are `p_D_M.y`, and
corresponding magnitudes (i.e. the prior p_M) are `p_D_M.x`.
"""
D_ave = D_from_M(M, ref=ref)
D = np.abs(D)
    if sample_bias_corr:
Dn_ = Dn_sb
else:
Dn_ = Dn
if np.isscalar(D):
D_score = D / D_ave
p_D_M = Dn_(D_score)
else:
D_score = np.array([d / D_ave for d in D])
p_D_M = Dn_(D_score)
p_D_M = np.mean(p_D_M, axis=0)
if np.isscalar(p_D_M):
        p_D_M = float(p_D_M)
else:
p_D_M = Pdf(M, p_D_M, normalize=True)
return p_D_M
def _make_p_M_x(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None):
"""
Makes the X values (i.e., the magnitudes) for a p_M distribution.
"""
if n_M is not None:
p_M_x = np.linspace(p_M_min, p_M_max, num=n_M)
else:
if M_step is None:
M_step = 0.1 # in case it's passed as None from another function
p_M_x = np.arange(p_M_min, p_M_max + M_step, M_step)
return p_M_x
def make_p_M_uniform(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None):
"""
Creates a uniform PDF between the minimum and maximum magnitudes given
by p_M_min and p_M_max.
Parameters
----------
p_M_min : Minimum magnitude.
p_M_max : Maximum magnitude.
M_step : Width of steps in interpolation (no effect on final results).
n_M : number of points in interpolation (no effect on final results).
Returns
-------
p_M : Pdf function with a uniform distribution between p_M_min and p_M_max
"""
p_M_x = _make_p_M_x(p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step,
n_M=n_M)
return Pdf(p_M_x, np.ones(len(p_M_x)) * 1 / len(p_M_x))
def make_p_M_gr_surface_break(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None):
"""
Creates a PDF based on a Gutenberg-Richter distribution that is then
modified to account for the decreasing likelihood of surface rupture
with decreasing magnitude (distribution from Biasi and Weldon 2006,
    figure 8b).
Returns:
--------
p_M : Pdf class with a modified Gutenberg-Richter distribution.
"""
p_M_x = _make_p_M_x(p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step,
n_M=n_M)
p_M_gr_sb = Pdf(gr_pm_x, gr_pm_y)
p_M_gr_sb_y = p_M_gr_sb(p_M_x)
return Pdf(p_M_x, p_M_gr_sb_y)
def make_p_M(p_M_type='uniform', p_M_min=None, p_M_max=None, M_step=None,
n_M=None):
"""
    Creates a PDF of magnitudes to use as the prior p(M).
Parameters
----------
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum magnitude.
p_M_max : Maximum magnitude.
M_step : Width of steps in interpolation (no effect on final results).
n_M : number of points in interpolation (no effect on final results).
Returns
-------
    p_M : Pdf function of the requested type between p_M_min and p_M_max
"""
if p_M_type == 'uniform':
p_M = make_p_M_uniform(p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
elif p_M_type == 'GR_surface_break':
p_M = make_p_M_gr_surface_break(p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
    else:
        raise ValueError("unknown p_M_type: {}".format(p_M_type))
    return p_M
def p_M_D(D, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None,
ref='BW_2006', p_M_type='uniform', sample_bias_corr=False):
"""
Calculates p(M|D), the posterior probability of an earthquake having a
magnitude of M given observed displacement D, based on Biasi and Weldon
2006 (but with optional sample bias correction).
Either a `p_M` Pdf object should be passed, or the additional parameters
necessary to construct one; see `make_p_M`.
Parameters
----------
D : Scalar or vector of displacements in meters (floats).
p_M : Prior magnitude distribution p(M), in the Pdf class from
culpable.stats.
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum prior magnitude; only needed if `p_M` is not given.
p_M_max : Maximum prior magnitude; only needed if `p_M` is not given.
M_step : Spacing for `p_M`; only needed if `p_M` is not given.
n_M : number of points for `p_M`; only needed if `p_M` is not given.
ref : Reference for magnitude-displacement scaling relationships. See
`M_from_D` for a list of implemented relationships.
    sample_bias_corr : Boolean indicating whether to correct for
preferential sampling of scarps proportionally
to the offset at a point relative to the min
and max offsets.
Returns
    -------
p_M_D : Pdf function of the posterior magnitude estimation p(M|D).
"""
if p_M is None:
p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
else:
#TODO: maybe add some logic for dealing with non `Pdf` priors
pass
p_D = Pdf(p_M.x, [np.trapz(Dn_y, Dn_x * D_from_M(M, ref=ref))
for M in p_M.x])
p_D_M_ = p_D_M(D, p_M.x, ref=ref, sample_bias_corr=sample_bias_corr)
p_M_D_ = multiply_pdfs(p_M, p_D_M_, step=M_step)
p_M_D_ = divide_pdfs(p_M_D_, p_D, step=M_step)
return p_M_D_
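# A usage sketch (hypothetical inputs): the posterior p(M|D) for a single
# 2 m offset under a uniform prior between M 6 and 8:
#
#     >>> post = p_M_D(2.0, p_M_min=6.0, p_M_max=8.0, M_step=0.1)
#     >>> post.x  # the magnitude grid of the prior
#     >>> post.y  # the posterior densities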
def p_M_L(L, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None,
p_M_type='uniform', ref='WC_1994_all', mc=True):
"""
Calculates p(M|L), the posterior probability of an earthquake having a
magnitude of M given observed length L.
Either a `p_M` Pdf object should be passed, or the additional parameters
necessary to construct one; see `make_p_M`.
Parameters
----------
L : Scalar or vector of lengths in kilometers (floats).
p_M : Prior magnitude distribution p(M), in the Pdf class from
culpable.stats.
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum prior magnitude; only needed if `p_M` is not given.
p_M_max : Maximum prior magnitude; only needed if `p_M` is not given.
M_step : Spacing for `p_M`; only needed if `p_M` is not given.
n_M : number of points for `p_M`; only needed if `p_M` is not given.
ref : Reference for magnitude-length scaling relationships. See `M_from_L`
for a list of implemented relationships.
mc : Boolean that describes whether to propagate the uncertainty (standard
errors) in the scaling relationship to the posterior using a Monte
Carlo simulation.
Returns
    -------
    p_M_L : Pdf function of the posterior magnitude estimation p(M|L).
"""
if p_M is None:
p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
p_M_L_samples = M_from_L(L, ref=ref, mc=mc)
p_M_L_ = pdf_from_samples(p_M_L_samples, x_min=p_M.x.min(),
x_max=p_M.x.max())
p_M_L_ = multiply_pdfs(p_M, p_M_L_)
return p_M_L_
def p_M_DL(D, L, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None,
p_M_type='uniform', D_ref='BW_2006', L_ref='WC_1994_all',
L_mc=True, sample_bias_corr=False):
"""
Calculates p(M|D,L), the posterior probability of an earthquake having a
magnitude of M given observed offset/displacement D and rupture length L.
Either a `p_M` Pdf object should be passed, or the additional parameters
necessary to construct one; see `make_p_M`.
Parameters
----------
D : Scalar or vector of displacement in meters (floats).
L : Scalar or vector of lengths in kilometers (floats).
p_M : Prior magnitude distribution p(M), in the Pdf class from
culpable.stats.
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum prior magnitude; only needed if `p_M` is not given.
    p_M_max : Maximum prior magnitude; only needed if `p_M` is not given.
M_step : Spacing for `p_M`; only needed if `p_M` is not given.
n_M : number of points for `p_M`; only needed if `p_M` is not given.
D_ref : Reference for magnitude-displacement scaling relationships. See
`M_from_D` for a list of implemented relationships.
L_ref : Reference for magnitude-length scaling relationships. See
`M_from_L` for a list of implemented relationships.
    L_mc : Boolean that describes whether to propagate the uncertainty (standard
errors) in the scaling relationship to the posterior using a Monte
Carlo simulation.
    sample_bias_corr : Boolean indicating whether to correct for
preferential sampling of scarps proportionally
to the offset at a point relative to the min
and max offsets.
Returns
    -------
    p_M_DL : Pdf function of the posterior magnitude estimation p(M|D,L).
"""
if p_M is None:
p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
p_M_D_ = p_M_D(D, p_M, ref=D_ref, sample_bias_corr=sample_bias_corr)
p_M_L_samples = M_from_L(L, ref=L_ref, mc=L_mc)
p_M_L_ = pdf_from_samples(p_M_L_samples, x_min=p_M.x.min(),
x_max=p_M.x.max())
return multiply_pdfs(p_M_L_, p_M_D_)
|
mit
| 3,321,407,271,756,784,000
| 33.628615
| 79
| 0.551976
| false
| 2.902283
| false
| false
| false
|
huwiki/featured-feeds
|
rsslib.py
|
1
|
4281
|
#!/usr/bin/python
# -*- coding: iso-8859-2 -*-
import sys, os
import re, string
import time, datetime, calendar, locale
import urllib
import cPickle
import xml.sax.saxutils
locale.setlocale(locale.LC_TIME, 'en_GB')
currenttimestamp = time.strftime(u'%a, %d %b %Y %H:%M:%S +0000', time.gmtime())
locale.setlocale(locale.LC_TIME, 'hu_HU')
# general settings
settings = {
    'rss_webmaster': u'tgr.huwiki@gmail.com (Tisza Gergő)',
'program_name': 'WpFeedMaker',
'program_version': '1.0',
'program_contact': 'tgr.huwiki@gmail.com',
}
# helpers
def encode_title(s):
s = s[0:1].upper() + s[1:]
s = re.sub(' ', '_', s)
return urllib.quote(s.encode('utf-8'))
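# A quick sketch of the helper above:
#
#     >>> encode_title(u'main page')
#     'Main_page'
#
# Non-ASCII characters are percent-encoded as UTF-8 by urllib.quote.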
def date_vars(date, extend = {}):
if date.isocalendar()[2] < 4:
n = 1
else:
n = 2
iso = date.isocalendar()
dict = {
'year': iso[0],
'years1': iso[0] % 5,
'years2': iso[0] % 5 + 5,
'month': date.month,
'monthname': calendar.month_name[date.month].decode('iso-8859-2'),
'day' : date.day,
'week': iso[1],
'dow' : iso[2],
'n' : n,
}
dict.update(extend)
return dict
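# A worked example of the week-splitting rule above: 2012-01-04 was a
# Wednesday (ISO weekday 3 < 4), so n == 1; 2012-01-06 was a Friday
# (ISO weekday 5), so n == 2.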
# Subclassing of URLopener - sets "User-agent: ", which Wikipedia requires to be set
# to something else than the default "Python-urllib"
class MyURLopener(urllib.URLopener):
version = settings['program_name'] + "/" + settings['program_version'] + " " + settings['program_contact']
# Caching of HTML from Wikipedia
class CacheItem:
def __init__(self, html, date, fetchtime):
self.html = html
self.date = date
self.fetchtime = fetchtime
class WPCache:
def __init__(self, settings):
self.settings = settings
self.url_opener = MyURLopener()
self.filename = self.settings['cache_filename']
if (os.path.exists(self.filename)):
file = open(self.filename)
self.cache = cPickle.load(file)
file.close()
else:
self.cache = {}
def get_html(self, url, date):
if url in self.cache:
return self.cache[url].html
else:
html = self.url_opener.open(url).read()
cacheitem = CacheItem(html, date, time.gmtime())
self.cache[url] = cacheitem
return html
# Weed out old entries, so cache doesn't get big
def too_old(self, date):
return (datetime.date.today() - date).days > self.settings['time_range']
def weed_out_old(self):
self.cache = dict([x for x in self.cache.items() if not self.too_old(x[1].date)])
def save(self):
self.weed_out_old()
file = open(self.filename, "w")
p = cPickle.Pickler(file)
        p.dump(self.cache)
        file.close()
class WPFeed:
def __init__(self, settings):
self.settings = settings
self.cache = WPCache(self.settings)
def get_html(self, url, date, clean = True):
html = self.cache.get_html(url, date)
if clean:
html = re.sub('\s*<!--[\s\S]*?-->\s*', '', html)
return html
def rss_item(self, item):
return """<item>
<title>%(title)s</title>
<link>%(url)s</link>
<guid isPermaLink="true">%(url)s</guid>
<description>%(escaped_content)s</description>
</item>
""" % {
'title': xml.sax.saxutils.escape(item['title']),
'url': item['url'],
'escaped_content': xml.sax.saxutils.escape(item['content']),
}
def rss(self, items):
self.xml = """<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:blogChannel="http://backend.userland.com/blogChannelModule">
<channel>
<title>%(rss_title)s</title>
<link>%(rss_link)s</link>
<description>%(rss_description)s</description>
<language>hu</language>
<copyright>CC-BY-SA-3.0</copyright>
<lastBuildDate>%(build_date)s</lastBuildDate>
<docs>http://blogs.law.harvard.edu/tech/rss</docs>
<webMaster>%(webmaster)s</webMaster>
<generator>%(generator)s</generator>
%(items)s
</channel>
</rss>
""" % {
'rss_title': self.settings['rss_title'],
'rss_link': self.settings['rss_link'],
'rss_description': self.settings['rss_description'],
'webmaster': settings['rss_webmaster'],
'build_date': currenttimestamp,
'items': '\n'.join(map(self.rss_item, items)),
'generator': settings['program_name'] + ' ' + settings['program_version'],
}
def save(self):
file = open(self.settings['output_filename'], "w")
file.write(self.xml.encode('utf-8'))
file.close()
self.cache.save()
def main():
print "This file cannot be invoked directly"
sys.exit(1)
if __name__ == '__main__':
main()
|
mit
| -2,498,234,806,636,101,000
| 25.103659
| 107
| 0.640972
| false
| 2.772668
| false
| false
| false
|
mgedmin/objgraph
|
objgraph.py
|
1
|
43531
|
"""
Tools for drawing Python object reference graphs with graphviz.
You can find documentation online at https://mg.pov.lt/objgraph/
Copyright (c) 2008-2017 Marius Gedminas <marius@pov.lt> and contributors
Released under the MIT licence.
"""
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import codecs
import collections
import gc
import inspect
import itertools
import operator
import os
import re
import subprocess
import sys
import tempfile
import types
try:
# Python 2.x compatibility
from StringIO import StringIO
except ImportError: # pragma: PY3
from io import StringIO
try:
from types import InstanceType
except ImportError: # pragma: PY3
# Python 3.x compatibility
InstanceType = None
__author__ = "Marius Gedminas (marius@gedmin.as)"
__copyright__ = "Copyright (c) 2008-2017 Marius Gedminas and contributors"
__license__ = "MIT"
__version__ = '3.5.1.dev0'
__date__ = '2020-10-11'
try:
basestring
except NameError: # pragma: PY3
# Python 3.x compatibility
basestring = str
try:
iteritems = dict.iteritems
except AttributeError: # pragma: PY3
# Python 3.x compatibility
iteritems = dict.items
IS_INTERACTIVE = False
try: # pragma: nocover
import graphviz
if 'TerminalInteractiveShell' not in get_ipython().__class__.__name__:
# So far I know two shells where it's inappropriate to use inline
# graphics, because they're text only:
# - ipython uses a TerminalInteractiveShell
# - pycharm's console uses PyDevTerminalInteractiveShell
IS_INTERACTIVE = True
except (NameError, ImportError):
pass
def _isinstance(object, classinfo):
"""Return whether an object is an instance of a class or its subclass.
Differs from the builtin isinstance() implementation in that it does not
depend on the ``__class__`` attribute which is proxied by
mock.Mock(spec=...).
"""
return issubclass(type(object), classinfo)
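# An illustrative contrast (a sketch; unittest.mock on Python 3, or the
# external mock backport on Python 2):
#
#     >>> from unittest import mock
#     >>> m = mock.Mock(spec=dict)
#     >>> isinstance(m, dict)   # fooled by the proxied __class__
#     True
#     >>> _isinstance(m, dict)  # type(m) is a Mock subclass, not dict
#     False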
def count(typename, objects=None):
"""Count objects tracked by the garbage collector with a given class name.
The class name can optionally be fully qualified.
Example:
>>> count('dict')
42
>>> count('mymodule.MyClass')
2
.. note::
The Python garbage collector does not track simple
objects like int or str. See
https://docs.python.org/3/library/gc.html#gc.is_tracked
for more information.
Instead of looking through all objects tracked by the GC, you may
specify your own collection, e.g.
>>> count('MyClass', get_leaking_objects())
3
See also: :func:`get_leaking_objects`.
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
Accepts fully-qualified type names (i.e. 'package.module.ClassName')
as well as short type names (i.e. 'ClassName').
"""
if objects is None:
objects = gc.get_objects()
try:
if '.' in typename:
return sum(1 for o in objects if _long_typename(o) == typename)
else:
return sum(1 for o in objects if _short_typename(o) == typename)
finally:
del objects # clear cyclic references to frame
def typestats(objects=None, shortnames=True, filter=None):
"""Count the number of instances for each type tracked by the GC.
Note that the GC does not track simple objects like int or str.
Note that classes with the same name but defined in different modules
will be lumped together if ``shortnames`` is True.
If ``filter`` is specified, it should be a function taking one argument and
returning a boolean. Objects for which ``filter(obj)`` returns ``False``
will be ignored.
Example:
>>> typestats()
{'list': 12041, 'tuple': 10245, ...}
>>> typestats(get_leaking_objects())
{'MemoryError': 1, 'tuple': 2795, 'RuntimeError': 1, 'list': 47, ...}
.. versionadded:: 1.1
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 3.1.3
New parameter: ``filter``.
"""
if objects is None:
objects = gc.get_objects()
try:
if shortnames:
typename = _short_typename
else:
typename = _long_typename
stats = {}
for o in objects:
if filter and not filter(o):
continue
n = typename(o)
stats[n] = stats.get(n, 0) + 1
return stats
finally:
del objects # clear cyclic references to frame
def most_common_types(limit=10, objects=None, shortnames=True, filter=None):
"""Count the names of types with the most instances.
Returns a list of (type_name, count), sorted most-frequent-first.
Limits the return value to at most ``limit`` items. You may set ``limit``
to None to avoid that.
If ``filter`` is specified, it should be a function taking one argument and
returning a boolean. Objects for which ``filter(obj)`` returns ``False``
will be ignored.
The caveats documented in :func:`typestats` apply.
Example:
>>> most_common_types(limit=2)
[('list', 12041), ('tuple', 10245)]
.. versionadded:: 1.4
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 3.1.3
New parameter: ``filter``.
"""
stats = sorted(
typestats(objects, shortnames=shortnames, filter=filter).items(),
key=operator.itemgetter(1), reverse=True)
if limit:
stats = stats[:limit]
return stats
def show_most_common_types(
limit=10,
objects=None,
shortnames=True,
file=None,
filter=None):
"""Print the table of types of most common instances.
If ``filter`` is specified, it should be a function taking one argument and
returning a boolean. Objects for which ``filter(obj)`` returns ``False``
will be ignored.
The caveats documented in :func:`typestats` apply.
Example:
>>> show_most_common_types(limit=5)
tuple 8959
function 2442
wrapper_descriptor 1048
dict 953
builtin_function_or_method 800
.. versionadded:: 1.1
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 3.0
New parameter: ``file``.
.. versionchanged:: 3.1.3
New parameter: ``filter``.
"""
if file is None:
file = sys.stdout
stats = most_common_types(limit, objects, shortnames=shortnames,
filter=filter)
width = max(len(name) for name, count in stats)
for name, count in stats:
file.write('%-*s %i\n' % (width, name, count))
def growth(limit=10, peak_stats={}, shortnames=True, filter=None):
"""Count the increase in peak object since last call.
Returns a list of (type_name, total_count, increase_delta),
descending order by increase_delta.
Limits the output to ``limit`` largest deltas. You may set ``limit`` to
None to see all of them.
Uses and updates ``peak_stats``, a dictionary from type names to previously
seen peak object counts. Usually you don't need to pay attention to this
argument.
If ``filter`` is specified, it should be a function taking one argument and
returning a boolean. Objects for which ``filter(obj)`` returns ``False``
will be ignored.
The caveats documented in :func:`typestats` apply.
Example:
>>> growth(2)
    [('tuple', 12282, 10), ('dict', 1922, 7)]
.. versionadded:: 3.3.0
"""
gc.collect()
stats = typestats(shortnames=shortnames, filter=filter)
deltas = {}
for name, count in iteritems(stats):
old_count = peak_stats.get(name, 0)
if count > old_count:
deltas[name] = count - old_count
peak_stats[name] = count
deltas = sorted(deltas.items(), key=operator.itemgetter(1),
reverse=True)
if limit:
deltas = deltas[:limit]
return [(name, stats[name], delta) for name, delta in deltas]
def show_growth(limit=10, peak_stats=None, shortnames=True, file=None,
filter=None):
"""Show the increase in peak object counts since last call.
    If ``peak_stats`` is None, peak object counts are recorded by
    :func:`growth`; you can keep track of the counts yourself by passing
    your own dictionary as ``peak_stats``.
The caveats documented in :func:`growth` apply.
Example:
>>> show_growth()
wrapper_descriptor 970 +14
tuple 12282 +10
dict 1922 +7
...
.. versionadded:: 1.5
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 2.1
New parameter: ``file``.
.. versionchanged:: 3.1.3
New parameter: ``filter``.
"""
if peak_stats is None:
result = growth(limit, shortnames=shortnames, filter=filter)
else:
result = growth(limit, peak_stats, shortnames, filter)
if result:
if file is None:
file = sys.stdout
width = max(len(name) for name, _, _ in result)
for name, count, delta in result:
file.write('%-*s%9d %+9d\n' % (width, name, count, delta))
def get_new_ids(skip_update=False, limit=10, sortby='deltas',
shortnames=None, file=None, _state={}):
"""Find and display new objects allocated since last call.
Shows the increase in object counts since last call to this
function and returns the memory address ids for new objects.
Returns a dictionary mapping object type names to sets of object IDs
that have been created since the last time this function was called.
``skip_update`` (bool): If True, returns the same dictionary that
was returned during the previous call without updating the internal
state or examining the objects currently in memory.
``limit`` (int): The maximum number of rows that you want to print
data for. Use 0 to suppress the printing. Use None to print everything.
``sortby`` (str): This is the column that you want to sort by in
descending order. Possible values are: 'old', 'current', 'new',
'deltas'
``shortnames`` (bool): If True, classes with the same name but
defined in different modules will be lumped together. If False,
all type names will be qualified with the module name. If None (default),
``get_new_ids`` will remember the value from previous calls, so it's
enough to prime this once. By default the primed value is True.
``_state`` (dict): Stores old, current, and new_ids in memory.
It is used by the function to store the internal state between calls.
Never pass in this argument unless you know what you're doing.
The caveats documented in :func:`growth` apply.
When one gets new_ids from :func:`get_new_ids`, one can use
:func:`at_addrs` to get a list of those objects. Then one can iterate over
the new objects, print out what they are, and call :func:`show_backrefs` or
:func:`show_chain` to see where they are referenced.
Example:
>>> _ = get_new_ids() # store current objects in _state
>>> _ = get_new_ids() # current_ids become old_ids in _state
>>> a = [0, 1, 2] # list we don't know about
>>> b = [3, 4, 5] # list we don't know about
>>> new_ids = get_new_ids(limit=3) # we see new lists
======================================================================
Type Old_ids Current_ids New_ids Count_Deltas
======================================================================
list 324 326 +3 +2
dict 1125 1125 +0 +0
wrapper_descriptor 1001 1001 +0 +0
======================================================================
>>> new_lists = at_addrs(new_ids['list'])
>>> a in new_lists
True
>>> b in new_lists
True
.. versionadded:: 3.4
"""
if not _state:
_state['old'] = collections.defaultdict(set)
_state['current'] = collections.defaultdict(set)
_state['new'] = collections.defaultdict(set)
_state['shortnames'] = True
new_ids = _state['new']
if skip_update:
return new_ids
old_ids = _state['old']
current_ids = _state['current']
if shortnames is None:
shortnames = _state['shortnames']
else:
_state['shortnames'] = shortnames
gc.collect()
objects = gc.get_objects()
for class_name in old_ids:
old_ids[class_name].clear()
for class_name, ids_set in current_ids.items():
old_ids[class_name].update(ids_set)
for class_name in current_ids:
current_ids[class_name].clear()
for o in objects:
if shortnames:
class_name = _short_typename(o)
else:
class_name = _long_typename(o)
id_number = id(o)
current_ids[class_name].add(id_number)
for class_name in new_ids:
new_ids[class_name].clear()
rows = []
keys_to_remove = []
for class_name in current_ids:
num_old = len(old_ids[class_name])
num_current = len(current_ids[class_name])
if num_old == 0 and num_current == 0:
# remove the key from our dicts if we don't have any old or
# current class_name objects
keys_to_remove.append(class_name)
continue
new_ids_set = current_ids[class_name] - old_ids[class_name]
new_ids[class_name].update(new_ids_set)
num_new = len(new_ids_set)
num_delta = num_current - num_old
row = (class_name, num_old, num_current, num_new, num_delta)
rows.append(row)
for key in keys_to_remove:
del old_ids[key]
del current_ids[key]
del new_ids[key]
index_by_sortby = {'old': 1, 'current': 2, 'new': 3, 'deltas': 4}
rows.sort(key=operator.itemgetter(index_by_sortby[sortby], 0),
reverse=True)
if limit is not None:
rows = rows[:limit]
if not rows:
return new_ids
if file is None:
file = sys.stdout
width = max(len(row[0]) for row in rows)
print('='*(width+13*4), file=file)
print('%-*s%13s%13s%13s%13s' %
(width, 'Type', 'Old_ids', 'Current_ids', 'New_ids', 'Count_Deltas'),
file=file)
print('='*(width+13*4), file=file)
for row_class, old, current, new, delta in rows:
print('%-*s%13d%13d%+13d%+13d' %
(width, row_class, old, current, new, delta), file=file)
print('='*(width+13*4), file=file)
return new_ids
def get_leaking_objects(objects=None):
"""Return objects that do not have any referents.
These could indicate reference-counting bugs in C code. Or they could
be legitimate.
Note that the GC does not track simple objects like int or str.
.. versionadded:: 1.7
"""
if objects is None:
gc.collect()
objects = gc.get_objects()
try:
ids = set(id(i) for i in objects)
for i in objects:
ids.difference_update(id(j) for j in gc.get_referents(i))
# this then is our set of objects without referrers
return [i for i in objects if id(i) in ids]
finally:
del objects, i # clear cyclic references to frame
def by_type(typename, objects=None):
"""Return objects tracked by the garbage collector with a given class name.
Example:
>>> by_type('MyClass')
[<mymodule.MyClass object at 0x...>]
Note that the GC does not track simple objects like int or str.
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
Accepts fully-qualified type names (i.e. 'package.module.ClassName')
as well as short type names (i.e. 'ClassName').
"""
if objects is None:
objects = gc.get_objects()
try:
if '.' in typename:
return [o for o in objects if _long_typename(o) == typename]
else:
return [o for o in objects if _short_typename(o) == typename]
finally:
del objects # clear cyclic references to frame
def at(addr):
"""Return an object at a given memory address.
The reverse of id(obj):
>>> at(id(obj)) is obj
True
Note that this function does not work on objects that are not tracked by
the GC (e.g. ints or strings).
"""
for o in gc.get_objects():
if id(o) == addr:
return o
return None
def at_addrs(address_set):
"""Return a list of objects for a given set of memory addresses.
The reverse of [id(obj1), id(obj2), ...]. Note that objects are returned
in an arbitrary order.
When one gets ``new_ids`` from :func:`get_new_ids`, one can use this
function to get a list of those objects. Then one can iterate over the new
objects, print out what they are, and call :func:`show_backrefs` or
:func:`show_chain` to see where they are referenced.
>>> a = [0, 1, 2]
>>> new_ids = get_new_ids()
>>> new_lists = at_addrs(new_ids['list'])
>>> a in new_lists
True
Note that this function does not work on objects that are not tracked
by the GC (e.g. ints or strings).
.. versionadded:: 3.4
"""
res = []
for o in gc.get_objects():
if id(o) in address_set:
res.append(o)
return res
def find_ref_chain(obj, predicate, max_depth=20, extra_ignore=()):
"""Find a shortest chain of references leading from obj.
The end of the chain will be some object that matches your predicate.
``predicate`` is a function taking one argument and returning a boolean.
``max_depth`` limits the search depth.
``extra_ignore`` can be a list of object IDs to exclude those objects from
your search.
Example:
>>> find_ref_chain(obj, lambda x: isinstance(x, MyClass))
[obj, ..., <MyClass object at ...>]
Returns ``[obj]`` if such a chain could not be found.
.. versionadded:: 1.7
"""
return _find_chain(obj, predicate, gc.get_referents,
max_depth=max_depth, extra_ignore=extra_ignore)[::-1]
def find_backref_chain(obj, predicate, max_depth=20, extra_ignore=()):
"""Find a shortest chain of references leading to obj.
The start of the chain will be some object that matches your predicate.
``predicate`` is a function taking one argument and returning a boolean.
``max_depth`` limits the search depth.
``extra_ignore`` can be a list of object IDs to exclude those objects from
your search.
Example:
>>> find_backref_chain(obj, is_proper_module)
[<module ...>, ..., obj]
Returns ``[obj]`` if such a chain could not be found.
.. versionchanged:: 1.5
Returns ``obj`` instead of ``None`` when a chain could not be found.
"""
return _find_chain(obj, predicate, gc.get_referrers,
max_depth=max_depth, extra_ignore=extra_ignore)
def show_backrefs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10,
highlight=None, filename=None, extra_info=None,
refcounts=False, shortnames=True, output=None,
extra_node_attrs=None):
"""Generate an object reference graph ending at ``objs``.
The graph will show you what objects refer to ``objs``, directly and
indirectly.
``objs`` can be a single object, or it can be a list of objects. If
unsure, wrap the single object in a new list.
``filename`` if specified, can be the name of a .dot or a image
file, whose extension indicates the desired output format; note
that output to a specific format is entirely handled by GraphViz:
if the desired format is not supported, you just get the .dot
file. If ``filename`` and ``output`` are not specified, ``show_backrefs``
will try to display the graph inline (if you're using IPython), otherwise
it'll try to produce a .dot file and spawn a viewer (xdot). If xdot is
not available, ``show_backrefs`` will convert the .dot file to a
.png and print its name.
``output`` if specified, the GraphViz output will be written to this
file object. ``output`` and ``filename`` should not both be specified.
Use ``max_depth`` and ``too_many`` to limit the depth and breadth of the
graph.
Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs) to
remove undesired objects from the graph.
Use ``highlight`` (a predicate) to highlight certain graph nodes in blue.
Use ``extra_info`` (a function taking one argument and returning a
string) to report extra information for objects.
Use ``extra_node_attrs`` (a function taking the current object as argument,
returning a dict of strings) to add extra attributes to the nodes. See
https://www.graphviz.org/doc/info/attrs.html for a list of possible node
attributes.
Specify ``refcounts=True`` if you want to see reference counts.
These will mostly match the number of arrows pointing to an object,
but can be different for various reasons.
Specify ``shortnames=False`` if you want to see fully-qualified type
names ('package.module.ClassName'). By default you get to see only the
class name part.
Examples:
>>> show_backrefs(obj)
>>> show_backrefs([obj1, obj2])
>>> show_backrefs(obj, max_depth=5)
>>> show_backrefs(obj, filter=lambda x: not inspect.isclass(x))
>>> show_backrefs(obj, highlight=inspect.isclass)
>>> show_backrefs(obj, extra_ignore=[id(locals())])
>>> show_backrefs(obj, extra_node_attrs=lambda x: dict(URL=str(id(x))))
.. versionchanged:: 1.3
New parameters: ``filename``, ``extra_info``.
.. versionchanged:: 1.5
New parameter: ``refcounts``.
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 2.0
New parameter: ``output``.
.. versionchanged:: 3.5
New parameter: ``extra_node_attrs``.
"""
# For show_backrefs(), it makes sense to stop when reaching a
# module because you'll end up in sys.modules and explode the
# graph with useless clutter. That's why we're specifying
# cull_func here, but not in show_graph().
return _show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore,
filter=filter, too_many=too_many, highlight=highlight,
edge_func=gc.get_referrers, swap_source_target=False,
filename=filename, output=output, extra_info=extra_info,
refcounts=refcounts, shortnames=shortnames,
cull_func=is_proper_module,
extra_node_attrs=extra_node_attrs)
def show_refs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10,
highlight=None, filename=None, extra_info=None,
refcounts=False, shortnames=True, output=None,
extra_node_attrs=None):
"""Generate an object reference graph starting at ``objs``.
The graph will show you what objects are reachable from ``objs``, directly
and indirectly.
``objs`` can be a single object, or it can be a list of objects. If
unsure, wrap the single object in a new list.
    ``filename`` if specified, can be the name of a .dot or an image
file, whose extension indicates the desired output format; note
that output to a specific format is entirely handled by GraphViz:
if the desired format is not supported, you just get the .dot
    file. If ``filename`` and ``output`` are not specified, ``show_refs`` will
try to display the graph inline (if you're using IPython), otherwise it'll
try to produce a .dot file and spawn a viewer (xdot). If xdot is
not available, ``show_refs`` will convert the .dot file to a
.png and print its name.
``output`` if specified, the GraphViz output will be written to this
file object. ``output`` and ``filename`` should not both be specified.
Use ``max_depth`` and ``too_many`` to limit the depth and breadth of the
graph.
Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs) to
remove undesired objects from the graph.
Use ``highlight`` (a predicate) to highlight certain graph nodes in blue.
    Use ``extra_info`` (a function taking one argument and returning a
    string) to report extra information for objects.
Use ``extra_node_attrs`` (a function taking the current object as argument,
returning a dict of strings) to add extra attributes to the nodes. See
https://www.graphviz.org/doc/info/attrs.html for a list of possible node
attributes.
Specify ``refcounts=True`` if you want to see reference counts.
Examples:
>>> show_refs(obj)
>>> show_refs([obj1, obj2])
>>> show_refs(obj, max_depth=5)
>>> show_refs(obj, filter=lambda x: not inspect.isclass(x))
>>> show_refs(obj, highlight=inspect.isclass)
>>> show_refs(obj, extra_ignore=[id(locals())])
>>> show_refs(obj, extra_node_attrs=lambda x: dict(URL=str(id(x))))
.. versionadded:: 1.1
.. versionchanged:: 1.3
New parameters: ``filename``, ``extra_info``.
.. versionchanged:: 1.5
Follows references from module objects instead of stopping.
New parameter: ``refcounts``.
.. versionchanged:: 1.8
New parameter: ``shortnames``.
.. versionchanged:: 2.0
New parameter: ``output``.
.. versionchanged:: 3.5
New parameter: ``extra_node_attrs``.
"""
return _show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore,
filter=filter, too_many=too_many, highlight=highlight,
edge_func=gc.get_referents, swap_source_target=True,
filename=filename, extra_info=extra_info,
refcounts=refcounts, shortnames=shortnames,
output=output, extra_node_attrs=extra_node_attrs)
def show_chain(*chains, **kw):
"""Show a chain (or several chains) of object references.
Useful in combination with :func:`find_ref_chain` or
:func:`find_backref_chain`, e.g.
>>> show_chain(find_backref_chain(obj, is_proper_module))
You can specify if you want that chain traced backwards or forwards
by passing a ``backrefs`` keyword argument, e.g.
>>> show_chain(find_ref_chain(obj, is_proper_module),
... backrefs=False)
Ideally this shouldn't matter, but for some objects
:func:`gc.get_referrers` and :func:`gc.get_referents` are not perfectly
symmetrical.
You can specify ``highlight``, ``extra_info``, ``refcounts``,
``shortnames``, ``filename`` or ``output`` arguments like for
:func:`show_backrefs` or :func:`show_refs`.
.. versionadded:: 1.5
.. versionchanged:: 1.7
New parameter: ``backrefs``.
.. versionchanged:: 2.0
New parameter: ``output``.
"""
backrefs = kw.pop('backrefs', True)
chains = [chain for chain in chains if chain] # remove empty ones
def in_chains(x, ids=set(map(id, itertools.chain(*chains)))):
return id(x) in ids
max_depth = max(map(len, chains)) - 1
if backrefs:
show_backrefs([chain[-1] for chain in chains], max_depth=max_depth,
filter=in_chains, **kw)
else:
show_refs([chain[0] for chain in chains], max_depth=max_depth,
filter=in_chains, **kw)
def is_proper_module(obj):
"""
Returns ``True`` if ``obj`` can be treated like a garbage collector root.
That is, if ``obj`` is a module that is in ``sys.modules``.
>>> import types
>>> is_proper_module([])
False
>>> is_proper_module(types)
True
>>> is_proper_module(types.ModuleType('foo'))
False
.. versionadded:: 1.8
"""
return (
inspect.ismodule(obj)
and obj is sys.modules.get(getattr(obj, '__name__', None))
)
#
# Internal helpers
#
def _find_chain(obj, predicate, edge_func, max_depth=20, extra_ignore=()):
queue = [obj]
depth = {id(obj): 0}
parent = {id(obj): None}
ignore = set(extra_ignore)
ignore.add(id(extra_ignore))
ignore.add(id(queue))
ignore.add(id(depth))
ignore.add(id(parent))
ignore.add(id(ignore))
ignore.add(id(sys._getframe())) # this function
ignore.add(id(sys._getframe(1))) # find_chain/find_backref_chain
gc.collect()
while queue:
target = queue.pop(0)
if predicate(target):
chain = [target]
while parent[id(target)] is not None:
target = parent[id(target)]
chain.append(target)
return chain
tdepth = depth[id(target)]
if tdepth < max_depth:
referrers = edge_func(target)
ignore.add(id(referrers))
for source in referrers:
if id(source) in ignore:
continue
if id(source) not in depth:
depth[id(source)] = tdepth + 1
parent[id(source)] = target
queue.append(source)
return [obj] # not found
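# _find_chain() is a breadth-first search over the reference graph:
# ``edge_func`` yields a node's neighbours, ``predicate`` decides when to
# stop, and the ``parent`` map is then walked back to build the chain.
# A minimal sketch of how the public helpers are assumed to delegate here
# (gc.get_referrers walks backrefs, gc.get_referents walks forward refs):
#
#   def find_backref_chain(obj, predicate, max_depth=20, extra_ignore=()):
#       return _find_chain(obj, predicate, gc.get_referrers,
#                          max_depth=max_depth, extra_ignore=extra_ignore)
#
#   def find_ref_chain(obj, predicate, max_depth=20, extra_ignore=()):
#       return _find_chain(obj, predicate, gc.get_referents,
#                          max_depth=max_depth, extra_ignore=extra_ignore)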
def _show_graph(objs, edge_func, swap_source_target,
max_depth=3, extra_ignore=(), filter=None, too_many=10,
highlight=None, filename=None, extra_info=None,
refcounts=False, shortnames=True, output=None,
cull_func=None, extra_node_attrs=None):
if not _isinstance(objs, (list, tuple)):
objs = [objs]
is_interactive = False
if filename and output:
raise ValueError('Cannot specify both output and filename.')
elif output:
f = output
elif filename and filename.endswith('.dot'):
f = codecs.open(filename, 'w', encoding='utf-8')
dot_filename = filename
elif IS_INTERACTIVE and not filename:
is_interactive = True
f = StringIO()
else:
fd, dot_filename = tempfile.mkstemp(prefix='objgraph-',
suffix='.dot', text=True)
f = os.fdopen(fd, "w")
if getattr(f, 'encoding', None): # pragma: PY3
# Python 3 will wrap the file in the user's preferred encoding
# Re-wrap it for utf-8
import io
f = io.TextIOWrapper(f.detach(), 'utf-8')
f.write('digraph ObjectGraph {\n'
' node[shape=box, style=filled, fillcolor=white];\n')
queue = []
depth = {}
ignore = set(extra_ignore)
ignore.add(id(objs))
ignore.add(id(extra_ignore))
ignore.add(id(queue))
ignore.add(id(depth))
ignore.add(id(ignore))
ignore.add(id(sys._getframe())) # this function
ignore.add(id(sys._getframe().f_locals))
ignore.add(id(sys._getframe(1))) # show_refs/show_backrefs
ignore.add(id(sys._getframe(1).f_locals))
for obj in objs:
f.write(' %s[fontcolor=red];\n' % (_obj_node_id(obj)))
depth[id(obj)] = 0
queue.append(obj)
del obj
gc.collect()
nodes = 0
while queue:
nodes += 1
# The names "source" and "target" are reversed here because
# originally there was just show_backrefs() and we were
# traversing the reference graph backwards.
target = queue.pop(0)
tdepth = depth[id(target)]
f.write(' %s[label="%s"%s];\n' % (_obj_node_id(target),
_obj_label(target, extra_info,
refcounts, shortnames),
_obj_attrs(target,
extra_node_attrs)))
h, s, v = _gradient((0, 0, 1), (0, 0, .3), tdepth, max_depth)
if inspect.ismodule(target):
h = .3
s = 1
if highlight and highlight(target):
h = .6
s = .6
v = 0.5 + v * 0.5
f.write(' %s[fillcolor="%g,%g,%g"];\n'
% (_obj_node_id(target), h, s, v))
if v < 0.5:
f.write(' %s[fontcolor=white];\n' % (_obj_node_id(target)))
if hasattr(getattr(target, '__class__', None), '__del__'):
f.write(' %s->%s_has_a_del[color=red,style=dotted,'
'len=0.25,weight=10];\n' % (_obj_node_id(target),
_obj_node_id(target)))
f.write(' %s_has_a_del[label="__del__",shape=doublecircle,'
'height=0.25,color=red,fillcolor="0,.5,1",fontsize=6];\n'
% (_obj_node_id(target)))
if tdepth >= max_depth:
continue
if cull_func is not None and cull_func(target):
continue
neighbours = edge_func(target)
ignore.add(id(neighbours))
n = 0
skipped = 0
for source in neighbours:
if id(source) in ignore:
continue
if filter and not filter(source):
continue
if n >= too_many:
skipped += 1
continue
if swap_source_target:
srcnode, tgtnode = target, source
else:
srcnode, tgtnode = source, target
elabel = _edge_label(srcnode, tgtnode, shortnames)
f.write(' %s -> %s%s;\n' % (_obj_node_id(srcnode),
_obj_node_id(tgtnode), elabel))
if id(source) not in depth:
depth[id(source)] = tdepth + 1
queue.append(source)
n += 1
del source
del neighbours
if skipped > 0:
h, s, v = _gradient((0, 1, 1), (0, 1, .3), tdepth + 1, max_depth)
if swap_source_target:
label = "%d more references" % skipped
edge = "%s->too_many_%s" % (_obj_node_id(target),
_obj_node_id(target))
else:
label = "%d more backreferences" % skipped
edge = "too_many_%s->%s" % (_obj_node_id(target),
_obj_node_id(target))
f.write(' %s[color=red,style=dotted,len=0.25,weight=10];\n'
% edge)
f.write(' too_many_%s[label="%s",shape=box,height=0.25,'
'color=red,fillcolor="%g,%g,%g",fontsize=6];\n'
% (_obj_node_id(target), label, h, s, v))
f.write(' too_many_%s[fontcolor=white];\n'
% (_obj_node_id(target)))
f.write("}\n")
if output:
return
if is_interactive:
return graphviz.Source(f.getvalue())
else:
# The file should only be closed if this function was in charge of
# opening the file.
f.close()
print("Graph written to %s (%d nodes)" % (dot_filename, nodes))
_present_graph(dot_filename, filename)
def _present_graph(dot_filename, filename=None):
"""Present a .dot file to the user in the requested fashion.
If ``filename`` is provided, runs ``dot`` to convert the .dot file
into the desired format, determined by the filename extension.
If ``filename`` is not provided, tries to launch ``xdot``, a
graphical .dot file viewer. If ``xdot`` is not present on the system,
converts the graph to a PNG.
"""
if filename == dot_filename:
# nothing to do, the user asked for a .dot file and got it
return
if not filename and _program_in_path('xdot'):
print("Spawning graph viewer (xdot)")
subprocess.Popen(['xdot', dot_filename], close_fds=True)
elif _program_in_path('dot'):
if not filename:
print("Graph viewer (xdot) not found, generating a png instead")
filename = dot_filename[:-4] + '.png'
stem, ext = os.path.splitext(filename)
cmd = ['dot', '-T' + ext[1:], '-o' + filename, dot_filename]
dot = subprocess.Popen(cmd, close_fds=False)
dot.wait()
if dot.returncode != 0:
# XXX: shouldn't this go to stderr or a log?
print('dot failed (exit code %d) while executing "%s"'
% (dot.returncode, ' '.join(cmd)))
else:
print("Image generated as %s" % filename)
else:
if not filename:
print("Graph viewer (xdot) and image renderer (dot) not found,"
" not doing anything else")
else:
print("Image renderer (dot) not found, not doing anything else")
def _obj_node_id(obj):
return ('o%d' % id(obj)).replace('-', '_')
def _obj_attrs(obj, extra_node_attrs):
if extra_node_attrs is not None:
attrs = extra_node_attrs(obj)
return ", " + ", ".join('%s="%s"' % (name, _quote(value))
for name, value in sorted(iteritems(attrs))
if value is not None)
else:
return ""
def _obj_label(obj, extra_info=None, refcounts=False, shortnames=True):
if shortnames:
label = [_short_typename(obj)]
else:
label = [_long_typename(obj)]
if refcounts:
label[0] += ' [%d]' % (sys.getrefcount(obj) - 4)
# Why -4? To ignore the references coming from
# obj_label's frame (obj)
# show_graph's frame (target variable)
# sys.getrefcount()'s argument
# something else that doesn't show up in gc.get_referrers()
label.append(_safe_repr(obj))
if extra_info:
label.append(str(extra_info(obj)))
return _quote('\n'.join(label))
def _quote(s):
return (s.replace("\\", "\\\\")
.replace("\"", "\\\"")
.replace("\n", "\\n")
.replace("\0", "\\\\0"))
def _get_obj_type(obj):
objtype = type(obj)
if type(obj) == InstanceType: # pragma: PY2 -- no old-style classes on PY3
objtype = obj.__class__
return objtype
def _short_typename(obj):
return _get_obj_type(obj).__name__
def _long_typename(obj):
objtype = _get_obj_type(obj)
name = objtype.__name__
module = getattr(objtype, '__module__', None)
if module:
return '%s.%s' % (module, name)
else:
return name
def _safe_repr(obj):
try:
return _short_repr(obj)
except Exception:
return '(unrepresentable)'
def _name_or_repr(value):
try:
result = value.__name__
except AttributeError:
result = repr(value)[:40]
if _isinstance(result, basestring):
return result
else:
return repr(value)[:40]
def _short_repr(obj):
if _isinstance(obj, (type, types.ModuleType, types.BuiltinMethodType,
types.BuiltinFunctionType)):
return _name_or_repr(obj)
if _isinstance(obj, types.MethodType):
name = _name_or_repr(obj.__func__)
if obj.__self__:
return name + ' (bound)'
else: # pragma: PY2 -- no unbound methods on Python 3
return name
# NB: types.LambdaType is an alias for types.FunctionType!
if _isinstance(obj, types.LambdaType) and obj.__name__ == '<lambda>':
return 'lambda: %s:%s' % (os.path.basename(obj.__code__.co_filename),
obj.__code__.co_firstlineno)
if _isinstance(obj, types.FrameType):
return '%s:%s' % (obj.f_code.co_filename, obj.f_lineno)
if _isinstance(obj, (tuple, list, dict, set)):
return '%d items' % len(obj)
return repr(obj)[:40]
def _gradient(start_color, end_color, depth, max_depth):
if max_depth == 0:
# avoid division by zero
return start_color
h1, s1, v1 = start_color
h2, s2, v2 = end_color
f = float(depth) / max_depth
h = h1 * (1-f) + h2 * f
s = s1 * (1-f) + s2 * f
v = v1 * (1-f) + v2 * f
return h, s, v
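# Worked example: _gradient((0, 0, 1), (0, 0, .3), 1, 3) interpolates one
# third of the way between the two HSV colours, i.e. f = 1/3 and
# v = 1*(2/3) + 0.3*(1/3), giving roughly (0.0, 0.0, 0.767).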
def _edge_label(source, target, shortnames=True):
if (_isinstance(target, dict)
and target is getattr(source, '__dict__', None)):
return ' [label="__dict__",weight=10]'
if _isinstance(source, types.FrameType):
if target is source.f_locals:
return ' [label="f_locals",weight=10]'
if target is source.f_globals:
return ' [label="f_globals",weight=10]'
if _isinstance(source, types.MethodType):
try:
if target is source.__self__:
return ' [label="__self__",weight=10]'
if target is source.__func__:
return ' [label="__func__",weight=10]'
except AttributeError: # pragma: nocover
# Python < 2.6 compatibility
if target is source.im_self:
return ' [label="im_self",weight=10]'
if target is source.im_func:
return ' [label="im_func",weight=10]'
if _isinstance(source, types.FunctionType):
for k in dir(source):
if target is getattr(source, k):
return ' [label="%s",weight=10]' % _quote(k)
if _isinstance(source, dict):
for k, v in iteritems(source):
if v is target:
if _isinstance(k, basestring) and _is_identifier(k):
return ' [label="%s",weight=2]' % _quote(k)
else:
if shortnames:
tn = _short_typename(k)
else:
tn = _long_typename(k)
return ' [label="%s"]' % _quote(tn + "\n" + _safe_repr(k))
return ''
_is_identifier = re.compile('[a-zA-Z_][a-zA-Z_0-9]*$').match
def _program_in_path(program):
# XXX: Consider using distutils.spawn.find_executable or shutil.which
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
path = [os.path.join(dir, program) for dir in path]
path = [True for file in path
if os.path.isfile(file) or os.path.isfile(file + '.exe')]
return bool(path)
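# As the XXX above suggests, on Python 3.3+ this helper could simply be
# (a sketch, not a drop-in replacement while Python 2 is supported):
#
#   import shutil
#
#   def _program_in_path(program):
#       return shutil.which(program) is not None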
|
mit
| 6,699,779,760,792,617,000
| 33.575854
| 79
| 0.588202
| false
| 3.878731
| false
| false
| false
|
acg/lwpb
|
python/pbsplit.py
|
1
|
1605
|
#!/usr/bin/env python
'''
pbsplit - split a protobuf stream into multiple files
'''
import sys
import getopt
import lwpb
import lwpb.stream
import lwpb.codec
def shift(L):
    """Pop and return the first element of L (like the shell builtin)."""
    e = L[0]
    del L[0:1]
    return e
def main():
typename = ""
skip = 0
count = -1
splitsize = 1000 # in number of records
pb2file = None
infile = "-"
fin = sys.stdin
template = None
opts, args = getopt.getopt(sys.argv[1:], 'p:m:s:c:t:z:')
for o, a in opts:
if o == '-p':
pb2file = a
elif o == '-m':
typename = a
elif o == '-s':
skip = int(a)
elif o == '-c':
count = int(a)
elif o == '-t':
template = a
elif o == '-z':
splitsize = int(a)
if len(args):
infile = shift(args)
fin = file(infile)
    if template is None:
template = infile+".%05u"
codec = lwpb.codec.MessageCodec(pb2file=pb2file, typename=typename)
reader = lwpb.stream.StreamReader(fin, codec=codec)
writer = None
fout = None
outfile = None
splitnum = 0
splitwritten = 0
written = 0
for record in reader:
if reader.current_number < skip:
continue
if count >= 0 and written >= count:
break
        if fout is None:
outfile = template % splitnum
fout = file(outfile, 'w')
writer = lwpb.stream.StreamWriter(fout, codec=codec)
splitwritten = 0
writer.write_raw( reader.current_raw )
written += 1
splitwritten += 1
if splitwritten >= splitsize:
fout.close()
fout = None
splitnum += 1
if fout:
fout.close()
return 0
if __name__ == '__main__':
sys.exit(main())
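# Example invocation (hypothetical file names): split input.pb into chunks
# of 5000 records, using the message type from a compiled .pb2 descriptor:
#
#   pbsplit.py -p messages.pb2 -m my.package.Record -z 5000 input.pb
#
# Output files follow the -t template, which defaults to "<input>.%05u"
# (input.pb.00000, input.pb.00001, ...).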
|
apache-2.0
| 1,052,393,894,243,850,500
| 16.637363
| 69
| 0.576947
| false
| 3.336798
| false
| false
| false
|
bitmovin/bitmovin-python
|
examples/encoding/create_progressive_webm_encoding_with_vp9_and_opus_codecs.py
|
1
|
4978
|
import datetime
from bitmovin import Bitmovin, Encoding, HTTPSInput, S3Output, \
StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, \
MuxingStream, CloudRegion, ProgressiveWebMMuxing, VP9CodecConfiguration, OpusCodecConfiguration, VP9Quality
from bitmovin.errors import BitmovinError
API_KEY = '<INSERT_YOUR_API_KEY>'
# https://<INSERT_YOUR_HTTP_HOST>/<INSERT_YOUR_HTTP_PATH>
HTTPS_INPUT_HOST = '<INSERT_YOUR_HTTPS_HOST>'
HTTPS_INPUT_PATH = '<INSERT_YOUR_HTTPS_PATH>'
S3_OUTPUT_ACCESSKEY = '<INSERT_YOUR_ACCESS_KEY>'
S3_OUTPUT_SECRETKEY = '<INSERT_YOUR_SECRET_KEY>'
S3_OUTPUT_BUCKETNAME = '<INSERT_YOUR_BUCKET_NAME>'
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
OUTPUT_BASE_PATH = '/output/base/path/{}/'.format(date_component)
def main():
bitmovin = Bitmovin(api_key=API_KEY)
https_input = HTTPSInput(name='create_simple_encoding HTTPS input', host=HTTPS_INPUT_HOST)
https_input = bitmovin.inputs.HTTPS.create(https_input).resource
s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
secret_key=S3_OUTPUT_SECRETKEY,
bucket_name=S3_OUTPUT_BUCKETNAME,
name='Sample S3 Output')
s3_output = bitmovin.outputs.S3.create(s3_output).resource
encoding = Encoding(name='example webm encoding',
cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1,
encoder_version='BETA')
encoding = bitmovin.encodings.Encoding.create(encoding).resource
video_codec_configuration_1080p = VP9CodecConfiguration(name='example_video_codec_configuration_1080p',
bitrate=4800000,
rate=25.0,
width=1920,
height=1080,
tile_columns=2,
quality=VP9Quality.GOOD)
video_codec_configuration_1080p = bitmovin.codecConfigurations.VP9.create(video_codec_configuration_1080p).resource
audio_codec_configuration = OpusCodecConfiguration(name='example_audio_codec_configuration_english',
bitrate=128000,
rate=48000)
audio_codec_configuration = bitmovin.codecConfigurations.Opus.create(audio_codec_configuration).resource
video_input_stream = StreamInput(input_id=https_input.id,
input_path=HTTPS_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
audio_input_stream = StreamInput(input_id=https_input.id,
input_path=HTTPS_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
video_stream_1080p = Stream(codec_configuration_id=video_codec_configuration_1080p.id,
input_streams=[video_input_stream], name='Sample Stream 1080p')
video_stream_1080p = bitmovin.encodings.Stream.create(object_=video_stream_1080p,
encoding_id=encoding.id).resource
audio_stream = Stream(codec_configuration_id=audio_codec_configuration.id,
input_streams=[audio_input_stream], name='Sample Stream AUDIO')
audio_stream = bitmovin.encodings.Stream.create(object_=audio_stream,
encoding_id=encoding.id).resource
audio_muxing_stream = MuxingStream(audio_stream.id)
video_muxing_stream_1080p = MuxingStream(video_stream_1080p.id)
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
webm_muxing_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH,
acl=[acl_entry])
webm_muxing = ProgressiveWebMMuxing(streams=[video_muxing_stream_1080p, audio_muxing_stream],
filename='myfile.webm',
outputs=[webm_muxing_output],
name='Sample WebM Muxing 1080p')
webm_muxing = bitmovin.encodings.Muxing.ProgressiveWebM.create(object_=webm_muxing,
encoding_id=encoding.id).resource
bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
try:
bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))
print("File successfully encoded")
if __name__ == '__main__':
main()
|
unlicense
| -3,270,803,099,738,486,000
| 48.78
| 119
| 0.57955
| false
| 4.211506
| true
| false
| false
|
f3at/feat
|
src/feat/models/value.py
|
1
|
25453
|
# F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
from zope.interface import implements, classImplements
from feat.common import annotate, container
from feat.models import meta as models_meta, action
from feat.models.interface import IValueInfo, NotSupported, IValueOptions
from feat.models.interface import IValidator, IValueRange, ValueTypes
from feat.models.interface import IEncodingInfo, IModel, IReference
from feat.models.interface import IValueOption, IResponse, MissingParameters
from feat.models.interface import UnknownParameters, InvalidParameters
from feat.models.interface import IValueCollection, IValueList
from feat.interface.serialization import ISnapshotable
meta = models_meta.meta
def label(lable):
"""
Annotates the IValueInfo label.
@param lable: label of the IValueInfo being defined.
@type lable: str or unicode
"""
_annotate("label", lable)
def desc(desc):
"""
Annotates the IValueInfo description.
@param desc: description of the IValueInfo being defined.
@type desc: str or unicode
"""
_annotate("desc", desc)
def value_type(vtype):
"""
Annotates the IValueInfo value type.
@param vtype: type of the IValueInfo being defined.
@type vtype: ValueTypes
"""
_annotate("value_type", vtype)
def default(default):
"""
Annotates the IValueInfo default value,
will be validated at instance creation time.
@param default: default value of the IValueInfo being defined.
@type default: Any
"""
_annotate("default", default)
def option(value, is_default=False, label=None):
"""
Annotates a possible value for IValueOptions,
will be validated at instance creation time.
@param value: a possible value for the IValueOptions being defined.
@type value: Any
@param is_default: if the option should be the default value.
@type is_default: bool
@param label: option label or None; if none the string representation
of the value will be used as label.
@type label: str or unicode or None
"""
_annotate("option", value, is_default=is_default, label=label)
def options_only():
"""
Annotates to enforce the value to be one of the specified options.
"""
_annotate("options_only")
def allows(value_info):
"""
Annotate an allowed value info for a collection.
@param value_info: an allowed value for the collection.
@type value_info: IValueInfo
"""
_annotate("allows", value_info)
def is_ordered(flag):
"""Annotate a collection to be ordered.
@param flag: if the collection order is important.
@type flag: bool
"""
_annotate("is_ordered", flag)
def min_size(size):
"""
Annotate a collection minimum size.
@param size: the collection minimum size.
@type size: int
"""
_annotate("min_size", size)
def max_size(size):
"""
Annotate a collection maximum size.
@param size: the collection maximum size.
@type size: int
"""
_annotate("max_size", size)
def _annotate(name, *args, **kwargs):
method_name = "annotate_" + name
annotate.injectClassCallback(name, 4, method_name, *args, **kwargs)
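# A usage sketch (illustrative, not part of the module): the annotations
# above are meant to be called from the body of a value class, e.g.
#
#   class Color(String):
#       label("Color")
#       desc("A color name")
#       option("red", is_default=True)
#       option("green")
#       option("blue")
#       options_only()
#
# Color().validate(None) then falls back to the default (u"red"), while
# Color().validate("purple") raises ValueError because of options_only().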
class BaseValue(models_meta.Metadata):
implements(IValueInfo, IValidator)
_class_label = None
_class_desc = None
_class_value_type = None
_class_use_default = False
_class_default = None
### IValueInfo ###
@property
def label(self):
return self._class_label
@property
def desc(self):
return self._class_desc
@property
def value_type(self):
return self._class_value_type
@property
def use_default(self):
return self._class_use_default
@property
def default(self):
return self._class_default
def __eq__(self, other):
if not IValueInfo.providedBy(other):
return NotSupported
other = IValueInfo(other)
if self.value_type != other.value_type:
return False
if self.use_default != other.use_default:
return False
if self.use_default and (self._default != other.default):
return False
if IValueOptions.providedBy(self) != IValueOptions.providedBy(other):
return False
if IValueOptions.providedBy(self):
other = IValueOptions(other)
other_options = set(other.iter_options())
self_options = set(self.iter_options())
if other_options != self_options:
return False
if self.is_restricted != other.is_restricted:
return False
if IValueRange.providedBy(self) != IValueRange.providedBy(other):
return False
if IValueRange.providedBy(self):
other = IValueRange(other)
if (self.minimum != other.minimum
or self.maximum != other.maximum
or self.increment != other.increment):
return False
return True
def __ne__(self, other):
eq = self.__eq__(other)
return eq if eq is NotSupported else not eq
### IValidator ###
def validate(self, value):
if value is None and self.use_default:
value = self.default
return value
def publish(self, value):
if value is None and self.use_default:
value = self.default
return value
def as_string(self, value):
return unicode(self.publish(value))
### annotations ###
@classmethod
def annotate_label(cls, label):
"""@see: feat.models.value.label"""
cls._class_label = label
@classmethod
def annotate_desc(cls, desc):
"""@see: feat.models.value.desc"""
cls._class_desc = desc
@classmethod
def annotate_value_type(cls, value_type):
"""@see: feat.models.value.value_type"""
if value_type not in ValueTypes:
raise ValueError(value_type)
cls._class_value_type = value_type
@classmethod
def annotate_default(cls, default):
"""@see: feat.models.value.default"""
cls._class_use_default = True
cls._class_default = default
class Binary(BaseValue):
implements(IEncodingInfo)
value_type(ValueTypes.binary)
def __init__(self, mime_type=None, encoding=None):
self._mime_type = mime_type
self._encoding = encoding
### IEncodingInfo ###
@property
def mime_type(self):
return self._mime_type
@property
def encoding(self):
return self._encoding
class InterfaceValue(BaseValue):
_value_interface = None
def __init__(self, value_interface=None):
if type(self)._value_interface is None:
self._value_interface = value_interface
def validate(self, value):
new_value = BaseValue.validate(self, value)
if not self._value_interface.providedBy(value):
raise ValueError(value)
return new_value
def publish(self, value):
new_value = BaseValue.publish(self, value)
if not self._value_interface.providedBy(value):
raise ValueError("%r does not provide %r interface" %
(value, self._value_interface))
return new_value
class Response(InterfaceValue):
"""Definition of a model value."""
_value_interface = IResponse
value_type(ValueTypes.model)
class Model(InterfaceValue):
"""Definition of a model value."""
_value_interface = IModel
value_type(ValueTypes.model)
class Reference(InterfaceValue):
"""Definition of a model value."""
_value_interface = IReference
value_type(ValueTypes.reference)
class Struct(BaseValue):
"""Definition of a model value."""
_value_interface = ISnapshotable
value_type(ValueTypes.struct)
class Value(BaseValue):
_class_options = None
_class_options_only = False
def __init__(self, *args, **kwargs):
label = self._class_label
desc = self._class_desc
self._label = unicode(label) if label is not None else None
self._desc = unicode(desc) if desc is not None else None
self._value_type = self._class_value_type
self._options_only = False
self._options = []
if self._class_options is not None:
for v, l in self._class_options:
self._add_option(v, l)
self._options_only = self._class_options_only
self._use_default = self._class_use_default
self._default = None
if self._use_default:
self._default = self._validate_default(self._class_default)
if "default" in kwargs:
if len(args) > 0:
raise ValueError("If the default value is specified "
"as a keyword, no argument are allowed")
self._set_default(kwargs.pop("default"))
else:
if len(args) > 1:
raise ValueError("Only default value is "
"supported as argument")
if len(args) > 0:
self._set_default(args[0])
if kwargs:
raise ValueError("Unsupported keyword arguments")
### IValueInfo ###
@property
def label(self):
return self._label
@property
def desc(self):
return self._desc
@property
def value_type(self):
return self._value_type
@property
def use_default(self):
return self._use_default
@property
def default(self):
return self._default
### IValidator ###
def validate(self, value):
value = BaseValue.validate(self, value)
if self._options_only and not self._has_option(value):
raise ValueError("Value not allowed: %r" % (value, ))
return value
def publish(self, value):
value = BaseValue.validate(self, value)
if self._options_only and not self._has_option(value):
raise ValueError("Value not allowed: %r" % (value, ))
return value
### IValueOptions ###
@property
def is_restricted(self):
return self._options_only
def count_options(self):
return len(self._options)
def iter_options(self):
return iter(self._options)
def has_option(self, value):
try:
return self._has_option(self._validate_option(value))
except ValueError:
return False
def get_option(self, value):
value = unicode(value)
try:
return next((o for o in self._options if o.value == value))
except StopIteration:
return None
### protected ###
def _validate_default(self, value):
return self.validate(value)
def _validate_option(self, value):
return self.validate(value)
def _has_option(self, value):
try:
next((o for o in self._options if o.value == value))
return True
except StopIteration:
return False
def _set_default(self, default):
self._default = self._validate_default(default)
self._use_default = True
def _add_option(self, value, label=None):
# Disable options_only to be able to validate the value
options_only = self._options_only
self._options_only = False
try:
self._validate_option(value)
option = ValueOption(value, label)
self._options.append(option)
finally:
self._options_only = options_only
### annotations ###
@classmethod
def annotate_option(cls, value, is_default=False, label=None):
"""@see: feat.models.value.option"""
if cls._class_options is None:
cls._class_options = container.MroList("_mro_options")
classImplements(cls, IValueOptions)
if is_default:
cls._class_default = value
cls._class_use_default = True
cls._class_options.append((value, label))
@classmethod
def annotate_options_only(cls):
"""@see: feat.models.value.options_only"""
cls._class_options_only = True
class ValueOption(object):
"""Pair of value/label defining a possible option.
@see: feat.models.interface.IValueOption"""
implements(IValueOption)
def __init__(self, value, label=None):
self._value = value
self._label = unicode(label) if label is not None else unicode(value)
### IValueOption ###
@property
def value(self):
return self._value
@property
def label(self):
return self._label
def __eq__(self, other):
if not IValueOption.providedBy(other):
return False
return (self._value == other.value
and self._label == other.label)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._value) ^ hash(self._label)
class String(Value):
"""String value definition."""
value_type(ValueTypes.string)
### overridden ###
def validate(self, value):
"""
Accepts: str, unicode
Returns: unicode
"""
val = value
if isinstance(val, str):
#FIXME: unsafe decoding
val = unicode(value)
val = super(String, self).validate(val)
if not isinstance(val, unicode):
raise ValueError("Not a string: %r" % (value, ))
return val
def publish(self, value):
"""
Accepts: unicode, str
Returns: unicode
"""
val = value
if isinstance(val, str):
#FIXME: unsafe decoding
val = unicode(value)
val = super(String, self).publish(val)
if not isinstance(val, unicode):
raise ValueError("Not a string: %r" % (value, ))
return val
class Float(Value):
value_type(ValueTypes.number)
def validate(self, value):
"""
Accepts: float, int, long, str, unicode
Returns: float
"""
if isinstance(value, (str, unicode, int, long)):
value = float(value)
value = super(Float, self).validate(value)
        if not isinstance(value, float):
            raise ValueError("Not a float: %r" % (value, ))
return value
def publish(self, value):
"""
Accepts: float
Returns: float
"""
value = super(Float, self).publish(value)
if isinstance(value, int):
value = float(value)
return value
class Integer(Value):
"""Definition of an basic integer value."""
value_type(ValueTypes.integer)
### overridden ###
def validate(self, value):
"""
Accepts: int, long, str, unicode
Returns: int, long
"""
if isinstance(value, (str, unicode, float)):
value = int(value)
value = super(Integer, self).validate(value)
if not isinstance(value, (int, long)):
raise ValueError("Not an integer: %r" % (value, ))
return value
def publish(self, value):
"""
Accepts: int, long
Returns: int, long
"""
value = super(Integer, self).publish(value)
if isinstance(value, float):
value = int(value)
if not isinstance(value, (int, long)):
raise ValueError("Not an integer: %r" % (value, ))
return value
class Boolean(Value):
"""Definition of an basic integer value."""
value_type(ValueTypes.boolean)
option(True, label="True")
option(False, label="False")
options_only()
### overridden ###
def validate(self, value):
"""
Accepts: str, unicode, bool
Returns: bool
"""
if isinstance(value, bool):
return value
if isinstance(value, (str, unicode)):
if value.lower() == "true":
value = True
elif value.lower() == "false":
value = False
else:
raise ValueError("Not a boolean: %r" % (value, ))
value = super(Boolean, self).validate(value)
if not isinstance(value, bool):
raise ValueError("Not a boolean: %r" % (value, ))
return value
def publish(self, value):
value = super(Boolean, self).publish(value)
if not isinstance(value, bool):
raise ValueError("Not a boolean: %r" % (value, ))
return value
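# A quick usage note (illustrative): Boolean accepts booleans and the
# case-insensitive strings "true"/"false":
#
#   Boolean().validate("True")   # -> True
#   Boolean().validate("maybe")  # raises ValueError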
class Enum(Value):
"""Definition of integer value with a fixed
set of possible values taken from an enumeration."""
value_type(ValueTypes.string)
options_only()
implements(IValueOptions)
def __init__(self, enum, *args, **kwargs):
self._enum = enum
Value.__init__(self, *args, **kwargs)
for i in enum:
self._add_option(i)
### IValidator ###
def validate(self, value):
if value is None and self._use_default:
value = self._default
if isinstance(value, (str, unicode, int)):
if value in self._enum:
return self._enum[value]
if isinstance(value, int):
if value in self._enum:
return unicode(self._enum[value].name)
raise ValueError(value)
def publish(self, value):
if value is None and self._use_default:
value = self._default
if isinstance(value, (str, unicode)):
if value in self._enum:
return unicode(value)
if isinstance(value, int):
if value in self._enum:
return unicode(self._enum[value].name)
raise ValueError(value)
### overridden ###
def _validate_option(self, value):
return unicode(self.validate(value).name)
def _add_option(self, value, label=None):
if isinstance(value, self._enum):
value = unicode(value.name)
return Value._add_option(self, value, label)
class FixedValues(Value):
'''
    String value restricted to one of the defined options.
    Use: FixedValues(["option1", "option2", ...])
'''
value_type(ValueTypes.string)
options_only()
implements(IValueOptions)
def __init__(self, values, *args, **kwargs):
Value.__init__(self, *args, **kwargs)
for v in values:
self._add_option(v)
class Structure(Value):
implements(IValueList)
value_type(ValueTypes.struct)
_fields = container.MroList("_mro_fields")
def validate(self, value):
if not isinstance(value, dict):
raise ValueError("Expected dictionary, got %r" % (value, ))
fields = self.fields
params = set(value.keys())
expected = set([p.name for p in fields])
required = set([p.name for p in fields if p.is_required])
missing = required - params
if missing:
raise MissingParameters("", params=missing)
unknown = params - expected
if unknown:
raise UnknownParameters("", params=unknown)
param_index = dict([(p.name, p) for p in fields])
validated = {}
errors = {}
for param_name, param_value in value.iteritems():
param_name = str(param_name)
info = param_index[param_name].value_info
try:
valval = IValidator(info).validate(param_value)
validated[param_name] = valval
except ValueError, e:
errors[param_name] = e
if errors:
raise InvalidParameters("", params=errors)
for param in fields:
if not param.is_required:
info = param.value_info
if param.name not in validated and info.use_default:
validated[str(param.name)] = info.default
return validated
def publish(self, value):
def getter(value, name):
try:
if isinstance(value, dict):
return value[name]
else:
return getattr(value, name)
except (KeyError, AttributeError) as e:
raise ValueError(str(e))
result = dict()
for field in self.fields:
try:
v = getter(value, field.name)
result[field.name] = field.value_info.publish(v)
except ValueError:
if field.is_required:
raise
if field.value_info.use_default:
result[field.name] = field.value_info.publish(
field.value_info.default)
return result
### IValueList ###
@property
def fields(self):
inverted_result = []
already_added = set()
for p in reversed(self._fields):
if p.name not in already_added:
inverted_result.append(p)
already_added.add(p.name)
return list(reversed(inverted_result))
### annotations ###
@classmethod
def annotate_param(cls, name, value_info, is_required=True,
label=None, desc=None):
name = unicode(name)
param = action.Param(name, value_info, is_required=is_required,
label=label, desc=desc)
cls._fields.append(param)
field = action.param
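# A usage sketch (illustrative): fields are declared in the class body with
# the ``field`` alias defined above, and validate() checks a dict against
# them, filling in defaults for missing optional fields:
#
#   class Point(Structure):
#       field("x", Integer(), is_required=True)
#       field("y", Integer(0), is_required=False)
#
#   Point().validate({"x": 1})   # -> {"x": 1, "y": 0}
#   Point().validate({})         # raises MissingParameters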
class MetaCollection(type(Value)):
@staticmethod
def new(name, allowed_types=[], min_size=None, max_size=None,
is_ordered=True):
cls = MetaCollection(name, (Collection, ), {})
for value_info in allowed_types:
cls.annotate_allows(value_info)
cls.annotate_is_ordered(is_ordered)
if min_size is not None:
cls.annotate_min_size(min_size)
if max_size is not None:
cls.annotate_max_size(max_size)
return cls
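# A usage sketch (illustrative): build a collection type on the fly and
# validate candidate values against it:
#
#   Tags = MetaCollection.new("Tags", allowed_types=[String()],
#                             min_size=1, max_size=3)
#   Tags().validate(["a", "b"])   # -> [u"a", u"b"]
#   Tags().validate([])           # raises ValueError (below min_size)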
class Collection(Value):
implements(IValueCollection)
_class_allowed_types = container.MroList("_mro_allowed_types")
_class_is_ordered = True
_class_min_size = None
_class_max_size = None
value_type(ValueTypes.collection)
### IValueCollection ###
@property
def allowed_types(self):
return list(self._class_allowed_types)
@property
def is_ordered(self):
return self._class_is_ordered
@property
def min_size(self):
return self._class_min_size
@property
def max_size(self):
return self._class_max_size
### overridden ###
def validate(self, value):
return self._convert(value, "validate")
def publish(self, value):
return self._convert(value, "publish")
### annotations ###
@classmethod
def annotate_allows(cls, value_info):
"""@see: feat.models.value.allows"""
value_info = _validate_value_info(value_info)
cls._class_allowed_types.append(value_info)
@classmethod
def annotate_is_ordered(cls, flag):
"""@see: feat.models.value.is_ordered"""
cls._class_is_ordered = _validate_flag(flag)
@classmethod
def annotate_min_size(cls, size):
"""@see: feat.models.value.min_size"""
cls._class_min_size = _validate_size(size)
@classmethod
def annotate_max_size(cls, size):
"""@see: feat.models.value.max_size"""
cls._class_max_size = _validate_size(size)
### private ###
def _convert(self, value, method_name):
if isinstance(value, (str, unicode)):
raise ValueError(value)
try:
all_values = list(value)
except TypeError:
raise ValueError(value)
result = []
if self._class_min_size is not None:
if len(all_values) < self._class_min_size:
raise ValueError(value)
if self._class_max_size is not None:
if len(all_values) > self._class_max_size:
raise ValueError(value)
allowed_types = list(self._class_allowed_types)
for v in all_values:
for allowed in allowed_types:
try:
result.append(getattr(allowed, method_name)(v))
break
except (ValueError, InvalidParameters), e:
continue
else:
raise ValueError(value)
return result
### private ###
def _validate_value_info(value_info):
return IValueInfo(value_info)
def _validate_size(size):
return int(size)
def _validate_flag(flag):
return bool(flag)
|
gpl-2.0
| 6,844,360,681,269,452,000
| 26.606291
| 78
| 0.591011
| false
| 4.179475
| false
| false
| false
|
beiko-lab/gengis
|
bin/Lib/site-packages/numpy/ma/mrecords.py
|
1
|
28557
|
""":mod:`numpy.ma..mrecords`
Defines the equivalent of :class:`numpy.recarrays` for masked arrays,
where fields can be accessed as attributes.
Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
and the masking of individual fields.
:author: Pierre Gerard-Marchant
"""
#!!!: * We should make sure that no field is called '_mask','mask','_fieldmask',
#!!!: or whatever restricted keywords.
#!!!: An idea would be to no bother in the first place, and then rename the
#!!!: invalid fields with a trailing underscore...
#!!!: Maybe we could just overload the parser function ?
__author__ = "Pierre GF Gerard-Marchant"
import sys
import numpy as np
from numpy import bool_, dtype, \
ndarray, recarray, array as narray
import numpy.core.numerictypes as ntypes
from numpy.core.records import fromarrays as recfromarrays, \
fromrecords as recfromrecords
_byteorderconv = np.core.records._byteorderconv
_typestr = ntypes._typestr
import numpy.ma as ma
from numpy.ma import MAError, MaskedArray, masked, nomask, masked_array, \
getdata, getmaskarray, filled
_check_fill_value = ma.core._check_fill_value
import warnings
__all__ = ['MaskedRecords', 'mrecarray',
'fromarrays', 'fromrecords', 'fromtextfile', 'addfield',
]
reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
def _getformats(data):
"Returns the formats of each array of arraylist as a comma-separated string."
if hasattr(data, 'dtype'):
return ",".join([desc[1] for desc in data.dtype.descr])
formats = ''
for obj in data:
obj = np.asarray(obj)
formats += _typestr[obj.dtype.type]
if issubclass(obj.dtype.type, ntypes.flexible):
formats += `obj.itemsize`
formats += ','
return formats[:-1]
def _checknames(descr, names=None):
"""Checks that the field names of the descriptor ``descr`` are not some
reserved keywords. If this is the case, a default 'f%i' is substituted.
If the argument `names` is not None, updates the field names to valid names.
"""
ndescr = len(descr)
default_names = ['f%i' % i for i in range(ndescr)]
if names is None:
new_names = default_names
else:
if isinstance(names, (tuple, list)):
new_names = names
elif isinstance(names, str):
new_names = names.split(',')
else:
raise NameError("illegal input names %s" % `names`)
nnames = len(new_names)
if nnames < ndescr:
new_names += default_names[nnames:]
ndescr = []
for (n, d, t) in zip(new_names, default_names, descr.descr):
if n in reserved_fields:
if t[0] in reserved_fields:
ndescr.append((d, t[1]))
else:
ndescr.append(t)
else:
ndescr.append((n, t[1]))
return np.dtype(ndescr)
def _get_fieldmask(self):
mdescr = [(n, '|b1') for n in self.dtype.names]
fdmask = np.empty(self.shape, dtype=mdescr)
fdmask.flat = tuple([False] * len(mdescr))
return fdmask
class MaskedRecords(MaskedArray, object):
"""
*IVariables*:
_data : {recarray}
Underlying data, as a record array.
_mask : {boolean array}
Mask of the records. A record is masked when all its fields are masked.
_fieldmask : {boolean recarray}
Record array of booleans, setting the mask of each individual field of each record.
_fill_value : {record}
Filling values for each field.
"""
#............................................
def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False,
mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
copy=False,
**options):
#
self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
strides=strides, formats=formats, names=names,
titles=titles, byteorder=byteorder,
aligned=aligned,)
#
mdtype = ma.make_mask_descr(self.dtype)
if mask is nomask or not np.size(mask):
if not keep_mask:
self._mask = tuple([False] * len(mdtype))
else:
mask = np.array(mask, copy=copy)
if mask.shape != self.shape:
(nd, nm) = (self.size, mask.size)
if nm == 1:
mask = np.resize(mask, self.shape)
elif nm == nd:
mask = np.reshape(mask, self.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MAError(msg % (nd, nm))
copy = True
if not keep_mask:
self.__setmask__(mask)
self._sharedmask = True
else:
if mask.dtype == mdtype:
_mask = mask
else:
_mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
self._mask = _mask
return self
#......................................................
def __array_finalize__(self, obj):
# Make sure we have a _fieldmask by default ..
_mask = getattr(obj, '_mask', None)
if _mask is None:
objmask = getattr(obj, '_mask', nomask)
_dtype = ndarray.__getattribute__(self, 'dtype')
if objmask is nomask:
_mask = ma.make_mask_none(self.shape, dtype=_dtype)
else:
mdescr = ma.make_mask_descr(_dtype)
_mask = narray([tuple([m] * len(mdescr)) for m in objmask],
dtype=mdescr).view(recarray)
# Update some of the attributes
_dict = self.__dict__
_dict.update(_mask=_mask)
self._update_from(obj)
if _dict['_baseclass'] == ndarray:
_dict['_baseclass'] = recarray
return
def _getdata(self):
"Returns the data as a recarray."
return ndarray.view(self, recarray)
_data = property(fget=_getdata)
def _getfieldmask(self):
"Alias to mask"
return self._mask
_fieldmask = property(fget=_getfieldmask)
def __len__(self):
"Returns the length"
# We have more than one record
if self.ndim:
return len(self._data)
# We have only one record: return the nb of fields
return len(self.dtype)
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError: # attr must be a fieldname
pass
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
# So far, so good...
_localdict = ndarray.__getattribute__(self, '__dict__')
_data = ndarray.view(self, _localdict['_baseclass'])
obj = _data.getfield(*res)
if obj.dtype.fields:
raise NotImplementedError("MaskedRecords is currently limited to"\
"simple records...")
# Get some special attributes
# Reset the object's mask
hasmasked = False
_mask = _localdict.get('_mask', None)
if _mask is not None:
try:
_mask = _mask[attr]
except IndexError:
# Couldn't find a mask: use the default (nomask)
pass
hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any()
if (obj.shape or hasmasked):
obj = obj.view(MaskedArray)
obj._baseclass = ndarray
obj._isfield = True
obj._mask = _mask
# Reset the field values
_fill_value = _localdict.get('_fill_value', None)
if _fill_value is not None:
try:
obj._fill_value = _fill_value[attr]
except ValueError:
obj._fill_value = None
else:
obj = obj.item()
return obj
def __setattr__(self, attr, val):
"Sets the attribute attr to the value val."
# Should we call __setmask__ first ?
if attr in ['mask', 'fieldmask']:
self.__setmask__(val)
return
# Create a shortcut (so that we don't have to call getattr all the time)
_localdict = object.__getattribute__(self, '__dict__')
# Check whether we're creating a new field
newattr = attr not in _localdict
try:
# Is attr a generic attribute ?
ret = object.__setattr__(self, attr, val)
except:
# Not a generic attribute: exit if it's not a valid field
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
if not (attr in fielddict or attr in optinfo):
exctype, value = sys.exc_info()[:2]
raise exctype, value
else:
# Get the list of names ......
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
# Check the attribute
if attr not in fielddict:
return ret
if newattr: # We just added this one
try: # or this setattr worked on an internal
# attribute.
object.__delattr__(self, attr)
except:
return ret
# Let's try to set the field
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
#
if val is masked:
_fill_value = _localdict['_fill_value']
if _fill_value is not None:
dval = _localdict['_fill_value'][attr]
else:
dval = val
mval = True
else:
dval = filled(val)
mval = getmaskarray(val)
obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
_localdict['_mask'].__setitem__(attr, mval)
return obj
def __getitem__(self, indx):
"""Returns all the fields sharing the same fieldname base.
The fieldname base is either `_data` or `_mask`."""
_localdict = self.__dict__
_mask = ndarray.__getattribute__(self, '_mask')
_data = ndarray.view(self, _localdict['_baseclass'])
# We want a field ........
if isinstance(indx, basestring):
#!!!: Make sure _sharedmask is True to propagate back to _fieldmask
#!!!: Don't use _set_mask, there are some copies being made...
#!!!: ...that break propagation
#!!!: Don't force the mask to nomask, that wrecks easy masking
obj = _data[indx].view(MaskedArray)
obj._mask = _mask[indx]
obj._sharedmask = True
fval = _localdict['_fill_value']
if fval is not None:
obj._fill_value = fval[indx]
# Force to masked if the mask is True
if not obj.ndim and obj._mask:
return masked
return obj
# We want some elements ..
# First, the data ........
obj = np.array(_data[indx], copy=False).view(mrecarray)
obj._mask = np.array(_mask[indx], copy=False).view(recarray)
return obj
#....
def __setitem__(self, indx, value):
"Sets the given record to value."
MaskedArray.__setitem__(self, indx, value)
if isinstance(indx, basestring):
self._mask[indx] = ma.getmaskarray(value)
def __str__(self):
"Calculates the string representation."
if self.size > 1:
mstr = ["(%s)" % ",".join([str(i) for i in s])
for s in zip(*[getattr(self, f) for f in self.dtype.names])]
return "[%s]" % ", ".join(mstr)
else:
mstr = ["%s" % ",".join([str(i) for i in s])
for s in zip([getattr(self, f) for f in self.dtype.names])]
return "(%s)" % ", ".join(mstr)
#
def __repr__(self):
"Calculates the repr representation."
_names = self.dtype.names
fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
reprstr.insert(0, 'masked_records(')
reprstr.extend([fmt % (' fill_value', self.fill_value),
' )'])
return str("\n".join(reprstr))
# #......................................................
def view(self, dtype=None, type=None):
"""Returns a view of the mrecarray."""
# OK, basic copy-paste from MaskedArray.view...
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
# Here again...
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
# OK, there's the change
except TypeError:
dtype = np.dtype(dtype)
# we need to revert to MaskedArray, but keeping the possibility
# ...of subclasses (eg, TimeSeriesRecords), so we'll force a type
# ...set to the first parent
if dtype.fields is None:
basetype = self.__class__.__bases__[0]
output = self.__array__().view(dtype, basetype)
output._update_from(self)
else:
output = ndarray.view(self, dtype)
output._fill_value = None
else:
output = ndarray.view(self, dtype, type)
# Update the mask, just like in MaskedArray.view
if (getattr(output, '_mask', nomask) is not nomask):
mdtype = ma.make_mask_descr(output.dtype)
output._mask = self._mask.view(mdtype, ndarray)
output._mask.shape = output.shape
return output
def harden_mask(self):
"Forces the mask to hard"
self._hardmask = True
def soften_mask(self):
"Forces the mask to soft"
self._hardmask = False
def copy(self):
"""Returns a copy of the masked record."""
_localdict = self.__dict__
copied = self._data.copy().view(type(self))
copied._mask = self._mask.copy()
return copied
def tolist(self, fill_value=None):
"""Copy the data portion of the array to a hierarchical python
list and returns that list.
Data items are converted to the nearest compatible Python
type. Masked values are converted to fill_value. If
fill_value is None, the corresponding entries in the output
list will be ``None``.
"""
if fill_value is not None:
return self.filled(fill_value).tolist()
result = narray(self.filled().tolist(), dtype=object)
mask = narray(self._mask.tolist())
result[mask] = None
return result.tolist()
#--------------------------------------------
# Pickling
def __getstate__(self):
"""Return the internal state of the masked array, for pickling purposes.
"""
state = (1,
self.shape,
self.dtype,
self.flags.fnc,
self._data.tostring(),
self._mask.tostring(),
self._fill_value,
)
return state
#
def __setstate__(self, state):
"""Restore the internal state of the masked array, for pickling purposes.
``state`` is typically the output of the ``__getstate__`` output, and is a
5-tuple:
- class name
- a tuple giving the shape of the data
- a typecode for the data
- a binary string for the data
- a binary string for the mask.
"""
(ver, shp, typ, isf, raw, msk, flv) = state
ndarray.__setstate__(self, (shp, typ, isf, raw))
mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
self.fill_value = flv
#
def __reduce__(self):
"""Return a 3-tuple for pickling a MaskedArray.
"""
return (_mrreconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
"""Internal function that builds a new MaskedArray from the
information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
# _data._mask = ndarray.__new__(ndarray, baseshape, 'b1')
# return _data
_mask = ndarray.__new__(ndarray, baseshape, 'b1')
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
mrecarray = MaskedRecords
#####---------------------------------------------------------------------------
#---- --- Constructors ---
#####---------------------------------------------------------------------------
def fromarrays(arraylist, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None,
fill_value=None):
"""Creates a mrecarray from a (flat) list of masked arrays.
Parameters
----------
arraylist : sequence
A list of (masked) arrays. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None, integer}, optional
Number of records. If None, shape is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
datalist = [getdata(x) for x in arraylist]
masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
_array = recfromarrays(datalist,
dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles, aligned=aligned,
byteorder=byteorder).view(mrecarray)
_array._mask.flat = zip(*masklist)
if fill_value is not None:
_array.fill_value = fill_value
return _array
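# Example (illustrative): combining two masked arrays into one mrecarray
# whose per-field masks are taken from the inputs:
#
#   >>> x = ma.array([1, 2, 3], mask=[0, 0, 1])
#   >>> y = ma.array([3.3, 4.4, 5.5], mask=[0, 1, 0])
#   >>> mrec = fromarrays([x, y], names='a,b')
#   >>> # mrec['a'] is masked where x was masked: [1, 2, --]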
#..............................................................................
def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None,
fill_value=None, mask=nomask):
"""Creates a MaskedRecords from a list of records.
Parameters
----------
reclist : sequence
A list of records. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None,int}, optional
Number of records. If None, ``shape`` is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
mask : {nomask, sequence}, optional.
External mask to apply on the data.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
# Grab the initial _fieldmask, if needed:
_mask = getattr(reclist, '_mask', None)
# Get the list of records.....
try:
nfields = len(reclist[0])
except TypeError:
nfields = len(reclist[0].dtype)
if isinstance(reclist, ndarray):
# Make sure we don't have some hidden mask
if isinstance(reclist, MaskedArray):
reclist = reclist.filled().view(ndarray)
# Grab the initial dtype, just in case
if dtype is None:
dtype = reclist.dtype
reclist = reclist.tolist()
mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles,
aligned=aligned, byteorder=byteorder).view(mrecarray)
# Set the fill_value if needed
if fill_value is not None:
mrec.fill_value = fill_value
# Now, let's deal w/ the mask
if mask is not nomask:
mask = np.array(mask, copy=False)
maskrecordlength = len(mask.dtype)
if maskrecordlength:
mrec._mask.flat = mask
elif len(mask.shape) == 2:
mrec._mask.flat = [tuple(m) for m in mask]
else:
mrec.__setmask__(mask)
if _mask is not None:
mrec._mask[:] = _mask
return mrec
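# Hedged usage sketch (illustrative data): an explicit 2D mask marks the
# second field of the first record as missing.
#
#     >>> recs = [(1, 1.), (2, 2.)]
#     >>> mrec = fromrecords(recs, names='a,b', mask=[(0, 1), (0, 0)])
#     >>> mrec.b        # the first entry comes back masked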
def _guessvartypes(arr):
"""Tries to guess the dtypes of the str_ ndarray `arr`, by testing element-wise
conversion. Returns a list of dtypes.
The array is first converted to ndarray. If the array is 2D, the test is performed
on the first line. An exception is raised if the file is 3D or more.
"""
vartypes = []
arr = np.asarray(arr)
    if len(arr.shape) == 2:
arr = arr[0]
elif len(arr.shape) > 2:
raise ValueError("The array should be 2D at most!")
# Start the conversion loop .......
for f in arr:
try:
int(f)
except ValueError:
try:
float(f)
except ValueError:
try:
                complex(f)  # value unused: we only test convertibility
except ValueError:
vartypes.append(arr.dtype)
else:
vartypes.append(np.dtype(complex))
else:
vartypes.append(np.dtype(float))
else:
vartypes.append(np.dtype(int))
return vartypes
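# Hedged sketch of the guessing order above (int, then float, then complex,
# falling back to the array's own string dtype); exact dtype names vary by
# platform:
#
#     >>> _guessvartypes(np.array(['1', '1.5', '1+2j', 'spam']))
#     [dtype('int...'), dtype('float64'), dtype('complex128'), dtype('|S4')]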
def openfile(fname):
"Opens the file handle of file `fname`"
# A file handle ...................
if hasattr(fname, 'readline'):
return fname
# Try to open the file and guess its type
try:
f = open(fname)
except IOError:
raise IOError("No such file: '%s'" % fname)
if f.readline()[:2] != "\\x":
f.seek(0, 0)
return f
f.close()
raise NotImplementedError("Wow, binary file")
def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
varnames=None, vartypes=None):
"""Creates a mrecarray from data stored in the file `filename`.
Parameters
----------
filename : {file name/handle}
Handle of an opened file.
delimitor : {None, string}, optional
Alphanumeric character used to separate columns in the file.
        If None, any (group of) whitespace string(s) will be used.
commentchar : {'#', string}, optional
Alphanumeric character used to mark the start of a comment.
missingchar : {'', string}, optional
String indicating missing data, and used to create the masks.
varnames : {None, sequence}, optional
Sequence of the variable names. If None, a list will be created from
the first non empty line of the file.
vartypes : {None, sequence}, optional
Sequence of the variables dtypes. If None, it will be estimated from
the first non-commented line.
Ultra simple: the varnames are in the header, one line"""
# Try to open the file ......................
f = openfile(fname)
# Get the first non-empty line as the varnames
while True:
line = f.readline()
firstline = line[:line.find(commentchar)].strip()
_varnames = firstline.split(delimitor)
if len(_varnames) > 1:
break
if varnames is None:
varnames = _varnames
# Get the data ..............................
_variables = masked_array([line.strip().split(delimitor) for line in f
if line[0] != commentchar and len(line) > 1])
(_, nfields) = _variables.shape
f.close()
# Try to guess the dtype ....................
if vartypes is None:
vartypes = _guessvartypes(_variables[0])
else:
vartypes = [np.dtype(v) for v in vartypes]
if len(vartypes) != nfields:
msg = "Attempting to %i dtypes for %i fields!"
msg += " Reverting to default."
warnings.warn(msg % (len(vartypes), nfields))
vartypes = _guessvartypes(_variables[0])
# Construct the descriptor ..................
mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
mfillv = [ma.default_fill_value(f) for f in vartypes]
# Get the data and the mask .................
# We just need a list of masked_arrays. It's easier to create it like that:
_mask = (_variables.T == missingchar)
_datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
return fromarrays(_datalist, dtype=mdescr)
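# Hedged usage sketch (the file name is illustrative): the first non-empty
# line holds the variable names, followed by whitespace-separated rows.
#
#     >>> open('/tmp/data.txt', 'w').write("a b\n1 1.5\n2 2.5\n")
#     >>> mrec = fromtextfile('/tmp/data.txt')
#     >>> mrec.a, mrec.b    # int and float fields, guessed by _guessvartypes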
#....................................................................
def addfield(mrecord, newfield, newfieldname=None):
"""Adds a new field to the masked record array, using `newfield` as data
and `newfieldname` as name. If `newfieldname` is None, the new field name is
set to 'fi', where `i` is the number of existing fields.
"""
_data = mrecord._data
_mask = mrecord._mask
if newfieldname is None or newfieldname in reserved_fields:
newfieldname = 'f%i' % len(_data.dtype)
newfield = ma.array(newfield)
# Get the new data ............
# Create a new empty recarray
newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
newdata = recarray(_data.shape, newdtype)
    # Add the existing fields
[newdata.setfield(_data.getfield(*f), *f)
for f in _data.dtype.fields.values()]
# Add the new field
newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
newdata = newdata.view(MaskedRecords)
# Get the new mask .............
# Create a new empty recarray
newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
newmask = recarray(_data.shape, newmdtype)
# Add the old masks
[newmask.setfield(_mask.getfield(*f), *f)
for f in _mask.dtype.fields.values()]
# Add the mask of the new field
newmask.setfield(getmaskarray(newfield),
*newmask.dtype.fields[newfieldname])
newdata._mask = newmask
return newdata
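# Hedged usage sketch (all names are illustrative): appending a masked
# column to an mrecarray built with fromarrays above.
#
#     >>> base = fromarrays([ma.array([1, 2, 3], mask=[0, 1, 0])], names='a')
#     >>> wider = addfield(base, ma.array([1., 2., 3.], mask=[0, 0, 1]),
#     ...                  newfieldname='b')
#     >>> wider.dtype.names
#     ('a', 'b')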
|
gpl-3.0
| -4,077,217,941,820,372,500
| 37.226648
| 91
| 0.536366
| false
| 4.228787
| false
| false
| false
|
schleichdi2/OPENNFR-6.1-CORE
|
opennfr-openembedded-core/meta/lib/oeqa/utils/qemurunner.py
|
1
|
24225
|
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# This module provides a class for starting qemu images using runqemu.
# It's used by testimage.bbclass.
import subprocess
import os
import sys
import time
import signal
import re
import socket
import select
import errno
import string
import threading
import codecs
from oeqa.utils.dump import HostDumper
import logging
logger = logging.getLogger("BitBake.QemuRunner")
logger.addHandler(logging.StreamHandler())
# Get Unicode non-printable control chars
control_range = list(range(0,32))+list(range(127,160))
control_chars = [chr(x) for x in control_range
if chr(x) not in string.printable]
re_control_char = re.compile('[%s]' % re.escape("".join(control_chars)))
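# Hedged example of the sanitizing this enables: a bell (\x07) and an ANSI
# escape byte (\x1b) are stripped, while printable characters survive.
#
#     >>> re_control_char.sub('', 'boot\x07 done\x1b[0m')
#     'boot done[0m'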
class QemuRunner:
def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime, dump_dir, dump_host_cmds, use_kvm):
# Popen object for runqemu
self.runqemu = None
# pid of the qemu process that runqemu will start
self.qemupid = None
# target ip - from the command line or runqemu output
self.ip = None
# host ip - where qemu is running
self.server_ip = None
# target ip netmask
self.netmask = None
self.machine = machine
self.rootfs = rootfs
self.display = display
self.tmpdir = tmpdir
self.deploy_dir_image = deploy_dir_image
self.logfile = logfile
self.boottime = boottime
self.logged = False
self.thread = None
self.use_kvm = use_kvm
self.runqemutime = 60
self.host_dumper = HostDumper(dump_host_cmds, dump_dir)
def create_socket(self):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
sock.bind(("127.0.0.1",0))
sock.listen(2)
port = sock.getsockname()[1]
logger.info("Created listening socket for qemu serial console on: 127.0.0.1:%s" % port)
return (sock, port)
except socket.error:
sock.close()
raise
def log(self, msg):
if self.logfile:
            # The data received from qemu needs to be sanitized because
            # it may contain control characters
msg = msg.decode("utf-8", errors='ignore')
msg = re_control_char.sub('', msg)
with codecs.open(self.logfile, "a", encoding="utf-8") as f:
f.write("%s" % msg)
def getOutput(self, o):
import fcntl
fl = fcntl.fcntl(o, fcntl.F_GETFL)
fcntl.fcntl(o, fcntl.F_SETFL, fl | os.O_NONBLOCK)
return os.read(o.fileno(), 1000000).decode("utf-8")
def handleSIGCHLD(self, signum, frame):
if self.runqemu and self.runqemu.poll():
if self.runqemu.returncode:
logger.info('runqemu exited with code %d' % self.runqemu.returncode)
logger.info("Output from runqemu:\n%s" % self.getOutput(self.runqemu.stdout))
self.stop()
self._dump_host()
raise SystemExit
def start(self, qemuparams = None, get_ip = True, extra_bootparams = None, runqemuparams='', launch_cmd=None, discard_writes=True):
if self.display:
os.environ["DISPLAY"] = self.display
# Set this flag so that Qemu doesn't do any grabs as SDL grabs
# interact badly with screensavers.
os.environ["QEMU_DONT_GRAB"] = "1"
if not os.path.exists(self.rootfs):
logger.error("Invalid rootfs %s" % self.rootfs)
return False
if not os.path.exists(self.tmpdir):
logger.error("Invalid TMPDIR path %s" % self.tmpdir)
return False
else:
os.environ["OE_TMPDIR"] = self.tmpdir
if not os.path.exists(self.deploy_dir_image):
logger.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
return False
else:
os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
if not launch_cmd:
launch_cmd = 'runqemu %s %s ' % ('snapshot' if discard_writes else '', runqemuparams)
if self.use_kvm:
logger.info('Using kvm for runqemu')
launch_cmd += ' kvm'
else:
logger.info('Not using kvm for runqemu')
if not self.display:
launch_cmd += ' nographic'
launch_cmd += ' %s %s' % (self.machine, self.rootfs)
return self.launch(launch_cmd, qemuparams=qemuparams, get_ip=get_ip, extra_bootparams=extra_bootparams)
def launch(self, launch_cmd, get_ip = True, qemuparams = None, extra_bootparams = None):
try:
threadsock, threadport = self.create_socket()
self.server_socket, self.serverport = self.create_socket()
except socket.error as msg:
logger.error("Failed to create listening socket: %s" % msg[1])
return False
bootparams = 'console=tty1 console=ttyS0,115200n8 printk.time=1'
if extra_bootparams:
bootparams = bootparams + ' ' + extra_bootparams
self.qemuparams = 'bootparams="{0}" qemuparams="-serial tcp:127.0.0.1:{1}"'.format(bootparams, threadport)
if qemuparams:
self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'
launch_cmd += ' tcpserial=%s %s' % (self.serverport, self.qemuparams)
self.origchldhandler = signal.getsignal(signal.SIGCHLD)
signal.signal(signal.SIGCHLD, self.handleSIGCHLD)
logger.info('launchcmd=%s'%(launch_cmd))
# FIXME: We pass in stdin=subprocess.PIPE here to work around stty
# blocking at the end of the runqemu script when using this within
# oe-selftest (this makes stty error out immediately). There ought
# to be a proper fix but this will suffice for now.
self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, preexec_fn=os.setpgrp)
output = self.runqemu.stdout
#
# We need the preexec_fn above so that all runqemu processes can easily be killed
# (by killing their process group). This presents a problem if this controlling
# process itself is killed however since those processes don't notice the death
# of the parent and merrily continue on.
#
# Rather than hack runqemu to deal with this, we add something here instead.
# Basically we fork off another process which holds an open pipe to the parent
# and also is setpgrp. If/when the pipe sees EOF from the parent dieing, it kills
        # the process group. This is like prctl's PDEATHSIG but for a process group
# rather than a single process.
#
r, w = os.pipe()
self.monitorpid = os.fork()
if self.monitorpid:
os.close(r)
self.monitorpipe = os.fdopen(w, "w")
else:
# child process
os.setpgrp()
os.close(w)
r = os.fdopen(r)
x = r.read()
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
sys.exit(0)
logger.info("runqemu started, pid is %s" % self.runqemu.pid)
logger.info("waiting at most %s seconds for qemu pid" % self.runqemutime)
endtime = time.time() + self.runqemutime
while not self.is_alive() and time.time() < endtime:
if self.runqemu.poll():
if self.runqemu.returncode:
# No point waiting any longer
logger.info('runqemu exited with code %d' % self.runqemu.returncode)
self._dump_host()
self.stop()
logger.info("Output from runqemu:\n%s" % self.getOutput(output))
return False
time.sleep(1)
out = self.getOutput(output)
netconf = False # network configuration is not required by default
if self.is_alive():
logger.info("qemu started - qemu procces pid is %s" % self.qemupid)
if get_ip:
cmdline = ''
with open('/proc/%s/cmdline' % self.qemupid) as p:
cmdline = p.read()
                # The kernel command line needs to be sanitized because
                # it may contain control characters
cmdline = re_control_char.sub('', cmdline)
try:
ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
self.ip = ips[0]
self.server_ip = ips[1]
logger.info("qemu cmdline used:\n{}".format(cmdline))
except (IndexError, ValueError):
# Try to get network configuration from runqemu output
match = re.match('.*Network configuration: ([0-9.]+)::([0-9.]+):([0-9.]+)$.*',
out, re.MULTILINE|re.DOTALL)
if match:
self.ip, self.server_ip, self.netmask = match.groups()
                        # Network configuration is required: since we
                        # couldn't get the IP from the qemu command line,
                        # qemu isn't passing it to the kernel and guest
                        # networking has not been configured yet
netconf = True
else:
logger.error("Couldn't get ip from qemu command line and runqemu output! "
"Here is the qemu command line used:\n%s\n"
"and output from runqemu:\n%s" % (cmdline, out))
self._dump_host()
self.stop()
return False
logger.info("Target IP: %s" % self.ip)
logger.info("Server IP: %s" % self.server_ip)
self.thread = LoggingThread(self.log, threadsock, logger)
self.thread.start()
if not self.thread.connection_established.wait(self.boottime):
logger.error("Didn't receive a console connection from qemu. "
"Here is the qemu command line used:\n%s\nand "
"output from runqemu:\n%s" % (cmdline, out))
self.stop_thread()
return False
logger.info("Output from runqemu:\n%s", out)
logger.info("Waiting at most %d seconds for login banner" % self.boottime)
endtime = time.time() + self.boottime
socklist = [self.server_socket]
reachedlogin = False
stopread = False
qemusock = None
bootlog = ''
data = b''
while time.time() < endtime and not stopread:
try:
sread, swrite, serror = select.select(socklist, [], [], 5)
except InterruptedError:
continue
for sock in sread:
if sock is self.server_socket:
qemusock, addr = self.server_socket.accept()
qemusock.setblocking(0)
socklist.append(qemusock)
socklist.remove(self.server_socket)
logger.info("Connection from %s:%s" % addr)
else:
data = data + sock.recv(1024)
if data:
try:
data = data.decode("utf-8", errors="surrogateescape")
bootlog += data
data = b''
if re.search(".* login:", bootlog):
self.server_socket = qemusock
stopread = True
reachedlogin = True
logger.info("Reached login banner")
except UnicodeDecodeError:
continue
else:
socklist.remove(sock)
sock.close()
stopread = True
if not reachedlogin:
logger.info("Target didn't reached login boot in %d seconds" % self.boottime)
lines = "\n".join(bootlog.splitlines()[-25:])
logger.info("Last 25 lines of text:\n%s" % lines)
logger.info("Check full boot log: %s" % self.logfile)
self._dump_host()
self.stop()
return False
# If we are not able to login the tests can continue
try:
(status, output) = self.run_serial("root\n", raw=True)
if re.search("root@[a-zA-Z0-9\-]+:~#", output):
self.logged = True
logger.info("Logged as root in serial console")
if netconf:
# configure guest networking
cmd = "ifconfig eth0 %s netmask %s up\n" % (self.ip, self.netmask)
output = self.run_serial(cmd, raw=True)[1]
if re.search("root@[a-zA-Z0-9\-]+:~#", output):
logger.info("configured ip address %s", self.ip)
else:
logger.info("Couldn't configure guest networking")
else:
logger.info("Couldn't login into serial console"
" as root using blank password")
except:
logger.info("Serial console failed while trying to login")
else:
logger.info("Qemu pid didn't appeared in %s seconds" % self.runqemutime)
self._dump_host()
self.stop()
logger.info("Output from runqemu:\n%s" % self.getOutput(output))
return False
return self.is_alive()
def stop(self):
self.stop_thread()
self.stop_qemu_system()
if hasattr(self, "origchldhandler"):
signal.signal(signal.SIGCHLD, self.origchldhandler)
if self.runqemu:
if hasattr(self, "monitorpid"):
os.kill(self.monitorpid, signal.SIGKILL)
logger.info("Sending SIGTERM to runqemu")
try:
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
endtime = time.time() + self.runqemutime
while self.runqemu.poll() is None and time.time() < endtime:
time.sleep(1)
if self.runqemu.poll() is None:
logger.info("Sending SIGKILL to runqemu")
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL)
self.runqemu = None
if hasattr(self, 'server_socket') and self.server_socket:
self.server_socket.close()
self.server_socket = None
self.qemupid = None
self.ip = None
def stop_qemu_system(self):
if self.qemupid:
try:
# qemu-system behaves well and a SIGTERM is enough
os.kill(self.qemupid, signal.SIGTERM)
except ProcessLookupError as e:
logger.warn('qemu-system ended unexpectedly')
def stop_thread(self):
if self.thread and self.thread.is_alive():
self.thread.stop()
self.thread.join()
def restart(self, qemuparams = None):
logger.info("Restarting qemu process")
if self.runqemu.poll() is None:
self.stop()
if self.start(qemuparams):
return True
return False
def is_alive(self):
if not self.runqemu:
return False
qemu_child = self.find_child(str(self.runqemu.pid))
if qemu_child:
self.qemupid = qemu_child[0]
if os.path.exists("/proc/" + str(self.qemupid)):
return True
return False
def find_child(self,parent_pid):
#
        # Walk the process tree from the process specified, looking for a qemu-system. Return its [pid, cmd]
#
ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0]
processes = ps.decode("utf-8").split('\n')
nfields = len(processes[0].split()) - 1
pids = {}
commands = {}
for row in processes[1:]:
data = row.split(None, nfields)
if len(data) != 3:
continue
if data[1] not in pids:
pids[data[1]] = []
pids[data[1]].append(data[0])
commands[data[0]] = data[2]
if parent_pid not in pids:
return []
parents = []
newparents = pids[parent_pid]
while newparents:
next = []
for p in newparents:
if p in pids:
for n in pids[p]:
if n not in parents and n not in next:
next.append(n)
if p not in parents:
parents.append(p)
newparents = next
#print("Children matching %s:" % str(parents))
for p in parents:
# Need to be careful here since runqemu runs "ldd qemu-system-xxxx"
# Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx"
basecmd = commands[p].split()[0]
basecmd = os.path.basename(basecmd)
if "qemu-system" in basecmd and "-serial tcp" in commands[p]:
return [int(p),commands[p]]
def run_serial(self, command, raw=False, timeout=5):
        # We assume the target system has echo, which we use to get the command status
if not raw:
command = "%s; echo $?\n" % command
data = ''
status = 0
self.server_socket.sendall(command.encode('utf-8'))
start = time.time()
end = start + timeout
while True:
now = time.time()
if now >= end:
data += "<<< run_serial(): command timed out after %d seconds without output >>>\r\n\r\n" % timeout
break
try:
sread, _, _ = select.select([self.server_socket],[],[], end - now)
except InterruptedError:
continue
if sread:
answer = self.server_socket.recv(1024)
if answer:
data += answer.decode('utf-8')
# Search the prompt to stop
if re.search("[a-zA-Z0-9]+@[a-zA-Z0-9\-]+:~#", data):
break
else:
raise Exception("No data on serial console socket")
if data:
if raw:
status = 1
else:
# Remove first line (command line) and last line (prompt)
data = data[data.find('$?\r\n')+4:data.rfind('\r\n')]
index = data.rfind('\r\n')
if index == -1:
status_cmd = data
data = ""
else:
status_cmd = data[index+2:]
data = data[:index]
if (status_cmd == "0"):
status = 1
return (status, str(data))
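    # Hedged usage sketch (assumes a booted, logged-in target as set up in
    # launch() above; `runner` is an illustrative QemuRunner instance):
    #
    #     status, output = runner.run_serial("uname -a")
    #     if status:    # 1 when the command echoed exit status "0"
    #         logger.info(output)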
def _dump_host(self):
self.host_dumper.create_dir("qemu")
logger.warn("Qemu ended unexpectedly, dump data from host"
" is in %s" % self.host_dumper.dump_dir)
self.host_dumper.dump_host()
# This class is for reading data from a socket and passing it to logfunc
# to be processed. It's completely event driven and has a straightforward
# event loop. The mechanism for stopping the thread is a simple pipe which
# will wake up the poll and allow for tearing everything down.
class LoggingThread(threading.Thread):
def __init__(self, logfunc, sock, logger):
self.connection_established = threading.Event()
self.serversock = sock
self.logfunc = logfunc
self.logger = logger
self.readsock = None
self.running = False
self.errorevents = select.POLLERR | select.POLLHUP | select.POLLNVAL
self.readevents = select.POLLIN | select.POLLPRI
threading.Thread.__init__(self, target=self.threadtarget)
def threadtarget(self):
try:
self.eventloop()
finally:
self.teardown()
def run(self):
self.logger.info("Starting logging thread")
self.readpipe, self.writepipe = os.pipe()
threading.Thread.run(self)
def stop(self):
self.logger.info("Stopping logging thread")
if self.running:
os.write(self.writepipe, bytes("stop", "utf-8"))
def teardown(self):
self.logger.info("Tearing down logging thread")
self.close_socket(self.serversock)
if self.readsock is not None:
self.close_socket(self.readsock)
self.close_ignore_error(self.readpipe)
self.close_ignore_error(self.writepipe)
self.running = False
def eventloop(self):
poll = select.poll()
event_read_mask = self.errorevents | self.readevents
poll.register(self.serversock.fileno())
poll.register(self.readpipe, event_read_mask)
breakout = False
self.running = True
self.logger.info("Starting thread event loop")
while not breakout:
events = poll.poll()
for event in events:
# An error occurred, bail out
if event[1] & self.errorevents:
raise Exception(self.stringify_event(event[1]))
# Event to stop the thread
if self.readpipe == event[0]:
self.logger.info("Stop event received")
breakout = True
break
# A connection request was received
elif self.serversock.fileno() == event[0]:
self.logger.info("Connection request received")
self.readsock, _ = self.serversock.accept()
self.readsock.setblocking(0)
poll.unregister(self.serversock.fileno())
poll.register(self.readsock.fileno(), event_read_mask)
self.logger.info("Setting connection established event")
self.connection_established.set()
# Actual data to be logged
elif self.readsock.fileno() == event[0]:
data = self.recv(1024)
self.logfunc(data)
# Since the socket is non-blocking make sure to honor EAGAIN
# and EWOULDBLOCK.
def recv(self, count):
try:
data = self.readsock.recv(count)
except socket.error as e:
if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK:
return ''
else:
raise
if data is None:
raise Exception("No data on read ready socket")
elif not data:
# This actually means an orderly shutdown
# happened. But for this code it counts as an
# error since the connection shouldn't go away
# until qemu exits.
raise Exception("Console connection closed unexpectedly")
return data
def stringify_event(self, event):
val = ''
if select.POLLERR == event:
            val = 'POLLERR'
elif select.POLLHUP == event:
val = 'POLLHUP'
elif select.POLLNVAL == event:
val = 'POLLNVAL'
return val
def close_socket(self, sock):
sock.shutdown(socket.SHUT_RDWR)
sock.close()
def close_ignore_error(self, fd):
try:
os.close(fd)
except OSError:
pass
|
gpl-2.0
| 2,719,740,783,573,503,000
| 39.107616
| 159
| 0.533375
| false
| 4.175284
| false
| false
| false
|
Orpheus11/nile
|
nile/common/lockutils.py
|
1
|
3733
|
import threading
import weakref
import contextlib
import logging
import fasteners
import os
LOG = logging.getLogger(__name__)
class Semaphores(object):
def __init__(self):
self._semaphores = weakref.WeakValueDictionary()
self._lock = threading.Lock()
def get(self, name):
with self._lock:
try:
return self._semaphores[name]
except KeyError:
sem = threading.Semaphore()
self._semaphores[name] = sem
return sem
def __len__(self):
return len(self._semaphores)
_semaphores = Semaphores()
InterProcessLock = fasteners.InterProcessLock
ReaderWriterLock = fasteners.ReaderWriterLock
def internal_lock(name, semaphores=None):
if semaphores is None:
semaphores = _semaphores
return semaphores.get(name)
def external_lock(name, lock_file_prefix=None, lock_path=None):
lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
return InterProcessLock(lock_file_path)
def _get_lock_path(name, lock_file_prefix, lock_path=None):
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
local_lock_path = lock_path
if not local_lock_path:
        # The upstream code raised cfg.RequiredOptError('lock_path') here;
        # a bare `raise` with no active exception is itself an error, so
        # raise something explicit instead.
        raise ValueError('lock_path is required for external locks')
return os.path.join(local_lock_path, name)
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None,
do_log=True, semaphores=None, delay=0.01):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The path in which to store external lock files. For
external locking to work properly, this must be the same for all
references to the lock.
:param do_log: Whether to log acquire/release messages. This is primarily
intended to reduce log message duplication when `lock` is used from the
`synchronized` decorator.
:param semaphores: Container that provides semaphores to use when locking.
This ensures that threads inside the same application can not collide,
due to the fact that external process locks are unaware of a processes
active threads.
:param delay: Delay between acquisition attempts (in seconds).
.. versionchanged:: 0.2
Added *do_log* optional parameter.
.. versionchanged:: 0.3
Added *delay* and *semaphores* optional parameters.
"""
int_lock = internal_lock(name, semaphores=semaphores)
with int_lock:
if do_log:
LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name})
try:
if external:
ext_lock = external_lock(name, lock_file_prefix, lock_path)
ext_lock.acquire(delay=delay)
try:
yield ext_lock
finally:
ext_lock.release()
else:
yield int_lock
finally:
if do_log:
LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name})
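# Hedged usage sketch (lock names and paths are illustrative):
#
#     with lock('db-migration'):
#         pass    # serialized among this process's threads only
#
#     with lock('db-migration', lock_file_prefix='nile',
#               external=True, lock_path='/var/lock/nile'):
#         pass    # also serialized across processes via a file lock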
|
apache-2.0
| -4,795,306,328,737,834,000
| 33.247706
| 78
| 0.646397
| false
| 4.013978
| false
| false
| false
|
Fokko/incubator-airflow
|
tests/test_utils/mock_operators.py
|
1
|
1355
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import NamedTuple
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
# Namedtuple for testing purposes
class MockNamedTuple(NamedTuple):
var1: str
var2: str
class MockOperator(BaseOperator):
"""Operator for testing purposes."""
template_fields = ("arg1", "arg2")
@apply_defaults
def __init__(self, arg1: str = "", arg2: str = "", **kwargs):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
def execute(self, context):
pass
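# Hedged usage sketch (dag id and date are illustrative): because arg1 and
# arg2 are template_fields, Jinja expressions are rendered at execution time.
#
#     from datetime import datetime
#     from airflow.models import DAG
#
#     with DAG("example_dag", start_date=datetime(2020, 1, 1)) as dag:
#         task = MockOperator(task_id="mock", arg1="{{ ds }}")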
|
apache-2.0
| 3,725,370,474,415,175,700
| 31.261905
| 65
| 0.723985
| false
| 4.143731
| false
| false
| false
|
Greymerk/python-rpg
|
src/world/terrain/chunkmanager.py
|
1
|
1344
|
from random import choice
from mapcache import MapCache
from chunk import Chunk
class ChunkManager:
def __init__(self, world):
self.world = world
self.chunkCache = []
self.mapCache = MapCache(self, self.world.seed)
self.maxCacheSize = 64
def getChunk(self, x, y):
chunkX = int(x) >> 4
chunkY = int(y) >> 4
for c in self.chunkCache:
if c.getPos() == (chunkX, chunkY):
return c
toLoad = Chunk((chunkX, chunkY), self.world.getSeed(), self.world.mobManager, self.mapCache)
self.chunkCache.append(toLoad)
if len(self.chunkCache) > self.maxCacheSize:
            toUnload = self.chunkCache.pop(0)  # chunkCache is a plain list, not a deque
toUnload.unload()
return toLoad
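    # Worked example of the mapping above: world position (17, -3) lives in
    # chunk (1, -1), since 17 >> 4 == 1 and -3 >> 4 == -1 (the arithmetic
    # shift floors toward negative infinity, matching 16-tile chunks).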
def getMap(self, x, y):
return self.mapCache.get(x, y)
def getTile(self, pos):
x = int(pos[0])
y = int(pos[1])
c = self.getChunk(x, y)
return c.getTile(x % Chunk.size, y % Chunk.size)
def isLoaded(self, x, y):
for c in self.chunkCache:
            if c.pos == (x, y):  # `is` against a fresh tuple is always False; compare by value
return True
return False
def setTile(self, (x, y), id):
c = self.getChunk(x, y)
c.setTile((x, y), id)
def saveChunks(self):
for c in self.chunkCache:
c.unload()
def getRandomChunk(self):
return choice(self.chunkCache)
def cull(self, center, dist):
for c in self.chunkCache:
if c.getDistToChunk(center) > dist:
c.unload()
self.chunkCache.remove(c)
|
gpl-3.0
| -1,728,710,524,475,288,800
| 19.676923
| 94
| 0.65253
| false
| 2.688
| false
| false
| false
|
WaveBlocks/WaveBlocks
|
src/WaveBlocks/MatrixPotential1S.py
|
1
|
13237
|
"""The WaveBlocks Project
This file contains code for the representation of potentials for a single component.
These potential are of course scalar ones.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
import sympy
import numpy
from MatrixPotential import MatrixPotential
class MatrixPotential1S(MatrixPotential):
r"""
This class represents a scalar potential :math:`V\left(x\right)`. The potential is given as an
analytical :math:`1 \times 1` matrix expression. Some symbolic calculations with
the potential are supported. For example calculation of eigenvalues and
exponentials and numerical evaluation. Further, there are methods for
splitting the potential into a Taylor expansion and for basis transformations
between canonical and eigenbasis.
"""
def __init__(self, expression, variables):
r"""
Create a new ``MatrixPotential1S`` instance for a given potential matrix :math:`V\left(x\right)`.
:param expression: An expression representing the potential.
"""
#: The variable :math:`x` that represents position space.
self.x = variables[0]
#: The matrix of the potential :math:`V\left(x\right)`.
self.potential = expression
# Unpack single matrix entry
self.potential = self.potential[0,0]
self.exponential = None
self.number_components = 1
# prepare the function in every potential matrix cell for numerical evaluation
self.potential_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.potential, "numpy"))
# Symbolic and numerical eigenvalues and eigenvectors
self.eigenvalues_s = None
self.eigenvalues_n = None
self.eigenvectors_s = None
self.eigenvectors_n = None
self.taylor_eigen_s = None
self.taylor_eigen_n = None
self.remainder_eigen_s = None
self.remainder_eigen_n = None
def __str__(self):
r"""
Put the number of components and the analytical expression (the matrix) into a printable string.
"""
return """Scalar potential given by the expression: V(x) = \n""" + str(self.potential)
def get_number_components(self):
r"""
:return: The number :math:`N` of components the potential supports. In the one dimensional case, it's just 1.
"""
return 1
def evaluate_at(self, nodes, component=0, as_matrix=False):
r"""
Evaluate the potential matrix elementwise at some given grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the potential at.
:param component: The component :math:`V_{i,j}` that gets evaluated or 'None' to evaluate all.
:param as_matrix: Dummy parameter which has no effect here.
:return: A list with the single entry evaluated at the nodes.
"""
return tuple([ numpy.array(self.potential_n(nodes), dtype=numpy.floating) ])
def calculate_eigenvalues(self):
r"""
Calculate the eigenvalue :math:`\lambda_0\left(x\right)` of the potential :math:`V\left(x\right)`.
In the scalar case this is just the matrix entry :math:`V_{0,0}`.
.. note:: This function is idempotent and the eigenvalues are memoized for later reuse.
"""
if self.eigenvalues_s is None:
self.eigenvalues_s = self.potential
self.eigenvalues_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.potential, "numpy"))
def evaluate_eigenvalues_at(self, nodes, component=None, as_matrix=False):
r"""
Evaluate the eigenvalue :math:`\lambda_0\left(x\right)` at some grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the eigenvalue at.
:param diagonal_component: Dummy parameter that has no effect here.
:param as_matrix: Dummy parameter which has no effect here.
:return: A list with the single eigenvalue evaluated at the nodes.
"""
self.calculate_eigenvalues()
return tuple([ numpy.array(self.eigenvalues_n(nodes)) ])
def calculate_eigenvectors(self):
r"""
        Calculate the eigenvector :math:`\nu_0\left(x\right)` of the potential :math:`V\left(x\right)`.
In the scalar case this is just the value :math:`1`.
.. note:: This function is idempotent and the eigenvectors are memoized for later reuse.
"""
if self.eigenvectors_s is None:
self.eigenvectors_s = sympy.Matrix([[1]])
self.eigenvectors_n = sympy.vectorize(0)(sympy.lambdify(self.x, 1, "numpy"))
def evaluate_eigenvectors_at(self, nodes):
r"""
        Evaluate the eigenvector :math:`\nu_0\left(x\right)` at some grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the eigenvector at.
:return: A list with the eigenvector evaluated at the given nodes.
"""
self.calculate_eigenvectors()
return tuple([ numpy.ones((1, len(nodes)), dtype=numpy.floating) ])
def project_to_eigen(self, nodes, values, basis=None):
r"""
Project a given vector from the canonical basis to the eigenbasis of the potential.
:param nodes: The grid nodes :math:`\gamma` for the pointwise transformation.
:param values: The list of vectors :math:`\varphi_i` containing the values we want to transform.
        :param basis: A list of basis vectors :math:`\nu_i`. Allows using this function for external data, similar to a static function.
:return: This method does nothing and returns the values.
"""
return [ values[0].copy() ]
def project_to_canonical(self, nodes, values, basis=None):
r"""
Project a given vector from the potential's eigenbasis to the canonical basis.
:param nodes: The grid nodes :math:`\gamma` for the pointwise transformation.
:param values: The list of vectors :math:`\varphi_i` containing the values we want to transform.
        :param basis: A list of basis vectors :math:`\nu_i`. Allows using this function for external data, similar to a static function.
:return: This method does nothing and returns the values.
"""
return [ values[0].copy() ]
def calculate_exponential(self, factor=1):
r"""
Calculate the matrix exponential :math:`E = \exp\left(\alpha M\right)`. In this case
the matrix is of size :math:`1 \times 1` thus the exponential simplifies to the scalar exponential function.
:param factor: A prefactor :math:`\alpha` in the exponential.
.. note:: This function is idempotent.
"""
if self.exponential is None:
self.exponential = sympy.exp(factor*self.potential)
def evaluate_exponential_at(self, nodes):
r"""
Evaluate the exponential of the potential matrix :math:`V` at some grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the exponential at.
:return: The numerical approximation of the matrix exponential at the given grid nodes.
"""
# Hack for older sympy versions, see recent issue:
# http://www.mail-archive.com/sympy@googlegroups.com/msg05137.html
lookup = {"I" : 1j}
# prepare the function of every potential matrix exponential cell for numerical evaluation
self.expfunctions = sympy.vectorize(0)(sympy.lambdify(self.x, self.exponential, (lookup, "numpy")))
return tuple([ numpy.array(self.expfunctions(nodes)) ])
def calculate_jacobian(self):
r"""
Calculate the jacobian matrix for the component :math:`V_{0,0}` of the potential.
        For potentials which depend on only one variable :math:`x`, this equals the first derivative.
"""
self.jacobian_s = sympy.diff(self.potential, self.x)
self.jacobian_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.jacobian_s, "numpy"))
def evaluate_jacobian_at(self, nodes, component=None):
r"""
Evaluate the potential's jacobian at some grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` the jacobian gets evaluated at.
:param component: Dummy parameter that has no effect here.
:return: The value of the potential's jacobian at the given nodes.
"""
return tuple([ self.jacobian_n(nodes) ])
def calculate_hessian(self):
r"""
Calculate the hessian matrix for component :math:`V_{0,0}` of the potential.
        For potentials which depend on only one variable :math:`x`, this equals the second derivative.
"""
self.hessian_s = sympy.diff(self.potential, self.x, 2)
self.hessian_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.hessian_s, "numpy"))
def evaluate_hessian_at(self, nodes, component=None):
r"""
Evaluate the potential's hessian at some grid nodes :math:`\gamma`.
:param nodes: The grid nodes :math:`\gamma` the hessian gets evaluated at.
:param component: Dummy parameter that has no effect here.
:return: The value of the potential's hessian at the given nodes.
"""
return tuple([ self.hessian_n(nodes) ])
def calculate_local_quadratic(self, diagonal_component=None):
r"""
Calculate the local quadratic approximation :math:`U` of the potential's eigenvalue :math:`\lambda`.
:param diagonal_component: Dummy parameter that has no effect here.
.. note:: This function is idempotent.
"""
# Calculation already done at some earlier time?
if self.taylor_eigen_s is not None:
return
self.calculate_eigenvalues()
self.calculate_jacobian()
self.calculate_hessian()
self.taylor_eigen_s = [ (0, self.eigenvalues_s), (1, self.jacobian_s), (2, self.hessian_s) ]
# Construct function to evaluate the approximation at point q at the given nodes
assert(self.taylor_eigen_n is None)
self.taylor_eigen_n = [
(order, sympy.vectorize(0)(sympy.lambdify([self.x], f, "numpy")))
for order, f in self.taylor_eigen_s
]
def evaluate_local_quadratic_at(self, nodes, diagonal_component=None):
r"""
Numerically evaluate the local quadratic approximation :math:`U` of
the potential's eigenvalue :math:`\lambda` at the given grid nodes :math:`\gamma`.
This function is used for the homogeneous case.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the quadratic approximation at.
:return: An array containing the values of :math:`U` at the nodes :math:`\gamma`.
"""
return tuple([ numpy.array(f(nodes), dtype=numpy.floating) for order, f in self.taylor_eigen_n ])
def calculate_local_remainder(self, diagonal_component=None):
r"""
Calculate the non-quadratic remainder :math:`W` of the quadratic
approximation :math:`U` of the potential's eigenvalue :math:`\lambda`.
This function is used for the homogeneous case and takes into account
the leading component :math:`\chi`.
:param diagonal_component: Dummy parameter that has no effect here.
.. note:: This function is idempotent.
"""
# Calculation already done at some earlier time?
if self.remainder_eigen_s is not None:
return
self.calculate_eigenvalues()
f = self.eigenvalues_s
# point where the taylor series is computed
q = sympy.Symbol("q")
p = f.subs(self.x, q)
j = sympy.diff(f, self.x)
j = j.subs(self.x, q)
h = sympy.diff(f, self.x, 2)
h = h.subs(self.x, q)
quadratic = p + j*(self.x-q) + sympy.Rational(1,2)*h*(self.x-q)**2
# Symbolic expression for the taylor expansion remainder term
self.remainder_eigen_s = self.potential - quadratic
# Construct functions to evaluate the approximation at point q at the given nodes
assert(self.remainder_eigen_n is None)
self.remainder_eigen_n = sympy.vectorize(1)(sympy.lambdify([q, self.x], self.remainder_eigen_s, "numpy"))
def evaluate_local_remainder_at(self, position, nodes, diagonal_component=None, component=None):
r"""
Numerically evaluate the non-quadratic remainder :math:`W` of the quadratic
approximation :math:`U` of the potential's eigenvalue :math:`\lambda` at the given nodes :math:`\gamma`.
This function is used for the homogeneous and the inhomogeneous case and
just evaluates the remainder :math:`W`.
:param position: The point :math:`q` where the Taylor series is computed.
:param nodes: The grid nodes :math:`\gamma` we want to evaluate the potential at.
:param component: Dummy parameter that has no effect here.
:return: A list with a single entry consisting of an array containing the values of :math:`W` at the nodes :math:`\gamma`.
"""
return tuple([ numpy.array(self.remainder_eigen_n(position, nodes), dtype=numpy.floating) ])
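# Hedged usage sketch (the harmonic potential is illustrative; sympy and
# numpy are already imported above):
#
#     >>> x = sympy.Symbol("x")
#     >>> V = MatrixPotential1S(sympy.Matrix([[x**2 / 2]]), [x])
#     >>> nodes = numpy.linspace(-1.0, 1.0, 5)
#     >>> V.evaluate_at(nodes)              # (array of V(x) values,)
#     >>> V.calculate_jacobian()
#     >>> V.evaluate_jacobian_at(nodes)     # (array of V'(x) values,)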
|
bsd-3-clause
| -6,184,886,920,704,668,000
| 40.495298
| 136
| 0.652187
| false
| 4.079199
| false
| false
| false
|
tobijk/ecromedos
|
lib/net/ecromedos/ecmlprocessor.py
|
1
|
4602
|
# -*- coding: utf-8 -*-
#
# Desc: This file is part of the ecromedos Document Preparation System
# Author: Tobias Koch <tobias@tobijk.de>
# License: MIT
# URL: http://www.ecromedos.net
#
import os, sys
import lxml.etree as etree
from net.ecromedos.error import ECMDSError, ECMDSPluginError
from net.ecromedos.configreader import ECMDSConfigReader
from net.ecromedos.dtdresolver import ECMDSDTDResolver
from net.ecromedos.preprocessor import ECMDSPreprocessor
class ECMLProcessor(ECMDSConfigReader, ECMDSDTDResolver, ECMDSPreprocessor):
def __init__(self, options={}):
ECMDSConfigReader.__init__(self)
ECMDSDTDResolver. __init__(self)
ECMDSPreprocessor.__init__(self)
self.readConfig(options)
self.loadPlugins()
self.loadStylesheet()
#end function
def loadXMLDocument(self, filename):
"""Try to load XML document from @filename."""
try:
# create parser
parser = etree.XMLParser(
load_dtd=True,
no_network=True,
strip_cdata=True,
remove_comments=True,
resolve_entities=True
)
# register custom resolver
parser.resolvers.add(self)
# parse the document
tree = etree.parse(filename, parser=parser)
except Exception as e:
raise ECMDSError(str(e))
# return document tree
return tree
#end function
def loadStylesheet(self):
"""Load matching stylesheet for desired output format."""
target_format = self.config['target_format']
try:
style_dir = self.config['style_dir']
except KeyError:
msg = "Please specify the location of the stylesheets."
raise ECMDSError(msg)
#end try
filename = os.path.join(style_dir, target_format, "ecmds.xsl")
try:
tree = self.loadXMLDocument(filename)
except ECMDSError as e:
msg = "Could not load stylesheet:\n %s" % (e.msg(),)
raise ECMDSError(msg)
#end try
try:
self.stylesheet = etree.XSLT(tree)
except Exception as e:
raise ECMDSError(str(e))
        #end try
return self.stylesheet
#end function
def validateDocument(self, document):
"""Validate the given document."""
try:
style_dir = self.config['style_dir']
except KeyError:
msg = "Please specify the location of the stylesheets."
raise ECMDSError(msg)
#end try
# load the DTD
dtd_filename = os.path.join(style_dir, "DTD", "ecromedos.dtd")
dtd = etree.DTD(dtd_filename)
# validate the document
result = dtd.validate(document)
if result == False:
raise ECMDSError(dtd.error_log.last_error)
return result
#end function
def applyStylesheet(self, document):
"""Apply stylesheet to document."""
        # Default to an empty dict so that `**params` below works even when
        # no XSL parameters were configured.
        params = {}
        try:
            params = self.config['xsl_params']
        except KeyError: pass
try:
result = self.stylesheet(document, **params)
except Exception as e:
msg = "Error transforming document:\n %s." % (str(e),)
raise ECMDSError(msg)
#end try
return result
#end function
def process(self, filename, verbose=True):
"""Convert the document stored under filename."""
def message(msg, verbose):
if not verbose: return
sys.stdout.write(" * " + msg)
sys.stdout.write(" " * (40 - len(msg)))
sys.stdout.flush()
#end inline function
def status(status, verbose):
if not verbose: return
sys.stdout.write(status + "\n")
#end inline function
# load document
message("Reading document...", verbose)
document = self.loadXMLDocument(filename)
status("DONE", verbose)
# validate document
if self.config['do_validate']:
message("Validating document...", verbose)
self.validateDocument(document)
status("VALID", verbose)
#end if
# prepare document
message("Pre-processing document tree...", verbose)
self.prepareDocument(document)
status("DONE", verbose)
# apply stylesheet
message("Transforming document...", verbose)
self.applyStylesheet(document)
status("DONE", verbose)
#end function
#end class
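# Hedged usage sketch (option keys follow the config lookups above; the
# paths are illustrative and readConfig() is assumed to merge them into
# self.config):
#
#     processor = ECMLProcessor(options={
#         "target_format": "xhtml",
#         "style_dir": "/usr/share/ecromedos/transform",
#         "do_validate": True,
#     })
#     processor.process("document.xml", verbose=True)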
|
mit
| 7,828,527,572,711,646,000
| 27.407407
| 76
| 0.58279
| false
| 4.225895
| true
| false
| false
|
danielquinn/spirithunter
|
src/spirits/api/resources.py
|
1
|
8242
|
import json
import random
from math import sin, cos
from django.conf import settings
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from tastypie import fields
from tastypie import http
from tastypie.authentication import MultiAuthentication, Authentication, BasicAuthentication, SessionAuthentication
from tastypie.resources import ModelResource, convert_post_to_patch
from tastypie.exceptions import BadRequest
from aspects.models import Element, Facet
from geography.models import Country
from spirithunter import logger
from .authorization import SpiritAuthorization
from ..forms import PatchForm
from ..models.spirit import ElementalStrength, Spirit
class ImageMixin(object):
def dehydrate(self, bundle):
bundle.data.update({
"images": {}
})
for size in self.AVAILABLE_IMAGE_SIZES:
bundle.data["images"][str(size)] = getattr(
bundle.obj,
'image{size}'.format(size=size)
)
return bundle
class ElementResource(ImageMixin, ModelResource):
AVAILABLE_IMAGE_SIZES = (16, 32)
class Meta:
queryset = Element.objects.all()
include_resource_uri = False
resource_name = "elements"
class ElementalStrengthResource(ModelResource):
AVAILABLE_IMAGE_SIZES = (16, 32)
element = fields.ToOneField(ElementResource, "element", full=True)
class Meta:
queryset = ElementalStrength.objects.all()
include_resource_uri = False
resource_name = "elements"
class FacetResource(ImageMixin, ModelResource):
AVAILABLE_IMAGE_SIZES = (16, 32)
class Meta:
queryset = Facet.objects.all()
include_resource_uri = False
resource_name = "facets"
class NationalityResource(ModelResource):
class Meta:
queryset = Country.objects.all()
include_resource_uri = False
resource_name = "nationalities"
def dehydrate(self, bundle):
return {
"code": bundle.obj.country.code,
"name": bundle.obj.country.name,
}
class SpiritResource(ImageMixin, ModelResource):
AVAILABLE_IMAGE_SIZES = (16, 32, 64, 128, 256)
SPIRITS_TO_GENERATE = 5
SPAWN_RADIUS = 50
owner = fields.ToOneField("users.api.UserResource", "owner", null=True)
elementals = fields.ManyToManyField(
ElementalStrengthResource,
"elemental_strengths",
full=True
)
facets = fields.ManyToManyField(
FacetResource,
"facets",
full=True
)
nationalities = fields.ManyToManyField(
NationalityResource,
"nationalities",
full=True
)
class Meta:
allowed_methods = ("get", "patch",)
authentication = MultiAuthentication(
SessionAuthentication(),
BasicAuthentication(),
Authentication()
)
authorization = SpiritAuthorization()
object_class = Spirit
queryset = Spirit.objects.all()
resource_name = "spirits"
filtering = {
"id": ("exact",),
"owner": ("exact",),
"activity": ("exact",),
}
def dehydrate(self, bundle):
bundle = ModelResource.dehydrate(self, bundle)
bundle = ImageMixin.dehydrate(self, bundle)
if bundle.obj.activity == Spirit.ACTIVITY_WANDER:
if bundle.obj.health_current == 0:
bundle.data["experience_given"] = bundle.obj.get_ladder().xp_given
return bundle
@staticmethod
def dehydrate_origin(bundle):
if bundle.obj.origin:
r = json.loads(bundle.obj.origin.geojson)
r["coordinates"][0] = round(r["coordinates"][0], settings.COORDINATES_ROUNDING)
r["coordinates"][1] = round(r["coordinates"][1], settings.COORDINATES_ROUNDING)
return r
return None
@staticmethod
def dehydrate_location(bundle):
if bundle.obj.location:
r = json.loads(bundle.obj.location.geojson)
r["coordinates"][0] = round(r["coordinates"][0], settings.COORDINATES_ROUNDING)
r["coordinates"][1] = round(r["coordinates"][1], settings.COORDINATES_ROUNDING)
return r
return None
@staticmethod
def dehydrate_activity(bundle):
return {
"id": bundle.obj.activity,
"name": bundle.obj.get_activity_display()
}
def obj_get_list(self, bundle, **kwargs):
if bundle.request.GET.get("finder"):
if not bundle.request.location:
raise BadRequest(
"Finder cannot be invoked without a location header"
)
if not bundle.request.user.is_authenticated():
raise BadRequest(
"Finder is only available to authenticated users"
)
try:
return self._finder(bundle.request)
except ValidationError as e:
raise BadRequest(e.messages[0])
else:
return ModelResource.obj_get_list(self, bundle, **kwargs)
def patch_list(self, request, **kwargs):
return http.HttpNotImplemented()
def patch_detail(self, request, **kwargs):
pk = kwargs.get("pk")
request = convert_post_to_patch(request)
self.authorized_update_detail(
Spirit.objects.filter(pk=pk),
self.build_bundle(request=request)
)
form = PatchForm(
request,
get_object_or_404(Spirit, pk=pk),
self.deserialize(
request,
request.body,
format=request.META.get("CONTENT_TYPE", "application/json")
)
)
if form.is_valid():
form.save()
return self.create_response(request, "", status=202)
raise BadRequest(form.errors.as_text())
def _finder(self, request):
"""
Open the app and show me what's here. If there's nothing here (common)
make some spirits relevant to the environment to play with.
"""
lat, lng = (request.location.y, request.location.x)
if lat > 80 or lat < -80:
raise ValidationError("Invalid lat value: %s" % lat)
if lng > 180 or lng < -180:
raise ValidationError("Invalid lng value: %s" % lng)
level_low, level_high = 1, 1
if request.user.is_authenticated():
spirit_levels = sorted(
request.user.spirits.filter(
activity=Spirit.ACTIVITY_JARRED
).values_list(
"level",
flat=True
)
)
if spirit_levels:
level_low, level_high = spirit_levels[0], spirit_levels[-1]
spirits = list(Spirit.objects.filter(
activity=Spirit.ACTIVITY_WANDER,
health_current__gt=0,
location__distance_lte=(request.location, self.SPAWN_RADIUS)
))
while len(spirits) < self.SPIRITS_TO_GENERATE:
# Magic
centre_x = float(lat)
centre_y = float(lng)
r = random.uniform(0, self.SPAWN_RADIUS)
a = random.uniform(0, 360)
target_x = centre_x + ((r * cos(a)) / settings.M_LNG)
target_y = centre_y + ((r * sin(a)) / settings.M_LAT)
# /Magic
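            # Hedged reading of the math above: (r, a) is a random polar
            # offset in metres; dividing by settings.M_LNG / settings.M_LAT
            # (assumed metres-per-degree constants) converts the offset to
            # degrees, so targets land within SPAWN_RADIUS metres of centre.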
logger.debug("Creating a spirit at {lat},{lng}".format(
lat=target_x,
lng=target_y
))
spirit = Spirit.objects.create_for_environment(
centre=(centre_x, centre_y),
target=(target_x, target_y),
level_low=level_low,
level_high=level_high
)
spirits.append(spirit)
# Feel lucky?
if random.randint(1, 10) == 5:
# Start encounter immediately
pass
return SpiritResource.get_object_list(self, request).filter(
activity=Spirit.ACTIVITY_WANDER,
health_current__gt=0,
location__distance_lte=(request.location, 5000)
)
|
agpl-3.0
| -7,481,127,565,425,262,000
| 26.565217
| 115
| 0.579592
| false
| 4.050123
| false
| false
| false
|
linuxrocks123/MailTask
|
mt_attache.py
|
1
|
3151
|
#! /usr/bin/env python
# MailTask Alpha: The Email Manager
# Copyright (C) 2015 Patrick Simmons
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import codecs
import fltk
from html2text import html2text
import os
import tempfile
#Note: EVERY method here must correctly handle unicode by decoding it with utf-8/replace,
#then ENCODING it with utf-8
#Note: FLTK 1.1 seems to use ISO-8859-1 as its native encoding.
# FLTK 1.3 changes this to UTF-8.
#FLTK_ENCODING="ISO-8859-1"
FLTK_ENCODING="UTF-8"
def text_plain(submsg,mime_encoding):
return submsg.get_payload(decode=True).decode(encoding=mime_encoding,errors="replace").encode(encoding=FLTK_ENCODING,errors="replace")
def text_html(submsg,mime_encoding):
return html2text(submsg.get_payload(decode=True).decode(encoding=mime_encoding,errors="replace")).encode(encoding=FLTK_ENCODING,errors="replace")
def application_pdf(submsg,mime_encoding):
temptuple=tempfile.mkstemp()
os.fdopen(temptuple[0],'w').write(submsg.get_payload(decode=True))
os.system("xpdf "+temptuple[1]+" & ( sleep 10; rm "+temptuple[1]+" ) &")
return "PDF file opened"
def application_octetstream(submsg,mime_encoding):
fc = fltk.Fl_File_Chooser(".","*",fltk.Fl_File_Chooser.CREATE,"Select Save Location")
fc.show()
while fc.shown():
fltk.Fl_wait()
if fc.value()==None:
return submsg.get_payload(decode=True).decode(encoding=mime_encoding,errors="replace").encode(encoding=FLTK_ENCODING,errors="replace")
open(fc.value(),'w').write(submsg.get_payload(decode=True))
return "Undisplayable file; saved to "+fc.value()
def display_submessage(submsg):
if submsg['Content-Transfer-Encoding']==None:
del submsg['Content-Transfer-Encoding']
if submsg.get_payload(decode=True)==None:
return ""
ATTACHE = { "text/plain" : text_plain, "text/html" : text_html,
"application/pdf" : application_pdf }
mime_encoding = submsg.get_content_charset()
if mime_encoding==None:
mime_encoding="utf-8"
else:
try:
codecs.lookup(mime_encoding)
valid_encoding = True
except LookupError:
valid_encoding = False
if not valid_encoding:
mime_encoding="utf-8"
mimetype = submsg.get_content_type()
print mimetype
if mimetype in ATTACHE:
return ATTACHE[mimetype](submsg,mime_encoding)
elif mimetype.find("text/")==0:
return text_plain(submsg,mime_encoding)
return application_octetstream(submsg,mime_encoding)
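# Hedged usage sketch (Python 2, matching this module; the raw message text
# is illustrative): render every leaf part of a parsed email.
#
#     import email
#     msg = email.message_from_string(raw_mail_text)
#     for part in msg.walk():
#         if not part.is_multipart():
#             print display_submessage(part)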
|
gpl-3.0
| -9,170,419,339,616,157,000
| 35.218391
| 149
| 0.699778
| false
| 3.56448
| false
| false
| false
|
unapiedra/BBChop
|
tests/dumbdag.py
|
1
|
3423
|
# Copyright 2008 Ealdwulf Wuffinga
# This file is part of BBChop.
#
# BBChop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# BBChop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BBChop. If not, see <http://www.gnu.org/licenses/>.
from . import dagAlg
from BBChop.listUtils import listSub,prod
# class for computing over directed acyclic graphs.
# values are held outside the graph object, in lists
# the dag is defined by a parents relation: for each index, which indexes are its parents.
# it is required that < and > on indexes are consistent with the transitive closure of the parents
# relation. That is, if parent*(a,b) then a<b and b>a. This is checked.
# this version of the class has a simple O(N^2) implementation for test purposes
class IllFormedDAGFile(Exception): pass
class DAGWrongLength(Exception): pass
# abstract dag class: defines sum,and type functions in terms of comb functions
class absDag:
def sumUpto(self,values):
return self.combUpto(values,sum)
def sumAfter(self,values):
return self.combAfter(values,sum)
def anyUpto(self,values):
return self.combUpto(values,any)
def anyAfter(self,values):
return self.combAfter(values,any)
def prodAfter(self,values):
return self.combAfter(values,prod)
class dag(absDag):
def __init__(self,parents,N):
self.parents=parents
children=[[] for i in range(N)]
for i in range(N):
for p in parents[i]:
children[p].append(i)
self.children=children
childRel=dagAlg.childLists2Rel(self.children)
self.decendentRel=dagAlg.transitiveClosure(childRel,N)
    # these methods assume the consistency defined above.
    # for each location, apply comb() (e.g. sum) to the values at strictly lower (ancestor) locations
def combUpto(self,values,comb):
res=[comb([v for (i,v) in enumerate(values) if (i,j) in self.decendentRel]) for j in range(len(values))]
return res
    # for each location, apply comb() (e.g. sum) to the values at strictly higher (descendant) locations
def combAfter(self,values,comb):
res=[comb([v for (i,v) in enumerate(values) if (j,i) in self.decendentRel]) for j in range(len(values))]
return res
# for each location, return the sum of locations neither lower or higher from values
# we do this by taking the total and subtracting everything else.
def sumOther(self,values,sumUpto=None,sumAfter=None):
# save recalculating sumUpto/After if already known
if sumUpto is None:
sumUpto=self.sumUpto(values)
if sumAfter is None:
sumAfter=self.sumAfter(values)
sums=[sum(values)]*len(values)
#
sums=listSub(sums,values,sumUpto,sumAfter)
return sums
def linearTestDag(N):
    # node 0 is the root; each subsequent node has the previous node as its
    # parent, giving a linear chain consistent with index order
    parents = [[a] for a in range(N - 1)]
    parents[:0] = [[]]
    return dag(parents, N)
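# A hedged usage sketch (not part of the original test module). It assumes
# dagAlg.childLists2Rel/transitiveClosure compute the strict descendant
# relation their names suggest. For the diamond DAG 0 -> {1, 2} -> 3:
#
#     d = dag([[], [0], [0], [1, 2]], 4)
#     d.sumUpto([1, 1, 1, 1])   # [0, 1, 1, 3]  sums over strictly-lower locations
#     d.sumAfter([1, 1, 1, 1])  # [3, 1, 1, 0]  sums over strictly-higher locations
#     d.sumOther([1, 1, 1, 1])  # [0, 1, 1, 0]  locations neither above nor below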
|
gpl-2.0
| -1,333,789,492,420,188,700
| 29.5625
| 112
| 0.674847
| false
| 3.704545
| false
| false
| false
|
jleete97/python-graphics
|
games/turns/reversi/reversi.py
|
1
|
3731
|
import random
import sys
import time
from reversiboard import *
from games.turns.reversi.reversimoves import *
# Window parameters
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 700
# Colors
DARK_GREEN = (0, 128, 0)
DARK_GREY = (128, 128, 128)
LIGHT_RED = (255, 192, 192)
GREEN = (0, 255, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# Board size (number of squares on each side)
BOARD_SIZE = 8
HUMAN = 'human'
COMPUTER = 'computer'
# Players: human plays white, computer plays black
# Pick random starting player
sides = [ HUMAN, COMPUTER ]
colors = { HUMAN : WHITE , COMPUTER : BLACK }
pygame.init()
surface = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT), 0, 32)
another_game = True
while another_game:
playerIndex = random.randrange(2)
board = ReversiBoard(BOARD_SIZE, sides)
drawer = ReversiBoardDrawer(board,
surface,
WINDOW_WIDTH,
WINDOW_HEIGHT,
DARK_GREY,
DARK_GREEN,
GREEN,
sides, colors)
try:
playing = True
missedMoves = 0
winner = None
while playing:
opponentIndex = 1 - playerIndex
player = sides[playerIndex]
opponent = sides[opponentIndex]
drawer.drawBoard()
moveResult = []
if board.noLegalMoves(player, opponent):
print(player + " has no legal move.")
move = None
time.sleep(3)
else:
print(player + " is moving...")
if player == HUMAN:
while moveResult == []:
move = getPlayerMove(drawer)
moveResult = board.resultOfMove(move, player, opponent)
else:
move = getComputerMove(board, COMPUTER, HUMAN)
moveResult = board.resultOfMove(move, player, opponent)
print(" move result: " + str(moveResult))
displayMove = None
            if move is not None:
                displayMove = (move[0] + 1, move[1] + 1)
print(player + " has moved: " + str(displayMove))
if move is None:
missedMoves += 1
else:
missedMoves = 0
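            # two consecutive passes mean neither player can move: the game ends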
if missedMoves == 2:
winner = board.determineWinner()
playing = False
else:
board.apply(move, moveResult, player)
drawer.drawMove(move, player)
if board.isFull():
winner = board.determineWinner()
playing = False
playerIndex = 1 - playerIndex
except PlayerQuitException:
pass
if winner is None:
outcome = "The game is a tie."
else:
outcome = "The " + winner + " wins!"
fontObj = pygame.font.Font('freesansbold.ttf', 32)
textSurface = fontObj.render(outcome, True, LIGHT_RED, DARK_GREY)
textRect = textSurface.get_rect()
textRect.center = (WINDOW_WIDTH // 2, WINDOW_HEIGHT // 2)
surface.blit(textSurface, textRect)
pygame.display.update()
asking_about_another_game = True
while asking_about_another_game:
for event in pygame.event.get():
if event.type == QUIT:
another_game = False
asking_about_another_game = False
break
elif event.type == KEYUP and event.key in [K_ESCAPE, ord('r')]:
asking_about_another_game = False
break
pygame.display.update()
pygame.quit()
sys.exit()
|
mit
| -7,343,959,332,232,993,000
| 26.233577
| 79
| 0.5197
| false
| 3.977612
| false
| false
| false
|
googleads/google-ads-python
|
google/ads/googleads/v8/errors/types/keyword_plan_error.py
|
1
|
1758
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.errors",
marshal="google.ads.googleads.v8",
manifest={"KeywordPlanErrorEnum",},
)
class KeywordPlanErrorEnum(proto.Message):
r"""Container for enum describing possible errors from applying a
keyword plan resource (keyword plan, keyword plan campaign,
keyword plan ad group or keyword plan keyword) or
KeywordPlanService RPC.
"""
class KeywordPlanError(proto.Enum):
r"""Enum describing possible errors from applying a keyword plan."""
UNSPECIFIED = 0
UNKNOWN = 1
BID_MULTIPLIER_OUT_OF_RANGE = 2
BID_TOO_HIGH = 3
BID_TOO_LOW = 4
BID_TOO_MANY_FRACTIONAL_DIGITS = 5
DAILY_BUDGET_TOO_LOW = 6
DAILY_BUDGET_TOO_MANY_FRACTIONAL_DIGITS = 7
INVALID_VALUE = 8
KEYWORD_PLAN_HAS_NO_KEYWORDS = 9
KEYWORD_PLAN_NOT_ENABLED = 10
KEYWORD_PLAN_NOT_FOUND = 11
MISSING_BID = 13
MISSING_FORECAST_PERIOD = 14
INVALID_FORECAST_DATE_RANGE = 15
INVALID_NAME = 16
__all__ = tuple(sorted(__protobuf__.manifest))
|
apache-2.0
| 8,135,299,712,212,321,000
| 32.169811
| 76
| 0.677474
| false
| 3.724576
| false
| false
| false
|
TribeMedia/synapse
|
synapse/handlers/e2e_keys.py
|
2
|
12592
|
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ujson as json
import logging
from canonicaljson import encode_canonical_json
from twisted.internet import defer
from synapse.api.errors import SynapseError, CodeMessageException
from synapse.types import get_domain_from_id
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
logger = logging.getLogger(__name__)
class E2eKeysHandler(object):
def __init__(self, hs):
self.store = hs.get_datastore()
self.federation = hs.get_replication_layer()
self.device_handler = hs.get_device_handler()
self.is_mine_id = hs.is_mine_id
self.clock = hs.get_clock()
# doesn't really work as part of the generic query API, because the
# query request requires an object POST, but we abuse the
# "query handler" interface.
self.federation.register_query_handler(
"client_keys", self.on_federation_query_client_keys
)
@defer.inlineCallbacks
def query_devices(self, query_body, timeout):
""" Handle a device key query from a client
{
"device_keys": {
"<user_id>": ["<device_id>"]
}
}
->
{
"device_keys": {
"<user_id>": {
"<device_id>": {
...
}
}
}
}
"""
device_keys_query = query_body.get("device_keys", {})
        # separate local users from remote ones:
        # map each user_id to its device_ids; remote users get grouped by
        # domain further below
local_query = {}
remote_queries = {}
for user_id, device_ids in device_keys_query.items():
if self.is_mine_id(user_id):
local_query[user_id] = device_ids
else:
remote_queries[user_id] = device_ids
        # First get local devices.
failures = {}
results = {}
if local_query:
local_result = yield self.query_local_devices(local_query)
for user_id, keys in local_result.items():
if user_id in local_query:
results[user_id] = keys
# Now attempt to get any remote devices from our local cache.
remote_queries_not_in_cache = {}
if remote_queries:
query_list = []
for user_id, device_ids in remote_queries.iteritems():
if device_ids:
query_list.extend((user_id, device_id) for device_id in device_ids)
else:
query_list.append((user_id, None))
user_ids_not_in_cache, remote_results = (
yield self.store.get_user_devices_from_cache(
query_list
)
)
for user_id, devices in remote_results.iteritems():
user_devices = results.setdefault(user_id, {})
for device_id, device in devices.iteritems():
keys = device.get("keys", None)
device_display_name = device.get("device_display_name", None)
if keys:
result = dict(keys)
unsigned = result.setdefault("unsigned", {})
if device_display_name:
unsigned["device_display_name"] = device_display_name
user_devices[device_id] = result
for user_id in user_ids_not_in_cache:
domain = get_domain_from_id(user_id)
r = remote_queries_not_in_cache.setdefault(domain, {})
r[user_id] = remote_queries[user_id]
# Now fetch any devices that we don't have in our cache
@defer.inlineCallbacks
def do_remote_query(destination):
destination_query = remote_queries_not_in_cache[destination]
try:
limiter = yield get_retry_limiter(
destination, self.clock, self.store
)
with limiter:
remote_result = yield self.federation.query_client_keys(
destination,
{"device_keys": destination_query},
timeout=timeout
)
for user_id, keys in remote_result["device_keys"].items():
if user_id in destination_query:
results[user_id] = keys
except CodeMessageException as e:
failures[destination] = {
"status": e.code, "message": e.message
}
except NotRetryingDestination as e:
failures[destination] = {
"status": 503, "message": "Not ready for retry",
}
except Exception as e:
# include ConnectionRefused and other errors
failures[destination] = {
"status": 503, "message": e.message
}
yield preserve_context_over_deferred(defer.gatherResults([
preserve_fn(do_remote_query)(destination)
for destination in remote_queries_not_in_cache
]))
defer.returnValue({
"device_keys": results, "failures": failures,
})
@defer.inlineCallbacks
def query_local_devices(self, query):
"""Get E2E device keys for local users
Args:
            query (dict[string, list[string]|None]): map from user_id to a list
of devices to query (None for all devices)
Returns:
defer.Deferred: (resolves to dict[string, dict[string, dict]]):
map from user_id -> device_id -> device details
"""
local_query = []
result_dict = {}
for user_id, device_ids in query.items():
if not self.is_mine_id(user_id):
logger.warning("Request for keys for non-local user %s",
user_id)
raise SynapseError(400, "Not a user here")
if not device_ids:
local_query.append((user_id, None))
else:
for device_id in device_ids:
local_query.append((user_id, device_id))
# make sure that each queried user appears in the result dict
result_dict[user_id] = {}
results = yield self.store.get_e2e_device_keys(local_query)
# Build the result structure, un-jsonify the results, and add the
# "unsigned" section
for user_id, device_keys in results.items():
for device_id, device_info in device_keys.items():
r = dict(device_info["keys"])
r["unsigned"] = {}
display_name = device_info["device_display_name"]
if display_name is not None:
r["unsigned"]["device_display_name"] = display_name
result_dict[user_id][device_id] = r
defer.returnValue(result_dict)
@defer.inlineCallbacks
def on_federation_query_client_keys(self, query_body):
""" Handle a device key query from a federated server
"""
device_keys_query = query_body.get("device_keys", {})
res = yield self.query_local_devices(device_keys_query)
defer.returnValue({"device_keys": res})
@defer.inlineCallbacks
def claim_one_time_keys(self, query, timeout):
local_query = []
remote_queries = {}
for user_id, device_keys in query.get("one_time_keys", {}).items():
if self.is_mine_id(user_id):
for device_id, algorithm in device_keys.items():
local_query.append((user_id, device_id, algorithm))
else:
domain = get_domain_from_id(user_id)
remote_queries.setdefault(domain, {})[user_id] = device_keys
results = yield self.store.claim_e2e_one_time_keys(local_query)
json_result = {}
failures = {}
for user_id, device_keys in results.items():
for device_id, keys in device_keys.items():
for key_id, json_bytes in keys.items():
json_result.setdefault(user_id, {})[device_id] = {
key_id: json.loads(json_bytes)
}
@defer.inlineCallbacks
def claim_client_keys(destination):
device_keys = remote_queries[destination]
try:
limiter = yield get_retry_limiter(
destination, self.clock, self.store
)
with limiter:
remote_result = yield self.federation.claim_client_keys(
destination,
{"one_time_keys": device_keys},
timeout=timeout
)
for user_id, keys in remote_result["one_time_keys"].items():
if user_id in device_keys:
json_result[user_id] = keys
except CodeMessageException as e:
failures[destination] = {
"status": e.code, "message": e.message
}
except NotRetryingDestination as e:
failures[destination] = {
"status": 503, "message": "Not ready for retry",
}
except Exception as e:
# include ConnectionRefused and other errors
failures[destination] = {
"status": 503, "message": e.message
}
yield preserve_context_over_deferred(defer.gatherResults([
preserve_fn(claim_client_keys)(destination)
for destination in remote_queries
]))
defer.returnValue({
"one_time_keys": json_result,
"failures": failures
})
@defer.inlineCallbacks
def upload_keys_for_user(self, user_id, device_id, keys):
time_now = self.clock.time_msec()
# TODO: Validate the JSON to make sure it has the right keys.
device_keys = keys.get("device_keys", None)
if device_keys:
logger.info(
"Updating device_keys for device %r for user %s at %d",
device_id, user_id, time_now
)
# TODO: Sign the JSON with the server key
changed = yield self.store.set_e2e_device_keys(
user_id, device_id, time_now, device_keys,
)
if changed:
# Only notify about device updates *if* the keys actually changed
yield self.device_handler.notify_device_update(user_id, [device_id])
one_time_keys = keys.get("one_time_keys", None)
if one_time_keys:
logger.info(
"Adding %d one_time_keys for device %r for user %r at %d",
len(one_time_keys), device_id, user_id, time_now
)
key_list = []
for key_id, key_json in one_time_keys.items():
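                # one-time key ids arrive as "<algorithm>:<key_id>"
                # (e.g. "signed_curve25519:AAAAHg"), per the Matrix client API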
algorithm, key_id = key_id.split(":")
key_list.append((
algorithm, key_id, encode_canonical_json(key_json)
))
yield self.store.add_e2e_one_time_keys(
user_id, device_id, time_now, key_list
)
# the device should have been registered already, but it may have been
# deleted due to a race with a DELETE request. Or we may be using an
# old access_token without an associated device_id. Either way, we
# need to double-check the device is registered to avoid ending up with
# keys without a corresponding device.
self.device_handler.check_device_registered(user_id, device_id)
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
defer.returnValue({"one_time_key_counts": result})
|
apache-2.0
| 1,883,954,235,118,884,400
| 37.98452
| 87
| 0.542011
| false
| 4.343567
| false
| false
| false
|
hoomanlogic/hoomancmd
|
hoomancmd/matchsuggestion.py
|
1
|
17803
|
### score matched, proximate, missing, or unmatched characters to find the best-fit command ###
# todo: Improve suggestion engine
# >> plns
# Did you mean 'logs'? : 97 : journal:90
# used by all versions
proximity_mapping = {
'q': ['a', 's', 'w', '2', '1', '`'],
'w': ['q', 'a', 's', 'd', 'e', '3', '2', '1'],
'e': ['w', 's', 'd', 'f', 'r', '4', '3', '2'],
'r': ['e', 'd', 'f', 'g', 't', '5', '4', '3'],
't': ['r', 'f', 'g', 'h', 'y', '6', '5', '4'],
'y': ['t', 'g', 'h', 'j', 'u', '7', '6', '5'],
'u': ['y', 'h', 'j', 'k', 'i', '8', '7', '6'],
'i': ['u', 'j', 'k', 'l', 'o', '9', '8', '7'],
'o': ['i', 'k', 'l', ';', 'p', '0', '9', '8'],
'p': ['o', 'l', ';', '\'', '[', '-', '0', '9'],
'[': ['p', ';', '\'', ']', '=', '-', '0'],
']': ['[', '\'', '\\', '='],
'a': ['z', 'x', 's', 'w', 'q'],
's': ['a', 'z', 'x', 'c', 'd', 'e', 'w', 'q'],
'd': ['s', 'x', 'c', 'v', 'f', 'r', 'e', 'w'],
'f': ['d', 'c', 'v', 'b', 'g', 't', 'r', 'e'],
'g': ['f', 'v', 'b', 'n', 'h', 'y', 't', 'r'],
'h': ['g', 'b', 'n', 'm', 'j', 'u', 'y', 't'],
'j': ['h', 'n', 'm', ',', 'k', 'i', 'u', 'y'],
'k': ['j', 'm', ',', '.', 'l', 'o', 'i', 'u'],
'l': ['k', ',', '.', '/', ';', 'p', 'o', 'i'],
';': ['l', '.', '/', '\'', '[', 'p'],
'\'': [';', '/', ']', '[', 'p'],
'z': [ 'x', 's', 'a'],
'x': ['z', 'c', 'd', 's', 'a'],
'c': ['x', 'v', 'f', 'd', 's'],
'v': ['c', 'b', 'g', 'f', 'd'],
'b': ['v', 'n', 'h', 'g', 'f'],
'n': ['b', 'm', 'j', 'h', 'g'],
'm': ['n', ',', 'k', 'j', 'h'],
'1': ['q', 'w', '2', '`'],
'2': ['1', 'q', 'w', 'e', '3'],
'3': ['2', 'w', 'e', 'r', '4'],
'4': ['3', 'e', 'r', 't', '5'],
'5': ['4', 'r', 't', 'y', '6'],
'6': ['5', 't', 'y', 'u', '7'],
'7': ['6', 'y', 'u', 'i', '8'],
'8': ['7', 'u', 'i', 'o', '9'],
'9': ['8', 'i', 'o', 'p', '0'],
'0': ['9', 'o', 'p', '[', '-'],
'-': ['0', 'p', '[', ']', '='],
'+': ['-', '[', ']', '\\']
}
# version 1 variables
max_extra = 1 # input has extra characters
max_missing = -1 # input has fewer characters
class MatchStats(object):
def __init__(self, item, disparity):
self.match = 0
self.proximity = 0
self.disparity = disparity
self.item = item
self.too_disparate = False
self.missing = 0
def increment_match(self):
self.match += 1
def increment_proximity(self):
self.proximity += 1
def increment_missing(self):
self.missing += 1
def compare(self, other_instance):
if other_instance is None:
return self
if self.proximity > other_instance.proximity:
return other_instance
elif self.proximity < other_instance.proximity:
return self
else:
if self.match > other_instance.match:
return self
elif self.match < other_instance.match:
return other_instance
else:
if self.disparity > other_instance.disparity:
return other_instance
else:
return self
class BetterMatchStats(object):
# version 2 & 3 variables
max_sequential_disparity = 2
def __init__(self, matchterm):
self.match = 0
self.proximity = 0
self.disparity = 0
self.sequential_disparity = 0
self.matchterm = matchterm
self.too_disparate = False
self.runner_up_score = 0
self.runner_up_matchterm = ''
def increment_match(self):
self.match += 1
self._reset_sequential_disparity()
def increment_proximity(self):
self.proximity += 1
self._reset_sequential_disparity()
def increment_disparity(self):
self.disparity += 1
self._increment_sequential_disparity()
if self.disparity > len(self.matchterm):
self.too_disparate = True
def _increment_sequential_disparity(self):
self.sequential_disparity += 1
if self.sequential_disparity > BetterMatchStats.max_sequential_disparity:
self.too_disparate = True
def _reset_sequential_disparity(self):
self.sequential_disparity = 0
def get_score(self):
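        # a perfect match scores 100; each disparity costs 2 points and each
        # proximity substitution costs 1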
if self.disparity == 0 and self.proximity == 0:
return 100
else:
return 100 - ((self.disparity * 2) + self.proximity)
def compare(self, other_instance):
if other_instance is None or other_instance.too_disparate:
return self
if self.too_disparate:
other_instance.runner_up_score = self.get_score()
other_instance.runner_up_matchterm = self.matchterm
return other_instance
if self.disparity > other_instance.disparity:
other_instance.runner_up_score = self.get_score()
other_instance.runner_up_matchterm = self.matchterm
return other_instance
elif self.disparity < other_instance.disparity:
return self
if self.match > other_instance.match:
return self
elif self.match < other_instance.match:
other_instance.runner_up_score = self.get_score()
other_instance.runner_up_matchterm = self.matchterm
return other_instance
if self.proximity > other_instance.proximity:
other_instance.runner_up_score = self.get_score()
other_instance.runner_up_matchterm = self.matchterm
return other_instance
else:
return self
def copy_attributes(self, other_instance):
self.match = other_instance.match
self.proximity = other_instance.proximity
self.disparity = other_instance.disparity
self.sequential_disparity = other_instance.sequential_disparity
self.too_disparate = other_instance.too_disparate
@classmethod
def copy(cls, obj):
instance = BetterMatchStats(obj.matchterm)
instance.match = obj.match
instance.proximity = obj.proximity
instance.disparity = obj.disparity
instance.sequential_disparity = obj.sequential_disparity
instance.too_disparate = obj.too_disparate
return instance
def is_in_proximity(char1, char2):
if char2 in proximity_mapping[char1]:
return True
else:
return False
# version 1
def getbestmatch_v1(input_, list_):
input_ = input_.lower()
matchstats_best = None
for item in list_:
item = item.lower()
disparity = len(input_) - len(item)
# ensure disparity isn't too great
if disparity < max_missing or disparity > max_extra:
continue
inner = input_
outer = item
if disparity < 0:
inner = input_
outer = item
elif disparity > 0:
inner = item
outer = input_
        # use the smaller string as the inner one to slide along the outer,
        # so the absolute value of the disparity gives the number of
        # starting offsets to try
for i in range(0, abs(disparity) + 1):
outer_subset = outer[i:]
matchstats = MatchStats(item, abs(disparity))
# loop through characters and compare them
for j, inner_char in enumerate(inner):
if inner_char == outer_subset[j]:
matchstats.increment_match()
continue
elif is_in_proximity(inner_char, outer_subset[j]):
matchstats.increment_proximity()
continue
else:
matchstats.too_disparate = True
break
if not matchstats.too_disparate:
matchstats_best = matchstats.compare(matchstats_best)
if matchstats_best is None:
return None
else:
return matchstats_best.item
# version 2
def getbestmatch_v2(input_, list_):
    # case insensitive matching
input_ = input_.lower()
# stores best match so far
current_matchstats_best = None
# iterate through all the possible matchterms
# to find the best match
for matchterm in list_:
        # case insensitive matching
matchterm = matchterm.lower()
# ensure disparity isn't too great from the get go
# by comparing overall length, if it is too disparate
# then move on to the next matchterm
# if abs(len(input_) - len(matchterm)) > max_sequential_disparity:
# continue
# create object to hold the match stats
matchstats = BetterMatchStats(matchterm)
# run the input_ and matchterm through
        # scenarios to find a potential match
matchup_v2(input_, matchterm, matchstats)
        # done with the while loop in matchup_v2 because one index ran out
# now let's calculate the leftover disparity
max_char_len = 0
if len(input_) > len(matchterm):
max_char_len = len(input_)
else:
max_char_len = len(matchterm)
for i in (range(0, abs(max_char_len - (matchstats.match + matchstats.proximity + matchstats.disparity)))):
matchstats.increment_disparity()
# compare the matchstats after matchup with the current best matchstats
# and set the better of the two to the best match so far
# -- may the best match win...
current_matchstats_best = matchstats.compare(current_matchstats_best)
return current_matchstats_best.matchterm
def matchup_v2(input_, matchterm, matchstats, depth=0):
input_index = 0
matchterm_index = 0
while matchterm_index < len(matchterm) and input_index < len(input_):
if input_[input_index] == matchterm[matchterm_index]:
matchstats.increment_match()
input_index = input_index + 1
matchterm_index = matchterm_index + 1
continue
elif is_in_proximity(input_[input_index], matchterm[matchterm_index]):
matchstats.increment_proximity()
input_index = input_index + 1
matchterm_index = matchterm_index + 1
else:
# increment disparity and check if we are too disparate
matchstats.increment_disparity()
if matchstats.too_disparate:
return
            # here we need to branch and try both possibilities (input_ may
            # have missing chars, or extra chars), then compare the branches
            # to pick the best matchup
# input_ may have bad chars, similar to the proximity solution,
# but treats it as a disparity
bad_char_scenario = None
if input_index + 1 <= len(input_) and matchterm_index + 1 <= len(matchterm):
bad_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v2(input_[input_index + 1:], matchterm[matchterm_index + 1:], bad_char_scenario, depth=depth+1)
# input_ may have missing chars
missing_char_scenario = None
if matchterm_index + 1 <= len(matchterm):
missing_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v2(input_[input_index:], matchterm[matchterm_index + 1:], missing_char_scenario, depth=depth+1)
# input_ may have extra chars
extra_char_scenario = None
if input_index + 1 <= len(input_):
extra_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v2(input_[input_index + 1:], matchterm[matchterm_index:], extra_char_scenario, depth=depth+1)
# if both the input_ and matchterm have reached the end of their input_
# then return
if input_index + 1 >= len(input_) and matchterm_index + 1 >= len(matchterm):
return
# grab either one that is not None and compare to the other
# one, which may be None, but one of these scenarios is
# guaranteed to not be None by this point
best_scenario = None
if missing_char_scenario is not None:
best_scenario = missing_char_scenario.compare(extra_char_scenario)
else:
best_scenario = extra_char_scenario.compare(missing_char_scenario)
# compare the winner of missing vs extra with the bad chars scenario
best_scenario = best_scenario.compare(bad_char_scenario)
# copy the attributes from the best scenario
# because simply setting the object makes the
# root caller lose the changes
matchstats.copy_attributes(best_scenario)
return
# investigate this
# >> veweerython
# Did you mean "deleteprop"?
# version 3
def getbestmatch_v3(input_, list_, set_max_sequential_disparity=None):
    # case insensitive matching
input_ = input_.lower()
# stores best match so far
current_matchstats_best = None
if set_max_sequential_disparity is not None:
BetterMatchStats.max_sequential_disparity = set_max_sequential_disparity
# iterate through all the possible matchterms
# to find the best match
for matchterm in list_:
        # case insensitive matching
matchterm = matchterm.lower()
# ensure disparity isn't too great from the get go
# by comparing overall length, if it is too disparate
# then move on to the next matchterm
# if abs(len(input_) - len(matchterm)) > max_sequential_disparity:
# continue
# create object to hold the match stats
matchstats = BetterMatchStats(matchterm)
if len(input_) > len(matchterm):
max_char_len = len(input_)
inner = matchterm
outer = input_
else:
max_char_len = len(matchterm)
inner = input_
outer = matchterm
# run the input_ and matchterm through
        # scenarios to find a potential match
matchup_v3(inner, outer, matchstats)
for i in (range(0, abs(max_char_len - (matchstats.match + matchstats.proximity + matchstats.disparity)))):
matchstats.disparity = matchstats.disparity + 1
# compare the matchstats after matchup with the current best matchstats
# and set the better of the two to the best match so far
# -- may the best match win...
current_matchstats_best = matchstats.compare(current_matchstats_best)
# >> testmatch hooman human humous humid
# humid 90 0
return current_matchstats_best
def matchup_v3(input_, matchterm, matchstats, depth=0):
input_index = 0
matchterm_index = 0
while matchterm_index < len(matchterm) and input_index < len(input_):
if input_[input_index] == matchterm[matchterm_index]:
matchstats.increment_match()
input_index = input_index + 1
matchterm_index = matchterm_index + 1
continue
elif is_in_proximity(input_[input_index], matchterm[matchterm_index]):
matchstats.increment_proximity()
input_index = input_index + 1
matchterm_index = matchterm_index + 1
else:
# increment disparity and check if we are too disparate
matchstats.increment_disparity()
if matchstats.too_disparate:
return
            # here we need to branch and try both possibilities (input_ may
            # have missing chars, or extra chars), then compare the branches
            # to pick the best matchup
# input_ may have bad chars, similar to the proximity solution,
# but treats it as a disparity
bad_char_scenario = None
if input_index + 1 <= len(input_) and matchterm_index + 1 <= len(matchterm):
bad_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v3(input_[input_index + 1:], matchterm[matchterm_index + 1:], bad_char_scenario, depth=depth+1)
# input_ may have missing chars
missing_char_scenario = None
if matchterm_index + 1 <= len(matchterm):
missing_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v3(input_[input_index:], matchterm[matchterm_index + 1:], missing_char_scenario, depth=depth+1)
# input_ may have extra chars
extra_char_scenario = None
if input_index + 1 <= len(input_):
extra_char_scenario = BetterMatchStats.copy(matchstats)
matchup_v3(input_[input_index + 1:], matchterm[matchterm_index:], extra_char_scenario, depth=depth+1)
# if both the input_ and matchterm have reached the end of their input_
# then return
if input_index + 1 >= len(input_) and matchterm_index + 1 >= len(matchterm):
return
# grab either one that is not None and compare to the other
# one, which may be None, but one of these scenarios is
# guaranteed to not be None by this point
best_scenario = None
if missing_char_scenario is not None:
best_scenario = missing_char_scenario.compare(extra_char_scenario)
else:
best_scenario = extra_char_scenario.compare(missing_char_scenario)
# compare the winner of missing vs extra with the bad chars scenario
best_scenario = best_scenario.compare(bad_char_scenario)
# copy the attributes from the best scenario
# because simply setting the object makes the
# root caller lose the changes
matchstats.copy_attributes(best_scenario)
return
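# Hedged usage sketch (not part of the original module): reproducing the
# example from the header comment, version 3 should rank "logs" above
# "journal" for the typo "plns"; exact scores depend on proximity_mapping.
#
#   best = getbestmatch_v3('plns', ['logs', 'journal'])
#   print best.matchterm, best.get_score()                # e.g. logs 97
#   print best.runner_up_matchterm, best.runner_up_score  # e.g. journal 90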
|
apache-2.0
| -2,147,103,867,725,780,500
| 35.935685
| 119
| 0.559569
| false
| 3.726036
| false
| false
| false
|
pernici/sympy
|
sympy/series/tests/test_order.py
|
1
|
6982
|
from sympy import Symbol, Rational, Order, C, exp, ln, log, O, var, nan, pi, S
from sympy.utilities.pytest import XFAIL, raises
from sympy.abc import w, x, y, z
def test_caching_bug():
    # needs to be the first test, so that all caches are clean
    # cache it
    e = O(w)
    # and test that this won't raise an exception
    f = O(w**(-1/x/log(3)*log(5)), w)
def test_simple_1():
o = Rational(0)
assert Order(2*x) == Order(x)
assert Order(x)*3 == Order(x)
assert -28*Order(x) == Order(x)
assert Order(-23) == Order(1)
assert Order(exp(x)) == Order(1,x)
assert Order(exp(1/x)).expr == exp(1/x)
assert Order(x*exp(1/x)).expr == x*exp(1/x)
assert Order(x**(o/3)).expr == x**(o/3)
assert Order(x**(5*o/3)).expr == x**(5*o/3)
assert Order(x**2 + x + y, x) == \
Order(x**2 + x + y, y) == O(1)
raises(NotImplementedError, 'Order(x, 2 - x)')
def test_simple_2():
assert Order(2*x)*x == Order(x**2)
assert Order(2*x)/x == Order(1,x)
assert Order(2*x)*x*exp(1/x) == Order(x**2*exp(1/x))
assert (Order(2*x)*x*exp(1/x)/ln(x)**3).expr == x**2*exp(1/x)*ln(x)**-3
def test_simple_3():
assert Order(x)+x == Order(x)
assert Order(x)+2 == 2+Order(x)
assert Order(x)+x**2 == Order(x)
assert Order(x)+1/x == 1/x+Order(x)
assert Order(1/x)+1/x**2 == 1/x**2+Order(1/x)
assert Order(x)+exp(1/x) == Order(x)+exp(1/x)
def test_simple_4():
assert Order(x)**2 == Order(x**2)
assert Order(x**3)**-2 == Order(x**-6)
def test_simple_5():
assert Order(x)+Order(x**2) == Order(x)
assert Order(x)+Order(x**-2) == Order(x**-2)
assert Order(x)+Order(1/x) == Order(1/x)
def test_simple_6():
assert Order(x)-Order(x) == Order(x)
assert Order(x)+Order(1) == Order(1)
assert Order(x)+Order(x**2) == Order(x)
assert Order(1/x)+Order(1) == Order(1/x)
assert Order(x)+Order(exp(1/x)) == Order(exp(1/x))
assert Order(x**3)+Order(exp(2/x)) == Order(exp(2/x))
assert Order(x**-3)+Order(exp(2/x)) == Order(exp(2/x))
def test_simple_7():
assert 1+O(1) == O(1)
assert 2+O(1) == O(1)
assert x+O(1) == O(1)
assert 1/x+O(1) == 1/x+O(1)
def test_contains_0():
assert Order(1,x).contains(Order(1,x))
assert Order(1,x).contains(Order(1))
assert Order(1).contains(Order(1,x))
def test_contains_1():
assert Order(x).contains(Order(x))
assert Order(x).contains(Order(x**2))
assert not Order(x**2).contains(Order(x))
assert not Order(x).contains(Order(1/x))
assert not Order(1/x).contains(Order(exp(1/x)))
assert not Order(x).contains(Order(exp(1/x)))
assert Order(1/x).contains(Order(x))
assert Order(exp(1/x)).contains(Order(x))
assert Order(exp(1/x)).contains(Order(1/x))
assert Order(exp(1/x)).contains(Order(exp(1/x)))
assert Order(exp(2/x)).contains(Order(exp(1/x)))
assert not Order(exp(1/x)).contains(Order(exp(2/x)))
def test_contains_2():
assert Order(x).contains(Order(y)) is None
assert Order(x).contains(Order(y*x))
assert Order(y*x).contains(Order(x))
assert Order(y).contains(Order(x*y))
assert Order(x).contains(Order(y**2*x))
def test_contains_3():
assert Order(x*y**2).contains(Order(x**2*y)) is None
assert Order(x**2*y).contains(Order(x*y**2)) is None
def test_add_1():
assert Order(x+x) == Order(x)
assert Order(3*x-2*x**2) == Order(x)
assert Order(1+x) == Order(1,x)
assert Order(1+1/x) == Order(1/x)
assert Order(ln(x)+1/ln(x)) == Order(ln(x))
assert Order(exp(1/x)+x) == Order(exp(1/x))
assert Order(exp(1/x)+1/x**20) == Order(exp(1/x))
def test_ln_args():
assert O(log(x)) + O(log(2*x)) == O(log(x))
assert O(log(x)) + O(log(x**3)) == O(log(x))
assert O(log(x*y)) + O(log(x)+log(y)) == O(log(x*y))
def test_multivar_0():
assert Order(x*y).expr == x*y
assert Order(x*y**2).expr == x*y**2
assert Order(x*y,x).expr == x
assert Order(x*y**2,y).expr == y**2
assert Order(x*y*z).expr == x*y*z
assert Order(x/y).expr == x/y
assert Order(x*exp(1/y)).expr == x*exp(1/y)
assert Order(exp(x)*exp(1/y)).expr == exp(1/y)
def test_multivar_0a():
assert Order(exp(1/x)*exp(1/y)).expr == exp(1/x + 1/y)
def test_multivar_1():
assert Order(x+y).expr == x+y
assert Order(x+2*y).expr == x+y
assert (Order(x+y)+x).expr == (x+y)
assert (Order(x+y)+x**2) == Order(x+y)
assert (Order(x+y)+1/x) == 1/x+Order(x+y)
assert Order(x**2+y*x).expr == x**2+y*x
def test_multivar_2():
assert Order(x**2*y+y**2*x,x,y).expr == x**2*y+y**2*x
def test_multivar_mul_1():
assert Order(x+y)*x == Order(x**2+y*x,x,y)
def test_multivar_3():
assert (Order(x)+Order(y)).args in [
(Order(x), Order(y)),
(Order(y), Order(x))]
assert Order(x)+Order(y)+Order(x+y) == Order(x+y)
assert (Order(x**2*y)+Order(y**2*x)).args in [
(Order(x*y**2), Order(y*x**2)),
(Order(y*x**2), Order(x*y**2))]
assert (Order(x**2*y)+Order(y*x)) == Order(x*y)
def test_issue369():
x = Symbol('x')
y = Symbol('y', negative=True)
z = Symbol('z', complex=True)
# check that Order does not modify assumptions about symbols
Order(x)
Order(y)
Order(z)
assert x.is_positive == None
assert y.is_positive == False
assert z.is_positive == None
assert x.is_infinitesimal == None
assert y.is_infinitesimal == None
assert z.is_infinitesimal == None
def test_leading_order():
assert (x+1+1/x**5).extract_leading_order(x) == ((1/x**5, O(1/x**5)),)
assert (1+1/x).extract_leading_order(x) == ((1/x, O(1/x)),)
assert (1+x).extract_leading_order(x) == ((1, O(1, x)),)
assert (1+x**2).extract_leading_order(x) == ((1, O(1, x)),)
assert (2+x**2).extract_leading_order(x) == ((2, O(1, x)),)
assert (x+x**2).extract_leading_order(x) == ((x, O(x)),)
def test_leading_order2():
assert set((2+pi+x**2).extract_leading_order(x)) == set(((pi, O(1, x)),
(S(2), O(1, x))))
assert set((2*x+pi*x+x**2).extract_leading_order(x)) == set(((2*x, O(x)),
(x*pi, O(x))))
def test_order_leadterm():
assert O(x**2)._eval_as_leading_term(x) == O(x**2)
def test_nan():
assert not O(x).contains(nan)
def test_O1():
assert O(1) == O(1, x)
assert O(1) == O(1, y)
assert hash(O(1)) == hash(O(1, x))
assert hash(O(1)) == hash(O(1, y))
def test_getn():
# other lines are tested incidentally by the suite
assert O(x).getn() == 1
assert O(x/log(x)).getn() == 1
assert O(x**2/log(x)**2).getn() == 2
assert O(x*log(x)).getn() == 1
raises(NotImplementedError, '(O(x) + O(y)).getn()')
def test_diff():
assert O(x**2).diff(x) == O(x)
def test_getO():
assert (x).getO() is None
assert (x).removeO() == x
assert (O(x)).getO() == O(x)
assert (O(x)).removeO() == 0
assert (z + O(x) + O(y)).getO() == O(x) + O(y)
assert (z + O(x) + O(y)).removeO() == z
raises(NotImplementedError, '(O(x)+O(y)).getn()')
|
bsd-3-clause
| -6,458,170,842,665,350,000
| 32.567308
| 78
| 0.563879
| false
| 2.496246
| true
| false
| false
|
nadgowdas/cargo
|
cli/cargo.py
|
1
|
2456
|
#!/usr/bin/env python
#Copyright IBM Corporation 2015.
#LICENSE: Apache License 2.0 http://opensource.org/licenses/Apache-2.0
import os
import sys
import optparse
import logging
from voyage import *
def main():
    usage = "usage: python %prog {--list | --migrate --source <source> --container <container> --target <target> [--rootfs] | --failover --container <container> --target <target> | --status --container <container>}"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-l", "--list", action="store_true", dest="listc", default=False, help="list containers")
parser.add_option("-m", "--migrate", action="store_true", dest="migrate", default=False, help="migrate container")
parser.add_option("-f", "--failover", action="store_true", dest="failover", default=False, help="failover container")
parser.add_option("--status", action="store_true", dest="status", default=False, help="query lazy replication status")
parser.add_option("--source", action="store", dest="source", default = None, help="Source Host (agent name)")
parser.add_option("--container", action="store", dest="container", default = None, help="Container name to be migrated")
parser.add_option("--target", action="store", dest="target", default = None, help="Target Host (agent name)")
parser.add_option("--rootfs", action="store_true", dest="rootfs", default=False, help="migrate rootfs")
parser.add_option("-s", "--server", action="store", dest="server", default="127.0.0.1:5000", help="Cargo server and port")
    opts, args = parser.parse_args()
listc = opts.listc
migrate = opts.migrate
failover = opts.failover
server = opts.server
source = opts.source
target = opts.target
rootfs = opts.rootfs
container = opts.container
    status = opts.status
if not listc and not migrate and not failover and not status:
parser.print_help()
if migrate and not source and not target and not container:
parser.print_help()
if failover and not target and not container and not server:
parser.print_help()
if status and not container:
parser.print_help()
voyage = Voyage(server)
if listc:
voyage.listcontainers()
sys.exit(0)
if migrate:
voyage.migrate(source, container, target, rootfs)
sys.exit(0)
if failover:
voyage.failover(container, target)
sys.exit(0)
if status:
voyage.getStatus(container)
if __name__=="__main__":
main()
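# Hedged example invocations (illustrative; agent and container names are made up):
#   python cargo.py --list
#   python cargo.py --migrate --source hostA --container web1 --target hostB --rootfs
#   python cargo.py --failover --container web1 --target hostB
#   python cargo.py --status --container web1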
|
apache-2.0
| 8,801,125,674,314,020,000
| 35.117647
| 150
| 0.661645
| false
| 3.732523
| false
| false
| false
|
Ilphrin/TuxleTriad
|
Menu.py
|
1
|
16142
|
# coding: utf-8
import pygame
import os
import sys
import gettext
from functions import *
from color import *
from pygame.locals import *
from game import Application
from Sound import Sound
from Text import Text
from Buttons import Button
from listOfCards import *
from Card import Card
pygame.init()
class Menu(pygame.sprite.Sprite):
def __init__(self, width, height):
self.FONT = "Playball.ttf"
# We create the window
self.width = width
self.height = height
fullscreen = pygame.NOFRAME
self.dimension = (self.width, self.height)
self.screen = pygame.display.set_mode(self.dimension, fullscreen)
pygame.display.set_caption("TuxleTriad")
self._load_translation()
self.bkgrnd, self.bkgrndRect = loadImage("background.jpg")
self.bkgrndRect = self.bkgrnd.get_rect()
# The Clock of the game, to manage the frame-rate
self.clock = pygame.time.Clock()
self.fps = 30
# We start the Sound object, playing music and sounds.
self.sound = Sound()
        # Needed to keep track of the game state if we pause during play.
self.app = None
self.main()
def main(self):
elemText = [_("Play"), _("Options"), _("Rules"), _("About"),
_("Quit Game")]
self.menu = []
for elem in elemText:
self.menu.append(Text(elem, self.FONT, white, 40))
posx = 400
posy = 400 - (60 * len(elemText))
for elem in self.menu:
elem.rect.center = ((posx, posy))
posy += 100
pygame.event.clear()
self.updateMenu()
while 1:
pygame.display.flip()
deactivate()
event = pygame.event.wait()
if event.type == MOUSEBUTTONUP:
self.clicked()
elif event.type == QUIT:
self.quitGame()
self.clock.tick(self.fps)
def updateMenu(self):
self.screen.blit(self.bkgrnd, self.bkgrndRect)
for i in range(len(self.menu)):
self.screen.blit(self.menu[i].surface, self.menu[i].rect)
self.clock.tick(self.fps)
def quitGame(self):
setConfig(self.sound.volume)
pygame.quit()
sys.exit()
def oldMenu(self):
        while 1:
            for button in self.menu:
                button.rect.centerx -= 100 - self.fps
                if button.rect.centerx <= -500:
                    return
self.updateMenu()
pygame.display.flip()
def clicked(self):
for button in self.menu:
if button.rect.collidepoint(pygame.mouse.get_pos()):
self.sound.clicMenu.play()
if button.text == _(u"Quit Game"):
self.quitGame()
self.oldMenu()
if button.text == _(u"Play"):
self.play()
elif button.text == _(u"Options"):
self.options()
elif button.text == _(u"Rules"):
self.rules()
elif button.text == _(u"About"):
self.about()
self.main()
def play(self):
"""User clicked on "Play" """
        if self.app is not None:
texts = [_("Continue"),_("Adventure"), _("Solo"),
_("Hot Seat"), _("Back")]
else:
texts = [_("Adventure"), _("Solo"), _("Hot Seat"), _("Back")]
length = len(texts)
        if self.app is not None:
textPos = [(250, 100), (250,200), (250, 300), (250,400),
(550, 500)]
else:
textPos = [(250, 100), (250,200), (250, 300), (550, 500)]
self.menu = []
for i in range(length):
self.menu.append(Text(texts[i], self.FONT, white, 45))
self.menu[i].rect.topleft = textPos[i]
self.updateMenu()
pygame.display.flip()
self.clock.tick(self.fps)
while 1:
event = pygame.event.wait()
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == MOUSEBUTTONUP:
coordinates = pygame.mouse.get_pos()
for i in range(length):
if self.menu[i].rect.collidepoint(coordinates):
self.sound.clicMenu.play()
self.oldMenu()
if self.menu[i].text == _("Adventure"):
return
elif self.menu[i].text == _("Solo"):
return
elif self.menu[i].text == _("Hot Seat"):
self.hotSeat()
elif self.menu[i].text == _("Back"):
return
elif self.menu[i].text == _("Continue"):
self.app.main()
def options(self):
texts = [_("Audio"), _("Sounds"), _("Music"), _("Back")]
length = len(texts)
textsPos = [(320, 100), (100, 200), (100, 300), (550, 500)]
self.menu = []
for i in range(length):
self.menu.append(Text(texts[i], self.FONT, white, 50))
self.menu[i].rect.topleft = textsPos[i]
bar1, bar1Rect = loadImage("barSound.jpg")
bar2, bar2Rect = loadImage("barSound.jpg")
bar1Rect.topleft = (300, 220)
bar2Rect.topleft = (300, 320)
bars = [bar1Rect, bar2Rect]
        # X coordinates, relative to the bar's origin, of the beginning and
        # end of each volume cursor's travel.
MIN_VOLUME = 15
MAX_VOLUME = 240
# X absolute coordinates of the volume cursor.
MIN = bars[0].x + MIN_VOLUME
MAX = bars[0].x + MAX_VOLUME
cursor1, cursor1Rect = loadImage("cursorSound.png")
cursor2, cursor2Rect = loadImage("cursorSound.png")
cursor1Rect.topleft = \
(bar1Rect.x + 225 * self.sound.soundVolume, bar1Rect.y - 23)
cursor2Rect.topleft = \
(bar2Rect.x + 225 * self.sound.musicVolume, bar2Rect.y - 23)
cursors = [cursor1Rect, cursor2Rect]
self.screen.blit(self.bkgrnd, self.bkgrndRect)
self.screen.blit(bar1, bar1Rect)
self.screen.blit(bar2, bar2Rect)
self.screen.blit(cursor1, cursors[0])
self.screen.blit(cursor2, cursors[1])
for i in range(length):
self.screen.blit(self.menu[i].surface, self.menu[i].rect)
pygame.display.update()
move = 0
while 1:
event = pygame.event.wait()
mousex, mousey = pygame.mouse.get_pos()
if event.type == QUIT:
self.quitGame()
elif event.type == MOUSEBUTTONDOWN:
move = 1
reactivate()
elif event.type == MOUSEBUTTONUP:
move = 0
deactivate()
for i in range(len(bars)):
if move == 1 and bars[i].collidepoint((mousex, mousey)):
if MIN <= mousex <= MAX:
cursors[i].centerx = mousex
elif mousex > bars[i].x + MAX_VOLUME:
cursors[i].centerx = bars[i].x + MAX_VOLUME
else:
cursors[i].centerx = bars[i].x + MIN_VOLUME
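                    # the cursor travels MAX_VOLUME - MIN_VOLUME = 225 px:
                    # dividing by 2.25 maps that to 0..100, and /100.0
                    # normalizes to the 0.0-1.0 range the mixer expects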
volume = cursors[i].centerx - MIN
if volume != 0:
volume = (volume / 2.25) / 100.0
assert (0.0 <= volume <= 1.0)
if i == 0:
self.sound.soundVolume = volume
self.sound.playPutCard()
self.sound.update()
elif i == 1:
self.sound.musicVolume = volume
self.sound.update()
self.screen.blit(self.bkgrnd, self.bkgrndRect)
self.screen.blit(bar1, bar1Rect)
self.screen.blit(bar2, bar2Rect)
self.screen.blit(cursor1, cursors[0])
self.screen.blit(cursor2, cursors[1])
for j in range(4):
self.screen.blit(self.menu[j].surface,\
self.menu[j].rect)
pygame.display.update()
self.clock.tick(self.fps)
if move and self.menu[3].rect.collidepoint((mousex, mousey)):
del bar1, bar2, bars, cursor1, cursor2, cursors
self.oldMenu()
self.sound.clicMenu.play()
return
def about(self):
page = 1
allPage = []
pageList = []
index = 0
for number in range(len(allCards)):
pageList.append(Card(number, 1))
index += 1
            if index == 3 or number == len(allCards) - 1:
allPage.append(pageList)
del pageList
pageList = []
index = 0
maxPage = len(allPage)
txtPage = str(page) + "/" + str(maxPage)
navigation = [_("Back"), _("Next"), _("Quit"),
"Programming:", "Kevin \"Ilphrin\" Pellet",
"Graphics:", "Yunero Kisapsodos",
txtPage]
navigationPos = [(80,550), (650,550), (660,40), (630, 100),
(640, 130), (630, 200), (640, 230), (350,550)]
self.menu = []
for i in range(len(navigation)):
if 2 < i < 7:
size = 12
font = "rimouski sb.ttf"
else:
font = self.FONT
size = 30
self.menu.append(Text(navigation[i], font, white, size))
self.menu[i].rect.topleft = navigationPos[i]
cardPos = [(50,50), (50,200), (50, 350)]
self.screen.blit(self.bkgrnd, self.bkgrndRect)
for element in self.menu:
self.screen.blit(element.surface,element.rect)
for elem in range(len(allPage[page-1])):
card = allPage[page-1][elem]
card.rect.topleft = cardPos[elem]
card.About.rect.topleft = card.rect.topright
for elem in allPage[page-1]:
self.screen.blit(elem.image, elem.rect)
self.screen.blit(elem.About.surface, elem.About.rect)
while 1:
self.clock.tick(self.fps)
pygame.display.flip()
event = pygame.event.wait()
if event.type == MOUSEBUTTONUP:
coords = pygame.mouse.get_pos()
for button in self.menu:
if button.rect.collidepoint(coords):
if button.text == _("Back"):
if page > 1:
page -= 1
self.sound.putcard.play()
if button.text == _("Next"):
if page < maxPage:
page += 1
self.sound.putcard.play()
if button.text == _("Quit"):
self.oldMenu()
return
txtPage = str(page) + "/" + str(maxPage)
self.menu[7] = Text(txtPage, self.FONT, white, 30)
self.menu[7].rect.topleft = navigationPos[7]
self.screen.blit(self.bkgrnd, self.bkgrndRect)
for element in self.menu:
self.screen.blit(element.surface,element.rect)
for elem in range(len(allPage[page-1])):
card = allPage[page-1][elem]
card.rect.topleft = cardPos[elem]
card.About.rect.topleft = card.rect.topright
for elem in allPage[page-1]:
self.screen.blit(elem.image, elem.rect)
self.screen.blit(elem.About.surface,
elem.About.rect)
if event.type == QUIT:
self.quitGame()
def rules(self):
tutorialButton = Button(_(u"Tutorial"), self.FONT, white)
howtoButton = Button(_(u"How To"), self.FONT, white)
backButton = Button(_(u"Back"), self.FONT, white)
tutorialButton.rect.topleft = (250, 100)
howtoButton.rect.topleft = (250, 200)
backButton.rect.topleft = (550, 500)
self.menu = []
self.menu.append(tutorialButton)
self.menu.append(howtoButton)
self.menu.append(backButton)
self.updateMenu()
        while 1:
self.clock.tick(self.fps)
pygame.display.flip()
event = pygame.event.wait()
if event.type == MOUSEBUTTONUP:
coords = pygame.mouse.get_pos()
for i in range(len(self.menu)):
if self.menu[i].rect.collidepoint(coords):
self.oldMenu()
if self.menu[i].text == _(u"Tutorial"):
self.main()
elif self.menu[i].text == _(u"How To"):
self.HowTo()
return
elif self.menu[i].text == _(u"Back"):
self.main()
elif event.type == QUIT:
self.quitGame()
def HowTo(self):
backButton = Button(_("Back"), self.FONT, white)
prevButton = Button(_("Prev"), self.FONT, white)
nextButton = Button(_("Next"), self.FONT, white)
page = 1
maxPage = 2
pageList = []
for i in range(maxPage):
pageList.append(pygame.image.load(getHowTo(i)))
pageRect = pageList[i - 1].get_rect()
pageRect.topleft = (-20, 0)
backButton.rect.topleft = (600, 40)
prevButton.rect.topleft = (80, 550)
nextButton.rect.topleft = (660, 550)
self.menu = []
self.menu.append(backButton)
self.menu.append(prevButton)
self.menu.append(nextButton)
self.updateMenu()
self.screen.blit(pageList[page - 1], pageRect)
        while 1:
self.clock.tick(self.fps)
pygame.display.flip()
event = pygame.event.wait()
if event.type == MOUSEBUTTONUP:
coords = pygame.mouse.get_pos()
if backButton.rect.collidepoint(coords):
self.oldMenu()
return
elif prevButton.rect.collidepoint(coords) and page > 1:
page -= 1
elif nextButton.rect.collidepoint(coords) and page < maxPage:
page += 1
self.updateMenu()
self.screen.blit(pageList[page - 1], pageRect)
elif event.type == QUIT:
self.quitGame()
def _load_translation(self):
base_path = os.getcwd()
directory = os.path.join(base_path, 'translations')
print "Loading translations at: ", directory
params = {
'domain': 'tuxle-triad',
'fallback': True
}
if os.path.isdir(directory):
params.update({'localedir': directory})
translation = gettext.translation(**params)
translation.install("ngettext")
def solo(self):
"""1vsIA mode"""
print "Solo!"
def adventure(self):
"""Adventure mode against IA"""
print "Adventure!"
def hotSeat(self):
"""1vs1 mode"""
        if self.app is not None:
            del self.app
        Application(800, 600, self.screen, self.sound, self).main()
Menu(800, 600)
|
mit
| 1,228,710,710,715,892,200
| 36.714953
| 78
| 0.477016
| false
| 4.137913
| false
| false
| false
|
mattilyra/gensim
|
docs/src/conf.py
|
1
|
7457
|
# -*- coding: utf-8 -*-
#
# gensim documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 17 13:42:21 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
html_theme = 'gensim_theme'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.imgmath', 'sphinxcontrib.programoutput']
autoclass_content = "both"
napoleon_google_docstring = False # Disable support for Google-style docstrings
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'indextoc'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': './_templates/indexcontent.html'}
# General information about the project.
project = u'gensim'
copyright = u'2009-now, Radim Řehůřek <me(at)radimrehurek.com>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.4'
# The full version, including alpha/beta/rc tags.
release = '3.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# main_colour = "#ffbbbb"
html_theme_options = {
# "rightsidebar": "false",
# "stickysidebar": "true",
# "bodyfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
# "headfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
# "sidebarbgcolor": "fuckyou",
# "footerbgcolor": "#771111",
# "relbarbgcolor": "#993333",
# "sidebartextcolor": "#000000",
# "sidebarlinkcolor": "#330000",
# "codebgcolor": "#fffff0",
# "headtextcolor": "#000080",
# "headbgcolor": "#f0f0ff",
# "bgcolor": "#ffffff",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "gensim"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = ''
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {} # {'index': ['download.html', 'globaltoc.html', 'searchbox.html', 'indexsidebar.html']}
# html_sidebars = {'index': ['globaltoc.html', 'searchbox.html']}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_domain_indices = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'gensimdoc'
html_show_sphinx = False
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', 'gensim.tex', u'gensim Documentation', u'Radim Řehůřek', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
suppress_warnings = ['image.nonlocal_uri', 'ref.citation', 'ref.footnote']
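# Hedged usage note (not part of the original config): with this conf.py in
# place, the HTML docs are typically built from this directory with something
# like:
#   sphinx-build -b html . _build/html
# (the _build output path is assumed from the exclude_trees setting above)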
|
lgpl-2.1
| -371,278,304,731,654,850
| 32.868182
| 114
| 0.707153
| false
| 3.577052
| true
| false
| false
|
matham/cutils
|
cutils/knspace.py
|
1
|
15278
|
'''Provides namespace functionality for Kivy objects. It allows kivy objects
to be named and then accessed using the namespace.
:class:`KNSpace` instances are the namespaces that store the named objects.
Classes need to inherit from :class:`KNSpaceBehavior` so that the class, when
named, will be stored in the namespace. :attr:`knspace` is the default
namespace where objects are stored, unless the object is associated with a
different namespace.
Simple Example
-----------------
Here, because no namespace is specified, the default
:attr:`knspace` is used so we can access its widgets directly, as in
`knspace.keyboard`, to get the keyboard widget::
#:import knspace cutils.knspace.knspace
#:import Factory kivy.lang.Factory
<NamedTextInput@KNSpaceBehavior+TextInput>
<Keyboard@Popup>:
BoxLayout:
GridLayout:
cols: 1
NamedTextInput:
name: 'keyboard'
hint_text: 'Type something'
Label:
text: 'My Keyboard'
Button:
text: 'Close Keyboard'
on_press: root.dismiss()
<RootWidget@BoxLayout>:
Button:
on_parent: self.popup = Factory.Keyboard()
on_release: self.popup.open()
text: 'Open keyboard'
Label:
text: 'Keyboard output:\\n' + knspace.keyboard.text if knspace.keyboard else ''
To test, run an app with `RootWidget`.
Multiple Namespaces
-------------------
In the previous example, only the default namespace was used. However,
sometimes we need to split namespaces so that the same name can be reused
across multiple widgets.
When a :class:`KNSpaceBehavior` derived widget is given a name, first we find
the associated namespace using the :attr:`KNSpaceBehavior.knspace` property.
Then, we create a :class:`~kivy.properties.ObjectProperty` in that namespace,
whose name is that name and assign the named widget as its value. See
:attr:`KNSpaceBehavior.knspace` for details on how that namespace is found.
In short, we check if the widget was assigned one, if not, we find the
namespace by walking up its parent tree using
:attr:`KNSpaceBehavior.knspace_key` and finding the first one with a namespace.
Finally, if not found, we use :attr:`knspace`. Therefore, above, the default
namespace was used since none was specified.
::
#:import Factory kivy.lang.Factory
<NamedTextInput@KNSpaceBehavior+TextInput>
<Keyboard@KNSpaceBehavior+Popup>:
knspace_key: 'knspace_parent'
knspace_parent: None
BoxLayout:
GridLayout:
cols: 1
NamedTextInput:
name: 'keyboard'
hint_text: 'Type something'
Label:
text: 'My Keyboard'
Button:
text: 'Close Keyboard'
on_press: root.dismiss()
<Typist@KNSpaceBehavior+BoxLayout>:
knspace: getattr(self, 'knspace').clone() # So we don't create a rule binding
Button:
on_parent:
self.popup = Factory.Keyboard()
self.popup.knspace_parent = root
on_release: self.popup.open()
text: 'Open keyboard'
Label:
text: 'Keyboard output:\\n' + root.knspace.keyboard.text if root.knspace.keyboard else ''
<RootWidget@BoxLayout>:
Typist
Typist
In this example, we wanted two typists, rather than a single keyboard.
But within a typist we wanted to be able to use names, even though the
typists share identical names. To do this, we have
`knspace: getattr(self, 'knspace').clone()`. This forks the current namespace
(which happens to be the default, :attr:`knspace`) and creates a namespace
shared by widgets that are offspring of that `Typist`.
Now, each `Typist` gets its own namespace, while still sharing the
default namespaces from which it was cloned for widgets not in its namespace.
`knspace_key: 'knspace_parent'` is required, since a `Popup` is not a child
of the `Typist`, but they do have to share the namespace, so instead of using
`parent` to find the next namespace up the tree, we use the specified
`knspace_parent` attribute, which points to the Typist and hence its
namespace.
Traditional namespace
---------------------
In the above example, we accessed the namespace using e.g.
`root.knspace.keyboard`. We can also access it without having access to e.g.
`root` like in a traditional namespace access.
We can change the above `RootWidget` into::
<RootWidget@KNSpaceBehavior+BoxLayout>:
name: 'root'
Typist
Typist
Now, we can do::
knspace.root.children[0].knspace.keyboard.hint_text = 'Type something else'
And the second Typist's keyboard will have a different hint text. Of course
we could also have done
`root.children[0].knspace.keyboard.hint_text = 'Type something else'` if we
had access to the root widget.
'''
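# A minimal pure-Python sketch of the same pattern (hypothetical class and
# name; assumes a standard Kivy install): naming a KNSpaceBehavior widget
# stores its proxy_ref on the resolved namespace, so it can be looked up
# globally through ``knspace``.
#
#     from kivy.uix.label import Label
#
#     class NamedLabel(KNSpaceBehavior, Label):
#         pass
#
#     lbl = NamedLabel(name='status')
#     assert knspace.status == lbl.proxy_ref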
__all__ = ('KNSpace', 'KNSpaceBehavior', 'knspace')
from kivy.event import EventDispatcher
from kivy.properties import StringProperty, ObjectProperty, AliasProperty
from kivy.lang import Factory
knspace = None
'''The default :class:`KNSpace` namespace. If a :class:`KNSpace` namespace has
not been assigned to a :class:`KNSpaceBehavior` instance, then this
:class:`KNSpace` namespace serves as the default namespace.
See the examples and :class:`KNSpaceBehavior` for more details.
'''
class KNSpace(EventDispatcher):
'''Each :class:`KNSpace` instance is a namespace that stores the named Kivy
objects when they are associated with this namespace. Each named object is
stored as the value of a Kivy :class:`~kivy.properties.ObjectProperty` of
this instance whose property name is the object's given name. Both `rebind`
and `allownone` are set to `True` for the property.
See :attr:`KNSpaceBehavior` for details on how a namespace is associated
with a named object.
When storing an object in the namespace, the object's `proxy_ref` is
stored if the object has such an attribute.
:Parameters:
`parent`: (internal) A :class:`KNSpace` instance or None.
If specified, it's a parent namespace, in which case, the current
namespace will have in its namespace all its named objects
as well as the named objects of its parent and parent's parent
etc. See :meth:`clone` for more details.
'''
parent = None
'''(internal) The parent namespace instance, :class:`KNSpace`, or None. See
:meth:`clone`.
'''
__has_applied = None
def __init__(self, parent=None, **kwargs):
super(KNSpace, self).__init__(**kwargs)
self.parent = parent
self.__has_applied = set(self.properties().keys())
def __setattr__(self, name, value):
prop = super(KNSpace, self).property(name, quiet=True)
has_applied = self.__has_applied
if prop is None:
if hasattr(self, name):
super(KNSpace, self).__setattr__(name, value)
else:
value = getattr(value, 'proxy_ref', value)
self.apply_property(
**{name:
ObjectProperty(value, rebind=True, allownone=True)}
)
has_applied.add(name)
elif name not in has_applied:
self.apply_property(**{name: prop})
has_applied.add(name)
value = getattr(value, 'proxy_ref', value)
super(KNSpace, self).__setattr__(name, value)
else:
value = getattr(value, 'proxy_ref', value)
super(KNSpace, self).__setattr__(name, value)
def __getattr__(self, name):
parent = self.parent
if parent is None:
raise AttributeError(name)
return getattr(parent, name)
def property(self, name, quiet=False):
# needs to overwrite EventDispatcher.property so kv lang will work
prop = super(KNSpace, self).property(name, quiet=quiet)
if prop is not None:
return prop
prop = ObjectProperty(None, rebind=True, allownone=True)
self.apply_property(**{name: prop})
self.__has_applied.add(name)
return prop
def clone(self):
'''Creates a new :class:`KNSpace` instance which will have access to
all the named objects in the current namespace but will also have a
namespace of its own that is unique to it.
Any new names added to a :class:`KNSpaceBehavior` associated with
        this instance will be accessible only through this instance
        and not its parent(s). However, when looking up a named object using
        this namespace, if the object is not found in this namespace we search
        its parent namespace, and so on, until it is found or the lookup fails.
'''
return KNSpace(parent=self)
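    # Lookup sketch (hypothetical names): a clone resolves names it does not
    # define itself by falling back to the namespace it was cloned from.
    #
    #     base = KNSpace()
    #     base.label = some_widget        # hypothetical named object
    #     child = base.clone()
    #     child.label is base.label       # True; inherited from the parent
    #     child.other = another_widget    # visible only through ``child``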
class KNSpaceBehavior(object):
'''Inheriting from this class allows naming of the inherited object, which
is then added to the associated namespace :attr:`knspace` and accessible
through it.
'''
_knspace = ObjectProperty(None, allownone=True)
_name = StringProperty('')
__last_knspace = None
__callbacks = None
def __init__(self, knspace=None, **kwargs):
self.knspace = knspace
super(KNSpaceBehavior, self).__init__(**kwargs)
def __knspace_clear_callbacks(self, *largs):
for obj, name, uid in self.__callbacks:
obj.unbind_uid(name, uid)
last = self.__last_knspace
self.__last_knspace = self.__callbacks = None
assert self._knspace is None
assert last
new = self.__set_parent_knspace()
if new is last:
return
self.property('_knspace').dispatch(self)
name = self.name
if not name:
return
if getattr(last, name) == self:
setattr(last, name, None)
if new:
setattr(new, name, self)
else:
raise ValueError('Object has name "{}", but no namespace'.
format(name))
def __set_parent_knspace(self):
callbacks = self.__callbacks = []
fbind = self.fbind
append = callbacks.append
parent_key = self.knspace_key
clear = self.__knspace_clear_callbacks
append((self, 'knspace_key', fbind('knspace_key', clear)))
if not parent_key:
self.__last_knspace = knspace
return knspace
append((self, parent_key, fbind(parent_key, clear)))
parent = getattr(self, parent_key, None)
while parent is not None:
fbind = parent.fbind
            # ``0`` is a sentinel meaning "no knspace attribute on parent";
            # avoid ``is not 0``, which relies on CPython's small-int caching.
            parent_knspace = getattr(parent, 'knspace', 0)
            if parent_knspace != 0:
append((parent, 'knspace', fbind('knspace', clear)))
self.__last_knspace = parent_knspace
return parent_knspace
append((parent, parent_key, fbind(parent_key, clear)))
new_parent = getattr(parent, parent_key, None)
if new_parent is parent:
break
parent = new_parent
self.__last_knspace = knspace
return knspace
def _get_knspace(self):
_knspace = self._knspace
if _knspace is not None:
return _knspace
if self.__callbacks is not None:
return self.__last_knspace
# we only get here if we never accessed our knspace
return self.__set_parent_knspace()
def _set_knspace(self, value):
if value is self._knspace:
return
knspace = self._knspace or self.__last_knspace
name = self.name
if name and knspace:
setattr(knspace, name, None) # reset old namespace
if value == 'clone':
if not knspace:
knspace = self.knspace # get parents in case we haven't before
if knspace:
value = knspace.clone()
else:
                raise ValueError('Cannot clone with no namespace')
for obj, prop_name, uid in self.__callbacks or []:
obj.unbind_uid(prop_name, uid)
self.__last_knspace = self.__callbacks = None
if name:
if value is None: # if None, first update the recursive knspace
knspace = self.__set_parent_knspace()
if knspace:
setattr(knspace, name, self)
self._knspace = None # cause a kv trigger
else:
setattr(value, name, self)
knspace = self._knspace = value
if not knspace:
raise ValueError('Object has name "{}", but no namespace'.
format(name))
else:
if value is None:
self.__set_parent_knspace() # update before trigger below
self._knspace = value
knspace = AliasProperty(
_get_knspace, _set_knspace, bind=('_knspace', ), cache=False,
rebind=True, allownone=True)
'''The namespace instance, :class:`KNSpace`, associated with this widget.
When this widget is named with :attr:`name` the name is added to the
:attr:`knspace` namespace pointing to this widget.
If the namespace has been set with a :class:`KNSpace` instance, e.g. with
    `self.knspace = ...`, then that instance is used. Otherwise, we look at
    the object referenced by the property named by :attr:`knspace_key`. If
    that object has a knspace property, we use that namespace. Otherwise, we
    move on to that object's own :attr:`knspace_key` target, walking up the
    parent tree until we find one with a namespace instance. Finally, if there's no parent with
a namespace, the default :attr:`~cutils.knspace.knspace` namespace is used.
Both `rebind` and `allownone` are `True`.
'''
knspace_key = StringProperty('parent', allownone=True)
'''The name of the property of this instance, to use to find the namespace
associated with this instance. Defaults to `'parent'` so that we'll look
up the parent tree to find the namespace. See :attr:`knspace`.
When `None`, we won't search the parent tree for the namespace.
`allownone` is `True`.
'''
def _get_name(self):
return self._name
def _set_name(self, value):
old_name = self._name
knspace = self.knspace
if old_name and knspace:
setattr(knspace, old_name, None)
self._name = value
if value:
if knspace:
setattr(knspace, value, self)
else:
raise ValueError('Object has name "{}", but no namespace'.
format(value))
name = AliasProperty(_get_name, _set_name, bind=('_name', ), cache=False)
'''The name given to this object. If named, the name will be added to the
associated :attr:`knspace` and will point to the `proxy_ref` of this
object.
When named, one can access this object by e.g. knspace.name, where `name`
is the given name of this instance. See :attr:`knspace` and the module
description for more details.
'''
knspace = KNSpace()
Factory.register('KNSpaceBehavior', cls=KNSpaceBehavior)
|
mit
| 1,435,365,778,809,909,800
| 35.63789
| 101
| 0.623969
| false
| 4.024763
| false
| false
| false
|
nikdoof/django-eveigb
|
test_project/settings.py
|
1
|
5384
|
# Django settings for test_project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test.db3', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '29)2ec_!4fy$mb0c+u7sz5-q84@tjp(b!atfh-3v@0^c9c=do*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'test_project.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'eveigb',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
bsd-3-clause
| 2,804,299,625,662,578,700
| 33.292994
| 127
| 0.685921
| false
| 3.715666
| false
| false
| false
|
prashrock/Python
|
leetCode/largest_number/create_largest_number_from_array.py
|
1
|
1340
|
# Use a custom sort comparator to sort the integers
# Converted the sorted integer array into a string
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
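# Note: this adapter is equivalent to functools.cmp_to_key, available in the
# standard library since Python 2.7/3.2.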
# @param x, first integer
# @param y, second integer
# @return (xy - yx)
def cmp_aggregate(x, y):
str_xy = ''.join((str(x), str(y)))
str_yx = ''.join((str(y), str(x)))
return int(str_xy) - int(str_yx)
#Sort with a custom comparator and get descending order
def largestNumber(num):
sorted_num = sorted(num, key=cmp_to_key(cmp_aggregate), reverse=True)
print sorted_num
sorted_str = ''.join(map(str, sorted_num))
if(int(sorted_str) == 0): return '0'
else: return sorted_str
num = [3, 30, 34, 5, 9]
print num
print largestNumber(num)
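# Under cmp_aggregate, [3, 30, 34, 5, 9] orders as [9, 5, 34, 3, 30]
# (e.g. '330' > '303', so 3 precedes 30), giving the output '9534330'.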
|
gpl-2.0
| 4,418,024,888,891,023,400
| 31.682927
| 73
| 0.581343
| false
| 3.252427
| false
| false
| false
|
emulbreh/lymph
|
lymph/core/events.py
|
1
|
3099
|
import re
import logging
from lymph.core.interfaces import Component
from lymph.core import trace
logger = logging.getLogger(__name__)
class Event(object):
def __init__(self, evt_type, body, source=None, headers=None, event_id=None):
self.event_id = event_id
self.evt_type = evt_type
self.body = body
self.source = source
self.headers = headers or {}
def __getitem__(self, key):
return self.body[key]
def __iter__(self):
return iter(self.body)
def __repr__(self):
return '<Event type=%r body=%r>' % (self.evt_type, self.body)
def __str__(self):
return '{type=%s id=%s}' % (self.evt_type, self.event_id)
@classmethod
def deserialize(cls, data):
return cls(data.get('type'), data.get('body', {}), source=data.get('source'), headers=data.get('headers'))
def serialize(self):
return {
'type': self.evt_type,
'headers': self.headers,
'body': self.body,
'source': self.source,
}
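    # Round-trip sketch: Event.deserialize(evt.serialize()) rebuilds an
    # equivalent event; note the event_id is not part of the serialized form.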
class EventHandler(Component):
def __init__(self, interface, func, event_types, sequential=False, queue_name=None, active=True):
self.func = func
self.event_types = event_types
self.sequential = sequential
self.active = active
self.interface = interface
self._queue_name = queue_name or func.__name__
@property
def queue_name(self):
return '%s-%s' % (self.interface.name, self._queue_name)
@queue_name.setter
def queue_name(self, value):
self._queue_name = value
def on_start(self):
self.interface.container.subscribe(self, consume=self.active)
def __call__(self, event, *args, **kwargs):
trace.set_id(event.headers.get('trace_id'))
logger.debug('<E %s', event)
return self.func(self.interface, event, *args, **kwargs)
class EventDispatcher(object):
wildcards = {
'#': r'[\w.]*(?=\.|$)',
'*': r'\w+',
}
def __init__(self, patterns=()):
self.patterns = []
self.update(patterns)
def compile(self, key):
words = (self.wildcards.get(word, re.escape(word)) for word in key.split('.'))
return re.compile('^%s$' % r'\.'.join(words))
def register(self, pattern, handler):
self.patterns.append((
self.compile(pattern),
pattern,
handler,
))
def __iter__(self):
for regex, pattern, handler in self.patterns:
yield pattern, handler
def update(self, other):
for pattern, handler in other:
self.register(pattern, handler)
def dispatch(self, evt_type):
for regex, pattern, handler in self.patterns:
if regex.match(evt_type):
yield pattern, handler
def __call__(self, event):
handlers = set()
for pattern, handler in self.dispatch(event.evt_type):
if handler not in handlers:
handlers.add(handler)
handler(event)
return bool(handlers)
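# A small usage sketch (hypothetical event types and handler):
#
#     dispatcher = EventDispatcher()
#     dispatcher.register('user.#', lambda event: logger.info('got %s', event))
#     dispatcher(Event('user.created', {'id': 1}))  # matched by 'user.#' -> True
#     dispatcher(Event('billing.paid', {}))         # no handler -> False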
|
apache-2.0
| 1,420,763,787,660,582,100
| 26.918919
| 114
| 0.571475
| false
| 3.878598
| false
| false
| false
|
mesocentrefc/Janua-SMS
|
janua/actions/sms_usage.py
|
1
|
2426
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# Copyright (c) 2016 Cédric Clerget - HPC Center of Franche-Comté University
#
# This file is part of Janua-SMS
#
# http://github.com/mesocentrefc/Janua-SMS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from janua import jdb
from janua.actions.action import Action
from janua.utils.utilities import get_role
from janua.ws.services import urlconfig, jsonify
class SmsUsage(Action):
"""
Get SMS usage based on administrator quota
* Sample request with administrator level:
.. code-block:: javascript
GET /sms-usage HTTP/1.1
Host: janua.mydomain.com
Content-Type: application/json
JanuaAuthToken: abcdef123456789
Sample response:
.. code-block:: javascript
HTTP/1.1 200
{
"smsusage": {
"global": 18,
"quota": "100 M",
"sent": 18
}
}
* Sample request with supervisor level:
.. code-block:: javascript
GET /sms-usage HTTP/1.1
Host: janua.mydomain.com
Content-Type: application/json
Sample response:
.. code-block:: javascript
HTTP/1.1 200
{
"smsusage": {
"quota": "200 D",
"sent": 4
}
}
"""
category = '__INTERNAL__'
@urlconfig('/sms-usage')
def web(self):
admin = jdb.admin.get_by_phone(self.phone_number)
reached, numsms = jdb.sms.is_admin_quota_reached(admin)
quota = admin.sms_quota
data = {'sent': int(numsms), 'quota': quota}
if get_role(admin) == 'admin':
data.update({'global': int(jdb.sms.month_usage())})
return jsonify(smsusage=data)
|
gpl-2.0
| 3,349,284,974,751,728,000
| 25.347826
| 76
| 0.60066
| false
| 3.805338
| false
| false
| false
|
nathanbjenx/cairis
|
cairis/controllers/TemplateGoalController.py
|
1
|
3319
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
if (sys.version_info > (3,)):
import http.client
from http.client import BAD_REQUEST, CONFLICT, NOT_FOUND, OK
else:
import httplib
from httplib import BAD_REQUEST, CONFLICT, NOT_FOUND, OK
from flask import session, request, make_response
from flask_restful import Resource
from cairis.data.TemplateGoalDAO import TemplateGoalDAO
from cairis.tools.JsonConverter import json_serialize
from cairis.tools.MessageDefinitions import TemplateGoalMessage
from cairis.tools.ModelDefinitions import TemplateGoalModel
from cairis.tools.SessionValidator import get_session_id
__author__ = 'Shamal Faily'
class TemplateGoalsAPI(Resource):
def get(self):
session_id = get_session_id(session, request)
constraint_id = request.args.get('constraint_id', -1)
dao = TemplateGoalDAO(session_id)
tgs = dao.get_template_goals(constraint_id=constraint_id)
dao.close()
resp = make_response(json_serialize(tgs, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
def post(self):
session_id = get_session_id(session, request)
dao = TemplateGoalDAO(session_id)
new_tg = dao.from_json(request)
dao.add_template_goal(new_tg)
dao.close()
resp_dict = {'message': 'Template Goal successfully added'}
resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    resp.headers['Content-Type'] = 'application/json'
return resp
class TemplateGoalByNameAPI(Resource):
def get(self, name):
session_id = get_session_id(session, request)
dao = TemplateGoalDAO(session_id)
found_tg = dao.get_template_goal(name)
dao.close()
resp = make_response(json_serialize(found_tg, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
def put(self, name):
session_id = get_session_id(session, request)
dao = TemplateGoalDAO(session_id)
upd_tg = dao.from_json(request)
dao.update_template_goal(upd_tg, name)
dao.close()
resp_dict = {'message': 'Template Goal successfully updated'}
resp = make_response(json_serialize(resp_dict), OK)
    resp.headers['Content-Type'] = 'application/json'
return resp
def delete(self, name):
session_id = get_session_id(session, request)
dao = TemplateGoalDAO(session_id)
dao.delete_template_goal(name)
dao.close()
resp_dict = {'message': 'Template Goal successfully deleted'}
resp = make_response(json_serialize(resp_dict), OK)
    resp.headers['Content-Type'] = 'application/json'
return resp
|
apache-2.0
| 2,627,098,913,465,853,000
| 32.525253
| 78
| 0.726424
| false
| 3.631291
| false
| false
| false
|
andyr0id/PyGFNN
|
examples/gfnn/example1F.py
|
1
|
1657
|
#!/usr/bin/env python
__author__ = 'Andrew J. Lambert, andy@andyroid.co.uk'
"""
example1F
A one layer network with fixed internal connections
"""
from pygfnn.tools.plotting.gfnn import *
import pygfnn.tools.shortcuts as gfnn
import numpy as np
import timeit
import matplotlib.pyplot as plt
import scipy.io as sio
if __name__ == '__main__':
# Network parameters
oscParams = { 'a': 1, 'b1': -1, 'b2': -1000, 'd1': 0, 'd2': 0, 'e': 1 } # Limit cycle
learnParams = gfnn.NOLEARN_ALLFREQ
freqDist = { 'fspac': 'log', 'min': 0.5, 'max': 8 }
# Make network
n = gfnn.buildGFNN(196, oscParams = oscParams, freqDist = freqDist,
learnParams = learnParams)
n.recurrentConns[0].c0[:] = gfnn.getInitC(n, n, [(1,1), (1,2), (1,3), (1,4), (1,6), (1,8), (2,3), (3,4), (3,8)], thresh=0.01)
n.reset()
# First plots, showing initial connection state
ampFig1, phaseFig1 = plotConns(n.recurrentConns[0].c, freqDist['min'], freqDist['max'])
# Stimulus - 50 seconds of 1Hz sin
t = np.arange(0, 50, n['h'].dt)
x = np.sin(2 * np.pi * 1 * t) * 0.1
# Run the network
timer = timeit.default_timer
start = timer()
for i in range(len(t)):
out = n.activate(x[i])
end = timer()
print('Elapsed time is %f seconds' % (end - start))
if learnParams is not None:
# Second plots, showing final connection state
ampFig2, phaseFig2 = plotConns(n.recurrentConns[0].c, freqDist['min'], freqDist['max'])
Z = n['h'].outputbuffer[:n.offset]
fig1 = ampx(Z, n.dt, freqDist['min'], freqDist['max'])
fig2 = phasex(Z, n.dt, freqDist['min'], freqDist['max'])
plt.show()
|
gpl-2.0
| 3,656,434,342,488,919,600
| 29.685185
| 129
| 0.608328
| false
| 2.780201
| false
| false
| false
|
liberiun/cynin-intranet
|
src/ubify.viewlets/ubify/viewlets/browser/typetitle.py
|
1
|
3657
|
###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at support@cynapse.com with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# legal@cynapse.com
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
from zope.component import getMultiAdapter
from Products.CMFCore.utils import getToolByName
from ubify.viewlets.config import plone_site_type_title
from ubify.policy import CyninMessageFactory as _
class TypetitleViewlet(ViewletBase):
render = ViewPageTemplateFile('typetitle.pt')
def update(self):
portal_state = getMultiAdapter((self.context, self.request),name=u'plone_portal_state')
context_state = getMultiAdapter((self.context, self.request),name=u'plone_context_state')
tools = getMultiAdapter((self.context, self.request), name=u'plone_tools')
typetool= getToolByName(self.context, 'portal_types')
portal_title = portal_state.portal_title()
object_title = context_state.object_title()
self.object_icon = self.context.icon
object_typename = self.context.portal_type
object_typeobj = typetool[object_typename]
self.typeiconname = object_typeobj.icon_expr
if object_typeobj.title == '' and self.context.portal_type.lower() == 'plone site':
self.typetitle = plone_site_type_title
else:
self.typetitle = _(object_typeobj.title,object_typeobj.title)
self.app_name = object_title
if self.context.portal_type.lower() == 'plone site':
self.tdescription = 'cyn.in site|A cyn.in site allows instant collaboration among peers and provides a central social computer and network.'
else:
self.tdescription = self.typetitle + '|' + object_typeobj.description
self.isaddscreen = False
if hasattr(context_state.parent(),'portal_type') and context_state.parent().portal_type == 'TempFolder':
self.isaddscreen = True
|
gpl-3.0
| 1,373,551,312,006,338,000
| 50.507042
| 152
| 0.710965
| false
| 3.857595
| false
| false
| false
|
rosenbrockc/fortpy
|
fortpy/stats/bp.py
|
1
|
5243
|
"""Methods for testing a code library against Fortran best practices to
help uncover subtle bugs that took a while for us to track down. See
especially http://www.cs.rpi.edu/~szymansk/OOF90/bugs.html"""
def _exec_check_pointers(executable):
"""Checks the specified executable for the pointer condition that not
all members of the derived type have had their values set.
Returns (list of offending members, parameter name).
"""
oparams = []
pmembers = {}
xassigns = map(lambda x: x.lower().strip(), executable.external_assignments())
def add_offense(pname, member):
"""Adds the specified member as an offender under the specified parameter."""
if pname not in oparams:
oparams.append(pname)
if pname not in pmembers:
pmembers[pname] = [member]
else:
pmembers[pname].append(member)
def check_buried(executable, pname, member):
"""Checks whether the member has its value changed by one of the dependency
subroutines in the executable.
"""
for d in executable.dependencies:
if pname in d.argnames:
pindex = d.argnames.index(pname)
dtarget = d.target
if dtarget is not None:
mparam = dtarget.ordered_parameters[pindex]
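                # NOTE: ``mparam`` is looked up but never used; this helper is
                # an unfinished stub, and the caller instead relies on
                # executable.changed() to detect assignments made in buried
                # subroutine calls.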
for pname, param in executable.parameters.items():
if param.direction == "(out)" and param.is_custom:
utype = param.customtype
if utype is None:
continue
for mname, member in utype.members.items():
key = "{}%{}".format(pname, mname).lower().strip()
if key not in xassigns:
#We also need to check the dependency calls to other, buried subroutines.
compname = "{}%{}".format(pname, mname).lower()
if executable.changed(compname) is None:
add_offense(pname, member)
return (oparams, pmembers)
def _type_check_pointers(utype):
"""Checks the user-derived type for non-nullified pointer array declarations
in its base definition.
Returns (list of offending members).
"""
result = []
for mname, member in utype.members.items():
if ("pointer" in member.modifiers and member.D > 0 and
(member.default is None or "null" not in member.default)):
result.append(member)
return result
def check_pointers(parser, codedir=None, mfilter=None, recursive=False):
"""Checks the modules in the specified code parser to see if they
have common, but subtle, pointer bugs in:
    1. subroutines with a parameter of intent(out) and user-derived type
       *must* set *all* members of that parameter or they will have an
       *undefined* status.
    2. pointer-type arrays that are neither nullified nor set to a valid
       target may still return 'T' when passed to `associated`. Best practice
       is to nullify pointer arrays in user-derived types as the default value
       on those types.
:arg parser: [fortpy.code.CodeParser] with the modules to search *already loaded*.
:arg codedir: specify the full path to the library whose modules should be searched,
just another way to filter which modules are generating the warnings.
:arg mfilter: filter to apply to module names; can use the wildcard standard
      from bash.
    :arg recursive: if True, do not restrict results to modules under `codedir`.
"""
from fnmatch import fnmatch
from fortpy.msg import std, set_verbosity, info
set_verbosity(0)
W1 = " {} '{}' does not set the value of members '{}' in parameter '{}'."
W2 = " Type '{}' does not nullify members '{}' on creation."
offenders = {}
for (modname, module) in parser.modules.items():
if not recursive and codedir is not None and not codedir.lower() in module.filepath.lower():
continue
if mfilter is not None and not fnmatch(module.name.lower(), mfilter.lower()):
continue
#Test the first condition above for all subroutines in the module; also handle
#the recursively defined subroutines.
hprinted = False
for xname, xvalue in module.executables.items():
oparams, pmembers = _exec_check_pointers(xvalue)
if len(oparams) > 0:
if not hprinted:
info("Best practice suggestions: {}".format(module.filepath))
hprinted = True
for oparam in oparams:
plist = ', '.join([p.name for p in pmembers[oparam]])
std(W1.format(type(xvalue).__name__, xname, plist, oparam), 0)
offenders[xvalue.full_name] = (oparams, pmembers)
for tname, tvalue in module.types.items():
result = _type_check_pointers(tvalue)
if len(result) > 0:
if not hprinted:
info("Best practice suggestions: {}".format(module.filepath))
hprinted = True
plist = ', '.join([p.name for p in result])
std(W2.format(tname, plist), 0)
                offenders[tvalue.full_name] = result  # keyed by the offending type
return offenders
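# Usage sketch (``parser`` must be a fortpy.code.CodeParser with its modules
# already loaded, as the docstring above requires; the filter is hypothetical):
#
#     offenders = check_pointers(parser, mfilter='crystal*')
#     # executables map to (oparams, pmembers) tuples; types map to lists of
#     # members that are never nullified.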
|
mit
| -214,200,888,926,042,880
| 42.330579
| 100
| 0.60576
| false
| 4.262602
| false
| false
| false
|
mitoNGS/MToolBox
|
aux/filter_HF.py
|
1
|
2956
|
#!/usr/bin/env python
import fileinput
import sys, os
def usage():
print '''
This script is compatible with MToolBox versions < 1.2 only
This script filters the MToolBox vcf file based on Heteroplasmy threshold
Usage:
    filter_HF.py <sample_name> <vcf_file> <HF_threshold[float]> <DP_threshold[float]> <out_type[vcf|txt]> <outfilename> <convert_to_homoplasmy[Yes|No]> \n<vcf_file> can also be .gz file\n\n<convert_to_homoplasmy> is boolean and takes Yes or No values and converts HF >= 0.9 to GT=1/1. Useful for haplogroup prediction with other methods (e.g. haplogrep)\n\n'''
if __name__ == "__main__":
    if len(sys.argv[1:]) != 7:
        sys.stderr.write('ERROR: wrong number of arguments\n')
usage()
sys.exit(1)
samplename,vcf,HFt,DPt,out_type,outfile,homo_convert= sys.argv[1:]
HFt = float(HFt)
DPt = float(DPt)
out = open(outfile,'w')
homo_convert = str(homo_convert)
if homo_convert not in ['Yes','No']:
sys.stderr.write('Values accepted for <convert_to_homoplasmy> are [Yes|No].\nExit!\n')
sys.exit(1)
    if 'gz' in vcf or 'gzip' in vcf or 'bz2' in vcf:
ifile = fileinput.input(vcf,openhook=fileinput.hook_compressed)
else:
ifile = fileinput.input(vcf)
for line in ifile:
if line.startswith('##'):
if out_type == 'vcf':
command_string = "##contig=<ID=chrMT,length=16569>\n##filter_VCF_command=filter_vcf.py {0} {1} {2} {3} {4} {5}\n".format(vcf,HFt,DPt,out_type,outfile,homo_convert)
out.write(line)
else:
pass
else:
if line.startswith('#CHROM') and out_type == 'vcf':
out.write(command_string)
line = line.split('\t')
line[-1] = samplename+'\n'
line = '\t'.join(line)
out.write(line)
elif line.startswith('#CHROM') and out_type == 'txt':
header='CHROM\tPOS\tID\tREF\tALT\tDP\tHF\tCIL\tCIU\t'+samplename
out.write(header+'\n')
else:
line = line.split('\t')
geno,DPv,HFv_l,CIL,CIU = line[-1].split(':')
geno = geno.split('/')
if '0' in geno:
geno.remove('0')
HFv_l = HFv_l.split(',')
CIL = CIL.split(',')
CIU = CIU.split(',')
ALT = line[4].split(',')
c =0
while c < (len(geno)):
HFv = float(HFv_l[c])
CILv = float(CIL[c])
CIUv = float(CIU[c])
DPv = float(DPv)
ALTv = str(ALT[c])
if DPv >= float(DPt) and HFv >= float(HFt):
if out_type == 'txt':
res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,DPv,HFv,CILv,CIUv,samplename]))
out.write(res+'\n')
else:
if HFv == 1:
res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,'.','PASS','AC=2,AN=2','GT','1/1']))
elif HFv >= 0.9 and homo_convert == 'Yes':
res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,'.','PASS','AC=2,AN=2','GT','1/1']))
else:
res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,'.','PASS','AC=1,AN=2','GT','0/1']))
out.write(res+'\n')
else:
pass
c += 1
out.close()
|
gpl-3.0
| -3,761,606,398,086,951,400
| 33.776471
| 356
| 0.60115
| false
| 2.436933
| false
| false
| false
|
fogleman/DCPU-16
|
app/assembler.py
|
1
|
16148
|
import ply.lex as lex
import ply.yacc as yacc
# Constants
SIZE = 0x10000
# Lookups
BASIC_OPCODES = {
'SET': 0x01,
'ADD': 0x02,
'SUB': 0x03,
'MUL': 0x04,
'MLI': 0x05,
'DIV': 0x06,
'DVI': 0x07,
'MOD': 0x08,
'MDI': 0x09,
'AND': 0x0a,
'BOR': 0x0b,
'XOR': 0x0c,
'SHR': 0x0d,
'ASR': 0x0e,
'SHL': 0x0f,
'IFB': 0x10,
'IFC': 0x11,
'IFE': 0x12,
'IFN': 0x13,
'IFG': 0x14,
'IFA': 0x15,
'IFL': 0x16,
'IFU': 0x17,
'ADX': 0x1a,
'SUX': 0x1b,
'STI': 0x1e,
'STD': 0x1f,
}
SPECIAL_OPCODES = {
'JSR': 0x01,
'INT': 0x08,
'IAG': 0x09,
'IAS': 0x0a,
'RFI': 0x0b,
'IAQ': 0x0c,
'HWN': 0x10,
'HWQ': 0x11,
'HWI': 0x12,
}
COMMAND_OPCODES = {
'NOP': 0x0000,
'BRK': 0x0040,
'RFI': 0x0160,
}
REGISTERS = {
'A': 0x0,
'B': 0x1,
'C': 0x2,
'X': 0x3,
'Y': 0x4,
'Z': 0x5,
'I': 0x6,
'J': 0x7,
}
DST_CODES = {
'PUSH': 0x18,
'PEEK': 0x19,
'SP': 0x1b,
'PC': 0x1c,
'EX': 0x1d,
}
SRC_CODES = {
'POP': 0x18,
'PEEK': 0x19,
'SP': 0x1b,
'PC': 0x1c,
'EX': 0x1d,
}
# Reverse Lookups
REV_BASIC_OPCODES = dict((v, k) for k, v in BASIC_OPCODES.items())
REV_SPECIAL_OPCODES = dict((v, k) for k, v in SPECIAL_OPCODES.items())
REV_COMMAND_OPCODES = dict((v, k) for k, v in COMMAND_OPCODES.items())
REV_REGISTERS = dict((v, k) for k, v in REGISTERS.items())
REV_DST_CODES = dict((v, k) for k, v in DST_CODES.items())
REV_SRC_CODES = dict((v, k) for k, v in SRC_CODES.items())
# Helper Functions
def pretty_value(x):
return '%d' % x if x <= 0xff else '0x%04x' % x
def do_lookup(lookup, word):
if isinstance(word, basestring):
try:
word = lookup[word]
except KeyError:
raise Exception('Undefined symbol: "%s"' % word)
return word
# Classes
class Program(object):
def __init__(self, instructions):
self.instructions = instructions
self.text = None
self.lookup = {}
self.size = 0
for instruction in instructions:
if instruction.offset is None:
instruction.offset = self.size
self.size += instruction.size
if isinstance(instruction, Label):
self.lookup[instruction.name] = instruction.offset
def assemble(self):
result = []
for instruction in self.instructions:
result.extend(instruction.assemble(self.lookup))
return result
def pretty(self):
lines = []
skip = False
for instruction in self.instructions:
line = instruction.pretty().strip()
if isinstance(instruction, Label):
pad = 0
else:
pad = 4 if skip else 2
line = '%s%s' % (' ' * pad, line)
data = instruction.assemble(self.lookup)
if data and not isinstance(instruction, Data):
pad = ' ' * (32 - len(line))
data = ' '.join('%04x' % x for x in data)
line = '%s%s; %s' % (line, pad, data)
lines.append(line)
skip = instruction.conditional
return '\n'.join(lines)
class Data(object):
def __init__(self, data):
self.data = data
self.size = len(data)
self.offset = None
self.conditional = False
def assemble(self, lookup):
return [do_lookup(lookup, word) for word in self.data]
def pretty(self):
data = ', '.join('"%s"' % x if isinstance(x, str) else pretty_value(x)
for x in self.data)
return 'DAT %s' % data
class Reserve(object):
def __init__(self, size):
self.size = size
self.offset = None
self.conditional = False
def assemble(self, lookup):
return [0] * self.size
def pretty(self):
return 'RESERVE %s' % pretty_value(self.size)
class Label(object):
def __init__(self, name, offset=None):
self.name = name
self.size = 0
self.offset = offset
self.conditional = False
def assemble(self, lookup):
return []
def pretty(self):
return ':%s' % self.name
class BasicInstruction(object):
def __init__(self, op, dst, src):
self.op = op
self.dst = dst
self.src = src
value = self.op
value |= (self.dst.value & 0x1f) << 5
value |= (self.src.value & 0x3f) << 10
self.value = value
self.size = 1 + dst.size + src.size
self.offset = None
self.conditional = 0x10 <= self.op <= 0x17
def assemble(self, lookup):
result = [self.value]
result.extend(self.src.assemble(lookup))
result.extend(self.dst.assemble(lookup))
return result
def pretty(self):
op = REV_BASIC_OPCODES[self.op]
dst = self.dst.pretty()
src = self.src.pretty()
return '%s %s, %s' % (op, dst, src)
class SpecialInstruction(object):
def __init__(self, op, src):
self.op = op
self.src = src
value = 0
value |= (self.op & 0x1f) << 5
value |= (self.src.value & 0x3f) << 10
self.value = value
self.size = 1 + src.size
self.offset = None
self.conditional = False
def assemble(self, lookup):
result = [self.value]
result.extend(self.src.assemble(lookup))
return result
def pretty(self):
op = REV_SPECIAL_OPCODES[self.op]
src = self.src.pretty()
return '%s %s' % (op, src)
class CommandInstruction(object):
def __init__(self, value):
self.value = value
self.size = 1
self.offset = None
self.conditional = False
def assemble(self, lookup):
result = [self.value]
return result
def pretty(self):
return REV_COMMAND_OPCODES[self.value]
class Operand(object):
def __init__(self, codes, value, word=None):
self.codes = codes
self.value = value
self.word = word
self.size = int(word is not None)
def assemble(self, lookup):
return [] if self.word is None else [do_lookup(lookup, self.word)]
def pretty(self):
x = self.value
word = self.word
if isinstance(word, int):
word = pretty_value(word)
if x in REV_REGISTERS:
return REV_REGISTERS[x]
elif x - 0x08 in REV_REGISTERS:
return '[%s]' % REV_REGISTERS[x - 0x08]
elif x - 0x10 in REV_REGISTERS:
return '[%s + %s]' % (REV_REGISTERS[x - 0x10], word)
elif x in self.codes:
return self.codes[x]
elif x == 0x1a:
return 'PICK %s' % word
elif x == 0x1e:
return '[%s]' % word
elif x == 0x1f:
return '%s' % word
elif x == 0x20:
return pretty_value(0xffff)
elif x >= 0x21:
return pretty_value(x - 0x21)
class DstOperand(Operand):
def __init__(self, *args):
super(DstOperand, self).__init__(REV_DST_CODES, *args)
class SrcOperand(Operand):
def __init__(self, *args):
super(SrcOperand, self).__init__(REV_SRC_CODES, *args)
# Lexer Rules
reserved = set(
BASIC_OPCODES.keys() +
SPECIAL_OPCODES.keys() +
COMMAND_OPCODES.keys() +
REGISTERS.keys() +
DST_CODES.keys() +
SRC_CODES.keys() +
['PICK', 'DAT', 'RESERVE']
)
tokens = [
'LBRACK',
'RBRACK',
'PLUS',
'LABEL',
'ID',
'DECIMAL',
'HEX',
'OCT',
'STRING',
'CHAR',
'INC',
'DEC',
'AT'
] + list(reserved)
t_ignore = ' \t\r,'
t_ignore_COMMENT = r';.*'
t_INC = r'\+\+'
t_DEC = r'\-\-'
t_LBRACK = r'\['
t_RBRACK = r'\]'
t_PLUS = r'\+'
t_AT = r'\@'
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_STRING(t):
r'"[^"]*"'
t.value = tuple(ord(x) for x in t.value[1:-1])
return t
def t_CHAR(t):
r"'[^']'"
t.value = ord(t.value[1])
return t
def t_HEX(t):
r'\-?0x[a-fA-F0-9]+'
t.value = int(t.value, 16) % SIZE
return t
def t_OCT(t):
r'\-?0\d+'
t.value = int(t.value, 8) % SIZE
return t
def t_DECIMAL(t):
r'\-?\d+'
t.value = int(t.value) % SIZE
return t
def t_LABEL(t):
r':\.?[a-zA-Z_][a-zA-Z_0-9]*'
t.value = t.value[1:]
if t.value[0] == '.':
t.value = '%s%s' % (t.lexer.label_prefix, t.value)
else:
t.lexer.label_prefix = t.value
return t
def t_ID(t):
r'\.?[a-zA-Z_][a-zA-Z_0-9]*'
upper = t.value.upper()
if upper in reserved:
t.type = upper
t.value = upper
else:
t.type = 'ID'
if t.value[0] == '.':
t.value = '%s%s' % (t.lexer.label_prefix, t.value)
return t
def t_error(t):
raise Exception('Unrecognized token on line %d: %s' % (t.lineno, t.value))
# Parser Rules
def p_program(t):
'program : instructions'
t[0] = Program(t[1])
def p_instructions1(t):
'instructions : instruction instructions'
t[0] = (t[1],) + t[2]
def p_instructions2(t):
'instructions : instruction'
t[0] = (t[1],)
def p_data1(t):
'data : literal data'
arg = t[1] if isinstance(t[1], tuple) else (t[1],)
t[0] = arg + t[2]
def p_data2(t):
'data : literal'
arg = t[1] if isinstance(t[1], tuple) else (t[1],)
t[0] = arg
def p_instruction_data(t):
'instruction : DAT data'
t[0] = Data(t[2])
def p_instruction_reserve(t):
'instruction : RESERVE literal'
t[0] = Reserve(t[2])
def p_instruction_label1(t):
'instruction : LABEL'
t[0] = Label(t[1])
def p_instruction_label2(t):
'instruction : LABEL AT literal'
t[0] = Label(t[1], t[3])
def p_instruction_basic(t):
'instruction : basic_opcode dst_operand src_operand'
t[0] = BasicInstruction(t[1], t[2], t[3])
def p_instruction_special(t):
'instruction : special_opcode src_operand'
t[0] = SpecialInstruction(t[1], t[2])
def p_instruction_command(t):
'instruction : command_opcode'
t[0] = CommandInstruction(t[1])
def p_dst_operand_register(t):
'dst_operand : register'
t[0] = DstOperand(REGISTERS[t[1]])
def p_dst_operand_register_dereference(t):
'dst_operand : LBRACK register RBRACK'
t[0] = DstOperand(REGISTERS[t[2]] + 0x08)
def p_dst_operand_register_literal_dereference1(t):
'dst_operand : LBRACK register PLUS literal RBRACK'
t[0] = DstOperand(REGISTERS[t[2]] + 0x10, t[4])
def p_dst_operand_register_literal_dereference2(t):
'dst_operand : LBRACK literal PLUS register RBRACK'
t[0] = DstOperand(REGISTERS[t[4]] + 0x10, t[2])
def p_dst_operand_pick1(t):
'dst_operand : LBRACK SP PLUS literal RBRACK'
t[0] = DstOperand(0x1a, t[4])
def p_dst_operand_pick2(t):
'dst_operand : LBRACK literal PLUS SP RBRACK'
t[0] = DstOperand(0x1a, t[2])
def p_dst_operand_pick3(t):
'dst_operand : PICK literal'
t[0] = DstOperand(0x1a, t[2])
def p_dst_operand_code(t):
'dst_operand : dst_code'
t[0] = DstOperand(DST_CODES[t[1]])
def p_dst_operand_push(t):
'dst_operand : LBRACK DEC SP RBRACK'
t[0] = DstOperand(0x18)
def p_dst_operand_peek(t):
'dst_operand : LBRACK SP RBRACK'
t[0] = DstOperand(0x19)
def p_dst_operand_literal_dereference(t):
'dst_operand : LBRACK literal RBRACK'
t[0] = DstOperand(0x1e, t[2])
def p_dst_operand_literal(t):
'dst_operand : literal'
t[0] = DstOperand(0x1f, t[1])
def p_src_operand_register(t):
'src_operand : register'
t[0] = SrcOperand(REGISTERS[t[1]])
def p_src_operand_register_dereference(t):
'src_operand : LBRACK register RBRACK'
t[0] = SrcOperand(REGISTERS[t[2]] + 0x08)
def p_src_operand_register_literal_dereference1(t):
'src_operand : LBRACK register PLUS literal RBRACK'
t[0] = SrcOperand(REGISTERS[t[2]] + 0x10, t[4])
def p_src_operand_register_literal_dereference2(t):
'src_operand : LBRACK literal PLUS register RBRACK'
t[0] = SrcOperand(REGISTERS[t[4]] + 0x10, t[2])
def p_src_operand_pick1(t):
'src_operand : LBRACK SP PLUS literal RBRACK'
t[0] = SrcOperand(0x1a, t[4])
def p_src_operand_pick2(t):
'src_operand : LBRACK literal PLUS SP RBRACK'
t[0] = SrcOperand(0x1a, t[2])
def p_src_operand_pick3(t):
'src_operand : PICK literal'
t[0] = SrcOperand(0x1a, t[2])
def p_src_operand_code(t):
'src_operand : src_code'
t[0] = SrcOperand(SRC_CODES[t[1]])
def p_src_operand_pop(t):
'src_operand : LBRACK SP INC RBRACK'
t[0] = SrcOperand(0x18)
def p_src_operand_peek(t):
'src_operand : LBRACK SP RBRACK'
t[0] = SrcOperand(0x19)
def p_src_operand_literal_dereference(t):
'src_operand : LBRACK literal RBRACK'
t[0] = SrcOperand(0x1e, t[2])
def p_src_operand_literal(t):
'src_operand : literal'
if t[1] == 0xffff:
t[0] = SrcOperand(0x20)
elif t[1] <= 0x1e:
t[0] = SrcOperand(0x21 + t[1])
else:
t[0] = SrcOperand(0x1f, t[1])
def p_literal(t):
'''literal : DECIMAL
| HEX
| OCT
| ID
| STRING
| CHAR'''
t[0] = t[1]
def p_basic_opcode(t):
t[0] = BASIC_OPCODES[t[1]]
p_basic_opcode.__doc__ = ('basic_opcode : %s' %
'\n | '.join(sorted(BASIC_OPCODES)))
def p_special_opcode(t):
t[0] = SPECIAL_OPCODES[t[1]]
p_special_opcode.__doc__ = ('special_opcode : %s' %
'\n | '.join(sorted(SPECIAL_OPCODES)))
def p_command_opcode(t):
t[0] = COMMAND_OPCODES[t[1]]
p_command_opcode.__doc__ = ('command_opcode : %s' %
'\n | '.join(sorted(COMMAND_OPCODES)))
def p_register(t):
t[0] = t[1]
p_register.__doc__ = ('register : %s' %
'\n | '.join(sorted(REGISTERS)))
def p_dst_code(t):
t[0] = t[1]
p_dst_code.__doc__ = ('dst_code : %s' %
'\n | '.join(sorted(DST_CODES)))
def p_src_code(t):
t[0] = t[1]
p_src_code.__doc__ = ('src_code : %s' %
'\n | '.join(sorted(SRC_CODES)))
def p_error(t):
raise Exception('Invalid token on line %d: %s' % (t.lineno, t.value))
# Assembler Functions
def create_lexer():
lexer = lex.lex()
lexer.label_prefix = None
return lexer
def create_parser():
parser = yacc.yacc(debug=False, write_tables=False)
return parser
LEXER = create_lexer()
PARSER = create_parser()
def parse(text):
LEXER.lineno = 1
program = PARSER.parse(text, lexer=LEXER)
program.text = text
return program
def parse_file(path):
with open(path) as fp:
text = fp.read()
return parse(text)
def assemble(text):
program = parse(text)
return program.assemble()
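# Example (sketch, using only functions defined in this module):
#
#     assemble('SET A, 0x30')  # -> [0x7c01, 0x0030]
#
# 0x30 exceeds the inline-literal range (values up to 0x1e pack into the
# opcode's src field), so the literal is emitted as a trailing word.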
def assemble_file(path):
with open(path) as fp:
text = fp.read()
return assemble(text)
def pretty(text):
program = parse(text)
return program.pretty()
def pretty_file(path):
with open(path) as fp:
text = fp.read()
return pretty(text)
# Disassembler Functions
def disassemble(words):
def next_word():
return words.pop() if words else 0
instructions = []
use_next_word = set(range(0x10, 0x18) + [0x1a, 0x1e, 0x1f])
words = list(reversed(words))
while words:
word = next_word()
op = word & 0x1f
dst = (word >> 5) & 0x1f
src = (word >> 10) & 0x3f
if op != 0 and op in REV_BASIC_OPCODES:
            # assemble() emits the src operand's extra word before the dst
            # operand's, so read them back in that order
            src = SrcOperand(src, next_word()
                if src in use_next_word else None)
            dst = DstOperand(dst, next_word()
                if dst in use_next_word else None)
instruction = BasicInstruction(op, dst, src)
instructions.append(instruction)
elif op == 0 and dst in REV_SPECIAL_OPCODES:
src = SrcOperand(src, next_word()
if src in use_next_word else None)
instruction = SpecialInstruction(dst, src)
instructions.append(instruction)
else:
instruction = Data([word])
instructions.append(instruction)
program = Program(instructions)
program.text = program.pretty()
return program
def disassemble_file(path):
with open(path, 'rb') as fp:
data = fp.read()
words = [(ord(a) << 8) | ord(b) for a, b in zip(data[::2], data[1::2])]
return disassemble(words)
|
mit
| 4,979,945,226,758,335,000
| 24.631746
| 78
| 0.560503
| false
| 2.906407
| false
| false
| false
|
bnkr/selenit
|
selenibench/scripts.py
|
1
|
3871
|
from __future__ import print_function
import sys, argparse, selenium, contextlib, os, json, traceback
from datetime import datetime as DateTime
from datetime import timedelta as TimeDelta
from selenium.webdriver import Remote as WebDriverRemote
from selenium.webdriver.support.ui import WebDriverWait
class SelenibenchCli(object):
"""Downloads timings from the web performance api."""
def __init__(self, argv):
self.argv = argv
def run(self):
parser = self.get_parser()
settings = self.get_settings(parser)
if settings.log_json:
io = open(settings.log_json, 'w')
else:
io = None
runs = 0
contiguous_failures = 0
while runs < settings.number:
runs += 1
remote = WebDriverRemote(command_executor=settings.webdriver,
desired_capabilities=settings.capabilities)
with contextlib.closing(remote) as driver:
try:
driver.get(settings.url[0])
self.find_load_times(driver, io)
contiguous_failures = 0
except:
if contiguous_failures > 3:
print("Failure getting load times. Giving up.")
raise
contiguous_failures += 1
runs -= 1
print("Failure getting load times. Will try again.")
                    traceback.print_exc()
return 0
def find_load_times(self, driver, log):
def is_loaded(driver):
return driver.execute_script("return (document.readyState == 'complete')")
WebDriverWait(driver, 15).until(is_loaded)
timings = driver.execute_script("return window.performance.timing")
times = {}
for key, value in timings.iteritems():
if not isinstance(value, int):
continue
if value in (True, False):
continue
value = str(value)
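            # window.performance.timing values are unix epoch times in
            # milliseconds: the first ten digits are whole seconds and the
            # remainder (if any) is the millisecond part.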
unixey = int(value[0:10])
if value[10:]:
ms = int(value[10:])
else:
ms = 0
converted = DateTime.fromtimestamp(unixey)
converted += TimeDelta(milliseconds=ms)
times[key] = converted
# This kind of thing really needs unit tests. The thing takes so long
# to run it's just going to break horribly.
if log:
serialisable = dict(
(key, value.isoformat())
for key, value in times.iteritems())
log.write(json.dumps(serialisable))
log.write("\n")
print(times)
def get_parser(self):
parser = argparse.ArgumentParser()
parser.add_argument("url", nargs="+")
parser.add_argument("-w", "--webdriver", required=True,
help="Location to hub or webdriver.")
parser.add_argument("-c", "--capabilities", action="append", default=[],
help="Add a capability.")
parser.add_argument("-n", "--number", type=int, default=1,
help="How many requests to run.")
parser.add_argument("-j", "--log-json", default=None,
help="Log json per-line for each hit.")
return parser
def get_settings(self, parser):
settings = parser.parse_args(self.argv[1:])
capabilities = {'browserName': "firefox"}
for capability in settings.capabilities:
name, value = capability.split("=")
capabilities[name.strip()] = value.strip()
settings.capabilities = capabilities
return settings
def selenibench_main():
"""Command-line entry point."""
cli = SelenibenchCli(sys.argv)
sys.exit(cli.run())
|
mit
| -2,705,399,600,824,886,300
| 32.08547
| 86
| 0.547145
| false
| 4.720732
| false
| false
| false
|
tobykurien/MakerDroid
|
assetsrc/public.mp3/fabmetheus_utilities/vector3index.py
|
1
|
13371
|
"""
Vector3 is a three dimensional vector class.
Below are examples of Vector3 use.
>>> from vector3 import Vector3
>>> origin = Vector3()
>>> origin
0.0, 0.0, 0.0
>>> pythagoras = Vector3( 3, 4, 0 )
>>> pythagoras
3.0, 4.0, 0.0
>>> pythagoras.magnitude()
5.0
>>> pythagoras.magnitudeSquared()
25
>>> triplePythagoras = pythagoras * 3.0
>>> triplePythagoras
9.0, 12.0, 0.0
>>> plane = pythagoras.dropAxis( 2 )
>>> plane
(3+4j)
"""
from __future__ import absolute_import
try:
import psyco
psyco.full()
except:
pass
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
import math
import operator
from vector3 import Vector3  # the arithmetic methods below return Vector3 objects
__author__ = "Enrique Perez (perez_enrique@yahoo.com)"
__credits__ = 'Nophead <http://forums.reprap.org/profile.php?12,28>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
class Vector3Index:
"A three dimensional vector index class."
__slots__ = [ 'index', 'x', 'y', 'z' ]
def __init__( self, index, x = 0.0, y = 0.0, z = 0.0 ):
self.index = index
self.x = x
self.y = y
self.z = z
def __abs__( self ):
"Get the magnitude of the Vector3."
return math.sqrt( self.x * self.x + self.y * self.y + self.z * self.z )
magnitude = __abs__
def __add__( self, other ):
"Get the sum of this Vector3 and other one."
return Vector3( self.x + other.x, self.y + other.y, self.z + other.z )
def __copy__( self ):
"Get the copy of this Vector3."
return Vector3( self.x, self.y, self.z )
__pos__ = __copy__
copy = __copy__
def __div__( self, other ):
"Get a new Vector3 by dividing each component of this one."
return Vector3( self.x / other, self.y / other, self.z / other )
def __eq__( self, other ):
"Determine whether this vector is identical to other one."
		if other is None:
return False
return self.x == other.x and self.y == other.y and self.z == other.z
def __floordiv__( self, other ):
"Get a new Vector3 by floor dividing each component of this one."
return Vector3( self.x // other, self.y // other, self.z // other )
def __hash__( self ):
"Determine whether this vector is identical to other one."
return self.__repr__().__hash__()
def __iadd__( self, other ):
"Add other Vector3 to this one."
self.x += other.x
self.y += other.y
self.z += other.z
return self
def __idiv__( self, other ):
"Divide each component of this Vector3."
self.x /= other
self.y /= other
self.z /= other
return self
def __ifloordiv__( self, other ):
"Floor divide each component of this Vector3."
self.x //= other
self.y //= other
self.z //= other
return self
def __imul__( self, other ):
"Multiply each component of this Vector3."
self.x *= other
self.y *= other
self.z *= other
return self
def __isub__( self, other ):
"Subtract other Vector3 from this one."
self.x -= other.x
self.y -= other.y
self.z -= other.z
return self
def __itruediv__( self, other ):
"True divide each component of this Vector3."
self.x = operator.truediv( self.x, other )
self.y = operator.truediv( self.y, other )
self.z = operator.truediv( self.z, other )
return self
def __mul__( self, other ):
"Get a new Vector3 by multiplying each component of this one."
return Vector3( self.x * other, self.y * other, self.z * other )
def __ne__( self, other ):
"Determine whether this vector is not identical to other one."
return not self.__eq__( other )
def __neg__( self ):
return Vector3( - self.x, - self.y, - self.z )
def __nonzero__( self ):
return self.x != 0 or self.y != 0 or self.z != 0
def __repr__( self ):
"Get the string representation of this Vector3."
return '%s, %s, %s, %s' % ( self.index, self.x, self.y, self.z )
def __rdiv__( self, other ):
"Get a new Vector3 by dividing each component of this one."
return Vector3( other / self.x, other / self.y, other / self.z )
def __rfloordiv__( self, other ):
"Get a new Vector3 by floor dividing each component of this one."
return Vector3( other // self.x, other // self.y, other // self.z )
def __rmul__( self, other ):
"Get a new Vector3 by multiplying each component of this one."
return Vector3( self.x * other, self.y * other, self.z * other )
def __rtruediv__( self, other ):
"Get a new Vector3 by true dividing each component of this one."
return Vector3( operator.truediv( other , self.x ), operator.truediv( other, self.y ), operator.truediv( other, self.z ) )
def __sub__( self, other ):
"Get the difference between the Vector3 and other one."
return Vector3( self.x - other.x, self.y - other.y, self.z - other.z )
def __truediv__( self, other ):
"Get a new Vector3 by true dividing each component of this one."
return Vector3( operator.truediv( self.x, other ), operator.truediv( self.y, other ), operator.truediv( self.z, other ) )
def cross( self, other ):
"Calculate the cross product of this vector with other one."
return Vector3( self.y * other.z - self.z * other.y, - self.x * other.z + self.z * other.x, self.x * other.y - self.y * other.x )
def distance( self, other ):
"Get the Euclidean distance between this vector and other one."
return math.sqrt( self.distanceSquared( other ) )
def distanceSquared( self, other ):
"Get the square of the Euclidean distance between this vector and other one."
separationX = self.x - other.x
separationY = self.y - other.y
separationZ = self.z - other.z
return separationX * separationX + separationY * separationY + separationZ * separationZ
def dot( self, other ):
"Calculate the dot product of this vector with other one."
return self.x * other.x + self.y * other.y + self.z * other.z
def dropAxis( self, which ):
"""Get a complex by removing one axis of this one.
Keyword arguments:
which -- the axis to drop (0=X, 1=Y, 2=Z)"""
if which == 0:
return complex( self.y, self.z )
if which == 1:
return complex( self.x, self.z )
if which == 2:
return complex( self.x, self.y )
	def getNormalized( self ):
"Get the normalized Vector3."
magnitude = abs( self )
if magnitude == 0.0:
return self.copy()
return self / magnitude
def magnitudeSquared( self ):
"Get the square of the magnitude of the Vector3."
return self.x * self.x + self.y * self.y + self.z * self.z
def normalize( self ):
"Scale each component of this Vector3 so that it has a magnitude of 1. If this Vector3 has a magnitude of 0, this method has no effect."
magnitude = abs( self )
if magnitude != 0.0:
self /= magnitude
def reflect( self, normal ):
"Reflect the Vector3 across the normal, which is assumed to be normalized."
distance = 2 * ( self.x * normal.x + self.y * normal.y + self.z * normal.z )
return Vector3( self.x - distance * normal.x, self.y - distance * normal.y, self.z - distance * normal.z )
def setToVector3( self, other ):
"Set this Vector3 to be identical to other one."
self.x = other.x
self.y = other.y
self.z = other.z
def setToXYZ( self, x, y, z ):
"Set the x, y, and z components of this Vector3."
self.x = x
self.y = y
self.z = z
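# A short usage sketch for Vector3Index, mirroring the doctest style of the
# module docstring above (illustrative; the index travels with the point while
# the arithmetic behaves like a plain three dimensional vector):
#
# >>> pivot = Vector3Index( 7, 3.0, 4.0, 0.0 )
# >>> pivot
# 7, 3.0, 4.0, 0.0
# >>> pivot.magnitude()
# 5.0
# >>> pivot.dropAxis( 2 )
# (3+4j)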
"""
class Vector3:
__slots__ = ['x', 'y', 'z']
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __copy__(self):
return self.__class__(self.x, self.y, self.z)
copy = __copy__
def __repr__(self):
return 'Vector3(%.2f, %.2f, %.2f)' % (self.x,
self.y,
self.z)
def __eq__(self, other):
if isinstance(other, Vector3):
return self.x == other.x and \
self.y == other.y and \
self.z == other.z
else:
assert hasattr(other, '__len__') and len(other) == 3
return self.x == other[0] and \
self.y == other[1] and \
self.z == other[2]
def __ne__(self, other):
return not self.__eq__(other)
def __nonzero__(self):
return self.x != 0 or self.y != 0 or self.z != 0
def __len__(self):
return 3
def __getitem__(self, key):
return (self.x, self.y, self.z)[key]
def __setitem__(self, key, value):
l = [self.x, self.y, self.z]
l[key] = value
self.x, self.y, self.z = l
def __iter__(self):
return iter((self.x, self.y, self.z))
def __getattr__(self, name):
try:
return tuple([(self.x, self.y, self.z)['xyz'.index(c)] \
for c in name])
except ValueError:
raise AttributeError, name
if _enable_swizzle_set:
# This has detrimental performance on ordinary setattr as well
# if enabled
def __setattr__(self, name, value):
if len(name) == 1:
object.__setattr__(self, name, value)
else:
try:
l = [self.x, self.y, self.z]
for c, v in map(None, name, value):
l['xyz'.index(c)] = v
self.x, self.y, self.z = l
except ValueError:
raise AttributeError, name
def __add__(self, other):
if isinstance(other, Vector3):
# Vector + Vector -> Vector
# Vector + Point -> Point
# Point + Point -> Vector
if self.__class__ is other.__class__:
_class = Vector3
else:
_class = Point3
return _class(self.x + other.x,
self.y + other.y,
self.z + other.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(self.x + other[0],
self.y + other[1],
self.z + other[2])
__radd__ = __add__
def __iadd__(self, other):
if isinstance(other, Vector3):
self.x += other.x
self.y += other.y
self.z += other.z
else:
self.x += other[0]
self.y += other[1]
self.z += other[2]
return self
def __sub__(self, other):
if isinstance(other, Vector3):
# Vector - Vector -> Vector
# Vector - Point -> Point
# Point - Point -> Vector
if self.__class__ is other.__class__:
_class = Vector3
else:
_class = Point3
return Vector3(self.x - other.x,
self.y - other.y,
self.z - other.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(self.x - other[0],
self.y - other[1],
self.z - other[2])
def __rsub__(self, other):
if isinstance(other, Vector3):
return Vector3(other.x - self.x,
other.y - self.y,
other.z - self.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(other.x - self[0],
other.y - self[1],
other.z - self[2])
def __mul__(self, other):
if isinstance(other, Vector3):
# TODO component-wise mul/div in-place and on Vector2; docs.
if self.__class__ is Point3 or other.__class__ is Point3:
_class = Point3
else:
_class = Vector3
return _class(self.x * other.x,
self.y * other.y,
self.z * other.z)
else:
assert type(other) in (int, long, float)
return Vector3(self.x * other,
self.y * other,
self.z * other)
__rmul__ = __mul__
def __imul__(self, other):
assert type(other) in (int, long, float)
self.x *= other
self.y *= other
self.z *= other
return self
def __div__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.div(self.x, other),
operator.div(self.y, other),
operator.div(self.z, other))
def __rdiv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.div(other, self.x),
operator.div(other, self.y),
operator.div(other, self.z))
def __floordiv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.floordiv(self.x, other),
operator.floordiv(self.y, other),
operator.floordiv(self.z, other))
def __rfloordiv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.floordiv(other, self.x),
operator.floordiv(other, self.y),
operator.floordiv(other, self.z))
def __truediv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.truediv(self.x, other),
operator.truediv(self.y, other),
operator.truediv(self.z, other))
def __rtruediv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.truediv(other, self.x),
operator.truediv(other, self.y),
operator.truediv(other, self.z))
def __neg__(self):
return Vector3(-self.x,
-self.y,
-self.z)
__pos__ = __copy__
def __abs__(self):
return math.sqrt(self.x ** 2 + \
self.y ** 2 + \
self.z ** 2)
magnitude = __abs__
def magnitude_squared(self):
return self.x ** 2 + \
self.y ** 2 + \
self.z ** 2
def normalize(self):
d = self.magnitude()
if d:
self.x /= d
self.y /= d
self.z /= d
return self
def normalized(self):
d = self.magnitude()
if d:
return Vector3(self.x / d,
self.y / d,
self.z / d)
return self.copy()
def dot(self, other):
assert isinstance(other, Vector3)
return self.x * other.x + \
self.y * other.y + \
self.z * other.z
def cross(self, other):
assert isinstance(other, Vector3)
return Vector3(self.y * other.z - self.z * other.y,
-self.x * other.z + self.z * other.x,
self.x * other.y - self.y * other.x)
def reflect(self, normal):
# assume normal is normalized
assert isinstance(normal, Vector3)
d = 2 * (self.x * normal.x + self.y * normal.y + self.z * normal.z)
return Vector3(self.x - d * normal.x,
self.y - d * normal.y,
self.z - d * normal.z)
"""
|
gpl-3.0
| -3,217,481,328,199,761,400
| 26.289796
| 157
| 0.614315
| false
| 2.898548
| false
| false
| false
|
IQSS/geoconnect
|
gc_apps/classification/layer_link_helper.py
|
1
|
5041
|
"""
Used for development to create WorldMap-related links from a layer name
"""
from __future__ import print_function
import logging
import re
import requests
from django.conf import settings
LOGGER = logging.getLogger(__name__)
GEONODE_PREFIX = 'geonode:'
class LayerLink(object):
"""Holds name, link, description"""
def __init__(self, name, link, description=None):
self.name = name
self.link = link
self.description = description
def show(self):
"""print info"""
        info = ('name: {0}\n'
                'link: {1}\n'
                'description: {2}'
                ).format(self.name, self.link, self.description)
        print(info)
class LayerLinkHelper(object):
"""
For development/debugging, given a WorldMap layer name, create links
related to various geonode services including:
- Listing geoserver attributes for the layer
- Retrieving the current SLD in XML format
        - Showing the classify service URL, etc.
"""
def __init__(self, layer_name, server_name='http://localhost:8000'):
assert layer_name is not None, "layer_name cannot be None"
self.layer_name = layer_name # geonode:boston_social_disorder
self.server_name = server_name
if self.server_name.endswith('/'):
self.server_name = self.server_name[:-1]
self.layer_name_no_prefix = None # boston_social_disorder
self.links_dict = {}
self.links_list = []
# Secondary processing involving requests
self.sld_name = None
self.format_layer_name()
self.format_layer_links()
def format_layer_name(self):
"""
Make sure the layer name has the GEONODE_PREFIX
e.g. "geonode:boston_social_disorder"
Set a variable w/o the prefix
e.g. layer_name_no_prefix = "boston_social_disorder"
"""
if not self.layer_name.startswith(GEONODE_PREFIX):
self.layer_name = '%s%s' % (GEONODE_PREFIX, self.layer_name)
self.layer_name_no_prefix = self.layer_name[len(GEONODE_PREFIX):]
def add_link(self, name, link, description=''):
"""
Add a LayerLink object to "links_list"
"""
layer_link_obj = LayerLink(name, link, description)
# add to list
self.links_list.append(layer_link_obj)
# add to dict
self.links_dict[name] = layer_link_obj
LOGGER.debug('links count: %s', len(self.links_list))
def get_geoserver(self):
"""Retrieve the geoserver url"""
return self.server_name.replace(':8000', ':8080')
def format_layer_links(self):
"""Format/Create the layer links"""
# View layer
view_url = '%s/data/%s' % (self.server_name, self.layer_name)
self.add_link('wm_layer', view_url, 'WorldMap layer view')
# Geoserver attributes
attr_url = ('%s/geoserver/rest/sldservice/%s/attributes.xml'\
% (self.get_geoserver(), self.layer_name))
self.add_link('attributes', attr_url, 'Geoserver Attributes')
# SLD Name
layer_url = '%s/geoserver/rest/layers/%s.html' %\
(self.get_geoserver(), self.layer_name_no_prefix)
self.add_link('sld_name', layer_url, 'SLD name')
if not self.get_sld_name():
return
sld_url = '%s/geoserver/rest/styles/%s.sld' % \
(self.get_geoserver(), self.sld_name)
self.add_link('sld_xml', sld_url, 'current SLD XML')
sld_url2 = '%s%s%s%s' % (\
self.get_geoserver(),
'/geoserver/web/?wicket:bookmarkablePage=',
':org.geoserver.wms.web.data.StyleEditPage&name=',
self.sld_name)
self.add_link('sld_xml2', sld_url2, 'Editable/Formatted SLD XML')
def get_sld_name(self):
"""
Retrieve the layer's SLD name from the server
"""
        if 'sld_name' not in self.links_dict:
return False
sld_url = self.links_dict['sld_name'].link
#print ('Attempt to retrieve SLD sld_url: %s' % sld_url)
resp = requests.get(sld_url, auth=settings.WORLDMAP_ACCOUNT_AUTH)
if not resp.status_code == 200:
LOGGER.error('Failed to retrieve SLD: %s', sld_url)
return False
# Parse out the SLD Name
sld_search = re.search(r'<li>Default style: StyleInfoImpl\[(.*)\]',\
resp.text, re.IGNORECASE)
if sld_search is None:
LOGGER.error('Failed to retrieve SLD')
return False
sld_name = sld_search.group(1)
self.sld_name = sld_name
return True
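    # For example (illustrative), a geoserver page containing
    # "<li>Default style: StyleInfoImpl[boston_social_disorder_sld]" would set
    # self.sld_name to 'boston_social_disorder_sld'.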
"""
if title_search:
title = title_search.group(1)
content = r.text
start_tag =
idx = content.find('<li>Default style: StyleInfoImpl[')
if idx == -1:
print 'Failed to retrieve SLD'
return
end_idx = content.find(']', idx +
print r.text
"""
|
apache-2.0
| 4,475,897,529,455,973,400
| 29.551515
| 76
| 0.575878
| false
| 3.647612
| false
| false
| false
|
MetricsGrimoire/sortinghat
|
tests/test_cmd_log.py
|
1
|
8958
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2017 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
#
import datetime
import sys
import unittest
if '..' not in sys.path:
sys.path.insert(0, '..')
from sortinghat import api
from sortinghat.command import CMD_SUCCESS
from sortinghat.cmd.log import Log
from sortinghat.exceptions import CODE_INVALID_DATE_ERROR, CODE_VALUE_ERROR, CODE_NOT_FOUND_ERROR
from tests.base import TestCommandCaseBase
LOG_UUID_NOT_FOUND_ERROR = "Error: Jane Roe not found in the registry"
LOG_ORG_NOT_FOUND_ERROR = "Error: LibreSoft not found in the registry"
LOG_INVALID_PERIOD_ERROR = "Error: 'from_date' 2001-01-01 00:00:00 cannot be greater than 1999-01-01 00:00:00"
LOG_INVALID_DATE_ERROR = "Error: 1999-13-01 is not a valid date"
LOG_INVALID_FORMAT_DATE_ERROR = "Error: YYZYY is not a valid date"
LOG_EMPTY_OUTPUT = ""
LOG_OUTPUT = """John Doe\tExample\t1900-01-01 00:00:00\t2100-01-01 00:00:00
John Smith\tBitergia\t1900-01-01 00:00:00\t2100-01-01 00:00:00
John Smith\tBitergia\t1999-01-01 00:00:00\t2000-01-01 00:00:00
John Smith\tBitergia\t2006-01-01 00:00:00\t2008-01-01 00:00:00
John Smith\tExample\t1900-01-01 00:00:00\t2100-01-01 00:00:00"""
LOG_UUID_OUTPUT = """John Doe\tExample\t1900-01-01 00:00:00\t2100-01-01 00:00:00"""
LOG_ORG_OUTPUT = """John Smith\tBitergia\t1900-01-01 00:00:00\t2100-01-01 00:00:00
John Smith\tBitergia\t1999-01-01 00:00:00\t2000-01-01 00:00:00
John Smith\tBitergia\t2006-01-01 00:00:00\t2008-01-01 00:00:00"""
LOG_TIME_PERIOD_OUTPUT = """John Smith\tBitergia\t1999-01-01 00:00:00\t2000-01-01 00:00:00"""
class TestLogCaseBase(TestCommandCaseBase):
"""Defines common setup and teardown methods on log unit tests"""
cmd_klass = Log
def load_test_dataset(self):
self.db.clear()
api.add_unique_identity(self.db, 'John Smith')
api.add_unique_identity(self.db, 'John Doe')
api.add_organization(self.db, 'Example')
api.add_organization(self.db, 'Bitergia')
api.add_enrollment(self.db, 'John Smith', 'Example')
api.add_enrollment(self.db, 'John Doe', 'Example')
api.add_enrollment(self.db, 'John Smith', 'Bitergia')
api.add_enrollment(self.db, 'John Smith', 'Bitergia',
datetime.datetime(1999, 1, 1),
datetime.datetime(2000, 1, 1))
api.add_enrollment(self.db, 'John Smith', 'Bitergia',
datetime.datetime(2006, 1, 1),
datetime.datetime(2008, 1, 1))
class TestLogCommand(TestLogCaseBase):
"""Unit tests for log command"""
def test_log(self):
"""Check log output"""
code = self.cmd.run()
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_OUTPUT)
def test_log_uuid(self):
"""Check log using a uuid"""
code = self.cmd.run('--uuid', 'John Doe')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_UUID_OUTPUT)
def test_log_organization(self):
"""Check log using a organization"""
code = self.cmd.run('--organization', 'Bitergia')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_ORG_OUTPUT)
def test_log_period(self):
"""Check log using a time period"""
code = self.cmd.run('--from', '1990-1-1 08:59:17',
'--to', '2005-1-1')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_TIME_PERIOD_OUTPUT)
def test_log_mix_filter(self):
"""Check log using some filters"""
code = self.cmd.run('--uuid', 'John Doe',
'--organization', 'Example',
'--from', '1990-1-1 08:59:17',
'--to', '2005-1-1')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_EMPTY_OUTPUT)
def test_empty_registry(self):
"""Check output when the registry is empty"""
# Delete the contents of the database
self.db.clear()
code = self.cmd.run()
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_EMPTY_OUTPUT)
def test_invalid_dates(self):
"""Check whether it fails when invalid dates are given"""
code = self.cmd.run('--from', '1999-13-01')
self.assertEqual(code, CODE_INVALID_DATE_ERROR)
output = sys.stderr.getvalue().strip().split('\n')[0]
self.assertEqual(output, LOG_INVALID_DATE_ERROR)
code = self.cmd.run('--from', 'YYZYY')
self.assertEqual(code, CODE_INVALID_DATE_ERROR)
output = sys.stderr.getvalue().strip().split('\n')[-1]
self.assertEqual(output, LOG_INVALID_FORMAT_DATE_ERROR)
code = self.cmd.run('--to', '1999-13-01')
self.assertEqual(code, CODE_INVALID_DATE_ERROR)
output = sys.stderr.getvalue().strip().split('\n')[-1]
self.assertEqual(output, LOG_INVALID_DATE_ERROR)
code = self.cmd.run('--to', 'YYZYY')
self.assertEqual(code, CODE_INVALID_DATE_ERROR)
output = sys.stderr.getvalue().strip().split('\n')[-1]
self.assertEqual(output, LOG_INVALID_FORMAT_DATE_ERROR)
class TestLog(TestLogCaseBase):
"""Unit tests for log"""
def test_log(self):
"""Check log output"""
code = self.cmd.log()
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_OUTPUT)
def test_log_uuid(self):
"""Check log using a uuid"""
code = self.cmd.log('John Doe')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_UUID_OUTPUT)
def test_log_organization(self):
"""Check log using a organization"""
code = self.cmd.log(organization='Bitergia')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_ORG_OUTPUT)
def test_log_period(self):
"""Check log using a time period"""
code = self.cmd.log(from_date=datetime.datetime(1990, 1, 1),
to_date=datetime.datetime(2005, 1, 1))
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, LOG_TIME_PERIOD_OUTPUT)
def test_period_ranges(self):
"""Check whether enrollments cannot be listed giving invalid period ranges"""
code = self.cmd.log('John Smith', 'Example',
datetime.datetime(2001, 1, 1),
datetime.datetime(1999, 1, 1))
self.assertEqual(code, CODE_VALUE_ERROR)
output = sys.stderr.getvalue().strip()
self.assertEqual(output, LOG_INVALID_PERIOD_ERROR)
def test_not_found_uuid(self):
"""Check whether it raises an error when the uiid is not available"""
code = self.cmd.log(uuid='Jane Roe')
self.assertEqual(code, CODE_NOT_FOUND_ERROR)
output = sys.stderr.getvalue().strip()
self.assertEqual(output, LOG_UUID_NOT_FOUND_ERROR)
def test_not_found_organization(self):
"""Check whether it raises an error when the organization is not available"""
code = self.cmd.log(organization='LibreSoft')
self.assertEqual(code, CODE_NOT_FOUND_ERROR)
output = sys.stderr.getvalue().strip()
self.assertEqual(output, LOG_ORG_NOT_FOUND_ERROR)
def test_empty_registry(self):
"""Check output when the registry is empty"""
# Delete the contents of the database
self.db.clear()
code = self.cmd.log()
self.assertEqual(code, CMD_SUCCESS)
output = sys.stderr.getvalue().strip('\n')
self.assertEqual(output, LOG_EMPTY_OUTPUT)
if __name__ == "__main__":
unittest.main(buffer=True, exit=False)
|
gpl-3.0
| 3,161,797,829,543,985,000
| 35.263158
| 110
| 0.631126
| false
| 3.508421
| true
| false
| false
|
nijx/hypertable
|
src/py/ThriftClient/client_test.py
|
1
|
4079
|
import sys
import time
from hypertable.thriftclient import *
from hyperthrift.gen.ttypes import *
try:
client = ThriftClient("localhost", 38080)
print "HQL examples"
try:
namespace = client.namespace_open("bad")
except:
print "Caught exception when tyring to open 'bad' namespace"
namespace = client.namespace_open("test")
res = client.hql_query(namespace, "show tables")
print res
res = client.hql_query(namespace, "select * from thrift_test")
print res
print "mutator examples";
mutator = client.mutator_open(namespace, "thrift_test", 0, 0);
client.mutator_set_cell(mutator, Cell(Key("py-k1", "col", None), "py-v1"))
client.mutator_flush(mutator);
client.mutator_close(mutator);
print "shared mutator examples";
mutate_spec = MutateSpec("test_py", 1000, 0);
client.shared_mutator_set_cell(namespace, "thrift_test", mutate_spec, Cell(Key("py-put-k1", "col", None), "py-put-v1"))
client.shared_mutator_refresh(namespace, "thrift_test", mutate_spec)
client.shared_mutator_set_cell(namespace, "thrift_test", mutate_spec, Cell(Key("py-put-k2", "col", None), "py-put-v2"))
time.sleep(2)
print "scanner examples";
scanner = client.scanner_open(namespace, "thrift_test",
ScanSpec(None, None, None, 1));
while True:
cells = client.scanner_get_cells(scanner)
if (len(cells) == 0):
break
print cells
client.scanner_close(scanner)
print "asynchronous api examples\n";
future = client.future_open(0);
mutator_async_1 = client.async_mutator_open(namespace, "thrift_test", future, 0);
mutator_async_2 = client.async_mutator_open(namespace, "thrift_test", future, 0);
client.async_mutator_set_cell(mutator_async_1, Cell(Key("py-k1","col", None), "py-v1-async"));
client.async_mutator_set_cell(mutator_async_2, Cell(Key("py-k1","col", None), "py-v2-async"));
client.async_mutator_flush(mutator_async_1);
client.async_mutator_flush(mutator_async_2);
num_results=0;
while True:
result = client.future_get_result(future, 0);
if(result.is_empty):
break
num_results+=1;
print result;
if (result.is_error or result.is_scan):
print "Unexpected result\n"
exit(1);
if (num_results>2):
print "Expected only 2 results\n"
exit(1)
if (num_results!=2):
print "Expected only 2 results\n"
exit(1)
if (client.future_is_cancelled(future) or client.future_is_full(future) or not (client.future_is_empty(future)) or client.future_has_outstanding(future)):
print "Future object in unexpected state"
exit(1)
client.async_mutator_close(mutator_async_1)
client.async_mutator_close(mutator_async_2)
color_scanner = client.async_scanner_open(namespace, "FruitColor", future, ScanSpec(None, None, None, 1));
location_scanner = client.async_scanner_open(namespace, "FruitLocation", future, ScanSpec(None, None, None, 1));
energy_scanner = client.async_scanner_open(namespace, "FruitEnergy", future, ScanSpec(None, None, None, 1));
expected_cells = 6;
num_cells = 0;
while True:
result = client.future_get_result(future, 0);
print result;
if (result.is_empty or result.is_error or not(result.is_scan) ):
print "Unexpected result\n"
exit(1);
for cell in result.cells:
print cell;
num_cells+=1;
if(num_cells >= 6):
client.future_cancel(future);
break;
if (not client.future_is_cancelled(future)):
print "Expected future ops to be cancelled\n"
exit(1)
print "regexp scanner example";
scanner = client.scanner_open(namespace, "thrift_test",
ScanSpec(None, None, None, 1, 0, None, None, ["col"], False,0, 0, "k", "v[24]"));
while True:
cells = client.scanner_get_cells(scanner)
if (len(cells) == 0):
break
print cells
client.scanner_close(scanner)
client.async_scanner_close(color_scanner);
client.async_scanner_close(location_scanner);
client.async_scanner_close(energy_scanner);
client.future_close(future);
client.namespace_close(namespace)
except:
print sys.exc_info()
raise
|
gpl-3.0
| -526,401,585,103,323,140
| 32.162602
| 156
| 0.679333
| false
| 3.214342
| true
| false
| false
|
earlbellinger/asteroseismology
|
grid/calibrate.py
|
1
|
3590
|
#### Calibrate a solar model
#### Author: Earl Bellinger ( bellinger@mps.mpg.de )
#### Stellar Ages & Galactic Evolution Group
#### Max-Planck-Institut fur Sonnensystemforschung
#### Department of Astronomy, Yale University
import numpy as np
import pandas as pd
from scipy import optimize
from os import path
from subprocess import Popen
from math import log10
Z_div_X_solar = 0.02293 # GS98 # 0.0245 # GN93 #
log10_Z_div_X_solar = np.log10(Z_div_X_solar)
constraint_names = ("log L", "log R", "Fe/H")
param_names = ("Y", "alpha", "Z")
param_init = [0.273449170177157, 1.83413390909832, 0.0197444964340224]
directory = 'calibrate_py'
print(directory)
def objective():
## minimize sum(log(model values / solar values)**2)
# searches in LOGS_MS subdirectory of the global 'directory' variable
hstry_file = path.join(directory, 'LOGS_MS', 'history.data')
if (not path.exists(hstry_file)):
return np.inf
    hstry = pd.read_table(hstry_file, header=0, skiprows=5, delimiter=r'\s+') #header=1,
mdl = hstry.loc[hstry.shape[0]-1] #hstry[nrow(hstry),]
# [Fe/H] = log10 ( Z / X / (Z/X)_Sun )
mdl_Fe_H = mdl['log_surf_cell_z']-np.log10(mdl['surface_h1'])-log10_Z_div_X_solar
mdl_vals = [mdl['log_L'], mdl['log_R'], mdl_Fe_H]
print("*** Model values")
print(constraint_names, mdl_vals)
print('L', 10**mdl['log_L'], 'R', 10**mdl['log_R'])
result = sum([ii**2 for ii in mdl_vals])
if np.isfinite(result):
return log10(result)
return 10**10
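# Worked example of the metallicity formula above (illustrative numbers):
# a model with log_surf_cell_z = -1.64 and surface_h1 = 0.74 gives
# [Fe/H] = -1.64 - log10(0.74) - log10(0.02293)
#        = -1.64 + 0.131 + 1.640
#        ~= 0.13 dex, i.e. slightly metal-rich relative to the GS98 mixture.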
### SEARCH
iteration = 0
best_val = np.inf
best_param = param_init
#run = function(params) {
def run(params):
global iteration
global best_val
global best_param
iteration = iteration + 1
print("**** iter:", iteration)
Y, alpha, Z = params
print(param_names, (Y, alpha, Z))
if (Y < 0.2 or Y > 0.4 or Z < 0 or Z > 0.04 or alpha < 1 or alpha > 3):
return 10**10
#if (Y < 0.23):
# Y = 0.23
#if (Y > 0.33):
# Y = 0.33
#if (Z < 0.01):
# Z = 0.01
#if (Z > 0.04):
# Z = 0.04
#if (alpha < 1):
# alpha = 1
#if (alpha > 3):
# alpha = 3
command = "./dispatch.sh" + \
' -Y ' + str(Y) + \
' -a ' + str(alpha) + \
' -o ' + '0' + \
' -Z ' + str(Z) + \
' -D ' + '1' + \
' -g ' + '1' + \
' -e ' + '0' + \
' -c ' + "4572000000" + \
' -d ' + directory
print(command)
#system(command)
process = Popen(command.split(), shell=False)
process.wait()
obj_val = objective()
print("**** objective value =", obj_val)
if (obj_val < best_val):
best_val = obj_val
print("*****", param_names, params)
best_param = params
print("***** New record!")
return obj_val
result = optimize.minimize(fun=run, x0=param_init, method='Nelder-Mead',
options={'disp': True,
'maxiter': 10000}) #,
#bounds=((0.25, 0.32), (1, 3), (0.012, 0.03)))
print("Optimization terminated. Saving best result")
Y, alpha, Z = result.x
command = "./dispatch.sh" + \
' -Y ' + str(Y) + \
' -a ' + str(alpha) + \
' -o ' + '0' + \
' -Z ' + str(Z) + \
' -D ' + '1' + \
' -g ' + '1' + \
' -e ' + '0' + \
' -c ' + "4572000000" + \
' -d ' + directory
print(command)
process = Popen(command.split(), shell=False)
process.wait()
print(result)
|
gpl-2.0
| -8,822,781,440,106,951,000
| 26.72
| 88
| 0.51532
| false
| 2.849206
| false
| false
| false
|
huajiahen/hotspot
|
backend/Busy/models.py
|
1
|
1154
|
# -*- coding:utf-8 -*-
from django.db.models import *
class Event(Model):
content = CharField(u'内容',max_length = 200)
starttime = IntegerField(u'开始时间')
endtime = IntegerField(u'结束时间')
#longitude = DecimalField(u'经度',max_digits = 18,decimal_places = 14)
#latitude = DecimalField(u'纬度',max_digits = 18,decimal_places = 14)
longitude = FloatField(u'经度')
latitude = FloatField(u'纬度')
address = CharField(u'地点',max_length = 100)
hit = IntegerField(u'想去数',default = 0)
class Emergency(Model):
content = CharField(u'内容',max_length = 100)
#longitude = DecimalField(u'经度',max_digits = 18,decimal_places = 14)
#latitude = DecimalField(u'纬度',max_digits = 18,decimal_places = 14)
longitude = FloatField(u'经度')
latitude = FloatField(u'纬度')
class Man(Model):
user_id = CharField(u'用户ID',max_length = 200)
longitude = DecimalField(u'经度',max_digits = 18,decimal_places = 14)
latitude = DecimalField(u'纬度',max_digits = 18,decimal_places = 14)
hadevent = BooleanField(u'是否参与事件',default = False)
|
mit
| -7,604,758,913,650,735,000
| 38.407407
| 72
| 0.662594
| false
| 2.867925
| false
| false
| false
|
cmaclell/py_plan
|
py_plan/problems/blocksworld.py
|
1
|
3681
|
from operator import ne
from py_search.utils import compare_searches
from py_search.uninformed import depth_first_search
from py_search.uninformed import breadth_first_search
from py_search.uninformed import iterative_deepening_search
from py_plan.total_order import StateSpacePlanningProblem
from py_plan.base import Operator
move = Operator('move',
[('on', '?b', '?x'),
('block', '?b'),
('block', '?x'),
('block', '?y'),
('block', '?other'),
('block', '?other2'),
('not', ('on', '?other', '?b')),
('not', ('on', '?other2', '?y')),
# ('clear', '?b'),
# ('clear', '?y'),
(ne, '?b', '?x'),
(ne, '?b', '?y'),
(ne, '?x', '?y')],
[('on', '?b', '?y'),
# ('clear', '?x'),
('not', ('on', '?b', '?x')),
# ('not', ('clear', '?y'))
])
move_from_table = Operator('move_from_table',
[('on', '?b', 'Table'),
('block', '?other'),
('block', '?other2'),
('not', ('on', '?other', '?b')),
('not', ('on', '?other2', '?y')),
# ('clear', '?b'),
# ('clear', '?y'),
('block', '?b'),
('block', '?y'),
(ne, '?b', '?y')],
[('on', '?b', '?y'),
('not', ('on', '?b', 'Table')),
# ('not', ('clear', '?y'))
])
move_to_table = Operator('move_to_table',
[('on', '?b', '?x'),
('block', '?b'),
('block', '?x'),
('block', '?other'),
('not', ('on', '?other', '?b')),
# ('clear', '?b'),
(ne, '?b', '?x')],
[('on', '?b', 'Table'),
# ('clear', '?x'),
('not', ('on', '?b', '?x'))])
if __name__ == "__main__":
start = [('on', 'A', 'Table'),
('on', 'B', 'Table'),
('on', 'C', 'A'),
('block', 'A'),
('block', 'B'),
('block', 'C'),
# ('clear', 'B'),
# ('clear', 'C')
]
goal = [('on', 'A', 'B'),
('on', 'B', 'C'),
('on', 'C', 'Table')]
# start = [('on', 'A', 'Table'),
# ('on', 'B', 'Table'),
# ('on', 'C', 'Table'),
# ('block', 'A'),
# ('block', 'B'),
# ('block', 'C'),
# ('clear', 'A'),
# ('clear', 'B'),
# ('clear', 'C')]
def progression(x):
return breadth_first_search(x, forward=True, backward=False)
def regression(x):
return breadth_first_search(x, forward=False, backward=True)
def bidirectional(x):
return breadth_first_search(x, forward=True, backward=True)
p = StateSpacePlanningProblem(start, goal, [move_from_table,
move_to_table])
# print(next(best_first_search(p)).state)
compare_searches([p], [progression,
regression, bidirectional,
# iterative_deepening_search
])
print(next(progression(p)).path())
print(next(regression(p)).path())
|
mit
| 1,218,365,807,427,348,500
| 33.401869
| 68
| 0.32627
| false
| 4.099109
| false
| false
| false
|
citrix-openstack-build/horizon
|
horizon/tables/base.py
|
1
|
53167
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import logging
from operator import attrgetter # noqa
import sys
from django.conf import settings # noqa
from django.core import urlresolvers
from django import forms
from django.http import HttpResponse # noqa
from django import template
from django.template.defaultfilters import truncatechars # noqa
from django.template.loader import render_to_string # noqa
from django.utils.datastructures import SortedDict # noqa
from django.utils.html import escape # noqa
from django.utils import http
from django.utils.http import urlencode # noqa
from django.utils.safestring import mark_safe # noqa
from django.utils import termcolors
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon.tables.actions import FilterAction # noqa
from horizon.tables.actions import LinkAction # noqa
from horizon.utils import html
LOG = logging.getLogger(__name__)
PALETTE = termcolors.PALETTES[termcolors.DEFAULT_PALETTE]
STRING_SEPARATOR = "__"
class Column(html.HTMLElement):
""" A class which represents a single column in a :class:`.DataTable`.
.. attribute:: transform
A string or callable. If ``transform`` is a string, it should be the
name of the attribute on the underlying data class which
should be displayed in this column. If it is a callable, it
will be passed the current row's data at render-time and should
return the contents of the cell. Required.
.. attribute:: verbose_name
The name for this column which should be used for display purposes.
Defaults to the value of ``transform`` with the first letter
of each word capitalized.
.. attribute:: sortable
Boolean to determine whether this column should be sortable or not.
Defaults to ``True``.
.. attribute:: hidden
Boolean to determine whether or not this column should be displayed
when rendering the table. Default: ``False``.
.. attribute:: link
A string or callable which returns a URL which will be wrapped around
this column's text as a link.
.. attribute:: allowed_data_types
A list of data types for which the link should be created.
Default is an empty list (``[]``).
When the list is empty and the ``link`` attribute is not None, all the
rows under this column will be links.
.. attribute:: status
Boolean designating whether or not this column represents a status
(i.e. "enabled/disabled", "up/down", "active/inactive").
Default: ``False``.
.. attribute:: status_choices
A tuple of tuples representing the possible data values for the
status column and their associated boolean equivalent. Positive
states should equate to ``True``, negative states should equate
to ``False``, and indeterminate states should be ``None``.
Values are compared in a case-insensitive manner.
Example (these are also the default values)::
status_choices = (
('enabled', True),
            ('true', True),
('up', True),
('active', True),
('yes', True),
('on', True),
('none', None),
('unknown', None),
('', None),
('disabled', False),
('down', False),
('false', False),
('inactive', False),
('no', False),
('off', False),
)
.. attribute:: display_choices
A tuple of tuples representing the possible values to substitute
the data when displayed in the column cell.
.. attribute:: empty_value
A string or callable to be used for cells which have no data.
Defaults to the string ``"-"``.
.. attribute:: summation
A string containing the name of a summation method to be used in
the generation of a summary row for this column. By default the
options are ``"sum"`` or ``"average"``, which behave as expected.
Optional.
.. attribute:: filters
A list of functions (often template filters) to be applied to the
value of the data for this column prior to output. This is effectively
a shortcut for writing a custom ``transform`` function in simple cases.
.. attribute:: classes
An iterable of CSS classes which should be added to this column.
Example: ``classes=('foo', 'bar')``.
.. attribute:: attrs
A dict of HTML attribute strings which should be added to this column.
Example: ``attrs={"data-foo": "bar"}``.
.. attribute:: truncate
An integer for the maximum length of the string in this column. If the
data in this column is larger than the supplied number, the data for
this column will be truncated and an ellipsis will be appended to the
truncated data.
Defaults to ``None``.
.. attribute:: link_classes
An iterable of CSS classes which will be added when the column's text
is displayed as a link.
Example: ``classes=('link-foo', 'link-bar')``.
Defaults to ``None``.
.. attribute:: wrap_list
Boolean value indicating whether the contents of this cell should be
wrapped in a ``<ul></ul>`` tag. Useful in conjunction with Django's
``unordered_list`` template filter. Defaults to ``False``.
"""
summation_methods = {
"sum": sum,
"average": lambda data: sum(data, 0.0) / len(data)
}
# Used to retain order when instantiating columns on a table
creation_counter = 0
transform = None
name = None
verbose_name = None
status_choices = (
('enabled', True),
('true', True),
('up', True),
('yes', True),
('active', True),
('on', True),
('none', None),
('unknown', None),
('', None),
('disabled', False),
('down', False),
('false', False),
('inactive', False),
('no', False),
('off', False),
)
def __init__(self, transform, verbose_name=None, sortable=True,
link=None, allowed_data_types=[], hidden=False, attrs=None,
status=False, status_choices=None, display_choices=None,
empty_value=None, filters=None, classes=None, summation=None,
auto=None, truncate=None, link_classes=None, wrap_list=False):
self.classes = list(classes or getattr(self, "classes", []))
super(Column, self).__init__()
self.attrs.update(attrs or {})
if callable(transform):
self.transform = transform
self.name = transform.__name__
else:
self.transform = unicode(transform)
self.name = self.transform
# Empty string is a valid value for verbose_name
        if verbose_name is None:
            verbose_name = self.transform.title()
self.auto = auto
self.sortable = sortable
self.verbose_name = verbose_name
self.link = link
self.allowed_data_types = allowed_data_types
self.hidden = hidden
self.status = status
self.empty_value = empty_value or '-'
self.filters = filters or []
self.truncate = truncate
self.link_classes = link_classes or []
self.wrap_list = wrap_list
if status_choices:
self.status_choices = status_choices
self.display_choices = display_choices
if summation is not None and summation not in self.summation_methods:
raise ValueError("Summation method %s must be one of %s."
% (summation,
", ".join(self.summation_methods.keys())))
self.summation = summation
self.creation_counter = Column.creation_counter
Column.creation_counter += 1
if self.sortable and not self.auto:
self.classes.append("sortable")
if self.hidden:
self.classes.append("hide")
if self.link is not None:
self.classes.append('anchor')
def __unicode__(self):
return unicode(self.verbose_name)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.name)
def get_raw_data(self, datum):
"""
Returns the raw data for this column, before any filters or formatting
are applied to it. This is useful when doing calculations on data in
the table.
"""
# Callable transformations
if callable(self.transform):
data = self.transform(datum)
# Basic object lookups
elif hasattr(datum, self.transform):
data = getattr(datum, self.transform, None)
# Dict lookups
elif isinstance(datum, collections.Iterable) and \
self.transform in datum:
data = datum.get(self.transform)
else:
if settings.DEBUG:
msg = _("The attribute %(attr)s doesn't exist on "
"%(obj)s.") % {'attr': self.transform, 'obj': datum}
msg = termcolors.colorize(msg, **PALETTE['ERROR'])
LOG.warning(msg)
data = None
return data
def get_data(self, datum):
"""
Returns the final display data for this column from the given inputs.
The return value will be either the attribute specified for this column
or the return value of the attr:`~horizon.tables.Column.transform`
method for this column.
"""
datum_id = self.table.get_object_id(datum)
if datum_id in self.table._data_cache[self]:
return self.table._data_cache[self][datum_id]
data = self.get_raw_data(datum)
display_value = None
if self.display_choices:
display_value = [display for (value, display) in
self.display_choices
if value.lower() == (data or '').lower()]
if display_value:
data = display_value[0]
else:
for filter_func in self.filters:
data = filter_func(data)
if data and self.truncate:
data = truncatechars(data, self.truncate)
self.table._data_cache[self][datum_id] = data
return self.table._data_cache[self][datum_id]
def get_link_url(self, datum):
""" Returns the final value for the column's ``link`` property.
If ``allowed_data_types`` of this column is not empty and the datum
has an assigned type, check if the datum's type is in the
``allowed_data_types`` list. If not, the datum won't be displayed
as a link.
If ``link`` is a callable, it will be passed the current data object
and should return a URL. Otherwise ``get_link_url`` will attempt to
call ``reverse`` on ``link`` with the object's id as a parameter.
Failing that, it will simply return the value of ``link``.
"""
if self.allowed_data_types:
data_type_name = self.table._meta.data_type_name
data_type = getattr(datum, data_type_name, None)
if data_type and (data_type not in self.allowed_data_types):
return None
obj_id = self.table.get_object_id(datum)
if callable(self.link):
return self.link(datum)
try:
return urlresolvers.reverse(self.link, args=(obj_id,))
except urlresolvers.NoReverseMatch:
return self.link
def get_summation(self):
"""
Returns the summary value for the data in this column if a
valid summation method is specified for it. Otherwise returns ``None``.
"""
if self.summation not in self.summation_methods:
return None
summation_function = self.summation_methods[self.summation]
data = [self.get_raw_data(datum) for datum in self.table.data]
data = filter(lambda datum: datum is not None, data)
if len(data):
summation = summation_function(data)
for filter_func in self.filters:
summation = filter_func(summation)
return summation
else:
return None
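# A brief illustration of declaring columns in a DataTable subclass (the table
# and attribute names here are invented for the example; ``summation="sum"``
# adds a summary row and ``status=True`` drives the row status machinery):
#
# class InstancesTable(DataTable):
#     name = Column('name', verbose_name=_("Name"),
#                   link="horizon:project:instances:detail")
#     size = Column('size_gb', verbose_name=_("Size (GB)"), summation="sum")
#     status = Column('status', status=True)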
class Row(html.HTMLElement):
""" Represents a row in the table.
When iterated, the ``Row`` instance will yield each of its cells.
Rows are capable of AJAX updating, with a little added work:
The ``ajax`` property needs to be set to ``True``, and
subclasses need to define a ``get_data`` method which returns a data
object appropriate for consumption by the table (effectively the "get"
lookup versus the table's "list" lookup).
The automatic update interval is configurable by setting the key
``ajax_poll_interval`` in the ``HORIZON_CONFIG`` dictionary.
Default: ``2500`` (measured in milliseconds).
.. attribute:: table
The table which this row belongs to.
.. attribute:: datum
The data object which this row represents.
.. attribute:: id
A string uniquely representing this row composed of the table name
and the row data object's identifier.
.. attribute:: cells
The cells belonging to this row stored in a ``SortedDict`` object.
This attribute is populated during instantiation.
.. attribute:: status
Boolean value representing the status of this row calculated from
the values of the table's ``status_columns`` if they are set.
.. attribute:: status_class
Returns a css class for the status of the row based on ``status``.
.. attribute:: ajax
Boolean value to determine whether ajax updating for this row is
enabled.
.. attribute:: ajax_action_name
String that is used for the query parameter key to request AJAX
updates. Generally you won't need to change this value.
Default: ``"row_update"``.
"""
ajax = False
ajax_action_name = "row_update"
def __init__(self, table, datum=None):
super(Row, self).__init__()
self.table = table
self.datum = datum
self.selected = False
if self.datum:
self.load_cells()
else:
self.id = None
self.cells = []
def load_cells(self, datum=None):
"""
Load the row's data (either provided at initialization or as an
        argument to this function), initialize all the cells contained
by this row, and set the appropriate row properties which require
the row's data to be determined.
This function is called automatically by
:meth:`~horizon.tables.Row.__init__` if the ``datum`` argument is
provided. However, by not providing the data during initialization
this function allows for the possibility of a two-step loading
pattern when you need a row instance but don't yet have the data
available.
"""
# Compile all the cells on instantiation.
table = self.table
if datum:
self.datum = datum
else:
datum = self.datum
cells = []
for column in table.columns.values():
if column.auto == "multi_select":
widget = forms.CheckboxInput(check_test=lambda value: False)
# Convert value to string to avoid accidental type conversion
data = widget.render('object_ids',
unicode(table.get_object_id(datum)))
table._data_cache[column][table.get_object_id(datum)] = data
elif column.auto == "actions":
data = table.render_row_actions(datum)
table._data_cache[column][table.get_object_id(datum)] = data
else:
data = column.get_data(datum)
cell = Cell(datum, data, column, self)
cells.append((column.name or column.auto, cell))
self.cells = SortedDict(cells)
if self.ajax:
interval = conf.HORIZON_CONFIG['ajax_poll_interval']
self.attrs['data-update-interval'] = interval
self.attrs['data-update-url'] = self.get_ajax_update_url()
self.classes.append("ajax-update")
# Add the row's status class and id to the attributes to be rendered.
self.classes.append(self.status_class)
id_vals = {"table": self.table.name,
"sep": STRING_SEPARATOR,
"id": table.get_object_id(datum)}
self.id = "%(table)s%(sep)srow%(sep)s%(id)s" % id_vals
self.attrs['id'] = self.id
# Add the row's display name if available
display_name = table.get_object_display(datum)
if display_name:
self.attrs['data-display'] = escape(display_name)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.id)
def __iter__(self):
return iter(self.cells.values())
@property
def status(self):
column_names = self.table._meta.status_columns
if column_names:
statuses = dict([(column_name, self.cells[column_name].status) for
column_name in column_names])
return self.table.calculate_row_status(statuses)
@property
def status_class(self):
column_names = self.table._meta.status_columns
if column_names:
return self.table.get_row_status_class(self.status)
else:
return ''
def render(self):
return render_to_string("horizon/common/_data_table_row.html",
{"row": self})
def get_cells(self):
""" Returns the bound cells for this row in order. """
return self.cells.values()
def get_ajax_update_url(self):
table_url = self.table.get_absolute_url()
params = urlencode({"table": self.table.name,
"action": self.ajax_action_name,
"obj_id": self.table.get_object_id(self.datum)})
return "%s?%s" % (table_url, params)
def get_data(self, request, obj_id):
"""
Fetches the updated data for the row based on the object id
passed in. Must be implemented by a subclass to allow AJAX updating.
"""
raise NotImplementedError("You must define a get_data method on %s"
% self.__class__.__name__)
class Cell(html.HTMLElement):
""" Represents a single cell in the table. """
def __init__(self, datum, data, column, row, attrs=None, classes=None):
self.classes = classes or getattr(self, "classes", [])
super(Cell, self).__init__()
self.attrs.update(attrs or {})
self.datum = datum
self.data = data
self.column = column
self.row = row
self.wrap_list = column.wrap_list
def __repr__(self):
return '<%s: %s, %s>' % (self.__class__.__name__,
self.column.name,
self.row.id)
@property
def value(self):
"""
Returns a formatted version of the data for final output.
This takes into consideration the
:attr:`~horizon.tables.Column.link`` and
:attr:`~horizon.tables.Column.empty_value`
attributes.
"""
try:
data = self.column.get_data(self.datum)
if data is None:
if callable(self.column.empty_value):
data = self.column.empty_value(self.datum)
else:
data = self.column.empty_value
except Exception:
data = None
exc_info = sys.exc_info()
raise template.TemplateSyntaxError, exc_info[1], exc_info[2]
if self.url:
link_classes = ' '.join(self.column.link_classes)
# Escape the data inside while allowing our HTML to render
data = mark_safe('<a href="%s" class="%s">%s</a>' %
(self.url, link_classes, escape(data)))
return data
@property
def url(self):
if self.column.link:
url = self.column.get_link_url(self.datum)
if url:
return url
else:
return None
@property
def status(self):
""" Gets the status for the column based on the cell's data. """
# Deal with status column mechanics based in this cell's data
if hasattr(self, '_status'):
return self._status
if self.column.status or \
self.column.name in self.column.table._meta.status_columns:
            # returns the first matching status found
data_value_lower = unicode(self.data).lower()
for status_name, status_value in self.column.status_choices:
if unicode(status_name).lower() == data_value_lower:
self._status = status_value
return self._status
self._status = None
return self._status
def get_status_class(self, status):
""" Returns a css class name determined by the status value. """
if status is True:
return "status_up"
elif status is False:
return "status_down"
else:
return "status_unknown"
def get_default_classes(self):
""" Returns a flattened string of the cell's CSS classes. """
if not self.url:
self.column.classes = [cls for cls in self.column.classes
if cls != "anchor"]
column_class_string = self.column.get_final_attrs().get('class', "")
classes = set(column_class_string.split(" "))
if self.column.status:
classes.add(self.get_status_class(self.status))
return list(classes)
class DataTableOptions(object):
""" Contains options for :class:`.DataTable` objects.
.. attribute:: name
A short name or slug for the table.
.. attribute:: verbose_name
A more verbose name for the table meant for display purposes.
.. attribute:: columns
A list of column objects or column names. Controls ordering/display
of the columns in the table.
.. attribute:: table_actions
A list of action classes derived from the
:class:`~horizon.tables.Action` class. These actions will handle tasks
such as bulk deletion, etc. for multiple objects at once.
.. attribute:: row_actions
A list similar to ``table_actions`` except tailored to appear for
each row. These actions act on a single object at a time.
.. attribute:: actions_column
Boolean value to control rendering of an additional column containing
the various actions for each row. Defaults to ``True`` if any actions
are specified in the ``row_actions`` option.
.. attribute:: multi_select
Boolean value to control rendering of an extra column with checkboxes
for selecting multiple objects in the table. Defaults to ``True`` if
any actions are specified in the ``table_actions`` option.
.. attribute:: filter
Boolean value to control the display of the "filter" search box
in the table actions. By default it checks whether or not an instance
of :class:`.FilterAction` is in :attr:`.table_actions`.
.. attribute:: template
String containing the template which should be used to render the
table. Defaults to ``"horizon/common/_data_table.html"``.
.. attribute:: context_var_name
The name of the context variable which will contain the table when
it is rendered. Defaults to ``"table"``.
.. attribute:: pagination_param
The name of the query string parameter which will be used when
paginating this table. When using multiple tables in a single
view this will need to be changed to differentiate between the
tables. Default: ``"marker"``.
.. attribute:: status_columns
A list or tuple of column names which represents the "state"
of the data object being represented.
If ``status_columns`` is set, when the rows are rendered the value
of this column will be used to add an extra class to the row in
the form of ``"status_up"`` or ``"status_down"`` for that row's
data.
The row status is used by other Horizon components to trigger tasks
such as dynamic AJAX updating.
.. attribute:: row_class
The class which should be used for rendering the rows of this table.
Optional. Default: :class:`~horizon.tables.Row`.
.. attribute:: column_class
The class which should be used for handling the columns of this table.
Optional. Default: :class:`~horizon.tables.Column`.
.. attribute:: mixed_data_type
A toggle to indicate if the table accepts two or more types of data.
Optional. Default: :``False``
.. attribute:: data_types
A list of data types that this table would accept. Default to be an
        empty list, but if the attribute ``mixed_data_type`` is set to ``True``,
then this list must have at least one element.
.. attribute:: data_type_name
The name of an attribute to assign to data passed to the table when it
accepts mix data. Default: ``"_table_data_type"``
.. attribute:: footer
Boolean to control whether or not to show the table's footer.
Default: ``True``.
.. attribute:: permissions
A list of permission names which this table requires in order to be
displayed. Defaults to an empty list (``[]``).
"""
def __init__(self, options):
self.name = getattr(options, 'name', self.__class__.__name__)
verbose_name = getattr(options, 'verbose_name', None) \
or self.name.title()
self.verbose_name = verbose_name
self.columns = getattr(options, 'columns', None)
self.status_columns = getattr(options, 'status_columns', [])
self.table_actions = getattr(options, 'table_actions', [])
self.row_actions = getattr(options, 'row_actions', [])
self.row_class = getattr(options, 'row_class', Row)
self.column_class = getattr(options, 'column_class', Column)
self.pagination_param = getattr(options, 'pagination_param', 'marker')
self.browser_table = getattr(options, 'browser_table', None)
self.footer = getattr(options, 'footer', True)
self.no_data_message = getattr(options,
"no_data_message",
_("No items to display."))
self.permissions = getattr(options, 'permissions', [])
# Set self.filter if we have any FilterActions
filter_actions = [action for action in self.table_actions if
issubclass(action, FilterAction)]
if len(filter_actions) > 1:
raise NotImplementedError("Multiple filter actions are not "
"currently supported.")
self.filter = getattr(options, 'filter', len(filter_actions) > 0)
if len(filter_actions) == 1:
self._filter_action = filter_actions.pop()
else:
self._filter_action = None
self.template = getattr(options,
'template',
'horizon/common/_data_table.html')
self.row_actions_template = \
'horizon/common/_data_table_row_actions.html'
self.table_actions_template = \
'horizon/common/_data_table_table_actions.html'
self.context_var_name = unicode(getattr(options,
'context_var_name',
'table'))
self.actions_column = getattr(options,
'actions_column',
len(self.row_actions) > 0)
self.multi_select = getattr(options,
'multi_select',
len(self.table_actions) > 0)
# Set runtime table defaults; not configurable.
self.has_more_data = False
# Set mixed data type table attr
self.mixed_data_type = getattr(options, 'mixed_data_type', False)
self.data_types = getattr(options, 'data_types', [])
# If data_types has more than one element, set mixed_data_type
# to True automatically.
if len(self.data_types) > 1:
self.mixed_data_type = True
# However, if mixed_data_type is set to True manually and
# data_types has fewer than two elements, raise an error.
if self.mixed_data_type and len(self.data_types) <= 1:
raise ValueError("If mixed_data_type is set to True in class %s, "
"data_types should contain at least two data types" %
self.name)
self.data_type_name = getattr(options,
'data_type_name',
"_table_data_type")
class DataTableMetaclass(type):
""" Metaclass to add options to DataTable class and collect columns. """
def __new__(mcs, name, bases, attrs):
# Process options from Meta
class_name = name
attrs["_meta"] = opts = DataTableOptions(attrs.get("Meta", None))
# Gather columns; this prevents the column from being an attribute
# on the DataTable class and avoids naming conflicts.
columns = []
for attr_name, obj in attrs.items():
if issubclass(type(obj), (opts.column_class, Column)):
column_instance = attrs.pop(attr_name)
column_instance.name = attr_name
column_instance.classes.append('normal_column')
columns.append((attr_name, column_instance))
columns.sort(key=lambda x: x[1].creation_counter)
# Iterate in reverse to preserve final order
for base in bases[::-1]:
if hasattr(base, 'base_columns'):
columns = base.base_columns.items() + columns
attrs['base_columns'] = SortedDict(columns)
# If the table is in a ResourceBrowser, the column number must meet
# these limits because of the width of the browser.
if opts.browser_table == "navigation" and len(columns) > 1:
raise ValueError("You can only assign one column to %s."
% class_name)
if opts.browser_table == "content" and len(columns) > 2:
raise ValueError("You can only assign two columns to %s."
% class_name)
if opts.columns:
# Remove any columns that weren't declared if we're being explicit
# NOTE: we're iterating a COPY of the list here!
for column_data in columns[:]:
if column_data[0] not in opts.columns:
columns.pop(columns.index(column_data))
# Re-order based on declared columns
columns.sort(key=lambda x: attrs['_meta'].columns.index(x[0]))
# Add in our auto-generated columns
if opts.multi_select and opts.browser_table != "navigation":
multi_select = opts.column_class("multi_select",
verbose_name="",
auto="multi_select")
multi_select.classes.append('multi_select_column')
columns.insert(0, ("multi_select", multi_select))
if opts.actions_column:
actions_column = opts.column_class("actions",
verbose_name=_("Actions"),
auto="actions")
actions_column.classes.append('actions_column')
columns.append(("actions", actions_column))
# Store this set of columns internally so we can copy them per-instance
attrs['_columns'] = SortedDict(columns)
# Gather and register actions for later access since we only want
# to instantiate them once.
# (list() call gives deterministic sort order, which sets don't have.)
actions = list(set(opts.row_actions) | set(opts.table_actions))
actions.sort(key=attrgetter('name'))
actions_dict = SortedDict([(action.name, action())
for action in actions])
attrs['base_actions'] = actions_dict
if opts._filter_action:
# Replace our filter action with the instantiated version
opts._filter_action = actions_dict[opts._filter_action.name]
# Create our new class!
return type.__new__(mcs, name, bases, attrs)
class DataTable(object):
""" A class which defines a table with all data and associated actions.
.. attribute:: name
String. Read-only access to the name specified in the
table's Meta options.
.. attribute:: multi_select
Boolean. Read-only access to whether or not this table
should display a column for multi-select checkboxes.
.. attribute:: data
Read-only access to the data this table represents.
.. attribute:: filtered_data
Read-only access to the data this table represents, filtered by
the :meth:`~horizon.tables.FilterAction.filter` method of the table's
:class:`~horizon.tables.FilterAction` class (if one is provided)
using the current request's query parameters.
"""
__metaclass__ = DataTableMetaclass
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
self.request = request
self.data = data
self.kwargs = kwargs
self._needs_form_wrapper = needs_form_wrapper
self._no_data_message = self._meta.no_data_message
self.breadcrumb = None
self.current_item_id = None
self.permissions = self._meta.permissions
# Create a new set
columns = []
for key, _column in self._columns.items():
column = copy.copy(_column)
column.table = self
columns.append((key, column))
self.columns = SortedDict(columns)
self._populate_data_cache()
# Associate these actions with this table
for action in self.base_actions.values():
action.table = self
self.needs_summary_row = any([col.summation
for col in self.columns.values()])
def __unicode__(self):
return unicode(self._meta.verbose_name)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._meta.name)
@property
def name(self):
return self._meta.name
@property
def footer(self):
return self._meta.footer
@property
def multi_select(self):
return self._meta.multi_select
@property
def filtered_data(self):
if not hasattr(self, '_filtered_data'):
self._filtered_data = self.data
if self._meta.filter and self._meta._filter_action:
action = self._meta._filter_action
filter_string = self.get_filter_string()
request_method = self.request.method
needs_preloading = (not filter_string
and request_method == 'GET'
and action.needs_preloading)
valid_method = (request_method == action.method)
if (filter_string and valid_method) or needs_preloading:
if self._meta.mixed_data_type:
self._filtered_data = action.data_type_filter(self,
self.data,
filter_string)
else:
self._filtered_data = action.filter(self,
self.data,
filter_string)
return self._filtered_data
def get_filter_string(self):
filter_action = self._meta._filter_action
param_name = filter_action.get_param_name()
filter_string = self.request.POST.get(param_name, '')
return filter_string
def _populate_data_cache(self):
self._data_cache = {}
# Set up hash tables to store data points for each column
for column in self.get_columns():
self._data_cache[column] = {}
def _filter_action(self, action, request, datum=None):
try:
# Catch user errors in permission functions here
row_matched = True
if self._meta.mixed_data_type:
row_matched = action.data_type_matched(datum)
return action._allowed(request, datum) and row_matched
except Exception:
LOG.exception("Error while checking action permissions.")
return None
def is_browser_table(self):
if self._meta.browser_table:
return True
return False
def render(self):
""" Renders the table using the template from the table options. """
table_template = template.loader.get_template(self._meta.template)
extra_context = {self._meta.context_var_name: self}
context = template.RequestContext(self.request, extra_context)
return table_template.render(context)
def get_absolute_url(self):
""" Returns the canonical URL for this table.
This is used for the POST action attribute on the form element
wrapping the table. In many cases it is also useful for redirecting
after a successful action on the table.
For convenience it defaults to the value of
``request.get_full_path()`` with any query string stripped off,
e.g. the path at which the table was requested.
"""
return self.request.get_full_path().partition('?')[0]
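# Illustrative example (hypothetical URL): a table rendered at
# "/project/instances/?marker=abc123" would return
# "/project/instances/" from get_absolute_url().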
def get_empty_message(self):
""" Returns the message to be displayed when there is no data. """
return self._no_data_message
def get_object_by_id(self, lookup):
"""
Returns the data object from the table's dataset which matches
the ``lookup`` parameter specified. An error will be raised if
the match is not a single data object.
We will convert the object id and ``lookup`` to unicode before
comparison.
Uses :meth:`~horizon.tables.DataTable.get_object_id` internally.
"""
if not isinstance(lookup, unicode):
lookup = unicode(str(lookup), 'utf-8')
matches = []
for datum in self.data:
obj_id = self.get_object_id(datum)
if not isinstance(obj_id, unicode):
obj_id = unicode(str(obj_id), 'utf-8')
if obj_id == lookup:
matches.append(datum)
if len(matches) > 1:
raise ValueError("Multiple matches were returned for that id: %s."
% matches)
if not matches:
raise exceptions.Http302(self.get_absolute_url(),
_('No match returned for the id "%s".')
% lookup)
return matches[0]
@property
def has_actions(self):
"""
Boolean. Indicates whether there are any available actions on this
table.
"""
if not self.base_actions:
return False
return any(self.get_table_actions()) or any(self._meta.row_actions)
@property
def needs_form_wrapper(self):
"""
Boolean. Indicates whether this table should be rendered wrapped in
a ``<form>`` tag or not.
"""
# If needs_form_wrapper is explicitly set, defer to that.
if self._needs_form_wrapper is not None:
return self._needs_form_wrapper
# Otherwise calculate whether or not we need a form element.
return self.has_actions
def get_table_actions(self):
""" Returns a list of the action instances for this table. """
bound_actions = [self.base_actions[action.name] for
action in self._meta.table_actions]
return [action for action in bound_actions if
self._filter_action(action, self.request)]
def get_row_actions(self, datum):
""" Returns a list of the action instances for a specific row. """
bound_actions = []
for action in self._meta.row_actions:
# Copy to allow modifying properties per row
bound_action = copy.copy(self.base_actions[action.name])
bound_action.attrs = copy.copy(bound_action.attrs)
bound_action.datum = datum
# Remove disallowed actions.
if not self._filter_action(bound_action,
self.request,
datum):
continue
# Hook for modifying actions based on data. No-op by default.
bound_action.update(self.request, datum)
# Pre-create the URL for this link with appropriate parameters
if issubclass(bound_action.__class__, LinkAction):
bound_action.bound_url = bound_action.get_link_url(datum)
bound_actions.append(bound_action)
return bound_actions
def render_table_actions(self):
""" Renders the actions specified in ``Meta.table_actions``. """
template_path = self._meta.table_actions_template
table_actions_template = template.loader.get_template(template_path)
bound_actions = self.get_table_actions()
extra_context = {"table_actions": bound_actions}
if self._meta.filter and \
self._filter_action(self._meta._filter_action, self.request):
extra_context["filter"] = self._meta._filter_action
context = template.RequestContext(self.request, extra_context)
return table_actions_template.render(context)
def render_row_actions(self, datum):
"""
Renders the actions specified in ``Meta.row_actions`` using the
current row data. """
template_path = self._meta.row_actions_template
row_actions_template = template.loader.get_template(template_path)
bound_actions = self.get_row_actions(datum)
extra_context = {"row_actions": bound_actions,
"row_id": self.get_object_id(datum)}
context = template.RequestContext(self.request, extra_context)
return row_actions_template.render(context)
@staticmethod
def parse_action(action_string):
"""
Parses the ``action`` parameter (a string) sent back with the
POST data. By default this parses a string formatted as
``{{ table_name }}__{{ action_name }}__{{ row_id }}`` and returns
each of the pieces. The ``row_id`` is optional.
"""
if action_string:
bits = action_string.split(STRING_SEPARATOR)
bits.reverse()
table = bits.pop()
action = bits.pop()
try:
object_id = bits.pop()
except IndexError:
object_id = None
return table, action, object_id
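# Illustrative examples, assuming STRING_SEPARATOR (defined elsewhere
# in this module) is "__"; the table, action and id values are made up:
#
#     >>> DataTable.parse_action("instances__delete__some-id")
#     ('instances', 'delete', 'some-id')
#     >>> DataTable.parse_action("instances__launch")
#     ('instances', 'launch', None)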
def take_action(self, action_name, obj_id=None, obj_ids=None):
"""
Locates the appropriate action and routes the object
data to it. The action should return an HTTP redirect
if successful, or a value which evaluates to ``False``
if unsuccessful.
"""
# See if we have a list of ids
obj_ids = obj_ids or self.request.POST.getlist('object_ids')
action = self.base_actions.get(action_name, None)
if not action or action.method != self.request.method:
# We either didn't get an action or we're being hacked. Goodbye.
return None
# Meanwhile, back in Gotham...
if not action.requires_input or obj_id or obj_ids:
if obj_id:
obj_id = self.sanitize_id(obj_id)
if obj_ids:
obj_ids = [self.sanitize_id(i) for i in obj_ids]
# Single handling is easy
if not action.handles_multiple:
response = action.single(self, self.request, obj_id)
# Otherwise figure out what to pass along
else:
# Preference given to a specific id, since that implies
# the user selected an action for just one row.
if obj_id:
obj_ids = [obj_id]
response = action.multiple(self, self.request, obj_ids)
return response
elif action and action.requires_input and not (obj_id or obj_ids):
messages.info(self.request,
_("Please select a row before taking that action."))
return None
@classmethod
def check_handler(cls, request):
""" Determine whether the request should be handled by this table. """
if request.method == "POST" and "action" in request.POST:
table, action, obj_id = cls.parse_action(request.POST["action"])
elif "table" in request.GET and "action" in request.GET:
table = request.GET["table"]
action = request.GET["action"]
obj_id = request.GET.get("obj_id", None)
else:
table = action = obj_id = None
return table, action, obj_id
def maybe_preempt(self):
"""
Determine whether the request should be handled by a preemptive action
on this table or by an AJAX row update before loading any data.
"""
request = self.request
table_name, action_name, obj_id = self.check_handler(request)
if table_name == self.name:
# Handle AJAX row updating.
new_row = self._meta.row_class(self)
if new_row.ajax and new_row.ajax_action_name == action_name:
try:
datum = new_row.get_data(request, obj_id)
new_row.load_cells(datum)
error = False
except Exception:
datum = None
error = exceptions.handle(request, ignore=True)
if request.is_ajax():
if not error:
return HttpResponse(new_row.render())
else:
return HttpResponse(status=error.status_code)
preemptive_actions = [action for action in
self.base_actions.values() if action.preempt]
if action_name:
for action in preemptive_actions:
if action.name == action_name:
handled = self.take_action(action_name, obj_id)
if handled:
return handled
return None
def maybe_handle(self):
"""
Determine whether the request should be handled by any action on this
table after data has been loaded.
"""
request = self.request
table_name, action_name, obj_id = self.check_handler(request)
if table_name == self.name and action_name:
action_names = [action.name for action in
self.base_actions.values() if not action.preempt]
# do not run preemptive actions here
if action_name in action_names:
return self.take_action(action_name, obj_id)
return None
def sanitize_id(self, obj_id):
""" Override to modify an incoming obj_id to match existing
API data types or modify the format.
"""
return obj_id
def get_object_id(self, datum):
""" Returns the identifier for the object this row will represent.
By default this returns an ``id`` attribute on the given object,
but this can be overridden to return other values.
.. warning::
Make sure that the value returned is a unique value for the id;
otherwise rendering issues can occur.
"""
return datum.id
def get_object_display(self, datum):
""" Returns a display name that identifies this object.
By default, this returns a ``name`` attribute from the given object,
but this can be overridden to return other values.
"""
if hasattr(datum, 'name'):
return datum.name
return None
def has_more_data(self):
"""
Returns a boolean value indicating whether there is more data
available to this table from the source (generally an API).
The method is largely meant for internal use, but if you want to
override it to provide custom behavior you can do so at your own risk.
"""
return self._meta.has_more_data
def get_marker(self):
"""
Returns the identifier for the last object in the current data set
for APIs that use marker/limit-based paging.
"""
return http.urlquote_plus(self.get_object_id(self.data[-1]))
def get_pagination_string(self):
""" Returns the query parameter string to paginate this table. """
return "=".join([self._meta.pagination_param, self.get_marker()])
def calculate_row_status(self, statuses):
"""
Returns a boolean value determining the overall row status
based on the dictionary of column name to status mappings passed in.
By default, it uses the following logic:
#. If any statuses are ``False``, return ``False``.
#. If no statuses are ``False`` but any are ``None``, return ``None``.
#. If all statuses are ``True``, return ``True``.
This provides the greatest protection against false positives without
weighting any particular columns.
The ``statuses`` parameter is passed in as a dictionary mapping
column names to their statuses in order to allow this function to
be overridden in such a way as to weight one column's status over
another should that behavior be desired.
"""
values = statuses.values()
if any([status is False for status in values]):
return False
elif any([status is None for status in values]):
return None
else:
return True
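# Illustrative examples of the default logic (column names invented):
#
#     calculate_row_status({"status": True, "power": True})   # -> True
#     calculate_row_status({"status": True, "power": None})   # -> None
#     calculate_row_status({"status": False, "power": None})  # -> False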
def get_row_status_class(self, status):
"""
Returns a css class name determined by the status value. This class
name is used to indicate the status of the rows in the table if
any ``status_columns`` have been specified.
"""
if status is True:
return "status_up"
elif status is False:
return "status_down"
else:
return "status_unknown"
def get_columns(self):
""" Returns this table's columns including auto-generated ones."""
return self.columns.values()
def get_rows(self):
""" Return the row data for this table broken out by columns. """
rows = []
try:
for datum in self.filtered_data:
row = self._meta.row_class(self, datum)
if self.get_object_id(datum) == self.current_item_id:
self.selected = True
row.classes.append('current_selected')
rows.append(row)
except Exception:
# Exceptions can be swallowed at the template level here,
# re-raising as a TemplateSyntaxError makes them visible.
LOG.exception("Error while rendering table rows.")
exc_info = sys.exc_info()
raise template.TemplateSyntaxError, exc_info[1], exc_info[2]
return rows
|
apache-2.0
| 670,515,343,822,811,100
| 37.779723
| 79
| 0.584479
| false
| 4.554309
| false
| false
| false
|
jromang/retina-old
|
distinclude/spyderlib/interpreter.py
|
1
|
11927
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Shell Interpreter"""
import sys
import atexit
import threading
import ctypes
import os
import re
import os.path as osp
import pydoc
from subprocess import Popen, PIPE
from code import InteractiveConsole
# Local imports:
from spyderlib.utils.dochelpers import isdefined
from spyderlib.utils import encoding
# Force Python to search modules in the current directory first:
sys.path.insert(0, '')
def guess_filename(filename):
"""Guess filename"""
if osp.isfile(filename):
return filename
if not filename.endswith('.py'):
filename += '.py'
for path in [os.getcwdu()]+sys.path:
fname = osp.join(path, filename)
if osp.isfile(fname):
return fname
elif osp.isfile(fname+'.py'):
return fname+'.py'
elif osp.isfile(fname+'.pyw'):
return fname+'.pyw'
return filename
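# Illustrative sketch (hypothetical file names): assuming "foo.py"
# exists in the current working directory but "foo" does not,
#
#     >>> guess_filename("foo")     # ".py" appended, cwd searched first
#     'foo.py'
#     >>> guess_filename("foo.py")  # exact match returned unchanged
#     'foo.py'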
class Interpreter(InteractiveConsole, threading.Thread):
"""Interpreter, executed in a separate thread"""
p1 = ">>> "
p2 = "... "
def __init__(self, namespace=None, exitfunc=None,
Output=None, WidgetProxy=None, debug=False):
"""
namespace: locals sent to the InteractiveConsole object
"""
InteractiveConsole.__init__(self, namespace)
threading.Thread.__init__(self)
self._id = None
self.exit_flag = False
self.debug = debug
# Execution Status
self.more = False
if exitfunc is not None:
atexit.register(exitfunc)
self.namespace = self.locals
self.namespace['__name__'] = '__main__'
self.namespace['execfile'] = self.execfile
self.namespace['runfile'] = self.runfile
self.namespace['help'] = self.help_replacement
# Capture all interactive input/output
self.initial_stdout = sys.stdout
self.initial_stderr = sys.stderr
self.initial_stdin = sys.stdin
# Create communication pipes
pr, pw = os.pipe()
self.stdin_read = os.fdopen(pr, "r")
self.stdin_write = os.fdopen(pw, "w", 0)
self.stdout_write = Output()
self.stderr_write = Output()
self.widget_proxy = WidgetProxy()
self.redirect_stds()
#------ Standard input/output
def redirect_stds(self):
"""Redirects stds"""
if not self.debug:
sys.stdout = self.stdout_write
sys.stderr = self.stderr_write
sys.stdin = self.stdin_read
def restore_stds(self):
"""Restore stds"""
if not self.debug:
sys.stdout = self.initial_stdout
sys.stderr = self.initial_stderr
sys.stdin = self.initial_stdin
def help_replacement(self, text=None, interactive=False):
"""For help() support"""
if text is not None and not interactive:
return pydoc.help(text)
elif text is None:
pyver = "%d.%d" % (sys.version_info[0], sys.version_info[1])
self.write("""
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://www.python.org/doc/tut/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
""" % pyver)
else:
text = text.strip()
try:
eval("pydoc.help(%s)" % text)
except (NameError, SyntaxError):
print "no Python documentation found for '%r'" % text
self.write(os.linesep)
self.widget_proxy.new_prompt("help> ")
inp = self.raw_input()
if inp.strip():
self.help_replacement(inp, interactive=True)
else:
self.write("""
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
""")
def run_command(self, cmd, new_prompt=True):
"""Run command in interpreter"""
if cmd == 'exit()':
self.exit_flag = True
self.write('\n')
return
# -- Special commands type I
# (transformed into commands executed in the interpreter)
# ? command
special_pattern = r"^%s (?:r\')?(?:u\')?\"?\'?([a-zA-Z0-9_\.]+)"
run_match = re.match(special_pattern % 'run', cmd)
help_match = re.match(r'^([a-zA-Z0-9_\.]+)\?$', cmd)
cd_match = re.match(r"^\!cd \"?\'?([a-zA-Z0-9_ \.]+)", cmd)
if help_match:
cmd = 'help(%s)' % help_match.group(1)
# run command
elif run_match:
filename = guess_filename(run_match.groups()[0])
cmd = 'runfile(r"%s", args=None)' % filename
# !cd system command
elif cd_match:
cmd = 'import os; os.chdir(r"%s")' % cd_match.groups()[0].strip()
# -- End of Special commands type I
# -- Special commands type II
# (don't need code execution in interpreter)
xedit_match = re.match(special_pattern % 'xedit', cmd)
edit_match = re.match(special_pattern % 'edit', cmd)
clear_match = re.match(r"^clear ([a-zA-Z0-9_, ]+)", cmd)
# (external) edit command
if xedit_match:
filename = guess_filename(xedit_match.groups()[0])
self.widget_proxy.edit(filename, external_editor=True)
# local edit command
elif edit_match:
filename = guess_filename(edit_match.groups()[0])
if osp.isfile(filename):
self.widget_proxy.edit(filename)
else:
self.stderr_write.write(
"No such file or directory: %s\n" % filename)
# remove reference (equivalent to MATLAB's clear command)
elif clear_match:
varnames = clear_match.groups()[0].replace(' ', '').split(',')
for varname in varnames:
try:
self.namespace.pop(varname)
except KeyError:
pass
# Execute command
elif cmd.startswith('!'):
# System ! command
pipe = Popen(cmd[1:], shell=True,
stdin=PIPE, stderr=PIPE, stdout=PIPE)
txt_out = encoding.transcode( pipe.stdout.read() )
txt_err = encoding.transcode( pipe.stderr.read().rstrip() )
if txt_err:
self.stderr_write.write(txt_err)
if txt_out:
self.stdout_write.write(txt_out)
self.stdout_write.write('\n')
self.more = False
# -- End of Special commands type II
else:
# Command executed in the interpreter
# self.widget_proxy.set_readonly(True)
self.more = self.push(cmd)
# self.widget_proxy.set_readonly(False)
if new_prompt:
self.widget_proxy.new_prompt(self.p2 if self.more else self.p1)
if not self.more:
self.resetbuffer()
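# Illustrative examples of the special command forms recognized above
# (file and variable names are hypothetical):
#
#     run myscript    ->  runfile(r"myscript.py", args=None)
#     numpy?          ->  help(numpy)
#     !cd some_dir    ->  import os; os.chdir(r"some_dir")
#     xedit myscript  ->  opens "myscript.py" in an external editor
#     clear a, b      ->  removes "a" and "b" from the namespace
#     !ls             ->  runs "ls" in a subprocess and echoes output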
def run(self):
"""Wait for input and run it"""
while not self.exit_flag:
self.run_line()
def run_line(self):
line = self.stdin_read.readline()
if self.exit_flag:
return
# Remove last character which is always '\n':
self.run_command(line[:-1])
def get_thread_id(self):
"""Return thread id"""
if self._id is None:
for thread_id, obj in threading._active.items():
if obj is self:
self._id = thread_id
return self._id
def raise_keyboard_interrupt(self):
if self.isAlive():
ctypes.pythonapi.PyThreadState_SetAsyncExc(self.get_thread_id(),
ctypes.py_object(KeyboardInterrupt))
return True
else:
return False
def closing(self):
"""Actions to be done before restarting this interpreter"""
pass
def execfile(self, filename):
"""Exec filename"""
source = open(filename, 'r').read()
try:
try:
name = filename.encode('ascii')
except UnicodeEncodeError:
name = '<executed_script>'
code = compile(source, name, "exec")
except (OverflowError, SyntaxError):
InteractiveConsole.showsyntaxerror(self, filename)
else:
self.runcode(code)
def runfile(self, filename, args=None):
"""
Run filename
args: command line arguments (string)
"""
if args is not None and not isinstance(args, basestring):
raise TypeError("expected a character buffer object")
self.namespace['__file__'] = filename
sys.argv = [filename]
if args is not None:
for arg in args.split():
sys.argv.append(arg)
self.execfile(filename)
sys.argv = ['']
self.namespace.pop('__file__')
def eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
assert isinstance(text, (str, unicode))
try:
return eval(text, self.locals), True
except:
return None, False
def is_defined(self, objtxt, force_import=False):
"""Return True if object is defined"""
return isdefined(objtxt, force_import=force_import,
namespace=self.locals)
#===========================================================================
# InteractiveConsole API
#===========================================================================
def push(self, line):
"""
Push a line of source text to the interpreter
The line should not have a trailing newline; it may have internal
newlines. The line is appended to a buffer and the interpreter’s
runsource() method is called with the concatenated contents of the
buffer as source. If this indicates that the command was executed
or invalid, the buffer is reset; otherwise, the command is incomplete,
and the buffer is left as it was after the line was appended.
The return value is True if more input is required, False if the line
was dealt with in some way (this is the same as runsource()).
"""
return InteractiveConsole.push(self, line)
def resetbuffer(self):
"""Remove any unhandled source text from the input buffer"""
InteractiveConsole.resetbuffer(self)
|
gpl-3.0
| 547,717,842,301,860,860
| 35.26875
| 80
| 0.545035
| false
| 4.353414
| false
| false
| false
|