repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
aaugustin/websockets | tests/test_auth.py | Python | bsd-3-clause | 38 | 0 | f | rom websockets.auth import * # no | qa
|
delcypher/klee-runner | tools/invocation-info-bc-stats.py | Python | mit | 7,740 | 0.003876 | #!/usr/bin/env python
# Copyright (c) 2016, Daniel Liew
# This file is covered by the license in LICENSE-SVCB.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read an invocation info files and display information
about it.
"""
from load_klee_runner import add_KleeRunner_to_module_search_path
add_KleeRunner_to_module_search_path()
from KleeRunner import InvocationInfo
from collections import namedtuple
import argparse
import io
import logging
import os
import pprint
import re
import statistics
import subprocess
import sys
import tempfile
import yaml
_logger = None
class GlobalProgramStats:
    """Aggregated branch / symbolic-byte statistics over a set of programs.

    Every statistic defaults to ``None`` and may be seeded through keyword
    arguments (e.g. ``GlobalProgramStats(min_branches=3)``).  Attributes are
    plain and assignable; ``main()`` first uses the ``mean_*``/``median_*``/
    ``std_dev_*`` fields as accumulator lists and later replaces them with
    the computed scalar.
    """

    # Recognised statistic names, in the order dump() prints them.
    _FIELDS = (
        'min_branches', 'max_branches', 'mean_branches',
        'std_dev_branches', 'median_branches',
        'min_sym_bytes', 'max_sym_bytes', 'mean_sym_bytes',
        'std_dev_sym_bytes', 'median_sym_bytes',
    )

    def __init__(self, **kwargs):
        # Initialise every field to None, then apply keyword overrides.
        # BUGFIX: the original tested ``'std_dev_branches' is kwargs`` and
        # ``'std_dev_sym_bytes' is kwargs`` (identity against the dict,
        # always False), so those overrides never applied; it also never
        # initialised ``std_dev_sym_bytes`` when absent, which made
        # ``dump()`` raise AttributeError.
        for field in self._FIELDS:
            setattr(self, field, kwargs.get(field))

    def dump(self):
        """Print every statistic as a ``name: value`` line."""
        for field in self._FIELDS:
            print("{}: {}".format(field, getattr(self, field)))
def get_stats(program_path, bc_stats_tool):
    """Run *bc_stats_tool* on *program_path* and parse its YAML output.

    :param program_path: path to the LLVM bitcode program to analyse.
    :param bc_stats_tool: path to the bc-stats executable.
    :returns: tuple ``(num_branches, estimated_sym_bytes)``.
    """
    with tempfile.TemporaryFile() as f:
        cmd_line = [bc_stats_tool, '-entry-point=main', program_path]
        _logger.debug('Calling {}'.format(cmd_line))
        # Capture the tool's stdout in the temporary file, then rewind to
        # parse it.
        subprocess.call(cmd_line, stdout=f)
        f.seek(0, io.SEEK_SET)
        # safe_load: the tool emits plain YAML, and yaml.load() without an
        # explicit Loader is deprecated/unsafe in PyYAML >= 5.
        data = yaml.safe_load(f)
    return data['num_branches'], data['estimated_num_symbolic_bytes']
def get_augmented_spec_file(invocation_info):
    """Load and return the parsed augmented spec file referenced by an
    invocation info dictionary (path at ``misc.augmented_spec_file``).
    """
    augmented_spec_file = invocation_info['misc']['augmented_spec_file']
    with open(augmented_spec_file, 'r') as f:
        # safe_load avoids arbitrary object construction from the spec file.
        return yaml.safe_load(f)
def main(args):
global _logger
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-l", "--log-level", type=str, default="info",
dest="log_level",
choices=['debug', 'info', 'warning', 'error' | ])
parser.add_argument( | "--bc-stats",
dest='bc_stats',
type=str,
help='path to bc-stats tool',
default="bc-stats")
parser.add_argument("--categories",
nargs='+',
default=[],
help='One of more categories to keep',
)
parser.add_argument('invocation_info_file',
help='Invocation info file',
type=argparse.FileType('r'))
pargs = parser.parse_args()
logLevel = getattr(logging, pargs.log_level.upper(), None)
logging.basicConfig(level=logLevel)
_logger = logging.getLogger(__name__)
invocationInfos = InvocationInfo.loadRawInvocationInfos(
pargs.invocation_info_file)
print("schema version: {}".format(invocationInfos['schema_version']))
print("# of jobs: {}".format(len(invocationInfos['jobs'])))
gs = GlobalProgramStats()
gs.mean_branches = []
gs.std_dev_branches = []
gs.median_branches = []
gs.mean_sym_bytes = []
gs.median_sym_bytes = []
gs.std_dev_sym_bytes = []
categories = set(pargs.categories)
drop_count = 0
keep_count = 0
for info in invocationInfos['jobs']:
programPath = info['program']
if not os.path.exists(programPath):
_logger.error(
'Program path "{}" does not exist'.format(programPath))
if len(categories) > 0:
augmented_spec_file = get_augmented_spec_file(info)
infoCategories = set(augmented_spec_file['categories'])
if infoCategories.issuperset(categories):
_logger.info('Keeping {} due to being in "{}"'.format(
programPath,
categories))
else:
_logger.info('Dropping {}. "{}" is not superset of "{}"'.format(
programPath,
infoCategories,
categories))
drop_count += 1
continue
keep_count += 1
num_branches, estimated_sym_bytes = get_stats(programPath, pargs.bc_stats)
# Partially compute stats using num_branches
if gs.min_branches == None or gs.min_branches > num_branches:
gs.min_branches = num_branches
_logger.info('"{}" had smaller number of branches: {}'.format(
programPath,
num_branches))
if gs.max_branches == None or gs.max_branches < num_branches:
gs.max_branches = num_branches
_logger.info('"{}" had larger number of branches: {}'.format(
programPath,
num_branches))
gs.mean_branches.append(num_branches)
gs.median_branches.append(num_branches)
gs.std_dev_branches.append(num_branches)
# Partially compute stats using estimated_sym_bytes
if gs.min_sym_bytes == None or gs.min_sym_bytes > estimated_sym_bytes:
gs.min_sym_bytes = estimated_sym_bytes
_logger.info('"{}" had smaller number of sym bytes: {}'.format(
programPath,
estimated_sym_bytes))
if gs.max_sym_bytes == None or gs.max_sym_bytes < estimated_sym_bytes:
gs.max_sym_bytes = estimated_sym_bytes
_logger.info('"{}" had larger number of sym bytes: {}'.format(
programPath,
estimated_sym_bytes))
gs.mean_sym_bytes.append(estimated_sym_bytes)
gs.median_sym_bytes.append(estimated_sym_bytes)
gs.std_dev_sym_bytes.append(estimated_sym_bytes)
# Now compute mean/medians
gs.mean_branches = statistics.mean(gs.mean_branches)
gs.std_dev_branches = statistics.stdev(gs.std_dev_branches)
gs.median_branches = statistics.median(gs.median_branches)
gs.mean_sym_bytes = statistics.mean(gs.mean_sym_bytes)
gs.std_dev_sym_bytes = statistics.stdev(gs.std_dev_sym_bytes)
gs.median_sym_bytes = statistics.median(gs.median_sym_bytes)
# Output
print("drop_count: {}".format(drop_count))
print("keep_count: {}".format(keep_count))
gs.dump()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Kovak/KivyNBT | flat_kivy/fa_icon_definitions.py | Python | mit | 15,461 | 0.000065 | fa_icons = {
'fa-glass': u"\uf000",
'fa-music': u"\uf001",
'fa-search': u"\uf002",
'fa-envelope-o': u"\uf003",
'fa-heart': u"\uf004",
'fa-star': u"\uf005",
'fa-star-o': u"\uf006",
'fa-user': u"\uf007",
'fa-film': u"\uf008",
'fa-th-large': u"\uf009",
'fa-th': u"\uf00a",
'fa-th-list': u"\uf00b",
'fa-check': u"\uf00c",
'fa-times': u"\uf00d",
'fa-search-plus': u"\uf00e",
'fa-search-minus': u"\uf010",
'fa-power-off': u"\uf011",
'fa-signal': u"\uf012",
'fa-gear': u"\uf013",
'fa-cog': u"\uf013",
'fa-trash-o': u"\uf014",
'fa-home': u"\uf015",
'fa-file-o': u"\uf016",
'fa-clock-o': u"\uf017",
'fa-road': u"\uf018",
'fa-download': u"\uf019",
'fa-arrow-circle-o-down': u"\uf01a",
'fa-arrow-circle-o-up': u"\uf01b",
'fa-inbox': u"\uf01c",
'fa-play-circle-o': u"\uf01d",
'fa-rotate-right': u"\uf01e",
'fa-repeat': u"\uf01e",
'fa-refresh': u"\uf021",
'fa-list-alt': u"\uf022",
'fa-lock': u"\uf023",
'fa-flag': u"\uf024",
'fa-headphones': u"\uf025",
'fa-volume-off': u"\uf026",
'fa-volume-down': u"\uf027",
'fa-volume-up': u"\uf028",
'fa-qrcode': u"\uf029",
'fa-barcode': u"\uf02a",
'fa-tag': u"\uf02b",
'fa-tags': u"\uf02c",
'fa-book': u"\uf02d",
'fa-bookmark': u"\uf02e",
'fa-print': u"\uf02f",
'fa-camera': u"\uf030",
'fa-font': u"\uf031",
'fa-bold': u"\uf032",
'fa-italic': u"\uf033",
'fa-text-height': u"\uf034",
'fa-text-width': u"\uf035",
'fa-align-left': u"\uf036",
'fa-align-center': u"\uf037",
'fa-align-right': u"\uf038",
'fa-align-justify': u"\uf039",
'fa-list': u"\uf03a",
'fa-dedent': u"\uf03b",
'fa-outdent': u"\uf03b",
'fa-indent': u"\uf03c",
'fa-video-camera': u"\uf03d",
'fa-photo': u"\uf03e",
'fa-image': u"\uf03e",
'fa-picture-o': u"\uf03e",
'fa-pencil': u"\uf040",
'fa-map-marker': u"\uf041",
'fa-adjust': u"\uf042",
'fa-tint': u"\uf043",
'fa-edit': u"\uf044",
'fa-pencil-square-o': u"\uf044",
'fa-share-square-o': u"\uf045",
'fa-check-square-o': u"\uf046",
'fa-arrows': u"\uf047",
'fa-step-backward': u"\uf048",
'fa-fast-backward': u"\uf049",
'fa-backward': u"\uf04a",
'fa-play': u"\uf04b",
'fa-pause': u"\uf04c",
'fa-stop': u"\uf04d",
'fa-forward': u"\uf04e",
'fa-fast-forward': u"\uf050",
'fa-step-forward': u"\uf051",
'fa-eject': u"\uf052",
'fa-chevron-left': u"\uf053",
'fa-chevron-right': u"\uf054",
'fa-plus-circle': u"\uf055",
'fa-minus-circle': u"\uf056",
'fa-times-circle': u"\uf057",
'fa-check-circle': u"\uf058",
'fa-question-circle': u"\uf059",
'fa-info-circle': u"\uf05a",
'fa-crosshairs': u"\uf05b",
'fa-times-circle-o': u"\uf05c",
'fa-check-circle-o': u"\uf | 05d",
'fa-ban': u"\uf05e",
'fa-arrow-left': u"\uf060",
'fa-arrow-right': u"\uf061",
'fa-arrow-up': u"\uf062",
'fa-arrow-down': u"\uf063",
'fa-mail-forward': u"\uf064",
'fa-share': u"\uf064",
'fa-expand': u"\uf065" | ,
'fa-compress': u"\uf066",
'fa-plus': u"\uf067",
'fa-minus': u"\uf068",
'fa-asterisk': u"\uf069",
'fa-exclamation-circle': u"\uf06a",
'fa-gift': u"\uf06b",
'fa-leaf': u"\uf06c",
'fa-fire': u"\uf06d",
'fa-eye': u"\uf06e",
'fa-eye-slash': u"\uf070",
'fa-warning': u"\uf071",
'fa-exclamation-triangle': u"\uf071",
'fa-plane': u"\uf072",
'fa-calendar': u"\uf073",
'fa-random': u"\uf074",
'fa-comment': u"\uf075",
'fa-magnet': u"\uf076",
'fa-chevron-up': u"\uf077",
'fa-chevron-down': u"\uf078",
'fa-retweet': u"\uf079",
'fa-shopping-cart': u"\uf07a",
'fa-folder': u"\uf07b",
'fa-folder-open': u"\uf07c",
'fa-arrows-v': u"\uf07d",
'fa-arrows-h': u"\uf07e",
'fa-bar-chart-o': u"\uf080",
'fa-twitter-square': u"\uf081",
'fa-facebook-square': u"\uf082",
'fa-camera-retro': u"\uf083",
'fa-key': u"\uf084",
'fa-gears': u"\uf085",
'fa-cogs': u"\uf085",
'fa-comments': u"\uf086",
'fa-thumbs-o-up': u"\uf087",
'fa-thumbs-o-down': u"\uf088",
'fa-star-half': u"\uf089",
'fa-heart-o': u"\uf08a",
'fa-sign-out': u"\uf08b",
'fa-linkedin-square': u"\uf08c",
'fa-thumb-tack': u"\uf08d",
'fa-external-link': u"\uf08e",
'fa-sign-in': u"\uf090",
'fa-trophy': u"\uf091",
'fa-github-square': u"\uf092",
'fa-upload': u"\uf093",
'fa-lemon-o': u"\uf094",
'fa-phone': u"\uf095",
'fa-square-o': u"\uf096",
'fa-bookmark-o': u"\uf097",
'fa-phone-square': u"\uf098",
'fa-twitter': u"\uf099",
'fa-facebook': u"\uf09a",
'fa-github': u"\uf09b",
'fa-unlock': u"\uf09c",
'fa-credit-card': u"\uf09d",
'fa-rss': u"\uf09e",
'fa-hdd-o': u"\uf0a0",
'fa-bullhorn': u"\uf0a1",
'fa-bell': u"\uf0f3",
'fa-certificate': u"\uf0a3",
'fa-hand-o-right': u"\uf0a4",
'fa-hand-o-left': u"\uf0a5",
'fa-hand-o-up': u"\uf0a6",
'fa-hand-o-down': u"\uf0a7",
'fa-arrow-circle-left': u"\uf0a8",
'fa-arrow-circle-right': u"\uf0a9",
'fa-arrow-circle-up': u"\uf0aa",
'fa-arrow-circle-down': u"\uf0ab",
'fa-globe': u"\uf0ac",
'fa-wrench': u"\uf0ad",
'fa-tasks': u"\uf0ae",
'fa-filter': u"\uf0b0",
'fa-briefcase': u"\uf0b1",
'fa-arrows-alt': u"\uf0b2",
'fa-group': u"\uf0c0",
'fa-users': u"\uf0c0",
'fa-chain': u"\uf0c1",
'fa-link': u"\uf0c1",
'fa-cloud': u"\uf0c2",
'fa-flask': u"\uf0c3",
'fa-cut': u"\uf0c4",
'fa-scissors': u"\uf0c4",
'fa-copy': u"\uf0c5",
'fa-files-o': u"\uf0c5",
'fa-paperclip': u"\uf0c6",
'fa-save': u"\uf0c7",
'fa-floppy-o': u"\uf0c7",
'fa-square': u"\uf0c8",
'fa-navicon': u"\uf0c9",
'fa-reorder': u"\uf0c9",
'fa-bars': u"\uf0c9",
'fa-list-ul': u"\uf0ca",
'fa-list-ol': u"\uf0cb",
'fa-strikethrough': u"\uf0cc",
'fa-underline': u"\uf0cd",
'fa-table': u"\uf0ce",
'fa-magic': u"\uf0d0",
'fa-truck': u"\uf0d1",
'fa-pinterest': u"\uf0d2",
'fa-pinterest-square': u"\uf0d3",
'fa-google-plus-square': u"\uf0d4",
'fa-google-plus': u"\uf0d5",
'fa-money': u"\uf0d6",
'fa-caret-down': u"\uf0d7",
'fa-caret-up': u"\uf0d8",
'fa-caret-left': u"\uf0d9",
'fa-caret-right': u"\uf0da",
'fa-columns': u"\uf0db",
'fa-unsorted': u"\uf0dc",
'fa-sort': u"\uf0dc",
'fa-sort-down': u"\uf0dd",
'fa-sort-desc': u"\uf0dd",
'fa-sort-up': u"\uf0de",
'fa-sort-asc': u"\uf0de",
'fa-envelope': u"\uf0e0",
'fa-linkedin': u"\uf0e1",
'fa-rotate-left': u"\uf0e2",
'fa-undo': u"\uf0e2",
'fa-legal': u"\uf0e3",
'fa-gavel': u"\uf0e3",
'fa-dashboard': u"\uf0e4",
'fa-tachometer': u"\uf0e4",
'fa-comment-o': u"\uf0e5",
'fa-comments-o': u"\uf0e6",
'fa-flash': u"\uf0e7",
'fa-bolt': u"\uf0e7",
'fa-sitemap': u"\uf0e8",
'fa-umbrella': u"\uf0e9",
'fa-paste': u"\uf0ea",
'fa-clipboard': u"\uf0ea",
'fa-lightbulb-o': u"\uf0eb",
'fa-exchange': u"\uf0ec",
'fa-cloud-download': u"\uf0ed",
'fa-cloud-upload': u"\uf0ee",
'fa-user-md': u"\uf0f0",
'fa-stethoscope': u"\uf0f1",
'fa-suitcase': u"\uf0f2",
'fa-bell-o': u"\uf0a2",
'fa-coffee': u"\uf0f4",
'fa-cutlery': u"\uf0f5",
'fa-file-text-o': u"\uf0f6",
'fa-building-o': u"\uf0f7",
'fa-hospital-o': u"\uf0f8",
'fa-ambulance': u"\uf0f9",
'fa-medkit': u"\uf0fa",
'fa-fighter-jet': u"\uf0fb",
'fa-beer': u"\uf0fc",
'fa-h-square': u"\uf0fd",
'fa-plus-square': u"\uf0fe",
'fa-angle-double-left': u"\uf100",
'fa-angle-double-right': u"\uf101",
'fa-angle-double-up': u"\uf102",
'fa-angle-double-down': u"\uf103",
'fa-angle-left': u"\uf104",
'fa-angle-right': u"\uf105",
'fa-angle-up': u"\uf106",
'fa-angle-down': u"\uf107",
'fa-desktop': u"\uf108",
'fa-laptop': u"\uf109",
'fa-tablet': u"\uf10a",
'fa-mobile-phone': u"\uf10b",
'fa-mobile': u"\uf10b",
'fa-circle-o': u"\uf10c",
'fa-quote-left': u"\uf10d",
'fa-quote-right': u"\uf10e",
'fa-spinner': u"\uf110",
|
rpappalax/ff-tool | fftool/firefox_run.py | Python | mpl-2.0 | 849 | 0 | import os
from outlawg import Outlawg
from fftool import (
DIR_CONFIGS,
local
)
from ini_handler import IniHandler
Log = Outlawg()
env = IniHandler()
env.load_os_config(DIR_CONFIGS)
def launch_firefox(profile_path, channel, logging, nspr_log_modules=''):
    """Launch Firefox for *channel* with the given profile.

    Relies on the other functions (download, install, profile) having
    completed.

    :param profile_path: path to the prepared Firefox profile directory.
    :param channel: release channel name, used to look up the binary path.
    :param logging: forwarded to ``local()`` to control output capture.
    :param nspr_log_modules: if non-empty, exported as NSPR_LOG_MODULES to
        enable Firefox's NSPR logging.
    """
    FIREFOX_APP_BIN = env.get(channel, 'PATH_FIREFOX_BIN_ENV')
    Log.header('LAUNCH FIREFOX')
    print("Launching Firefox {0} with profile: {1}".format(
        channel,
        profile_path)
    )
    cmd = '"{0}" -profile "{1}"'.format(FIREFOX_APP_BIN, profile_path)
    print('CMD: ' + cmd)
    # Enable NSPR module logging for the child process if requested.
    if nspr_log_modules:
        Log.header('FIREFOX NSPR_LOG_MODULES LOGGING')
        os.environ['NSPR_LOG_MODULES'] = nspr_log_modules
    local(cmd, logging)
|
msincenselee/vnpy | vnpy/api/easytrader/remoteclient.py | Python | mit | 5,338 | 0.003075 | # -*- coding: utf-8 -*-
import requests
from .utils.misc import file2dict
from vnpy.rpc import RpcClient
TIMEOUT = 10
class RemoteClient:
def __init__(self, broker, host, port=1430, **kwargs):
self._s = requests.session()
self._api = "http://{}:{}".format(host, port)
self._broker = broker
def prepare(
self,
config_path=None,
user=None,
password=None,
exe_path=None,
comm_password=None,
**kwargs
):
"""
登陆客户端
:param config_path: 登陆配置文件,跟参数登陆方式二选一
:param user: 账号
:param password: 明文密码
:param exe_path: 客户端路径类似 r'C:\\htzqzyb2\\xiadan.exe',
默认 r'C:\\htzqzyb2\\xiadan.exe'
:param comm_password: 通讯密码
:return:
"""
params = locals().copy()
params.pop("self")
# if exe_path is None:
# params['exe_path'] = 'C:\\THS\\xi | adan.exe'
if config_path is not None:
account = file2dict(config_path)
params["user"] = account["user"]
params["password"] = account["password"]
params["broker"] = self._broker
# prepare需要启动同花顺客户端,需要的时间比较长,所以超时给长一些时间
response = self._s.post(self._api + "/prepare", json=params, tim | eout=60)
if response.status_code >= 300:
raise Exception(response.json()["error"])
return response.json()
@property
def balance(self):
return self.common_get("balance")
@property
def position(self):
return self.common_get("position")
@property
def today_entrusts(self):
return self.common_get("today_entrusts")
@property
def today_trades(self):
return self.common_get("today_trades")
@property
def cancel_entrusts(self):
return self.common_get("cancel_entrusts")
def auto_ipo(self):
return self.common_get("auto_ipo")
def exit(self):
return self.common_get("exit")
def common_get(self, endpoint):
response = self._s.get(self._api + "/" + endpoint, timeout=TIMEOUT)
if response.status_code >= 300:
print(Exception(response.json()["error"]))
return response.json()
def buy(self, security, price, amount, **kwargs):
params = locals().copy()
params.pop("self")
response = self._s.post(self._api + "/buy", json=params, timeout=TIMEOUT)
if response.status_code >= 300:
raise Exception(response.json()["error"])
return response.json()
def sell(self, security, price, amount, **kwargs):
params = locals().copy()
params.pop("self")
response = self._s.post(self._api + "/sell", json=params, timeout=TIMEOUT)
if response.status_code >= 300:
raise Exception(response.json()["error"])
return response.json()
def cancel_entrust(self, entrust_no):
params = locals().copy()
params.pop("self")
response = self._s.post(self._api + "/cancel_entrust", json=params, timeout=TIMEOUT)
if response.status_code >= 300:
raise Exception(response.json()["error"])
return response.json()
###########
# written by 黄健威
# 以下是新增加的ZMQ Client
# 整个接口对外保持和原来的一致
# 通过对原requests接口的“鸭子类型替换”来实现透明化
def use(broker, host, port=1430, use_zmq=False, **kwargs):
    """Factory returning a trading client for *broker* at *host*:*port*.

    :param use_zmq: select the ZMQ transport instead of plain HTTP.
    Extra keyword arguments are forwarded to the client constructor
    (previously they were accepted but silently discarded).
    """
    client_cls = ZMQRemoteClient if use_zmq else RemoteClient
    return client_cls(broker, host, port, **kwargs)
class ZMQResponse(object):
    """Minimal stand-in for a ``requests`` response object.

    Carries a status code and a payload, exposing the payload through
    ``json()`` exactly like the real thing.
    """

    def __init__(self, status_code, data) -> None:
        self.status_code = status_code
        self.data = data

    def json(self):
        """Return the payload, mimicking requests.Response.json()."""
        return self.data
class MyRpcClient(RpcClient):
    # Subclass of vnpy's RpcClient that silences the idle notice: the stock
    # client reports when no request has been made for 30 seconds after the
    # previous one; overriding on_disconnected with a no-op suppresses that
    # output.
    def on_disconnected(self):
        # Intentionally empty: swallow the idle/disconnect notification.
        pass
class ZMQSession(object):
    """Duck-type replacement for ``requests.Session`` backed by vnpy RPC.

    Exposes ``post``/``get`` with the same call shape so RemoteClient can
    use either transport transparently.
    """

    def __init__(self, host, port) -> None:
        # REQ socket on ``port``; SUB socket on ``port + 1``.
        self._rpc_client = MyRpcClient()
        self._rpc_client.start(
            "tcp://{}:{}".format(host, port),
            "tcp://{}:{}".format(host, port + 1),
        )

    def post(self, url, json=None, timeout=10):
        # The RPC function name is the last path segment of the URL.
        func_name = url.rsplit("/", 1)[-1]
        data, status_code = self._rpc_client.call_func(func_name, json)
        return ZMQResponse(status_code, data)

    def get(self, url, json=None, timeout=10):
        # GET is served identically to POST over the RPC channel.
        return self.post(url, json, timeout)

    def __del__(self):
        # Explicitly stop the background ZMQ thread during teardown so the
        # process cannot deadlock waiting on it.
        self._rpc_client.stop()
class ZMQRemoteClient(RemoteClient):
    """RemoteClient variant whose requests session is swapped for ZMQSession.

    All inherited endpoint methods keep working because ZMQSession mimics
    the ``post``/``get`` interface of ``requests``.
    """

    def __init__(self, broker, host, port=1430, **kwargs):
        # Deliberately does NOT call RemoteClient.__init__ (which would
        # build a real requests session); the three attributes the base
        # class relies on are set up by hand instead.
        self._broker = broker
        self._api = ""  # URL prefix is meaningless over ZMQ
        self._s = ZMQSession(host, port)

    def __del__(self):
        # Drop the session so its own __del__ stops the ZMQ thread.
        del self._s
carolFrohlich/nipype | nipype/interfaces/utility.py | Python | bsd-3-clause | 22,277 | 0.001481 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Various utilities
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import zip, range, str, open
from future import standard_library
standard_library.install_aliases()
import os
import re
import numpy as np
import nibabel as nb
from nipype import logging
from .base import (traits, TraitedSpec, DynamicTraitedSpec, File,
Undefined, isdefined, OutputMultiPath, runtime_profile,
InputMultiPath, BaseInterface, BaseInterfaceInputSpec)
from .io import IOBase, add_traits
from ..utils.filemanip import (filename_to_list, copyfile, split_filename)
from ..utils.misc import getsource, create_function_from_source
logger = logging.getLogger('interface')
if runtime_profile:
try:
import psutil
except ImportError as exc:
logger.info('Unable to import packages needed for runtime profiling. '\
'Turning off runtime profiler. Reason: %s' % exc)
runtime_profile = False
class IdentityInterface(IOBase):
    """Basic interface class generates identity mappings
    Examples
    --------
    >>> from nipype.interfaces.utility import IdentityInterface
    >>> ii = IdentityInterface(fields=['a', 'b'], mandatory_inputs=False)
    >>> ii.inputs.a
    <undefined>
    >>> ii.inputs.a = 'foo'
    >>> out = ii._outputs()
    >>> out.a
    <undefined>
    >>> out = ii.run()
    >>> out.outputs.a # doctest: +IGNORE_UNICODE
    'foo'
    >>> ii2 = IdentityInterface(fields=['a', 'b'], mandatory_inputs=True)
    >>> ii2.inputs.a = 'foo'
    >>> out = ii2.run() # doctest: +SKIP
    ValueError: IdentityInterface requires a value for input 'b' because it was listed in 'fields' Interface IdentityInterface failed to run.
    """
    input_spec = DynamicTraitedSpec
    output_spec = DynamicTraitedSpec
    def __init__(self, fields=None, mandatory_inputs=True, **inputs):
        super(IdentityInterface, self).__init__(**inputs)
        # 'fields' names both the dynamic input and output traits.
        if fields is None or not fields:
            raise ValueError('Identity Interface fields must be a non-empty list')
        # Each input must be in the fields.
        for in_field in inputs:
            if in_field not in fields:
                raise ValueError('Identity Interface input is not in the fields: %s' % in_field)
        self._fields = fields
        self._mandatory_inputs = mandatory_inputs
        add_traits(self.inputs, fields)
        # Adding any traits wipes out all input values set in superclass initialization,
        # even it the trait is not in the add_traits argument. The work-around is to reset
        # the values after adding the traits.
        self.inputs.set(**inputs)
    def _add_output_traits(self, base):
        # Mirror the input fields onto the output spec.
        return add_traits(base, self._fields)
    def _list_outputs(self):
        # manual mandatory inputs check
        if self._fields and self._mandatory_inputs:
            for key in self._fields:
                value = getattr(self.inputs, key)
                if not isdefined(value):
                    msg = "%s requires a value for input '%s' because it was listed in 'fields'. \
You can turn off mandatory inputs checking by passing mandatory_inputs = False to the constructor." % \
                    (self.__class__.__name__, key)
                    raise ValueError(msg)
        outputs = self._outputs().get()
        # Copy every defined input straight through to the output of the
        # same name (the identity mapping).
        for key in self._fields:
            val = getattr(self.inputs, key)
            if isdefined(val):
                outputs[key] = val
        return outputs
class MergeInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
    # Input traits for Merge; the in1..inN traits are added dynamically by
    # Merge.__init__ based on its numinputs argument.
    axis = traits.Enum('vstack', 'hstack', usedefault=True,
                       desc='direction in which to merge, hstack requires same number of elements in each input')
    no_flatten = traits.Bool(False, usedefault=True, desc='append to outlist instead of extending in vstack mode')
class MergeOutputSpec | (TraitedSpec):
out = traits.List(desc='Merged output')
class Merge(IOBase):
    """Basic interface class to merge inputs into a single list
    Examples
    --------
    >>> from nipype.interfaces.utility import Merge
    >>> mi = Merge(3)
    >>> mi.inputs.in1 = 1
    >>> mi.inputs.in2 = [2, 5]
    >>> mi.inputs.in3 = 3
    >>> out = mi.run()
    >>> out.outputs.out
    [1, 2, 5, 3]
    """
    input_spec = MergeInputSpec
    output_spec = MergeOutputSpec
    def __init__(self, numinputs=0, **inputs):
        super(Merge, self).__init__(**inputs)
        self._numinputs = numinputs
        # Create one dynamic input trait per requested input: in1..inN.
        add_traits(self.inputs, ['in%d' % (i + 1) for i in range(numinputs)])
    def _list_outputs(self):
        outputs = self._outputs().get()
        out = []
        if self.inputs.axis == 'vstack':
            # vstack: concatenate inputs into one flat list; list-valued
            # inputs are extended in place unless no_flatten is set.
            for idx in range(self._numinputs):
                value = getattr(self.inputs, 'in%d' % (idx + 1))
                if isdefined(value):
                    if isinstance(value, list) and not self.inputs.no_flatten:
                        out.extend(value)
                    else:
                        out.append(value)
        else:
            # hstack: transpose — out[i] collects element i from each input,
            # so all inputs must have the same number of elements.
            for i in range(len(filename_to_list(self.inputs.in1))):
                out.insert(i, [])
                for j in range(self._numinputs):
                    out[i].append(filename_to_list(getattr(self.inputs, 'in%d' % (j + 1)))[i])
        if out:
            outputs['out'] = out
        return outputs
class RenameInputSpec(DynamicTraitedSpec):
    # Trait definitions for Rename: in_file is re-linked under a name built
    # from format_string, optionally parameterised via parse_string regex
    # groups matched against the input filename.
    in_file = File(exists=True, mandatory=True, desc="file to rename")
    keep_ext = traits.Bool(desc=("Keep in_file extension, replace "
                                 "non-extension component of name"))
    format_string = traits.String(mandatory=True,
                                  desc=("Python formatting string for output "
                                        "template"))
    parse_string = traits.String(desc=("Python regexp parse string to define "
                                       "replacement inputs"))
    use_fullpath = traits.Bool(False, usedefault=True,
                               desc="Use full path as input to regex parser")
class RenameOutputSpec(TraitedSpec):
    # Output trait: path of the newly named (sym)link to the input file.
    out_file = traits.File(exists=True, desc="softlink to original file with new name")
class Rename(IOBase):
"""Change the name of a file based on a mapped format string.
To use additional inputs that will be defined at run-time, the class
constructor must be called with the format template, and the fields
identified will become inputs to the interface.
Additionally, you may set the parse_string input, which will be run
over the input filename with a regular expressions search, and will
fill in additional input fields from matched groups. Fields set with
inputs have precedence over fields filled in with the regexp match.
Examples
--------
>>> from nipype.interfaces.utility import Rename
>>> rename1 = Rename()
>>> rename1.inputs.in_file = "zstat1.nii.gz"
>>> rename1.inputs.format_string = "Faces-Scenes.nii.gz"
>>> res = rename1.run() # doctest: +SKIP
>>> res.outputs.out_file # doctest: +SKIP
'Faces-Scenes.nii.gz" # doctest: +SKIP
>>> rename2 = Rename(format_string="%(subject_id)s_func_run%(run)02d")
>>> rename2.inputs.in_file = "functional.nii"
>>> rename2.inputs.keep_ext = True
>>> rename2.inputs.subject_id = "subj_201"
>>> rename2.inputs.run = 2
>>> res = rename2.run() # doctest: +SKIP
>>> res.outputs.out_file # doctest: +SKIP
'subj_201_func_run02.nii' # doctest: +SKIP
>>> rename3 = Rename(format_string="%(subject_id)s_%(seq)s_run%(run)02d.nii")
>>> rename3.inputs.in_file = "func_epi_1_1.nii"
>>> rename3.inputs.parse_string = "func_(?P<seq>\w*)_.*" |
vhf/django-disqus | disqus/templatetags/disqus_tags.py | Python | bsd-3-clause | 4,607 | 0.005644 | from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils.functional import curry
from django.utils.encoding import force_unicode
register = template.Library()
class ContextSetterNode(template.Node):
    """Template node that assigns a value to ``var_name`` in the rendering
    context and renders to an empty string.
    """
    def __init__(self, var_name, var_value):
        # var_value is the list of raw tag arguments produced by
        # generic_setter_compiler, or a single raw value.
        self.var_name = var_name
        self.var_value = var_value
    def _get_value(self, value, context):
        """
        Attempts to resolve the value as a variable. Failing that, it returns
        its actual value
        """
        try:
            var_value = template.Variable(value).resolve(context)
        except template.VariableDoesNotExist:
            # NOTE(review): the fallback reads ``self.var_value.var`` rather
            # than ``value`` — presumably relying on Variable's ``var``
            # attribute; confirm this is correct when var_value is a list.
            var_value = self.var_value.var
        return var_value
    def render(self, context):
        # Multiple tag arguments are resolved individually and concatenated.
        if isinstance(self.var_value, (list, tuple)):
            var_value = ''.join([force_unicode(self._get_value(x, context)) for x in self.var_value])
        else:
            var_value = self._get_value(self.var_value, context)
        context[self.var_name] = var_value
        # Setter tags produce no output of their own.
        return ''
def generic_setter_compiler(var_name, name, node_class, parser, token):
    """Shared compile function for all ``set_*`` template tags.

    Builds a *node_class* (normally ContextSetterNode) that assigns
    ``var_name`` in the template context, for calls like
    ``{% set_this_value "My Value" %}``.
    """
    pieces = token.split_contents()
    if len(pieces) >= 2:
        # pieces[0] is the tag name itself; everything after it is the value.
        return node_class(var_name, pieces[1:])
    raise template.TemplateSyntaxError(
        "%s takes at least one argument" % name)
# Set the disqus_developer variable to 0/1. Default is 0
set_disqus_developer = curry(generic_setter_compiler, 'disqus_developer', 'set_disqus_developer', ContextSetterNode)
# Set the disqus_identifier variable to some unique value. Defaults to page's URL
set_disqus_identifier = curry(generic_setter_compiler, 'disqus_identifier', 'set_disqus_identifier', ContextSetterNode)
# Set the disqus_url variable to some value. Defaults to page's location
set_disqus_url = curry(generic_setter_compiler, 'disqus_url', 'set_disqus_url', ContextSetterNode)
# Set the disqus_title variable to some value. Defaults to page's title or URL
set_disqus_title = curry(generic_setter_compiler, 'disqus_title', 'set_disqus_title', ContextSetterNode)
def get_config(context):
    """Return the javascript assignments for any DISQUS config variables
    present in *context*, one tab-indented ``var`` statement per line.
    """
    known_vars = ('disqus_developer', 'disqus_identifier',
                  'disqus_url', 'disqus_title')
    lines = ['\tvar {0} = "{1}";'.format(name, context[name])
             for name in known_vars
             if name in context]
    return '\n'.join(lines)
def disqus_dev():
    """
    Return the HTML/js code to enable DISQUS comments on a local
    development server if settings.DEBUG is True.
    """
    # DISQUS rejects unknown hosts; setting disqus_developer=1 together
    # with the real site domain lets threads load while serving locally.
    if settings.DEBUG:
        return """<script type="text/javascript">
    var disqus_developer = 1;
    var disqus_url = 'http://%s/';
    </script>""" % Site.objects.get_current().domain
    return ""
def disqus_num_replies(context, shortname=''):
    """Return the template context for the JS snippet that replaces links
    ending in an ``#disqus_thread`` anchor with the thread's comment count.
    """
    return {
        'shortname': getattr(settings, 'DISQUS_WEBSITE_SHORTNAME', shortname),
        'config': get_config(context),
    }
def disqus_recent_comments(context, shortname='', num_items=5, excerpt_length=200, hide_avatars=0, avatar_size=32):
    """Return the template context for the recent-comments widget."""
    site_shortname = getattr(settings, 'DISQUS_WEBSITE_SHORTNAME', shortname)
    widget_context = {
        'shortname': site_shortname,
        'num_items': num_items,
        'hide_avatars': hide_avatars,
        'avatar_size': avatar_size,
        'excerpt_length': excerpt_length,
        'config': get_config(context),
    }
    return widget_context
def disqus_show_comments(context, shortname=''):
    """Return the template context for rendering the DISQUS comment thread."""
    return {
        'shortname': getattr(settings, 'DISQUS_WEBSITE_SHORTNAME', shortname),
        'config': get_config(context),
    }
# Tag registration: the set_* tags share the compiler helpers above,
# disqus_dev is a simple tag, and the remaining helpers render the
# corresponding disqus/*.html inclusion templates.
register.tag('set_disqus_developer', set_disqus_developer)
register.tag('set_disqus_identifier', set_disqus_identifier)
register.tag('set_disqus_url', set_disqus_url)
register.tag('set_disqus_title', set_disqus_title)
register.simple_tag(disqus_dev)
register.inclusion_tag('disqus/num_replies.html', takes_context=True)(disqus_num_replies)
register.inclusion_tag('disqus/recent_comments.html', takes_context=True)(disqus_recent_comments)
register.inclusion_tag('disqus/show_comments.html', takes_context=True)(disqus_show_comments)
|
ExaScience/smurff | python/test/test_scarce.py | Python | mit | 1,034 | 0.005803 | #!/usr/bin/env python
import unittest
import numpy as np
import scipy.sparse as sp
import smurff
def matrix_with_explicit_zeros():
    """Build a 3x4 COO matrix whose first column stores *explicit* zeros.

    The three (row, 0) entries have value 0.0 but are nonetheless stored,
    so ``nnz`` is 6 until ``eliminate_zeros()`` is called.  (The original
    lines were garbled by extraction artifacts; this is the unambiguous
    reconstruction.)
    """
    matrix_rows = np.array([0, 0, 1, 1, 2, 2])
    matrix_cols = np.array([0, 1, 0, 1, 0, 1])
    matrix_vals = np.array([0, 1, 0, 1, 0, 1], dtype=np.float64)
    return sp.coo_matrix((matrix_vals, (matrix_rows, matrix_cols)), shape=(3, 4))
class TestScarce(unittest.TestCase):
    """Ensure explicitly stored zeros are not accidentally eliminated.

    SMURFF must treat explicit zeros in a scarce matrix as observed values,
    so the sparse input may never be passed through ``eliminate_zeros``.
    """

    def test_simple(self):
        """Sanity check: eliminate_zeros() would drop the zero entries."""
        matrix = matrix_with_explicit_zeros()
        # assertEqual (not assertTrue(a == b)) so failures report both values.
        self.assertEqual(matrix.nnz, 6)
        matrix.eliminate_zeros()
        self.assertEqual(matrix.nnz, 3)

    def test_smurff(self):
        """SMURFF reports a prediction for all 6 test cells, zeros included."""
        matrix = matrix_with_explicit_zeros()
        self.assertEqual(matrix.nnz, 6)
        predictions = smurff.bpmf(matrix, Ytest=matrix,
                                  num_latent=4, burnin=5, nsamples=5)
        self.assertEqual(len(predictions), 6)
if __name__ == '__main__':
unittest.main()
|
RidgeRun/gstd-1.x | tests/libgstc/python/test_libgstc_python_list_elements.py | Python | gpl-2.0 | 2,117 | 0 | #!/usr/bin/env python3
# GStreamer Daemon - gst-launch on steroids
# Python client library abstracting gstd interprocess communication
# Copyright (c) 2015-2020 RidgeRun, LLC (http://www.ridgerun.com)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from pygstc.gstc import *
from pygstc.logger import *
class TestGstcListElementsMethods(unittest.TestCase):
    """Integration test for GstdClient.list_elements() (requires a running gstd)."""

    def test_list_elements(self):
        """Create a pipeline, list its elements by name, then tear it down."""
        pipeline = 'videotestsrc name=v0 ! fakesink name=x0'
        self.gstd_logger = CustomLogger('test_libgstc', loglevel='DEBUG')
        self.gstd_client = GstdClient(logger=self.gstd_logger)
        self.gstd_client.pipeline_create('p0', pipeline)
        # Note: gstd reports the sink ('x0') before the source ('v0') here.
        self.assertEqual(self.gstd_client.list_elements('p0'),
            [{'name': 'x0'}, {'name': 'v0'}])
        self.gstd_client.pipeline_delete('p0')
if __name__ == '__main__':
unittest.main()
|
mic4ael/indico | indico/modules/announcement/views.py | Python | mit | 373 | 0 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.modules.admin.views import WPAdmin
class WPAnnouncement(WPAdmin):
    """Admin-area page wrapper for the announcement module.

    Only sets the template prefix; all rendering behavior is inherited
    from WPAdmin.
    """

    # Fixed dataset corruption: a stray "| " was injected into the class
    # statement, which made it a syntax error.
    template_prefix = 'announcement/'
|
fritsvanveen/QGIS | python/utils.py | Python | gpl-2.0 | 20,592 | 0.002865 | # -*- coding: utf-8 -*-
"""
***************************************************************************
utils.py
---------------------
Date : November 2009
Copyright : (C) 2009 by Martin Dobias
Email : wonder dot sk at gmail dot com
***************************************************************************
* *
*   This program is free software; you can redistribute it and/or modify  *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
__author__ = 'Martin Dobias'
__date__ = 'November 2009'
__copyright__ = '(C) 2009, Martin Dobias'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
"""
QGIS utilities module
"""
from qgis.PyQt.QtCore import QCoreApplication, QLocale
from qgis.PyQt.QtWidgets import QPushButton, QApplication
from qgis.core import Qgis, QgsExpression, QgsMessageLog, qgsfunction, QgsMessageOutput, QgsWkbTypes
from qgis.gui import QgsMessageBar
import sys
import traceback
import glob
import os.path
try:
import configparser
except ImportError:
import ConfigParser as configparser
import warnings
import codecs
import time
import functools
if sys.version_info[0] >= 3:
import builtins
builtins.__dict__['unicode'] = str
builtins.__dict__['basestring'] = str
builtins.__dict__['long'] = int
builtins.__dict__['Set'] = set
# ######################
# ERROR HANDLING
warnings.simplefilter('default')
warnings.filterwarnings("ignore", "the sets module is deprecated")
def showWarning(message, category, filename, lineno, file=None, line=None):
    """Route Python warnings into the QGIS message log.

    Installed as warnings.showwarning below; signature matches the
    warnings module hook.
    """
    stk = ""
    for s in traceback.format_stack()[:-2]:
        # hasattr(s, 'decode') is a Python 2 bytes-vs-str compatibility check.
        if hasattr(s, 'decode'):
            stk += s.decode(sys.getfilesystemencoding())
        else:
            stk += s
    if hasattr(filename, 'decode'):
        decoded_filename = filename.decode(sys.getfilesystemencoding())
    else:
        decoded_filename = filename
    # Log the formatted warning plus the call stack under a translated title.
    QgsMessageLog.logMessage(
        u"warning:{}\ntraceback:{}".format(warnings.formatwarning(message, category, decoded_filename, lineno), stk),
        QCoreApplication.translate("Python", "Python warning")
    )
warnings.showwarning = showWarning
def showException(type, value, tb, msg, messagebar=False):
    """Report a Python exception to the user.

    Logs the traceback, then either shows it non-intrusively in a message
    bar (when ``messagebar`` is true and the GUI state allows it) or falls
    back to the blocking stack-trace dialog.
    """
    if msg is None:
        msg = QCoreApplication.translate('Python', 'An error has occurred while executing Python code:')
    logmessage = ''
    for s in traceback.format_exception(type, value, tb):
        logmessage += s.decode('utf-8', 'replace') if hasattr(s, 'decode') else s
    title = QCoreApplication.translate('Python', 'Python error')
    QgsMessageLog.logMessage(logmessage, title)
    try:
        blockingdialog = QApplication.instance().activeModalWidget()
        window = QApplication.instance().activeWindow()
    # NOTE(review): bare except -- presumably guards against a missing
    # QApplication instance; confirm which exception is expected here.
    except:
        blockingdialog = QApplication.activeModalWidget()
        window = QApplication.activeWindow()
    # Still show the normal blocking dialog in this case for now.
    if blockingdialog or not window or not messagebar or not iface:
        open_stack_dialog(type, value, tb, msg)
        return
    bar = iface.messageBar()
    # If it's not the main window see if we can find a message bar to report the error in
    if not window.objectName() == "QgisApp":
        widgets = window.findChildren(QgsMessageBar)
        if widgets:
            # Grab the first message bar for now
            bar = widgets[0]
    item = bar.currentItem()
    if item and item.property("Error") == msg:
        # Return if we already have a message with the same error message
        return
    widget = bar.createMessage(title, msg + " " + QCoreApplication.translate("Python", "See message log (Python Error) for more details."))
    # Tag the widget so duplicate errors can be detected above.
    widget.setProperty("Error", msg)
    stackbutton = QPushButton(QCoreApplication.translate("Python", "Stack trace"), pressed=functools.partial(open_stack_dialog, type, value, tb, msg))
    button = QPushButton(QCoreApplication.translate("Python", "View message log"), pressed=show_message_log)
    widget.layout().addWidget(stackbutton)
    widget.layout().addWidget(button)
    bar.pushWidget(widget, QgsMessageBar.WARNING)
def show_message_log(pop_error=True):
    """Open the QGIS message log panel, optionally dismissing the current
    message-bar entry first (used as a button callback in showException)."""
    if pop_error:
        iface.messageBar().popWidget()
    iface.openMessageLog()
def open_stack_dialog(type, value, tb, msg, pop_error=True):
    """Show the blocking HTML dialog with the full traceback, Python/QGIS
    versions and sys.path, for the given exception triple."""
    if pop_error:
        iface.messageBar().popWidget()
    if msg is None:
        msg = QCoreApplication.translate('Python', 'An error has occurred while executing Python code:')
    # TODO Move this to a template HTML file
    txt = u'''<font color="red"><b>{msg}</b></font>
<br>
<h3>{main_error}</h3>
<pre>
{error}
</pre>
<br>
<b>{version_label}</b> {num}
<br>
<b>{qgis_label}</b> {qversion} {qgisrelease}, {devversion}
<br>
<h4>{pypath_label}</h4>
<ul>
{pypath}
</ul>'''
    error = ''
    lst = traceback.format_exception(type, value, tb)
    for s in lst:
        # Python 2 compatibility: traceback lines may be bytes.
        error += s.decode('utf-8', 'replace') if hasattr(s, 'decode') else s
    error = error.replace('\n', '<br>')
    # The last line of the formatted traceback is the "Type: message" summary.
    main_error = lst[-1].decode('utf-8', 'replace') if hasattr(lst[-1], 'decode') else lst[-1]
    version_label = QCoreApplication.translate('Python', 'Python version:')
    qgis_label = QCoreApplication.translate('Python', 'QGIS version:')
    pypath_label = QCoreApplication.translate('Python', 'Python Path:')
    txt = txt.format(msg=msg,
                     main_error=main_error,
                     error=error,
                     version_label=version_label,
                     num=sys.version,
                     qgis_label=qgis_label,
                     qversion=Qgis.QGIS_VERSION,
                     qgisrelease=Qgis.QGIS_RELEASE_NAME,
                     devversion=Qgis.QGIS_DEV_VERSION,
                     pypath_label=pypath_label,
                     pypath=u"".join(u"<li>{}</li>".format(path) for path in sys.path))
    # NOTE(review): this replace looks like it originally substituted '&nbsp;'
    # for runs of spaces and the entity was lost in transit -- confirm upstream.
    txt = txt.replace(' ', ' ')  # preserve whitespaces for nicer output
    dlg = QgsMessageOutput.createMessageOutput()
    dlg.setTitle(msg)
    dlg.setMessage(txt, QgsMessageOutput.MessageHtml)
    dlg.showMessage()
def qgis_excepthook(type, value, tb):
    """sys.excepthook replacement: forward uncaught exceptions to the GUI
    handler (message bar + message log)."""
    showException(type, value, tb, None, messagebar=True)
def installErrorHook():
    """Install the QGIS GUI exception hook as sys.excepthook."""
    sys.excepthook = qgis_excepthook
def uninstallErrorHook():
    """Restore the interpreter's original sys.excepthook."""
    sys.excepthook = sys.__excepthook__
# install error hook() on module load
installErrorHook()
# initialize 'iface' object
iface = None
def initInterface(pointer):
    """Bind the module-level ``iface`` global to the QgisInterface instance
    wrapped around the raw C++ pointer passed in by the application."""
    # Imported here to avoid a hard dependency at module import time.
    from qgis.gui import QgisInterface
    from sip import wrapinstance
    global iface
    iface = wrapinstance(pointer, QgisInterface)
#######################
# PLUGINS
# list of plugin paths. it gets filled in by the QGIS python library
plugin_paths = []
# dictionary of plugins
plugins = {}
plugin_times = {}
# list of active (started) plugins
active_plugins = []
# list of plugins in plugin directory and home plugin directory
available_plugins = []
# dictionary of plugins providing metadata in a text file (metadata.txt)
# key = plugin package name, value = config parser instance
plugins_metadata_parser = {}
def findPlugins(path):
""" for internal use: return list of plugins in given path """
for plugin in glob.glob(path + "/*"):
if not os.path.isdir(plugin):
continue
if not os.path.exists(os.path.join(plugin, '__init__.py')):
continue
metadataFile = os.path.join(plugin, 'metadata.txt')
if not os.path.exists(metadataFile):
continue
cp = configparser.ConfigParser()
try:
f = codecs.open(metadataFile, "r", "utf8")
cp.read_fi |
USGSDenverPychron/pychron | pychron/dvc/cache.py | Python | apache-2.0 | 2,303 | 0 | # ===============================================================================
# Copyright 2018 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from datetime import datetime
class DVCCache(object):
    """A small LRU-style in-memory cache keyed by access time.

    Each entry records when it was last read; stale entries can be purged
    with :meth:`clean`, and the least-recently-accessed entry is evicted
    once the cache grows past ``max_size``.
    """

    # Entries not accessed within this many seconds are purged by clean().
    _STALE_SECONDS = 60 * 15  # 15 minutes

    def __init__(self, max_size=1000):
        # key -> {"date_accessed": datetime, "value": object}
        self._cache = {}
        self.max_size = max_size

    def clear(self):
        """Drop every cached entry."""
        self._cache.clear()

    def remove(self, key):
        """Remove *key* if present; silently ignore unknown keys."""
        # Fixed dataset corruption here ("KeyEr | ror"); pop(key, None) is the
        # idiomatic equivalent of the original try/except KeyError.
        self._cache.pop(key, None)

    def clean(self):
        """Purge entries not accessed within the last 15 minutes."""
        now = datetime.now()
        # Materialize the stale keys first: the original used a generator and
        # deleted from the dict while it was still iterating, which raises
        # RuntimeError ("dictionary changed size during iteration") in Py3.
        stale = [
            k
            for k, v in self._cache.items()
            if (now - v["date_accessed"]).total_seconds() > self._STALE_SECONDS
        ]
        for k in stale:
            del self._cache[k]

    def report(self):
        """Return the number of cached entries."""
        return len(self._cache)

    def get(self, item):
        """Return the cached value for *item* (or None) and refresh its
        access time so it counts as recently used."""
        obj = self._cache.get(item)
        if obj:
            obj["date_accessed"] = datetime.now()
            return obj["value"]

    def update(self, key, value):
        """Store *value* under *key*, evicting the oldest entry when full."""
        if key not in self._cache and len(self._cache) > self.max_size:
            self.remove_oldest()
        self._cache[key] = {"date_accessed": datetime.now(), "value": value}

    def remove_oldest(self):
        """Remove the entry with the oldest accessed date (no-op when empty).

        The original popped ``None`` on an empty cache, raising KeyError.
        """
        if not self._cache:
            return
        oldest_entry = min(
            self._cache, key=lambda k: self._cache[k]["date_accessed"]
        )
        self._cache.pop(oldest_entry)
# ============= EOF =============================================
|
mezz64/home-assistant | homeassistant/components/aurora_abb_powerone/sensor.py | Python | apache-2.0 | 4,749 | 0.000211 | """Support for Aurora ABB PowerOne Solar Photvoltaic (PV) inverter."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from typing import Any
from aurorapy.client import AuroraError, AuroraSerialClient
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ENERGY_KILO_WATT_HOUR, POWER_WATT, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .aurora_device import AuroraEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
# Static descriptions of the three sensors exposed per inverter:
# grid power output (W), inverter temperature (diagnostic, C) and
# lifetime energy production (kWh, monotonically increasing).
SENSOR_TYPES = [
    SensorEntityDescription(
        key="instantaneouspower",
        device_class=SensorDeviceClass.POWER,
        native_unit_of_measurement=POWER_WATT,
        state_class=SensorStateClass.MEASUREMENT,
        name="Power Output",
    ),
    SensorEntityDescription(
        key="temp",
        device_class=SensorDeviceClass.TEMPERATURE,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=TEMP_CELSIUS,
        state_class=SensorStateClass.MEASUREMENT,
        name="Temperature",
    ),
    SensorEntityDescription(
        key="totalenergy",
        device_class=SensorDeviceClass.ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=SensorStateClass.TOTAL_INCREASING,
        name="Total Energy",
    ),
]
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up aurora_abb_powerone sensor based on a config entry.

    Creates one AuroraSensor per entry in SENSOR_TYPES, all sharing the
    serial client stored for this config entry.
    """
    # Fixed dataset corruption on the two lines below ("config_entry | ." and
    # "for sens in | SENSOR_TYPES").
    client = hass.data[DOMAIN][config_entry.entry_id]
    data = config_entry.data
    entities = [AuroraSensor(client, data, sens) for sens in SENSOR_TYPES]
    _LOGGER.debug("async_setup_entry adding %d entities", len(entities))
    # True: request an immediate first update after adding.
    async_add_entities(entities, True)
class AuroraSensor(AuroraEntity, SensorEntity):
    """Representation of a Sensor on a Aurora ABB PowerOne Solar inverter."""

    def __init__(
        self,
        client: AuroraSerialClient,
        data: Mapping[str, Any],
        entity_description: SensorEntityDescription,
    ) -> None:
        """Initialize the sensor."""
        super().__init__(client, data)
        self.entity_description = entity_description
        # Previous availability; used in update() to log transitions only once.
        self.available_prev = True

    def update(self):
        """Fetch new state data for the sensor.
        This is the only method that should fetch new data for Home Assistant.
        """
        try:
            self.available_prev = self._attr_available
            self.client.connect()
            # entity_description.key selects which register to read; the keys
            # match SENSOR_TYPES above.
            if self.entity_description.key == "instantaneouspower":
                # read ADC channel 3 (grid power output)
                power_watts = self.client.measure(3, True)
                self._attr_native_value = round(power_watts, 1)
            elif self.entity_description.key == "temp":
                temperature_c = self.client.measure(21)
                self._attr_native_value = round(temperature_c, 1)
            elif self.entity_description.key == "totalenergy":
                # cumulated_energy returns Wh; entity unit is kWh.
                energy_wh = self.client.cumulated_energy(5)
                self._attr_native_value = round(energy_wh / 1000, 2)
            self._attr_available = True
        except AuroraError as error:
            self._attr_state = None
            self._attr_native_value = None
            self._attr_available = False
            # aurorapy does not have different exceptions (yet) for dealing
            # with timeout vs other comms errors.
            # This means the (normal) situation of no response during darkness
            # raises an exception.
            # aurorapy (gitlab) pull request merged 29/5/2019. When >0.2.6 is
            # released, this could be modified to :
            # except AuroraTimeoutError as e:
            # Workaround: look at the text of the exception
            if "No response after" in str(error):
                _LOGGER.debug("No response from inverter (could be dark)")
            else:
                raise error
        finally:
            # Log availability transitions exactly once per change.
            if self._attr_available != self.available_prev:
                if self._attr_available:
                    _LOGGER.info("Communication with %s back online", self.name)
                else:
                    _LOGGER.warning(
                        "Communication with %s lost",
                        self.name,
                    )
            # Always release the serial port, even after an error.
            if self.client.serline.isOpen():
                self.client.close()
|
botswana-harvard/edc-sync | edc_sync/migrations/0004_auto_20180104_1158.py | Python | gpl-2.0 | 3,466 | 0.001443 | # Generated by Django 2.0 on 2018-01-04 11:58
import _socket
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add device_created/device_modified audit columns to all sync models
    and default hostname_created to the current host name.

    Fixed dataset corruption (stray "| " sequences) in the 'history' and
    'incomingtransaction' AlterField entries; all five AlterField operations
    are intentionally identical in field definition.
    """

    dependencies = [
        ('edc_sync', '0003_auto_20170518_1233'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='incomingtransaction',
            options={'ordering': ['timestamp']},
        ),
        migrations.AddField(
            model_name='client',
            name='device_created',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='client',
            name='device_modified',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='history',
            name='device_created',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='history',
            name='device_modified',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='incomingtransaction',
            name='device_created',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='incomingtransaction',
            name='device_modified',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='outgoingtransaction',
            name='device_created',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='outgoingtransaction',
            name='device_modified',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='server',
            name='device_created',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='server',
            name='device_modified',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AlterField(
            model_name='client',
            name='hostname_created',
            field=models.CharField(blank=True, default=_socket.gethostname, help_text='System field. (modified on create only)', max_length=60),
        ),
        migrations.AlterField(
            model_name='history',
            name='hostname_created',
            field=models.CharField(blank=True, default=_socket.gethostname, help_text='System field. (modified on create only)', max_length=60),
        ),
        migrations.AlterField(
            model_name='incomingtransaction',
            name='hostname_created',
            field=models.CharField(blank=True, default=_socket.gethostname, help_text='System field. (modified on create only)', max_length=60),
        ),
        migrations.AlterField(
            model_name='outgoingtransaction',
            name='hostname_created',
            field=models.CharField(blank=True, default=_socket.gethostname, help_text='System field. (modified on create only)', max_length=60),
        ),
        migrations.AlterField(
            model_name='server',
            name='hostname_created',
            field=models.CharField(blank=True, default=_socket.gethostname, help_text='System field. (modified on create only)', max_length=60),
        ),
    ]
|
poderomedia/kfdata | kgrants/spiders/grants.py | Python | gpl-2.0 | 3,682 | 0.008148 | # -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from kgrants.items import KgrantsItem
from scrapy.http import Request
import time
class GrantsSpider(Spider):
    """Scrape grant listings and per-grant detail pages from
    knightfoundation.org.

    Fixed dataset corruption (stray "| " sequences) in parse_project and
    narrowed the bare ``except:`` to ``except Exception:`` so it no longer
    swallows KeyboardInterrupt/SystemExit.
    """

    name = "grants"
    allowed_domains = ["www.knightfoundation.org"]
    pages = 1  # number of listing pages to crawl (overridable via -a pages=N)
    base_url = 'http://www.knightfoundation.org'
    start_url_str = 'http://www.knightfoundation.org/grants/?sort=title&page=%s'

    def __init__(self, pages=None, *args, **kwargs):
        super(GrantsSpider, self).__init__(*args, **kwargs)
        if pages is not None:
            self.pages = pages
        self.start_urls = [ self.start_url_str % str(page) for page in xrange(1,int(self.pages)+1)]

    def parse(self, response):
        """Yield one detail-page Request per grant article on a listing page."""
        hxs = Selector(response)
        projects = hxs.xpath('//article')
        for project in projects:
            # Crude rate limiting to stay polite with the remote server.
            time.sleep(2)
            project_url = self.base_url + ''.join(project.xpath('a/@href').extract())
            grants = KgrantsItem()
            grants['page'] = project_url
            grants['project'] = ''.join(project.xpath('a/div/header/h1/text()').extract()).strip()
            grants['description'] = ''.join(project.xpath('p/text()').extract()).strip()
            yield Request(grants['page'],
                          callback = self.parse_project,
                          meta={'grants':grants})

    def parse_project(self, response):
        """Fill the partially-built item with the detail page's dt/dd fields
        and grantee contact information, then yield it."""
        hxs = Selector(response)
        grants = response.meta['grants']
        details = hxs.xpath('//section[@id="grant_info"]')
        fields = hxs.xpath('//dt')
        values = hxs.xpath('//dd')
        self.log('field: <%s>' % fields.extract())
        for item in details:
            grants['fiscal_agent'] = ''.join(item.xpath('header/h2/text()').extract()).strip()
            count = 0
            for field in fields:
                # e.g. "Focus Area" -> "focus_area"
                normalized_field = ''.join(field.xpath('text()').extract()).strip().lower().replace(' ','_')
                self.log('field: <%s>' % normalized_field)
                try:
                    grants[normalized_field] = values.xpath('text()').extract()[count]
                except Exception:
                    # Some dd cells wrap their value in <a>; fall back to the
                    # known positions for the two link-valued fields.
                    if normalized_field == 'community':
                        grants[normalized_field] = values.xpath('a/text()').extract()[1]
                    elif normalized_field == 'focus_area':
                        grants[normalized_field] = values.xpath('a/text()').extract()[0]
                count += 1
            grants['grantee_contact_email'] = ''.join(
                item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/@href').extract()).replace('mailto:','').strip()
            grants['grantee_contact_name'] = ''.join(
                item.xpath('section[@id="grant_contact"]/ul/li[@class="email"]/a/text()').extract()).strip()
            grants['grantee_contact_location'] = ''.join(
                item.xpath('section[@id="grant_contact"]/ul/li[@class="location"]/text()').extract()).strip()
            grants['grantee_contact_facebook'] = ''.join(
                item.xpath('section[@id="grant_contact"]/ul/li[@class="facebook"]/a/@href').extract()).strip()
            grants['grantee_contact_twitter'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="twitter"]/a/@href').extract()
            grants['grantee_contact_website'] = item.xpath('section[@id="grant_contact"]/ul/li[@class="website"]/a/@href').extract()
        if 'grant_period' in grants:
            # "2014-01-01 to 2015-01-01" -> start/end fields.
            grant_period = grants['grant_period'].split(' to ')
            grants['grant_period_start'] = grant_period[0]
            grants['grant_period_end'] = grant_period[1]
        yield grants
|
brj424/nector | malware/migrations/0001_initial.py | Python | gpl-3.0 | 1,104 | 0.001812 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-12 15:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the Malware alert model.

    Fixed dataset corruption (stray "| " in the dependencies list and in
    the 'computer' field name).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Malware',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('alert_id', models.CharField(max_length=90)),
                ('alert_type', models.CharField(max_length=80)),
                ('file_name', models.CharField(max_length=80)),
                ('computer', models.CharField(max_length=80)),
                ('contact_group', models.CharField(max_length=80)),
                ('virus', models.CharField(max_length=80)),
                ('actual_action', models.CharField(max_length=80)),
                ('comment', models.CharField(max_length=100)),
                ('numeric_ip', models.GenericIPAddressField(default='0.0.0.0', protocol='ipv4')),
            ],
        ),
    ]
|
kiseyno92/SNU_ML | Practice7/code/eval.py | Python | mit | 1,438 | 0.017385 | import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from IPython import embed
from tensorflow import flags
def main(_):
    """Evaluate a logistic-regression MNIST model on the validation split,
    printing the mean accuracy over 50 batches of 100 images."""
    mnist = input_data.read_data_sets("./data", one_hot=True)
    # define model input: image and ground-truth label
    model_inputs = tf.placeholder(dtype=tf.float32, shape=[None, 784])
    labels = tf.placeholder(dtype=tf.float32, shape=[None, 10])
    # define parameters for Logistic Regression model
    w = tf.Variable(tf.zeros(shape=[784, 10]))
    b = tf.Variable(tf.zeros(shape=[10]))
    logits = tf.matmul(model_inputs, w) + b
    predictions = tf.nn.softmax(logits)
    # define cross entropy loss term (unused by the evaluation below).
    # NOTE(review): `predictions` is already softmaxed, so passing it as
    # `logits` applies softmax twice; the loss should be built from `logits`.
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=labels,
        logits=predictions)
    dense_predictions = tf.argmax(predictions, axis=1)
    dense_labels = tf.argmax(labels, axis=1)
    equals = tf.cast(tf.equal(dense_predictions, dense_labels), tf.float32)
    acc = tf.reduce_mean(equals)
    # NOTE(review): the saver is created but no checkpoint is restored, so the
    # evaluated weights are the zero initialization -- confirm a restore call
    # is not missing.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        final_acc = 0.0
        sess.run(tf.global_variables_initializer())
        for step in range(50):
            images_val, labels_val = mnist.validation.next_batch(100)
            feed = {model_inputs: images_val, labels: labels_val}
            # Bug fix: the original did `acc = sess.run(acc, ...)`, rebinding
            # the tensor name to a Python float so the second iteration
            # crashed; fetch into a separate variable instead.
            batch_acc = sess.run(acc, feed_dict=feed)
            final_acc += batch_acc
        final_acc /= 50.0
        print ("Full Evaluation Accuracy : {}".format(final_acc))
if __name__ == "__main__":
tf.app.run()
|
myt00seven/svrg | nips2017_mnist/mnist.py | Python | mit | 14,964 | 0.004945 | #!/usr/bin/env python
"""
Batch Normalization + SVRG on MNIST
CPU Version
Independent Study
May 18, 2016
Yintai Ma
"""
from __future__ import print_function
import sys
import os
import time
import matplotlib
import matplotlib.pyplot as plt
import pylab
import numpy as np
import theano
import theano.tensor as T
import lasagne
from collections import OrderedDict
# May 18, 2016, Yintai Ma
# standard setting , epoch = 20, batch size = 100
NUM_EPOCHS = 20
BATCH_SIZE = 100
NUM_HIDDEN_UNITS = 500
LEARNING_RATE = 0.01
MOMENTUM = 0.9
FREQUENCY = 0.1
MODEL = 'mlp'
GRADIENT = 'sgd'
# ################## Download and prepare the MNIST dataset ##################
# This is just some way of getting the MNIST dataset from an online location
# and loading it into numpy arrays. It doesn't involve Lasagne at all.
def custom_svrg2(loss, params, m, learning_rate=0.01, objective=None, data=None, target=None, getpred=None):
    """Build SVRG update expressions for each parameter.

    For every param, the full-batch gradient `mu` is the control variate and
    a theano.scan of `m` inner steps samples one random example per step,
    correcting its stochastic gradient with the gradient of the same example
    at the snapshot (`param`). Returns an OrderedDict of updates whose final
    value per param is the last inner-loop iterate.
    """
    theano.pp(loss)  # debug: pretty-print the symbolic loss
    grads = theano.grad(loss, params)
    n = data.shape[0]
    updates = OrderedDict()
    rng = T.shared_randomstreams.RandomStreams(seed=149)
    for param, grad in zip(params, grads):
        # NOTE(review): `value` is never used below.
        value = param.get_value(borrow=True)
        # Full-batch average gradient at the snapshot (assumes `grad` is a sum
        # over the n examples -- TODO confirm against the loss definition).
        mu = grad / n
        def oneStep(w):
            # Sample one example index; t is a length-1 tensor.
            t = rng.choice(size=(1,), a=n)
            # Gradient of the sampled example at the snapshot parameters.
            loss_part_tilde = objective(getpred(data[t], param), target[t])
            loss_part_tilde = loss_part_tilde.mean()
            g_tilde = theano.grad(loss_part_tilde, param)
            # Gradient of the same example at the current inner iterate w.
            loss_part = objective(getpred(data[t], w), target[t])
            loss_part = loss_part.mean()
            g = theano.grad(loss_part, w)
            # SVRG step: variance-reduced gradient g - g_tilde + mu.
            w = w - learning_rate * (g - g_tilde + mu)
            return w
        w_tilde, scan_updates = theano.scan(fn=oneStep, outputs_info=param, n_steps=m)
        updates.update(scan_updates)
        updates[param] = w_tilde[-1]
    return updates
def mysgd(loss_or_grads, params, learning_rate):
    """Vanilla SGD update map: param <- param - learning_rate * grad."""
    grads = lasagne.updates.get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()
    for param, grad in zip(params, grads):
        updates[param] = param - learning_rate * grad
    return updates
def mysvrg(loss_or_grads, params, learning_rate,avg_gradient):
    """Sketch of an SVRG update rule; currently non-functional."""
    #Not Working right now
    # NOTE(review): `grad_it` is never defined in this scope, so building the
    # update expression raises NameError; `avg_gradient[param]` also assumes a
    # per-parameter mapping of snapshot gradients -- confirm intended design
    # against custom_svrg2 above before use.
    grads = lasagne.updates.get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()
    for param, grad in zip(params, grads):
        updates[param] = param - learning_rate * (grad- grad_it + avg_gradient[param])
    return updates
def load_dataset():
    """Download MNIST if needed and return it as six numpy arrays:
    (X_train, y_train, X_val, y_val, X_test, y_test), images shaped
    (examples, 1, 28, 28) as float32 in [0, 255/256], labels as uint8."""
    # We first define a download function, supporting both Python 2 and 3.
    if sys.version_info[0] == 2:
        from urllib import urlretrieve
    else:
        from urllib.request import urlretrieve
    def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
        print("Downloading %s" % filename)
        urlretrieve(source + filename, filename)
    # We then define functions for loading MNIST images and labels.
    # For convenience, they also download the requested files if needed.
    import gzip
    def load_mnist_images(filename):
        if not os.path.exists(filename):
            download(filename)
        # Read the inputs in Yann LeCun's binary format.
        with gzip.open(filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
        # The inputs are vectors now, we reshape them to monochrome 2D images,
        # following the shape convention: (examples, channels, rows, columns)
        data = data.reshape(-1, 1, 28, 28)
        # The inputs come as bytes, we convert them to float32 in range [0,1].
        # (Actually to range [0, 255/256], for compatibility to the version
        return data / np.float32(256)
    def load_mnist_labels(filename):
        if not os.path.exists(filename):
            download(filename)
        # Read the labels in Yann LeCun's binary format.
        with gzip.open(filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=8)
        # The labels are vectors of integers now, that's exactly what we want.
        return data
    # We can now download and read the training and test set images and labels.
    X_train = load_mnist_images('train-images-idx3-ubyte.gz')
    y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
    X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
    y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
    # We reserve the last 10000 training examples for validation.
    X_train, X_val = X_train[:-10000], X_train[-10000:]
    y_train, y_val = y_train[:-10000], y_train[-10000:]
    # We just return all the arrays in order, as expected in main().
    # (It doesn't matter how we do this as long as we can read them again.)
    return X_train, y_train, X_val, y_val, X_test, y_test
# ##################### Build the neural network model #######################
# This script supports three types of models. For each one, we define a
# function | that takes a Theano variable representing the input and returns
# the output layer of a neural network model built in Lasagne.
def build_mlp(input_var=None):
    """One-hidden-layer MLP: 28x28 input -> NUM_HIDDEN_UNITS ReLU -> 10-way softmax."""
    l_in = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
                                     input_var=input_var)
    l_hid = lasagne.layers.DenseLayer(
        l_in, num_units=NUM_HIDDEN_UNITS,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    # l_hid = lasagne.layers.DenseLayer(
    #     l_hid, num_units=NUM_HIDDEN_UNITS,
    #     nonlinearity=lasagne.nonlinearities.rectify)
    l_out = lasagne.layers.DenseLayer(
        l_hid, num_units=10,
        nonlinearity=lasagne.nonlinearities.softmax)
    return l_out
def build_mlpbn(input_var=None):
    """Batch-normalized variant of build_mlp: 28x28 input ->
    NUM_HIDDEN_UNITS ReLU (batch norm) -> 10-way softmax (batch norm).

    Bug fix: the output layer previously had num_units=NUM_HIDDEN_UNITS,
    inconsistent with the 10-class MNIST task and with build_mlp above.
    """
    l_in = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
                                     input_var=input_var)
    l_hidden = lasagne.layers.batch_norm (
        lasagne.layers.DenseLayer(
            l_in,
            num_units=NUM_HIDDEN_UNITS,
            nonlinearity=lasagne.nonlinearities.rectify,
        )
    )
    # l_hidden = lasagne.layers.batch_norm (
    #     lasagne.layers.DenseLayer(
    #         l_hidden,
    #         num_units=NUM_HIDDEN_UNITS,
    #         nonlinearity=lasagne.nonlinearities.rectify,
    #     )
    # )
    l_out = lasagne.layers.batch_norm (
        lasagne.layers.DenseLayer(
            l_hidden,
            num_units=10,
            nonlinearity=lasagne.nonlinearities.softmax,
        )
    )
    return l_out
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield successive (inputs, targets) mini-batches of size ``batchsize``.

    Only full batches are produced; a trailing partial batch is dropped.
    With ``shuffle`` the examples are drawn in a random order (inputs and
    targets stay aligned).
    """
    assert len(inputs) == len(targets)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    num_batches = len(inputs) // batchsize
    for batch_no in range(num_batches):
        lo = batch_no * batchsize
        hi = lo + batchsize
        picks = order[lo:hi] if shuffle else slice(lo, hi)
        yield inputs[picks], targets[picks]
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def main(model=MODEL,gradient = GRADIENT, num_epochs=NUM_EPOCHS):
# Load the dataset
NUM_EPOCHS = num_epochs
print("Loading data...")
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector(' |
astrobin/astrobin | astrobin/migrations/0004_userprofile_updated.py | Python | agpl-3.0 | 432 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-02-17 10:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable auto-now 'updated' timestamp to UserProfile.

    Fixed dataset corruption (stray "| " in the dependencies entry and in
    the closing parenthesis of AddField).
    """

    dependencies = [
        ('astrobin', '0003_auto_20180217_0933'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='updated',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
    ]
|
mgarciafernandez/magnipy | magnipy.py | Python | gpl-3.0 | 4,413 | 0.046 | import numpy, math, json, os, ROOT, scipy.special, scipy.interpolate
__GRAPHIX__ = 'ROOT'
def GetNSigmas(chi2):
    """Convert a two-sided tail probability *chi2* into the equivalent number
    of Gaussian sigmas, i.e. solve erf(n / sqrt(2)) = 1 - chi2 for n.

    The inversion is done by linearly interpolating a tabulated erf curve
    over 0..20 sigmas.
    """
    sigmas = numpy.linspace(0, 20, 1000)
    # Vectorized erf: the original fed a Python-3 ``map`` iterator to
    # interp1d, which cannot consume it (it worked only under Python 2,
    # where map returns a list).
    prob = scipy.special.erf(sigmas / math.sqrt(2))
    f = scipy.interpolate.interp1d(prob, sigmas, kind='linear')
    return f(1 - chi2)
class CorrelationFunction(object):
    """Base container for an angular two-point correlation function.

    Holds parallel arrays of angles, w(theta) values and errors, plus an
    optional ROOT graph used for plotting. (Python 2 code base; prints were
    parenthesized in the single-argument form, valid under both 2 and 3.)
    """

    def __init__(self, name=''):
        self.angle_ = numpy.zeros([0])
        self.w_ = numpy.zeros([0])
        self.error_ = numpy.zeros([0])
        self.Nth_ = len(self.w_)  # number of angular bins
        self.name_ = name
        self.plot_ = None
        if __GRAPHIX__ == 'ROOT':
            self.plot_ = ROOT.TGraphErrors()

    def __str__(self):
        return str(zip(self.angle_, self.w_, self.error_))

    def __repr__(self):
        return str(zip(self.angle_, self.w_, self.error_))

    def __len__(self):
        return self.Nth_

    def __getitem__(self, ii):
        return (self.angle_[ii], self.w_[ii], self.error_[ii])

    def __hash__(self):
        # NOTE(review): constant hash -- every instance collides. The local
        # dict the original built here was dead code and has been removed.
        return 0

    def Plot(self, opts=''):
        """Fill self.plot_ with the stored points (ROOT backend only)."""
        if self.plot_ is None:
            print('Graphic library not defined.')
            raise Exception
        elif self.Nth_ == 0:
            print('Empty object. No points to draw.')
            raise Exception
        # Fixed dataset corruption on the next two lines ("se | lf",
        # "print |") and the 'Chech' typo in the message.
        elif not (self.Nth_ == len(self.w_) == len(self.angle_)):
            print('Dimension does not agree. Check angle and w.')
            raise Exception
        elif __GRAPHIX__ == 'ROOT':
            for th in xrange(self.Nth_):
                self.plot_.SetPoint(th, self.angle_[th], self.w_[th])
                self.plot_.SetPointError(th, 0., self.error_[th])
class DataW(CorrelationFunction):
    """Measured correlation function read from Athena output files, with a
    covariance matrix, supporting chi-square comparison against a theory.

    NOTE(review): ReadAthenaFunction stores the path in ``self.pathW_`` but
    __init__ and ReadAthenaCovariance use ``self.pathWtheta_``, so the
    cross-file consistency check in ReadAthenaCovariance can never trigger
    -- confirm which attribute name is intended.
    """
    def __init__(self,name=''):
        CorrelationFunction.__init__(self,name)
        self.covariance_ = numpy.zeros([0,0])
        self.pathWtheta_ = ''
        self.pathCovariance_ = ''
    def ReadAthenaFunction(self,path):
        """Load angle/w/error columns from an Athena w(theta) file."""
        # First pass reads only the angle column to validate the point count.
        tmp = numpy.loadtxt(path,usecols=[0])
        if self.pathCovariance_ != '' and tmp.size != self.Nth_ :
            print 'Number of points does not agree with previous covariance file: ',self.pathCovariance_
            raise Exception
        else:
            self.angle_ = numpy.loadtxt(path,usecols=[0])
            self.w_ = numpy.loadtxt(path,usecols=[1])
            self.error_ = numpy.loadtxt(path,usecols=[2])
            self.Nth_ = len(self.w_)
            # Normalize to an absolute path.
            if path[0] != '/':
                if path[0] == '.':
                    self.pathW_ = os.getcwd()+'/'+path[1:]
                else:
                    self.pathW_ = os.getcwd()+'/'+path[:]
            else:
                self.pathW_ = path[:]
    def SetDiagonalCovariance(self):
        """Build a diagonal covariance matrix from the per-point errors."""
        self.covariance_ = numpy.zeros([self.Nth_,self.Nth_])
        for th in xrange(self.Nth_):
            self.covariance_[th][th] = self.error_[th]**2
        self.pathCovariance_ = ''
    def ReadAthenaCovariance(self,path):
        """Load a full covariance matrix, checking it matches the w file."""
        tmp = numpy.loadtxt(path)
        if self.pathWtheta_ != '' and tmp.size != self.Nth_**2 :
            print 'Number of points does not agree with previous wtheta file: ',self.pathWtheta_
            raise Exception
        else:
            self.covariance_ = numpy.loadtxt(path)
            self.Nth_ = int(math.sqrt(self.covariance_.size))
            # Normalize to an absolute path.
            if path[0] != '/':
                if path[0] == '.':
                    self.pathCovariance_ = os.getcwd()+'/'+path[1:]
                else:
                    self.pathCovariance_ = os.getcwd()+'/'+path[:]
            else:
                self.pathCovariance_ = path[:]
    def GetChi(self,w_theory):
        """Return chi^2 of *w_theory* against the data using the inverse
        covariance (generalized least squares)."""
        if self.Nth_ == 0:
            print 'Empty data object! Can not do fit.'
            raise Exception
        elif len(w_theory) != self.Nth_:
            print 'Length of input array ',len(w_theory),' does not agree with those of data ',self.Nth_,'.'
            raise Exception
        errorM = numpy.linalg.inv(self.covariance_)
        chisq = 0.
        # chi^2 = (d - t)^T C^-1 (d - t), accumulated element by element.
        for th1 in xrange(self.Nth_):
            for th2 in xrange(self.Nth_):
                chisq += ( self.w_[th1]-w_theory[th1] )*( self.w_[th2]-w_theory[th2] )*errorM[th1][th2]
        return chisq
class TheoMagW(CorrelationFunction):
def __init__(self,name='',bias=1.,alpha=1.):
CorrelationFunction.__init__(self,name)
self.w0_ = numpy.zeros([0])
self.alpha_ = alpha
self.bias_ = bias
self.pathWtheta_ = ''
def ReadFunction(self,path):
self.angle_ = numpy.loadtxt(path,usecols=[0])
self.w0_ = numpy.loadtxt(path,usecols=[1])
self.Nth_ = len(self.w0_)
self.w_ = map(lambda x: x*self.alpha_*self.bias_,self.w0_)
self.error_ = numpy.zeros([self.Nth_])
if path[0] != '/':
if path[0] == '.':
self.pathWtheta_ = os.getcwd()+'/'+path[1:]
else:
self.pathWtheta_ = os.getcwd()+'/'+path[:]
else:
self.pathWtheta_ = path[:]
def Update(self):
self.w_ = map(lambda x: x*self.alpha_*self.bias_,self.w0_)
|
ghchinoy/tensorflow | tensorflow/python/training/saving/functional_saver_test.py | Python | apache-2.0 | 5,040 | 0.002778 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for the functional saver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import test
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import gfile
from tensorflow.python.training.saving import functional_saver
from tensorflow.python.training.saving import saveable_object_util
class SaverTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_resource_variable(self):
v1 = resource_variable_ops.ResourceVariable(2.)
self.evaluate(v1.initializer)
saver = functional_saver._SingleDeviceSaver(
saveable_object_util.saveable_objects_for_op(v1, "x"))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(saver.save(constant_op.constant(prefix)))
self.assertEqual(2, len(gfile.Glob(prefix + "*")))
self.evaluate(v1.assign(1.))
self.evaluate(saver.restore(prefix))
self.assertEqual(2., self.evaluate(v1))
v2 = resource_variable_ops.ResourceVariable(3.)
self.evaluate(v2.initializer)
second_saver = functional_saver._SingleDeviceSaver(
saveable_object_util.saveable_objects_for_op(v2, "x"))
self.evaluate(second_saver.restore(prefix))
self.assertEqual(2., self.evaluate(v2))
def test_to_proto(self):
v1 = resource_variable_ops.ResourceVariable(2.)
saver = functional_saver.MultiDeviceSaver(
saveable_object_util.saveable_objects_for_op(v1, "x"))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
proto_accumulator = []
wrapped = wrap_function.wrap_function(
lambda: proto_accumulator.append(saver.to_proto()), signature=())
self.assertEqual(1, len(proto_accumulator))
proto = proto_accumulator[0]
save = wrapped.prune(
feeds=wrapped.graph.get_tensor_by_name(proto.filename_tensor_name),
fetches=wrapped.graph.get_tensor_by_name(proto.save_tensor_name))
restore = wrapped.prune(
feeds=wrapped.graph.get_tensor_by_name(proto.filename_ten | sor_name),
fetches=wrapped.graph.get_operation_by_name(proto.restore_op_name))
save_path = save(constant_op.constant(prefix))
v1.assign(1.)
restore(constant_op.constant(save_path))
self.assertEqual(2., self.evaluate(v1))
v2 = resource_variable_ops.ResourceVariable(3.)
second_saver = functional_saver.MultiDeviceSaver(
saveable_object_util.saveable_objects_for_op(v2, "x"))
second_saver.restore(save_path)
self.assertEqual(2., self.evaluate(v2))
|
@test_util.run_v1_only(
"Needs an API to setup multiple devices, b/124805129")
# Set up multiple devices when graph building. Before test.main() we configure
# the devices for eager execution.
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(device_count={"CPU": 3}))
def test_checkpoint_is_sharded_by_device(self):
with ops.device("cpu:0"):
v0 = resource_variable_ops.ResourceVariable(0.)
with ops.device("cpu:1"):
v1 = resource_variable_ops.ResourceVariable(1.)
with ops.device("cpu:2"):
v2 = resource_variable_ops.ResourceVariable(2.)
self.evaluate([v0.initializer, v1.initializer, v2.initializer])
saver = functional_saver.MultiDeviceSaver(
list(saveable_object_util.saveable_objects_for_op(v0, "v0"))
+ list(saveable_object_util.saveable_objects_for_op(v1, "v1"))
+ list(saveable_object_util.saveable_objects_for_op(v2, "v2")))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(saver.save(constant_op.constant(prefix)))
self.assertEqual(4, len(gfile.Glob(prefix + "*")))
self.evaluate(v0.assign(-1.))
self.evaluate(v1.assign(-1.))
self.evaluate(v2.assign(-1.))
self.evaluate(saver.restore(constant_op.constant(prefix)))
self.assertEqual(0., self.evaluate(v0))
self.assertEqual(1., self.evaluate(v1))
self.assertEqual(2., self.evaluate(v2))
if __name__ == "__main__":
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={"CPU": 3}))
test.main()
|
jaa2015/FlaskProject | flaskproject/users/models.py | Python | mit | 1,443 | 0 | from flask_security import UserMixin, RoleMixin
from ..core import db
roles_users = db.Table(
'roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('users | .id')),
db.Column('role_id', db.Integer(), db.ForeignKey('roles.id')))
class Role(db.Model, RoleMixin):
__tablename__ = 'roles'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Role %r>' % (self.name)
class User(db.Model, UserMixin):
__tablename__ = 'users'
| id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(120))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
last_login_at = db.Column(db.DateTime())
current_login_at = db.Column(db.DateTime())
last_login_ip = db.Column(db.String(100))
current_login_ip = db.Column(db.String(100))
login_count = db.Column(db.Integer)
registered_at = db.Column(db.DateTime())
birth_date = db.Column(db.DateTime())
last_edit_date = db.Column(db.DateTime())
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
|
tongpa/PollSurveyWeb | pollandsurvey/service/interfacewebservice.py | Python | gpl-2.0 | 1,487 | 0.014122 | # -*- coding: utf-8 -*-
from tg.configuration import AppConfig, config
from tg import request
from pollandsurvey import model
from tgext.pyutilservice import Utility
import logging
log = logging.getLogger(__name__)
from tgext.pylogservice import LogDBHandler
class InterfaceWebService(object):
def __init__(self):
self.modules ='INTER | FACESERVICE.WEBSERVICE'
dh = LogDBHandler( config=config,requ | est=request)
log.addHandler(dh)
self.utility = Utility()
def mapVoterUser(self, voter):
"""
Check Voter and User in table sur_member_user is Empty will create again.
if not will pass.
Keyword arguments:
voter -- Object Voter
"""
self.memberUser = model.MemberUser();
try:
if voter:
user = model.User.by_email_address(voter.email)
if user :
self.memberUser = model.MemberUser.getByUserIdandVoter(user.user_id, voter.id_voter)
if self.memberUser is None:
self.memberUser = model.MemberUser();
self.memberUser.user_id = user.user_id
self.memberUser.id_voter = voter.id_voter
self.memberUser.save()
del user
except Exception as e:
log.error("mapVoterUser : %s" %e, extra=extraLog(modules=self.modules));
return self.memberUser; |
sio2project/oioioi | oioioi/problems/forms.py | Python | gpl-3.0 | 9,816 | 0.001324 | from collections import OrderedDict
from django import forms
from django.conf import settings
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from oioioi.base.utils.input_with_generate import TextInputWithGenerate
from oioioi.base.utils.inputs import narrow_input_field
from oioioi.contests.models import ProblemStatementConfig, RankingVisibilityConfig
from oioioi.problems.models import OriginInfoValue, Problem, ProblemSite
class ProblemUploadForm(forms.Form):
contest_id = forms.CharField(widget=forms.HiddenInput, required=False)
def __init__(self, contest, existing_problem, *args, **kwargs):
user = kwargs.pop('user', None)
super(ProblemUploadForm, self).__init__(*args, **kwargs)
self.round_id = None
self.visibility = None
if contest and not existing_problem:
choices = [(r.id, r.name) for r in contest.round_set.all()]
if len(choices) >= 2:
fields = list(self.fields.items())
fields[0:0] = [
('round_id', forms.ChoiceField(choices=choices, label=_("Round")))
]
self.fields = OrderedDict(fields)
elif len(choices) == 1:
self.round_id = choices[0][0]
if 'oioioi.problemsharing' in settings.INSTALLED_APPS and not existing_problem:
if user and user.has_perm('teachers.teacher'):
choices = [
(Problem.VISIBILITY_FRIENDS, 'Friends'),
(Problem.VISIBILITY_PRIVATE, 'Private'),
(Problem.VISIBILITY_PUBLIC, 'Public'),
]
default_visibility = Problem.VISIBILITY_FRIENDS
if contest:
last_problem = (
Problem.objects.filter(contest=contest, author=user)
.order_by('-id')
.first()
)
if (
last_problem
and last_problem.visibility == Problem.VISIBILITY_PRIVATE
):
default_visibility = Problem.VISIBILITY_PRIVATE
self.initial.update({'visibility': default_visibility})
self.fields.update(
{
'visibility': forms.ChoiceField(
choices=choices,
label=_("Visibility"),
required=True,
initial=default_visibility,
)
}
)
def clean(self):
cleaned_data = super(ProblemUploadForm, self).clean()
if self.round_id:
cleaned_data['round_id'] = self.round_id
if self.visibility:
cleaned_data['visibility'] = self.visibility
return cleaned_data
class PackageUploadForm(ProblemUploadForm):
package_file = forms.FileField(label=_("Package file"))
class ProblemStatementConfigForm(forms.ModelF | orm):
class Meta(object):
fields = '__all__'
model = ProblemStatementConfig
widgets = {'visible': forms.RadioSelect()}
class RankingVisibilityConfigForm(forms.ModelForm):
class Meta(object):
fields = '__all__'
model = RankingVisib | ilityConfig
widgets = {'visible': forms.RadioSelect()}
class ProblemSiteForm(forms.ModelForm):
class Meta(object):
fields = ['url_key']
model = ProblemSite
widgets = {'url_key': TextInputWithGenerate()}
class ProblemsetSourceForm(forms.Form):
url_key = forms.CharField(label=_("Enter problem's secret key"), required=True)
def __init__(self, url_key, *args, **kwargs):
super(ProblemsetSourceForm, self).__init__(*args, **kwargs)
if url_key:
self.initial = {'url_key': url_key}
class ProblemStatementReplaceForm(forms.Form):
file_name = forms.ChoiceField(label=_("Statement filename"))
file_replacement = forms.FileField(label=_("Replacement file"), required=True)
def __init__(self, file_names, *args, **kwargs):
super(ProblemStatementReplaceForm, self).__init__(*args, **kwargs)
upload_file_field = self.fields['file_replacement']
file_name_field = self.fields['file_name']
file_name_field.choices = [('', '')] + [(name, name) for name in file_names]
self._set_field_show_always('file_name')
narrow_input_field(file_name_field)
narrow_input_field(upload_file_field)
self.initial.update({'file_name': ''})
def _set_field_show_always(self, field_name):
self.fields[field_name].widget.attrs['data-submit'] = 'always'
class PackageFileReuploadForm(forms.Form):
file_name = forms.ChoiceField(label=_("File name"))
file_replacement = forms.FileField(label=_("Replacement file"), required=False)
def __init__(self, file_names, *args, **kwargs):
super(PackageFileReuploadForm, self).__init__(*args, **kwargs)
upload_file_field = self.fields['file_replacement']
file_name_field = self.fields['file_name']
file_name_field.choices = [('', '')] + [(name, name) for name in file_names]
self._set_field_show_always('file_name')
narrow_input_field(file_name_field)
narrow_input_field(upload_file_field)
self.initial.update({'file_name': ''})
def _set_field_show_always(self, field_name):
self.fields[field_name].widget.attrs['data-submit'] = 'always'
def _localized_formset_get_initial(localized_objects):
return [
{'language': lang[0]}
for lang in settings.LANGUAGES
if not localized_objects.filter(language=lang[0]).exists()
]
class ProblemNameInlineFormSet(forms.models.BaseInlineFormSet):
def __init__(self, *args, **kwargs):
kwargs['initial'] = _localized_formset_get_initial(kwargs['instance'].names)
super(ProblemNameInlineFormSet, self).__init__(*args, **kwargs)
self.max_num = len(settings.LANGUAGES)
class LocalizationFormset(forms.models.BaseInlineFormSet):
def __init__(self, *args, **kwargs):
kwargs['initial'] = _localized_formset_get_initial(
kwargs['instance'].localizations
)
super(LocalizationFormset, self).__init__(*args, **kwargs)
self.min_num = self.max_num = len(settings.LANGUAGES)
for form in self.forms:
form.empty_permitted = False
class OriginInfoValueForm(forms.ModelForm):
@transaction.atomic
def save(self, commit=True):
instance = super(OriginInfoValueForm, self).save(commit=False)
# Ensure parent_tag exists on problems
category = self.cleaned_data['category']
parent_tag = category.parent_tag
instance.parent_tag = parent_tag
problems = self.cleaned_data.get('problems').prefetch_related('origintag_set')
for problem in problems:
if parent_tag not in problem.origintag_set.all():
parent_tag.problems.add(problem)
if commit:
instance.save()
return instance
class Meta(object):
model = OriginInfoValue
fields = ('category', 'value', 'order', 'problems')
exclude = ('parent_tag',)
def _label_from_instance(obj):
return obj.full_name
class OriginTagThroughForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(OriginTagThroughForm, self).__init__(*args, **kwargs)
self.fields['origintag'].label_from_instance = _label_from_instance
class Meta(object):
labels = {'origintag': _("Origin Tag")}
help_texts = {
'origintag': _(
"Origin tags inform about the problem's general origin "
"- e.g. a specific competition, olympiad, or programming camp."
)
}
class OriginInfoValueThroughForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(OriginInfoValueThroughForm, self).__init__(*args, **kwargs)
self.fields['origininfovalue'].label_from_instance = _label_from_instance
class Meta(object):
|
tensor-tang/Paddle | python/paddle/fluid/incubate/fleet/collective/__init__.py | Python | apache-2.0 | 14,141 | 0.001061 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
import logging
import paddle.fluid as fluid
import paddle.fluid.io as io
import paddle.fluid.transpiler.distribute_transpiler as dist_transpiler
from paddle.fluid.incubate.fleet.base.fleet_base import Fleet
from paddle.fluid.incubate.fleet.base.fleet_base import Mode
from paddle.fluid.incubate.fleet.base.fleet_base import DistributedOptimizer
from paddle.fluid import compiler
import os
import sys
import six
class LambConfig(object):
def __init__(self):
pass
class DistFCConfig(object):
def __init__(self):
pass
class Collective(Fleet):
def __init__(self):
super(Collective, self).__init__(Mode.COLLECTIVE)
self._local_ip = 0
self.startup_program = None
self._origin_program = None
self._transpiled_program = None
self.main_program = None
def init_worker(self):
logging.warn(
"You should not call 'init_worker' method for collective mode.")
def run_worker(self, main_programs=None, scopes=None):
logging.warn(
"You should not call 'run_worker' method for collective mode.")
def init_server(self, model_dir=None):
logging.warn(
"You should not call 'init_server' method for collective mode.")
def run_server(self):
logging.warn(
"You should not call 'run_server' method for collective mode.")
def stop_worker(self):
logging.warn(
"You should not call 'stop_worker' method for collective mode.")
def distributed_optimizer(self, optimizer, strategy=None):
self._optimizer = \
CollectiveOptimizer(optimizer, strategy)
return self._optimizer
def save_inference_model(self,
executor,
dirname,
feeded_var_names=None,
target_vars=None,
main_program=None,
export_for_deployment=True):
io.save_inference_model(dirname, feeded_var_names, target_vars,
executor, main_program, None, None,
export_for_deployment)
def save_persistables(self, executor, dirname, main_program=None):
io.save_persistables(executor, dirname, main_program, None)
fleet = Collective()
class DistributedStrategy(fluid.BuildStrategy):
"""
Init function of DistributedStrategy
"""
def __init__(self):
super(DistributedStrategy, self).__init__()
self.use_local_sgd = False
self.use_dist_fc = False
self.dist_fc_config = None # DistFCConfig
self.mode = "nccl2" # or collective
self.collective_mode = None # local_sgd or grad_allreduce
self.nccl_comm_num = 1
self.forward_recompute = False
self.recompute_checkpoints = []
self.exec_strategy = fluid.ExecutionStrategy()
# configurations below are used for unit test
self._ut4grad_allreduce = False
class CollectiveOpBasedOptimizer(DistributedOptimizer):
"""
Collective Operator Base Class For Distributed Optimizer
The class is invisible to a user
"""
def __init__(self, optimizer, strategy=None):
assert isinstance(
strategy,
DistributedStrategy), "strategy must be DistributedStrategy"
super(CollectiveOpBasedOptimizer, self).__init__(optimizer, strategy)
def backward(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None,
callbacks=None):
return self._optimizer.backward(loss, startup_program, parameter_list,
no_grad_set, callbacks)
def apply_gradients(self, params_grads):
return self._optimizer.apply_gradients(params_grads)
class CollectiveOptimizer(DistributedOptimizer):
"""
DistributedOptimizer is a wrapper for paddle.fluid.optimizer
A user should pass a paddle.fluid.optimizer to DistributedOptimizer
minimize() function is implemented.
DistributedOptimizer is the starting point for a user who wants to
run distributed training. The optimized information will be stored in
Fleet() instance who holds the global information about current distributed
training.
"""
def __init__(self, optimizer, strategy=DistributedStrategy()):
super(CollectiveOptimizer, self).__init__(optimizer, strategy)
if strategy.forward_recompute:
self.forward_recompute = True
self.recompute_checkpoints = strategy.recompute_checkpoints
else:
self.forward_recompute = False
self.print_config = False
def backward(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None,
callbacks=None):
return self._optimizer.backward(loss, startup_program, parameter_list,
no_grad_set, callbacks)
def apply_gradients(self, params_grads):
return self._optimizer.apply_gradients(params_grads)
def _check_condition(self, name, **kwargs):
for k, v in six.iteritems(kwargs):
if v is True:
assert False, "you can't use %s and %s together" % (name, k)
def _check_collective_mode(self, main_program, optimizer, strategy):
"""
Check the conflict condtions.
"""
if strategy.use_local_sgd:
strategy.mode = "collective"
strategy.collective_mode = "local_sgd"
self._check_condition(
"use_local_sgd",
use_dgc=main_program._enable_dgc,
use_dist_fc=strategy.use_dist_fc,
use_lamb=main_program._use_lamb)
if strategy.use_dist_fc:
self._check_condition(
"use_dist_fc",
use_dgc=main_program._enable_dgc,
use_local_sgd=strategy.use_local_sgd,
use_lamb=main_program._use_lamb)
assert strategy.dist_fc_config is not None, "DistributedStrategy.dist_fc_config should be set"
if strategy._ut4grad_allreduce:
strategy.mode = "collective"
strategy.collective_mode = "grad_allreduce"
self._check_condition(
"_ut4grad_allreduce",
use_dgc=main_program._enable_dgc,
use_lamb=main_program._use_lamb)
if self._strategy.collective_mode=="local_sgd" \
or self._strategy.collective_mode == "grad_allreduce":
assert self._strategy.mode == "collective", \
"local_sg | d and grad_allreduce can be used under collective mode"
def _transpile(self, startup_program, main_program):
"""
Transpile the programs to distributed programs. And add the variables.
"""
worker_endpoints = fleet.worker_endpoints() |
trainer_id = fleet.worker_index()
current_endpoint = fleet.worker_endpoints()[trainer_id]
worker_endpoints_env = ','.join(worker_endpoints)
trainers_num = fleet.worker_num()
if self.print_config:
print("worker_endpoints:{} trainers_num:{} current_endpoint:{} \
trainer_id:{}".format(worker_endpoints, trainers_num,
current_endpoint, trainer_id))
# call transpiler
config = dist_transpiler.DistributeTranspilerConfig()
config. |
kbaseIncubator/catalog | lib/biokbase/catalog/local_function_reader.py | Python | mit | 13,976 | 0.003864 | import json
import os
'''
Class responsible for parsing/validating the local function specs, processing the specs,
and returning something that can be saved to the DB.
Typical usage is to initialize and the read_and_validate.
Then, once a compilation report is made, you can perform a full validation.
Finally, to create entries for the db, you can call extract_lf_names and extract_lf_records
'''
class LocalFunctionReader:
def __init__(self):
self.function_specs = {}
'''
Quickly parses and validates that there are specs defined in the correct format in the
correct version, and reads in everything. If things looked ok, returns a simple report
that can be used to print stuff to logs.
'''
def parse_and_basic_validation(self, basedir, module_details, module_name, version,
git_commit_hash):
report = {
'n_local_functions': 0,
'functions_parsed': [],
'functions_errored': []
}
# 1) list files in ui/local_functions
if os.path.isdir(os.path.join(basedir, 'ui', 'local_functions')):
for spec in os.listdir(os.path.join(basedir, 'ui', 'local_functions')):
if os.path.isfile(os.path.join(basedir, 'ui', 'local_functions', spec)):
file_name_tokens = spec.split('.')
if len(file_name_tokens) != 2:
continue
if file_name_tokens[1] == 'json':
# a spec is defined, so extract out the function id
function_id = file_name_tokens[0]
try:
with open(
os.path.join(basedir, 'ui', 'local_functions', spec)) as file:
spec_parse = json.load(file)
except Exception as e:
report['functions_errored'].append({'filename': spec, 'error': str(e)})
continue
# make sure basic required fields are there
if 'name' not in spec_parse:
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification missing required field 'name'"})
continue
if not isinstance(spec_parse['name'], str): # need to update for Python3
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification field 'name' must be a string"})
continue
if 'short_description' not in spec_parse:
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification missing required field 'short_description'"})
continue
if not isinstance(spec_parse['short_description'], str):
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification field 'short_description' must be a string"})
continue
if 'long_description' not in spec_parse:
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification missing required field 'long_description'"})
continue
long_description = spec_parse['long_description']
# right now authors should be optional, tags should be optional
authors = []
if 'authors' in spec_parse:
if self._validate_as_list_of_strings(spec_parse['authors']):
authors = spec_parse['authors']
else:
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification optional field 'authors' must be a list of strings"})
continue
else:
# default aut | hors to module owners
for o in module_details['ow | ners']:
authors.append(o['kb_username'])
# could probably make this code cleaner, but for now just do brute force if/else to validate
tags = {'categories': [], 'input': {'file_types': [], 'kb_types': []},
'output': {'file_types': [], 'kb_types': []}}
if 'tags' in spec_parse:
if not isinstance(spec_parse['tags'], dict):
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification optional field 'tags' must be an object"})
continue
if 'categories' in spec_parse:
if self._validate_as_list_of_strings(
spec_parse['tags']['categories']):
tags['categories'] = spec_parse['tags']['categories']
else:
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification optional field 'authors' must be a list of strings"})
continue
if 'input' in spec_parse['tags']:
if not isinstance(spec_parse['tags']['input'], dict):
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification optional field 'tags.input' must be an object"})
continue
if 'kb_types' in spec_parse['tags']['input']:
if self._validate_as_list_of_strings(
spec_parse['tags']['input']['kb_types']):
tags['input']['kb_types'] = spec_parse['tags']['input'][
'kb_types']
else:
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification optional field 'tags.input.kb_types' must be a list of strings"})
continue
if 'file_types' in spec_parse['tags']['input']:
if self._validate_as_list_of_strings(
spec_parse['tags']['input']['file_types']):
tags['input']['file_types'] = spec_parse['tags']['input'][
'file_types']
else:
report['functions_errored'].append(
{'filename': spec,
'error': "Local Function specification optional field 'tags.input.file_types' must be a list of strings"})
continue
if 'output' in spec_parse['tags']:
|
max-ionov/russian-anaphora | resolute-text.py | Python | gpl-3.0 | 1,079 | 0.032437 | #!/usr/bin/python2.7
# -!- coding: utf-8 -!-
# usage: resolute-text.py input pronouns model
import os, sys, codecs
import lemmatizer, anaphoramllib
usage = 'usage: resolute-text.py input pronouns model'
if(__name__ == '__main__'):
if len(sys.argv) < 4:
print (usage)
sys.exit()
text = ''
pronouns = anaphoramllib.LoadPronouns(sys.argv[2])
inpFile = codecs.open(sys.argv[1], encoding = 'utf-8')
for line in (line_raw for line_raw in inpFile):
text += line
words, curOffset = lemmatizer.lemmatizer(text)#, loadFrom = sys.argv[1])
groups = lemmatizer.GetGroups(words)#, loadFrom = sys.argv[1])
mlResolutor = anaphoramllib.AnaphoraResolutorML()
mlResolutor.LoadPronouns(pronouns)
mlResoluto | r.SetWindow(20, 0)
mlResolutor.LoadModel(sys.argv[3], sys.argv[3] + '.labels')
for group in groups:
if group[1] in mlResolutor.pronounIndex:
antecedent = mlResolutor.FindAntecedent(group, groups)
if len(antecedent) == 0:
print 'no results for group at offset %d' % | group[-2]
else:
print group[0], ' ---> ', antecedent[0][1][0]
#print antecedent |
kevingu1003/python-pptx | pptx/opc/spec.py | Python | mit | 978 | 0 | # encoding: utf-8
"""
Provides mappings that embody aspects of the Open XML spec ISO/IEC 29500.
"""
from .constants import CONTENT_TYPE as CT
default_content_types = (
('bin', CT.PML_PRINTER_SETTINGS),
('bin', CT.SML_PRINTER_SETTINGS),
('bin', CT.WML_PRINTER_SETTINGS),
('bmp', CT.BMP),
('emf', CT.X_EMF),
('fntdata', CT.X_FONTDATA),
('gif', CT.GIF),
('jpe', CT.JPEG),
('jpeg', CT.JPEG),
('jpg | ', CT.JPEG),
('png', CT.PNG),
('rels', CT.OPC_RELATIONSHIPS),
('tif', CT.TIFF),
('tiff', CT.TIFF),
('wdp', CT.MS_PHOTO),
('wmf', CT.X_ | WMF),
('xlsx', CT.SML_SHEET),
('xml', CT.XML),
)
image_content_types = {
'bmp': CT.BMP,
'emf': CT.X_EMF,
'gif': CT.GIF,
'jpe': CT.JPEG,
'jpeg': CT.JPEG,
'jpg': CT.JPEG,
'png': CT.PNG,
'tif': CT.TIFF,
'tiff': CT.TIFF,
'wdp': CT.MS_PHOTO,
'wmf': CT.X_WMF,
}
|
ganga-devs/ganga | ganga/GangaLHCb/test/Unit/DiracAPI/TestDiracCommands.py | Python | gpl-3.0 | 16,423 | 0.003532 |
import datetime
from collections import namedtuple
import os
import tempfile
import time
import uuid
import random
import stat
from textwrap import dedent
import pytest
from GangaCore.Utility.logging import getLogger
from GangaDirac.Lib.Utilities.DiracUtilities import execute
from GangaCore.testlib.mark import external
from GangaCore.testlib.GangaUnitTest import load_config_files, clear_config
logger = getLogger(modulename=True)
statusmapping = {
'Checking': 'submitted',
'Completed': 'running',
'Deleted': 'failed',
'Done': 'completed',
'Failed': 'failed',
'Killed': 'killed',
'Matched': 'submitted',
'Received': 'submitted',
'Running': 'running',
'Staging': 'submitted',
'Stalled': 'running',
'Waiting': 'submitted',
}
JobInfo = namedtuple('JobInfo', ['id', 'get_file_lfn', 'remove_file_lfn'])
@pytest.yield_fixture(scope='module')
def load_config():
"""Load the Ganga config files before the test and clean them up afterwards"""
load_config_files()
# make sure post-boostrap hook is run to ensure Dirac config options are set correctly
# Only becomes an issue if this test is run on it's own
from GangaLHCb import postBootstrapHook
postBootstrapHook()
yield
clear_config()
@pytest.yield_fixture(scope='class')
def dirac_job(load_config):
sandbox_str = uuid.uuid4()
get_file_str = uuid.uuid4()
remove_file_str = uuid.uuid4()
exe_script = """#!/bin/bash
echo '%s' > sandboxFile.txt
echo '%s' > getFile.dst
echo '%s' > removeFile.dst
""" % (sandbox_str, get_file_str, remove_file_str)
logger.info("exe_script:\n%s\n" % str(exe_script))
exe_file, exe_path_name = tempfile.mkstemp()
with os.fdopen(exe_file, 'wb') as f:
f.write(exe_script)
st = os.stat(exe_path_name)
os.chmod(exe_path_name, st.st_mode | stat.S_IEXEC)
api_script = """
# Script written in TestDiracCommands.py
from LHCbDIRAC.Interfaces.API.Dirac import Dirac
from LHCbDIRAC.Interfaces.API.Job import Job
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForCountry
uk_ses = getSEsForCountry('uk')['Value']
j = Job()
j.setName('Ganga-DiracCommands-InitTestJob')
j.setCPUTime(10)
j.setExecutable('###EXE_SCRIPT_BASE###','','Ganga_Executable.log')
j.setInputSandbox(['###EXE_SCRIPT###'])
j.setOutputSandbox(['std.out','std.err','sandboxFile.txt'])
j.setOutputData(['getFile.dst', 'removeFile.dst'], outputSE=uk_ses)
#submit the job to dirac
dirac=Dirac()
result = dirac.submitJob(j)
output(result)
"""
api_script = dedent(api_script)
final_submit_script = api_script.replace('###EXE_SCRIPT###', exe_path_name).replace('###EXE_SCRIPT_BASE###', os.path.basename(exe_path_name))
confirm = execute(final_submit_script, return_raw_dict=True)
if not isinstance(confirm, dict):
raise RuntimeError('Problem submitting job\n{0}'.format(confirm))
assert 'OK' in confirm, 'Failed to submit job!'
assert confirm['OK'], 'Failed to submit job!'
job_id = confirm['Value']
logger.info(job_id)
os.remove(exe_path_name | )
logger.info('Waiting for DIRAC job to finish')
timeout = 1200
end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=timeout)
status = execute('status([%s], %s)' % (job_id, repr(statusmapping)), return_raw_dict=True)
while (status['OK'] and statusmapping[status['Value'][0][1]] not in ['completed', 'failed']) and datetime.datetime.utcnow() < end_time:
time.sleep(5)
status = execute('status([%s], %s)' % (job_id, repr(st | atusmapping)), return_raw_dict=True)
print("Job status: %s" % status)
assert 'OK' in status, 'Failed to get job Status!'
assert status['OK'], 'Failed to get job Status!'
assert statusmapping[status['Value'][0][1]] == 'completed', 'job not completed properly: %s' % status
logger.info("status: %s", status)
output_data_info = execute('getOutputDataInfo("%s")' % job_id, return_raw_dict=True)
logger.info('output_data_info: %s' % output_data_info)
max_retry = 20
count = 0
while not output_data_info.get('OK', True) and count != max_retry:
time.sleep(5)
output_data_info = execute('getOutputDataInfo("%s")' % job_id, return_raw_dict=True)
logger.info("output_data_info:\n%s\n", output_data_info)
count += 1
assert 'OK' in output_data_info, 'getOutputDataInfo Failed!'
assert output_data_info['OK'], 'getOutputDataInfo Failed!'
logger.info("\n\n\noutput_data_info: %s\n\n\n" % output_data_info)
get_file_lfn = output_data_info['Value']['getFile.dst']['LFN']
remove_file_lfn = output_data_info['Value']['removeFile.dst']['LFN']
logger.info("%s %s", get_file_lfn, remove_file_lfn)
yield JobInfo(job_id, get_file_lfn, remove_file_lfn)
confirm = execute('removeFile("%s")' % get_file_lfn, return_raw_dict=True)
assert 'OK' in confirm, 'removeFile Failed!'
assert confirm['OK'], 'removeFile Failed!'
@pytest.fixture(scope='module')
def dirac_sites(load_config):
"""Grab a shuffled list of UK DIRAC storage elements"""
site_script = dedent("""
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForCountry
output(getSEsForCountry('uk'))
""")
output = execute(site_script, return_raw_dict=True)
assert output['OK'], 'Could not fetch list of SEs'
sites = output['Value']
random.shuffle(sites)
return sites
@external
class TestDiracCommands(object):
def test_peek(self, dirac_job):
confirm = execute('peek("%s")' % dirac_job.id, return_raw_dict=True)
logger.info(confirm)
assert confirm['OK'], 'peek command not executed successfully'
def test_getJobCPUTime(self, dirac_job):
confirm = execute('getJobCPUTime("%s")' % dirac_job.id, return_raw_dict=True)
logger.info(confirm)
assert confirm['OK'], 'getJobCPUTime command not executed successfully'
def test_getOutputData(self, dirac_job):
confirm = execute('getOutputData("%s")' % dirac_job.id, return_raw_dict=True)
logger.info(confirm)
assert confirm['OK'], 'getOutputData command not executed successfully'
def test_getOutputSandbox(self, dirac_job):
confirm = execute('getOutputSandbox("%s")' % dirac_job.id, return_raw_dict=True)
logger.info(confirm)
assert confirm['OK'], 'getOutputSandbox command not executed successfully'
def test_getOutputDataInfo(self, dirac_job):
confirm = execute('getOutputDataInfo("%s")' % dirac_job.id, return_raw_dict=True)
logger.info(confirm)
assert confirm['OK'], 'getOutputDataInfo command not executed successfully'
assert isinstance(confirm['Value']['getFile.dst'], dict), 'getOutputDataInfo command not executed successfully'
def test_getOutputDataLFNs(self, dirac_job):
confirm = execute('getOutputDataLFNs("%s")' % dirac_job.id, return_raw_dict=True)
logger.info(confirm)
logger.info(confirm)
assert confirm['OK'], 'getOutputDataLFNs command not executed successfully'
def test_normCPUTime(self, dirac_job):
confirm = execute('normCPUTime("%s")' % dirac_job.id, return_raw_dict=True)
logger.info(confirm)
assert confirm['OK'], 'normCPUTime command not executed successfully'
assert isinstance(confirm['Value'], str), 'normCPUTime ommand not executed successfully'
def test_getStateTime(self, dirac_job):
confirm = execute('getStateTime("%s", "completed")' % dirac_job.id, return_raw_dict=True)
logger.info(confirm)
assert confirm['OK'], 'getStateTime command not executed successfully'
assert isinstance(confirm['Value'], datetime.datetime), 'getStateTime command not executed successfully'
def test_timedetails(self, dirac_job):
confirm = execute('timedetails("%s")' % dirac_job.id, return_raw_dict=True)
logger.info(confirm)
assert confirm['OK'], 'timedetails command not executed successfully'
assert isinstance(confirm['Value'], dict), 'Command not executed successfully'
def test_y_reschedule(self |
Kalimaha/pact-test | tests/runners/pact_tests_runner.py | Python | mit | 767 | 0 | from pact_test.runners import pact_tests_runner
def test_consumer_tests(mocker):
mocker.spy(pact_tests_runner, 'run_consumer_tests')
pact_tests_runner.verify(v | erify_consumers=True)
assert pact_tests_runner.run_consumer_tests.call_count == 1
def test_provider_tests(mocker):
| mocker.spy(pact_tests_runner, 'run_provider_tests')
pact_tests_runner.verify(verify_providers=True)
assert pact_tests_runner.run_provider_tests.call_count == 1
def test_default_setup(mocker):
mocker.spy(pact_tests_runner, 'run_consumer_tests')
mocker.spy(pact_tests_runner, 'run_provider_tests')
pact_tests_runner.verify()
assert pact_tests_runner.run_consumer_tests.call_count == 0
assert pact_tests_runner.run_provider_tests.call_count == 0
|
Parcks/core | src/domain/model/post_install/post_install_runnable.py | Python | gpl-2.0 | 1,228 | 0.004072 | """
Scriptable Packages Installer - Parcks
Copyright (C) 2017 JValck - Setarit
This program is free software; you can redistribute it and/or
modify it under the te | rms of the GNU General Public License
as published by the Fr | ee Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Setarit - parcks[at]setarit.com
"""
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
class PostInstallRunnable(object):
def __init__(self, name = None):
"""
Default constructor
:param name: The name of the post-installation script that is displayed to the user
:type name: str
"""
__metaclass__=ABCMeta
self.name = name
@abstractmethod
def run(self):
pass |
juanAFernandez/project-S | mongoDBAPI/try.py | Python | gpl-2.0 | 32 | 0 | fro | m api import *
s | imulaUnDia()
|
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tests/test_internals.py | Python | gpl-2.0 | 42,078 | 0.002139 | # -*- coding: utf-8 -*-
# pylint: disable=W0102
import nose
import numpy as np
from pandas import Index, MultiIndex, DataFrame, Series, Categorical
from pandas.compat import OrderedDict, lrange
from pandas.sparse.array import SparseArray
from pandas.core.internals import *
import pandas.core.internals as internals
import pandas.util.testing as tm
import pandas as pd
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, randn, assert_series_equal)
from pandas.compat import zip, u
def assert_block_equal(left, right):
assert_almost_equal(left.values, right.values)
assert(left.dtype == right.dtype)
assert_almost_equal(left.mgr_locs, right.mgr_locs)
def get_numeric_mat(shape):
arr = np.arange(shape[0])
return np.lib.stride_tricks.as_strided(
x=arr, shape=shape,
strides=(arr.itemsize,) + (0,) * (len(shape) - 1)).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
"""
Supported typestr:
* float, f8, f4, f2
* int, i8, i4, i2, i1
* uint, u8, u4, u2, u1
* complex, c16, c8
* bool
* object, string, O
* datetime, dt, M8[ns]
* timedelta, td, m8[ns]
* sparse (SparseArray with fill_value=0.0)
* sparse_na (SparseArray with fill_value=np.nan)
* category, category2
"""
placement = BlockPlacement(placement)
num_items = len(placement)
if item_shape is None:
item_shape = (N,)
shape = (num_items,) + item_shape
mat = get_numeric_mat(shape)
if typestr in ('float', 'f8', 'f4', 'f2',
'int', 'i8', 'i4', 'i2', 'i1',
'uint', 'u8', 'u4', 'u2', 'u1'):
values = mat.astype(typestr) + num_offset
elif typestr in ('complex', 'c16', 'c8'):
values = 1.j * (mat.astype(typestr) + num_offset)
elif typestr in ('object', 'string', 'O'):
values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
shape)
elif typestr in ('bool'):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ('datetime', 'dt', 'M8[ns]'):
values = (mat * 1e9).astype('M8[ns]')
elif typestr in ('timedelta', 'td', 'm8[ns]'):
values = (mat * 1).astype('m8[ns]')
elif typestr in ('category'):
values = Categorical([1,1,2,2,3,3,3,3,4,4])
elif typestr in ('category2'):
values = Categorical(['a','a','a','a','b','b','c','c','c','d'])
elif typestr in ('sparse', 'sparse_na'):
# FIXME: doesn't support num_rows != 10
assert shape[-1] == 10
assert all(s == 1 for s in shape[:-1])
if typestr.endswith('_na'):
fill_value = np.nan
else:
fill_value = 0.0
values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
4, 5, fill_value, 6], fill_value=fill_value)
arr = values.sp_values.view()
arr += (num_offset - 1)
else:
raise ValueError('Unsupported typestr: "%s"' % typestr)
return make_block(values, placement=placement, ndim=len(shape))
def create_single_mgr(typestr, num_rows=None):
if num_rows is None:
num_rows = N
return SingleBlockManager(
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
np.arange(num_rows))
def create_mgr(descr, item_shape=None):
"""
Construct BlockManager from string description.
String description syntax looks similar to np.matrix initializer. It looks
like this::
a,b,c: f8; d,e,f: i8
Rules are rather simple:
* see list of supported datatypes in `create_block` method
* components are semicolon-separated
* each component is `NAME,NAME,NAME: DTYPE_ID`
* whitespace around colons & semicolons are removed
* components with same DTYPE_ID are combined into single block
* to force multiple blocks with same dtype, use '-SUFFIX'::
'a:f8-1; b:f8-2; c:f8-foobar'
"""
if item_shape is None:
item_shape = (N,)
offset = 0
mgr_items = []
block_placements = OrderedDict()
for d in descr.split(';'):
d = d.strip()
names, blockstr = d.partition(':')[::2]
blockstr = blockstr.strip()
names = names.strip().split(',')
mgr_items.extend(names)
placement = list(np.arange(len(names)) + offset)
try:
block_placements[blockstr].extend(placement)
except KeyError:
block_placements[blockstr] = placement
offset += len(names)
mgr_items = Index(mgr_items)
blocks = []
num_offset = 0
for blockstr, placement in block_placements.items():
typestr = blockstr.split('-')[0]
blocks.append(create_block(typestr, placement, item_shape=item_shape,
num_offset=num_offset,))
num_offset += len(placement)
return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),
[mgr_items] + [np.arange(n) for n in item_shape])
class TestBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# self.fblock = get_float_ex() # a,c,e
# self.cblock = get_complex_ex() #
# self.oblock = get_obj_ex()
# self.bool_block = get_bool_ex()
# self.int_block = get_int_ex()
self.fblock = create_block('float', [0, 2, 4])
self.cblock = create_block('complex', [7])
self.oblock = create_block('object', [1, 3])
self.bool_block = create_block('bool', [5])
self.int_block = create_block('int', [6])
def test_constructor(self):
int32block = create_block('i4', [0])
self.assertEqual(int32block.dtype, np.int32)
def test_pickle(self):
def _check(blk):
assert_block_equal(self.round_trip_pickle(blk), blk)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_mgr_locs(self):
assert_almost_equal(self.fblock.mgr_locs, [0, 2, 4])
def test_attrs(self):
self.assertEqual(self.fblock.shape, self.fblock.values.shape)
self.assertEqual(self.fblock.dtype, self.fblock.values.dtype)
self.assertEqual(len(self.fblock), len(self.fblock.values))
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = Index(['e', 'a', 'b', 'd', 'f'])
ablock = make_block(avals,
ref_cols.get_indexer(['e', 'b']))
bblock = make_block(bvals,
ref_cols.get_indexer(['a', 'd']))
merged = ablock.merge(bblock)
assert_almost_equal(merged.mgr_locs, [0, 1, 2, 3])
assert_almost_equal(merged.values[[0, 2]], avals)
assert_almost_equal(merged.values[[1, 3]], bvals)
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
self.assertIsNot(cop, self.fblock)
assert_block_equal(self.fblock, cop)
def test_reindex_index(self):
pass
def test_reindex_cast(self):
| pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.copy()
newb.delete(0)
assert_almost_equal(newb.mgr_locs, [2, 4])
self.assertTrue((newb.values[0] == 1).all())
newb = self.fblock.copy()
newb.de | lete(1)
assert_almost_equal(newb.mgr_locs, [0, 4])
self.assertTrue((newb.values[1] == 2).all())
newb = self.fblock.copy()
newb.delete(2)
assert_almost_equal(newb.mgr_locs, [0, 2])
self.assertTrue((newb.values[1] == 1).all())
newb = self.fblock.copy()
self.assertRaises(Exception, newb.delete, 3)
def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
raise nose.SkipTest("skipping for now")
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
bs = list(self.fblock.split_block_at('c'))
|
SReiver/django-taggit-autocomplete-jqueryui | taggit_autocomplete_jqueryui/managers.py | Python | bsd-2-clause | 818 | 0.002445 | # coding=utf-8
from taggit.forms import TagField
from taggit.managers import TaggableManager
from django.conf import settings
from widgets import TagAutocomplete
class TaggableManagerAutocomplete(TaggableManager):
def formfield(self, form_class=TagField, **kwargs):
field = (super(TaggableManagerAutocomplete, self).
formfield(form_class, **kwargs))
field.widget = TagAutocomplete()
return field
def save_form_data(self, instance, value):
value = map(lamb | da v: v.strip(), value)
getattr(instance, self.name).set(*value)
if 'south' in settings.INSTALLED_APPS:
try:
from south.modelsinspector import add_ignored_fields
exc | ept ImportError:
pass
else:
add_ignored_fields(["^taggit_autocomplete_jqueryui\.managers"])
|
Guanghan/ROLO | ROLO_evaluation.py | Python | apache-2.0 | 54,621 | 0.015159 | # Copyright (c) <2016> <GUANGHAN NING>. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Script File: ROLO_evaluation.py
Description:
ROLO is short for Recurrent YOLO, aimed at simultaneous object detection and tracking
Paper: http://arxiv.org/abs/1607.05781
Author: Guanghan Ning
Webpage: http://guanghan.info/
'''
import numpy
print numpy.__path__
import cv2
import os
import numpy as np
import sys
import ROLO_utils as utils
import matplotlib.pyplot as plot
import pickle
import scipy.io
import re
import h5py
import matlab.engine
''' -----------------------------Deal with benchmark results: matlab format-------------------------- '''
def choose_benchmark_method(id):
if id == 0:
method = 'STRUCK'
elif id == 1:
method = 'CXT'
elif id == 2:
method = 'TLD'
elif id == 3:
method = 'OAB'
elif id == 4:
method = 'CSK'
elif id == 5:
method = 'RS'
elif id == 6:
method = 'LSK'
elif id == 7:
method = 'VTD'
elif id == 8:
method = 'VTS'
elif id == 9:
method = 'CNN-SVM'
elif id == 10:
method = 'Staple'
return method
def choose_mat_file(method_id, sequence_id):
[wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)
method_name = choose_benchmark_method(method_id)
mat_file = sequence_name + '_' + method_name + '.mat'
return mat_file
def load_mat_results(mat_file, TRE, SRE, OPE, id):
if TRE is True:
fold = '/u03/Guanghan/dev/ROLO-dev/experiments/benchmark_results/pami15_TRE'
elif SRE is True:
fold = '/u03/Guanghan/dev/ROLO-dev/experiments/benchmark_results/pami15_SRE'
elif OPE is True:
fold = '/u03/Guanghan/dev/ROLO-dev/experiments/benchmark_results/pami15_TRE'
id = 0
mat_path = os.path.join(fold, mat_file)
CNN_SVM = False
if CNN_SVM is True:
eng = matlab.engine.start_matlab()
content = eng.load(mat_path,nargout=1)
mat_results= content['results'][0]['res']#[0]
numbers= [0, content['results'][0]['len']]
eng.exit()
else:
mat = scipy.io.loadmat(mat_path)
mat_results = mat['results'][0][id][0][0][5]
mat_range_str = mat['results'][0][id][0][0][2]
numbers= re.findall(r'\d+', str(mat_range_str))
return [mat_results, int(numbers[0]), int(numbers[1])]
def load_benchmark_results():
# 1. read mat file, output numpy file to: e.g., /u03/Guanghan/dev/ROLO-dev/benchmark/DATA/Car1/STRUCK/
# 2. convert to same format as yolo and rolo
# 3. evaluate AUC and avg_IOU score, for drawing the success plot
# 4. Compare with ROLO and YOLO's OPE (3 parts: TRE ,SRE, SRER)
return
def evaluate_benchmark_avg_IOU(method_id): # calculate AUC(Average Under Curve) of benchmark algorithms
''' PARAMETERS '''
evaluate_st = 0
evaluate_ed = 29
num_evaluate= evaluate_ed - evaluate_st + 1.0
avg_score= 0
method_name= choose_benchmark_method(method_id)
file_name= 'output/IOU/avgIOU_' + method_name + '.txt'
f= open(file_name, 'w')
for sequence_id in range(evaluate_st, evaluate_ed + 1):
[wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)
# Load benchmark detection loc
mat_file = choose_mat_file(method_id, sequence_id)
[locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, False, False, True, 0)
# Load ground truth detection loc
gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt')
lines = utils.load_dataset_gt(gt_file_path)
#
total= 0
total_score= 0
for id in range(0, ed_frame_num):
location= locations[id]
gt_location = utils.find_gt_location(lines, id)
score = utils.iou(location, gt_location)
total_score += score
total += 1.0
total_score /= total
[dummy, dummy, sequence_name, dummy, dummy]= utils.choose_video_sequence(sequence_id)
print(method_name, ',' ,sequence_name, ": avg_IOU = ", total_score)
f.write(method_name + ', ' + sequence_name + ": avg_IOU = " + str("{:.3f}".format(total_score)) + '\n')
avg_score += total_score
f.close()
avg_score /= num_evaluate
print('average score over all sequences:', avg_score)
def evaluate_benchmark_AUC_OPE(method_id): # calculate AUC(Average Under Curve) of benchmark algorithms
''' PARAMETERS '''
evaluate_st = 0
evaluate_ed = 29
num_evaluate= evaluate_ed - evaluate_st + 1.0
AUC_score= []
for thresh_int in range(0, 100, 5):
thresh = thresh_int / 100.0 + 0.0001
print("thresh= ", thresh)
avg_score= 0
for sequence_id in range(evaluate_st, evaluate_ed + 1):
[wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)
# Load benchmark detection loc
mat_file = choose_mat_file(method_id, sequence_id)
[locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, False, False, True, 0)
#print(locations)
# Load ground truth detection loc
gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt')
lines = utils.load_dataset_gt(gt_file_path)
#
total= 0
total_score= 0
for id in range(0, ed_frame_num):
location= locations[id]
gt_location = utils.find_gt_location(lines, id)
score = utils.cal_benchmark_score(location, gt_location, thresh)
total_score += score
total += 1.0
total_score /= total
avg_score += total_score
AUC_score.append(avg_score/num_evaluate)
print("(thresh, AUC_score) = ", thresh, ' ', avg_score/num_evaluate)
method_name= choose_benchmark_method(method_id)
file_name= 'output/AUC_score_' + method_name + '.pickle'
with open(file_name, 'w') as f:
pickle.dump(AUC_score, f)
def evaluate_benchmark_AUC_TRE(method_id): # calculate TRE of AUC(Average Under Curve) of benchmark algorithms
''' PARAMETERS '''
evaluate_st = 0
evaluate_ed = 29
TRE_num = 20
num_evaluate= evaluate_ed - evaluate_st + 1.0
AUC_score= []
for thresh_int in range(0, 100, 5):
thresh = thresh_int / 100.0 + 0.0001
print("thresh= ", thresh)
avg_score= 0
for sequence_id in range(evaluate_st, evaluate_ed + 1):
[wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)
# Load ground truth detection loc
gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt')
lines = utils.load_dataset_gt(gt_file_path)
# Load benchmark detection loc
mat_file = choose_mat_file(method_id, sequence_id)
total_score_over_TREs= 0
for locations_id in range(0, TRE_num):
| [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, True, False, False, locations_id)
ct_frames= 0
| total_score_over_frames= 0
for id in range(st_frame_num-1, ed_frame_num):
id_offset= id - st_frame_num + 1
location= locations[id_offset] # id_offset, not id
gt_location = utils.find_gt_location(lines, id) #id, not id_offset
score = utils.cal_benchmark_score(location, g |
p4apple/sphinx_gt4g | api/sphinxapi.py | Python | gpl-2.0 | 34,870 | 0.053628 | #
# $Id: sphinxapi.py 4522 2014-01-30 11:00:18Z tomat $
#
# Python version of Sphinx searchd client (Python API)
#
# Copyright (c) 2006, Mike Osadnik
# Copyright (c) 2006-2014, Andrew Aksyonoff
# Copyright (c) 2008-2014, Sphinx Technologies Inc
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License. You should have
# received a copy of the GPL license along with this program; if you
# did not, you can | find it at http://www.gnu.org/
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING
# We strongly recommend you to use SphinxQL instead of the API
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
import sys
import select
import socket
import re
from struct import *
# known searchd commands
SEARCHD_COMMAND_SEARCH = 0
SEARCHD_COMMAND_EXCERPT = 1
SEARCHD_COMMAND_UPDATE = 2
SEARCHD_COMMAND_KEYWORDS = 3
SEARCHD_COMMAND_PERSIST = 4
SEARCHD_COMMAND_STATUS = 5
SE | ARCHD_COMMAND_FLUSHATTRS = 7
# current client-side command implementation versions
VER_COMMAND_SEARCH = 0x11E
VER_COMMAND_EXCERPT = 0x104
VER_COMMAND_UPDATE = 0x103
VER_COMMAND_KEYWORDS = 0x100
VER_COMMAND_STATUS = 0x101
VER_COMMAND_FLUSHATTRS = 0x100
# known searchd status codes
SEARCHD_OK = 0
SEARCHD_ERROR = 1
SEARCHD_RETRY = 2
SEARCHD_WARNING = 3
# known match modes
SPH_MATCH_ALL = 0
SPH_MATCH_ANY = 1
SPH_MATCH_PHRASE = 2
SPH_MATCH_BOOLEAN = 3
SPH_MATCH_EXTENDED = 4
SPH_MATCH_FULLSCAN = 5
SPH_MATCH_EXTENDED2 = 6
# known ranking modes (extended2 mode only)
SPH_RANK_PROXIMITY_BM25 = 0 # default mode, phrase proximity major factor and BM25 minor one
SPH_RANK_BM25 = 1 # statistical mode, BM25 ranking only (faster but worse quality)
SPH_RANK_NONE = 2 # no ranking, all matches get a weight of 1
SPH_RANK_WORDCOUNT = 3 # simple word-count weighting, rank is a weighted sum of per-field keyword occurence counts
SPH_RANK_PROXIMITY = 4
SPH_RANK_MATCHANY = 5
SPH_RANK_FIELDMASK = 6
SPH_RANK_SPH04 = 7
SPH_RANK_EXPR = 8
SPH_RANK_TOTAL = 9
# known sort modes
SPH_SORT_RELEVANCE = 0
SPH_SORT_ATTR_DESC = 1
SPH_SORT_ATTR_ASC = 2
SPH_SORT_TIME_SEGMENTS = 3
SPH_SORT_EXTENDED = 4
SPH_SORT_EXPR = 5
# known filter types
SPH_FILTER_VALUES = 0
SPH_FILTER_RANGE = 1
SPH_FILTER_FLOATRANGE = 2
SPH_FILTER_STRING = 3
# known attribute types
SPH_ATTR_NONE = 0
SPH_ATTR_INTEGER = 1
SPH_ATTR_TIMESTAMP = 2
SPH_ATTR_ORDINAL = 3
SPH_ATTR_BOOL = 4
SPH_ATTR_FLOAT = 5
SPH_ATTR_BIGINT = 6
SPH_ATTR_STRING = 7
SPH_ATTR_FACTORS = 1001
SPH_ATTR_MULTI = 0X40000001L
SPH_ATTR_MULTI64 = 0X40000002L
SPH_ATTR_TYPES = (SPH_ATTR_NONE,
SPH_ATTR_INTEGER,
SPH_ATTR_TIMESTAMP,
SPH_ATTR_ORDINAL,
SPH_ATTR_BOOL,
SPH_ATTR_FLOAT,
SPH_ATTR_BIGINT,
SPH_ATTR_STRING,
SPH_ATTR_MULTI,
SPH_ATTR_MULTI64)
# known grouping functions
SPH_GROUPBY_DAY = 0
SPH_GROUPBY_WEEK = 1
SPH_GROUPBY_MONTH = 2
SPH_GROUPBY_YEAR = 3
SPH_GROUPBY_ATTR = 4
SPH_GROUPBY_ATTRPAIR = 5
class SphinxClient:
def __init__ (self):
"""
Create a new client object, and fill defaults.
"""
self._host = 'localhost' # searchd host (default is "localhost")
self._port = 9312 # searchd port (default is 9312)
self._path = None # searchd unix-domain socket path
self._socket = None
self._offset = 0 # how much records to seek from result-set start (default is 0)
self._limit = 20 # how much records to return from result-set starting at offset (default is 20)
self._mode = SPH_MATCH_EXTENDED2 # query matching mode (default is SPH_MATCH_EXTENDED2)
self._weights = [] # per-field weights (default is 1 for all fields)
self._sort = SPH_SORT_RELEVANCE # match sorting mode (default is SPH_SORT_RELEVANCE)
self._sortby = '' # attribute to sort by (defualt is "")
self._min_id = 0 # min ID to match (default is 0)
self._max_id = 0 # max ID to match (default is UINT_MAX)
self._filters = [] # search filters
self._groupby = '' # group-by attribute name
self._groupfunc = SPH_GROUPBY_DAY # group-by function (to pre-process group-by attribute value with)
self._groupsort = '@group desc' # group-by sorting clause (to sort groups in result set with)
self._groupdistinct = '' # group-by count-distinct attribute
self._maxmatches = 1000 # max matches to retrieve
self._cutoff = 0 # cutoff to stop searching at
self._retrycount = 0 # distributed retry count
self._retrydelay = 0 # distributed retry delay
self._anchor = {} # geographical anchor point
self._indexweights = {} # per-index weights
self._ranker = SPH_RANK_PROXIMITY_BM25 # ranking mode
self._rankexpr = '' # ranking expression for SPH_RANK_EXPR
self._maxquerytime = 0 # max query time, milliseconds (default is 0, do not limit)
self._timeout = 1.0 # connection timeout
self._fieldweights = {} # per-field-name weights
self._overrides = {} # per-query attribute values overrides
self._select = '*' # select-list (attributes or expressions, with optional aliases)
self._query_flags = SetBit ( 0, 6, True ) # default idf=tfidf_normalized
self._predictedtime = 0 # per-query max_predicted_time
self._outerorderby = '' # outer match sort by
self._outeroffset = 0 # outer offset
self._outerlimit = 0 # outer limit
self._hasouter = False # sub-select enabled
self._error = '' # last error message
self._warning = '' # last warning message
self._reqs = [] # requests array for multi-query
def __del__ (self):
if self._socket:
self._socket.close()
def GetLastError (self):
"""
Get last error message (string).
"""
return self._error
def GetLastWarning (self):
"""
Get last warning message (string).
"""
return self._warning
def SetServer (self, host, port = None):
"""
Set searchd server host and port.
"""
assert(isinstance(host, str))
if host.startswith('/'):
self._path = host
return
elif host.startswith('unix://'):
self._path = host[7:]
return
self._host = host
if isinstance(port, int):
assert(port>0 and port<65536)
self._port = port
self._path = None
def SetConnectTimeout ( self, timeout ):
"""
Set connection timeout ( float second )
"""
assert (isinstance(timeout, float))
# set timeout to 0 make connaection non-blocking that is wrong so timeout got clipped to reasonable minimum
self._timeout = max ( 0.001, timeout )
def _Connect (self):
"""
INTERNAL METHOD, DO NOT CALL. Connects to searchd server.
"""
if self._socket:
# we have a socket, but is it still alive?
sr, sw, _ = select.select ( [self._socket], [self._socket], [], 0 )
# this is how alive socket should look
if len(sr)==0 and len(sw)==1:
return self._socket
# oops, looks like it was closed, lets reopen
self._socket.close()
self._socket = None
try:
if self._path:
af = socket.AF_UNIX
addr = self._path
desc = self._path
else:
af = socket.AF_INET
addr = ( self._host, self._port )
desc = '%s;%s' % addr
sock = socket.socket ( af, socket.SOCK_STREAM )
sock.settimeout ( self._timeout )
sock.connect ( addr )
except socket.error, msg:
if sock:
sock.close()
self._error = 'connection to %s failed (%s)' % ( desc, msg )
return
v = unpack('>L', sock.recv(4))
if v<1:
sock.close()
self._error = 'expected searchd protocol version, got %s' % v
return
# all ok, send my version
sock.send(pack('>L', 1))
return sock
def _GetResponse (self, sock, client_ver):
"""
INTERNAL METHOD, DO NOT CALL. Gets and checks response packet from searchd server.
"""
(status, ver, length) = unpack('>2HL', sock.recv(8))
response = ''
left = length
while left>0:
chunk = sock.recv(left)
if chunk:
response += chunk
left -= len(chunk)
else:
break
if not self._socket:
sock.close()
# check response
read = len(response)
if not response or r |
agry/NGECore2 | scripts/mobiles/generic/faction/imperial/imp_sandtrooper_75.py | Python | lgpl-3.0 | 1,432 | 0.027933 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mo | bileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('crackdown_sand_trooper')
mobileTemplate.setLevel(75)
mobileTemplate.s | etDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("imperial")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(False)
mobileTemplate.setFaction("imperial")
mobileTemplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_dressed_stormtrooper_sand_trooper_m.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e11.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('imp_sandtrooper_75', mobileTemplate)
return |
hkpeprah/git-achievements | app/achievement/migrations/0001_initial.py | Python | gpl-2.0 | 27,950 | 0.007048 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Difficulty'
db.create_table(u'achievement_difficulty', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('points', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('achievement', ['Difficulty'])
# Adding model 'Badge'
db.create_table(u'achievement_badge', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('achievement', ['Badge'])
# Adding model 'Method'
db.create_table(u'achievement_method', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('callablemethod', self.gf('django.db.models.fields.CharField')(max_length=50)),
('argument_type', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
))
db.send_create_signal('achievement', ['Method'])
# Adding model 'Qualifier'
db.create_table(u'achievement_qualifier', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('callablemethod', self.gf('django.db.models.fields.CharField')(max_length=50)),
('argument_type', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('return_type', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
))
db.send_create_signal('achievement', ['Qualifier'])
# Adding model 'Quantifier'
db.create_table(u'achievement_quantifier', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('callablemethod', self.gf('django.db.models.fields.CharField')(max_length=50)),
('argument_type', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
))
db.send_create_signal('achievement', ['Quantifier'])
# Adding model 'ConditionType'
db.create_table(u'achievement_conditiontype', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
| ('custom', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_si | gnal('achievement', ['ConditionType'])
# Adding model 'CustomCondition'
db.create_table(u'achievement_customcondition', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('event_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.Event'])),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('condition_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['achievement.ConditionType'])),
('method', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('achievement', ['CustomCondition'])
# Adding model 'ValueCondition'
db.create_table(u'achievement_valuecondition', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('event_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.Event'])),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('condition_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['achievement.ConditionType'])),
('method', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['achievement.Method'])),
('attribute', self.gf('django.db.models.fields.CharField')(max_length=200)),
('value', self.gf('django.db.models.fields.CharField')(max_length=200)),
('qualifier', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['achievement.Qualifier'], null=True, blank=True)),
('quantifier', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['achievement.Quantifier'], null=True, blank=True)),
))
db.send_create_signal('achievement', ['ValueCondition'])
# Adding model 'AttributeCondition'
db.create_table(u'achievement_attributecondition', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('event_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['services.Event'])),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('condition_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['achievement.ConditionType'])),
('method', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['achievement.Method'])),
('attributes', self.gf('jsonfield.fields.JSONField')(default={})),
))
db.send_create_signal('achievement', ['AttributeCondition'])
# Adding M2M table for field qualifiers on 'AttributeCondition'
m2m_table_name = db.shorten_name(u'achievement_attributecondition_qualifiers')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('attributecondition', models.ForeignKey(orm['achievement.attributecondition'], null=False)),
('qualifier', models.ForeignKey(orm['achievement.qualifier'], null=False))
))
db.create_unique(m2m_table_name, ['attributecondition_id', 'qualifier_id'])
# Adding model 'AchievementType'
db.create_table(u'achievement_achievementtype', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('custom', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('achievement', ['AchievementType'])
# Adding model 'AchievementCondition'
db.create_table(u'achievement_achievementcondition', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
))
db.send_create_signal('achievement', ['AchievementCondition'])
# Adding M2M table for field achievements on 'AchievementCondition'
m2m_table_name = db.shorten_name(u'achievement_achievementcondition_achievements')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('achievementcondition', models.ForeignKey(orm['achievement.achievementcondition'], null=False)),
('achievement', models.ForeignKey(orm['achie |
jmartinm/InvenioAuthorLists | modules/bibclassify/lib/bibclassify_config.py | Python | gpl-2.0 | 9,775 | 0.003376 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibClassify configuration file.
When writing changes, please either delete the cached ontology in your
temporary directory or use the rebuild-cache option in order to
regenerate the cached ontology.
If you want to change this configuration, we recommend to create a
local configuration file names 'bibclassify_config_local.py' that
contains the changes to apply.
"""
import re
import logging
import sys
import os
# make sure invenio lib is importable
if os.path.dirname(__file__) not in sys.path:
sys.path.insert(0, os.path.dirname(__file__))
import config
VERSION = '0.4.9'
logging_level = logging.ERROR
# ------------- main config -----------
# Save generated kw into the database?
# daemon does that
CFG_DB_SAVE_KW = True
# Number of keywords that are printed by default (this limits single keywords,
# composite keywords, and acronyms - not author keywords)
CFG_BIBCLASSIFY_DEFAULT_OUTPUT_NUMBER = 20
# The main marc xml field where to find/save the keywords, including the
# indicators
CFG_MAIN_FIELD = '6531_'
# Other fields to take from the marc xml when generating tagcloud/list of
# keywords.
CFG_OTHER_FIELDS = ['6950_']
# Where to save author supplied keywords
CFG_AUTH_FIELD = ''
# Where to save extracted acronyms
CFG_ACRON_FIELD = ''
# ------------ bibclass config -------
# USER AGENT
CFG_BIBCLASSIFY_USER_AGENT = ""
# PARTIAL_TEXT
# Marks the part of the fulltext to keep when running a partial match.
# Each tuple contains the start and end percentages of a section.
CFG_BIBCLASSIFY_PARTIAL_TEXT = ((0, 20), (40, 60))
# Format and output marcxml records in spires format
CFG_SPIRES_FORMAT = False
# The taxonomy used when no taxonomy is specified
CFG_EXTRACTION_TAXONOMY = 'HEP'
# WORD TRANSFORMATIONS
# BibClassify creates a regular expression for each label found in the
# ontology.
# If the keyword belongs in 'INVARIABLE_WORDS', we return it whitout any
# change.
# If the keyword is found in 'EXCEPTIONS', we return its attached
# regular expression.
# If the keyword is matched by a regular expression of
# 'UNCHANGE_REGULAR_EXPRESSIONS', we return the keyword without any
# change.
# At last, we perform the sub method of Python's re module using the
# first element of the tuple as the regex and the second element as the
# replacement string.
# Regular expressions found here have been originally based on
# Wikipedia's page on English plural.
# [http://en.wikipedia.org/wiki/English_plural]
CFG_BIBCLASSIFY_INVARIABLE_WORDS = ("any", "big", "chi", "der", "eta", "few",
"low", "new", "non", "off", "one", "out", "phi", "psi", "rho", "tau",
"two", "van", "von", "hard", "weak", "four", "anti", "zero", "sinh",
"open", "high", "data", "dark", "free", "flux", "fine", "final", "heavy",
"strange")
CFG_BIBCLASSIFY_EXCEPTIONS = {
"aluminium": r"alumini?um",
"aluminum": r"alumini?um",
"analysis": r"analy[sz]is",
"analyzis": r"analy[sz]is",
"behavior": r"behaviou?rs?",
"behaviour": r"behaviou?rs?",
"color": r"colou?rs?",
"colour": r"colou?rs?",
"deflexion": r"defle(x|ct)ions?",
"flavor": r"flavou?rs?",
"flavour": r"flavou?rs?",
"gas": r"gas(s?es)?",
"lens": r"lens(es)?",
"matrix": r"matri(x(es)?|ces)",
"muon": r"muons?",
"neutrino": r"neutrinos?",
"reflexion": r"refle(x|ct)ions?",
"ring": r"rings?",
"status": r"status(es)?",
"string": r"strings?",
"sum": r"sums?",
"vertex": r"vert(ex(es)?|ices)",
"vortex": r"vort(ex(es)?|ices)",
}
CFG_BIBCLASSIFY_UNCHANGE_REGULAR_EXPRESSIONS = (
re.compile("[^e]ed$"),
re.compile("ics?$"),
re.compile("[io]s$"),
re.compile("ium$"),
re.compile("less$"),
re.compile("ous$"),
)
# IDEAS
# "al$" -> "al(ly)?"
CFG_BIBCLASSIFY_GENERAL_REGULAR_EXPRESSIONS = (
(re.compile("ional"), r"ional(ly)?"),
(re.compile("([ae])n(ce|t)$"), r"\1n(t|ces?)"),
(re.compile("og(ue)?$"), r"og(ue)?s?"),
(re.compile("([^aeiouyc])(re|er)$"), r"\1(er|re)s?"),
(re.compile("([aeiouy])[sz]ation$"), r"\1[zs]ations?"),
(re.compile("([aeiouy])[sz]ation$"), r"\1[zs]ations?"),
(re.compile("([^aeiou])(y|ies)$"), r"\1(y|ies)"),
(re.compile("o$"), r"o(e?s)?"),
(re.compile("(x|sh|ch|ss)$"), r"\1(es)?"),
(re.compile("f$"), r"(f|ves)"),
(re.compile("ung$"), r"ung(en)?"),
(re.compile("([^aiouy])s$"), r"\1s?"),
(re.compile("([^o])us$"), r"\1(i|us(es)?)"),
(re.compile("um$"), r"(a|ums?)"),
)
# PUNCTUATION TRANSFORMATIONS
# When building the regex pattern for each label of the ontology, ew also take
# care of the non-alpha characters. Thereafter are two sets of transformations.
# 'SEPARATORS' contains the transformation for the non-alpha characters that
# can be found between two words.
# 'SYMBOLS' contains punctuation that can be found at the end of a word.
# In both cases, it the separator is not found in the dictionaries, we return
# re.escape(separator)
CFG_BIBCLASSIFY_SEPARATORS = {
" ": r"[\s\n-]",
"-": r"[\s\n-]?",
"/": r"[/\s]?",
"(": r"\s?\(",
"*": r"[*\s]?",
"- ": r"\s?\-\s",
"+ ": r"\s?\+\s",
}
CFG_BIBCLASSIFY_SYMBOLS = {
"'": r"\s?\'",
}
CFG_BIBCLASSIFY_WORD_WRAP = "[^\w-]%s[^\w-]"
# MATCHING
# When searching for composite keywords, we allow two keywords separated by one
# of the component of 'VALID_SEPARATORS' to form a composite keyword. These
# separators contain also the punctuation.
CFG_BIBCLASSIFY_VALID_SEPARATORS = (
"of", "of a", "of an", "of the", "of this", "of one", "of two", "of three",
"of new", "of other", "of many", "of both", "of these", "of each", "is", "the"
)
# AUTHOR KEYWORDS
# When looking for the keywords already defined in the document, we run the
# following set of regex.
CFG_BIBCLASSIFY_AUTHOR_KW_START = \
re.compile(r"(?i)key[ -]*words?[a-z ]*[.:] *")
CFG_BIBCLASSIFY_AUTHOR_KW_END = (
re.compile(r"\n"),
re.compile(r"\.\W"),
re.compile(r"\sPACS"),
re.compile(r"(?i)1[. ]*introduction\W"),
re.compile(r"(?i)mathematics subject classification\W"),
)
CFG_BIBCLASSIFY_AUTHOR_KW_SEPARATION = re.compile(" ?; ?| ?, ?| ?- ")
# Modules to call to get output from them
#CFG_EXTERNAL_MODULES = {'webtag' : 'call_from_outside'}
CFG_EXTERNAL_MODULES = {}
log = None
_loggers = []
def get_logger(name):
"""Creates a logger for you - with the parent newseman logger and
common configuration"""
if log:
logger = log.manager.getLogger(name)
else:
logger = logging.ge | tLogger(name)
hdlr = l | ogging.StreamHandler(sys.stderr)
formatter = logging.Formatter('%(levelname)s %(name)s:%(lineno)d %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging_level)
logger.propagate = 0
if logger not in _loggers:
_loggers.append(logger)
return logger
def set_global_level(level):
    """Set the module-wide log level and push it to every registered logger.

    ``level`` may be anything ``int()`` accepts (e.g. a numeric string).
    """
    global logging_level
    logging_level = int(level)
    for registered_logger in _loggers:
        registered_logger.setLevel(logging_level)
log = get_logger('bibclassify')
STANDALONE = False
try:
import search_engine
except:
STANDALONE = True
log.warning('Bibclassify is running in a standalone mode, access to database is not supported')
if STANDALONE:
import tempfile
# try to find etcdir (first in this directory), and set etc to be one
# level higher
|
altendky/canmatrix | src/canmatrix/tests/test_sym.py | Python | bsd-2-clause | 3,192 | 0 | # -*- coding: utf-8 -*-
import io
import textwrap
import pytest
import canmatrix.canmatrix
import canmatrix.formats.sym
def test_colliding_mux_values():
    """Two multiplexer groups sharing mux id 1h must produce exactly one
    DuplicateMuxIdError pointing at the second definition's line.

    Restored from extraction-garbled source (a dataset separator had split
    the ``FormatVersion`` literal and the ``error,`` unpack). Blank lines in
    the sym source are significant: SecondMux's Mux line must be line 16.
    """
    f = io.BytesIO(
        textwrap.dedent(
            '''\
            FormatVersion=5.0 // Do not edit this line!
            Title="a file"

            {SEND}

            [MuxedId]
            ID=0h
            Mux=TheMux 0,1 0h
            Var=Signal unsigned 1,1

            [MuxedId]
            Mux=FirstMux 0,1 1h
            Var=Signal unsigned 1,1

            [MuxedId]
            Mux=SecondMux 0,1 1h
            Var=Signal unsigned 1,1
            ''',
        ).encode('utf-8'),
    )

    matrix = canmatrix.formats.sym.load(f)
    error, = matrix.load_errors
    # Line of 'Mux=SecondMux' in the sym source above.
    line_number = 16

    assert len(matrix.load_errors) == 1
    assert isinstance(error, canmatrix.formats.sym.DuplicateMuxIdError)
    assert error.line_number == line_number

    error_string = str(error)
    assert error_string.startswith(
        'line {line_number}: '.format(line_number=line_number),
    )
    assert 'FirstMux' in error_string
    assert 'SecondMux' in error_string
def test_parse_longname_with_colon():
    """A /ln long-name attribute containing a colon must be kept verbatim."""
    sym_source = textwrap.dedent(
        '''\
        FormatVersion=5.0 // Do not edit this line!
        Title="a file"

        {SEND}

        [pass]
        DLC=8
        Var=Password unsigned 16,16 /ln:"Access Level : Password"
        ''',
    ).encode('utf-8')

    matrix = canmatrix.formats.sym.load(io.BytesIO(sym_source))

    password_signal = matrix.frames[0].signals[0]
    assert password_signal.attributes['LongName'] == 'Access Level : Password'
@pytest.mark.parametrize(
    'is_float, value, expected',
    (
        (False, '37', '37'),
        (True, '37.1', '37.1'),
    ),
)
def test_export_default_decimal_places(is_float, value, expected):
    """The exported /d: default keeps the decimal places of the initial value."""
    matrix = canmatrix.canmatrix.CanMatrix()
    frame = canmatrix.canmatrix.Frame()
    matrix.add_frame(frame)

    signal = canmatrix.canmatrix.Signal(
        size=32,
        is_float=is_float,
        is_signed=False,
        initial_value=value,
    )
    frame.add_signal(signal)

    exported = canmatrix.formats.sym.create_signal(db=matrix, signal=signal)

    prefix = '/d:'
    # Exactly one /d: token is expected; the unpack fails loudly otherwise.
    (default_token,) = [
        token
        for token in exported.split()
        if token.startswith(prefix)
    ]
    assert default_token[len(prefix):] == expected
@pytest.mark.parametrize(
    'variable_type, bit_length',
    (
        ('float', 32),
        ('double', 64),
    )
)
def test_parse_float(variable_type, bit_length):
    """Float/double Var definitions must load without errors as float signals.

    Renamed from ``tests_parse_float`` for consistency with the sibling
    ``test_*`` functions (and with stricter pytest ``python_functions``
    configurations); nothing references the function by name, and pytest's
    default prefix collection matches either spelling.
    """
    f = io.BytesIO(
        textwrap.dedent(
            '''\
            FormatVersion=5.0 // Do not edit this line!
            Title="Untitled"

            {{SENDRECEIVE}}

            [Symbol1]
            ID=000h
            DLC=8
            Var=a_signal {variable_type} 0,{bit_length}
            '''.format(
                variable_type=variable_type,
                bit_length=bit_length,
            ),
        ).encode('utf-8'),
    )
    matrix = canmatrix.formats.sym.load(f)
    assert matrix.load_errors == []
    frame = matrix.frames[0]
    signal = frame.signals[0]
    assert signal.is_float
zhoutong/wwag | serve.py | Python | mit | 83 | 0 | from wsgiref.handle | rs import C | GIHandler
from wwag import app
CGIHandler().run(app)
|
fivejjs/inasafe | scripts/generate_volcano_evac_zone.py | Python | gpl-3.0 | 376 | 0 | from safe.engine.interpolation import make_circular_polygon
from safe.storage.core import read_layer
H = read_layer('/data_area/InaSA | FE/public_data/hazard/Marapi.shp')
print H.get_geometry()
# Generate evacuation circle (as a polygon):
radius = 3000
center = H.get_geometry()[0]
Z = make_circular_polygon(center, radius)
Z.write_to_file('Marapi_evac_zone_%im.shp' | % radius)
|
GabrielFortin/ansible-module-f5bigip | library/f5bigip_gtm_monitor_smtp.py | Python | apache-2.0 | 5,320 | 0.003383 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_gtm_monitor_smtp
short_description: BIG-IP gtm smtp monitor module
description:
- Configures a Simple Mail Transport Protocol (SMTP) monitor.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
debug:
description:
- Specifies whether the monitor sends error messages and additional information to a log file created and
labeled specifically for this monitor.
default: no
choices: ['no', 'yes']
defaults_from:
description:
- Specifies the type of monitor you want to use to create the new monitor.
default: smtp
description:
description:
- User defined description.
destination:
description:
- Specifies the IP address and service port of the resource that is the destination of this monitor.
default: '*:*'
domain:
description:
- Specifies the domain name to check, for example, bigipinternal.com.
ignore_down_response:
description:
- Specifies whether the monitor ignores a down response from the system it is monitoring.
default: disabled
choices: ['enabled', 'disabled']
interval:
description:
- Specifies, in seconds, the frequency at which the system issues the monitor check when either the resource
is down or the status of the resource is unknown.
default: 30
name:
description:
- Specifies a unique name for the component.
required | : true
parti | tion:
description:
- Specifies the administrative partition in which the component object resides.
default: Common
probe_timeout:
description:
- Specifies the number of seconds after which the BIG-IP system times out the probe request to the BIG-IP system.
default: 5
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
timeout:
description:
- Specifies the number of seconds the target has in which to respond to the monitor request.
default: 120
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create GTM SMTP Monitor
f5bigip_gtm_monitor_smtp:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_smtp_monitor
partition: Common
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_POLAR_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
    """Declares the Ansible argument spec for the GTM SMTP monitor module."""

    @property
    def argument_spec(self):
        """Monitor-specific options merged with the shared provider/name args."""
        spec = {
            'debug': {'type': 'str', 'choices': F5_POLAR_CHOICES},
            'defaults_from': {'type': 'str'},
            'description': {'type': 'str'},
            'destination': {'type': 'str'},
            'domain': {'type': 'str'},
            'ignore_down_response': {'type': 'str', 'choices': F5_ACTIVATION_CHOICES},
            'interval': {'type': 'int'},
            'probe_timeout': {'type': 'int'},
            'timeout': {'type': 'int'},
        }
        spec.update(F5_PROVIDER_ARGS)
        spec.update(F5_NAMED_OBJ_ARGS)
        return spec

    @property
    def supports_check_mode(self):
        """Check mode is not implemented by this module."""
        return False
class F5BigIpGtmMonitorHttp(F5BigIpNamedObject):
    # NOTE(review): the class name says "Http" but every endpoint below is the
    # GTM *SMTP* monitor API — the name looks copy-pasted from the HTTP monitor
    # module. Renaming would also touch main(), so it is only flagged here.
    def _set_crud_methods(self):
        """Bind the CRUD entry points to the BIG-IP GTM SMTP monitor REST API."""
        self._methods = {
            'create': self._api.tm.gtm.monitor.smtps.smtp.create,
            'read': self._api.tm.gtm.monitor.smtps.smtp.load,
            'update': self._api.tm.gtm.monitor.smtps.smtp.update,
            'delete': self._api.tm.gtm.monitor.smtps.smtp.delete,
            'exists': self._api.tm.gtm.monitor.smtps.smtp.exists
        }
def main():
    """Ansible entry point: build the module and flush the SMTP monitor object.

    Any exception from the F5 object is reported through ``fail_json`` so
    Ansible sees a proper module failure instead of a traceback.
    """
    module_params = ModuleParams()
    module = AnsibleModule(
        argument_spec=module_params.argument_spec,
        supports_check_mode=module_params.supports_check_mode,
    )

    try:
        monitor = F5BigIpGtmMonitorHttp(check_mode=module.supports_check_mode, **module.params)
        module.exit_json(**monitor.flush())
    except Exception as exc:
        module.fail_json(msg=str(exc))
|
pylixm/liBlog | liBlog/visitor/middleware.py | Python | mit | 8,071 | 0.000373 | # -*- coding:utf-8 -*-
from datetime import timedelta
from django.utils import timezone
import logging
import re
import traceback
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.utils import DatabaseError, IntegrityError
from django.http import Http404
from django.db import transaction
from . import utils
from .models import Visitor, UntrackedUserAgent, BannedIP
title_re = re.compile('<title>(.*?)</title>')
log = logging.getLogger('tracking.middleware')
class VisitorTrackingMiddleware(object):
    """
    Keeps track of your active users. Anytime a visitor accesses a valid URL,
    their unique record will be updated with the page they're on and the last
    time they requested a page.

    Records are considered to be unique when the session key and IP address
    are unique together. Sometimes the same user used to have two different
    records, so I added a check to see if the session key had changed for the
    same IP and user agent in the last 5 minutes
    """

    def __init__(self, get_response=None):
        """
        Django 1.10+ style middleware constructor, see:
        https://docs.djangoproject.com/en/1.10/topics/http/middleware/#writing-your-own-middleware

        :param get_response: the next middleware/view callable in the chain
        """
        self.get_response = get_response
        # One-time configuration and initialization.

    def __call__(self, request):
        """
        Django 1.10+ entry point: run our process_request hook, fall through
        to the rest of the chain, then run process_response if defined.

        :param request: the incoming HttpRequest
        :return: the HttpResponse for this request
        """
        response = None
        if hasattr(self, 'process_request'):
            response = self.process_request(request)
        if not response:
            response = self.get_response(request)
        if hasattr(self, 'process_response'):
            response = self.process_response(request, response)
        return response

    @property
    def prefixes(self):
        """Returns a list of URL prefixes that we should not track"""
        if not hasattr(self, '_prefixes'):
            self._prefixes = getattr(settings, 'NO_TRACKING_PREFIXES', [])

            if not getattr(settings, '_FREEZE_TRACKING_PREFIXES', False):
                # also skip media/static files served under their own URLs
                for name in ('MEDIA_URL', 'STATIC_URL'):
                    url = getattr(settings, name)
                    if url and url != '/':
                        self._prefixes.append(url)

                try:
                    # finally, don't track requests to the tracker update pages
                    self._prefixes.append(
                        reverse('tracking-refresh-active-users'))
                except NoReverseMatch:
                    # django-tracking hasn't been included in the URLconf if we
                    # get here, which is not a bad thing
                    pass

                # memoize the computed list on settings so later instances
                # skip the work (hence the _FREEZE flag checked above)
                settings.NO_TRACKING_PREFIXES = self._prefixes
                settings._FREEZE_TRACKING_PREFIXES = True

        return self._prefixes

    def process_request(self, request):
        """Create or refresh the Visitor record for this request."""
        # create some useful variables
        ip_address = utils.get_ip(request)
        user_agent = request.META.get('HTTP_USER_AGENT', '')[:255]

        # filter against the user-agent whitelist (UntrackedUserAgent)
        # retrieve untracked user agents from cache
        # NOTE(review): this caches a QuerySet for an hour; presumably the
        # backend pickles the evaluated rows — confirm for your cache backend.
        ua_key = '_tracking_untracked_uas'
        untracked = cache.get(ua_key)
        if untracked is None:
            log.info('Updating untracked user agent cache')
            untracked = UntrackedUserAgent.objects.all()
            cache.set(ua_key, untracked, 3600)

        # see if the user agent is not supposed to be tracked
        for ua in untracked:
            # if the keyword is found in the user agent, stop tracking
            if user_agent.find(ua.keyword) != -1:
                log.debug('Not tracking UA "%s" because of keyword: %s' % (
                    user_agent, ua.keyword))
                return

        # make sure the visitor has a session so we get a stable session key
        if not request.session.session_key:
            request.session.save()

        session_key = request.session.session_key

        # ensure that the request.path does not begin with any of the prefixes
        for prefix in self.prefixes:
            if request.path.startswith(prefix):
                log.debug('Not tracking request to: %s' % request.path)
                return

        # if we get here, the URL needs to be tracked
        # determine what time it is
        now = timezone.localtime(timezone.now())

        attrs = {
            'session_key': session_key,
            'ip_address': ip_address
        }

        # for some reason, Visitor.objects.get_or_create was not working here
        try:
            visitor = Visitor.objects.get(**attrs)
        except Visitor.DoesNotExist:
            # see if there's a visitor with the same IP and user agent
            # within the last 5 minutes
            cutoff = now - timedelta(minutes=5)
            visitors = Visitor.objects.filter(
                ip_address=ip_address,
                user_agent=user_agent,
                last_update__gte=cutoff
            )
            if len(visitors):
                # same browser seen within 5 minutes: treat as the same visit
                # whose session key rolled over
                visitor = visitors[0]
                visitor.session_key = session_key
                log.debug('Using existing visitor for IP %s / UA %s: %s' % (
                    ip_address, user_agent, visitor.id))
            else:
                visitor, created = Visitor.objects.get_or_create(**attrs)
                if created:
                    log.debug('Created a new visitor: %s' % attrs)
        except:
            # NOTE(review): bare except silently drops tracking on ANY other
            # error — deliberate best-effort behaviour, but it also hides bugs.
            return

        # determine whether or not the user is logged in
        user = request.user
        if isinstance(user, AnonymousUser):
            user = None

        # update the tracking information
        visitor.user = user
        visitor.user_agent = user_agent

        # if the visitor record is new, or the visitor hasn't been here for
        # at least an hour, update their referrer URL
        one_hour_ago = now - timedelta(hours=1)
        if not visitor.last_update or visitor.last_update <= one_hour_ago:
            visitor.referrer = request.META.get('HTTP_REFERER', 'unknown')[:255]

            # reset the number of pages they've been to
            visitor.page_views = 0
            visitor.session_start = now

        visitor.url = request.path
        visitor.page_views += 1
        visitor.last_update = now
        try:
            # savepoint so a failed save does not poison an enclosing
            # transaction
            sid = transaction.savepoint()
            visitor.save()
            transaction.savepoint_commit(sid)
        except IntegrityError:
            transaction.savepoint_rollback(sid)
        except DatabaseError:
            log.error(
                'There was a problem saving visitor information:\n%s\n\n%s' % (
                    traceback.format_exc(), locals()))
class VisitorCleanUpMiddleware:
    """Deletes visitor tracking rows older than the configured timeout."""

    def process_request(self, request):
        hours = utils.get_cleanup_timeout()

        # Only act on a non-negative integral setting; anything else
        # (None, negative, non-numeric) disables cleanup entirely.
        if not str(hours).isdigit():
            return

        log.debug('Cleaning up visitors older than %s hours' % hours)
        cutoff = timezone.localtime(timezone.now()) - timedelta(hours=int(hours))
        Visitor.objects.filter(last_update__lte=cutoff).delete()
class BannedIPMiddleware:
    """
    Raises an Http404 error for any page request from a banned IP. IP addresses
    may be added to the list of banned IPs via the Django admin.

    The banned users do not actually receive the 404 error--instead they get
    an "Internal Server Error", effectively eliminating any access to the site.
    """

    # Cache key and lifetime (seconds) for the banned-IP list.
    CACHE_KEY = '_tracking_banned_ips'
    CACHE_SECONDS = 3600

    def process_request(self, request):
        banned = cache.get(self.CACHE_KEY)
        if banned is None:
            # Cache miss: rebuild the banned list from the database.
            log.info('Updating banned IPs cache')
            banned = [entry.ip_address for entry in BannedIP.objects.all()]
            cache.set(self.CACHE_KEY, banned, self.CACHE_SECONDS)

        if utils.get_ip(request) in banned:
            raise Http404
|
apache/aurora | src/main/python/apache/thermos/monitoring/resource.py | Python | apache-2.0 | 13,377 | 0.009793 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Monitor the resource consumption of Thermos tasks
This module contains classes used to monitor the resource consumption (e.g. CPU, RAM, disk) of
Thermos tasks. Resource monitoring of a Thermos task typically treats the task as an aggregate of
all the processes within it. Importantly, this excludes the process(es) of Thermos itself (i.e. the
TaskRunner and any other wrappers involved in launching a task).
The ResourceMonitorBase defines the interface for other components (for example, the Thermos
TaskObserver) to interact with and retrieve information about a Task's resource consumption. The
canonical/reference implementation of a ResourceMonitor is the TaskResourceMonitor, a thread which
actively monitors resources for a particular task by periodically polling process information and
disk consumption and retaining a limited (FIFO) in-memory history of this data.
"""
import threading
import time
from abc import abstractmethod
from bisect import bisect_left
from collections import namedtuple
from operator import attrgetter
from twitter.common import log
from twitter.common.collections import RingBuffer
from twitter.common.concurrent import EventMuxer
from twitter.common.exceptions import ExceptionalThread
from twitter.common.lang import Interface
from twitter.common.quantity import Amount, Time
from .disk import DiskCollectorSettings, DuDiskCollector, MesosDiskCollector
from .process import ProcessSample
from .process_collector_psutil import ProcessTreeCollector
class ResourceMonitorBase(Interface):
    """Defines the interface for interacting with a ResourceMonitor.

    Concrete implementations (e.g. TaskResourceMonitor) expose point-in-time and
    historical samples of a task's resource consumption.
    """

    class Error(Exception):
        """Base exception for resource-monitor failures."""

    class AggregateResourceResult(namedtuple('AggregateResourceResult',
                                             'num_procs process_sample disk_usage')):
        """Task-level stats:
        num_procs: total number of pids initiated by the task
        process_sample: a .process.ProcessSample object representing resources
            consumed by the task
        disk_usage: disk usage consumed in the task's sandbox
        """

    class FullResourceResult(namedtuple('FullResourceResult', 'proc_usage disk_usage')):
        """Detailed task-level stats:
        proc_usage: a dictionary mapping ProcessStatus objects to
            ProcResourceResult objects, one entry per process in the task
        disk_usage: disk usage consumed in the task's sandbox
        """

    class ProcResourceResult(namedtuple('ProcResourceResult', 'process_sample num_procs')):
        """Process-level stats:
        process_sample: a .process.ProcessSample object representing resources
            consumed by the process
        num_procs: total number of pids initiated by the process
        """

    @abstractmethod
    def sample(self):
        """Return a sample of the resource consumption of the task right now.

        Returns a tuple of (timestamp, AggregateResourceResult).
        """

    @abstractmethod
    def sample_at(self, time):
        """Return a sample of the resource consumption as close as possible to
        the specified time.

        Returns a tuple of (timestamp, AggregateResourceResult).
        """

    @abstractmethod
    def sample_by_process(self, process_name):
        """Return a sample of the resource consumption of a specific process in
        the task right now.

        Returns a ProcessSample.
        """
class ResourceHistory(object):
    """Fixed-length FIFO history of resource samples backed by a RingBuffer,
    with the mapping:
      timestamp => ({process_status => (process_sample, number_of_procs)}, disk_usage_in_bytes)
    """

    def __init__(self, maxlen, initialize=True):
        if not maxlen >= 1:
            raise ValueError("maxlen must be greater than 0")
        self._maxlen = maxlen
        self._values = RingBuffer(maxlen, None)
        if initialize:
            # Seed with an empty sample so `get` always has something to return.
            self.add(time.time(), ResourceMonitorBase.FullResourceResult({}, 0))

    def add(self, timestamp, value):
        """Store a new resource sample corresponding to the given timestamp"""
        if self._values:
            if not timestamp >= self._values[-1][0]:
                raise ValueError("Refusing to add timestamp in the past!")
        self._values.append((timestamp, value))

    def get(self, timestamp):
        """Get the resource sample nearest to the given timestamp"""
        idx = bisect_left(self._values, (timestamp, None))
        last = len(self) - 1
        return self._values[idx if idx < last else last]

    def __iter__(self):
        return iter(self._values)

    def __len__(self):
        return len(self._values)

    def __repr__(self):
        return 'ResourceHistory(%s)' % ', '.join(str(entry) for entry in self._values)
class HistoryProvider(object):
    """Factory that sizes a ResourceHistory to cover a requested time window."""

    MAX_HISTORY = 10000  # upper bound on retained samples (magic number)

    def provides(self, history_time, min_collection_interval):
        """Return a ResourceHistory long enough to span `history_time` at the
        given minimum collection interval.
        """
        window_seconds = history_time.as_(Time.SECONDS)
        length = int(window_seconds / min_collection_interval)
        if length > self.MAX_HISTORY:
            raise ValueError("Requested history length too large")
        log.debug("Initialising ResourceHistory of length %s", length)
        return ResourceHistory(length)
class DiskCollectorProvider(object):
    """Factory choosing which disk-collector implementation monitors a sandbox."""

    DEFAULT_DISK_COLLECTOR_CLASS = DuDiskCollector

    def __init__(
            self,
            enable_mesos_disk_collector=False,
            settings=None):
        """
        :param enable_mesos_disk_collector: if True, use MesosDiskCollector
            instead of the default du-based collector
        :param settings: DiskCollectorSettings; a fresh default is created when
            omitted
        """
        # Fix: the original signature used `settings=DiskCollectorSettings()`,
        # a default evaluated once at definition time and therefore shared
        # (and mutable) across every provider instance. A None sentinel gives
        # each provider its own defaults while remaining call-compatible.
        self.settings = DiskCollectorSettings() if settings is None else settings
        self.disk_collector_class = (
            MesosDiskCollector if enable_mesos_disk_collector
            else self.DEFAULT_DISK_COLLECTOR_CLASS)

    def provides(self, sandbox):
        """Return a disk collector bound to the given sandbox directory."""
        return self.disk_collector_class(sandbox, settings=self.settings)
class TaskResourceMonitor(ResourceMonitorBase, ExceptionalThread):
""" Lightweight thread to aggregate resource consumption for a task's constituent processes.
Actual resource calculation is delegated to collectors; this class periodically polls the
collectors and aggregates into a representation for the entire task. Also maintains a limited
history of previous sample results.
"""
PROCESS_COLLECTION_INTERVAL = Amount(20, Time.SECONDS)
HISTORY_TIME = Amount(1, Time.HOURS)
def __init__(
self,
task_id,
task_monitor,
disk_collector_provider=DiskCollectorProvider(),
process_collection_interval=PROCESS_COLLECTION_INTERVAL,
disk_collection_interval=DiskCollectorSettings.DISK_COLLECTION_INTERVAL,
history_time=HISTORY_TIME,
history_provider=HistoryProvider()):
"""
task_monitor: TaskMonitor object specifying the task whose resources should be monitored
sandbox: Directory for which to monitor disk utilisation
"""
self._task_monitor = task_monitor # exposes PIDs, sandbox
self._task_id = task_id
log.debug('Initialising resource collection for task %s', self._task_id)
self._process_collectors = dict() # ProcessStatus => ProcessTreeCollector
self._disk_collector_provider = disk_collector_provider
self._disk_collector = None
self._process_collection_interval = process_collection_interval.as_(Time.SECONDS)
self._disk_collection_interval = disk_collection_interval.as_(Time.SECONDS)
min_collection_interval = min(self._process_collection_interval, self._disk_collection_interval)
self._history = history_provider.provides(history_time, min_collection_interval)
self._kill_signal = threading.Event()
ExceptionalThread.__init__(self, name='%s[%s]' % (self.__class__.__name__, task_id))
self.daemon = True
def sample(self):
if not self.is_alive():
log.warning("TaskResourceMonitor not running - sample may be inaccurate")
return self.sample_at(time.time())
def sample_at(self, timestamp):
_timestamp, full_resources = |
hbhdytf/mac | swift/obj/reconstructor.py | Python | apache-2.0 | 41,578 | 0 | # Copyright (c) 2010-2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os.path import join
import random
import time
import itertools
from collections import defaultdict
import six.moves.cPickle as pickle
import shutil
from eventlet import (GreenPile, GreenPool, Timeout, sleep, hubs, tpool,
spawn)
from eventlet.support.greenlets import GreenletExit
from swift import gettext_ as _
from swift.common.utils import (
whataremyips, unlink_older_than, compute_eta, get_logger,
    dump_recon_cache, mkdirs, config_true_value, list_from_csv, get_hub,
tpool_reraise, GreenAsyncPile, Timestamp, remove_file)
from swift.common.swob import HeaderKeyDict
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.ring.utils import is_local_device
from swift.obj.ssync_sender import Sender as ssync_sender
from swift.common.http import HTTP_OK, HTTP_NOT_FOUND, \
HTTP_INSUFFICIENT_STORAGE
from swift.obj.diskfile import DiskFileRouter, get_data_dir, \
get_tmp_dir
from swift.common.storage_policy import POLICIES, EC_POLICY
from swift.common.exceptions import ConnectionTimeout, DiskFileError, \
SuffixSyncError
SYNC, REVERT = ('sync_only', 'sync_revert')
hubs.use_hub(get_hub())
def _get_partners(frag_index, part_nodes):
"""
Returns the left and right partners of the node whose index is
equal to the given frag_index.
:param frag_index: a fragment index
:param part_nodes: a list of primary nodes
:returns: [<node-to-left>, <node-to-right>]
"""
return [
part_nodes[(frag_index - 1) % len(part_nodes)],
part_nodes[(frag_index + 1) % len(part_nodes)],
]
class RebuildingECDiskFileStream(object):
    """Wraps reconstructed fragment-archive data and metadata in the DiskFile
    interface expected by ssync.
    """

    def __init__(self, datafile_metadata, frag_index, rebuilt_fragment_iter):
        # Start from the metadata of a participating fragment archive; the
        # rebuilt archive will have the same length as the others in the set.
        self.datafile_metadata = datafile_metadata
        self._content_length = datafile_metadata['Content-Length']
        # Stamp in the new fragment index and drop any ETag -- the receiving
        # object server recalculates it on its side.
        datafile_metadata['X-Object-Sysmeta-Ec-Frag-Index'] = frag_index
        datafile_metadata.pop('ETag', None)
        datafile_metadata.pop('Etag', None)
        self.frag_index = frag_index
        self.rebuilt_fragment_iter = rebuilt_fragment_iter

    def get_metadata(self):
        return self.datafile_metadata

    def get_datafile_metadata(self):
        return self.datafile_metadata

    @property
    def content_length(self):
        return self._content_length

    def reader(self):
        for piece in self.rebuilt_fragment_iter:
            yield piece
class ObjectReconstructor(Daemon):
"""
Reconstruct objects using erasure code. And also rebalance EC Fragment
Archive objects off handoff nodes.
Encapsulates most logic and data needed by the object reconstruction
process. Each call to .reconstruct() performs one pass. It's up to the
caller to do this in a loop.
"""
    def __init__(self, conf, logger=None):
        """
        :param conf: configuration object obtained from ConfigParser
        :param logger: logging object; a default object-reconstructor logger is
            created when omitted
        """
        self.conf = conf
        self.logger = logger or get_logger(
            conf, log_route='object-reconstructor')
        self.devices_dir = conf.get('devices', '/srv/node')
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        self.bind_ip = conf.get('bind_ip', '0.0.0.0')
        self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
        # NOTE(review): when servers_per_port is set, the bind port is presumably
        # determined per-disk rather than from config -- TODO confirm
        self.port = None if self.servers_per_port else \
            int(conf.get('bind_port', 6000))
        self.concurrency = int(conf.get('concurrency', 1))
        self.stats_interval = int(conf.get('stats_interval', '300'))
        self.ring_check_interval = int(conf.get('ring_check_interval', 15))
        # next time check_ring() is allowed to actually test the ring for changes
        self.next_check = time.time() + self.ring_check_interval
        self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
        self.partition_times = []
        # 'interval' takes precedence over the legacy 'run_pause' name
        self.interval = int(conf.get('interval') or
                            conf.get('run_pause') or 30)
        self.http_timeout = int(conf.get('http_timeout', 60))
        self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = os.path.join(self.recon_cache_path, "object.recon")
        # defaults subject to change after beta
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
        self.headers = {
            'Content-Length': '0',
            'user-agent': 'obj-reconstructor %s' % os.getpid()}
        self.handoffs_first = config_true_value(conf.get('handoffs_first',
                                                         False))
        self._df_router = DiskFileRouter(conf, self.logger)
    def load_object_ring(self, policy):
        """
        Make sure the policy's rings are loaded.

        :param policy: the StoragePolicy instance
        :returns: appropriate ring object
        """
        # NOTE(review): load_ring looks idempotent (safe to call repeatedly) --
        # TODO confirm against StoragePolicy implementation
        policy.load_ring(self.swift_dir)
        return policy.object_ring
    def check_ring(self, object_ring):
        """
        Check to see if the ring has been updated.

        The test is rate-limited: the ring is only actually inspected at most
        once per ring_check_interval seconds.

        :param object_ring: the ring to check
        :returns: False if the ring HAS changed (i.e. the caller should reload
                  it); True otherwise
        """
        if time.time() > self.next_check:
            self.next_check = time.time() + self.ring_check_interval
            if object_ring.has_changed():
                return False
        return True
def _full_path(self, node, part, path, policy):
return '%(replication_ip)s:%(replication_port)s' \
'/%(device)s/%(part)s%(path)s ' \
'policy#%(policy)d frag#%(frag_index)s' % {
'replication_ip': node['replication_ip'],
'replication_port': node['replication_port'],
'device': node['device'],
'part': part, 'path': path,
'policy': policy,
'frag_index': node.get('index', 'handoff'),
}
def _get_response(self, node, part, path, headers, policy):
"""
Helper method for reconstruction that GETs a single EC fragment
archive
:param node: the node to GET from
:param part: the partition
:param path: full path of the desired EC archive
:param headers: the headers to send
:param policy: an instance of
:class:`~swift.common.storage_policy.BaseStoragePolicy`
:returns: response
"""
resp = None
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
part, 'GET', path, headers=headers)
with Timeout(self.node_timeout):
resp = conn.getresponse()
if resp.status not in [HTTP_OK, HTTP_NOT_FOUND]:
self.logger.warning(
_("Invalid respo |
Empire-of-Code-Puzzles/checkio-empire-pawn-brotherhood | _old/verification/referee.py | Python | mit | 510 | 0 | from checkio.signals import ON_CONNECT
from checkio import api
from checkio.referees.io import CheckiOReferee
from checkio.referees import cover_codes
from checkio.referees import checkers
from tests import TESTS
cover = """def cover(func, data):
return func(set(data))
"""
api.add_listener | (
ON_CONNECT,
CheckiOReferee(
tests=TESTS,
cover_code={
'python-27': cover,
'python-3': cover
},
DEFAULT_FUNCTION_NAME="safe_pawns"
).on | _ready)
|
kylebebak/Requester | core/responses.py | Python | mit | 11,912 | 0.002015 | import sublime
import re
from urllib import parse
from concurrent import futures
from collections import namedtuple, deque
from ..deps import requests
from .parsers import PREFIX
from .helpers import truncate, prepend_scheme, is_instance, is_auxiliary_view
Request_ = namedtuple('Request', 'request, method, url, args, kwargs, ordering, session, skwargs, error')
Response = namedtuple('Response', 'req, res, err')
methods = {
'GET': requests.get,
'OPTIONS': requests.options,
'HEAD': requests.head,
'POST': requests.post,
'PUT': requests.put,
'PATCH': requests.patch,
'DELETE': requests.delete,
}
class Request(Request_):
    """A `Request` namedtuple whose identity is keyed on `ordering`, making
    instances hashable and comparable for use in sets and dicts.
    """

    def __hash__(self):
        return hash(self.ordering)

    def __eq__(self, other):
        return self.ordering == other.ordering

    def __ne__(self, other):
        return not self.__eq__(other)
def parse_args(*args, **kwargs):
    """Capture positional and keyword arguments and return them as a tuple.

    Used in conjunction with `eval` to parse args and kwargs out of a request
    string.
    """
    return args, kwargs
class ResponseThreadPool:
"""Allows requests to be invoked concurrently, and allows client code to
inspect instance's responses as they are returned.
"""
def get_response(self, request, ordering):
"""Calls request with specified args and kwargs parsed from request
string.
Also sets "Response" key in env to `Response` object, to provide true
"chaining" of requests. If two requests are run serially, the second
request can reference the response returned by the previous request.
"""
if isinstance(request, Request): # no need to prepare request
req = request._replace(ordering=ordering)
else:
req = prepare_request(request, self.env, ordering, self.view)
if req.error is not None:
return Response(req, None, None)
if self.handle_special(req):
return Response(req, None, 'skwarg') # "special" requests handled separately
self.pending_requests.add(req)
res, err = None, ''
if self.is_done: # prevents further requests from being made if pool is cancelled
return Response(req, res, err) # check using: https://requestb.in/
| try:
if | req.session:
session = self.env.get(req.session)
if is_instance(session, 'requests.sessions.Session'):
res = getattr(session, req.method.lower())(*req.args, **req.kwargs)
else:
err = 'Session Error: there is no session `{}` defined in your environment'.format(req.session)
else:
res = methods.get(req.method)(*req.args, **req.kwargs)
except requests.Timeout:
err = 'Timeout Error: the request timed out'
except requests.ConnectionError as e:
err = 'Connection Error: {}'.format(e)
except SyntaxError as e:
err = '{}: {}\n\n{}'.format('Syntax Error', e,
'Run "Requester: Show Syntax" to review properly formatted requests')
except TypeError as e:
err = '{}: {}'.format('Type Error', e)
except Exception as e:
err = '{}: {}'.format('Other Error', e)
self.env['Response'] = res # to allow "chaining" of serially executed requests
if req.skwargs.get('name'):
try:
self.env[str(req.skwargs.get('name'))] = res # calling str could raise exception...
except Exception as e:
print('Name Error: {}'.format(e))
return Response(req, res, err)
    def handle_special(self, req):
        """Handle "special" requests, such as downloads and uploads.

        Returns True if `req` was dispatched to a Download/Upload handler (and
        should therefore not be executed as an ordinary HTTP request), else
        False.
        """
        # NOTE(review): local imports presumably avoid a circular dependency
        # with the command modules -- TODO confirm
        from ..commands.download import Download
        from ..commands.upload import Upload
        if 'filename' in req.skwargs:
            Download(req, req.skwargs['filename'])
            return True
        if 'streamed' in req.skwargs:
            Upload(req, req.skwargs['streamed'], 'streamed')
            return True
        if 'chunked' in req.skwargs:
            Upload(req, req.skwargs['chunked'], 'chunked')
            return True
        return False
    def __init__(self, requests, env, max_workers, view):
        """Prepare a pool that will execute `requests` concurrently under `env`."""
        self.is_done = False  # set True by `run` once all requests have completed
        self.responses = deque()  # completed `Response`s, appended as they finish
        self.requests = requests  # request strings / `Request`s to execute
        self.pending_requests = set()  # in-flight requests; shared across threads
        self.env = env  # namespace in which request strings are evaluated
        self.max_workers = max_workers
        self.view = view
    def get_pending_requests(self):
        """Getter for `self.pending_requests`. This is a `set` that's shared
        between threads, which makes iterating over it unsafe; returning a
        copy lets callers iterate safely.
        """
        return self.pending_requests.copy()
    def run(self):
        """Concurrently invoke `get_response` for all of instance's `requests`."""
        with futures.ThreadPoolExecutor(
            max_workers=min(self.max_workers, len(self.requests))
        ) as executor:
            to_do = []
            for i, request in enumerate(self.requests):
                # `i` doubles as the request's `ordering`
                future = executor.submit(self.get_response, request, i)
                to_do.append(future)

            for future in futures.as_completed(to_do):
                result = future.result()
                # `responses` and `pending_requests` are instance properties, which means
                # client code can inspect instance to read responses as they are completed
                if result.req.error is not None or result.err == 'skwarg':
                    continue
                try:
                    self.pending_requests.remove(result.req)
                except KeyError:
                    print('{} was not in pending requests, this is weird...'.format(result.req))
                self.responses.append(result)
        self.is_done = True
def prepare_request(request, env, ordering, view=None):
"""Parse and evaluate args and kwargs in request string under context of
env.
Also, prepare request string: if request is not prefixed with "{var_name}.",
prefix request with "requests." Accepts a request string and returns a
`Request` instance.
Finally, ensure request can time out so it doesn't hang indefinitely.
http://docs.python-requests.org/en/master/user/advanced/#timeouts
"""
settings = sublime.load_settings('Requester.sublime-settings')
req = prepend_library(request)
session = None
if not req.startswith('requests.'):
session = req.split('.')[0]
index = req.index('(')
method = req[:index].split('.')[1].strip().upper()
env['__parse_args__'] = parse_args
try:
args, kwargs = eval('__parse_args__{}'.format(req[index:]), env)
except Exception as e:
msg = 'PrepareRequest Error: {}\n\n{}'.format(e, truncate(req, 150))
if type(e) is NameError and view and is_auxiliary_view(view):
msg += '\n\nYou may have since deleted env var(s) from your requester file'
sublime.error_message(msg)
return Request(req, method, None, [], {}, ordering, session, {}, error=str(e))
else:
args = list(args)
url_from_kwargs = True
url = kwargs.get('url', None)
if url is None:
url_from_kwargs = False
try:
url = args[0]
except Exception as e:
sublime.error_message('PrepareRequest Error: {}\n{}'.format(
e, truncate(req, 150)
))
return Request(req, method, url, args, kwargs, ordering, session, {}, error=str(e))
if 'explore' in kwargs:
req, e_url = kwargs.pop('explore')
req = replace_method(prepend_library(req), 'get')
if not same_domain(prepend_scheme(e_url), prepend_scheme(url)):
# if explore URL does't have same domain as URL, remove auth kwargs from req
kwargs.pop('params', None)
kwargs.pop('headers', None)
kwargs.pop('cookies', None)
kwargs.pop('auth', None)
req = replace_url(req, e_url, replace_all=True)
else:
req = replace_url(req, e_url, replace_all=False)
url = prepend_scheme(e_url) |
xiaq/jadepy | jade/compile.py | Python | mit | 7,708 | 0 | from sys import stdout
from collections import defaultdict
from .parse import main, HTMLTag
def maybe_call(f, *args, **kwargs):
    """Invoke `f` with the given arguments if it is callable; otherwise return
    `f` itself unchanged.
    """
    return f(*args, **kwargs) if callable(f) else f
class Compiler(object):
    """Compiles parsed Jade into Jinja2 template text written to `stream`.

    Fix: repaired garbled `|` extraction markers that had corrupted
    `isinstance(tag, HTMLTag)` and an `elif` line inside `end_block`.
    """

    def __init__(self, stream):
        self.stream = stream
        self.blocks = []  # stack of currently open tags
        self.deferred_endif = ()  # pending [endif-text, newlines]; see dismiss_endif
        self.tmpvar_count = 0  # counter for generated temporary variable names

    def start(self, parser):
        """
        Called by the parser to start compiling.
        """
        self.parser = parser

    def put_tmpvar(self, val):
        """
        Allocate a temporary variable, output assignment, and return the
        variable name.
        """
        name = '_jade_%d' % self.tmpvar_count
        self.tmpvar_count += 1
        self.stream.write(u'{%% set %s = %s %%}' % (name, val))
        return name

    def dismiss_endif(self):
        """
        Dismiss an endif, only outputting the newlines.

        The parser doesn't take care of if-elif-else matching. Instead, it
        will try to close the if block before opening a new elif or else
        block. Thus the endif block needs to be deferred, along with the
        newlines after it. When non-empty, self.deferred_endif is a list
        [endif, newlines].
        """
        if self.deferred_endif:
            self.stream.write(self.deferred_endif[1])
            self.deferred_endif = ()

    def put_endif(self):
        """
        Output an endif (plus any newlines that followed it).
        """
        if self.deferred_endif:
            self.stream.write(''.join(self.deferred_endif))
            self.deferred_endif = ()

    def start_block(self, tag):
        """
        Called by the parser to start a block. `tag` can be either an HTMLTag
        or a ControlTag.
        """
        if tag.name in ('elif', 'else'):
            self.dismiss_endif()
        else:
            self.put_endif()
        self.blocks.append(tag)
        if isinstance(tag, HTMLTag):
            self.stream.write(u'<%s' % tag.name)
            for a in tag.attr:
                if isinstance(a, basestring):  # Python 2 codebase
                    self.literal(a)
                    continue
                k, v = a
                if k == 'id':
                    # tag(id=xxx) takes precedence over tag#xxx
                    tag.id_ = None
                elif k == 'class':
                    # merge tag(class=xxx) with tag.xxx
                    self.stream.write(
                        u' class="%s{{ _jade_class(%s) |escape}}"' %
                        (tag.class_ and tag.class_ + u' ' or u'', v))
                    tag.class_ = None
                    continue
                self.stream.write(u' %s="{{ %s |escape}}"' % (k, v))
            if tag.id_:
                self.stream.write(u' id="%s"' % tag.id_)
            if tag.class_:
                self.stream.write(u' class="%s"' % tag.class_)
            self.stream.write('>')
        elif tag.name == 'case':
            tag.var = self.put_tmpvar(tag.head)
            tag.seen_when = tag.seen_default = False
        elif tag.name in ('when', 'default'):
            case_tag = len(self.blocks) >= 2 and self.blocks[-2]
            if not case_tag or case_tag.name != 'case':
                raise self.parser.error(
                    '%s tag not child of case tag' % tag.name)
            if tag.name == 'when':
                if case_tag.seen_default:
                    raise self.parser.error('when tag after default tag')
                self.stream.write(u'{%% %s %s == %s %%}' % (
                    'elif' if case_tag.seen_when else 'if',
                    case_tag.var, tag.head))
                case_tag.seen_when = True
            else:
                if case_tag.seen_default:
                    raise self.parser.error('duplicate default tag')
                if not case_tag.seen_when:
                    raise self.parser.error('default tag before when tag')
                self.stream.write(u'{% else %}')
                case_tag.seen_default = True
        else:
            self.stream.write(maybe_call(control_blocks[tag.name][0], tag))

    def end_block(self):
        """
        Called by the parser to end a block. The parser doesn't keep track of
        active blocks.
        """
        tag = self.blocks.pop()
        if isinstance(tag, HTMLTag):
            self.stream.write('</%s>' % tag.name)
        elif tag.name in ('if', 'elif'):
            # defer the endif so a following elif/else can dismiss it
            self.deferred_endif = [u'{% endif %}', '']
        elif tag.name == 'case':
            if not tag.seen_when:
                raise self.parser.error('case tag has no when child')
            self.stream.write('{% endif %}')
        elif tag.name in ('when', 'default'):
            pass  # closed by the enclosing case tag
        else:
            self.stream.write(maybe_call(control_blocks[tag.name][1], tag))

    def literal(self, text):
        """
        Called by the parser to output literal text.
        """
        self.put_endif()
        self.stream.write(text)

    def newlines(self, text):
        """
        Called by the parser to output newlines that are part of the indent.
        """
        if self.deferred_endif:
            self.deferred_endif[1] = text
        else:
            self.literal(text)

    def end(self):
        """
        Called by the parser to terminate compiling.
        """
        self.put_endif()
doctypes = {
'5': '<!DOCTYPE html>',
'default': '<!DOCTYPE html>',
'xml': '<?xml version="1.0" encoding="utf-8" ?>',
'transitional': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 '
'Transitional//EN" "http://www.w3.org/TR/xhtml1/'
'DTD/xhtml1-transitional.dtd">',
'strict': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 '
'Strict//EN" "http://www.w3.org/TR/xhtml1/'
'DTD/xhtml1-strict.dtd">',
'frameset': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 '
'Frameset//EN" "http://www.w3.org/TR/xhtml1/'
'DTD/xhtml1-frameset.dtd">',
'1.1': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" '
'"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">',
'basic': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic '
'1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">',
'mobile': '<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" '
'"http://www.openmobilealliance.org/tech/DTD/'
'xhtml-mobile12.dtd">'
}
def default_start(tag):
    """Generic Jinja2 opening for a control tag: ``{% <name> <head> %}``."""
    return '{{% {0} {1} %}}'.format(tag.name, tag.head)
def default_end(tag):
    """Generic Jinja2 closing for a control tag: ``{% end<name> %}``."""
    return '{{% end{0} %}}'.format(tag.name)
def doctype(tag):
    """Map a doctype tag to its document type declaration, falling back to a
    literal ``<!DOCTYPE ...>`` for unrecognized names.
    """
    key = tag.head.lower() or 'default'
    return doctypes.get(key, '<!DOCTYPE %s>' % tag.head)
control_blocks = defaultdict(
lambda: (default_start, default_end),
{
'=': ('{{ ', ' }}'),
'!=': ('{{ ', ' |safe}}'),
'-': ('{% ', ' %}'),
'|': ('', ''),
'//': (lambda tag: '<!--%s' % tag.head,
'-->'),
'//-': ('{#', '#}'),
':': (lambda tag: '{%% filter %s %%}' % tag.head,
'{% endfilter %}'),
'mixin': (lambda tag: '{%% macro %s %%}' % tag.head,
'{% endmacro %}'),
'prepend': (lambda tag: '{%% block %s %%}' % tag.head,
'{{ super() }} {% endblock %}'),
'append': (lambda tag: '{%% block %s %%} {{ super() }}' % tag.head,
'{% endblock %}'),
'extends': (default_start, ''),
'doctype': (doctype, ''),
'else': ('{% else %}', '{% endif %}'),
})
if __name__ == '__main__':
main(Compiler(stdout))
|
nkuttler/flaskwallet | doc/conf.py | Python | bsd-3-clause | 7,774 | 0.007461 | # -*- coding: utf-8 -*-
#
# Flaskwallet documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 17 14:33:26 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flaskwallet'
copyright = u'2013, Nicolas Kuttler'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
release = '0.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flaskwalletdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flaskwallet.tex', u'Flaskwallet Documentation',
u'Nicolas Kuttler', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flaskwallet', u'Flaskwallet Documentation',
[u'Nicolas Kuttler'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Flaskwallet', u'Flaskwallet Documentation',
u'Nicolas Kuttler', 'Flaskwallet', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
hzlf/openbroadcast | website/cms/test_utils/project/sampleapp/urls.py | Python | gpl-3.0 | 842 | 0.008314 | from django.conf.urls.defaults import *
"""
Also used in cms.tests.ApphooksTestCase
"""
# URL table for the sample app (old Django <=1.7 style: patterns() resolves
# string view names against the dotted module prefix given first).
urlpatterns = patterns('cms.test_utils.project.sampleapp.views',
    url(r'^$', 'sample_view', {'message': 'sample root page',}, name='sample-root'),
    url(r'^settings/$', 'sample_view', kwargs={'message': 'sample settings page'}, name='sample-settings'),
    url(r'^account/$', 'sample_view', {'message': 'sample account page'}, name='sample-account'),
    url(r'^account/my_profile/$', 'sample_view', {'message': 'sample my profile page'}, name='sample-profile'),
    url(r'^(?P<id>[0-9]+)/$', 'category_view', name='category_view'),
    url(r'^notfound/$', 'notfound', name='notfound'),
    url(r'^extra_1/$', 'extra_view', {'message': 'test urlconf'}, name='extra_first'),
    # Catch-all: anything not matched above is delegated to urls_extra.
    url(r'^', include('cms.test_utils.project.sampleapp.urls_extra')),
)
|
sup95/zulip | zproject/settings.py | Python | apache-2.0 | 39,590 | 0.000707 | from __future__ import absolute_import
# Django settings for zulip project.
########################################################################
# Here's how settings for the Zulip project work:
#
# * settings.py contains non-site-specific and settings configuration
# for the Zulip Django app.
# * settings.py imports prod_settings.py, and any site-specific configuration
# belongs there. The template for prod_settings.py is prod_settings_template.py
#
# See http://zulip.readthedocs.io/en/latest/settings.html for more information
#
########################################################################
import os
import platform
import time
import sys
import six.moves.configparser
from zerver.lib.db import TimeTrackingConnection
import six
########################################################################
# INITIAL SETTINGS
########################################################################
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
config_file = six.moves.configparser.RawConfigParser()
config_file.read("/etc/zulip/zulip.conf")
# Whether this instance of Zulip is running in a production environment.
PRODUCTION = config_file.has_option('machine', 'deploy_type')
DEVELOPMENT = not PRODUCTION
secrets_file = six.moves.configparser.RawConfigParser()
if PRODUCTION:
secrets_file.read("/etc/zulip/zulip-secrets.conf")
else:
secrets_file.read(os.path.join(DEPLOY_ROOT, "zproject/dev-secrets.conf"))
def get_secret(key):
    """Return the value of *key* from the [secrets] section, or None if unset."""
    if not secrets_file.has_option('secrets', key):
        return None
    return secrets_file.get('secrets', key)
# Make this unique, and don't share it with anybody.
SECRET_KEY = get_secret("secret_key")
# A shared secret, used to authenticate different parts of the app to each other.
SHARED_SECRET = get_secret("shared_secret")
# We use this salt to hash a user's email into a filename for their user-uploaded
# avatar. If this salt is discovered, attackers will only be able to determine
# that the owner of an email account has uploaded an avatar to Zulip, which isn't
# the end of the world. Don't use the salt where there is more security exposure.
AVATAR_SALT = get_secret("avatar_salt")
# SERVER_GENERATION is used to track whether the server has been
# restarted for triggering browser clients to reload.
SERVER_GENERATION = int(time.time())
if 'DEBUG' not in globals():
# Uncomment end of next line to test JS/CSS minification.
DEBUG = DEVELOPMENT # and platform.node() != 'your-machine'
if DEBUG:
INTERNAL_IPS = ('127.0.0.1',)
# Detect whether we're running as a queue worker; this impacts the logging configuration.
if len(sys.argv) > 2 and sys.argv[0].endswith('manage.py') and sys.argv[1] == 'process_queue':
IS_WORKER = True
else:
IS_WORKER = False
# This is overridden in test_settings.py for the test suites
TEST_SUITE = False
# The new user tutorial is enabled by default, but disabled for client tests.
TUTORIAL_ENABLED = True
# Import variables like secrets from the prod_settings file
# Import prod_settings after determining the deployment/machine type
if PRODUCTION:
from .prod_settings import *
else:
from .dev_settings import *
########################################################################
# DEFAULT VALUES FOR SETTINGS
########################################################################
# For any settings that are not defined in prod_settings.py,
# we want to initialize them to sane default
DEFAULT_SETTINGS = {'TWITTER_CONSUMER_KEY': '',
'TWITTER_CONSUMER_SECRET': '',
'TWITTER_ACCESS_TOKEN_KEY': '',
'TWITTER_ACCESS_TOKEN_SECRET': '',
'EMAIL_GATEWAY_PATTERN': '',
'EMAIL_GATEWAY_EXAMPLE': '',
'EMAIL_GATEWAY_BOT': None,
'EMAIL_GATEWAY_LOGIN': None,
'EMAIL_GATEWAY_PASSWORD': None,
'EMAIL_GATEWAY_IMAP_SERVER': None,
'EMAIL_GATEWAY_IMAP_PORT': None,
'EMAIL_GATEWAY_IMAP_FOLDER': None,
'EMAIL_GATEWAY_EXTRA_PATTERN_HACK': None,
'S3_KEY': '',
'S3_SECRET_KEY': '',
'S3_AVATAR_BUCKET': '',
'LOCAL_UPLOADS_DIR': None,
'MAX_FILE_UPLOAD_SIZE': 25,
'ERROR_REPORTING': True,
'STAGING_ERROR_NOTIFICATIONS': False,
'EVENT_LOGS_ENABLED': False,
'SAVE_FRONTEND_STACKTRACES': False,
'JWT_AUTH_KEYS': {},
'NAME_CHANGES_DISABLED': False,
'DEPLOYMENT_ROLE_NAME': "",
'RABBITMQ_HOST': 'localhost',
'RABBITMQ_USERNAME': 'zulip',
'MEMCACHED_LOCATION': '127.0.0.1:11211',
'RATE_LIMITING': True,
'REDIS_HOST': '127.0.0.1',
'REDIS_PORT': 6379,
# The following bots only exist in non-VOYAGER installs
'ERROR_BOT': None,
'NEW_USER_BOT': None,
'NAGIOS_STAGING_SEND_BOT': None,
'NAGIOS_STAGING_RECEIVE_BOT': None,
'APNS_CERT_FILE': None,
'APNS_KEY_FILE': None,
'APNS_SANDBOX': True,
'ANDROID_GCM_API_KEY': None,
'INITIAL_PASSWORD_SALT': None,
'FEEDBACK_BOT': 'feedback@zulip.com',
'FEEDBACK_BOT_NAME': 'Zulip Feedback Bot',
'ADMINS': '',
'SHARE_THE_LOVE': False,
'INLINE_IMAGE_PREVIEW': True,
'CAMO_URI': '',
'ENABLE_FEEDBACK': PRODUCTION,
'SEND_MISSED_MESSAGE_EMAILS_AS_USER': False,
'SERVER_EMAIL': None,
'FEEDBACK_EMAIL': None,
'WELCOME_EMAIL_SENDER': None,
'EMAIL_DELIVERER_DISABLED': False,
'ENABLE_GRAVATAR': True,
'DEFAULT_AVATAR_URI': '/static/images/default-avatar.png',
'AUTH_LDAP_SERVER_URI': "",
'EXTERNAL_URI_SCHEME': "https://",
'Z | ULIP_COM': False,
'SHOW_OSS_ANNOUNCEMENT': False,
| 'REGISTER_LINK_DISABLED': False,
'LOGIN_LINK_DISABLED': False,
'ABOUT_LINK_DISABLED': False,
'CUSTOM_LOGO_URL': None,
'VERBOSE_SUPPORT_OFFERS': False,
'STATSD_HOST': '',
'OPEN_REALM_CREATION': False,
'REALMS_HAVE_SUBDOMAINS': False,
'ROOT_SUBDOMAIN_ALIASES': ["www"],
'REMOTE_POSTGRES_HOST': '',
'REMOTE_POSTGRES_SSLMODE': '',
# Default GOOGLE_CLIENT_ID to the value needed for Android auth to work
'GOOGLE_CLIENT_ID': '835904834568-77mtr5mtmpgspj9b051del9i9r5t4g4n.apps.googleusercontent.com',
'SOCIAL_AUTH_GITHUB_KEY': None,
'SOCIAL_AUTH_GITHUB_ORG_NAME': None,
'SOCIAL_AUTH_GITHUB_TEAM_ID': None,
'DBX_APNS_CERT_FILE': None,
'DBX_APNS_KEY_FILE': None,
'PERSONAL_ZMIRROR_SERVER': None,
'EXTRA_INSTALLED_APPS': [],
'DEFAULT_NEW_REALM_STREAMS': ["social", "general", "zulip"],
'REALM_CREATION_LINK_VALIDITY_DAYS': 7,
'TERMS_OF_SERVICE': None,
'TOS_VERSION': None,
'SYSTEM_ONLY_REALMS': {"zulip.com"},
'FIRST_TIME_TOS_TEMPLATE': None,
'USING_PGROONGA': False,
}
for setting_name, setting_val in six.iteritems(DEFAULT_SETTINGS):
if setting_name not in vars():
vars()[setting_name] = setting_val
# Extend ALLOWED_HOSTS with localhost (needed to RPC to Tornado).
ALLOWED_HOSTS += ['127.0.0.1', 'localh |
apoikos/servermon | hwdoc/management/commands/hwdoc_license.py | Python | isc | 1,809 | 0.002765 | # -*- coding: utf-8 -*- vim:fileencoding=utf-8:
# vim: tabstop=4:shiftwidth=4:softtabstop=4:expandtab
# Copyright © 2010-2012 Greek Research and Technology Network (GRNET S.A.)
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all | copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EV | ENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
'''
Django management command to change BMC licsnse
'''
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy as _l
from optparse import make_option
import _common
class Command(BaseCommand):
    '''
    Django management command to change a BMC (baseboard management
    controller) license key on hwdoc-managed equipment.
    '''
    help = _l('Changes a BMC license')
    args = '[key]'
    # Extend the stock Django options with -l/--license plus the shared
    # hwdoc selection options contributed by _common.
    option_list = BaseCommand.option_list + (
        make_option('-l', '--license',
            action='store',
            type='string',
            dest='license',
            help=_l('License key. Valid value depends on backend')),
    ) + _common.option_list
    def handle(self, *args, **options):
        '''
        Dispatch to the shared hwdoc handler with the 'license_set'
        sub-command; _common.handle performs the actual work.
        '''
        options['command'] = 'license_set'
        # NOTE(review): result is currently unused and handle() returns None.
        result = _common.handle(self, *args, **options)
|
timothycrosley/instantly | instantly/main.py | Python | gpl-2.0 | 8,313 | 0.003609 | """ instantly/main.py
Defines the basic terminal interface for interacting with Instantly.
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
from pies.overrides import *
from . import __version__
from .instantly import Instantly
def main():
instantly = Instantly()
if not len(sys.argv) > 1:
print("Instantly allows you to expand simple templates, that take in a set number of arguments")
print("Usage: instantly [template name] to expand a template")
print(" type instantly help for full instructions.")
print("")
print("Installed Templates:")
print("\t" + str(instantly.installed_templates))
sys.exit(1)
command = sys.argv[1]
template_name = sys.argv[2:3] and sys.argv[2] or ""
extra_inputs = sys.argv[2:]
if command == "help":
print("Instantly Commands")
print("")
print("instantly [template name]")
print("\t Expand the named template")
print("instantly help")
print("\t Get full list of commands / help text")
print("instantly find [template name]")
print("\t Find pre-made templates to automate a task online")
print("instantly download [template name]")
print("\t Add a template shared online to your local template repository")
print("instantly install [template directory]")
print("\t Installs an instant_template directory from the local file system "
"or online repository into your personal collection of templates")
print("instantly uninstall [template name]")
print("\t Permanently removes an installed template locally")
print("instantly create_instant_template")
print("\t Create a new instant template to automate a task")
print("instantly share [template name]")
print("\t Share a template you have created with others online")
print("\t Must register your google account with http://instantly.pl/ to do this")
print("instantly unshare [template name]")
print("\t Removes a template that you previously shared from the instantly online repository.")
print("instantly location [template name]")
print("\t Will tell you where the specified template is located on disk.")
print("instantly create_settings [template directory]")
print("\t Will create an alternate settings / template directory within the current directory.")
print("instantly version")
print("\t Will tell you the version of instantly you have installed.")
sys.exit(0)
elif command == "uninstall":
if input("Are you sure you want to delete %s (y/n)? " % template_name).lower() in ("y", "yes"):
if instantly.uninstall(template_name):
print("Successfully removed %s from local templates" % template_name)
sys.exit(0)
else:
sys.exit(1)
elif command == "version":
print("instantly v. {0}".format(__version__))
sys.exit(0)
elif command == "location":
template = instantly.installed_template(template_name)
if not template:
print("Sorry template does not exist!")
sys.exit(1)
return template.location
sys.exit(0)
elif command == "share":
if instantly.share(template_name):
print("Successfully shared %s, thanks for helping to expand the number of instant templates!" % template_name)
sys.exit(0)
else:
sys.exit(1)
elif command == "unshare":
if instantly.unshare(template_name):
print("Successfully un-shared %s!" % template_name)
sys.exit(0)
else:
sys.exit(1)
elif command == "create_settings":
if instantly.create_settings():
print("Successfully created a new settings / templates directory!")
sys.exit(0)
else:
sys.exit(1)
elif command == "find":
results = instantly.find(template_name)
if not results:
print("Sorry: no templates have been shared that match the search term '%s'," % template_name)
print(" but you could always add one ;)")
sys.exit(0)
print("Instantly found the following templates:")
for result in results:
print(result)
print(" To install one of these templates run: instantly install [template_name]")
sys.exit(0)
elif command == "install":
if instantly.install(template_name):
| print("%(name)s has been installed as a local template. Run 'instantly %(name)s' to expand it." % \
{"name":template_name})
sys.exit(0)
else:
print("Sorry: no one has thought of a way to instantly '%s'," % template_name)
print(" but you could always create one ;)")
sys.exit(0)
else:
template_name = command
template = instantly.get_template(template_name)
| if not template:
print("Sorry: no one has thought of a way to instantly '%s'," % template_name)
print(" but you could always create one ;)")
sys.exit(1)
print("Expanding the following template:")
print(template)
arguments = {}
for argument, argument_definition in itemsview(template.arguments):
print("")
if extra_inputs:
arguments[argument] = extra_inputs.pop(0)
else:
argument_type = argument_definition.get('type', 'string')
default = instantly.settings['defaults'].get(argument, '') or argument_definition.get('default', '')
help_text = argument_definition.get('help_text')
if help_text:
print("Help Text: {0}".format(help_text))
prompt = argument_definition.get('prompt', '')
if default:
prompt += " [Default: {0}]".format(default)
if argument_type == "bool":
prompt += " (y/n)"
prompt += ": "
value = ""
while value == "":
value = input(prompt)
if argument_type == "bool":
if value.lower() in ("y", "yes"):
value = True
elif value.lower() in ("n", "no"):
value = False
else:
value = default or ""
elif argument_type == "int":
if value.isdigit():
value = int(value)
elif not value:
value = default
else:
value = ""
elif not value:
value = default
arguments[argument] = value
success_message = instantly.expand(template_name, arguments)
if success_message != False:
print("Successfully ran '{0}'!". |
adamhaney/airflow | airflow/utils/log/logging_mixin.py | Python | apache-2.0 | 5,510 | 0.000544 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
import warnings
import six
from builtins import object
from contextlib import contextmanager
from logging import Handler, StreamHandler
class LoggingMixin(object):
    """
    Convenience super-class to have a logger configured with the class name
    """
    def __init__(self, context=None):
        self._set_context(context)

    # We want to deprecate the logger property in Airflow 2.0
    # The log property is the de facto standard in most programming languages
    @property
    def logger(self):
        qualified_name = self.__class__.__module__ + '.' + self.__class__.__name__
        warnings.warn(
            'Initializing logger for {} using logger(), which will '
            'be replaced by .log in Airflow 2.0'.format(qualified_name),
            DeprecationWarning
        )
        return self.log

    @property
    def log(self):
        # Lazily create and cache a per-class child of the root logger.
        if not hasattr(self, '_log'):
            self._log = logging.root.getChild(
                self.__class__.__module__ + '.' + self.__class__.__name__
            )
        return self._log

    def _set_context(self, context):
        # No context means nothing to propagate to the handlers.
        if context is None:
            return
        set_context(self.log, context)
# TODO: Formally inherit from io.IOBase
class StreamLogWriter(object):
    """
    File-like shim that redirects stdout/stderr writes into a logger.
    Input is buffered until a newline (or an explicit flush) completes a line.
    """
    # Advertise "no encoding" for callers that probe stream attributes.
    encoding = False

    def __init__(self, logger, level):
        """
        :param logger: destination logger for the buffered lines
        :param level: level to emit records at, e.g. ``logging.INFO``
        """
        self.logger = logger
        self.level = level
        self._buffer = str()

    @property
    def closed(self):
        """
        Always False: the stream stays open for the whole Airflow lifecycle.
        Present for compatibility with the io.IOBase interface.
        """
        return False

    def write(self, message):
        """
        Append *message* to the buffer; emit one log record per complete line.

        :param message: chunk of text, possibly without a trailing newline
        """
        self._buffer += message
        if message.endswith("\n"):
            self.logger.log(self.level, self._buffer.rstrip())
            self._buffer = str()

    def flush(self):
        """
        Emit whatever remains buffered, even without a trailing newline.
        """
        if self._buffer:
            self.logger.log(self.level, self._buffer)
            self._buffer = str()

    def isatty(self):
        """
        Always False: this fd is not connected to a tty(-like) device.
        """
        return False
class RedirectStdHandler(StreamHandler):
    """
    Like a StreamHandler bound to sys.stdout/sys.stderr, except that the
    stream is resolved at emit time rather than captured at construction,
    so later reassignment of sys.stdout/sys.stderr is respected.
    """
    def __init__(self, stream):
        if not isinstance(stream, six.string_types):
            raise Exception("Cannot use file like objects. Use 'stdout' or 'stderr'"
                            " as a str and without 'ext://'.")
        # Anything that doesn't mention 'stdout' is routed to stderr.
        self._use_stderr = 'stdout' not in stream
        # Bypass StreamHandler.__init__ because it would pin self.stream.
        Handler.__init__(self)

    @property
    def stream(self):
        return sys.stderr if self._use_stderr else sys.stdout
@contextmanager
def redirect_stdout(logger, level):
    """Temporarily route sys.stdout writes into *logger* at *level*."""
    sys.stdout = StreamLogWriter(logger, level)
    try:
        yield
    finally:
        # Restore the interpreter's original stdout (not a saved reference),
        # matching the pre-existing behaviour.
        sys.stdout = sys.__stdout__
@contextmanager
def redirect_stderr(logger, level):
    """Temporarily route sys.stderr writes into *logger* at *level*."""
    sys.stderr = StreamLogWriter(logger, level)
    try:
        yield
    finally:
        # Restore the interpreter's original stderr (not a saved reference),
        # matching the pre-existing behaviour.
        sys.stderr = sys.__stderr__
def set_context(logger, value):
    """
    Walk *logger* and (while propagation is enabled) its ancestors, handing
    *value* to every handler that implements ``set_context``.

    :param logger: logger whose handler chain should receive the context
    :param value: context object to pass along
    """
    current = logger
    while current is not None:
        for handler in current.handlers:
            try:
                handler.set_context(value)
            except AttributeError:
                # Handlers without set_context simply don't take a context;
                # silently skip them.
                pass
        current = current.parent if current.propagate is True else None
|
pepincho/Python101-and-Algo1-Courses | Algo-1/week3/5-Bandwidth-Manager/bandwidth_manager.py | Python | mit | 1,423 | 0 | # Priority Queue
import heapq
class Packet:
    """A queued network packet; orders by protocol urgency, then FIFO."""

    # Higher number == more urgent protocol.
    PROTOCOL_PRIORITY = {
        'ICMP': 10,
        'UDP': 9,
        'RTM': 8,
        'IGMP': 7,
        'DNS': 6,
        'TCP': 5
    }

    def __init__(self, protocol, payload, sequence_number):
        self.priority = Packet.PROTOCOL_PRIORITY[protocol]
        self.payload = payload
        self.sequence_number = sequence_number

    def __lt__(self, other):
        """Min-heap ordering: more urgent first; FIFO within a priority."""
        if self.priority != other.priority:
            return self.priority > other.priority
        return self.sequence_number < other.sequence_number
class BandwidthManager:
    """Priority queue of received packets, drained in urgency order."""

    def __init__(self):
        self.heap = []
        self.sequence_number = 0

    def rcv(self, protocol, payload):
        """Queue an incoming packet with the given protocol and payload."""
        packet = Packet(protocol, payload, self.sequence_number)
        self.sequence_number += 1
        heapq.heappush(self.heap, packet)

    def send(self):
        """Pop and return the payload of the most urgent queued packet."""
        if not self.heap:
            return 'Nothing to send!'
        return heapq.heappop(self.heap).payload
def main():
    """Read N commands from stdin: 'rcv <protocol> <payload>' queues a
    packet, any other line sends (prints) the next payload."""
    N = int(input())
    bm = BandwidthManager()
    while N != 0:
        line = input().split()
        if line[0] == 'rcv':
            bm.rcv(line[1], line[2])
        else:
            print(bm.send())
        N -= 1
if __name__ == '__main__':
    main()
|
skoli0/vmbuilder | helper/helper.py | Python | apache-2.0 | 5,599 | 0.003215 | import re
import os, sys
import json
import csv
import shutil
import ctypes
import logging
import datetime
import fileinput
import subprocess
import xml.etree.ElementTree as etree
DEFAULT_HELPER_PATH = "helper"
class Logger(object):
    """Tee-style stdout replacement that mirrors every write to a log file."""
    def __init__(self):
        """Open the append-mode log file and remember the current stdout.
        The file handle is deliberately kept open for the lifetime of the
        process: this object is meant to be installed as sys.stdout.
        """
        self.terminal = sys.stdout
        self.log = open("image-gen-logfile.log", "a")
    def write(self, message):
        """Write *message* both to the terminal and to the log file.
        :param message: text to emit (no newline or timestamp is added)
        :return: None
        """
        # NOTE(review): 'now' is computed but never used -- the timestamp
        # is not currently prepended to the logged line.
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S - ')
        self.terminal.write(message)
        self.log.write(message)
    def flush(self):
        """No-op flush, needed so this object satisfies the file protocol.
        :return: None
        """
        # this flush method is needed for python 3 compatibility.
        # this handles the flush command by doing nothing.
        pass
class helper(object):
    """Collection of static utility helpers for the image build tooling."""

    @staticmethod
    def executable_in_path(executable):
        '''Returns the full path to an executable according to PATH,
        otherwise None.'''
        if os.name == 'nt':
            # BUGFIX: this previously always looked up 'packer' regardless
            # of the requested executable.
            return shutil.which(executable)
        path = os.environ.get('PATH')
        if not path:
            # BUGFIX: was Python-2 'print >> sys.stderr' (a runtime TypeError
            # on Python 3) and then crashed on path.split(':') with path=None.
            print("Warning: No PATH could be searched", file=sys.stderr)
            return None
        for directory in path.split(':'):
            fullpath = os.path.join(directory, executable)
            if os.path.isfile(fullpath) and os.access(fullpath, os.X_OK):
                return fullpath
        return None

    @staticmethod
    def validate_argtype(arg, argtype):
        """Return *arg* unchanged if it is an instance of *argtype*.

        :param arg: value to check
        :param argtype: expected type (or tuple of types)
        :raises HelperException: when the type check fails
        :return: *arg*, unmodified
        """
        if not isinstance(arg, argtype):
            raise HelperException('{0} argument must be of type {1}'.format(
                arg, argtype))
        return arg

    @staticmethod
    def get_guestos(os_string, os_arch, vm_provider):
        """Map a free-form OS description to a provider-specific guest-OS id.

        Looks the normalized name up in ``helper/<provider>-guestos.json``
        and falls back to the generic Windows/Linux entries.

        :param os_string: e.g. "Ubuntu Linux 14" or "Windows 2012"
        :param os_arch: architecture; historically both '64'/'32' (str) and
            64/32 (int) were passed, so both are accepted
        :param vm_provider: provider name used to pick the mapping file
        """
        guestos = ""
        if "linux" in os_string.lower():
            # Strip digits and punctuation: "Ubuntu Linux 14.04" -> "Ubuntu Linux"
            guestos = re.sub(r'\W+', ' ', re.sub(r'\d+', ' ', os_string)).strip()
        if "windows" in os_string.lower():
            guestos = os_string
        # Normalize the arch once; the old code compared against '64' (str)
        # in one place and 32 (int) in another, which misrouted str inputs.
        arch = str(os_arch)
        if arch == '64':
            guestos = guestos + "_" + arch
        guestos = guestos.replace(" ", "_")
        data = ""
        try:
            guest_os_file = os.path.join(DEFAULT_HELPER_PATH, (vm_provider.lower() + '-guestos.json'))
            with open(guest_os_file) as data_file:
                data = json.load(data_file)
        except (OSError, IOError) as ex:
            # Best-effort: report and fall through with empty data, as before.
            print("error in opening packer template json file")
            logging.error(str(ex))  # ex.message was Python-2 only
            print(str(ex))
        if guestos in data:
            return data[guestos]
        elif "windows" in guestos.lower():
            return data['Windows'] if arch == '32' else data['Windows_64']
        elif "linux" in guestos.lower():
            return data['Linux'] if arch == '32' else data['Linux_64']

    @staticmethod
    def run(cmd):
        """Run *cmd*, streaming its combined stdout/stderr to the console.

        :param cmd: command line as a single whitespace-separated string
        :return: the process exit code, or None if interrupted
        """
        try:
            proc = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, bufsize=1)
            for line in iter(proc.stdout.readline, b''):
                print(line.rstrip().decode('utf-8'))
            proc.stdout.close()
            return proc.wait()
        except (subprocess.CalledProcessError, KeyboardInterrupt):
            # NOTE: the child process is intentionally not killed here,
            # preserving the previous behaviour.
            print("Received keyboard interrupt, terminating the build process...")

    @staticmethod
    def SearchReplaceInFile(file, searchpattern, replacewith):
        """Replace *searchpattern* with *replacewith* in *file*, in place.

        :param file: path of the file to rewrite
        :param searchpattern: literal substring to look for
        :param replacewith: replacement text
        """
        for line in fileinput.input(file, inplace=1):
            if searchpattern in line:
                line = line.replace(searchpattern, replacewith)
            sys.stdout.write(line)
        fileinput.close()

    @staticmethod
    def get_productkey(_dbms_query):
        """Stub: product-key lookup is not implemented; returns a blank key.

        :param _dbms_query: unused
        """
        return " "
class HelperException(Exception):
    """Raised by helper methods on invalid input (see helper.validate_argtype)."""
    pass
|
simar7/build-mozharness | configs/developer_config.py | Python | mpl-2.0 | 1,008 | 0.001984 | # This config file can be appended to any other mozharness job
# running under tbpl. The purpose of this config is to override
# values that are specific to Release Engineering machines
# that can reach specific hosts within their network.
# In other words, this config allows you to run any job
# outside of the Release Engineering network
#
# Using this config file should be accompanied with using
# --test-url and --installer-url where appropiate
import os
config = {
# General variables overwrite
"developer_mode": True,
"exes": {},
"find_links": ["http://pypi.pub.build.mozilla.org/pub"],
"tooltool_servers": ["https://secure.pub.build.mozilla.org/toolto | ol/pvt/build"],
"replace_urls": [
("http://pvtbuilds.pvt.buil | d", "https://pvtbuilds"),
("http://tooltool.pvt.build.mozilla.org/build", "https://secure.pub.build.mozilla.org/tooltool/pvt/build")
],
# Talos related
"python_webserver": True,
"virtualenv_path": '%s/build/venv' % os.getcwd(),
}
|
havardgulldahl/jottalib | src/jottalib/scanner.py | Python | gpl-3.0 | 5,128 | 0.010541 | #!/usr/bin/env python
# encoding: utf-8
"""A service to sync a local file tree to jottacloud.
Copies and updates files in the cloud by comparing md5 hashes, like the official client.
Run it from crontab at an appropriate interval.
"""
# This file is part o | f jottacloudclient.
#
# jottacloudclient is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jottacloudclient is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITN | ESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jottacloudclient. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014-2015 Håvard Gulldahl <havard@gulldahl.no>
#import included batteries
import os, re, os.path, sys, logging, argparse
import math, time
log = logging.getLogger(__name__)
#import pip modules
from clint.textui import progress, puts, colored
#import jottalib
from jottalib.JFS import JFS
from . import jottacloud, __version__
if sys.platform != "win32":
# change progress indicators to something that looks nice
#TODO: rather detect utf-8 support in the terminal
#TODO: change this when https://github.com/kennethreitz/clint/pull/151 is merged
progress.BAR_EMPTY_CHAR =u'○'
progress.BAR_FILLED_CHAR=u'●'
def humanizeFileSize(size):
    """Render a byte count as a human readable string, e.g. '1.500KiB'.

    The sign is discarded (only the magnitude is shown) and 0 maps to "0B".
    Values beyond the largest known unit are clamped to YiB instead of
    raising IndexError as the previous implementation did.

    :param size: byte count (int or float, may be negative)
    :return: formatted string such as '512.000B' or '1.500KiB'
    """
    size = abs(size)
    if size == 0:
        return "0B"
    units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    p = int(math.floor(math.log(size, 2) / 10))
    # Clamp so astronomically large sizes reuse the last unit rather than
    # indexing past the end of the units list.
    p = min(p, len(units) - 1)
    return "%.3f%s" % (size / math.pow(1024, p), units[p])
def filescanner(topdir, jottapath, jfs, errorfile, exclude=None, dry_run=False, prune_files=True, prune_folders=True):
    """Sync the local tree below *topdir* with *jottapath* on JottaCloud.

    Uploads new local files, optionally deletes remote files/folders that no
    longer exist locally, and re-uploads files whose contents changed.

    :param topdir: local directory to scan
    :param jottapath: remote JottaCloud path corresponding to topdir
    :param jfs: an authenticated JFS session
    :param errorfile: path of the log file mentioned in the error summary
    :param exclude: iterable of exclude patterns passed to jottacloud.compare
    :param dry_run: when True, only report what would be done
    :param prune_files: delete remote files missing locally
    :param prune_folders: delete remote folders missing locally
    """
    errors = {}

    def saferun(cmd, *args):
        # Run cmd(*args); on any failure, record the error against the first
        # argument (the affected path) and return False so callers can skip it.
        log.debug('running %s with args %s', cmd, args)
        try:
            # BUGFIX: was apply(cmd, args) -- apply() was removed in Python 3;
            # argument unpacking behaves identically on Python 2 and 3.
            return cmd(*args)
        except Exception as e:
            puts(colored.red('Ouch. Something\'s wrong with "%s":' % args[0]))
            log.exception('SAFERUN: Got exception when processing %s', args)
            errors[args[0]] = e
            return False

    _files = 0
    try:
        for dirpath, onlylocal, onlyremote, bothplaces, onlyremotefolders in jottacloud.compare(topdir, jottapath, jfs, exclude_patterns=exclude):
            puts(colored.green("Entering dir: %s" % dirpath))
            if len(onlylocal):
                _start = time.time()
                _uploadedbytes = 0
                for f in progress.bar(onlylocal, label="uploading %s new files: " % len(onlylocal)):
                    if os.path.islink(f.localpath):
                        log.debug("skipping symlink: %s", f)
                        continue
                    log.debug("uploading new file: %s", f)
                    if not dry_run:
                        if saferun(jottacloud.new, f.localpath, f.jottapath, jfs) is not False:
                            _uploadedbytes += os.path.getsize(f.localpath)
                            _files += 1
                _end = time.time()
                # Guard against a zero-length interval (e.g. all symlinks
                # skipped instantly) to avoid ZeroDivisionError.
                _elapsed = (_end - _start) or 1e-9
                puts(colored.magenta("Network upload speed %s/sec" % (humanizeFileSize(_uploadedbytes / _elapsed))))
            if prune_files and len(onlyremote):
                puts(colored.red("Deleting %s files from JottaCloud because they no longer exist locally " % len(onlyremote)))
                for f in progress.bar(onlyremote, label="deleting JottaCloud file: "):
                    log.debug("deleting cloud file that has disappeared locally: %s", f)
                    if not dry_run:
                        if saferun(jottacloud.delete, f.jottapath, jfs) is not False:
                            _files += 1
            if len(bothplaces):
                for f in progress.bar(bothplaces, label="comparing %s existing files: " % len(bothplaces)):
                    log.debug("checking whether file contents has changed: %s", f)
                    if not dry_run:
                        if saferun(jottacloud.replace_if_changed, f.localpath, f.jottapath, jfs) is not False:
                            _files += 1
            if prune_folders and len(onlyremotefolders):
                puts(colored.red("Deleting %s folders from JottaCloud because they no longer exist locally " % len(onlyremotefolders)))
                for f in onlyremotefolders:
                    if not dry_run:
                        if saferun(jottacloud.deleteDir, f.jottapath, jfs) is not False:
                            # CONSISTENCY FIX: use the module logger like the
                            # rest of this function, not the root logger.
                            log.debug("Deleted remote folder %s", f.jottapath)
    except KeyboardInterrupt:
        # Ctrl-c pressed, cleaning up
        pass

    if len(errors) == 0:
        puts('Finished syncing %s files to JottaCloud, no errors. yay!' % _files)
    else:
        puts(('Finished syncing %s files, ' % _files) +
             colored.red('with %s errors (read %s for details)' % (len(errors), errorfile, )))
|
ArcherSys/ArcherSys | Lib/encodings/iso2022_jp_ext.py | Python | mit | 3,347 | 0.012549 | <<<<<<< HEAD
<<<<<<< HEAD
#
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    # Stateless one-shot encode/decode, delegated to the C codec object
    # obtained above via _codecs_iso2022.getcodec('iso2022_jp_ext').
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Chunk-by-chunk encoder; the multibyte base class keeps codec state
    # between calls (ISO-2022 uses escape-sequence shift states).
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Chunk-by-chunk decoder; the multibyte base class keeps codec state
    # between calls (ISO-2022 uses escape-sequence shift states).
    # NOTE: the original line contained an extraction artifact that split
    # the base-class name ("codecs.Inc | rementalDecoder"); restored here,
    # matching the duplicate copies of this file section.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # File-like reader that decodes ISO-2022-JP-EXT from a byte stream.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # File-like writer that encodes ISO-2022-JP-EXT onto a byte stream.
    codec = codec
def getregentry():
    """Return the :class:`codecs.CodecInfo` record for this codec.

    The codecs machinery calls this to register the codec when
    ``'iso2022_jp_ext'`` is requested by name.
    """
    # NOTE: the original contained an extraction artifact splitting the
    # "decode=" keyword ("decod | e="); restored here, matching the
    # duplicate copies of this file section.
    return codecs.CodecInfo(
        name='iso2022_jp_ext',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
=======
#
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp_ext',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
#
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp_ext',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
olielvewen/QDvGrab | qdvgrab/images/qdvgrabressources_rc.py | Python | gpl-3.0 | 599,843 | 0.000008 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.0)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x23\x1c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x37\x5d\x00\x00\x37\x5d\x01\x19\x80\
\x46\x5d\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd8\x02\x16\x0a\x2f\
\x02\xcc\x8e\xb3\x56\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\
\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x22\x9c\x49\x44\x41\x54\x78\
\xda\xec\x7d\x69\x90\x1c\xc7\x95\xde\xf7\xb2\xaa\xaf\x39\x30\x07\
\x30\x38\x38\xb8\x09\xf0\x12\x71\xf2\x5e\x8a\x3a\x96\x02\x45\x29\
\x24\x51\xeb\x5d\x29\xc2\x21\x3b\x1c\xe1\x08\xff\xf4\x0f\x87\x8f\
\x08\x47\xac\x57\x87\x17\x6b\x3b\x6c\xff\x72\xf8\x8f\x1c\x61\x47\
\x38\xd6\xbb\x21\xca\x6b\x91\xa2\x96\x26\xb5\x96\x78\x89\x37\x05\
\x10\x04\x01\x08\x00\x89\x63\x00\x0c\x30\x33\x98\xc1\x1c\x3d\x3d\
\xdd\x5d\x55\xf9\x9c\xc8\xee\x46\xb2\x3b\xa7\xae\x99\x1e\x82\x1d\
\xc2\x63\x24\xab\x3a\xab\xba\x0a\x5d\xdf\xf7\xbe\xf7\xf2\xa8\x1c\
\xdc\xb2\x5b\x76\xcb\x6e\xd9\x2d\xbb\x65\xb7\xec\xf7\xd2\x08\x4b\
\xb5\x97\x18\xd8\x7a\x0e\x78\xf7\x1c\x61\xe7\x97\x80\xbd\xb8\x65\
\x9f\xa6\xbd\x0f\xe0\xf4\xcb\xc0\x03\x5b\x19\xe7\xb6\x02\x5f\xa6\
\x4f\x81\x00\xcc\xc0\xdf\x7d\x40\xd8\xbe\x1b\xc8\x8e\x13\xae\x14\
\x09\x83\x39\xc2\x35\x22\x74\x09\x60\x30\x0b\xe0\x7a\xe9\xc1\x2d\
\x5b\x09\x2b\x02\xa8\x02\x53\xaa\x94\x24\x30\xc0\x8c\xa9\x0a\x63\
\x7d\x0f\xa3\xba\x96\x71\xe6\x03\xe0\xc0\x6e\x06\x51\xbb\x09\xc0\
\xc0\x2f\x15\xf0\x77\xdd\x46\x98\xf1\x09\x0e\x09\x57\xfa\x4e\x16\
\x19\xc7\x15\x52\x30\x40\x60\x26\x16\x0e\x81\x1c\xc0\xf7\x71\xcb\
\x56\xc0\x5c\x17\xe0\x00\x24\x03\x06\x11\x13\xc0\xbe\x14\xb2\x0a\
\x2f\xf0\x85\x1b\x20\x60\x89\x3e\x97\xf1\xbb\x51\xc6\x13\x8a\x08\
\xa0\x36\x10\xe0\x3b\x0c\xfc\x9b\x19\x82\xe3\x08\x94\x27\x9c\x5c\
\x36\xeb\x66\x99\x32\x0a\xfb\x0c\x4b\xce\xb8\x44\x6e\x5e\x50\x36\
\xe7\xba\x19\x26\x50\x83\x30\xb7\x6c\xe5\x22\x36\x31\xd8\x0f\x7c\
\xaf\x1a\xb0\x57\x61\x78\x2c\xe0\x29\x0e\x78\x55\x52\x9f\xab\x55\
\x1f\xf9\xa1\x00\x41\x20\xf1\x6f\xfb\x18\x3f\xa5\x65\x10\xe0\x69\
\x06\x76\xcd\x11\xfa\x7a\x85\x98\xbc\x94\x29\x08\x37\x4b\x2c\xf3\
\x43\x4c\x03\x1b\xaf\xf9\xdf\xcc\x07\x7c\x80\x7d\xb9\xa7\x54\x95\
\xfd\x53\xf3\xd5\x1c\xaf\x14\xee\x6c\xed\xc4\x70\x8c\x43\xaa\x39\
\xfe\x54\x36\x95\xa9\xfe\x4d\xdc\x72\x50\x7f\xb6\xeb\xe2\xef\xcf\
\x11\x37\x34\xa0\xad\xeb\xcb\x57\x56\x75\x65\xa6\x9d\x8c\xf8\xa0\
\xe2\x8a\xff\x77\x7e\x4d\xee\xe7\x23\ | xe0\x29\x26\x51\x5e\x90\x7e\
\x55\xae\x1e\xf6\x30\x33\x27\x71\xb4\x97\xf1\x5d\x5a\x0a\x01\x18\
\x38\x3f\x43\x98\x23\x21\x44\x29\xd3\xe5\x07\x85\x8c\xa0\x9e\x87\
\x8b\xf8\xce\x7a\x21\xfe\x45\xb7\xeb\x6c\xf0\x03\x89\x79\x4f\x62\
\xbc\x58\xc5\xf9\xe9\x32\x98\xdb\x01\x30\x01\x14\xf6\x80\xd3\x82\
\xce\xf1\xdf\x63\xeb\x5e\x49\x00\x8f\x26\x81\x4d\xa8\x94\xa0\x47\
\x7f\x47\x10\xb0\x63\xa8\x1b\x6b\x57\xe5\xd0\x9d\x73\x91\x75\x08\
\x25\x5f\x5e\x1e\x95\xf2\x3f\xbd\xd2\xe7\xfc\xd4\x93\x | 5c\x2c\xb9\
\xce\x82\x94\x5d\x1e\x7a\x59\x62\x4b\x5f\x68\x38\x70\xc3\xbd\xff\
\x18\x61\xa1\x4f\x08\x0f\x99\x2e\xc7\x29\xf4\x93\x33\xf0\x95\x8a\
\xf8\xd1\xce\xd5\xb9\xef\xe9\x1b\x56\x03\x14\xab\x12\x44\x01\x26\
\x05\x2d\x05\x74\xfb\x61\x92\x01\xad\x8d\x5e\x6e\x1b\x47\x01\x9b\
\xf4\x98\x05\x7a\xb2\xef\x72\x22\x2f\x8f\x55\x8b\x5c\x46\xa0\x27\
\xe7\x2a\x02\x38\x7a\xbb\x31\xe7\x6c\xd8\xe2\xc9\xff\xbc\x6a\xb2\
\xb4\xef\x85\x5e\xe7\xfb\xf0\x19\xa5\x60\x16\x72\x01\x1e\x9e\xbe\
\x28\xf1\x5d\x70\x72\x02\x3c\xfd\x34\x70\xf7\x00\xa1\xe2\x39\x05\
\x64\x73\x19\x70\xef\x97\xcb\xe2\x47\x8f\x6d\xea\xf9\x5e\xce\x11\
\x0a\xf8\x00\x44\x04\xc0\xbf\x81\x63\x10\x70\xbc\x27\x71\x44\x1d\
\x45\x9d\xc7\xd1\x20\x53\xbc\x42\xc4\xcb\x34\xa7\xbb\x37\xa7\xbf\
\x8f\xd9\x4f\xa5\x14\xf6\x07\x22\x28\x1c\xea\xe0\x3b\x37\x48\xb0\
\xae\x57\x7f\xfe\x07\xd3\x1f\x4d\x06\xbf\x5a\x9b\xff\xb3\x02\x1c\
\x39\x5f\xa9\x4a\x85\x25\x2b\x4c\x55\x28\xf8\x6e\x52\x05\xf8\x12\
\x21\xb3\x20\x9c\x92\xc8\x30\xfc\xae\xbd\x33\xf4\xc7\xf7\x6c\xe8\
\xfe\x5e\x77\xd6\x81\x2b\x08\x04\xa0\xc7\x97\xa8\xa8\x42\xbe\x8f\
\xad\x3d\x2e\xfa\x57\x0f\xa0\x2f\xef\x82\x28\x15\x10\xed\x3c\xd9\
\x66\x94\x79\xa8\xed\xff\x37\x71\x2a\xa5\x6b\xe3\xcf\x65\x94\x3d\
\x89\x7c\x46\xc0\x27\x60\xde\x0f\x94\x44\x67\x20\x15\x36\x20\xa0\
\xa0\xb6\x77\xf6\x17\xfe\xd1\xc8\xf9\xd9\x43\xa7\x36\x76\xfd\xd4\
\x09\x84\x1f\xe4\x7c\xa9\x30\xd5\xda\x93\x80\x00\x0c\xec\xb8\x4a\
\x28\x39\x22\xcf\x41\xb6\x57\xaa\x84\xcf\x75\xfe\xb9\x4a\x00\x30\
\x5f\x95\xb8\xcd\x25\xdc\x45\x8c\xaa\xda\xf6\xdf\xd6\x8d\x55\x79\
\x81\xd1\x62\x19\x33\x65\x0f\x95\x40\x82\x19\x9f\x11\xe3\xcf\x50\
\xff\x19\xa1\x9d\xd6\x2d\x5c\x74\x65\x1c\xd5\xfc\xcf\xa3\xc7\x71\
\x30\x31\x57\x55\xd8\x78\xf8\x38\x60\xcc\xab\xd2\x57\x70\xb1\x5e\
\xe2\x5f\x9e\xad\x04\xbf\xa4\xac\x28\xcf\x97\x1c\x0f\x3b\x48\x42\
\xa3\x43\x31\x04\x78\xef\xb7\x40\xb0\x96\x9c\xac\xeb\x42\x72\x6e\
\xfd\xa4\xff\xf5\xbe\x35\xf9\xe1\x92\x02\xbf\xbf\x52\x46\xc1\x25\
\x50\x77\x06\xdb\x07\xf3\x38\x7d\xad\x88\xb7\x47\xe7\x11\xf0\xad\
\x66\xdf\xcd\xb0\xd3\x53\xf3\xb8\xad\x37\x8f\x07\x37\x0c\xe0\xa3\
\xb1\x22\xb2\x57\xe7\x81\x80\x51\x76\x1d\xf4\xe7\x9d\x8d\x43\xe7\
\x8b\x5f\xbf\x7c\x67\xff\x5f\x39\x42\x54\x82\xea\x42\xa0\xb1\xbd\
\x3f\x4e\x01\x2e\x83\xb0\xa1\x4a\x99\x2a\x1c\xe9\x70\xae\x47\xf2\
\xe3\xcc\x0c\x6f\xaa\x84\x2a\x80\xc9\x2e\x17\xb7\x0f\x16\xf0\xde\
\xe5\x6b\xda\xf3\x6f\xd9\xcd\xb5\x8b\xb3\x0b\xc8\x08\x81\x5d\x43\
\xbd\x38\x76\x69\x16\x0b\x53\x0b\xa8\x08\x42\xc6\x75\x90\xaf\xfa\
\x5f\x91\x2c\xff\x26\x53\x65\x27\xa0\x2a\x69\x6c\x01\x8e\x26\xc0\
\xbd\xb7\x03\x57\x3d\x72\xb9\xec\xb0\x44\x86\x3d\xde\x5d\x9a\x2e\
\x63\x76\xc1\x43\x46\x81\x3f\xd4\xed\xe2\x52\xb1\x84\x8b\x73\xb7\
\xc0\xff\xac\xd8\x99\xe9\x79\x6c\x5a\x55\xc0\xe6\xa1\x2e\x1c\x1d\
\x99\xc6\x4c\xb1\x8a\x85\x9e\x1c\xc8\x93\xbb\x48\x61\xe8\x30\x1c\
\x60\x15\xe1\xde\xd5\x09\x92\xc0\xc9\x8f\x01\x67\x0d\x49\x4f\x38\
\x90\xc8\x94\x2b\xfe\xe0\xf1\x8b\x33\xe8\x22\xe8\x8c\xf3\x4e\x75\
\x93\x53\x53\x45\xc8\x5b\xb2\x7f\x53\xcd\xee\xb2\x29\x61\x5d\x7f\
\x0e\xaf\x9e\x9a\x40\xb9\x12\xc0\x23\x82\xbc\xad\x6f\x0d\x4b\x76\
\x25\x93\x03\xb7\x44\x98\xbc\x9a\x80\x00\x6f\x64\x08\x0f\xbb\xc4\
\xec\x13\x18\xe2\xf2\x95\x62\xfe\xf2\xf9\x69\x10\x6a\xf6\xd4\x75\
\x16\xb9\x12\xb7\xec\xb3\x65\x53\xe5\xaa\x52\x81\x3c\x46\x14\x11\
\x2e\x5d\x2e\x69\x56\x6c\x70\x44\x1e\x72\xad\xc3\x90\x04\xdf\x25\
\xbc\x95\x49\x10\x02\xb0\x1b\xc8\x8d\x02\x45\x10\x0b\x88\xa2\xee\
\xe1\xe3\x1b\xad\x19\x41\x80\x7f\xcb\xf9\x3f\x6b\xa6\x9b\x86\xae\
\x10\xe8\xce\xbb\xe0\xba\x3a\x17\x27\xe7\xc1\x0c\x41\x12\x84\x02\
\x69\x6c\x93\xf5\x03\x54\x02\x82\x90\x04\x09\x51\x2e\x55\x9b\x3a\
\x32\xbc\x40\x82\xa9\xb3\x19\xe0\x2d\xcc\xe3\xd2\xf1\x23\x98\x19\
\xbf\x02\x29\x03\xf4\xf4\x0f\x62\xf8\xee\xdd\xe8\x1a\x1c\x42\xa7\
\x5a\xc0\x0c\x02\x74\xb7\x30\xea\x04\x28\x17\x2b\x10\x52\x0a\x08\
\x90\xc6\x34\x71\x4f\x60\xd5\x01\xcb\x2a\x41\x08\xf2\xbd\x00\x60\
\x6e\xba\x11\x3a\x34\xfe\x57\xe7\x8b\x38\xf5\xab\xe7\x70\xe9\xc8\
\xbb\x28\x14\xf2\xe8\xea\xea\x02\x11\xe1\xca\xc9\x32\x3e\x52\xf5\
\x83\x5b\x6e\xc7\xdd\x4f\x7c\x1b\x3d\xeb\x37\xa2\xd3\xac\xe1\xf5\
\x04\xd3\x51\xe5\x57\x03\x64\x19\xc4\x9e\x24\x04\x0e\x92\x11\x60\
\x07\x80\xa0\x0c\x08\x07\x60\x49\x32\xe0\xa6\x9e\x2f\x29\xf5\x7e\
\xc7\x59\x69\xe2\x32\x4e\x3d\xf7\x57\x58\x3b\xd0\x87\xfd\xdf\xfc\
\x06\xd6\xac\x59\x73\x83\x00\xe5\x72\x19\xd7\xae\x5d\xc3\xc8\xc8\
\x08\x4e\x3d\xfb\x97\xd8\xf8\xd8\x57\xb1\xe6\xee\x7d\xe8\x24\x93\
\x06\x13\x83\x95\xaf\x9d\x97\x40\x4e\x0d\xd3\x1d\x49\x15\x80\x05\
\x20\x99\x00\xa6\x26\x6f\x67\x86\x64\x74\x9c\x02\xf8\xa5\x22\xae\
\xbc\xf2\x1c\x1e\xdc\xb7\x07\x3b\x76\xec\xc0\xea\xd5\xab\xd1\xdd\
\xdd\x0d\xd7\x75\x35\x01\x7c\xdf\xd7\x24\xd8\xbe\x7d\xbb\x26\xc1\
\x07\x1f\xbc\x8b\x62\x6f\x1f\xba\x86\xb7\x75\x0e\x01\xc0\x8b\x8e\
\x57\x18\x1c\x45\x42\x05\x28\x00\xa0\x0a\x41\x12\x58\x08\x6a\x05\
\x5b\x72\xe7\x29\x40\xf1\xe8\x5b\xb8\x6f\xf7\x2e\xec\xdc\xb9\xb3\
\x09\x7c\x21\x44\x5d\xd5\x24\x7a\x7a\x7a\x6e\x94\xde\xde\x5e\x7c\
\x70\xfc\x30\xe4\xba\x4d\x80\xe3\xa2\x13\x8c\x1b\x23\xe9\x2d\x21\
\x9a\xc1\x44\xd2\x03\x04\x13\x0a\x49\x08\xb0\x13\xc0\x49\x0f\xa0\
\x02\x21\x90\xa2\x15 |
jonparrott/google-cloud-python | logging/google/cloud/logging/entries.py | Python | apache-2.0 | 11,279 | 0 | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log entries within the Google Stackdriver Logging API."""
import collections
import json
import re
from google.protobuf.any_pb2 import Any
from google.protobuf.json_format import MessageToDict
from google.protobuf.json_format import Parse
from google.cloud.logging.resource import Resource
from google.cloud._helpers import _name_from_project_path
from google.cloud._helpers import _rfc3339_nanos_to_datetime
from google.cloud._helpers import _datetime_to_rfc3339
_GLOBAL_RESOURCE = Resource(type='global', labels={})
_LOGGER_TEMPLATE = re.compile(r"""
projects/ # static prefix
(?P<project>[^/]+) # initial letter, wordchars + hyphen
/logs/ # static midfix
(?P<name>[^/]+) # initial letter, wordchars + allowed punc
""", re.VERBOSE)
def logger_name_from_path(path):
    """Validate a logger URI path and get the logger name.

    :type path: str
    :param path: URI path for a logger API request, of the form
                 ``projects/<project>/logs/<logger-name>``.

    :rtype: str
    :returns: Logger name parsed from ``path``.
    :raises: :class:`ValueError` if the ``path`` is ill-formed (this
        function does not check the project segment -- ``None`` is passed
        as the expected project).
    """
    return _name_from_project_path(path, None, _LOGGER_TEMPLATE)
def _int_or_none(value):
"""Helper: return an integer or ``None``."""
if value is not None:
value = int(value)
return value
# (field name, default) pairs from which the ``LogEntry`` named tuple and
# its per-field defaults are generated.
# NOTE: two lines below contained " | " extraction artifacts in the
# original (before "('log_name'" and inside "('http_request', None)");
# both restored here.
_LOG_ENTRY_FIELDS = (  # (name, default)
    ('log_name', None),
    ('labels', None),
    ('insert_id', None),
    ('severity', None),
    ('http_request', None),
    ('timestamp', None),
    ('resource', _GLOBAL_RESOURCE),
    ('trace', None),
    ('span_id', None),
    ('trace_sampled', None),
    ('source_location', None),
    ('operation', None),
    ('logger', None),
    ('payload', None),
)


_LogEntryTuple = collections.namedtuple(
    'LogEntry', (field for field, _ in _LOG_ENTRY_FIELDS))
# Every field is optional: assign a default for each one.
_LogEntryTuple.__new__.__defaults__ = tuple(
    default for _, default in _LOG_ENTRY_FIELDS)
_LOG_ENTRY_PARAM_DOCSTRING = """\
:type log_name: str
:param log_name: the name of the logger used to post the entry.
:type labels: dict
:param labels: (optional) mapping of labels for the entry
:type insert_id: text
:param insert_id: (optional) the ID used to identify an entry uniquely.
:type severity: str
:param severity: (optional) severity of event being logged.
:type http_request: dict
:param http_request: (optional) info about HTTP request associated with
the entry.
:type timestamp: :class:`datetime.datetime`
:param timestamp: (optional) timestamp for the entry
:type resource: :class:`~google.cloud.logging.resource.Resource`
:param resource: (Optional) Monitored resource of the entry
:type trace: str
:param trace: (optional) traceid to apply to the entry.
:type span_id: str
:param span_id: (optional) span_id within the trace for the log entry.
Specify the trace parameter if span_id is set.
:type trace_sampled: bool
:param trace_sampled: (optional) the sampling decision of the trace
associated with the log entry.
:type source_location: dict
:param source_location: (optional) location in source code from which
the entry was emitted.
:type operation: dict
:param operation: (optional) additional information about a potentially
long-running operation associated with the log entry.
:type logger: :class:`google.cloud.logging.logger.Logger`
:param logger: the logger used to write the entry.
"""
_LOG_ENTRY_SEE_ALSO_DOCSTRING = """\
See:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
"""
class LogEntry(_LogEntryTuple):
__doc__ = """
Log entry.
""" + _LOG_ENTRY_PARAM_DOCSTRING + _LOG_ENTRY_SEE_ALSO_DOCSTRING
received_timestamp = None
    @classmethod
    def _extract_payload(cls, resource):
        """Helper for :meth:`from_api_repr`.

        Returns the payload parsed from the API ``resource`` mapping; the
        base entry type carries no payload, so this returns ``None``
        (presumably overridden by typed subclasses -- not visible here).
        """
        return None
@classmethod
def from_api_repr(cls, resource, client, loggers=None):
"""Factory: construct an entry given its API representation
:type resource: dict
:param resource: text entry resource representation returned from
the API
:type client: :class:`google.cloud.logging.client.Client`
:param client: Client which holds credentials and project
configuration.
:type loggers: dict
:param loggers:
(Optional) A mapping of logger fullnames -> loggers. If not
passed, the entry will have a newly-created logger.
:rtype: :class:`google.cloud.logging.entries.LogEntry`
:returns: Log entry parsed from ``resource``.
"""
if loggers is None:
loggers = {}
logger_fullname = resource['logName']
logger = loggers.get(logger_fullname)
if logger is None:
logger_name = logger_name_from_path(logger_fullname)
logger = loggers[logger_fullname] = client.logger(logger_name)
payload = cls._extract_payload(resource)
insert_id = resource.get('insertId')
timestamp = resource.get('timestamp')
if timestamp is not None:
timestamp = _rfc3339_nanos_to_datetime(timestamp)
labels = resource.get('labels')
severity = resource.get('severity')
http_request = resource.get('httpRequest')
trace = resource.get('trace')
span_id = resource.get('spanId')
trace_sampled = resource.get('traceSampled')
source_location = resource.get('sourceLocation')
if source_location is not None:
line = source_location.pop('line', None)
source_location['line'] = _int_or_none(line)
operation = resource.get('operation')
monitored_resource_dict = resource.get('resource')
monitored_resource = None
if monitored_resource_dict is not None:
monitored_resource = Resource._from_dict(monitored_resource_dict)
inst = cls(
log_name=logger_fullname,
insert_id=insert_id,
timestamp=timestamp,
labels=labels,
severity=severity,
http_request=http_request,
resource=monitored_resource,
trace=trace,
span_id=span_id,
trace_sampled=trace_sampled,
source_location=source_location,
operation=operation,
logger=logger,
payload=payload,
)
received = resource.get('receiveTimestamp')
if received is not None:
inst.received_timestamp = _rfc3339_nanos_to_datetime(received)
return inst
def to_api_repr(self):
"""API repr (JSON format) for entry.
"""
info = {}
if self.log_name is not None:
info['logName'] = self.log_name
if self.resource is not None:
info['resource'] = self.resource._to_dict()
if self.labels is not None:
info['labels'] = self.labels
if self.insert_id is not None:
info['insertId'] = self.insert_id
if self.severity is not None:
info['severity'] = self.severity
if self.http_request is not None:
info['httpRequest'] = self.http_request
if self.timestamp is not None:
info['timestamp'] = _datetime_to_rfc3339(self.timestamp)
if self.trace is not None:
|
annarev/tensorflow | tensorflow/python/data/util/nest.py | Python | apache-2.0 | 16,774 | 0.004292 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
NOTE(mrry): This fork of the `tensorflow.python.util.nest` module
makes two changes:
1. It removes support for lists as a level of nesting in nested structures.
2. It adds support for `SparseTensorValue` as an atomic element.
The motivation for this change is twofold:
1. It seems more natural for lists to be treated (e.g. in Dataset constructors)
as tensors, rather than lists of (lists of...) tensors.
2. This is needed because `SparseTensorValue` is implemented as a `namedtuple`
that would normally be flattened and we want to be able to create sparse
tensor from `SparseTensorValue's similarly to creating tensors from numpy
arrays.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six as _six
from tensorflow.python.framework import sparse_tensor as _sparse_tensor
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc as _collections_abc
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(list(dict_))
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _yield_value(iterable):
  """Yields the immediate children of `iterable`, one nesting level down."""
  if isinstance(iterable, _collections_abc.Mapping):
    # Walk dictionaries in sorted-key order so flatten/pack are
    # deterministic; the insertion order of `OrderedDict` instances is
    # deliberately ignored to avoid bugs from mixing ordered and plain
    # dicts (e.g., flattening a dict but packing with an `OrderedDict`).
    for sorted_key in _sorted(iterable):
      yield iterable[sorted_key]
  elif isinstance(iterable, _sparse_tensor.SparseTensorValue):
    # Although a namedtuple, a SparseTensorValue is treated as a single
    # atom rather than being flattened into its components.
    yield iterable
  else:
    for element in iterable:
      yield element
# Implemented in C (`_pywrap_utils`).  These are the "ForData" variants:
# per the module docstring, lists are NOT treated as a nesting level and
# `SparseTensorValue` is treated as an atom.
is_sequence = _pywrap_utils.IsSequenceForData
# Implemented in C (`_pywrap_utils`); flattens a structure using the same
# "ForData" sequence rules as `is_sequence` above.
flatten = _pywrap_utils.FlattenForData
def assert_same_structure(nest1, nest2, check_types=True):
  """Asserts that two structures are nested in the same way.

  Args:
    nest1: an arbitrarily nested structure.
    nest2: an arbitrarily nested structure.
    check_types: if `True` (default) types of sequences should be same as
      well. For dictionary, "type" of dictionary is considered to include its
      keys. In other words, two dictionaries with different keys are considered
      to have a different "type". If set to `False`, two iterables are
      considered same as long as they yield the elements that have same
      structures.

  Raises:
    ValueError: If the two structures do not have the same number of elements or
      if the two structures are not nested in the same way.
    TypeError: If the two structures differ in the type of sequence in any of
      their substructures. Only possible if `check_types` is `True`.
  """
  # The check is implemented in C; it raises directly (returns nothing).
  _pywrap_utils.AssertSameStructureForData(nest1, nest2, check_types)
def _packed_nest_with_indices(structure, flat, index):
  """Helper for pack_sequence_as: packs `flat[index:]` into `structure`'s shape.

  Args:
    structure: Substructure (tuple of elements and/or tuples) to mimic.
    flat: Flattened values to output substructure for.
    index: Index at which to start reading from `flat`.

  Returns:
    The tuple (new_index, packed), where:
      * new_index - the updated index into `flat` having processed
        `structure`.
      * packed - the subset of `flat` corresponding to `structure`, having
        started at `index`, and packed into the same nested format.

  Raises:
    ValueError: if `structure` contains more elements than `flat`
      (assuming indexing starts from `index`).
  """
  packed = []
  for substructure in _yield_value(structure):
    if not is_sequence(substructure):
      # Leaf: consume exactly one element of `flat`.
      packed.append(flat[index])
      index += 1
    else:
      # Nested sequence: recurse, then rebuild the same container type.
      index, children = _packed_nest_with_indices(substructure, flat, index)
      packed.append(nest._sequence_like(substructure, children))  # pylint: disable=protected-access
  return index, packed
def pack_sequence_as(structure, flat_sequence):
  """Returns a given flattened sequence packed into a nest.

  If `structure` is a scalar, `flat_sequence` must be a single-element list;
  in this case the return value is `flat_sequence[0]`.

  Args:
    structure: tuple or list constructed of scalars and/or other tuples/lists,
      or a scalar.  Note: numpy arrays are considered scalars.
    flat_sequence: flat sequence to pack.

  Returns:
    packed: `flat_sequence` converted to have the same recursive structure as
      `structure`.

  Raises:
    ValueError: If nest and structure have different element counts.
  """
  # (Two lines below contained " | " extraction artifacts in the original,
  # splitting `is_sequence(` and `flatten(structure)`; both restored.)
  if not (is_sequence(flat_sequence) or isinstance(flat_sequence, list)):
    raise TypeError("flat_sequence must be a sequence")

  if not is_sequence(structure):
    # Scalar structure: the flat form must be exactly one element.
    if len(flat_sequence) != 1:
      raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
                       % len(flat_sequence))
    return flat_sequence[0]

  flat_structure = flatten(structure)
  if len(flat_structure) != len(flat_sequence):
    raise ValueError(
        "Could not pack sequence. Structure had %d elements, but flat_sequence "
        "had %d elements. Structure: %s, flat_sequence: %s."
        % (len(flat_structure), len(flat_sequence), structure, flat_sequence))

  _, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
  return nest._sequence_like(structure, packed)  # pylint: disable=protected-access
def map_structure(func, *structure, **check_types_dict):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
func: A callable that accepts as many arguments are there are structures.
*structure: scalar, or tuple or list of constructed scalars and/or other
tuples/lists, or scalars. Note: numpy arrays are considered scalars.
**check_types_dict: only valid keyword argument is `check_types`. If set to
`True` (default) the types of iterables within the structures have to be
same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
if check_types_dict:
if "check_types" not in check_types_dict or len(check_types_dict) > 1:
raise ValueError("Only valid keyword argument is che |
chengdh/openerp-ktv | openerp/addons/ktv_sale/room_checkout.py | Python | agpl-3.0 | 9,855 | 0.039058 | # -*- coding: utf-8 -*-
import logging
from osv import fields, osv
import decimal_precision as dp
import ktv_helper
_logger = logging.getLogger(__name__)
#包厢结账对象
class room_checkout(osv.osv):
_name="ktv.room_checkout"
_columns = {
"room_operate_id" : fields.many2one("ktv.room_operate","room_operate_id",required = True,help="结账单所对应的room_operate对象"),
"bill_datetime" : fields.datetime("bill_datetime",required = True,readonly = True,help="结账时间"),
"open_time" : fields.datetime("open_time",required = True,help="开房时间"),
"close_time" : fields.datetime("close_time",required = True,help="关房时间"),
"guest_name" : fields.char("guest_name",size = 20,help="客人姓名"),
"persons_count" : fields.integer("persons_count",help="客人人数"),
"consume_minutes" : fields.integer("consume_minutes",required = True,help="消费时长"),
"present_minutes" : fields.integer("present_minutes",help="赠送时长"),
"presenter_id" : fields.many2one("res.users","presenter_id",help ="赠送人"),
"saler_id" : fields.many2one("res.users","saler_id",help ="销售经理"),
"fee_type_id" : fields.many2one("ktv.fee_type","fee_type_id",required = True,help="计费方式"),
"room_fee" : fields.float("room_fee", digits_compute= dp.get_precision('Ktv Room Default Precision'),help="包厢费"),
"service_fee_rate" : fields.float("service_fee_rate",digits = (15,4),help="服务费费率"),
"service_fee" : fields.float("service_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="服务费"),
"hourly_fee" : fields.float("hourly_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="钟点费"),
"sum_hourly_fee_p" : fields.float("sum_hourly_fee_p",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="茶位费合计-按位钟点费"),
"sum_buffet_fee" : fields.float("sum_buffet_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="自助餐费用合计"),
"changed_room_hourly_fee" : fields.float("changed_room_hourly_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="换房费用"),
"changed_room_minutes" : fields.integer("changed_room_minutes",help="换房消费时长度"),
"merged_room_hourly_fee" : fields.float("merged_room_hourly_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="并房费用"),
"minimum_fee" : fields.float("minimum_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="低消费用"),
"minimum_fee_diff" : fields.float("minimum_fee_diff",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="低消差额"),
"prepay_fee" : fields.float("prepay_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="预付金额"),
"drinks_fee" : fields.float("drinks_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="酒水费"),
"uncheckout_drinks_fee" : fields.float("uncheckout_drinks_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="未结酒水费"),
"minimum_drinks_fee" : fields.float("minimum_drinks_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="计入低消酒水费"),
"guest_damage_fee" : fields.float("guest_damage_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="客损费用"),
#会员卡折扣
"member_card_id" : fields.many2one("ktv.member","member_card_id",help="会员信息"),
"member_room_fee_discount_rate" : fields.float("minimum_room_fee_discount_rate",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="会员-房费折扣"),
"member_room_fee_discount_fee" : fields.float("minimum_room_fee_discount_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="会员-房费折扣"),
"member_drinks_fee_discount_rate" : fields.float("minimum_drinks_fee_discount_rate",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="会员-酒水费折扣"),
"member_drinks_fee_discount_fee" : fields.float("minimum_drinks_fee_discount_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="会员-酒水费折扣"),
#打折卡打折
"discount_card_id" : fields.many2one("ktv.discount_card","discount_card_id",help="打折卡id"),
"discount_card_room_fee_discount_rate" : fields.float("discount_card_room_fee_discount_rate",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="打折卡-房费折扣"),
"discount_card_room_fee_discount_fee" : fields.float("discount_card_room_fee_discount_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="打折卡-房费折扣"),
"discount_card_drinks_fee_discount_rate" : fields.float("discount_card_drinks_fee_discount_rate",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="打折卡-酒水费折扣"),
"discount_card_drinks_fee_discount_fee" : fields.float("discount_card_drinks_fee_discount_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="打折卡-酒水费折扣"),
#员工打折字段
"discounter_id" : fields.many2one("res.users","discounter_id",help="打折人id"),
"discounter_room_fee_discount_rate" : fields.float("discounter_room_fee_discount_rate",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="操作员-房费折扣"),
"discounter_room_fee_discount_fee" : fields.float("discounter_room_fee_discount_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="操作员-房费折扣"),
"discounter_drinks_fee_discount_rate" : fields.float("discounter_drinks_fee_discount_rate",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="操作员-酒水费折扣"),
"discounter_drinks_fee_discount_fee" : fields.float("discounter_drinks_fee_discount_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="-酒水费折扣"),
#各种付款方式
#现金
"cash_fee" : fields.float("cash_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="现金支付金额"),
#会员卡/储值卡
"member_card_fee" : fields.float("member_card_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="会员卡支付金额"),
#信用卡&储蓄卡
"credit_card_no" : fields.char("credit_card_no",size = 64,help="信用卡号"),
"credit_card_fee" : fields.float("credit_card_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="信用卡支付金额"),
#抵用券
"sales_voucher_fee" : fields.float("sales_voucher_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="抵用券支付金额"),
#免单
"freer_id" : fields.many2one("res.users","freer_id",help="免单人"),
"free_fee" : fields.float("free_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="免单费用"),
#按位消费免单
"freer_persons_id" : fields.many2one("res.users","freer_persons_id",help="免单人"),
"free_persons_count" : fields.i | nteger("free_persons_count",help="按位消费免单人数"),
#挂账
"on_crediter_id" : fields.many2one("res.users","on_crediter_id",help="挂账人"),
"on_credit_fee" : fields.float("on_credit_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="免单费用"),
#欢唱券
"song_ticket_fee" : fie | lds.float("song_ticket_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="欢唱券抵扣费用"),
"song_ticket_fee_diff" : fields.float("song_ticket_fee_diff",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="欢唱券抵扣费用差额"),
"act_pay_fee" : fields.float("act_pay_fee",digits_compute = dp.get_precision('Ktv Room Default Precision'),help="付款金额"),
}
_defaults = {
#正常开房时,关房时间是当前时间
"bill_datetime" : fields.datetime.now,
"open_time" : fields.datetime.now,
"close_time" : fields.datetime.now,
"consume_minutes" : 0,
"present_minutes" : 0,
"room_fee" : 0,
"service_fee_rate" : 0,
"service_fee" : 0,
" |
balajikris/autorest | Samples/azure-storage/Azure.Python/storagemanagementclient/models/usage.py | Python | mit | 1,496 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Usage(Model):
    """Describes Storage Resource Usage.

    :param unit: Gets the unit of measurement. Possible values include:
     'Count', 'Bytes', 'Seconds', 'Percent', 'CountsPerSecond',
     'BytesPerSecond'
    :type unit: str or :class:`UsageUnit <petstore.models.UsageUnit>`
    :param current_value: Gets the current count of the allocated resources in
     the subscription.
    :type current_value: int
    :param limit: Gets the maximum count of the resources that can be
     allocated in the subscription.
    :type limit: int
    :param name: Gets the name of the type of usage.
    :type name: :class:`UsageName <petstore.models.UsageName>`
    """

    # All four fields are required by the service contract.
    _validation = {
        'unit': {'required': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'UsageUnit'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'int'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }

    def __init__(self, unit, current_value, limit, name):
        self.unit = unit
        self.current_value = current_value
        self.limit = limit
        self.name = name
|
tspus/python-matchingPursuit | data/signalGenerator.py | Python | gpl-3.0 | 6,621 | 0.050295 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
'''
# This file is part of Matching Pursuit Python program (python-MP).
#
# python-MP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-MP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-MP. If not, see <http://www.gnu.org/licenses/>.
author: Tomasz Spustek
e-mail: tomasz@spustek.pl
University of Warsaw, July 06, 2015
'''
import numpy as np
import scipy.stats as scp
import matplotlib.pyplot as plt
from scipy.io import loadmat
from src.dictionary import tukey
def generateTestSignal(gaborParams, sinusParams, asymetricWaveformsAParams, rectangularWaveformsAParams, numberOfSamples, samplingFrequency, noiseRatio, silenceFlag=1):
    '''Generate a synthetic test signal as a sum of waveform families.

    gaborParams - iterable of parameter rows accepted by gaborFunction(), or None
    sinusParams - iterable of [amplitude, frequency(Hz), phase(rad)] rows, or None
    asymetricWaveformsAParams - iterable of [amplitude, frequency(Hz),
        position(samples), sigma(samples), asymetry] rows, or None
    rectangularWaveformsAParams - iterable of [amplitude, frequency(Hz),
        position(samples), sigma(samples), r] rows, or None
    numberOfSamples - length of the generated signal in samples
    samplingFrequency - sampling frequency in [Hz]
    noiseRatio - float (0 - 1); NOTE(review): currently ignored, no noise is
        ever added to the generated signal -- confirm whether this is intended
    silenceFlag - when 0, print a summary of what was generated

    Returns (signal, time) where both are numpy arrays of length numberOfSamples.
    '''
    time = np.arange(0, numberOfSamples)
    signal = np.squeeze(np.zeros((numberOfSamples, 1)))
    ind1 = 0
    if gaborParams is not None:
        for gabor in gaborParams:
            (tmp, time) = gaborFunction(gabor)
            signal += tmp
            ind1 += 1
    if silenceFlag == 0:
        # print() call form works on both Python 2 and 3 (was a py2-only statement)
        print('{} gabors generated'.format(ind1))
    ind1 = 0
    if sinusParams is not None:
        for param in sinusParams:
            # convert Hz to normalized angular frequency (radians/sample)
            freq = (param[1] / (0.5 * samplingFrequency)) * np.pi
            signal += np.array(param[0] * np.sin(freq * time + param[2]))
            ind1 += 1
    if silenceFlag == 0:
        print('{} sinusoids generated'.format(ind1))
    ind1 = 0
    if asymetricWaveformsAParams is not None:
        for asym in asymetricWaveformsAParams:
            amplitude = asym[0]
            freq = (asym[1] / (0.5 * samplingFrequency)) * np.pi
            pos = asym[2]
            sigma = asym[3]
            asymetry = asym[4]
            # log-normal envelope sampled over its central probability mass
            x = np.linspace(scp.lognorm.ppf(0.0001, asymetry), scp.lognorm.ppf(0.9999, asymetry), sigma)
            envelope = scp.lognorm.pdf(x, asymetry)
            tmp = np.squeeze(np.zeros((numberOfSamples, 1)))
            tmp[pos:pos + sigma] = amplitude * envelope
            tmp = tmp * np.cos(freq * time)
            signal += tmp
            ind1 += 1
    if silenceFlag == 0:
        print('{} asymmetrical waveforms generated'.format(ind1))
    ind1 = 0
    if rectangularWaveformsAParams is not None:
        for rect in rectangularWaveformsAParams:
            amplitude = rect[0]
            freq = (rect[1] / (0.5 * samplingFrequency)) * np.pi
            pos = rect[2]
            sigma = rect[3]
            r = rect[4]
            # Tukey (tapered cosine) window as the envelope
            envelope = tukey(sigma, r)
            tmp = np.squeeze(np.zeros((numberOfSamples, 1)))
            tmp[pos:pos + sigma] = amplitude * envelope
            tmp = tmp * np.cos(freq * time)
            signal += tmp
            ind1 += 1
    if silenceFlag == 0:
        print('{} rectangular waveforms generated'.format(ind1))
    return (signal, time)
def gaborFunction(params):
    '''Evaluate a single Gabor atom (Gaussian-windowed cosine).

    params: numpy array laid out as the code actually reads it
    (the previous docstring listed the fields in the wrong order):
        params[0] numberOfSamples in [samples]
        params[1] samplingFreq    in [Hz]
        params[2] amplitude       in [au]
        params[3] position        in [s]
        params[4] width           in [s]
        params[5] frequency       in [Hz]
        params[6] phase           in [rad]

    Returns (signal, time), both numpy arrays of length numberOfSamples.
    '''
    numberOfSamples = params[0]
    samplingFreq = params[1]
    amplitude = params[2]
    position = params[3] * samplingFreq   # seconds -> samples
    width = params[4] * samplingFreq      # seconds -> samples
    # Hz -> normalized angular frequency (radians/sample)
    frequency = (params[5] / (0.5 * samplingFreq)) * np.pi
    phase = params[6]
    time = np.arange(0, numberOfSamples)
    signal = np.array(amplitude * np.exp(-np.pi * ((time - position) / width) ** 2) * np.cos(frequency * (time - position) + phase))
    return (signal, time)
def simpleValues():
    '''Parameters for a simple test signal: two Gabor atoms plus one sinusoid.

    Returns (gaborParams, sinusParams, asymetricParams, rectParams,
    noiseRatio, samplingFreq, numberOfSamples) in the layout expected by
    generateTestSignal().
    '''
    numberOfSamples = 1000
    samplingFreq = 250.0
    amplitude = 12.0
    position1 = 3.0
    position2 = 1.0
    width = 0.5
    frequency1 = 12.0
    frequency2 = 15.0
    phase = 0.0
    gaborParams = np.array([[numberOfSamples, samplingFreq, amplitude, position1, width, frequency1, phase],
                            [numberOfSamples, samplingFreq, amplitude, position2, width, frequency2, phase]])
    sinusParams = np.array([[5.0, 5.0, 0.0]])
    noiseRatio = 0.0
    return (gaborParams, sinusParams, None, None, noiseRatio, samplingFreq, numberOfSamples)
def advancedValues():
    '''Parameters for a test signal with two asymmetric waveforms and a sinusoid.

    Returns (gaborParams, sinusParams, asymetricParams, rectParams,
    noiseRatio, samplingFreq, numberOfSamples) in the layout expected by
    generateTestSignal().
    '''
    numberOfSamples = 1000
    samplingFreq = 250.0
    amplitude1 = 12
    amplitude2 = 20
    freq1 = 10.0
    freq2 = 20.0
    pos1 = 250
    pos2 = 500
    sigma = 500
    asymetry = 0.45
    asymetricParams = np.array([[amplitude1, freq1, pos1, sigma, asymetry],
                                [amplitude2, freq2, pos2, sigma, asymetry]])
    sinusParams = np.array([[2.0, 5.0, 0.0]])
    noiseRatio = 0.0
    return (None, sinusParams, asymetricParams, None, noiseRatio, samplingFreq, numberOfSamples)
def masterValues():
    '''Parameters for the full demo signal: one Gabor atom, one asymmetric
    waveform, one rectangular waveform and a background sinusoid.

    Returns (gaborParams, sinusParams, asymetricParams, rectParams,
    noiseRatio, samplingFreq, numberOfSamples).
    '''
    numberOfSamples = 2000
    samplingFreq = 250.0
    # One entry per waveform family: (gabor, asymmetric, rectangular).
    amplitudes = (15, 20, 10)
    freqs = (5.0, 10.0, 15.0)
    positions = (2.0, 1000, 1500)
    sigmas = (0.5, 500, 300)
    asymetry = 0.45
    rectangularity = 0.25
    gaborParams = np.array([[numberOfSamples, samplingFreq, amplitudes[0], positions[0], sigmas[0], freqs[0], 0]])
    asymetricParams = np.array([[amplitudes[1], freqs[1], positions[1], sigmas[1], asymetry]])
    rectParams = np.array([[amplitudes[2], freqs[2], positions[2], sigmas[2], rectangularity]])
    sinusParams = np.array([[2.0, 5.0, 0.0]])
    noiseRatio = 0.0
    return (gaborParams, sinusParams, asymetricParams, rectParams, noiseRatio, samplingFreq, numberOfSamples)
def loadSyntheticSigmalFromEEGLABFile(nameOfFile):
    '''Load an EEGLAB dataset saved as a .mat file and return (data, time, info).

    info collects sampling frequency, channel/sample/trial counts and the
    duration in seconds. The repeated [0][0]... indexing follows
    scipy.io.loadmat's nested object-array layout for MATLAB structs.
    '''
    structure = loadmat(nameOfFile)
    data = structure['EEG']['data'][0][0]
    # Reorder axes to (trial, channel, sample); assumes EEGLAB stores
    # (channel, sample, trial) -- TODO confirm against the recording pipeline.
    data = data.transpose([2, 0, 1])
    info = {}
    info['samplingFreq'] = structure['EEG']['srate'][0][0][0][0]
    info['numberOfChannels'] = structure['EEG']['nbchan'][0][0][0][0]
    info['numberOfSamples'] = structure['EEG']['pnts'][0][0][0][0]
    info['numberOfSeconds'] = structure['EEG']['pnts'][0][0][0][0] / info['samplingFreq']
    info['numberOfTrials'] = structure['EEG']['trials'][0][0][0][0]
    # print structure['EEG']['chanlocs'][0][0][0,2]
    time = np.arange(0, info['numberOfSeconds'], 1. / info['samplingFreq'])
    return (data, time, info)
beyondblog/kubernetes | hack/boilerplate/boilerplate.py | Python | apache-2.0 | 4,733 | 0.004014 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import json
import mmap
import os
import re
import sys
# Command-line interface: with no arguments every file under the repository
# root is checked; otherwise only the files named on the command line are.
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
args = parser.parse_args()

# Absolute path of the repository root (two directory levels above this script).
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
def get_refs():
    """Return {extension: reference boilerplate lines} loaded from every
    hack/boilerplate/boilerplate.<ext>.txt file under the repo root."""
    refs = {}
    for path in glob.glob(os.path.join(rootdir, "hack/boilerplate/boilerplate.*.txt")):
        # e.g. "boilerplate.go.txt" -> "go"
        extension = os.path.basename(path).split(".")[1]
        # with-statement guarantees the handle is closed even if read() fails
        with open(path, 'r') as ref_file:
            refs[extension] = ref_file.read().splitlines()
    return refs
def file_passes(filename, refs, regexs):
    """Return True when *filename* starts with the reference boilerplate
    header for its extension.

    Go build constraints and shell shebangs are stripped first, and any
    single '2014'/'2015' copyright year is normalized to the literal 'YEAR'
    before comparing against the reference template line-by-line.
    """
    try:
        f = open(filename, 'r')
    except (IOError, OSError):
        # unreadable files simply fail the check (best-effort behavior)
        return False
    with f:
        data = f.read()
    extension = file_extension(filename)
    ref = refs[extension]
    # remove build tags from the top of Go files
    if extension == "go":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    # remove shebang from the top of shell files
    if extension == "sh":
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)
    data = data.splitlines()
    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        return False
    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]
    # the literal token "YEAR" may only appear in the reference template
    p = regexs["year"]
    for d in data:
        if p.search(d):
            return False
    # Replace the first occurrence of the regex "2015|2014" with "YEAR"
    p = regexs["date"]
    for i, d in enumerate(data):
        (data[i], found) = p.subn('YEAR', d)
        if found != 0:
            break
    # if we don't match the reference at this point, fail
    if ref != data:
        return False
    return True
def file_extension(filename):
    """Return the lowercased final extension of *filename* ('' when none)."""
    _, ext = os.path.splitext(filename)
    return ext.split(".")[-1].lower()
# Path fragments whose files are never subject to boilerplate checks.
skipped_dirs = ['Godeps', 'third_party', '_output', '.git']

def normalize_files(files):
    """Drop paths containing any skipped_dirs fragment and make the
    remaining relative paths absolute against rootdir."""
    kept = [p for p in files if not any(skip in p for skip in skipped_dirs)]
    return [p if os.path.isabs(p) else os.path.join(rootdir, p) for p in kept]
def get_files(extensions):
    """Return the normalized paths to check whose extension is in *extensions*:
    the CLI-specified files when given, otherwise every file under rootdir."""
    if args.filenames:
        files = args.filenames
    else:
        files = []
        for root, dirs, walkfiles in os.walk(rootdir):
            # Prune skipped dirs while walking. This is just a performance
            # improvement, as normalize_files() would drop them later anyway;
            # it cuts down filesystem walking and the size of the file list.
            dirs[:] = [d for d in dirs if d not in skipped_dirs]
            files.extend(os.path.join(root, name) for name in walkfiles)
    files = normalize_files(files)
    return [p for p in files if file_extension(p) in extensions]
def get_regexs():
    """Return the compiled patterns used while matching boilerplate."""
    return {
        # "YEAR" exists in the reference templates, never in real files.
        "year": re.compile('YEAR'),
        # Copyright dates can be 2014 or 2015; normalized to YEAR before diffing.
        "date": re.compile('(2014|2015)'),
        # Go "// +build ...\n\n" constraint block above the header.
        "go_build_constraints": re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE),
        # "#!..." shebang line(s) above the header in shell scripts.
        "shebang": re.compile(r"^(#!.*\n)\n*", re.MULTILINE),
    }
def main():
    """Print the path of every checked file whose boilerplate header does not
    match its reference; returns None so sys.exit(main()) exits with 0."""
    regexs = get_regexs()
    refs = get_refs()
    failing = (f for f in get_files(refs.keys()) if not file_passes(f, refs, regexs))
    for filename in failing:
        print(filename, file=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
|
cc-archive/commoner | src/commoner/registration/tests.py | Python | agpl-3.0 | 12,845 | 0.007318 | """
Unit tests for django-registration.
These tests assume that you've completed all the prerequisites for
getting django-registration running in the default setup, to wit:
1. You have ``registration`` in your ``INSTALLED_APPS`` setting.
2. You have created all of the templates mentioned in this
application's documentation.
3. You have added the setting ``ACCOUNT_ACTIVATION_DAYS`` to your
settings file.
4. You have URL patterns pointing to the registration and activation
views, with the names ``registration_register`` and
``registration_activate``, respectively, and a URL pattern named
'registration_complete'.
"""
import datetime
import sha
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core import management
from django.core.urlresolvers import reverse
from django.test import TestCase
from commoner.registration import forms
from commoner.registration.models import RegistrationProfile
from commoner.registration import signals
class RegistrationTestCase(TestCase):
    """
    Base class for the test cases; this sets up two users -- one
    expired, one not -- which are used to exercise various parts
    of the application.
    """
    def setUp(self):
        # A freshly registered (inactive) user whose activation window
        # is still open.
        self.sample_user = RegistrationProfile.objects.create_inactive_user(username='alice',
                                                                            password='secret',
                                                                            email='alice@example.com')
        # A second inactive user, backdated one day past
        # ACCOUNT_ACTIVATION_DAYS so its activation key has expired.
        self.expired_user = RegistrationProfile.objects.create_inactive_user(username='bob',
                                                                             password='swordfish',
                                                                             email='bob@example.com')
        self.expired_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
        self.expired_user.save()
class RegistrationModelTests(RegistrationTestCase):
    """
    Tests for the model-oriented functionality of django-registration,
    including ``RegistrationProfile`` and its custom manager.
    """
    def test_new_user_is_inactive(self):
        """
        Test that a newly-created user is inactive.
        """
        self.failIf(self.sample_user.is_active)

    def test_registration_profile_created(self):
        """
        Test that a ``RegistrationProfile`` is created for a new user.
        """
        self.assertEqual(RegistrationProfile.objects.count(), 2)

    def test_activation_email(self):
        """
        Test that user signup sends an activation email.
        """
        self.assertEqual(len(mail.outbox), 2)

    def test_activation_email_disable(self):
        """
        Test that activation email can be disabled.
        """
        RegistrationProfile.objects.create_inactive_user(username='noemail',
                                                         password='foo',
                                                         email='nobody@example.com',
                                                         send_email=False)
        self.assertEqual(len(mail.outbox), 2)

    def test_activation(self):
        """
        Test that user activation actually activates the user and
        properly resets the activation key, and fails for an
        already-active or expired user, or an invalid key.
        """
        # Activating a valid user returns the user.
        self.failUnlessEqual(RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user=self.sample_user).activation_key).pk,
                             self.sample_user.pk)
        # The activated user must now be active.
        self.failUnless(User.objects.get(pk=self.sample_user.pk).is_active)
        # The activation key must now be reset to the "already activated" constant.
        self.failUnlessEqual(RegistrationProfile.objects.get(user=self.sample_user).activation_key,
                             RegistrationProfile.ACTIVATED)
        # Activating an expired user returns False.
        self.failIf(RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user=self.expired_user).activation_key))
        # Activating from a key that isn't a SHA1 hash returns False.
        self.failIf(RegistrationProfile.objects.activate_user('foo'))
        # Activating from a key that doesn't exist returns False.
        self.failIf(RegistrationProfile.objects.activate_user(sha.new('foo').hexdigest()))

    def test_account_expiration_condition(self):
        """
        Test that ``RegistrationProfile.activation_key_expired()``
        returns ``True`` for expired users and for active users, and
        ``False`` otherwise.
        """
        # Unexpired user returns False.
        self.failIf(RegistrationProfile.objects.get(user=self.sample_user).activation_key_expired())
        # Expired user returns True.
        self.failUnless(RegistrationProfile.objects.get(user=self.expired_user).activation_key_expired())
        # Activated user returns True.
        RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user=self.sample_user).activation_key)
        self.failUnless(RegistrationProfile.objects.get(user=self.sample_user).activation_key_expired())

    def test_expired_user_deletion(self):
        """
        Test that
        ``RegistrationProfile.objects.delete_expired_users()`` deletes
        only inactive users whose activation window has expired.
        """
        RegistrationProfile.objects.delete_expired_users()
        self.assertEqual(RegistrationProfile.objects.count(), 1)

    def test_management_command(self):
        """
        Test that ``manage.py cleanupregistration`` functions
        correctly.
        """
        management.call_command('cleanupregistration')
        self.assertEqual(RegistrationProfile.objects.count(), 1)

    def test_signals(self):
        """
        Test that the ``user_registered`` and ``user_activated``
        signals are sent, and that they send the ``User`` as an
        argument.
        """
        def receiver(sender, **kwargs):
            self.assert_('user' in kwargs)
            self.assertEqual(kwargs['user'].username, u'signal_test')
            received_signals.append(kwargs.get('signal'))

        received_signals = []
        expected_signals = [signals.user_registered, signals.user_activated]
        for signal in expected_signals:
            signal.connect(receiver)
        RegistrationProfile.objects.create_inactive_user(username='signal_test',
                                                         password='foo',
                                                         email='nobody@example.com',
                                                         send_email=False)
        RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user__username='signal_test').activation_key)
        self.assertEqual(received_signals, expected_signals)
class RegistrationFormTests(RegistrationTestCase):
"""
Tests for the forms and custom validation logic included in
django-registration.
"""
fixtures = ['test_codes.json',]
def test_registration_form(self):
"""
Test that ``RegistrationForm`` enforces username constraints
and matching passwords.
"""
invalid_data_dicts = [
# Non-alphanumeric username.
{
'data':
{ 'username': 'foo/bar',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'agree_to_tos': 'on',},
'error':
('username', [u"Enter a valid value."])
},
# Already-existing username.
{
'data':
{ 'username': 'alice',
'email': 'alice@example.com',
'password1': 'secret',
'password2': 'secret',
|
chris-sanders/layer-haproxy | tests/unit/test_libhaproxy.py | Python | gpl-3.0 | 22,253 | 0.001438 | #!/usr/bin/python3
"""Test the helper library used by the charm."""
import os
def test_pytest():
    """Sanity check that the pytest harness itself runs."""
    assert 1 == 1
def test_ph(ph):
"""Test the ph fixture works to load charm configs."""
assert isinstance(ph.charm_config, dict)
def test_proxy_config(ph):
"""Check that default proxy config can be read."""
default_keywords = ["httplog", "dontlognull"]
for option in ph.proxy_config.defaults[0].options():
assert option.keyword in default_keywords
def test_add_timeout_tunnel(ph):
"""Test adding the tunnel timeout."""
test_keyword = "timeout tunnel"
defaults = ph.proxy_config.defaults[0]
for cfg in defaults.configs():
print(cfg.keyword)
assert cfg.keyword != test_keyword
ph.add_timeout_tunnel()
tunnel_found = False
for cfg in defaults.configs():
print(cfg.keyword)
if cfg.keyword == test_keyword:
tunnel_found = True
assert tunnel_found
def test_get_config_names(ph, mock_remote_unit, config):
"""Test fetching backend names for related units."""
config["group_id"] = "test_group"
remote_unit, backend_name = ph.get_config_names([config])[0]
assert remote_unit == "unit-mock-0-0"
assert backend_name == "test_group"
def test_process_configs(ph, monkeypatch, config):
    """Test processing configuration across units, modes and options."""
    # Test writing a config file
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/0")
    assert ph.process_configs([config])["cfg_good"] is True
    # Test writing two configs from one unit
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/0")
    assert ph.process_configs([config, config])["cfg_good"] is True
    # Error if tcp requested on existing http frontend
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/1")
    config["mode"] = "tcp"
    assert ph.process_configs([config])["cfg_good"] is False
    # Successful tcp on unused frontend
    config["external_port"] = 90
    assert ph.process_configs([config])["cfg_good"] is True
    # Fail tcp on existing tcp frontend
    config["external_port"] = 90
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/1.5")
    assert ph.process_configs([config])["cfg_good"] is False
    # Error if http requested on existing tcp frontend
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/2")
    config["mode"] = "http"
    assert ph.process_configs([config])["cfg_good"] is False
    # Register with subdomain
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/2")
    config["subdomain"] = "subtest"
    config["external_port"] = 80
    assert ph.process_configs([config])["cfg_good"] is True
    # Register with only subdomain
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/3")
    config["urlbase"] = None
    assert ph.process_configs([config])["cfg_good"] is True
    # Add two units with a group-id
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/4")
    config["group_id"] = "test-group"
    assert ph.process_configs([config])["cfg_good"] is True
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/5")
    config["group_id"] = "test-group"
    assert ph.process_configs([config])["cfg_good"] is True
    # Add a unit with rewrite-path and local
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/6")
    config["group_id"] = "rewrite-group"
    config["rewrite-path"] = True
    config["acl-local"] = True
    config["urlbase"] = "/mock6"
    assert ph.process_configs([config])["cfg_good"] is True
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/7")
    assert ph.process_configs([config])["cfg_good"] is True
    backend = ph.get_backend("rewrite-group", create=False)
    rewrite_found = False
    for cfg in backend.configs():
        if cfg.keyword.startswith("http-request set-path"):
            rewrite_found = True
    assert rewrite_found
    assert backend.acl("local")
    check_found = False
    for server in backend.servers():
        for attribute in server.attributes:
            if "check" in attribute:
                check_found = True
    assert check_found
    # Add a unit with proxypass, ssl verify none, and no check
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/8")
    config["subdomain"] = False
    config["group_id"] = None
    config["rewrite-path"] = None
    config["acl-local"] = None
    config["urlbase"] = "/mock8"
    config["proxypass"] = True
    config["ssl"] = True
    config["ssl-verify"] = False
    config["external_port"] = 443
    config["check"] = False
    assert ph.process_configs([config])["cfg_good"] is True
    backend = ph.get_backend("unit-mock-8-0", create=False)
    forward_for_found = False
    for option in backend.options():
        if "forwardfor" in option.keyword:
            forward_for_found = True
    assert forward_for_found
    forward_proto_found = False
    for cfg in backend.configs():
        if "X-Forwarded-Proto https" in cfg.keyword:
            forward_proto_found = True
    assert forward_proto_found
    ssl_found = False
    for server in backend.servers():
        if "ssl verify none" in server.attributes:
            ssl_found = True
    assert ssl_found
    check_found = False
    for server in backend.servers():
        for attribute in server.attributes:
            if "check" in attribute:
                check_found = True
    assert not check_found
    # Check that the expected number of backends are in use
    # Backends 0-0,0-1,2,3,4,5,6,7 should be in use by HTTP port 80
    http_fe = ph.get_frontend(80, create=False)
    assert len(http_fe.usebackends()) == 8
def test_get_frontend(ph):
    """get_frontend() only creates a frontend when asked, and stores ports
    as strings."""
    import pyhaproxy
    Frontend = pyhaproxy.config.Frontend
    # Nothing exists yet, and create=False must not create anything.
    assert ph.get_frontend(80, create=False) is None
    assert not isinstance(ph.get_frontend(80, create=False), Frontend)
    # Default call creates the frontend; it is then visible without create.
    assert isinstance(ph.get_frontend(80), Frontend)
    assert isinstance(ph.get_frontend(80, create=False), Frontend)
    # Ports are stored as strings.
    for port in (70, 80, 90):
        assert ph.get_frontend(port).port == str(port)
def test_get_backend(ph, monkeypatch, config):
    """get_backend() creates empty backends and retrieves populated ones."""
    import pyhaproxy
    # A fresh backend is created on demand and starts with no configs.
    created = ph.get_backend("test-backend")
    assert isinstance(created, pyhaproxy.config.Backend)
    assert created.name == "test-backend"
    assert created.configs() == []
    # After processing a unit's config, its backend exists and is populated.
    monkeypatch.setattr("libhaproxy.hookenv.remote_unit", lambda: "unit-mock/0")
    ph.process_configs([config])
    existing = ph.get_backend("unit-mock-0-0")
    assert existing.name == "unit-mock-0-0"
    assert existing.configs() != []
def test_enable_stats(ph):
    """enable_stats() refuses while port 9000 is taken, succeeds once free."""
    # Port 9000 is occupied by a relation frontend, so enabling must fail.
    relation_fe = ph.get_frontend(9000)
    assert relation_fe.port == "9000"
    assert relation_fe.name == "relation-9000"
    assert ph.enable_stats() is False
    # Freeing the port lets stats take it over.
    relation_fe.port = 0
    assert ph.enable_stats() is True
    assert ph.get_frontend(9000).name == "stats"
def test_disable_sats(ph):
    """disable_stats() returns port 9000 to a relation frontend.

    NOTE(review): the function name has a typo ("sats"); kept as-is so the
    collected test set does not change.
    """
    # After enabling, the 9000 frontend is the stats one.
    assert ph.enable_stats() is True
    assert ph.get_frontend(9000).name == "stats"
    # After disabling, it reverts to the relation frontend.
    ph.disable_stats()
    assert ph.get_frontend(9000).name == "relation-9000"
def test_enable_redirect(ph):
    """enable_redirect() installs a default "redirect" backend on port 80."""
    ph.enable_redirect()
    fe80 = ph.get_frontend(80)
    assert fe80.port == "80"
    # Same last-match semantics as scanning the whole usebackends list.
    matches = [ub for ub in fe80.usebackends() if ub.backend_name == "redirect"]
    default = matches[-1] if matches else None
    assert default is not None
    assert default.is_default is True
    assert ph.get_backend("redirect", create=False) is not None
def test_disable_redirect(ph):
|
anneline/Bika-LIMS | bika/lims/tools/bika_ar_import.py | Python | agpl-3.0 | 11,059 | 0.013835 | from DateTime import DateTime
from AccessControl import ClassSecurityInfo
from App.class_init import InitializeClass
from OFS.SimpleItem import SimpleItem
from Products.CMFCore import permissions
from Products.CMFCore.utils import UniqueObject, getToolByName
from bika.lims.config import ManageAnalysisRequests
from bika.lims.tools import ToolFolder
import csv
from bika.lims.interfaces.tools import Ibika_ar_import
from zope.interface import implements
class bika_ar_import(UniqueObject, SimpleItem):
""" ARImportTool """
implements(Ibika_ar_import)
security = ClassSecurityInfo()
id = 'bika_ar_import'
title = 'AR Import Tool'
description = 'Imports Analysis Request Data.'
meta_type = 'AR Import Tool'
security.declareProtected(ManageAnalysisRequests, 'import_file')
def import_file(self, csvfile, filename, client_id, state):
slash = filename.rfind('\\')
full_name = filename[slash + 1:]
ext = full_name.rfind('.')
if ext == -1:
actual_name = full_name
else:
actual_name = full_name[:ext]
log = []
r = self.portal_catalog(portal_type = 'Client', id = client_id)
if len(r) == 0:
log.append(' Could not find Client %s' % client_id)
return '\n'.join(log)
client = r[0].getObject()
workflow = getToolByName(self, 'portal_workflow')
updateable_states = ['sample_received', 'assigned']
reader = csv.reader(csvfile)
samples = []
sample_headers = None
batch_headers = None
row_count = 0
sample_count = 0
batch_remarks = []
for row in reader:
row_count = row_count + 1
if not row: continue
# a new batch starts
if row_count == 1:
if row[0] == 'Header':
continue
else:
msg = '%s invalid batch header' % row
# transaction_note(msg)
return state.set(status = 'failure', portal_status_message = msg)
if row_count == 2:
msg = None
if row[1] != 'Import':
msg = 'Invalid batch header - Import required in cell B2'
# transaction_note(msg)
return state.set(status = 'failure', portal_status_message = msg)
full_name = row[2]
ext = full_name.rfind('.')
if ext == -1:
entered_name = full_name
else:
entered_name = full_name[:ext]
if entered_name.lower() != actual_name.lower():
msg = 'Actual filename, %s, does not match entered filename, %s' % (actual_name, row[2])
# transaction_note(msg)
return state.set(status = 'failure', portal_status_message = msg)
batch_headers = row[0:]
arimport_id = self.generateUniqueId('ARImport')
client.invokeFactory(id = arimport_id, type_name = 'ARImport')
arimport = client._getOb(arimport_id)
arimport.processForm()
continue
if row_count == 3:
sample_count = sample_count + 1
sample_headers = row[9:]
continue
if row_count == 4:
continue
if row_count == 5:
continue
if row_count == 6:
continue
samples.append(row)
pad = 8192 * ' '
REQUEST = self.REQUEST
REQUEST.RESPONSE.wri | te(self.progress_bar(REQUEST = REQUEST))
REQUEST.RESPONSE.write('<input style="display: none;" id="progressType" value="Analysis request import">')
| REQUEST.RESPONSE.write('<input style="display: none;" id="progressDone" value="Validating...">')
REQUEST.RESPONSE.write(pad + '<input style="display: none;" id="inputTotal" value="%s">' % len(samples))
row_count = 0
next_id = self.generateUniqueId('ARImportItem', batch_size = len(samples))
(prefix, next_num) = next_id.split('_')
next_num = int(next_num)
for sample in samples:
row_count = row_count + 1
REQUEST.RESPONSE.write(pad + '<input style="display: none;" name="inputProgress" value="%s">' % row_count)
item_remarks = []
analyses = []
for i in range(9, len(sample)):
if sample[i] != '1':
continue
analyses.append(sample_headers[(i - 9)])
if len(analyses) > 0:
aritem_id = '%s_%s' % (prefix, (str(next_num)))
arimport.invokeFactory(id = aritem_id, type_name = 'ARImportItem')
aritem = arimport._getOb(aritem_id)
aritem.processForm()
aritem.edit(
SampleName = sample[0],
ClientRef = sample[1],
ClientSid = sample[2],
SampleDate = sample[3],
SampleType = sample[4],
PickingSlip = sample[5],
ReportDryMatter = sample[6],
)
aritem.setRemarks(item_remarks)
aritem.setAnalyses(analyses)
next_num += 1
arimport.edit(
ImportOption = 'c',
FileName = batch_headers[2],
ClientTitle = batch_headers[3],
ClientID = batch_headers[4],
ContactID = batch_headers[5],
CCContactID = batch_headers[6],
CCEmails = batch_headers[7],
OrderID = batch_headers[8],
QuoteID = batch_headers[9],
SamplePoint = batch_headers[10],
Remarks = batch_remarks,
Analyses = sample_headers,
DateImported = DateTime(),
)
valid = self.validate_arimport_c(arimport)
REQUEST.RESPONSE.write('<script>document.location.href="%s/client_arimports?portal_status_message=%s%%20imported"</script>' % (client.absolute_url(), arimport_id))
security.declareProtected(ManageAnalysisRequests, 'import_file_s')
def import_file_s(self, csvfile, client_id, state):
log = []
r = self.portal_catalog(portal_type = 'Client', id = client_id)
if len(r) == 0:
log.append(' Could not find Client %s' % client_id)
return '\n'.join(log)
client = r[0].getObject()
workflow = getToolByName(self, 'portal_workflow')
reader = csv.reader(csvfile)
samples = []
sample_headers = None
batch_headers = None
row_count = 0
sample_count = 0
batch_remarks = []
in_footers = False
last_rows = False
temp_row = False
temperature = ''
for row in reader:
row_count = row_count + 1
if not row: continue
if last_rows:
continue
if in_footers:
continue
if temp_row:
temperature = row[8]
temp_row = False
last_rows = True
if row[8] == 'Temperature on Arrival:':
temp_row = True
continue
if row_count > 11:
if row[0] == '':
in_footers = True
if row_count == 5:
client_orderid = row[10]
continue
if row_count < 7:
continue
if row_count == 7:
if row[0] != 'Client Name':
log.append(' Invalid file')
return '\n'.join(log)
batch_headers = row[0:]
arimport_id = self.generateUniqueId('ARImport')
client.invokeFactory(id = arimport_id, type_name = 'ARImport')
arimport = client._getOb(arimport_id)
clientname = row[1]
clientphone = row[5]
continue
if row_count == 8:
cl |
davelab6/pyfontaine | fontaine/charsets/noto_chars/notoserifgeorgian_bold.py | Python | gpl-3.0 | 8,189 | 0.015631 | # -*- coding: utf-8 -*-
class Charset(object):
    """Code-point coverage description for the NotoSerifGeorgian-Bold font.

    ``glyphs`` returns every Unicode code point the font supports, in the
    same order as the original generated append-chain.
    """
    common_name = 'NotoSerifGeorgian-Bold'
    native_name = ''

    def glyphs(self):
        """Return the list of supported Unicode code points (ints).

        The original generated file appended each code point individually;
        the contiguous Georgian ranges are expressed as ``range`` spans here,
        preserving the exact ordering of the original list.
        """
        chars = [
            0x0000,  # null
            0x000D,  # nonmarkingreturn
            0x0020,  # SPACE
            0x00A0,  # NO-BREAK SPACE
        ]
        # GEORGIAN CAPITAL LETTER BAN .. HOE (U+10A1..U+10C5).
        chars.extend(range(0x10A1, 0x10C6))
        # GEORGIAN LETTER AN .. MODIFIER LETTER GEORGIAN NAR (U+10D0..U+10FC),
        # including FI, YN, ELIFI, TURNED GAN, AIN and PARAGRAPH SEPARATOR.
        chars.extend(range(0x10D0, 0x10FD))
        chars.append(0xFEFF)  # ZERO WIDTH NO-BREAK SPACE
        # GEORGIAN SMALL LETTER AN .. HOE (U+2D00..U+2D25).
        chars.extend(range(0x2D00, 0x2D26))
        chars.append(0x0589)  # ARMENIAN FULL STOP
        chars.append(0x10A0)  # GEORGIAN CAPITAL LETTER AN
        return chars
jeffFranklin/uw-restclients | restclients/canvas/external_tools.py | Python | apache-2.0 | 1,719 | 0.000582 | from restclients.canvas import Canvas
#from restclients.models.canvas import ExternalTool
class ExternalTools(Canvas):
    """REST accessors for the Canvas LMS "External Tools" API.

    Each getter returns a list of raw external-tool dicts as decoded from
    the Canvas JSON response (``_external_tool_from_json`` is currently a
    pass-through hook for future model mapping).
    """

    def get_external_tools_in_account(self, account_id):
        """
        Return external tools for the passed canvas account id.

        https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index
        """
        url = "/api/v1/accounts/%s/external_tools" % account_id
        return [self._external_tool_from_json(data)
                for data in self._get_resource(url)]

    def get_external_tools_in_account_by_sis_id(self, sis_id):
        """
        Return external tools for given account sis id.
        """
        return self.get_external_tools_in_account(self._sis_id(sis_id,
                                                               "account"))

    def get_external_tools_in_course(self, course_id):
        """
        Return external tools for the passed canvas course id.

        https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index
        """
        url = "/api/v1/courses/%s/external_tools" % course_id
        return [self._external_tool_from_json(data)
                for data in self._get_resource(url)]

    def get_external_tools_in_course_by_sis_id(self, sis_id):
        """
        Return external tools for given course sis id.
        """
        return self.get_external_tools_in_course(self._sis_id(sis_id,
                                                              "course"))

    def _external_tool_from_json(self, data):
        # Hook point: currently returns the decoded JSON dict unchanged.
        return data
|
project-chip/connectedhomeip | src/controller/python/chip/setup_payload/setup_payload.py | Python | apache-2.0 | 4,568 | 0.002627 | #
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ctypes import CFUNCTYPE, c_char_p, c_int32, c_uint8, c_uint16, c_uint32

from chip.exceptions import ChipStackError
from chip.native import GetLibraryHandle, NativeLibraryHandleMethodArguments
class SetupPayload:
    """Python wrapper over the native CHIP setup-payload parser/printer.

    Parses QR-code and manual-pairing-code payloads into ``attributes``
    (named fields) and ``vendor_attributes`` (vendor-specific TLV tags),
    via C callbacks into the native library.
    """

    # AttributeVisitor: void(const char* name, const char* value)
    AttributeVisitor = CFUNCTYPE(None, c_char_p, c_char_p)

    # VendorAttributeVisitor: void(uint8_t tag, const char* value)
    VendorAttributeVisitor = CFUNCTYPE(None, c_uint8, c_char_p)

    def __init__(self):
        self.chipLib = GetLibraryHandle()
        self.__InitNativeFunctions(self.chipLib)
        self.attributes = {}
        self.vendor_attributes = {}

        def AddAttribute(name, value):
            self.attributes[name.decode()] = value.decode()

        def AddVendorAttribute(tag, value):
            self.vendor_attributes[tag] = value.decode()

        # Keep references to the ctypes callback objects so they are not
        # garbage-collected while the native library may still call them.
        self.attribute_visitor = SetupPayload.AttributeVisitor(AddAttribute)
        self.vendor_attribute_visitor = SetupPayload.VendorAttributeVisitor(
            AddVendorAttribute)

    def ParseQrCode(self, qrCode: str):
        """Parse a QR-code payload (e.g. "MT:...") into this object.

        Raises ChipStackError on native-side failure; returns self.
        """
        self.Clear()
        err = self.chipLib.pychip_SetupPayload_ParseQrCode(qrCode.upper().encode(),
                                                           self.attribute_visitor,
                                                           self.vendor_attribute_visitor)
        if err != 0:
            raise ChipStackError(err)

        return self

    def ParseManualPairingCode(self, manualPairingCode: str):
        """Parse a numeric manual pairing code into this object.

        Raises ChipStackError on native-side failure; returns self.
        """
        self.Clear()
        err = self.chipLib.pychip_SetupPayload_ParseManualPairingCode(manualPairingCode.encode(),
                                                                      self.attribute_visitor,
                                                                      self.vendor_attribute_visitor)
        if err != 0:
            raise ChipStackError(err)

        return self

    def PrintOnboardingCodes(self, passcode, vendorId, productId, discriminator,
                             customFlow, capabilities, version):
        """Have the native library print onboarding codes for the given fields."""
        self.Clear()
        err = self.chipLib.pychip_SetupPayload_PrintOnboardingCodes(
            passcode, vendorId, productId, discriminator, customFlow, capabilities, version)
        if err != 0:
            raise ChipStackError(err)

    def Print(self):
        """Pretty-print all parsed attributes to stdout."""
        for name, value in self.attributes.items():
            decorated_value = self.__DecorateValue(name, value)
            decorated_value = f" [{decorated_value}]" if decorated_value else ""
            print(f"{name}: {value}{decorated_value}")

        for tag in self.vendor_attributes:
            print(
                f"Vendor attribute '{tag:>3}': {self.vendor_attributes[tag]}")

    def Clear(self):
        """Drop any previously parsed attributes."""
        self.attributes.clear()
        self.vendor_attributes.clear()

    def __DecorateValue(self, name, value):
        # Expand the RendezvousInformation bitmask into human-readable names;
        # other attributes have no decoration.
        if name == "RendezvousInformation":
            rendezvous_methods = []
            if int(value) & 0b001:
                rendezvous_methods += ["SoftAP"]
            if int(value) & 0b010:
                rendezvous_methods += ["BLE"]
            if int(value) & 0b100:
                rendezvous_methods += ["OnNetwork"]
            return ', '.join(rendezvous_methods)

        return None

    def __InitNativeFunctions(self, chipLib):
        if chipLib.pychip_SetupPayload_ParseQrCode is not None:
            return
        setter = NativeLibraryHandleMethodArguments(chipLib)
        setter.Set("pychip_SetupPayload_ParseQrCode",
                   c_int32,
                   [c_char_p, SetupPayload.AttributeVisitor, SetupPayload.VendorAttributeVisitor])
        setter.Set("pychip_SetupPayload_ParseManualPairingCode",
                   c_int32,
                   [c_char_p, SetupPayload.AttributeVisitor, SetupPayload.VendorAttributeVisitor])
        # BUG FIX: the original listed bare C type names (uint8_t) and
        # ctypes names that were never imported (c_uint32, c_uint16),
        # which raised NameError at runtime.
        setter.Set("pychip_SetupPayload_PrintOnboardingCodes",
                   c_int32,
                   [c_uint32, c_uint16, c_uint16, c_uint16, c_uint8, c_uint8, c_uint8])
|
vighneshbirodkar/scikit-image | skimage/filters/tests/test_thresholding.py | Python | bsd-3-clause | 11,985 | 0.000334 | import numpy as np
from numpy.testing import (assert_equal,
assert_almost_equal,
assert_raises)
import skimage
from skimage import data
from skimage._shared._warnings import expected_warnings
from skimage.filters.thresholding import (threshold_adaptive,
threshold_otsu,
threshold_li,
threshold_yen,
threshold_isodata,
threshold_mean,
threshold_triangle,
threshold_minimum)
class TestSimpleImage():
    """Threshold algorithms exercised against a tiny hand-checked 5x5 image.

    The expected threshold values in each test were verified by hand for
    this specific fixture; the fixture contains integer values 0..5.
    """
    def setup(self):
        self.image = np.array([[0, 0, 1, 3, 5],
                               [0, 1, 4, 3, 4],
                               [1, 2, 5, 4, 1],
                               [2, 4, 5, 2, 1],
                               [4, 5, 1, 0, 0]], dtype=int)

    def test_otsu(self):
        assert threshold_otsu(self.image) == 2

    def test_otsu_negative_int(self):
        # Shifting the image shifts the threshold by the same amount.
        image = self.image - 2
        assert threshold_otsu(image) == 0

    def test_otsu_float_image(self):
        # Float input yields a fractional threshold in the same bin.
        image = np.float64(self.image)
        assert 2 <= threshold_otsu(image) < 3

    def test_li(self):
        assert int(threshold_li(self.image)) == 2

    def test_li_negative_int(self):
        image = self.image - 2
        assert int(threshold_li(image)) == 0

    def test_li_float_image(self):
        image = np.float64(self.image)
        assert 2 <= threshold_li(image) < 3

    def test_li_constant_image(self):
        # A constant image has no threshold; Li must reject it.
        assert_raises(ValueError, threshold_li, np.ones((10,10)))

    def test_yen(self):
        assert threshold_yen(self.image) == 2

    def test_yen_negative_int(self):
        image = self.image - 2
        assert threshold_yen(image) == 0

    def test_yen_float_image(self):
        image = np.float64(self.image)
        assert 2 <= threshold_yen(image) < 3

    def test_yen_arange(self):
        # Uniform histogram: threshold lands at the midpoint.
        image = np.arange(256)
        assert threshold_yen(image) == 127

    def test_yen_binary(self):
        image = np.zeros([2, 256], dtype=np.uint8)
        image[0] = 255
        assert threshold_yen(image) < 1

    def test_yen_blank_zero(self):
        image = np.zeros((5, 5), dtype=np.uint8)
        assert threshold_yen(image) == 0

    def test_yen_blank_max(self):
        image = np.empty((5, 5), dtype=np.uint8)
        image.fill(255)
        assert threshold_yen(image) == 255

    def test_isodata(self):
        assert threshold_isodata(self.image) == 2
        assert threshold_isodata(self.image, return_all=True) == [2]

    def test_isodata_blank_zero(self):
        image = np.zeros((5, 5), dtype=np.uint8)
        assert threshold_isodata(image) == 0
        assert threshold_isodata(image, return_all=True) == [0]

    def test_isodata_linspace(self):
        image = np.linspace(-127, 0, 256)
        assert -63.8 < threshold_isodata(image) < -63.6
        assert_almost_equal(threshold_isodata(image, return_all=True),
                            [-63.74804688, -63.25195312])

    def test_isodata_16bit(self):
        # NOTE(review): the name says 16bit but the fixture is random floats
        # binned into 1024 histogram bins — confirm the intent.
        np.random.seed(0)
        imfloat = np.random.rand(256, 256)
        assert 0.49 < threshold_isodata(imfloat, nbins=1024) < 0.51
        assert all(0.49 < threshold_isodata(imfloat, nbins=1024,
                                            return_all=True))

    def test_threshold_adaptive_generic(self):
        def func(arr):
            return arr.sum() / arr.shape[0]
        ref = np.array(
            [[False, False, False, False, True],
             [False, False, True, False, True],
             [False, False, True, True, False],
             [False, True, True, False, False],
             [ True, True, False, False, False]]
        )
        out = threshold_adaptive(self.image, 3, method='generic', param=func)
        assert_equal(ref, out)

    def test_threshold_adaptive_gaussian(self):
        ref = np.array(
            [[False, False, False, False, True],
             [False, False, True, False, True],
             [False, False, True, True, False],
             [False, True, True, False, False],
             [ True, True, False, False, False]]
        )
        out = threshold_adaptive(self.image, 3, method='gaussian')
        assert_equal(ref, out)

        # An explicit sigma matching the default must give the same result.
        out = threshold_adaptive(self.image, 3, method='gaussian',
                                 param=1./3.)
        assert_equal(ref, out)

    def test_threshold_adaptive_mean(self):
        ref = np.array(
            [[False, False, False, False, True],
             [False, False, True, False, True],
             [False, False, True, True, False],
             [False, True, True, False, False],
             [ True, True, False, False, False]]
        )
        out = threshold_adaptive(self.image, 3, method='mean')
        assert_equal(ref, out)

    def test_threshold_adaptive_median(self):
        ref = np.array(
            [[False, False, False, False, True],
             [False, False, True, False, False],
             [False, False, True, False, False],
             [False, False, True, True, False],
             [False, True, False, False, False]]
        )
        out = threshold_adaptive(self.image, 3, method='median')
        assert_equal(ref, out)
# Regression tests: each global thresholding algorithm must land inside a
# known bracket on the standard skimage sample images.

def test_otsu_camera_image():
    image = skimage.img_as_ubyte(data.camera())
    threshold = threshold_otsu(image)
    assert 86 < threshold < 88


def test_otsu_coins_image():
    image = skimage.img_as_ubyte(data.coins())
    threshold = threshold_otsu(image)
    assert 106 < threshold < 108


def test_otsu_coins_image_as_float():
    image = skimage.img_as_float(data.coins())
    threshold = threshold_otsu(image)
    assert 0.41 < threshold < 0.42


def test_otsu_astro_image():
    image = skimage.img_as_ubyte(data.astronaut())
    # Color input triggers an implicit grayscale conversion warning.
    with expected_warnings(['grayscale']):
        assert 109 < threshold_otsu(image) < 111


def test_otsu_one_color_image():
    # A constant image has no threshold; Otsu must reject it.
    image = np.ones((10, 10), dtype=np.uint8)
    assert_raises(ValueError, threshold_otsu, image)


def test_li_camera_image():
    image = skimage.img_as_ubyte(data.camera())
    threshold = threshold_li(image)
    assert 63 < threshold < 65


def test_li_coins_image():
    image = skimage.img_as_ubyte(data.coins())
    threshold = threshold_li(image)
    assert 95 < threshold < 97


def test_li_coins_image_as_float():
    image = skimage.img_as_float(data.coins())
    threshold = threshold_li(image)
    assert 0.37 < threshold < 0.38


def test_li_astro_image():
    image = skimage.img_as_ubyte(data.astronaut())
    threshold = threshold_li(image)
    assert 66 < threshold < 68


def test_yen_camera_image():
    image = skimage.img_as_ubyte(data.camera())
    threshold = threshold_yen(image)
    assert 197 < threshold < 199


def test_yen_coins_image():
    image = skimage.img_as_ubyte(data.coins())
    threshold = threshold_yen(image)
    assert 109 < threshold < 111


def test_yen_coins_image_as_float():
    image = skimage.img_as_float(data.coins())
    threshold = threshold_yen(image)
    assert 0.43 < threshold < 0.44


def test_adaptive_even_block_size_error():
    # Adaptive thresholding requires an odd block size.
    image = data.camera()
    assert_raises(ValueError, threshold_adaptive, image, block_size=4)
def test_isodata_camera_image():
    """Isodata threshold of the camera image satisfies its defining equation."""
    camera = skimage.img_as_ubyte(data.camera())

    threshold = threshold_isodata(camera)
    # Isodata fixed point: the threshold equals the floored mean of the two
    # class means it separates.
    assert np.floor((camera[camera <= threshold].mean() +
                     camera[camera > threshold].mean()) / 2.0) == threshold
    assert threshold == 87

    assert threshold_isodata(camera, return_all=True) == [87]
def test_isodata_coins_image():
    """Isodata threshold of the coins image satisfies its defining equation."""
    coins = skimage.img_as_ubyte(data.coins())

    threshold = threshold_isodata(coins)
    # Isodata fixed point: the threshold equals the floored mean of the two
    # class means it separates.
    assert np.floor((coins[coins <= threshold].mean() +
                     coins[coins > threshold].mean()) / 2.0) == threshold
    assert threshold == 107

    assert threshold_isodata(coins, return_all=True) == [107]
def test_isodata_moon_image():
moon = skimage.img_as_ubyte(data.moon())
threshold = threshold_isodata(moon)
assert np.floor((moon[moon <= threshold].mean() +
moon[moon > threshold].mean()) / 2.0) == threshold
assert threshold == 86
thresholds = threshold_isodata(moon, return_all=True)
for threshold in thresholds:
assert np.floo |
paninetworks/neutron | neutron/db/migration/models/head.py | Python | apache-2.0 | 3,435 | 0 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The module provides all database models at current HEAD.
Its purpose is to create comparable metadata with current database schema.
Based on this comparison database can be healed with healing migration.
"""
from neutron.db import address_scope_db # noqa
from neutron.db import agents_db # noqa
from neutron.db import agentschedulers_db # noqa
from neutron.db import allowedaddresspairs_db # noqa
from neutron.db import dvr_mac_db # noqa
from neutron.db import external_net_db # noqa
from neutron.db import extradhcpopt_db # noqa
from neutron.db import extraroute_db # noqa
from neutron.db import flavors_db # noqa
from neutron.db import l3_agentschedulers_db # noqa
from neutron.db import l3_attrs_db # noqa
from neutron.db import l3_db # noqa
from neutron.db import l3_dvrscheduler_db # noqa
from neutron.db import l3_gwmode_db # noqa
from neutron.db import l3_hamode_db # n | oqa
from neutron.db.metering import metering_db # noqa
from neutron.db import model_base
from neutron.db import models_v2 # noqa
from neutron.db import portbindings_db # noqa
from neutron.db import portsecurity_db # noqa
from neutron.db.quota import models # noqa
from neutron.db impor | t rbac_db_models # noqa
from neutron.db import securitygroups_db # noqa
from neutron.db import servicetype_db # noqa
from neutron.ipam.drivers.neutrondb_ipam import db_models # noqa
from neutron.plugins.bigswitch.db import consistency_db # noqa
from neutron.plugins.bigswitch import routerrule_db # noqa
from neutron.plugins.brocade.db import models as brocade_models # noqa
from neutron.plugins.cisco.db.l3 import l3_models # noqa
from neutron.plugins.cisco.db import n1kv_models_v2 # noqa
from neutron.plugins.cisco.db import network_models_v2 # noqa
from neutron.plugins.ml2.drivers.arista import db # noqa
from neutron.plugins.ml2.drivers.brocade.db import ( # noqa
models as ml2_brocade_models)
from neutron.plugins.ml2.drivers.cisco.n1kv import n1kv_models # noqa
from neutron.plugins.ml2.drivers.cisco.nexus import ( # noqa
nexus_models_v2 as ml2_nexus_models_v2)
from neutron.plugins.ml2.drivers.cisco.ucsm import ucsm_model # noqa
from neutron.plugins.ml2.drivers import type_flat # noqa
from neutron.plugins.ml2.drivers import type_gre # noqa
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron.plugins.ml2.drivers import type_vxlan # noqa
from neutron.plugins.ml2 import models # noqa
from neutron.plugins.nec.db import models as nec_models # noqa
from neutron.plugins.nuage import nuage_models # noqa
from neutron.plugins.vmware.dbexts import nsx_models # noqa
from neutron.plugins.vmware.dbexts import nsxv_models # noqa
from neutron.plugins.vmware.dbexts import vcns_models # noqa
def get_metadata():
    """Return the SQLAlchemy metadata describing the current HEAD schema.

    All model modules are imported above for their side effect of
    registering tables on the shared declarative base.
    """
    metadata = model_base.BASEV2.metadata
    return metadata
|
cortesi/qtile | libqtile/extension/dmenu.py | Python | mit | 7,038 | 0.001705 | # Copyright (C) 2016, zordsdavini
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import six
import shlex
from . import base
class Dmenu(base.RunCommand):
    """
    Python wrapper for dmenu

    http://tools.suckless.org/dmenu/

    ``_configure`` translates the configured options into a dmenu argv list
    (``self.configured_command``); ``run`` spawns the process and feeds it
    the menu items on stdin.
    """
    defaults = [
        ("dmenu_font", None, "override the default 'font' and 'fontsize' options for dmenu"),
        # NOTE: Do not use a list as a default value, since it would be shared
        #       among all the objects inheriting this class, and if one of them
        #       modified it, all the other objects would see the modified list;
        #       use a string or a tuple instead, which are immutable
        ("dmenu_command", 'dmenu', "the dmenu command to be launched"),
        ("dmenu_bottom", False, "dmenu appears at the bottom of the screen"),
        ("dmenu_ignorecase", False, "dmenu matches menu items case insensitively"),
        ("dmenu_lines", None, "dmenu lists items vertically, with the given number of lines"),
        ("dmenu_prompt", None, "defines the prompt to be displayed to the left of the input field"),
        ("dmenu_height", None, "defines the height (only supported by some dmenu forks)"),
    ]

    def __init__(self, **config):
        base.RunCommand.__init__(self, **config)
        self.add_defaults(Dmenu.defaults)

    def _configure(self, qtile):
        """Build ``self.configured_command`` from the configured options."""
        base.RunCommand._configure(self, qtile)

        dmenu_command = self.dmenu_command or self.command
        if isinstance(dmenu_command, str):
            self.configured_command = shlex.split(dmenu_command)
        else:
            # Create a clone of dmenu_command, don't use it directly since
            # it's shared among all the instances of this class
            self.configured_command = list(dmenu_command)

        if self.dmenu_bottom:
            self.configured_command.append("-b")
        if self.dmenu_ignorecase:
            self.configured_command.append("-i")
        if self.dmenu_lines:
            self.configured_command.extend(("-l", str(self.dmenu_lines)))
        if self.dmenu_prompt:
            self.configured_command.extend(("-p", self.dmenu_prompt))

        # BUG FIX: resolve the font first, then emit "-fn" once and only when
        # a font was actually determined; previously "font" could be used
        # without covering the dmenu_font-only / no-font cases correctly.
        font = None
        if self.dmenu_font:
            font = self.dmenu_font
        elif self.font:
            if self.fontsize:
                font = '{}-{}'.format(self.font, self.fontsize)
            else:
                font = self.font
        if font:
            self.configured_command.extend(("-fn", font))

        if self.background:
            self.configured_command.extend(("-nb", self.background))
        if self.foreground:
            self.configured_command.extend(("-nf", self.foreground))
        if self.selected_background:
            self.configured_command.extend(("-sb", self.selected_background))
        if self.selected_foreground:
            self.configured_command.extend(("-sf", self.selected_foreground))
        # NOTE: The original dmenu doesn't support the '-h' option
        if self.dmenu_height:
            self.configured_command.extend(("-h", str(self.dmenu_height)))

    def run(self, items=None):
        """Run dmenu, feeding *items* on stdin; return its selection output.

        NOTE(review): each call with items appends another "-l" pair to
        self.configured_command — repeated runs accumulate flags; confirm
        whether instances are meant to be single-use.
        """
        if items:
            if self.dmenu_lines:
                lines = min(len(items), self.dmenu_lines)
            else:
                lines = len(items)
            self.configured_command.extend(("-l", str(lines)))

        proc = super(Dmenu, self).run()

        if items:
            input_str = "\n".join([six.u(i) for i in items]) + "\n"
            return proc.communicate(str.encode(input_str))[0].decode('utf-8')
        return proc
class DmenuRun(Dmenu):
    """
    Special case to run applications.

    Identical to :class:`Dmenu` except that it launches ``dmenu_run``, which
    presents the binaries found on ``$PATH``.

    config.py should have something like:

    .. code-block:: python

        from libqtile import extension
        keys = [
            Key(['mod4'], 'r', lazy.run_extension(extension.DmenuRun(
                dmenu_prompt=">",
                dmenu_font="Andika-8",
                background="#15181a",
                foreground="#00ff00",
                selected_background="#079822",
                selected_foreground="#fff",
                dmenu_height=24,  # Only supported by some dmenu forks
            ))),
        ]
    """
    defaults = [
        # Overrides Dmenu's default of plain 'dmenu'.
        ("dmenu_command", 'dmenu_run', "the dmenu command to be launched"),
    ]

    def __init__(self, **config):
        Dmenu.__init__(self, **config)
        self.add_defaults(DmenuRun.defaults)

    def _configure(self, qtile):
        # No extra flags beyond the base Dmenu configuration.
        Dmenu._configure(self, qtile)
class J4DmenuDesktop(Dmenu):
    """
    Python wrapper for j4-dmenu-desktop

    https://github.com/enkore/j4-dmenu-desktop

    Wraps the fully-configured dmenu invocation with j4-dmenu-desktop so
    that installed .desktop application entries are offered as menu items.
    """
    defaults = [
        ("j4dmenu_command", 'j4-dmenu-desktop', "the dmenu command to be launched"),
        ("j4dmenu_use_xdg_de", False, "read $XDG_CURRENT_DESKTOP to determine the desktop environment"),
        ("j4dmenu_display_binary", False, "display binary name after each entry"),
        ("j4dmenu_generic", True, "include the generic name of desktop entries"),
        ("j4dmenu_terminal", None, "terminal emulator used to start terminal apps"),
        ("j4dmenu_usage_log", None, "file used to sort items by usage frequency"),
    ]

    def __init__(self, **config):
        Dmenu.__init__(self, **config)
        self.add_defaults(J4DmenuDesktop.defaults)

    def _configure(self, qtile):
        Dmenu._configure(self, qtile)
        # The dmenu argv built by the parent class is passed to
        # j4-dmenu-desktop as a single shell-quoted --dmenu argument.
        dmenu_cmdline = " ".join(shlex.quote(arg)
                                 for arg in self.configured_command)
        command = [self.j4dmenu_command, '--dmenu', dmenu_cmdline]
        if self.j4dmenu_use_xdg_de:
            command.append("--use-xdg-de")
        if self.j4dmenu_display_binary:
            command.append("--display-binary")
        if not self.j4dmenu_generic:
            command.append("--no-generic")
        if self.j4dmenu_terminal:
            command.extend(("--term", self.j4dmenu_terminal))
        if self.j4dmenu_usage_log:
            command.extend(("--usage-log", self.j4dmenu_usage_log))
        self.configured_command = command
|
avicizhu/Load-balancer | src/visualizer/visualizer/base.py | Python | gpl-2.0 | 3,793 | 0.005537 | import ns.point_to_point
import ns.csma
import ns.wifi
import ns.bridge
import ns.internet
import ns.mesh
import ns.wimax
import ns.wimax
import ns.lte
import gobject
import os.path
import sys
PIXELS_PER_METER = 3.0 # pixels-per-meter, at 100% zoom level
class PyVizObject(gobject.GObject):
    """Abstract base class for every object the visualizer can display."""
    __gtype_name__ = "PyVizObject"

    def tooltip_query(self, tooltip):
        # Placeholder implementation; subclasses override with real info.
        tooltip.set_text("TODO: tooltip for %r" % self)
class Link(PyVizObject):
    # Marker base class for links between nodes; concrete link types are
    # provided by the visualizer plugins.
    pass
class InformationWindow(object):
    """Interface for auxiliary windows that show per-object information."""
    def update(self):
        # Subclasses must refresh the displayed information here.
        raise NotImplementedError
class NetDeviceTraits(object):
    """Static description of a NetDevice class: wireless and/or virtual."""
    def __init__(self, is_wireless=None, is_virtual=False):
        # A non-virtual device must declare whether it is wireless.
        assert is_virtual or is_wireless is not None
        self.is_wireless = is_wireless
        self.is_virtual = is_virtual
# Maps concrete NetDevice classes to their visualization traits.
# lookup_netdevice_traits() consults this table and falls back to a
# warned guess (non-virtual wireless) for unregistered types.
netdevice_traits = {
    ns.point_to_point.PointToPointNetDevice: NetDeviceTraits(is_wireless=False),
    ns.csma.CsmaNetDevice: NetDeviceTraits(is_wireless=False),
    ns.wifi.WifiNetDevice: NetDeviceTraits(is_wireless=True),
    ns.bridge.BridgeNetDevice: NetDeviceTraits(is_virtual=True),
    ns.internet.LoopbackNetDevice: NetDeviceTraits(is_virtual=True, is_wireless=False),
    ns.mesh.MeshPointDevice: NetDeviceTraits(is_virtual=True),
    ns.wimax.SubscriberStationNetDevice: NetDeviceTraits(is_wireless=True),
    ns.wimax.BaseStationNetDevice: NetDeviceTraits(is_wireless=True),
    ns.lte.UeNetDevice: NetDeviceTraits(is_wireless=True),
    ns.lte.EnbNetDevice: NetDeviceTraits(is_wireless=True),
    }
def lookup_netdevice_traits(class_type):
    """Return the NetDeviceTraits registered for *class_type*.

    Unknown device types are assumed to be non-virtual wireless devices;
    a warning is printed and the guess is cached in netdevice_traits so
    the warning fires only once per type.
    """
    try:
        return netdevice_traits[class_type]
    except KeyError:
        sys.stderr.write("WARNING: no NetDeviceTraits registered for device type %r; "
                         "I will assume this is a non-virtual wireless device, "
                         "but you should edit %r, variable 'netdevice_traits',"
                         " to make sure.\n" % (class_type.__name__, __file__))
        t = NetDeviceTraits(is_virtual=False, is_wireless=True)
        netdevice_traits[class_type] = t
        return t
def transform_distance_simulation_to_canvas(d):
    """Convert a distance in simulation meters to canvas pixels."""
    return d * PIXELS_PER_METER

def transform_point_simulation_to_canvas(x, y):
    """Convert an (x, y) point from simulation meters to canvas pixels."""
    return (x * PIXELS_PER_METER, y * PIXELS_PER_METER)

def transform_distance_canvas_to_simulation(d):
    """Convert a distance in canvas pixels to simulation meters."""
    return d / PIXELS_PER_METER

def transform_point_canvas_to_simulation(x, y):
    """Convert an (x, y) point from canvas pixels to simulation meters."""
    return (x / PIXELS_PER_METER, y / PIXELS_PER_METER)
plugins = []
plugin_modules = {}
def register_plugin(plugin_init_func, plugin_name=None, plugin_module=None):
    """
    Register a plugin.

    @param plugin_init_func: a callable object that will be invoked whenever a
    Visualizer object is created, like this: plugin_init_func(visualizer)
    @param plugin_name: name under which the plugin module is recorded in
    plugin_modules (only used when plugin_module is given)
    @param plugin_module: the module object implementing the plugin
    """
    assert callable(plugin_init_func)
    plugins.append(plugin_init_func)
    if plugin_module is not None:
        plugin_modules[plugin_name] = plugin_module
plugins_loaded = False
def load_plugins():
    """Import every .py module in the plugins/ directory and register it.

    Each plugin module must expose a 'register' callable; modules without
    one, or that fail to import, are skipped with a warning.  Idempotent:
    subsequent calls are no-ops.  (Python 2 syntax throughout.)
    """
    global plugins_loaded
    if plugins_loaded:
        return
    plugins_loaded = True
    plugins_dir = os.path.join(os.path.dirname(__file__), 'plugins')
    # Temporarily prepend the plugins dir so __import__ can find the modules.
    old_path = list(sys.path)
    sys.path.insert(0, plugins_dir)

    for filename in os.listdir(plugins_dir):
        name, ext = os.path.splitext(filename)
        if ext != '.py':
            continue
        try:
            plugin_module = __import__(name)
        except ImportError, ex:
            print >> sys.stderr, "Could not load plugin %r: %s" % (filename, str(ex))
            continue
        try:
            plugin_func = plugin_module.register
        except AttributeError:
            print >> sys.stderr, "Plugin %r has no 'register' function" % name
        else:
            #print >> sys.stderr, "Plugin %r registered" % name
            register_plugin(plugin_func, name, plugin_module)

    # Restore the import path so plugins cannot shadow later imports.
    sys.path = old_path
|
pat-coady/trpo | trpo/utils.py | Python | mit | 4,103 | 0.002194 | """
Logging and Data Scaling Utilities
Written by Patrick Coady (pat-coady.github.io)
"""
import numpy as np
import os
import shutil
import glob
import csv
class Scaler(object):
    """Running per-feature mean/stddev tracker along axis=0.

    offset = running mean
    scale  = 1 / (stddev + 0.1) / 3   (i.e. 3x stddev maps to +/- 1.0)
    """

    def __init__(self, obs_dim):
        """
        Args:
            obs_dim: dimension of axis=1
        """
        self.vars = np.zeros(obs_dim)
        self.means = np.zeros(obs_dim)
        self.m = 0
        self.n = 0
        self.first_pass = True

    def update(self, x):
        """Fold a new batch into the running mean and variance (exact).

        Args:
            x: NumPy array, shape = (N, obs_dim)

        Uses the pooled-groups identity, see:
        https://stats.stackexchange.com/questions/43159/how-to-calculate-pooled-
        variance-of-two-groups-given-known-group-variances-mean
        """
        if self.first_pass:
            self.means = np.mean(x, axis=0)
            self.vars = np.var(x, axis=0)
            self.m = x.shape[0]
            self.first_pass = False
            return

        batch_size = x.shape[0]
        batch_mean = np.mean(x, axis=0)
        batch_var = np.var(x, axis=0)
        total = self.m + batch_size

        pooled_mean = ((self.means * self.m) + (batch_mean * batch_size)) / total
        # E[X^2] pooled across both groups, minus the new mean squared.
        pooled_sq_mean = ((self.m * (self.vars + np.square(self.means))) +
                          (batch_size * (batch_var + np.square(batch_mean)))) / total
        pooled_var = pooled_sq_mean - np.square(pooled_mean)

        # Numerical round-off occasionally drives this slightly negative.
        self.vars = np.maximum(0.0, pooled_var)
        self.means = pooled_mean
        self.m = total

    def get(self):
        """Return 2-tuple: (scale, offset)."""
        return 1 / (np.sqrt(self.vars) + 0.1) / 3, self.means
class Logger(object):
""" Simple training logger: saves to file and optionally prints to stdout """
def __init__(self, logname, now):
"""
Args:
logname: name for log (e.g. 'Hopper-v1')
now: unique sub-directory name (e.g. date/time string)
"""
path = os.path.join('log-files', logname, now)
os.makedirs(path)
filenames = glob.glob('*.py') # put copy of all python files in log_dir
for filename in filenames: # for reference
shutil.copy(filename, path)
path = os.path.join(path, 'log.csv')
self.write_header = True
| self.log_entry = {}
self.f = open(path, 'w')
self.writer = None # DictWriter created with first call to write() method
def write(self, display=True):
""" Write 1 log entry to file, and optionally to stdout
Log fields preceded by '_' will not be printed to stdout
Args:
display: boolean, print to stdout
"""
if display:
se | lf.disp(self.log_entry)
if self.write_header:
fieldnames = [x for x in self.log_entry.keys()]
self.writer = csv.DictWriter(self.f, fieldnames=fieldnames)
self.writer.writeheader()
self.write_header = False
self.writer.writerow(self.log_entry)
self.log_entry = {}
@staticmethod
def disp(log):
"""Print metrics to stdout"""
log_keys = [k for k in log.keys()]
log_keys.sort()
print('***** Episode {}, Mean R = {:.1f} *****'.format(log['_Episode'],
log['_MeanReward']))
for key in log_keys:
if key[0] != '_': # don't display log items with leading '_'
print('{:s}: {:.3g}'.format(key, log[key]))
print('\n')
def log(self, items):
""" Update fields in log (does not write to file, used to collect updates.
Args:
items: dictionary of items to update
"""
self.log_entry.update(items)
def close(self):
""" Close log file - log cannot be written after this """
self.f.close()
|
csm0042/rpihome | rpihome/devices/device_wemo_lrlt1.py | Python | gpl-3.0 | 3,812 | 0.006558 | #!/usr/bin/python3
""" wemo_lrlt1.py:
"""
# Import Required Libraries (Standard, Third Party, Local) ************************************************************
import copy
import datetime
import logging
import multiprocessing
import time
from .device_wemo import DeviceWemo
# Authorship Info *****************************************************************************************************
__author__ = "Christopher Maue"
__copyright__ = "Copyright 2016, The RPi-Home Project"
__credits__ = ["Christopher Maue"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Christopher Maue"
__email__ = "csmaue@gmail.com"
__status__ = "Development"
# Device class ********************************************************************************************************
class Wemo_lrlt1(DeviceWemo):
def __init__(self, name, ip, msg_out_queue, logger=None):
# Configure logger
self.logger = logger or logging.getLogger(__name__)
# Init parent class
super().__init__(name, ip, msg_out_queue, self.logger)
def check_rules(self, **kwargs):
""" This method contains the rule-set that controls external security lights """
self.home = False
# Process input variables if present
if kwargs is not None:
for key, value in kwargs.items():
if key == "datetime":
self.dt = value
if key == "homeArray":
self.homeArray = value
if key == "homeTime":
self.homeTime = value |
if key == "home":
self.home = value
if key == "utcOffset":
self.utcOffset = value
if key == "sunriseOffset":
self.sunriseOffset = value
if key == "s | unsetOffset":
self.sunsetOffset = value
if key == "timeout":
self.timeout = value
# Calculate sunrise / sunset times
self.sunrise = datetime.datetime.combine(datetime.datetime.today(), self.s.sunrise(self.dt, self.utcOffset))
self.sunset = datetime.datetime.combine(datetime.datetime.today(), self.s.sunset(self.dt, self.utcOffset))
# Determine if anyone is home
for h in self.homeArray:
if h is True:
self.home = True
# Decision tree to determine if screen should be awake or not
# If before sunrise + 30 minutes
if 0 <= self.dt.weekday() <= 4:
if self.homeArray[0] is True:
if self.homeArray[1] is True or self.homeArray[2] is True:
if datetime.time(5,50) <= self.dt.time() <= datetime.time(6,30):
if self.state is False:
self.logger.info("Turning on lrlt1")
self.state = True
else:
if self.state is True:
self.logger.info("Turning off lrlt1")
self.state = False
else:
if datetime.time(6,30) <= self.dt.time() <= datetime.time(7,0):
if self.state is False:
self.logger.info("Turning on lrlt1")
self.state = True
else:
if self.state is True:
self.logger.info("Turning off lrlt1")
self.state = False
else:
if self.state is True:
self.logger.info("Turning off lrlt1")
self.state = False
# Return result
return self.state |
iraklikhitarishvili/data2class | base/base.py | Python | bsd-2-clause | 2,011 | 0.001989 | from validation.validationresult.resultenum import ResultType
from validation.validationresult.result import Result
class BaseMixin:
def __init__(self, result_type: ResultType = ResultType.Base) -> None:
self._validators = []
self._result_type = result_type
@property
def validators(self): # type: List[Callable[[Any, Any], dict]]:
"""
List of validator functions.
:return: List[Callable[[Any, Any], dict]]
"""
return self._validators[:]
@validators.setter
def validators(self):
pass
@validators.deleter
def validators(self):
pass
def add_validator(self, validator):
"""
:param validator: function for data validation, it's first parameter is current field, second parameter is data to validate. validator function must return dictionary ``{"is_valid":bool,"errors":Any}``
:type validator: Callable[[Any, Any], dict]
:return: None
"""
self._validators.append(validator)
def validate(self, data):
"""
Iterates throw fields validates it's data collecting errors and
returns result
:param data: Instance of the current class
:return:
:py:class:`dict` with keys:
| 1. ``is_valid`` indicates whether data in object is valid or not
2. ``errors`` dictionary of errors
:rtype: dict
"""
# data.errors.clear()
# todo-urgent dependency in validation and stop when encountering specific errors
validations = [validator(self, data) for validator in self._validators]
if all(Result.is_valid(result) for result in validation | s):
return Result.factor_result(self._result_type, True, None)
return Result.factor_result(
self._result_type,
False,
[Result.get_error(result) for result in validations if not Result.is_valid(result)]
)
|
gercordero/va_de_vuelta | src/estadisticas.py | Python | gpl-3.0 | 1,023 | 0.036168 | import pilas
import json
from pilas.escena import Base
from general import General
from individual import Individual
class jugadores(Base): |
def __init__(self):
Base.__init__(self)
def fondo(self):
pilas.fondos.Fondo("data/img/fondos/aplicacion.jpg")
def general(self):
self.sonido_boton.reproducir()
pilas.almacenar_escena(General())
def individual(self):
self.sonido_boton.reproducir()
|
pilas.almacenar_escena(Individual())
def volver(self):
self.sonido_boton.reproducir()
pilas.recuperar_escena()
def iniciar(self):
self.fondo()
self.sonido_boton = pilas.sonidos.cargar("data/audio/boton.ogg")
self.interfaz()
self.mostrar()
def interfaz(self):
opcion= [("General",self.general),("Individual",self.individual),("Volver",self.volver)]
menu = pilas.actores.Menu(opcion, y=50, fuente="data/fonts/American Captain.ttf")
menu.escala = 1.3
enunciado = pilas.actores.Actor("data/img/enunciados/estadisticas.png",y=250)
enunciado.escala = 0.3 |
ENCODE-DCC/encoded | src/encoded/upgrade/gene.py | Python | mit | 437 | 0.002288 | from snovault | import upgrade_step
@upgrade_step('gene', '1', '2')
def gene_1_2(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5005
# go_annotations are replaced by a link on UI to GO
value.pop('go_annotations', None)
@upgrade_step('gene', '2', '3')
def gene_2_3(value, system):
# h | ttps://encodedcc.atlassian.net/browse/ENCD-6228
if value.get('locations') == []:
value.pop('locations', None)
|
waveFrontSet/box_management | box_management/boxes/urls.py | Python | mit | 1,131 | 0.000884 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
url(
regex=r'^$',
view=views.BoxListView.as_view(),
name='boxlist',
),
url(
regex=r'^(?P<box>[\w-]+)/$',
view=views.BoxItemListView.as_view(),
name='items_by_box',
),
url(
regex=r'^(?P<box>[\w-]+)/take/$',
view=views.BoxItemTakeListView.as_view(),
name='item_take_list',
),
url(
regex=r'^(?P<box>[\w-]+)/return/$',
view=views.BoxItemReturnListView.as_view(),
name='item_return_list',
),
url(
regex=r'^(?P<box>[\w-]+)/take/(?P<pk>[\d]+)$',
view=views.BoxItemTakeView | .as_view(),
name='item_take',
),
url(
regex=r'^(?P<box>[\w-]+)/return/(?P<pk>[\d]+)$',
view=views. | BoxItemReturnView.as_view(),
name='item_return',
),
url(
regex=r'^(?P<box>[\w-]+)/(?P<category>[\w-]+)/$',
view=views.BoxCategoryItemListView.as_view(),
name='items_by_category',
),
]
|
Eddy0402/Environment | vim/ycmd/third_party/jedi/test/completion/goto.py | Python | gpl-3.0 | 2,526 | 0.032462 | # goto_assignments command tests are different in syntax
definition = 3
#! 0 ['a = definition']
a = definition
#! []
b
#! ['a = definition']
a
b = a
c = b
#! ['c = b']
c
cd = 1
#! 1 ['cd = c']
cd = c
#! 0 ['cd = e']
cd = e
#! ['module math']
import math
#! ['import math']
math
#! ['import math']
b = math
#! ['b = math']
b
class C(object):
def b(self):
#! ['b = math']
b
#! ['def b']
self.b
#! 14 ['def b']
self.b()
#! 11 ['self']
self.b
return 1
#! ['def b']
b
#! ['b = math']
b
#! ['def b']
C.b
#! ['def b']
C().b
#! 0 ['class C']
C().b
#! 0 ['class C']
C().b
D = C
#! ['def b']
D.b
#! ['def b']
D().b
#! 0 ['D = C']
D().b
#! 0 ['D = C']
D().b
def c():
return ''
#! ['def c']
c
#! 0 ['def c']
c()
class ClassVar():
x = 3
#! ['x = 3']
ClassVar.x
#! ['x = 3']
ClassVar().x
# before assignments
#! 10 ['x = 3']
ClassVar.x = ''
#! 12 ['x = 3']
ClassVar().x = ''
# Recurring use of the same var name, github #315
def f(t=None):
#! 9 ['t = None']
t = t or 1
# -----------------
# imports
# -----------------
#! [' | module import_tree']
import import_tree
#! ["a = ''"]
import_tree.a
#! ['module mod1']
import import_tree.mod1
#! ['a = 1']
import_tree.mod1.a
#! ['module pkg']
import import_tree.pkg
#! ['a = list']
import_tree.pkg.a
#! ['module mod1']
import import_tree.pkg.mod1
#! ['a = 1.0']
import_tree.pkg.mod1.a
| #! ["a = ''"]
import_tree.a
#! ['module mod1']
from import_tree.pkg import mod1
#! ['a = 1.0']
mod1.a
#! ['module mod1']
from import_tree import mod1
#! ['a = 1']
mod1.a
#! ['a = 1.0']
from import_tree.pkg.mod1 import a
#! ['import os']
from .imports import os
#! ['some_variable = 1']
from . import some_variable
# -----------------
# anonymous classes
# -----------------
def func():
class A():
def b(self):
return 1
return A()
#! 8 ['def b']
func().b()
# -----------------
# on itself
# -----------------
#! 7 ['class ClassDef']
class ClassDef():
""" abc """
pass
# -----------------
# params
# -----------------
param = ClassDef
#! 8 ['param']
def ab1(param): pass
#! 9 ['param']
def ab2(param): pass
#! 11 ['param = ClassDef']
def ab3(a=param): pass
ab1(ClassDef);ab2(ClassDef);ab3(ClassDef)
# -----------------
# for loops
# -----------------
for i in range(1):
#! ['for i in range(1): i']
i
for key, value in [(1,2)]:
#! ['for key,value in [(1, 2)]: key']
key
for i in []:
#! ['for i in []: i']
i
|
pjkundert/mincemeatpy | example-sf-daemon.py | Python | mit | 15,126 | 0.008528 | #!/usr/bin/env python
import mincemeat
import glob
import logging
import repr
import socket
import errno
import asyncore
import threading
import time
import traceback
import sys
"""
example-sf-daemon -- elect a server, become a client; uses daemons
To run this test, simply start an instances of this script:
python example-sf-daemon.py
Much like example-sf-masterless.py, but uses the
mincemeat.Server_daemon and Client_daemon to implement threaded async
I/O. Also demonstrates advanced usage of the default Server.output
deque, for asynchronously accessing Map/Reduce results.
"""
class file_contents(object):
def __init__(self, pattern ):
self.text_files = glob.glob( pattern )
def __len__(self):
return len(self.text_files)
def __iter__(self):
return iter(self.text_files)
def __getitem__(self, key):
f = open(key)
try:
return f.read()
finally:
f.close()
# Obtain CD ISO from: http://www.gutenberg.org/cdproject/pgsfcd-032007.zip.torrent
datasource = file_contents( '../Gutenberg SF CD/Gutenberg SF/*18*.txt' )
#
# Map Functions.
#
# Take a name and corpus of data, and map it onto an iterable of
# (key,value) pairs.
#
def get_lower_split( name, corpus ):
import string
logging.debug( "Corpus: %-40s: %d bytes" %( name, len( corpus )))
for line in corpus.split("\n"):
for word in line.replace('--',' ').split():
word = word.lower().strip(string.punctuation+
string.whitespace+
string.digits)
if "'" in word:
for suffix in [ "'s", "'ll", "'d", "'ve" ]:
if word.endswith( suffix ):
word = word[:-len( suffix )]
if word:
yield word, 1
def get_lower_simple( k, v ):
for w in v.split():
yield w.lower(), 1
# |
# Collect, Reduce, or Finish Functions.
#
# Take (key,value) or (key,[value,...]) pairs, or an iterable
# producing such, and return the single value mapped to that key. The
# func | tional version returns just the value; the iterable version must
# return the (key,value) pair.
#
# If the function is resilient to taking a value that is either an
# iterable OR is a single value, then the same function may be used
# for any of the Collect, Reduce or Finish functions. Collect and
# Reduce will always be provided with (key,[value,...]) arguments;
# Finish may be provided with (key,[value,...]) OR (key,value). Try
# isistance(vs,list) or hasattr(vs,'__iter__'), or use functions that
# throw TypeError on non-iterables, and catch the exception.
#
def sum_values( k, vs ):
try:
return sum( vs ) # Will throw unless vs is iterable, summable
except TypeError:
return vs
def sum_values_generator( kvi ):
for k, vs in kvi:
try:
yield k, sum( vs ) # Will throw unless vs is iterable, summable
except TypeError:
yield k, vs
#
# Map Phase
#
# Each Map client runs a full pass of mapfn over the incoming data, followed
# (optionally) by a pass of collectfn over all values for each Map data_key:
#
# mapfn( source_key, data )
# --> { map_key1: [ value, ...] ), map_key2: [ value, ...], ... }
# collectfn( map_key1, [ value, value ] )
# --> data_key1: [ value ]
#
# The optional collectfn would be appropriate to (for example)
# reduce the communication payload size (eg. store the map data in
# some global filesystem, and instead return the filesystem path.)
#
# Or, if the mapfn is simple (doesn't retain information about the
# data corpus), the collectfn might collapse information about the
# result values. For example, in the simple "word count" example, the
# mapfn returns lists of the form [ 1, 1, 1, ...., 1 ]. Instead of
# transmitting this, we should use the collect function to sum these
# counters, returning a list with a single value.
#
# The .collectfn may take a (key, values) tuple (must be a scalar,
# eg. int, string and an iterable, eg. list), and return a single
# scalar value, which will be returned as a single-entry list. Or, it
# may take an iterator producing the key, values tuples, and must
# return an (key, values) list of the same types (eg. a scalar key,
# and an iterable value).
#
mapfn = get_lower_split
# When the map function produces non-optimal results, it may be
# desirable to run a collect phase, to post-process the results before
# returning them to the server. For example, the trivial map function
# for word counting produces a (very long) list of the form [1, 1,
# ..., 1]; it might be desirable to sum this list before returning. A
# less contrived example might post-process the entire set of keys
# produced by the map; a generator-style collect function can retain
# state between invocations with each key, and may decide to modify
# (or even skip) keys, or return return new/additional keys. Try
# setting collectfn to sum_values or sum_values_generator to see the
# differences in the results of the map (dramatically smaller returned
# lists)
#collectfn = None
collectfn = sum_values
#collectfn = sum_values_generator
#
# Reduce Phase
#
# The Reduce phase takes the output of Map:
#
# mapped[key] = [ value, value, ... ]
#
# data, and produces:
#
# result[key] = value
#
# If no Server.reducefn is supplied, then the Reduce phase is skipped,
# and the mapped data is passed directly to the result:
#
# result[key] = [ value, value, ... ]
#
# Therefore, any supplied Server.finishfn() must be able to handle
# either a scalar value (indicating that Reduce has completed), or
# sequence values (indicating that the Reduce phase was skipped.)
# NOTE: In the case where the reduce function is trivial (such as in
# the word counting example), it will take *significantly* longer to
# run this test, than if you specify None for reducefn, and (instead)
# use the finishfn to run the entire reduce phase in the server...
# The results should be identical. To see the difference, try
# changing reducefn to None, and setting finishfn to sum_values or
# sum_values_generator.
# Skip the Reduce phase; use the Reduce function as Server.finishfn
reducefn = None
#reducefn = sum_values
#reducefn = sum_values_generator
#finishfn = None
finishfn = sum_values
#finishfn = sum_values_generator
#
# Result Callback
#
# Instead of monitoring the Server for completion, an optional
# resultfn callback may be provided, which is invoked by the Server
# immediately with the final results upon completion.
#
def server_results(txn, results, top=None):
# Map-Reduce over 'datasource' complete. Enumerate results,
# ordered both lexicographically and by count
print "Transaction %s; %s%d results:" % (
txn, ( top and "top %d of " % top or ""), len(results))
# Collect lists of all words with each unique count
bycount = {}
for wrd,cnt in results.items():
if cnt in bycount:
bycount[cnt].append(wrd)
else:
bycount[cnt] = [wrd]
# Create linear list of words sorted by count (limit to top #)
bycountlist = []
for cnt in sorted(bycount.keys(), reverse=True):
for wrd in sorted(bycount[cnt]):
bycountlist.append((cnt, wrd))
if top and len(bycountlist) >= top:
break
# Print two columns; one sorted lexicographically, one by count
for wrd, cnt_wrd in zip(sorted([wrd for __,wrd in bycountlist],
reverse=True),
reversed(bycountlist)):
print "%8d %-40.40s %8d %s" % (results[wrd], wrd, cnt_wrd[0], cnt_wrd[1])
resultfn = None # None retains default behaviour
#resultfn = server_results # Process directly (using asyncore.loop thread)
credentials = {
'password': 'changeme',
'interface': 'localhost',
'port': mincemeat.DEFAULT_PORT,
'datasource': datasource,
'mapfn': mapfn,
'collectfn': collectfn,
|
iAddz/allianceauth | services/modules/market/models.py | Python | gpl-2.0 | 664 | 0 | from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.auth.models import User
from dja | ngo.db import models
@python_2_unicode_compatible
class MarketUser(models.Model):
user = models.OneToOneField(User,
primary_key=True,
on_delete=models.CASCADE,
related_name='market')
username = models.CharField(max_length=254)
def __str__(self):
return self.username
class Meta:
permissions = (
("access_market", u"Can access th | e Evernus Market service"),
)
|
JoshMayberry/Numerical_Methods | Bisection/bisection plotable 2.py | Python | mit | 2,124 | 0.015066 | import math
from equations import *
from my_functions import *
import numpy as np
def function(fn,x):
x = [x]
fx = fn(x)
return fx
def bip(fn,xaxis=[-1,1],inc=0.1,edes=0.01):
"""This function runs bi(), but first shows you a plot and lets you choose the roots you want.
'fn' is the name of a | n equation in 'equations.py'
'coeff' are the coefficents for the function as a list
'xax | is' is [min x on graph, max x on graph]. The default is [-1,1]
'inc' is what the tickmarks(increments) will increase by. The default is 0.1.
'edes' is the desired margin of error. The default is 1% error.
Example Input: bip(eq1,0.001)
"""
plot(fn,xaxis,inc)
xbounds = [0,0]
xbounds[0] = eval(input(' Left Bound: '))
xbounds[1] = eval(input('Right Bound: '))
#print(fn,'\n','\n',co,'\n','\n',xbounds,'\n',edes)
bi(fn,xbounds,edes)
print('Have a good day.')
def bi(fn,xbounds,edes=0.01):
"""This function finds roots (solves the equation) by steadily homing in on a single root.
Limitations: Can only find one root. It misses all others.
'fn' is the name of an equation in 'equations.py'
'xbounds' is [Left Bound,Right Bound], and should be x-axis values, not y-axis values.
'edes' is the desired margin of error. If you do not put a value here, it will default to 1% error.
Example Input: bi(eq1,[0.6,1.2],0.001)
"""
x1 = xbounds[0] #left bound
x2 = xbounds[1] #right bound
edes = edes/100 #desired %error
n = math.ceil(math.log(abs(x1-x2)/edes)/math.log(2))
#print('edes: ',edes,'\nn: ',n,'\nfn',fn,'\ncoeff',coeff)
i = 0
print('Just one moment...')
for i in range(int(n)):
xnew = (x1+x2)/2
error = abs((x1-xnew)/xnew)*100
check = function(fn,x1)*function(fn,xnew)
if check > 0:
x1 = xnew
elif check < 0:
x2 = xnew
else:
print("You broke something. I'm surprised.")
break
#print('x1: ',x1,'\nx2: ',x2,'\nxnew: ',xnew,'\nerror: ',error,'\ncheck: ',check)
print('approx. root: ',xnew,'\n%error: ',error)
|
underloki/Cyprium | kernel/brainfuck.py | Python | gpl-3.0 | 32,233 | 0.000093 | ########################################################################
# #
# Cyprium is a multifunction cryptographic, steganographic and #
# cryptanalysis tool developped by members of The Hackademy. #
# French White Hat Hackers Community! #
# cyprium.hackademics.fr # #
# Authors: SAKAROV, mont29, afranck64 #
# Contact: admin@hackademics.fr #
# Forum: hackademics.fr #
# Twitter: @hackademics_ #
# #
# Cyprium is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but without any warranty; without even the implied warranty of #
# merchantability or fitness for a particular purpose. See the #
# GNU General Public License for more details. #
# #
# The terms of the GNU General Public License is detailed in the #
# COPYING attached file. If not, see : http://www.gnu.org/licenses #
# #
########################################################################
"""
This file contains a complete valid virtual machine for brainfuck language,
as well as other similar dialects (Ook, Fast Ook and Spoon).
It also contains a virtual machine for SegFaultProg.
"""
import sys
import os
import random
import string
import kernel.utils as utils
# Languages
BRAINFUCK = 1
OOK = 10
FASTOOK = 11
SPOON = 20
SIGSEV = 30
class BrainFuck():
"""
"""
# Opcodes
# XXX Eventhough this is probably not compliant with the SegFaultProg
# standard, <v> values maybe be any positive integer, not only
# 8 bits ones.
# NOP = 0 # No operation, does nothing.
PTRINC = 1 # Increment cell pointer by <v>, up to MAXCELLS.
PTRDEC = 2 # Decrement cell pointer by <v>, up to first cell.
PTRSET = 3 # Set cell pointer to <v>, between [0..MAXCELLS].
INC = 20 # Increment cell value by <v> (8bits, cyclic).
DEC = 21 # Decrement cell value by <v> (8bits, cyclic).
# Opening brace, skip to matching closing brace if current cell is NULL.
BOPEN = 30
# Closing brace, back to matching opening brace if current cell is not
# NULL.
BCLOSE = 31
OUTPUT = 40 # Output cell value.
INPUT = 41 # Set cell value with input.
# Misc
MAXCELLS = 100000 # Max number of cells.
# Conversion mappings.
CONVERT_FUNCS = {}
TO_BRAINFUCK = {PTRINC: '>',
PTRDEC: '<',
INC: '+',
DEC: '-',
BOPEN: '[',
BCLOSE: ']',
OUTPUT: '.',
INPUT: ','}
FROM_BRAINFUCK = utils.revert_dict(TO_BRAINFUCK)
# Fast ook, in fact...
TO_OOK = {PTRINC: '.?',
PTRDEC: '?.',
INC: '..',
DEC: '!!',
BOPEN: '!?',
BCLOSE: '?!',
OUTPUT: '!.',
INPUT: '.!'}
FROM_OOK = utils.revert_dict(TO_OOK)
TO_SPOON = {PTRINC: '010',
PTRDEC: '011',
INC: '1',
DEC: '000',
BOPEN: '00100',
BCLOSE: '0011',
OUTPUT: '001010',
INPUT: '0010110'}
FROM_SPOON = utils.revert_dict(TO_SPOON)
TO_SIGSEV = {PTRINC: '>',
PTRDEC: '<',
PTRSET: '*',
INC: '+',
DEC: '-',
BOPEN: '[',
BCLOSE: ']',
OUTPUT: '.',
INPUT: ','}
FROM_SIGSEV = utils.revert_dict(TO_SIGSEV)
###########################################################################
def __init__(self, inpt=input, outpt=print, seed=None):
self.input = inpt
self.output = outpt
self.seed = seed
pass
def reset_random(self):
"""Reset random generator."""
if self.seed:
random.seed(self.seed)
else:
random.seed()
###########################################################################
# Core code.
# XXX Using instance vars here, as it might help debugging?
###########################################################################
def prepare(self, code):
"""Convert code to machine, and validate the final code."""
tp = detect_type(code)
code = self.optimize(self.CONVERT_FUNCS[tp][0](self, code))
return code
def buildbracemap(self, code):
"""Build the matching braces map of given machine code."""
open_braces = []
bracemap = {}
codeptr = 0
for codeptr, opc in enumerate(code):
opc = opc[0] # Get opcode!
if opc == self.BOPEN:
open_braces.append(codeptr)
elif opc == self.BCLOSE:
bracemap[codeptr] = open_braces[-1]
bracemap[open_braces[-1]] = codeptr
del open_braces[-1]
if open_braces:
raise ValueError("Not enough closing braces (missing {} ones)"
"".format(len(open_braces)))
return bracemap
def evaluate(self, code):
"""
Brainfuck & co virtual machine...
"""
ret = []
# Convert code to machine, and validate.
code = self.prepare(code)
bracemap = self.buildbracemap(code)
max_codeptr = len(code) - 1
cells = []
cellptr = 0
codeptr = 0
while codeptr <= max_codeptr:
cmd, val = code[codeptr]
if cmd == self.PTRINC:
if val is None:
val = 1
cellptr = min(cellptr + val, self.MAXCELLS - 1)
if cellptr >= len(cells):
cells += [0] * (cellptr - len(cells) + 1)
elif cmd == self.PTRDEC:
if val is None:
val = 1
cellptr = max(0, cellptr - val)
elif cmd == self.PTRSET:
# XXX Do nothing if no value given!
if val is not None:
cellptr = max(0, min(val, self.MAXCELLS - 1))
if cellptr >= len(cells):
cells += [0] * (cellptr - len(cells) + 1)
elif cmd == self.INC:
if val is None:
val | = 1
cells[cellptr] = (cells[cellptr] + val) % 255
elif cmd == self.DEC:
if val is None:
val = 1
| cells[cellptr] = (cells[cellptr] - val) % 255
elif cmd == self.BOPEN and cells[cellptr] == 0:
codeptr = bracemap[codeptr]
elif cmd == self.BCLOSE and cells[cellptr] != 0:
codeptr = bracemap[codeptr]
elif cmd == self.OUTPUT:
self.output(cells[cellptr])
elif cmd == self.INPUT:
inpt = self.input()
if inpt:
# XXX If user can input non-ascii chars, this can raise
# an exception... Might need better way to do this.
cells[cellptr] = ord(inpt[0].encode('ascii'))
codeptr += 1
###########################################################################
def optimize(self, code, compress=True):
"""
O |
brousch/opencraft | instance/tests/views/test_index.py | Python | agpl-3.0 | 1,651 | 0 | # -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Views - Index - Tests
"""
# Imports #####################################################################
from instance.tes | ts.base import WithUserTestCase
# Tests #######################################################################
class IndexViewsTestCase(WithUserTestCase):
"""
Test cases for views
"""
def test_index_unauthenticated(self):
| """
Index view - Unauthenticated users go to login page
"""
response = self.client.get('/')
self.assertRedirects(response, 'http://testserver/admin/login/?next=/')
def test_index_authenticated(self):
"""
Index view - Authenticated
"""
self.client.login(username='user1', password='pass')
response = self.client.get('/')
self.assertContains(response, 'ng-app="InstanceApp"')
|
ashutoshvt/psi4 | tests/pytests/test_geometric.py | Python | lgpl-3.0 | 3,139 | 0.010831 | import pytest
from .utils import *
import psi4
from qcengine.testing import using
@pytest.mark.parametrize('engine', [
pytest.param('optking'),
pytest.param('geometric', marks | =using('geometric')),
]) # yapf: disable
@pytest.mark.parametrize('inp', [
pytest.param({'name': 'hf', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.027032783717, 'ref_nuc': 9.300794299874}, id='rhf(df)'),
pytest.param( | {'name': 'hf', 'options': {'scf_type': 'pk'}, 'ref_ene' : -76.027053512764, 'ref_nuc': 9.300838770294}, id='rhf(pk)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'df'}, 'ref_ene' : -76.230938589591, 'ref_nuc': 9.133271168193}, id='mp2(df)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'conv'}, 'ref_ene' : -76.230989373502, 'ref_nuc': 9.133125471291}, id='mp2(conv)'),
pytest.param({'name': 'b3lyp', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.420645414834, 'ref_nuc': 9.090397129492}, id='b3lyp'),
]) # yapf: disable
def test_h2o(inp, engine):
"""Optimization of the square water molecule"""
h2o = psi4.geometry("""
O
H 1 1.0
H 1 1.0 2 90.0
""")
psi4.set_options({'basis': 'cc-pvdz',
'g_convergence': 'gau_tight'
})
psi4.set_options(inp['options'])
e, wfn = psi4.optimize(inp['name'], return_wfn=True, engine=engine)
assert compare_values(inp['ref_ene'], e, 6)
assert compare_values(inp['ref_nuc'], h2o.nuclear_repulsion_energy(), 3)
@using('geometric')
@pytest.mark.parametrize('inp', [
pytest.param({'name': 'hf', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.02079629252714, 'ref_nuc': 9.265341708725257}, id='rhf(df)'),
pytest.param({'name': 'hf', 'options': {'scf_type': 'pk'}, 'ref_ene' : -76.02082389228, 'ref_nuc': 9.26528625744628}, id='rhf(pk)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'df'}, 'ref_ene' : -76.22711819393223, 'ref_nuc': 9.09137805747361}, id='mp2(df)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'conv'}, 'ref_ene' : -76.2271678506303, 'ref_nuc': 9.091178486990861}, id='mp2(conv)'),
pytest.param({'name': 'b3lyp', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.41632755714534, 'ref_nuc': 9.04535641436914}, id='b3lyp'),
]) # yapf: disable
def test_h2o_constrained(inp):
"""Constrained optimization of the square water molecule"""
h2o = psi4.geometry("""
O
H 1 1.0
H 1 1.0 2 90.0
""")
psi4.set_options({'basis': 'cc-pvdz',
'g_convergence': 'gau_tight'
})
psi4.set_options(inp['options'])
# geometric specific options
geometric_keywords = {
'coordsys' : 'tric',
'enforce' : 0.0,
'constraints' : {
'set' : [{'type' : 'angle',
'indices' : [1, 0, 2],
'value' : 90.0 }]
}
}
e, wfn = psi4.optimize(inp['name'], return_wfn=True, engine='geometric', optimizer_keywords=geometric_keywords)
assert compare_values(inp['ref_ene'], e, 6)
assert compare_values(inp['ref_nuc'], h2o.nuclear_repulsion_energy(), 3)
|
endlessm/chromium-browser | tools/clang/scripts/goma_ld.py | Python | bsd-3-clause | 2,071 | 0.01014 | #! /usr/bin/env python
# Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Linker wrapper that performs distributed ThinLTO on Goma.
#
# Usage: Pass the original link command as parameters to this | script.
# E.g. original: clang++ -o foo foo.o
# Becomes: goma-ld clang++ -o foo foo.o
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import goma_link
import os
import re
import sys
class GomaLinkUnix(goma_link.GomaLinkBase):
# Target-platform-specific constants.
WL = '-Wl,'
TLTO = '-plugin-opt=thinlto'
| SEP = '='
GROUP_RE = re.compile(WL + '--(?:end|start)-group')
MACHINE_RE = re.compile('-m([0-9]+)')
OBJ_PATH = '-plugin-opt=obj-path' + SEP
OBJ_SUFFIX = '.o'
PREFIX_REPLACE = TLTO + '-prefix-replace' + SEP
XIR = '-x ir '
WHITELISTED_TARGETS = {
'chrome',
}
def analyze_args(self, args, *posargs, **kwargs):
# TODO(crbug.com/1040196): Builds are unreliable when all targets use
# distributed ThinLTO, so we only enable it for whitelisted targets.
# For other targets, we fall back to local ThinLTO. We must use ThinLTO
# because we build with -fsplit-lto-unit, which requires code generation
# be done for each object and target.
if args.output is None or os.path.basename(
args.output) not in self.WHITELISTED_TARGETS:
# Returning None causes the original, non-distributed linker command to be
# invoked.
return None
return super(GomaLinkUnix, self).analyze_args(args, *posargs, **kwargs)
def process_output_param(self, args, i):
"""
If args[i] is a parameter that specifies the output file,
returns (output_name, new_i). Else, returns (None, new_i).
"""
if args[i] == '-o':
return (os.path.normpath(args[i + 1]), i + 2)
else:
return (None, i + 1)
if __name__ == '__main__':
sys.exit(GomaLinkUnix().main(sys.argv))
|
madisonmay/Tomorrow | setup.py | Python | mit | 521 | 0 | from setuptools import setup, find_packages
setup(
na | me="tomorrow",
version="0.2.4",
author="Madison May",
author_email="madison@indico.io",
packages=find_packages(
exclude=[
'tests'
]
),
install_requires=[
"futures >= 2.2.0"
],
description="""
Ma | gic decorator syntax for asynchronous code.
""",
license="MIT License (See LICENSE)",
long_description=open("README.rst").read(),
url="https://github.com/madisonmay/tomorrow"
)
|
henrywm/URI | src/beginner/1759.py | Python | apache-2.0 | 73 | 0.041096 | N = | int(input())
gemido = (N-1) * "Ho "
print("{0}Ho!".format(ge | mido)) |
qdev-dk/Majorana | alazar_controllers/acq_helpers.py | Python | gpl-3.0 | 1,250 | 0.0016 | import numpy as np
import math
def sample_to_volt_u12(raw_samples, bps, input_range_volts):
"""
Applies volts conversion for 12 bit sample data stored
in 2 bytes
return:
samples_magnitude_array
samples_phase_array
"""
# right_shift 16-bit sample by 4 to get 12 bit sample
shifted_samples = np.right_shift(raw_samples, 4)
# Alazar calibration
code_zero = (1 << (bps - 1)) - 0.5
code_range = (1 << (bps - 1)) - 0.5
# Convert to volts
volt_samples = np.float64(input_range_volts *
(shifted_samples - code_zero) / code_range)
return volt_samples
def roundup(num, to_nearest):
"""
Rounds | up the 'num' to the nearest multiple of 'to_nearest', all int
Args:
num: to be rounded up
to_nearest: value to be rounded to int multiple of
return:
rounded up value
"""
remainder = num % to_nearest
| divisor = num // to_nearest
return int(num if remainder == 0 else (divisor + 1)*to_nearest)
# there seems to be alignment related issues with non power of two
# buffers so restrict to power of two for now
# smallest_power = 2 ** math.ceil(math.log2(num))
# return max(smallest_power, 256) |
pawl/CalendarAdmin | application/views/__init__.py | Python | mit | 416 | 0 | __all__ = []
import pkgutil
import inspect
# http://stackoverflow.com/questions/22209564/python-qualified-import-all-in-package
for loader, name, is_pkg in pkgutil.walk_packages(__path__):
mod | ule = loader.find_module(name).load_module(name)
for name, value in inspect.getmembers(module):
if name.start | swith('__'):
continue
globals()[name] = value
__all__.append(name)
|
GiantSteps/essentia | src/python/essentia/extractor/relativeioi.py | Python | agpl-3.0 | 3,063 | 0.024159 | # Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import essentia
from essentia import INFO
from numpy import bincount
namespace = 'rhythm'
dependencies = [ 'tempotap', 'onsetdetection' ]
def compute(audio, pool, options):
INFO('Computing Inter Onsets Intervals...')
sampleRate = options['sampleRate']
bpm = pool.value('rhythm.bpm')
onsets = pool.value('rhythm.onset_times')
# special case
if bpm < 0 or len(onsets) < 2:
pool.add(namespace + '.' + 'relative_ioi_peaks', [float()])#, pool.GlobalScope)
pool.add(namespace + '.' + 'relative_ioi', [float()])#, pool.GlobalScope)
INFO('100% done...')
return
# 32th note interval
interp = 32.
interval = (60./bpm) / interp
riois = []
old = | onsets[0]
for i in range(1,len(onsets)): riois += [ round( (onsets[i] - onsets[i-1]) / interval ) ]
for i in range(2,len(onsets)): riois += [ round( (onsets[i] - onsets[i-2]) / interval ) ]
for i in range(3,len(onsets)): riois += [ round( (onsets[i] - onsets[i-3]) / interval ) ]
for i in range(4,len(onsets)): riois += [ round( (onsets[i] - onsets[i-4]) / interval ) ]
ioidist = essentia.array(bincount(riois))
fullioidist = essentia.array(zip( [p/interp for p in | range(len(ioidist))], [ioi/sum(ioidist) for ioi in ioidist]))
fullioidist = fullioidist[0:interp*5]
peak_detection = essentia.PeakDetection(minPosition = 0., maxPosition = len(ioidist),
maxPeaks = 5, range = len(ioidist) - 1.,
interpolate = True, orderBy = 'amplitude')
pos, mags = peak_detection(ioidist)
# scale back to 1 beat
pos = [ p/interp for p in pos ]
# ratio across whole distribution surface
mags = [ mag/sum(ioidist) for mag in mags ]
# add to pool
pool.add(namespace + '.' + 'relative_ioi_peaks', essentia.array(zip(pos,mags)))#, pool.GlobalScope)
pool.add(namespace + '.' + 'relative_ioi', fullioidist)#, pool.GlobalScope)
# debug plot
if 0:
from pylab import plot, show, hold
plot([i/interp for i in range(len(ioidist))], [ioi/sum(ioidist) for ioi in ioidist],'b+-')
hold(True)
for i,j in zip(pos,mags):
plot([i]*2,[0.,j],'+-')
hold(False)
show()
INFO('100% done...')
|
702nADOS/sumo | tools/webWizard/SimpleWebSocketServer.py | Python | gpl-3.0 | 23,861 | 0.000838 | """
@file SimpleWebSocketServer.py
@author Dave Pallot
@date 2013
@version $Id: SimpleWebSocketServer.py 22608 2017-01-17 06:28:54Z behrisch $
A web socket server implementation to be used by the osm server.py
Originally distributed under the MIT license at
https://github.com/dpallot/simple-websocket-server/tree/master/SimpleWebSocketServer.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2015-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
try:
import SocketServer
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
except ImportError:
import socketserver
from http.server import BaseHTTPRequestHandler
from io import StringIO
import hashlib
import base64
import socket
import struct
import ssl
import sys
import errno
import codecs
from collections import deque
from select import select
__all__ = ['WebSocket', 'SimpleWebSocketServer', 'SimpleSSLWebSocketServer']
class HTTPRequest(BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = StringIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
_VALID_STATUS_CODES = [
1000, 1001, 1002, 1003, 1007, 1008, 1009, 1010, 1011, 3000, 3999, 4000, 4999]
HANDSHAKE_STR = (
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %(acceptstr)s\r\n\r\n"
)
GUID_STR = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
STREAM = 0x0
TEXT = 0x1
BINARY = 0x2
CLOSE = 0x8
PING = 0x9
PONG = 0xA
HEADERB1 = 1
HEADERB2 = 3
LENGTHSHORT = 4
LENGTHLONG = 5
MASK = 6
PAYLOAD = 7
MAXHEADER = 65536
MAXPAYLOAD = 33554432
class WebSocket(object):
def __init__(self, server, sock, address):
self.server = server
self.client = sock
self.address = address
self.handshaked = False
self.headerbuffer = ''
self.headertoread = 2048
self.fin = 0
self.data = bytearray()
self.opcode = 0
self.hasmask = 0
self.maskarray = None
self.length = 0
self.lengtharray = None
self.index = 0
self.request = None
self.usingssl = False
self.frag_start = False
self.frag_type = BINARY
self.frag_buffer = None
self.frag_decoder = codecs.getincrementaldecoder(
'utf-8')(errors='strict')
self.closed = False
self.sendq = deque()
self.state = HEADERB1
# restrict the size of header and payload for security reasons
self.maxheader = MAXHEADER
self.maxpayload = MAXPAYLOAD
def handleMessage(self):
"""
Called when websocket frame is received.
To access the frame data call self.data.
If the frame is Text then self.data is a unicode object.
If the frame is Binary then self.data is a bytearray object.
"""
pass
def handleConnected(self):
"""
Called when a websocket client connects to the server.
"""
pass
def handleClose(self):
"""
Called when a websocket server gets a Close frame from a client.
"""
pass
def _handlePacket(self):
if self.opcode == CLOSE:
pass
elif self.opcode == STREAM:
pass
elif self.opcode == TEXT:
pass
elif self.opcode == BINARY:
pass
elif self.opcode == PONG or self.opcode == PING:
if len(self.data) > 125:
raise Exception('control frame length can not be > 125')
else:
# unknown or reserved opcode so just close
raise Exception('unknown opcode')
if self.opcode == CLOSE:
status = 1000
reason = u''
length = len(self.data)
if length == 0:
pass
elif length >= 2:
status = struct.unpack_from('!H', self.data[:2])[0]
reason = self.data[2:]
if status not in _VALID_STATUS_CODES:
status = 1002
if len(reason) > 0:
try:
reason = reason.decode('utf-8', errors='strict')
except:
status = 1002
else:
status = 1002
self.close(status, reason)
return
elif self.fin == 0:
if self.opcode != STREAM:
if self.opcode == PING or self.opcode == PONG:
raise Exception('control messages can not be fragmented')
self.frag_type = self.opcode
self.frag_start = True
self.frag_decoder.reset()
if self.frag_type == TEXT:
self.frag_buffer = []
utf_str = self.frag_decoder.decode(self.data, final=False)
if utf_str:
s | elf.frag_buffer.append(utf_str)
else:
self.frag_buffer = bytearray()
self.frag_buffer.extend(self.data)
else:
if self.frag_start is False:
raise Exception('fragmentation protocol error')
if self.frag_type == TEXT:
utf_str = self.frag_decoder.decode(self.data, final=False)
if utf_str:
| self.frag_buffer.append(utf_str)
else:
self.frag_buffer.extend(self.data)
else:
if self.opcode == STREAM:
if self.frag_start is False:
raise Exception('fragmentation protocol error')
if self.frag_type == TEXT:
utf_str = self.frag_decoder.decode(self.data, final=True)
self.frag_buffer.append(utf_str)
self.data = u''.join(self.frag_buffer)
else:
self.frag_buffer.extend(self.data)
self.data = self.frag_buffer
self.handleMessage()
self.frag_decoder.reset()
self.frag_type = BINARY
self.frag_start = False
self.frag_buffer = None
elif self.opcode == PING:
self._sendMessage(False, PONG, self.data)
elif self.opcode == PONG:
pass
else:
if self.frag_start is True:
raise Exception('fragmentation protocol error')
if self.opcode == TEXT:
try:
self.data = self.data.decode('utf-8', errors='strict')
except Exception as exp:
raise Exception('invalid utf-8 payload')
self.handleMessage()
def _handleData(self):
# do the HTTP header and handshake
if self.handshaked is False:
data = self.client.recv(self.headertoread)
if not data:
raise Exception("remote socket closed")
else:
# accumulate
self.headerbuffer += data
if len(self.headerbuffer) >= self.maxheader:
raise Exception('header exceeded allowable size')
# indicates end of HTTP header
if '\r\n\r\n' in self.headerbuffer:
self.request = HTTPRequest(self.headerbuffer)
# handshake rfc 6455
if 'Sec-WebSocket-Key'.lower() in self.request.headers:
key = self.request.headers['Sec-WebSocket-Key'.lower()]
hStr = HANDSHAKE_STR % {
'accep |
iwob/pysv | pysv/smt_synthesis.py | Python | mit | 17,716 | 0.005532 | from pysv.smtlib.synth import SynthesisConstr
from pysv.smtlib.synth import SynthesisConstrTestCases
from pysv.templates import *
from pysv.contract import *
from pysv import solvers
from pysv import smt_common
#
# Python ast package documentation: https://docs.python.org/2/library/ast.html
# Python _ast package documentation: not found
# Some useful information: https://julien.danjou.info/blog/2015/python-ast-checking-method-declaration
#
# To run synthesis functions required is Z3 solver (installation instruction in README.txt). Some links:
# - Z3 project main page: https://github.com/Z3Prover/z3
# - gene | ral Z3 tutorial: http://rise4fun.com/z3/tutorial/guide
# - python Z3 tutorial: http://cpl0.net/~argp/papers/z3py-guide.pdf
# - Advanced python materials: http: | //www.cs.tau.ac.il/~msagiv/courses/asv/z3py/
#
# *** z3 binding for Scala: http://lara.epfl.ch/~psuter/ScalaZ3/
#
# z3.parse_smt2_string() - a function which can use smt2 code instead of explicitly using python api.
# (set-option :macro-finder true) - allows for filling bodies of functions.
#
class SynthesizerSMT(object):
def __init__(self, code, code_pre, code_post, program_vars, env, holes_defs, assertions = None):
assert (type(holes_defs) == list), 'Holes definitions should be contained in the list!'
if assertions is None:
assertions = []
self.code = code
self.code_pre = code_pre
self.code_post = code_post
self.program_vars = program_vars
self.holes_defs = holes_defs
self.env = env
self.assertions = assertions
self.exclude_assertions = []
self.result = None
def synthesize(self, assertions = None):
"""Synthesizes a program from specification using only user-defined assertions."""
if assertions is None:
assertions = self.assertions
self.result = synthesize(self.code, self.code_pre, self.code_post, self.program_vars,
self.env, self.holes_defs, assertions)
return self.result
def next_program(self):
"""Synthesizes a new program different from all programs previously generated by this method. This is done by adding assertions for excluding combinations of productions used in previous models."""
if self.result is not None:
a = SynthesizerSMT.get_assertion_excluding_model(self.result)
self.exclude_assertions.append(a)
return self.synthesize(self.all_assertions())
def find_all_possible_programs(self):
"""Returns a list of all programs meeting the specification."""
programs = []
while True:
res = self.next_program()
if res.decision != 'sat':
break
programs.append(res.final_code)
return programs
def reset(self):
"""Clears all result-oriented data produced in this instance since initialization."""
self.exclude_assertions = []
self.result = None
def all_assertions(self):
"""Returns list with both user-defined assertions and internal assertions added during searching for the next program."""
res = []
res.extend(self.assertions)
res.extend(self.exclude_assertions)
return res
@staticmethod
def get_assertion_excluding_model(result):
used_vars = result.used_struct_vars
text = '(assert (or '
for v in used_vars:
text += '(not (= ' + v + ' ' + result[v] + ')) '
text += '))'
return text
def _synthesize_universal(smtlib_constr, code, code_pre, code_post, program_vars, env, holes_decls = None, free_vars = None):
ib_smt2, pre_smt2, post_smt2 = smt_common.get_code_in_smt2(code, code_pre, code_post, program_vars, env,
holes_decls)
script = smtlib_constr.produce_script_synthesis(ib_smt2, pre_smt2, post_smt2, program_vars,
holes_decls, free_vars=free_vars)
smt_common.write_script_to_file(script, env)
if env.only_script:
print(script)
return None
else:
utils.logger.info('\n\n******** SCRIPT ********:\n' + script)
# Solving constraints and interpreting result.
res = solvers.run_solver(script, env)
synth_res = SynthesisResult(res, code, holes_decls, env)
return synth_res
def synthesize(code, code_pre, code_post, program_vars, env, holes_decls = None, assertions = None, free_vars = None):
"""This synthesizer executes in an external solver SMT-LIB 2.0 script containing
generated constraints. Program is synthesized to be correct with respect to delivered pre- and
post-conditions.
:param code: (str) Source code of the Python program.
:param code_pre: (str) Source code of the Python expression representing all *pre-conditions*.
:param code_post: (str) Source code of the Python expression representing all *post-conditions*.
:param program_vars: (ProgramVars) Object containing information about variables and their types.
:param env: (Options) Synthesis options.
:param holes_decls: (list[HoleDecl]) List containing declarations of all holes present in the program. If empty, then synthesis is performed only on the free variables.
:param assertions: (list[str]) Optional list of SMT-LIB 2.0 commands, which will be appended at the end of the script.
:param free_vars: (list[str]) Names of variables in the program which are free and are part of the final solution together with decisions of how holes should be filled.
:return: (SynthesisResult) Interpreted result of the solver.
"""
if holes_decls is None:
holes_decls = []
if assertions is None:
assertions = env.assertions
assert (type(holes_decls) == list), 'Holes definitions should be contained in the list!'
smtlib = SynthesisConstr(env, assertions)
return _synthesize_universal(smtlib, code, code_pre, code_post, program_vars, env, holes_decls, free_vars)
def synthesize_tc(test_cases, code, code_pre, code_post, program_vars, env, holes_decls = None, assertions = None, free_vars = None):
"""This synthesizer executes in an external solver SMT-LIB 2.0 script containing
generated constraints. Program is synthesized to be correct with respect to delivered pre- and
post-conditions.
:param test_cases: (TestCases) Set of test cases.
:param code: (str) Source code of the Python program.
:param code_pre: (str) Source code of the Python expression representing all *pre-conditions*.
:param code_post: (str) Source code of the Python expression representing all *post-conditions*.
:param program_vars: (ProgramVars) Object containing information about variables and their types.
:param env: (Options) Synthesis options.
:param holes_decls: (list[HoleDecl]) List containing declarations of all holes present in the program. If empty, then synthesis is performed only on the free variables.
:param assertions: (list[str]) Optional list of SMT-LIB 2.0 commands, which will be appended at the end of the script.
:param free_vars: (list[str]) Names of variables in the program which are free and are part of the final solution together with decisions of how holes should be filled.
:return: (SynthesisResult) Interpreted result of the solver.
"""
if holes_decls is None:
holes_decls = []
if assertions is None:
assertions = env.assertions
assert (type(holes_decls) == list), 'Holes definitions should be contained in the list!'
smtlib = SynthesisConstrTestCases(test_cases, env, assertions, holes_decls)
return _synthesize_universal(smtlib, code, code_pre, code_post, program_vars, env, holes_decls, free_vars)
class HoleDecl(object):
"""HoleDecl contains all important information about a single hole in the program.
Attributes:
-----------
id : string
Unique name of the hole.
grammar : Grammar
Grammar defining possible hole's content.
program_vars : ProgramVars | dict[string,string]
Information about vari |
GoogleCloudPlatform/repo-automation-playground | xunit-autolabeler-v2/ast_parser/lib/test_data/appengine/gae_sample.py | Python | apache-2.0 | 695 | 0 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIN | D, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START gae_detected_tag]
# [END gae_detected_tag]
"""
[ST | ART gae_block_comment_tag]
[END gae_block_comment_tag]
"""
|
rnirmal/savanna | savanna/tests/unit/utils/test_api_validator.py | Python | apache-2.0 | 6,499 | 0 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
import unittest2
from savanna.openstack.common import uuidutils
from savanna.utils import api_validator
def _validate(schema, data):
validator = api_validator.ApiValidator(schema)
validator.validate(data)
class ApiValidatorTest(unittest2.TestCase):
def _validate_success(self, schema, data):
return _validate(schema, data)
def _validate_failure(self, schema, data):
self.assertRaises(jsonschema.ValidationError, _validate, schema, data)
def test_validate_required(self):
schema = {
"type": "object",
"properties": {
"prop-1": {
"type": "string",
},
},
}
self._validate_success(schema, {
"prop-1": "asd",
})
self._validate_success(schema, {
"prop-2": "asd",
})
schema["required"] = ["prop-1"]
self._validate_success(schema, {
"prop-1": "asd",
})
self._validate_failure(schema, {
"prop-2": "asd",
})
def test_validate_additionalProperties(self):
schema = {
"type": "object",
"properties": {
"prop-1": {
"type": "string",
},
},
"required": ["prop-1"]
}
self._validate_success(schema, {
"prop-1": "asd",
})
self._validate_success(schema, {
"prop-1": "asd",
"prop-2": "asd",
})
schema["additionalProperties"] = True
self._validate_success(schema, {
"prop-1": "asd",
})
self._validate_success(schema, {
"prop-1": "asd",
"prop-2": "asd",
})
schema["additionalProperties"] = False
self._validate_success(schema, {
"prop-1": "asd",
})
self._validate_failure(schema, {
"prop-1": "asd",
"prop-2": "asd",
})
def test_validate_string(self):
schema = {
"type": "string",
}
self._validate_success(schema, "asd")
self._validate_success(schema, "")
self._validate_failure(schema, 1)
self._validate_failure(schema, 1.5)
self._validate_failure(schema, True)
def test_validate_string_with_length(self):
schema = {
"type": "string",
"minLength": 1,
"maxLength": 10,
}
self._validate_success(schema, "a")
self._validate_success(schema, "a" * 10)
self._validate_failure(schema, "")
self._validate_failure(schema, "a" * 11)
def test_validate_integer(self):
schema = {
'type': 'integer',
}
self._validate_success(schema, 0)
self._validate_success(schema, 1)
self._validate_failure(schema, "1")
self._validate_failure(schema, "a")
self._validate_failure(schema, True)
def test_validate_integer_w_range(self):
schema = {
'type': 'integer',
'minimum': 1,
'maximum': 10,
}
self._validate_success(schema, 1)
self._validate_success(schema, 10)
self._validate_failure(schema, 0)
self._validate_failure(schema, 11)
def test_validate_uuid(self):
schema = {
"type": "string",
"format": "uuid",
}
uuid = uuidutils.generate_uuid()
self._validate_success(schema, uuid)
self._validate_failure(schema, uuid.replace("-", ""))
def test_validate_hostname(self):
schema = {
"type": "string",
"format": "hostname",
}
self._validate_success(schema, "abcd")
self._validate_success(schema, "abcd123")
self._validate_success(schema, "abcd-123")
self._validate_failure(schema, "abcd_123")
self._validate_failure(schema, "_123")
self._validate_failure(schema, | "a" * 64)
self._validate_failure(schema, "")
def test_validate_configs(self):
schema = {
"type": "object",
"properties": {
"configs": {
"type": "configs",
}
},
"additionalProperties": False
}
self._validate_success(schema, {
"configs": {
"at-1": {
"c-1": "c", |
"c-2": 1,
"c-3": True,
},
"at-2": {
"c-4": "c",
"c-5": 1,
"c-6": True,
},
},
})
self._validate_failure(schema, {
"configs": {
"at-1": {
"c-1": 1.5
},
}
})
self._validate_failure(schema, {
"configs": {
1: {
"c-1": "c"
},
}
})
self._validate_failure(schema, {
"configs": {
"at-1": {
1: "asd",
},
}
})
self._validate_failure(schema, {
"configs": {
"at-1": [
"a", "b", "c",
],
}
})
def test_validate_flavor(self):
schema = {
'type': "flavor",
}
self._validate_success(schema, 0)
self._validate_success(schema, 1)
self._validate_success(schema, "0")
self._validate_success(schema, "1")
self._validate_success(schema, uuidutils.generate_uuid())
self._validate_failure(schema, True)
self._validate_failure(schema, 0.1)
self._validate_failure(schema, "0.1")
self._validate_failure(schema, "asd")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.