| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| oliver-sanders/cylc | cylc/flow/flow_mgr.py | Python | gpl-3.0 | 2,831 | 0 |
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Manage flow counter and flow metadata."""
from typing import Dict, Set, Optional
import datetime
from cylc.flow import LOG
from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager
class FlowMgr:
"""Logic to manage flow counter and flow metadata."""
def __init__(self, db_mgr: "WorkflowDatabaseManager") -> None:
"""Initialise the flow manager."""
self.db_mgr = db_mgr
self.flows: Dict[int, Dict[str, str]] = {}
self.counter: int = 0
def get_new_flow(self, description: Optional[str] = None) -> int:
"""Increment flow counter, record flow metadata."""
self.counter += 1
# record start time to nearest second
now = datetime.datetime.now()
now_sec: str = str(
now - datetime.timedelta(microseconds=now.microsecond))
description = description or "no description"
self.flows[self.counter] = {
"description": description,
"start_time": now_sec
}
LOG.info(
f"New flow: {self.counter} "
f"({description}) "
f"{now_sec}"
)
self.db_mgr.put_insert_workflow_flows(
self.counter,
self.flows[self.counter]
)
return self.counter
def load_from_db(self, flow_nums: Set[int]) -> None:
"""Load flow data for scheduler restart.
Sets the flow counter to the max flow number in the DB.
Loads metadata for selected flows (those in the task pool at startup).
"""
self.counter = self.db_mgr.pri_dao.select_workflow_flows_max_flow_num()
self.flows = self.db_mgr.pri_dao.select_workflow_flows(flow_nums)
self._log()
def _log(self) -> None:
"""Write current flow info to log."""
LOG.info(
"Flows:\n" + "\n".join(
(
f"flow: {f} "
f"({self.flows[f]['description']}) "
f"{self.flows[f]['start_time']}"
)
for f in self.flows
)
)
| hayderimran7/tempest | tempest/api/network/test_extra_dhcp_options.py | Python | apache-2.0 | 4,057 | 0.000246 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import test
class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest):
"""
Tests the following operations with the Extra DHCP Options Neutron API
extension:
port create
port list
port show
port update
v2.0 of the Neutron API is assumed. It is also assumed that the Extra
DHCP Options extension is enabled in the [network-feature-enabled]
section of etc/tempest.conf
"""
@classmethod
def skip_checks(cls):
super(ExtraDHCPOptionsTestJSON, cls).skip_checks()
if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
msg = "Extra DHCP Options extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(ExtraDHCPOptionsTestJSON, cls).resource_setup()
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.port = cls.create_port(cls.network)
cls.ip_tftp = ('123.123.123.123' if cls._ip_version == 4
else '2015::dead')
cls.ip_server = ('123.123.123.45' if cls._ip_version == 4
else '2015::badd')
cls.extra_dhcp_opts = [
{'opt_value': 'pxelinux.0', 'opt_name': 'bootfile-name'},
{'opt_value': cls.ip_tftp, 'opt_name': 'tftp-server'},
{'opt_value': cls.ip_server, 'opt_name': 'server-ip-address'}
]
@test.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
def test_create_list_port_with_extra_dhcp_options(self):
# Create a port with Extra DHCP Options
body = self.client.create_port(
network_id=self.network['id'],
extra_dhcp_opts=self.extra_dhcp_opts)
port_id = body['port']['id']
self.addCleanup(self.client.delete_port, port_id)
# Confirm port created has Extra DHCP Options
body = self.client.list_ports()
ports = body['ports']
port = [p for p in ports if p['id'] == port_id]
self.assertTrue(port)
self._confirm_extra_dhcp_options(port[0], self.extra_dhcp_opts)
@test.idempotent_id('9a6aebf4-86ee-4f47-b07a-7f7232c55607')
def test_update_show_port_with_extra_dhcp_options(self):
# Update port with extra dhcp options
name = data_utils.rand_name('new-port-name')
body = self.client.update_port(
self.port['id'],
name=name,
extra_dhcp_opts=self.extra_dhcp_opts)
# Confirm extra dhcp options were added to the port
body = self.client.show_port(self.port['id'])
self._confirm_extra_dhcp_options(body['port'], self.extra_dhcp_opts)
def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts):
retrieved = port['extra_dhcp_opts']
self.assertEqual(len(retrieved), len(extra_dhcp_opts))
for retrieved_option in retrieved:
for option in extra_dhcp_opts:
if (retrieved_option['opt_value'] == option['opt_value'] and
retrieved_option['opt_name'] == option['opt_name']):
break
else:
self.fail('Extra DHCP option not found in port %s' %
str(retrieved_option))
class ExtraDHCPOptionsIpV6TestJSON(ExtraDHCPOptionsTestJSON):
_ip_version = 6
| xmeng17/Malicious-URL-Detection | host/http_proxy/helper.py | Python | mit | 1,790 | 0.009497 |
import codecs
import logging
import random
def import_url(path,lo,hi):
with codecs.open(path,encoding='utf-8') as f:
string = f.read()
arr = string.split('\n')
if not lo:
lo=0
if not hi:
hi=len(arr)
arr=arr[lo:hi]
url_arr = []
want = range(lo,hi)
# returns url and its number
for i,line in enumerate(arr):
if i+lo in want:
url = line.split(':')[0]
num = str(i+lo).zfill(5)
url_arr.append((num,url))
return url_arr
def import_proxy(path,mode):
with open(path) as f:
string = f.read()
arr = string.split('\n')
del(arr[-1])
proxy_arr = []
for line in arr:
if mode=='comma':
line_arr=line.split(',')
addr=line_arr[0]
port=line_arr[1]
line=addr+':'+port
dic = {}
dic['http'] = 'http://' + line
dic['https'] = 'https://' + line
proxy_arr.append(dic)
random.shuffle(proxy_arr)
return proxy_arr
def setLogger(path):
console_logger = logging.getLogger('consoleLogger')
hdlr = logging.FileHandler('./console.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
console_logger.addHandler(hdlr)
console_logger.addHandler(consoleHandler)
console_logger.setLevel(logging.DEBUG)
result_logger = logging.getLogger('resultLogger')
hdlr2 = logging.FileHandler('./'+path,encoding='utf-8')
formatter2 = logging.Formatter('%(message)s')
hdlr2.setFormatter(formatter2)
result_logger.addHandler(hdlr2)
result_logger.setLevel(logging.DEBUG)
return console_logger, result_logger
| daviddoria/itkHoughTransform | Wrapping/WrapITK/Languages/SwigInterface/idx.py | Python | apache-2.0 | 1,236 | 0.01699 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
sys.path.append(sys.path[0]+os.sep+'pygccxml-1.0.0')
import pygccxml, sys, cStringIO
# the output file
outputFile = cStringIO.StringIO()
# init the pygccxml stuff
pygccxml.declarations.scopedef_t.RECURSIVE_DEFAULT = False
pygccxml.declarations.scopedef_t.ALLOW_EMPTY_MDECL_WRAPPER = True
pygccxml_config = pygccxml.parser.config.config_t()
pygccxml_reader = pygccxml.parser.source_reader.source_reader_t(pygccxml_config)
# and read a xml file
res = pygccxml_reader.read_xml_file(sys.argv[1])
global_ns = pygccxml.declarations.get_global_namespace( res )
cable_ns = global_ns.namespace('_cable_')
wrappers_ns = cable_ns.namespace('wrappers')
module = os.path.splitext(os.path.basename(sys.argv[1]))[0]
# iterate over all the typedefs in the _cable_::wrappers namespace
for typedef in wrappers_ns.typedefs():
n = typedef.name
s = typedef.type.decl_string
# drop the :: prefix - it makes swig produce invalid code
if s.startswith("::"):
s = s[2:]
print >> outputFile, "{%s} {%s} {%s}" % (s, n, module)
content = outputFile.getvalue()
if sys.argv[2] != '-':
f = file( sys.argv[2], "w" )
f.write( content )
f.close()
else:
sys.stdout.write( content )
| harshilasu/GraphicMelon | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/pyami/copybot.py | Python | gpl-3.0 | 4,262 | 0.002112 |
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.pyami.scriptbase import ScriptBase
import os, StringIO
class CopyBot(ScriptBase):
def __init__(self):
super(CopyBot, self).__init__()
self.wdir = boto.config.get('Pyami', 'working_dir')
self.log_file = '%s.log' % self.instance_id
self.log_path = os.path.join(self.wdir, self.log_file)
boto.set_file_logger(self.name, self.log_path)
self.src_name = boto.config.get(self.name, 'src_bucket')
self.dst_name = boto.config.get(self.name, 'dst_bucket')
self.replace = boto.config.getbool(self.name, 'replace_dst', True)
s3 = boto.connect_s3()
self.src = s3.lookup(self.src_name)
if not self.src:
boto.log.error('Source bucket does not exist: %s' % self.src_name)
dest_access_key = boto.config.get(self.name, 'dest_aws_access_key_id', None)
if dest_access_key:
dest_secret_key = boto.config.get(self.name, 'dest_aws_secret_access_key', None)
s3 = boto.connect_s3(dest_access_key, dest_secret_key)
self.dst = s3.lookup(self.dst_name)
if not self.dst:
self.dst = s3.create_bucket(self.dst_name)
def copy_bucket_acl(self):
if boto.config.get(self.name, 'copy_acls', True):
acl = self.src.get_xml_acl()
self.dst.set_xml_acl(acl)
def copy_key_acl(self, src, dst):
if boto.config.get(self.name, 'copy_acls', True):
acl = src.get_xml_acl()
dst.set_xml_acl(acl)
def copy_keys(self):
boto.log.info('src=%s' % self.src.name)
boto.log.info('dst=%s' % self.dst.name)
try:
for key in self.src:
if not self.replace:
exists = self.dst.lookup(key.name)
if exists:
boto.log.info('key=%s already exists in %s, skipping' % (key.name, self.dst.name))
continue
boto.log.info('copying %d bytes from key=%s' % (key.size, key.name))
prefix, base = os.path.split(key.name)
path = os.path.join(self.wdir, base)
key.get_contents_to_filename(path)
new_key = self.dst.new_key(key.name)
new_key.set_contents_from_filename(path)
self.copy_key_acl(key, new_key)
os.unlink(path)
except:
boto.log.exception('Error copying key: %s' % key.name)
def copy_log(self):
key = self.dst.new_key(self.log_file)
key.set_contents_from_filename(self.log_path)
def main(self):
fp = StringIO.StringIO()
boto.config.dump_safe(fp)
self.notify('%s (%s) Starting' % (self.name, self.instance_id), fp.getvalue())
if self.src and self.dst:
self.copy_keys()
if self.dst:
self.copy_log()
self.notify('%s (%s) Stopping' % (self.name, self.instance_id),
'Copy Operation Complete')
if boto.config.getbool(self.name, 'exit_on_completion', True):
ec2 = boto.connect_ec2()
ec2.terminate_instances([self.instance_id])
| sdgdsffdsfff/py-trace-parser | traceback_parser.py | Python | mit | 7,757 | 0.015227 |
#coding=utf-8
'''
Created on 2015-05-18
python traceback parser
@author: hzwangzhiwei
'''
import json
import re
def str_is_empty(s):
if s is None or s.strip() == '':
return True
return False
class TracebackParser(object):
'''
parser
'''
tb_is_trace = True
tb_content = ''
tb_header = 'Traceback (most recent call last):'
tb_files = [] # info on the files involved: file, line, method
tb_type = '' # exception type, e.g. AttributeError
tb_msg = '' # exception message, e.g. 'NoneType' object has no attribute 'model'
def __init__(self):
'''
Constructor
'''
#do nothing
pass
def _try_tb_file(self, line, header = 'Traceback (most recent call last):'):
'''Try to parse the line as an involved-file entry; return a dict on success, False on failure.
'''
# line = 'File "D:\Work\h28\client_replace\client\scriptvatarmembers\EquipMember.py", line 287, in onEnhanceEquip'
tb_files_re = 'File "(.*)"[,] line (\d*), in (.*)'
re_pat = re.compile(tb_files_re)
search_ret = re_pat.search(line)
if search_ret:
g = search_ret.groups()
if g and len(g) == 3:
return {'file' : g[0], 'line': g[1], 'method': g[2]}
return False
def _try_tb_type_msg(self, line):
'''Try to parse the line as the exception type and message; on success set the attributes and return True, otherwise return False.
'''
tb_type_msg_re = '(.*): (.*)'
re_pat = re.compile(tb_type_msg_re)
search_ret = re_pat.search(line)
if search_ret:
g = search_ret.groups()
if g and len(g) == 2:
self.tb_type = g[0]
self.tb_msg = g[1]
return True
return False
def parse(self, content):
self.tb_header = 'Traceback (most recent call last):'
self.tb_files = [] # info on the files involved: file, line, method
self.tb_type = '' # exception type, e.g. AttributeError
self.tb_msg = '' # exception message, e.g. 'NoneType' object has no attribute 'model'
self.tb_content = content
tb_lines = self.tb_content.split('\n')
is_first_line = True
for line in tb_lines:
line = line.strip().lstrip().rstrip()
if str_is_empty(line):
continue
# a line containing tb_header means this is a valid traceback
if is_first_line:
if self.tb_header in line:
is_first_line = False
continue
else:
# not a valid traceback
self.tb_is_trace = False
return False
else:
# parse the lines after the first one
# 1. try to parse as an involved file; on success move on to the next line
tb_file = self._try_tb_file(line)
if tb_file:
self.tb_files.append(tb_file)
continue
# 2. if that fails, try to parse as the error type and message, then move on
self._try_tb_type_msg(line)
return True
def trace_code_info(self):
if self.tb_is_trace:
if self.tb_files and len(self.tb_files) > 0:
return self.tb_files[len(self.tb_files) - 1]
return ('', '', '')
def trace_msg(self):
return (self.tb_type, self.tb_msg)
def tostring(self):
rst = ''
rst += self.tb_header
rst += '\n'
for f in self.tb_files:
rst += json.dumps(f, default = lambda o: o.__dict__)
rst += '\n'
rst += self.tb_type + ': ' + self.tb_msg
return rst
# uniquely identifies a traceback
def to_md5(self):
rst = ''
try:
if self.tb_is_trace:
rst += (self.tb_type + '|' + self.tb_msg)
if self.tb_files and len(self.tb_files) > 0:
f = self.tb_files[len(self.tb_files) - 1] # take the last entry
rst += ('|' + f['file'] + '|' + f['line'] + '|' + f['method'])
except:
rst = ''
import hashlib
m = hashlib.md5()
m.update(rst)
return m.hexdigest().lower()
if __name__ == '__main__':
content = '''
Traceback (most recent call last):
File "D:\Work\h28\client_replace\client\script\lib\client\GateClient.py", line 337, in entity_message
>methodname:(str)onEnhanceEquip
>_done:(NoneType)None
>entitymsg:(class common.proto_python.common_pb2.EntityMessage)routes: ""
id: "UG\022\264\327\037\375$\
>self:(class client.GateClient.GateClient)<client.GateClient.GateClient object at
>entity:(class network.rpcentity.ClientEntities.ClientAvatar)<network.rpcentity.ClientEntities.Client
>_controller:(class mobilerpc.RpcChannel.MobileRpcController)<mobilerpc.RpcChannel.MobileRpcControlle
>entityid:(class bson.objectid.ObjectId)554712b4d71ffd24fb0c7b27
>need_reg_index:(bool)False
>method:(instancemethod)<bound method ClientAvatar.call_rpc_meth
File "D:\Work\h28\client_replace\client\script\lib\common\rpcdecorator.py", line 100, in call_rpc_method
>self:(class network.rpcentity.ClientEntities.ClientAvatar)<network.rpcentity.ClientEntities.Client
>args:(tuple)({u'res': 2, u'eid': u'55481f68d71ffd24f
>rpctype:(int)3
>rpcmethod:(class common.rpcdecorator.RpcMethod)<common.rpcdecorator.RpcMethod object at
File "D:\Work\h28\client_replace\client\script\lib\common\rpcdecorator.py", line 86, in call
>parameters:(dict){u'res': 2, u'eid': u'55481f68d71ffd24fb
>self:(class common.rpcdecorator.RpcMethod)<common.rpcdecorator.RpcMethod object at
>args:(list)[2, '55481f68d71ffd24fb0c7de4', {u'itemI
>entity:(class network.rpcentity.ClientEntities.ClientAvatar)<network.rpcentity.ClientEntities.Client
>arg:(dict){u'itemId': 125, u'star': 3, u'itemType'
>argtype:(class common.RpcMethodArgs.Dict)ed(Dict)
>placeholder:(NoneType)None
>first:(bool)False
File "D:\Work\h28\client_replace\client\script\avatarmembers\EquipMember.py", line 287, in onEnhanceEquip
>res:(int)2
>self:(class network.rpcentity.ClientEntities.ClientAvatar)<network.rpcentity.ClientEntities.Client
>equipUid:(str)55481f68d71ffd24fb0c7de4
>notifyType:(int)2
>newEquipDict:(dict){u'itemId': 125, u'star': 3, u'itemType'
>equip:(class com.Equip.Equip)<com.Equip.Equip object at 0x17251E50>
>oldEquip:(class com.Equip.Equip)<com.Equip.Equip object at 0x2740E7D0>
File "D:\Work\h28\client_replace\client\script\avatarmembers\EquipMember.py", line 401, in getEquipNotifyDict
>newAttrDict:(dict){'basePhyStrAdditionVal': 3352.156471239
>allAttrNameSet:(set)set(['basePhyStrAdditionVal', 'criRate',
>oldAttrDict:(dict){'basePhyStrAdditionVal': 3047.414973854
>self:(class network.rpcentity.ClientEntities.ClientAvatar)<network.rpcentity.ClientEntities.Client
>notifyType:(int)2
>notifyDict:(dict){'notifyType': 2, 'attrList': []}
>chinese_attrName:(str)生命值
>sortedAllAttrNames:(list)[]
File "D:\Work\h28\client_replace\client\script\com\utils\helpers.py", line 2945, in getAttributeNameC2E
>chinese_name:(str)生命值
KeyError: '\xe7\x94\x9f\xe5\x91\xbd\xe5\x80\xbc'
'''
tb_parser = TracebackParser()
tb_parser.parse(content)
print '============'
print tb_parser.tostring()
print '============'
| CRLab/curvox | src/curvox/mesh_comparisons.py | Python | mit | 5,606 | 0.001784 |
import math
import meshlabxml
import os
import tempfile
import plyfile
import numpy as np
import numba
import binvox_rw
import subprocess
def print_hausdorff(hausdorff_distance):
for key, value in hausdorff_distance.items():
print('{}: {}'.format(key, value))
@numba.njit
def minmax(array):
# Ravel the array and return early if it's empty
array = array.ravel()
length = array.size
if not length:
return
# We want to process two elements at once so we need
# an even sized array, but we preprocess the first and
# start with the second element, so we want it "odd"
odd = length % 2
if not odd:
length -= 1
# Initialize min and max with the first item
minimum = maximum = array[0]
i = 1
while i < length:
# Get the next two items and swap them if necessary
x = array[i]
y = array[i+1]
if x > y:
x, y = y, x
# Compare the min with the smaller one and the max
# with the bigger one
minimum = min(x, minimum)
maximum = max(y, maximum)
i += 2
# If we had an even sized array we need to compare the
# one remaining item too.
if not odd:
x = array[length]
minimum = min(x, minimum)
maximum = max(x, maximum)
return minimum, maximum
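# Worked example (illustrative values, not from the original source): for
# minmax(np.array([5, 1, 4, 2])) the loop compares the pair (1, 4), the trailing
# element 2 is handled after the loop, and the function returns (1, 5).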
def hausdorff_distance_one_direction(mesh1_filepath, mesh2_filepath):
script = meshlabxml.create.FilterScript(file_in=[mesh1_filepath, mesh2_filepath], ml_version='1.3.2')
meshlabxml.sampling.hausdorff_distance(script)
script.run_script(print_meshlabserver_output=False, skip_error=True)
return script.hausdorff_distance
@numba.jit
def hausdorff_distance_bi(mesh1_filepath, mesh2_filepath):
# get hausdorff dist from meshlab server
hd_ab = hausdorff_distance_one_direction(mesh1_filepath, mesh2_filepath)
hd_ba = hausdorff_distance_one_direction(mesh2_filepath, mesh1_filepath)
min_distance_bi = min(hd_ab["min_distance"], hd_ba["min_distance"])
max_distance_bi = max(hd_ab["max_distance"], hd_ba["max_distance"])
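# Descriptive note: the bidirectional mean below is the point-weighted average of the
# two one-directional means, and the bidirectional RMS is the square root of the
# point-weighted mean of squared distances, since RMS values cannot simply be averaged.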
sm = hd_ab["mean_distance"] * hd_ab["number_points"] + hd_ba["mean_distance"] * hd_ba["number_points"]
mean_distance_bi = sm / (hd_ab["number_points"] + hd_ba["number_points"])
ms = (hd_ab["rms_distance"] ** 2) * hd_ab["number_points"] + (hd_ba["rms_distance"] ** 2) * hd_ba["number_points"]
rms_distance_bi = math.sqrt(ms / (hd_ab["number_points"] + hd_ba["number_points"]))
return {"min_distance": min_distance_bi,
"max_distance": max_distance_bi,
"mean_distance": mean_distance_bi,
"rms_distance": rms_distance_bi,
"number_points": hd_ab["number_points"]}
@numba.jit
def calculate_voxel_side_length(mesh, grid_size):
minx, maxx = minmax(mesh.vertices[:, 0])
miny, maxy = minmax(mesh.vertices[:, 1])
minz, maxz = minmax(mesh.vertices[:, 2])
return max(abs(minx - maxx) / grid_size,
abs(miny - maxy) / grid_size,
abs(minz - maxz) / grid_size)
@numba.jit
def _jaccard_distance(grid1, grid2):
intersection = np.logical_and(grid1, grid2)
intersection_count = np.count_nonzero(intersection)
union = np.logical_or(grid1, grid2)
union_count = np.count_nonzero(union)
if union_count == 0:
return 0.0
return float(intersection_count) / float(union_count)
def jaccard_similarity(mesh_filepath0, mesh_filepath1, grid_size=40, exact=True):
temp_mesh0_filepath = tempfile.mktemp(suffix=".ply")
temp_mesh1_filepath = tempfile.mktemp(suffix=".ply")
binvox0_filepath = temp_mesh0_filepath.replace(".ply", ".binvox")
binvox1_filepath = temp_mesh1_filepath.replace(".ply", ".binvox")
os.symlink(os.path.abspath(mesh_filepath0), temp_mesh0_filepath)
os.symlink(os.path.abspath(mesh_filepath1), temp_mesh1_filepath)
mesh0 = plyfile.PlyData.read(temp_mesh0_filepath)
minx, maxx = minmax(mesh0['vertex']['x'])
miny, maxy = minmax(mesh0['vertex']['y'])
minz, maxz = minmax(mesh0['vertex']['z'])
# -d: specify voxel grid size (default 256, max 1024)(no max when using -e)
# -e: exact voxelization (any voxel with part of a triangle gets set)(does not use graphics card)
# -bb <minx> <miny> <minz> <maxx> <maxy> <maxz>: force a different input model bounding box
cmd_base = "binvox -pb "
if exact:
cmd_base += "-e "
cmd_base += "-d " + str(grid_size) + " -bb " + str(minx) + " " + str(miny) + " " + str(minz) + " " + str(maxx) + " " + str(maxy) + " " + str(maxz)
mesh0_cmd = cmd_base + " " + temp_mesh0_filepath
mesh1_cmd = cmd_base + " " + temp_mesh1_filepath
process = subprocess.Popen(mesh0_cmd.split(" "), stdout=subprocess.PIPE)
command1_output, _ = process.communicate()
process = subprocess.Popen(mesh1_cmd.split(" "), stdout=subprocess.PIPE)
command2_output, _ = process.communicate()
with open(binvox0_filepath, 'r') as mesh0_binvox_file:
mesh0_binvox = binvox_rw.read_as_3d_array(mesh0_binvox_file)
with open(binvox1_filepath, 'r') as mesh1_binvox_file:
mesh1_binvox = binvox_rw.read_as_3d_array(mesh1_binvox_file)
jaccard = _jaccard_distance(mesh0_binvox.data, mesh1_binvox.data)
if os.path.exists(temp_mesh0_filepath):
os.remove(temp_mesh0_filepath)
if os.path.exists(temp_mesh1_filepath):
os.remove(temp_mesh1_filepath)
if os.path.exists(binvox0_filepath):
os.remove(binvox0_filepath)
if os.path.exists(binvox1_filepath):
os.remove(binvox1_filepath)
return jaccard
| jeroanan/GameCollection | Tests/TestPlatform.py | Python | gpl-3.0 | 1,844 | 0.003254 |
# Copyright (c) David Wilson 2015
# Icarus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Icarus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Icarus. If not, see <http://www.gnu.org/licenses/>.
import unittest
from Platform import Platform
class TestPlatform(unittest.TestCase):
def test_from_dict(self):
Platform.from_dict({"":""})
def test_from_dict_returns_platform(self):
result = Platform.from_dict({"":""})
self.assertIsInstance(result, Platform)
def test_from_dict_performs_mappings(self):
d = {"name": "name",
"description": "description"}
result = Platform.from_dict(d)
self.assertEqual(d["name"], result.name)
self.assertEqual(d["description"], result.description)
def test_from_mongo_result_performs_mapping(self):
"""Initialise the mapper
:param mongo_result: A MongoDB result. The following fields
can currently be mapped:
* _id
* _Platform__name
* _Platform__description
"""
d = {"_
|
id": "id",
"_Platform__name": "name",
"_Platform__description": "description"}
p = Platform.from_mongo_result(d)
self.assertEqual(d["_id"], p.id)
self.assertEqual(d["_Platform__name"], p.name)
self.assertEqual(d["_Platform__description"], p.description)
| tejasjadhav/django-scheduler | examples/basic/apple/urls.py | Python | gpl-3.0 | 747 | 0 |
"""apple URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| graphql-python/graphql-core | tests/execution/test_sync.py | Python | mit | 7,073 | 0.001555 |
from gc import collect
from inspect import isawaitable
from pytest import mark, raises
from graphql import graphql_sync
from graphql.execution import execute, execute_sync
from graphql.language import parse
from graphql.type import GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString
from graphql.validation import validate
def describe_execute_synchronously_when_possible():
def _resolve_sync(root_value, _info):
return root_value
async def _resolve_async(root_value, _info):
return root_value
schema = GraphQLSchema(
GraphQLObjectType(
"Query",
{
"syncField": GraphQLField(GraphQLString, resolve=_resolve_sync),
"asyncField": GraphQLField(GraphQLString, resolve=_resolve_async),
},
),
GraphQLObjectType(
"Mutation",
{"syncMutationField": GraphQLField(GraphQLString, resolve=_resolve_sync)},
),
)
def does_not_return_an_awaitable_for_initial_errors():
doc = "fragment Example on Query { syncField }"
assert execute(schema, parse(doc), "rootValue") == (
None,
[{"message": "Must provide an operation."}],
)
def does_not_return_an_awaitable_if_fields_are_all_synchronous():
doc = "query Example { syncField }"
assert execute(schema, parse(doc), "rootValue") == (
{"syncField": "rootValue"},
None,
)
def does_not_return_an_awaitable_if_mutation_fields_are_all_synchronous():
doc = "mutation Example { syncMutationField }"
assert execute(schema, parse(doc), "rootValue") == (
{"syncMutationField": "rootValue"},
None,
)
@mark.asyncio
async def returns_an_awaitable_if_any_field_is_asynchronous():
doc = "query Example { syncField, asyncField }"
result = execute(schema, parse(doc), "rootValue")
assert isawaitable(result)
assert await result == (
{"syncField": "rootValue", "asyncField": "rootValue"},
None,
)
def describe_execute_sync():
def does_not_return_an_awaitable_for_sync_execution():
doc = "query Example { syncField }"
result = execute_sync(schema, document=parse(doc), root_value="rootValue")
assert result == (
{"syncField": "rootValue"},
None,
)
def does_not_throw_if_not_encountering_async_execution_with_check_sync():
doc = "query Example { syncField }"
result = execute_sync(
schema, document=parse(doc), root_value="rootValue", check_sync=True
)
assert result == (
{"syncField": "rootValue"},
None,
)
@mark.asyncio
@mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
async def throws_if_encountering_async_execution_with_check_sync():
doc = "query Example { syncField, asyncField }"
with raises(RuntimeError) as exc_info:
execute_sync(
schema, document=parse(doc), root_value="rootValue", check_sync=True
)
msg = str(exc_info.value)
assert msg == "GraphQL execution failed to complete synchronously."
@mark.asyncio
@mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
async def throws_if_encountering_async_operation_without_check_sync():
doc = "query Example { syncField, asyncField }"
result = execute_sync(schema, document=parse(doc), root_value="rootValue")
assert result == (
{"syncField": "rootValue", "asyncField": None},
[
{
"message": "String cannot represent value:"
" <coroutine _resolve_async>",
"locations": [(1, 28)],
"path": ["asyncField"],
}
],
)
# garbage collect coroutine in order to not postpone the warning
del result
collect()
def describe_graphql_sync():
def reports_errors_raised_during_schema_validation():
bad_schema = GraphQLSchema()
result = graphql_sync(schema=bad_schema, source="{ __typename }")
assert result == (None, [{"message": "Query root type must be provided."}])
def does_not_return_an_awaitable_for_syntax_errors():
doc = "fragment Example on Query { { { syncField }"
assert graphql_sync(schema, doc) == (
None,
[
{
"message": "Syntax Error: Expected Name, found '{'.",
"locations": [(1, 29)],
}
],
)
def does_not_return_an_awaitable_for_validation_errors():
doc = "fragment Example on Query { unknownField }"
validation_errors = validate(schema, parse(doc))
result = graphql_sync(schema, doc)
assert result == (None, validation_errors)
def does_not_return_an_awaitable_for_sync_execution():
doc = "query Example { syncField }"
assert graphql_sync(schema, doc, "rootValue") == (
{"syncField": "rootValue"},
None,
)
def does_not_throw_if_not_encountering_async_operation_with_check_sync():
doc = "query Example { syncField }"
assert graphql_sync(schema, doc, "rootValue") == (
{"syncField": "rootValue"},
None,
)
@mark.asyncio
@mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
async def throws_if_encountering_async_operation_with_check_sync():
doc = "query Example { syncField, asyncField }"
with raises(RuntimeError) as exc_info:
graphql_sync(schema, doc, "rootValue", check_sync=True)
msg = str(exc_info.value)
assert msg == "GraphQL execution failed to complete synchronously."
@mark.asyncio
@mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
async def throws_if_encountering_async_operation_without_check_sync():
doc = "query Example { syncField, asyncField }"
result = graphql_sync(schema, doc, "rootValue")
assert result == (
{"syncField": "rootValue", "asyncField": None},
[
{
"message": "String cannot represent value:"
" <coroutine _resolve_async>",
"locations": [(1, 28)],
"path": ["asyncField"],
}
],
)
# garbage collect coroutine in order to not postpone the warning
del result
collect()
| OpenTrons/opentrons-api | update-server/otupdate/buildroot/__main__.py | Python | apache-2.0 | 4,273 | 0 |
"""
Entrypoint for the buildroot update server
"""
import argparse
import asyncio
import logging
import logging.config
from . import (get_app, BR_BUILTIN_VERSION_FILE,
config, constants, name_management)
from aiohttp import web
LOG = logging.getLogger(__name__)
try:
# systemd journal is available, we can use its handler
import systemd.journal # noqa(F401)
import systemd.daemon
def _handler_for(topic_name: str,
log_level: int):
return {'class': 'systemd.journal.JournalHandler',
'formatter': 'message_only',
'level': log_level,
'SYSLOG_IDENTIFIER': topic_name}
# By using sd_notify
# (https://www.freedesktop.org/software/systemd/man/sd_notify.html)
# and type=notify in the unit file, we can prevent systemd from starting
# dependent services until we actually say we're ready. By calling this
# after we change the hostname, we make anything with an After= on us
# be guaranteed to see the correct hostname
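# Illustrative sketch only (the real unit files are not shown here): with Type=notify on
# this service, a dependent unit declaring After= on it is held back until _notify_up()
# below has sent READY=1, i.e. until after the hostname change has taken effect.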
def _notify_up():
systemd.daemon.notify("READY=1")
except ImportError:
# systemd journal isn't available, probably running tests
def _handler_for(topic_name: str,
log_level: int):
return {
'class': 'logging.StreamHandler',
'formatter': 'basic',
'level': log_level,
}
def _notify_up():
LOG.info("systemd couldn't be imported (host? test?), not notifying")
def configure_logging(level: int):
config = {
'version': 1,
'formatters': {
'basic': {
'format': '%(name)s %(levelname)s %(message)s'
},
'message_only': {
'format': '%(message)s'
},
},
'handlers': {
'journald': _handler_for('opentrons-update', level)
},
'loggers': {
'otupdate': {
'handlers': ['journald'],
'level': level,
'propagate': False
},
'__main__': {
'handlers': ['journald'],
'level': level,
'propagate': False,
}
},
'root': {
'handlers': ['journald'],
'level': level
}
}
logging.config.dictConfig(config)
def main():
parser = argparse.ArgumentParser(
description='Opentrons update server for buildroot systems')
parser.add_argument('-p', '--port', dest='port', type=int,
help='Port to listen on. Passed to aiohttp')
parser.add_argument('--host', dest='host', type=str, default='127.0.0.1',
help='Host to listen on. Passed to aiohttp')
parser.add_argument('--version-file', dest='version_file',
type=str, default=BR_BUILTIN_VERSION_FILE,
help='Version file path if not default')
parser.add_argument('--log-level', dest='log_level',
choices=['debug', 'info', 'warning', 'error'],
help='Log level', default='info')
parser.add_argument('--config-file', dest='config_file',
type=str, default=None,
help='Config file path. If not specified, falls back '
f'to {config.PATH_ENVIRONMENT_VARIABLE} env var and '
f'then default path {config.DEFAULT_PATH}')
args = parser.parse_args()
loop = asyncio.get_event_loop()
configure_logging(getattr(logging, args.log_level.upper()))
LOG.info("Setting hostname")
hostname = loop.run_until_complete(name_management.setup_hostname())
LOG.info(f"Set hostname to {hostname}")
LOG.info('Building buildroot update server')
app = get_app(args.version_file, args.config_file)
name = app[constants.DEVICE_NAME_VARNAME]
LOG.info(f"Setting advertised name to {name}")
loop.run_until_complete(name_management.set_name(name))
LOG.info('Notifying systemd')
_notify_up()
LOG.info(
f'Starting buildroot update server on http://{args.host}:{args.port}')
web.run_app(app, host=args.host, port=args.port)
if __name__ == '__main__':
main()
| Johnetordoff/osf.io | scripts/create_fakes.py | Python | apache-2.0 | 19,763 | 0.006274 |
# -*- coding: utf-8 -*-
"""Fake data generator.
To use:
1. Install fake-factory.
pip install fake-factory
2. Create your OSF user account
3. Run the script, passing in your username (email).
::
python3 -m scripts.create_fakes --user fred@cos.io
This will create 3 fake public projects, each with 3 fake contributors (with
you as the creator).
To create a project with a complex component structure, pass in a list representing the depth you would
like each component to contain.
Examples:
python3 -m scripts.create_fakes -u fred@cos --components '[1, 1, 1, 1]' --nprojects 1
...will create a project with 4 components.
python3 -m scripts.create_fakes -u fred@cos --components '4' --nprojects 1
...will create a project with a series of components, 4 levels deep.
python3 -m scripts.create_fakes -u fred@cos --components '[1, [1, 1]]' --nprojects 1
...will create a project with two top level components, and one with a depth of 2 components.
python3 -m scripts.create_fakes -u fred@cos --nprojects 3 --preprint True
...will create 3 preprints with the default provider osf
python3 -m scripts.create_fakes -u fred@cos --nprojects 3 --preprint True --preprintprovider osf,test_provider
...will create 3 preprints with the providers osf and test_provider
"""
from __future__ import print_function, absolute_import
import ast
import sys
import mock
import argparse
import logging
import django
import pytz
from faker import Factory
from faker.providers import BaseProvider
django.setup()
from framework.auth import Auth
from osf_tests.factories import UserFactory, ProjectFactory, NodeFactory, RegistrationFactory, PreprintFactory, PreprintProviderFactory, fake_email
from osf import models
from website.app import init_app
class Sciencer(BaseProvider):
# Science term Faker Provider created by @csheldonhess
# https://github.com/csheldonhess/FakeConsumer/blob/master/faker/providers/science.py
word_list = ('abiosis', 'abrade', 'absorption', 'acceleration', 'accumulation',
'acid', 'acidic', 'activist', 'adaptation', 'agonistic', 'agrarian', 'airborne',
'alchemist', 'alignment', 'allele', 'alluvial', 'alveoli', 'ambiparous',
'amphibian', 'amplitude', 'analysis', 'ancestor', 'anodize', 'anomaly',
'anther', 'antigen', 'apiary', 'apparatus', 'application', 'approximation',
'aquatic', 'aquifer', 'arboreal', 'archaeology', 'artery', 'assessment',
'asteroid', 'atmosphere', 'atomic', 'atrophy', 'attenuate', 'aven', 'aviary',
'axis', 'bacteria', 'balance', 'bases', 'biome', 'biosphere', 'black hole',
'blight', 'buoyancy', 'calcium', 'canopy', 'capacity', 'capillary', 'carapace',
'carcinogen', 'catalyst', 'cauldron', 'celestial', 'cells', 'centigrade',
'centimeter', 'centrifugal', 'chemical reaction', 'chemicals', 'chemistry',
'chlorophyll', 'choked', 'chromosome', 'chronic', 'churn', 'classification',
'climate', 'cloud', 'comet', 'composition', 'compound', 'compression',
'condensation', 'conditions', 'conduction', 'conductivity', 'conservation',
'constant', 'constellation', 'continental', 'convection', 'convention', 'cool',
'core', 'cosmic', 'crater', 'creature', 'crepuscular', 'crystals', 'cycle', 'cytoplasm',
'dampness', 'data', 'decay', 'decibel', 'deciduous', 'defoliate', 'density',
'denude', 'dependency', 'deposits', 'depth', 'desiccant', 'detritus',
'development', 'digestible', 'diluted', 'direction', 'disappearance', 'discovery',
'dislodge', 'displace', 'dissection', 'dissolution', 'dissolve', 'distance',
'diurnal', 'diverse', 'doldrums', 'dynamics', 'earthquake', 'eclipse', 'ecology',
'ecosystem', 'electricity', 'elements', 'elevation', 'embryo', 'endangered',
'endocrine', 'energy', 'entropy', 'environment', 'enzyme', 'epidermis', 'epoch',
'equilibrium', 'equine', 'erosion', 'essential', 'estuary', 'ethical', 'evaporation',
'event', 'evidence', 'evolution', 'examination', 'existence', 'expansion',
'experiment', 'exploration ', 'extinction', 'extreme', 'facet', 'fault', 'fauna',
'feldspar', 'fermenting', 'fission', 'fissure', 'flora', 'flourish', 'flowstone',
'foliage', 'food chain', 'forage', 'force', 'forecast', 'forensics', 'formations',
'fossil fuel', 'frequency', 'friction', 'fungi', 'fusion', 'galaxy', 'gastric',
'geo-science', 'geothermal', 'germination', 'gestation', 'global', 'gravitation',
'green', 'greenhouse effect', 'grotto', 'groundwater', 'habitat', 'heat', 'heavens',
'hemisphere', 'hemoglobin', 'herpetologist', 'hormones', 'host', 'humidity', 'hyaline',
'hydrogen', 'hydrology', 'hypothesis', 'ichthyology', 'illumination', 'imagination',
'impact of', 'impulse', 'incandescent', 'indigenous', 'inertia', 'inevitable', 'inherit',
'inquiry', 'insoluble', 'instinct', 'instruments', 'integrity', 'intelligence',
'interacts with', 'interdependence', 'interplanetary', 'invertebrate', 'investigation',
'invisible', 'ions', 'irradiate', 'isobar', 'isotope', 'joule', 'jungle', 'jurassic',
'jutting', 'kilometer', 'kinetics', 'kingdom', 'knot', 'laser', 'latitude', 'lava',
'lethal', 'life', 'lift', 'light', 'limestone', 'lipid', 'lithosphere', 'load',
'lodestone', 'luminous', 'luster', 'magma', 'magnet', 'magnetism', 'mangrove', 'mantle',
'marine', 'marsh', 'mass', 'matter', 'measurements', 'mechanical', 'meiosis', 'meridian',
'metamorphosis', 'meteor', 'microbes', 'microcosm', 'migration', 'millennia', 'minerals',
'modulate', 'moisture', 'molecule', 'molten', 'monograph', 'monolith', 'motion',
'movement', 'mutant', 'mutation', 'mysterious', 'natural', 'navigable', 'navigation',
'negligence', 'nervous system', 'nesting', 'neutrons', 'niche', 'nocturnal',
'nuclear energy', 'numerous', 'nurture', 'obsidian', 'ocean', 'oceanography', 'omnivorous',
'oolites (cave pearls)', 'opaque', 'orbit', 'organ', 'organism', 'ornithology',
'osmosis', 'oxygen', 'paleontology', 'parallax', 'particle', 'penumbra',
'percolate', 'permafrost', 'permutation', 'petrify', 'petrograph', 'phenomena',
'physical property', 'planetary', 'plasma', 'polar', 'pole', 'pollination',
'polymer', 'population', 'precipitation', 'predator', 'prehensile', 'preservation',
'preserve', 'pressure', 'primate', 'pristine', 'probe', 'process', 'propagation',
'properties', 'protected', 'proton', 'pulley', 'qualitative data', 'quantum', 'quark',
'quarry', 'radiation', 'radioactivity', 'rain forest', 'ratio', 'reaction', 'reagent',
'realm', 'redwoods', 'reeds', 'reflection', 'refraction', 'relationships between', 'reptile',
'research', 'resistance', 'resonate', 'rookery', 'rubble', 'runoff', 'salinity', 'sandbar',
'satellite', 'saturation', 'scientific investigation', 'scientist\'s', 'sea floor', 'season',
'sedentary', 'sediment', 'sedimentary', 'seepage', 'seismic', 'sensors', 'shard',
'similarity', 'solar', 'soluble', 'solvent', 'sonic', 'sound', 'source', 'species',
'spectacular', 'spectrum', 'speed', 'sphere', 'spring', 'stage', 'stalactite',
'stalagmites', 'stimulus', 'substance', 'subterranean', 'sulfuric acid', 'surface',
'survival', 'swamp', 'sylvan', 'symbiosis', 'symbol', 'synergy', 'synthesis', 'taiga',
'taxidermy', 'technology', 'tectonics', 'temperate', 'temperature', 'terrestrial',
'thermals',
| pythonvietnam/pbc082015 | NguyenCongDuc/QuanLySV-JSON.py | Python | gpl-2.0 | 2,031 | 0.034466 |
# Student management program. Implements search and add-new functions.
#haind
#python
import json
import os
# build the choice menu
def menu():
print '''
Chao mung ban den voi chuong trinh Quan ly hoc sinh!
Su dung:
1. Them moi hoc sinh
2. Tim kiem hoc sinh
3. Thoat chuong trinh (quit)
'''
return input("Moi ban lua chon: ")
# create the list used as a global variable
ds = list()
# add a new student
def themHS():
print "Ban da lua cho
|
n Them moi hoc sinh"
hs = dict()
name = raw_input("Ho va ten: ")
birth = raw_input("Ngay sinh: ")
addr = raw_input("Dia chi: ")
global ds
# build the student dict
hs['Ho ten'] = name
hs['Ngay sinh'] = birth
hs['Dia chi'] = addr
# add the student to the list
ds.append(hs)
print ""
print "Thong tin hoc sinh vua duoc them vao: %s"%(ds)
print ""
# search for a student
def timHS():
print "Ban da lua chon Tim kiem hoc sinh"
timkiem = raw_input("Moi ban nhap ten hoc sinh muon tim: ")
ketquatim = list()
for i in ds:
if i['Ho ten'] == timkiem:
ketquatim.append(i)
print ""
print "Da ket thuc tim kiem. Ket qua tim kiem la: "
print ketquatim
print ""
# save the student list to a file
def savehs():
global ds
# convert the list to JSON
dljson = json.dumps(ds)
# write it to a text file
try:
with open("dbhs.txt", "wb") as fh:
fh.write(dljson)
print "Da luu ds hs thanh cong"
except Exception:
print "Co loi khi luu file"
# reload the data from file
def loadhs(filename):
global ds
if (os.path.isfile(filename)):
print "File hop le. Bat dau load ds hs"
with open(filename, 'rb') as fh:
data = fh.readline()
ds = json.loads(data)
print "Da load ds hs ok!"
else:
print "File DB khong dung."
# main program loop
vonglap = 1
choice = 0
loadhs("dbhs.txt")
while vonglap == 1:
choice = menu()
if choice == 1:
themHS()
elif choice == 2:
timHS()
elif choice == 3:
vonglap = 0
savehs()
print "Cam on ban da su dung chuong trinh"
| irmen/Pyro4 | examples/gui_eventloop/gui_nothreads.py | Python | mit | 5,532 | 0.001627 |
"""
This example shows a Tkinter GUI application that uses event loop callbacks
to integrate Pyro's event loop into the Tkinter GUI mainloop.
No threads are used. The Pyro event callback is called every so often
to check if there are Pyro events to handle, and handles them synchronously.
"""
import time
import select
import Pyro4
try:
from tkinter import *
import tkinter.simpledialog as simpledialog
except ImportError:
from Tkinter import *
import tkSimpleDialog as simpledialog
# Set the Pyro servertype to the multiplexing select-based server that doesn't
# use a threadpool to service method calls. This way the method calls are
# handled inside the main thread as well.
Pyro4.config.SERVERTYPE = "multiplex"
# The frequency with which the GUI loop calls the Pyro event handler.
PYRO_EVENTLOOP_HZ = 50
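# For example, at 50 Hz the callback is rescheduled with tk.after(1000 // 50), i.e.
# tk.after(20), so the Pyro sockets are polled roughly every 20 ms from the GUI mainloop.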
class PyroGUI(object):
"""
The Tkinter GUI application that also listens for Pyro calls.
"""
def __init__(self):
self.tk = Tk()
self.tk.wm_title("Pyro in a Tkinter GUI eventloop - without threads")
self.tk.wm_geometry("500x500")
buttonframe = Frame(self.tk)
button = Button(buttonframe, text="Messagebox", command=self.button_msgbox_clicked)
button.pack(side=LEFT)
button = Button(buttonframe, text="Add some text", command=self.button_text_clicked)
button.pack(side=LEFT)
button = Button(buttonframe, text="Clear all text", command=self.button_clear_clicked)
button.pack(side=LEFT)
quitbutton = Button(buttonframe, text="Quit", command=self.tk.quit)
quitbutton.pack(side=RIGHT)
frame = Frame(self.tk, padx=2, pady=2)
buttonframe.pack(fill=X)
rlabel = Label(frame, text="Pyro server messages:")
rlabel.pack(fill=X)
self.msg = Message(frame, anchor=NW, width=500, aspect=80, background="white", relief="sunken")
self.msg.pack(fill=BOTH, expand=1)
frame.pack(fill=BOTH)
self.serveroutput = []
def install_pyro_event_callback(self, daemon):
"""
Add a callback to the tkinter event loop that is invoked every so often.
The callback checks the Pyro sockets for activity and dispatches to the
daemon's event process method if needed.
"""
def pyro_event():
while True:
# for as long as the pyro socket triggers, dispatch events
s, _, _ = select.select(daemon.sockets, [], [], 0.01)
if s:
daemon.events(s)
else:
# no more events, stop the loop, we'll get called again soon anyway
break
self.tk.after(1000 // PYRO_EVENTLOOP_HZ, pyro_event)  # reschedule from inside the callback
self.tk.after(1000 // PYRO_EVENTLOOP_HZ, pyro_event)  # initial scheduling when installing the callback
def mainloop(self):
self.tk.mainloop()
def button_msgbox_clicked(self):
# this button event handler is here only to show that gui events are still processed normally
number = simpledialog.askinteger("A normal popup", "Hi there enter a number", parent=self.tk)
def button_clear_clicked(self):
self.serveroutput = []
self.msg.config(text="")
def button_text_clicked(self):
# add some random text to the message list
self.add_message("The quick brown fox jumps over the lazy dog!")
def add_message(self, message):
message = "[{0}] {1}".format(time.strftime("%X"), message)
self.serveroutput.append(message)
self.serveroutput = self.serveroutput[-27:]
self.msg.config(text="\n".join(self.serveroutput))
@Pyro4.expose
class MessagePrinter(object):
"""
The Pyro object that interfaces with the GUI application.
"""
def __init__(self, gui):
self.gui = gui
def message(self, messagetext):
# Add the message to the screen.
# Note that you can't do anything that requires gui interaction
# (such as popping a dialog box asking for user input),
# because the gui (tkinter) is busy processing this pyro call.
# It can't do two things at the same time when embedded this way.
# If you do something in this method call that takes a long time
# to process, the GUI is frozen during that time (because no GUI update
# events are handled while this callback is active).
self.gui.add_message("from Pyro: " + messagetext)
def sleep(self, duration):
# Note that you can't perform blocking stuff at all because the method
# call is running in the gui mainloop thread and will freeze the GUI.
# Try it - you will see the first message but everything locks up until
# the sleep returns and the method call ends
self.gui.add_message("from Pyro: sleeping {0} seconds...".format(duration))
self.gui.tk.update()
time.sleep(duration)
self.gui.add_message("from Pyro: woke up!")
def main():
gui = PyroGUI()
# create a pyro daemon with object
daemon = Pyro4.Daemon()
obj = MessagePrinter(gui)
uri = daemon.register(obj, "pyrogui.message")
gui.add_message("Pyro server started. Not using threads.")
gui.add_message("Use the command line client to send messages.")
urimsg = "Pyro object uri = {0}".format(uri)
gui.add_message(urimsg)
print(urimsg)
# add a Pyro event callback to the gui's mainloop
gui.install_pyro_event_callback(daemon)
# enter the mainloop
gui.mainloop()
if __name__ == "__main__":
main()
| VROOM-Project/vroom-scripts | src/mdvrp_to_json.py | Python | bsd-2-clause | 3,918 | 0.000766 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys
from utils.benchmark import get_matrix
# Generate a json-formatted problem from a MDVRP file.
# Those benchmarks use double precision for matrix costs and results
# are usually reported with 2 decimal places. As a workaround, we
# multiply all costs by CUSTOM_PRECISION before performing the usual
# integer rounding. Comparisons in benchmarks/compare_to_BKS.py are
# adjusted accordingly.
CUSTOM_PRECISION = 1000
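# Illustrative arithmetic (hypothetical cost value): a benchmark cost of 12.34 becomes
# round(12.34 * CUSTOM_PRECISION) == 12340 after integer rounding, so two decimal places
# of the original double-precision costs survive.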
FIRST_LINE = 0
def parse_meta(line):
meta = line.split()
if len(meta) < 1 or int(meta[0]) != 2:
print("Not a MDVRP!")
exit(2)
return {
"VEHICLES_PER_DEPOT": int(meta[1]),
"JOBS": int(meta[2]),
"DEPOTS": int(meta[3]),
}
def parse_jobs(lines, jobs, coords):
for i in range(len(lines)):
customer = lines[i].split()
if len(customer) < 5:
print("Too few columns in customer line.")
exit(2)
current_coords = [float(customer[1]), float(customer[2])]
jobs.append(
{
"id": int(customer[0]),
"location": current_coords,
"location_index": len(coords),
"service": CUSTOM_PRECISION * int(customer[3]),
"delivery": [int(customer[4])],
}
)
coords.append(current_coords)
def parse_mdvrp(input_file):
with open(input_file, "r") as f:
lines = f.readlines()
meta = parse_meta(lines[FIRST_LINE])
coords = []
# Handle capacity per depot.
first_values = lines[FIRST_LINE + 1].split()
meta["MAX_ROUTE_DURATION"] = int(first_values[0])
meta["CAPACITY"] = int(first_values[1])
for line in lines[FIRST_LINE + 2 : FIRST_LINE + 1 + meta["DEPOTS"]]:
if meta["MAX_ROUTE_DURATION"] != int(line.split()[0]):
print("Inconsistent max route duration!")
exit(1)
if meta["CAPACITY"] != int(line.split()[1]):
print("Inconsistent capacity!")
exit(1)
# Handle customer lines
jobs = []
jobs_start = FIRST_LINE + meta["DEPOTS"] + 1
parse_jobs(lines[jobs_start : jobs_start + meta["JOBS"]], jobs, coords)
# Handle depots and vehicles
vehicles = []
depots_start = jobs_start + meta["JOBS"]
for d in range(meta["DEPOTS"]):
depot = lines[depots_start + d].split()
if len(depot) < 5:
print("Too few columns in depot line.")
exit(2)
depot_id = int(depot[0])
depot_coords = [float(depot[1]), float(depot[2])]
location_index = len(coords)
coords.append(depot_coords)
for v in range(1, 1 + meta["VEHICLES_PER_DEPOT"]):
vehicles.append(
{
"id": 100 * depot_id + v,
"profile": "euc_2D",
"start": depot_coords,
"start_index": location_index,
"end": depot_coords,
"end_index": location_index,
"capaci
|
ty": [meta["CAPACITY"]],
}
)
meta["VEHICLES"] = len(vehicles)
if meta["MAX_ROUTE_DURATION"] != 0:
for vehicle in vehicles:
vehicle["time_window"] = [0, CUSTOM_PRECISION * meta["MAX_ROUTE_DURATION"]]
matrix = get_matrix(coords, CUSTOM_PRECISION)
return {
"meta": meta,
"vehicles": vehicles,
"jobs": jobs,
"matrices": {"euc_2D": {"durations": matrix}},
}
if __name__ == "__main__":
input_file = sys.argv[1]
instance_name = input_file[: input_file.rfind(".txt")]
output_name = instance_name + ".json"
print("- Writing problem " + input_file + " to " + output_name)
json_input = parse_mdvrp(input_file)
json_input["meta"]["NAME"] = instance_name
with open(output_name, "w") as out:
json.dump(json_input, out)
| xzturn/caffe2 | caffe2/python/predictor/predictor_py_utils.py | Python | apache-2.0 | 4,921 | 0.000203 |
## @package predictor_py_utils
# Module caffe2.python.predictor.predictor_py_utils
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, scope
def create_predict_net(predictor_export_meta):
"""
Return the input prediction net.
"""
# Construct a new net to clear the existing settings.
net = core.Net(predictor_export_meta.predict_net.name or "predict")
net.Proto().op.extend(predictor_export_meta.predict_net.op)
net.Proto().external_input.extend(
predictor_export_meta.inputs + predictor_export_meta.parameters)
net.Proto().external_output.extend(predictor_export_meta.outputs)
net.Proto().arg.extend(predictor_export_meta.predict_net.arg)
if predictor_export_meta.net_type is not None:
net.Proto().type = predictor_export_meta.net_type
if predictor_export_meta.num_workers is not None:
net.Proto().num_workers = predictor_export_meta.num_workers
return net.Proto()
def create_predict_init_net(ws, predictor_export_meta):
"""
Return an initialization net that zero-fills all the input and
output blobs, using the shapes from the provided workspace. This is
necessary as there is no shape inference functionality in Caffe2.
"""
net = core.Net("predict-init")
def zero_fill(blob):
shape = predictor_export_meta.shapes.get(blob)
if shape is None:
if blob not in ws.blobs:
raise Exception(
"{} not in workspace but needed for shape: {}".format(
blob, ws.blobs))
shape = ws.blobs[blob].fetch().shape
# Explicitly null-out the scope so users (e.g. PredictorGPU)
# can control (at a Net-global level) the DeviceOption of
# these filling operators.
with scope.EmptyDeviceScope():
net.ConstantFill([], blob, shape=shape, value=0.0)
external_blobs = predictor_export_meta.inputs + \
predictor_export_meta.outputs
for blob in external_blobs:
zero_fill(blob)
net.Proto().external_input.extend(external_blobs)
if predictor_export_meta.extra_init_net:
net.AppendNet(predictor_export_meta.extra_init_net)
# Add the model_id in the predict_net to the init_net
AddModelIdArg(predictor_export_meta, net.Proto())
return net.Proto()
def get_comp_name(string, name):
if name:
return string + '_' + name
return string
def _ProtoMapGet(field, key):
'''
Given the key, get the value of the repeated field.
Helper function used by protobuf since it doesn't have map construct
'''
for v in field:
if (v.key == key):
return v.value
return None
def GetPlan(meta_net_def, key):
return _ProtoMapGet(meta_net_def.plans, key)
def GetPlanOriginal(meta_net_def, key):
return _ProtoMapGet(meta_net_def.plans, key)
def GetBlobs(meta_net_def, key):
blobs = _ProtoMapGet(meta_net_def.blobs, key)
if blobs is None:
return []
return blobs
def GetNet(meta_net_def, key):
return _ProtoMapGet(meta_net_def.nets, key)
def GetNetOriginal(meta_net_def, key):
return _ProtoMapGet(meta_net_def.nets, key)
def GetApplicationSpecificInfo(meta_net_def, key):
return _ProtoMapGet(meta_net_def.applicationSpecificInfo, key)
def AddBlobs(meta_net_def, blob_name, blob_def):
blobs = _ProtoMapGet(meta_net_def.blobs, blob_name)
if blobs is None:
blobs = meta_net_def.blobs.add()
blobs.key = blob_name
blobs = blobs.value
for blob in blob_def:
blobs.append(blob)
def AddPlan(meta_net_def, plan_name, plan_def):
meta_net_def.plans.add(key=plan_name, value=plan_def)
def AddNet(meta_net_def, net_name, net_def):
meta_net_def.nets.add(key=net_name, value=net_def)
def GetArgumentByName(net_def, arg_name):
for arg in net_def.arg:
if arg.name == arg_name:
return arg
return None
def AddModelIdArg(meta_net_def, net_def):
"""Takes the model_id from the predict_net of meta_net_def (if it is
populated) and adds it to the net_def passed in. This is intended to be
called on init_nets, as their model_id is not populated by default, but
should be the same as that of the predict_net
"""
# Get model_id from the predict_net, assuming it's an integer
model_id = GetArgumentByName(meta_net_def.predict_net, "model_id")
if model_id is None:
return
model_id = model_id.i
# If there's another model_id on the net, replace it with the new one
old_id = GetArgumentByName(net_def, "model_id")
if old_id is not None:
old_id.i = model_id
return
# Add as an integer argument, this is also assumed above
arg = net_def.arg.add()
arg.name = "model_id"
arg.i = model_id
| Ircam-Web/mezzanine-organization | organization/network/migrations/0100_organization_slug.py | Python | agpl-3.0 | 594 | 0.001684 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-04-07
|
12:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization_network', '0099_organization_validation_status'),
]
operations = [
migrations.AddField(
|
model_name='organization',
name='slug',
field=models.CharField(blank=True, help_text='Leave blank to have the URL auto-generated from the name.', max_length=2000, null=True, verbose_name='URL'),
),
]
|
mwbetrg/englishdb
|
send-email.py
|
Python
|
cc0-1.0
| 315
| 0.009524
|
import smtplib
fromaddr = 'mwbetrg@gmail.com'
toaddrs
|
= 'awangjangok@gmail.com'
msg = 'Why,Oh why!'
username = 'mwbetrg@gmail.com'
password = '5147mwbe'
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls
|
()
server.login(username,password)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
|
jeryfast/piflyer
|
piflyer/elevons.py
|
Python
|
apache-2.0
| 5,001
| 0.002799
|
from servo_handler import servo_handler
import number_range as n
TILT_UP_LIMIT = 90
TILT_DOWN_LIMIT = -90
class elevons:
def __init__(self):
self.left = servo_handler(1)
self.right = servo_handler(2)
self.multiplier = 2
# mobile devide tilt limits
self.pitchUpLimit = 45
self.pitchDownLimit = -45
self.rollUpLimit = 45
self.rollDownLimit = -45
self.setMultiplier(self.multiplier)
self.setServosUpDownLimit(0, 100)
## Servo settings methods
# servo multiplier, default 2 because of extended range due to elevon mixing (50% pitch 50%roll)
def setMultiplier(self, multiplier):
self.left.setMultiplier(multiplier)
self.right.setMultiplier(multiplier)
# see servo_handler.py documentation
def setServosUpDownLimit(self, up, down):
self.left.setUpDownLimit(up, down)
self.right.setUpDownLimit(up, down)
# mobile device pitch limits
def setPitchTiltLimits(self, up, down):
if (up <= TILT_UP_LIMIT and down >= TILT_DOWN_LIMIT):
self.pitchUpLimit = up
self.pitchDownLimit = down
print("pitch limit: up:%d, down:%d" % (up, down))
# mobile device roll limits
def setRollTiltLimits(self, up, down):
if (up <= TILT_UP_LIMIT and down >= TILT_DOWN_LIMIT):
self.rollUpLimit = up
self.rollDownLimit = down
print("roll limit: left:%d, right:%d" % (down, up))
# servoUpDirectionSettings
def setServosUpDirection(self, left, right):
self.left.setUpDirection(left)
self.right.setUpDirection(right)
def setPitch(self, pitch):
self.left.setPositionFromTilt(pitch / 2)
self.right.setPositionFromTilt(pitch / 2)
def setRoll(self, roll):
self.left.setPositionFromTilt(-roll / 2)
self.right.setPositionFromTilt(roll / 2)
def setPitchRoll(self, pitch, roll):
self.left.setPositionFromTilt(pitch / 2 - roll / 2)
self.right.setPositionFromTilt(pitch / 2 + roll / 2)
# set pitch only, no mixing - not tested!
def setPitchFromInput(self, pitch):
pitch = n.arduino_map(n.clamp(pitch, self.pitchDownLimit, self.pitchUpLimit), self.pitchDownLimit,
self.pitchUpLimit, -45, 45)
self.setPitch(pitch)
# set roll only, no mixing - not tested!
def setRollFromInput(self, roll):
roll = n.arduino_map(n.clamp(roll, self.rollDownLimit, self.rollUpLimit), self.rollDownLimit, self.rollUpLimit,
-45, 45)
self.setRoll(roll)
# print("servo L, R: %d %d" % (self.left.getPosition(), self.right.getPosition()))
# pitch and roll update, elevons specific method - tested
def setPitchRollFromInput(self, pitch, roll):
# both elevons have equal limits to pitch and roll input
        # pitch and roll input have separate limits
pitch = n.arduino_map(n.clamp(pitch, self.pitchDownLimit, self.pitchUpLimit), self.pitchDownLimit,
self.pitchUpLimit, -45, 45)
roll = n.arduino_map(n.clamp(roll, self.rollDownLimit, self.rollUpLimit), self.rollDownLimit, self.rollUpLimit,
-
|
45, 45)
self.setPitchRoll(pitch, roll)
# manual - raw control
def setAngle(self, pitch, roll):
#print("pitch,roll: %d %d"%(pitch,roll))
self.setPitchRollFromInput(pitch, roll)
# print("servo L, R: %d %d"%(self.left.getPosition(),self.right.getPosition()))
## Stabilize and Autopilot mode methods - not tested!, just draft
"""
def turnRight(self, val=1):
self.left.add(val)
self.right.sub(val)
def turnLeft(self, val=1):
self.
|
left.sub(val)
self.right.add(val)
def pullUp(self):
self.left.add()
self.right.add()
def pullDown(self):
self.left.sub()
self.right.sub()
"""
# stabilize mode algorithm
def stabilize(self, target_pitch, target_roll, pitch, roll):
# idea: map target-sensor values difference to pitch and roll control
self.setPitchRoll(target_pitch - pitch, target_roll - roll)
#print("control pitch/roll", target_pitch - pitch, "/", target_roll - roll)
"""
if(target_pitch<pitch):
self.pullUp()
elif(target_pitch>pitch):
self.pullDown()
if(target_roll<roll):
self.turnRight()
if(target_roll>roll):
self.turnLeft()
"""
"""def controlHdgPitch(self, hdg, pitch):
# idea: level the plane, set the bank angle, turn the plane, level the plane, set to parameters
if(hdg)
self.control()
"""
def turn(self, target_roll, roll, pitch):
# idea: set roll, apply pitch to turn
rolldiff = target_roll-roll
if(abs(rolldiff)>10):
self.setRollFromInput(rolldiff)
else:
self.setPitch(pitch)
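# Illustrative usage sketch (not part of the original module): wire up the
# elevon pair and drive it with manual pitch/roll input. The servo channels
# (1 and 2) come from __init__ above; the tilt limits are example values.
if __name__ == "__main__":
    surfaces = elevons()
    surfaces.setPitchTiltLimits(30, -30)  # clamp device pitch input to +/-30 deg
    surfaces.setRollTiltLimits(30, -30)   # clamp device roll input to +/-30 deg
    surfaces.setAngle(10, -5)             # slight pitch up, slight roll left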
|
JudTown17/solutions-geoprocessing-toolbox
|
capability/test/test_erg_tools/TestERGTools.py
|
Python
|
apache-2.0
| 6,160
| 0.001948
|
# -----------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# TestERGTools.py
# Description: Test ERG Tools Toolbox
# Requirements: ArcGIS Desktop Standard
# ----------------------------------------------------------------------------
# history:
# 4/2/2015 - mf - updates for coding standards and making tests as modules
# ==================================================
import arcpy
import sys
import traceback
import TestUtilities
import os
class LicenseError(Exception):
pass
def testERGByChemical(inFS, inMatTy, inWB, inDN, inLS, outAreas, outLines):
'''Testing ERG By Chemical'''
arcpy.AddMessage("Starting Test: ERG Tools - ERG By Chemical")
arcpy.ERGByChemical_erg(inFS, inMatTy, inWB, inDN, inLS, outAreas, outLines)
return [outAreas, outLines]
def testERGByPlacard(inFS, inPID, inWB, inDN, inLS, outAreas, outLines):
    '''Testing ERG By Placard'''
arcpy.AddMessage("Starting Test: ERG Tools - ERG By Placard")
arcpy.ERGByPlacard_erg(inFS, inPID, inWB, inDN, inLS, outAreas, outLines)
return [outAreas, outLines]
def main():
''' Tool main code '''
try:
arcpy.ImportToolbox(TestUtilities.toolbox)
arcpy.env.overwriteOutput = True
# Set tool param variables
inPoint = arcpy.Point(77.0, 38.9)
inWGS84Point = arcpy.PointGeometry(inPoint)
sr = arcpy.SpatialReference(4326)
inWGS84Point.spatial_reference = sr
# cr
|
eate an in_memory feature class to initially contain the input point
fc = arcpy.CreateFeatureclass_management("in_memory", "tempfc", "POINT",
|
None, "DISABLED", "DISABLED",
sr)[0]
# open and insert cursor
with arcpy.da.InsertCursor(fc, ["SHAPE@"]) as cursor:
cursor.insertRow([inWGS84Point])
# create a featureset object and load the fc
inputFeatureSet = arcpy.FeatureSet()
inputFeatureSet.load(fc)
# set the remaining tool parameters
inputMaterialType = "Allylamine"
inputPlacardID = 1560
inputWindBearing = 10
inputDayOrNight = "Day"
inputLargeOrSmall = "Large"
outputERGAreas = os.path.join(arcpy.env.scratchGDB, "ERGAreas")
outputERGLines = os.path.join(arcpy.env.scratchGDB, "ERGLines")
# Testing ERG By Chemical
outputERGAreas, outputERGLines = testERGByChemical(inputFeatureSet,
inputMaterialType,
inputWindBearing,
inputDayOrNight,
inputLargeOrSmall,
outputERGAreas,
outputERGLines)
# Verify Results
countAreas = int(arcpy.GetCount_management(outputERGAreas).getOutput(0))
print("ERG Area count: " + str(countAreas))
countLines = int(arcpy.GetCount_management(outputERGLines).getOutput(0))
print("ERG Line count: " + str(countLines))
if (countAreas != 3) or (countLines != 3):
print("Invalid output count (there should be 3 areas and 3 lines)!")
raise Exception("Test Failed")
print("Test Passed")
# Testing ERG By Placard
outputERGAreas, outputERGLines = testERGByPlacard(inputFeatureSet,
inputPlacardID,
inputWindBearing,
inputDayOrNight,
inputLargeOrSmall,
outputERGAreas,
outputERGLines)
# Verify Results
countAreas = int(arcpy.GetCount_management(outputERGAreas).getOutput(0))
print("ERG Area count: " + str(countAreas))
countLines = int(arcpy.GetCount_management(outputERGLines).getOutput(0))
print("ERG Line count: " + str(countLines))
if (countAreas != 3) or (countLines != 3):
print("Invalid output count (there should be 3 areas and 3 lines)!")
raise Exception("Test Failed")
print("Test Passed")
except arcpy.ExecuteError:
# Get the arcpy error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
print(msgs)
# return a system error code
sys.exit(-1)
except:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = ("PYTHON ERRORS:\nTraceback info:\n" + tbinfo +
"\nError Info:\n" + str(sys.exc_info()[1]))
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# Print Python error messages for use in Python / Python Window
print(pymsg + "\n")
print(msgs)
# return a system error code
sys.exit(-1)
# MAIN =============================================
if __name__ == "__main__":
main()
|
avinassh/learning-tornado
|
tornado-book/extending-templates/basic_templates/main.py
|
Python
|
mit
| 1,328
| 0.030873
|
#!/usr/bin/env python
import os.path
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
define("port", default=8000, help=
|
"run on the given port", type=int)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render(
"index.html",
|
header_text = "Hi! I am the header",
footer_text = "the copyright stuff"
)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", IndexHandler),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
debug=True,
autoescape=None
)
tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == "__main__":
tornado.options.parse_command_line()
    # an instance of Application is created and sent as a parameter.
# earlier this was done by following :
# app = tornado.web.Application(
# handlers=[(r'/', IndexHandler)],
# template_path=os.path.join(os.path.dirname(__file__), "templates"),
# debug=True,
# autoescape=None
# )
# http_server = tornado.httpserver.HTTPServer(app)
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
|
rcoup/traveldash
|
traveldash/gtfs/migrations/0011_auto__add_fare__add_unique_fare_source_fare_id__add_unique_shape_sourc.py
|
Python
|
bsd-3-clause
| 16,927
| 0.007266
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Fare'
db.create_table('gtfs_fare', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gtfs.Source'], null=True)),
('fare_id', self.gf('django.db.models.fields.CharField')(max_length=20, db_index=True)),
('price', self.gf('django.db.models.fields.FloatField')()),
('currency_type', self.gf('django.db.models.fields.CharField')(max_length=3)),
('payment_method', self.gf('django.db.models.fields.IntegerField')()),
('transfers', self.gf('django.db.models.fields.IntegerField')(null=True)),
('transfer_duration', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('gtfs', ['Fare'])
# Adding unique constraint on 'Fare', fields ['source', 'fare_id']
db.create_unique('gtfs_fare', ['source_id', 'fare_id'])
# Adding unique constraint on 'Shape', fields ['source', 'shape_id']
db.create_unique('gtfs_shape', ['source_id', 'shape_id'])
# Adding field 'Zone.source'
db.add_column('gtfs_zone', 'source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gtfs.Source'], null=True), keep_default=False)
# Adding unique constraint on 'Zone', fields ['source', 'zone_id']
db.create_unique('gtfs_zone', ['source_id', 'zone_id'])
# Deleting field 'FareRule.payment_method'
db.delete_column('gtfs_farerule',
|
'payment_method')
# Deleting field 'FareRule.price'
db.delete_column('gtfs_farerule', 'price')
# Deleting field 'FareRule.currency_type'
db.delete_column('gtfs_farerule', 'currency_type')
# Deleting field 'FareRule.transfer_duration'
db.delete_column('gtfs_farerule', 'transfer_duration')
# Dele
|
ting field 'FareRule.transfers'
db.delete_column('gtfs_farerule', 'transfers')
# Deleting field 'FareRule.farerule_id'
db.delete_column('gtfs_farerule', 'farerule_id')
# Deleting field 'FareRule.agency'
db.delete_column('gtfs_farerule', 'agency_id')
# Adding field 'FareRule.fare'
db.add_column('gtfs_farerule', 'fare', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['gtfs.Fare']), keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'Zone', fields ['source', 'zone_id']
db.delete_unique('gtfs_zone', ['source_id', 'zone_id'])
# Removing unique constraint on 'Shape', fields ['source', 'shape_id']
db.delete_unique('gtfs_shape', ['source_id', 'shape_id'])
# Removing unique constraint on 'Fare', fields ['source', 'fare_id']
db.delete_unique('gtfs_fare', ['source_id', 'fare_id'])
# Deleting model 'Fare'
db.delete_table('gtfs_fare')
# Deleting field 'Zone.source'
db.delete_column('gtfs_zone', 'source_id')
# User chose to not deal with backwards NULL issues for 'FareRule.payment_method'
raise RuntimeError("Cannot reverse this migration. 'FareRule.payment_method' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'FareRule.price'
raise RuntimeError("Cannot reverse this migration. 'FareRule.price' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'FareRule.currency_type'
raise RuntimeError("Cannot reverse this migration. 'FareRule.currency_type' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'FareRule.transfer_duration'
raise RuntimeError("Cannot reverse this migration. 'FareRule.transfer_duration' and its values cannot be restored.")
# Adding field 'FareRule.transfers'
db.add_column('gtfs_farerule', 'transfers', self.gf('django.db.models.fields.IntegerField')(null=True), keep_default=False)
# User chose to not deal with backwards NULL issues for 'FareRule.farerule_id'
raise RuntimeError("Cannot reverse this migration. 'FareRule.farerule_id' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'FareRule.agency'
raise RuntimeError("Cannot reverse this migration. 'FareRule.agency' and its values cannot be restored.")
# Deleting field 'FareRule.fare'
db.delete_column('gtfs_farerule', 'fare_id')
models = {
'gtfs.agency': {
'Meta': {'unique_together': "(('source', 'agency_id'),)", 'object_name': 'Agency'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.TextField', [], {}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'gtfs.block': {
'Meta': {'unique_together': "(('source', 'block_id'),)", 'object_name': 'Block'},
'block_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.calendar': {
'Meta': {'object_name': 'Calendar'},
'end_date': ('django.db.models.fields.DateField', [], {}),
'friday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'saturday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'service': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gtfs.Service']", 'unique': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'sunday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thursday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'gtfs.calendardate': {
'Meta': {'object_name': 'CalendarDate'},
'date': ('django.db.models.fields.DateField', [], {}),
'exception_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Service']"})
},
'gtfs.fare': {
'Meta': {'unique_together': "(('source', 'fare_id'),)", 'object_name': 'Fare'},
'currency_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'fare_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_method': ('django.db.models.fields.IntegerField', [], {}),
'price': ('django.db.models.fields.FloatField', [], {}),
'source': ('django.db.models.fields.related
|
sebastian-code/django-surveys
|
setup.py
|
Python
|
bsd-3-clause
| 2,169
| 0.000461
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
version = get_version('surveys', '__init__.py')
if sys.argv[-1] == 'publish':
try:
import wheel
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
|
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on github:")
os.system("g
|
it tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.md').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-surveys',
version=version,
description="""Surveys for django""",
long_description=readme + '\n\n' + history,
author='Sebastian Reyes Espinosa',
author_email='sebaslander@gmail.com',
url='https://github.com/sebastian-code/django-surveys',
packages=[
'surveys',
],
include_package_data=True,
install_requires=[
'jsonfield==1.0.3',
],
license="BSD",
zip_safe=False,
keywords='django-surveys',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
johncbolton/amcpy
|
111815001.py
|
Python
|
gpl-2.0
| 2,105
| 0.060333
|
import random
import math
import sympy
from sympy import latex, fraction, Symbol, Rational
localid =11181500100000
letter=["a","b","c","d"]
n=[0,0,0,0,0,0]
m=[0,0,0,0,0]
f = open("111815001.tex","w")  # opens the output LaTeX file
for x in range(0, 1000):
localid = localid +1
writewrong=["\correctchoice{\(","\wrongchoice{\(","\wrongchoice{\(","\wrongchoice{\("]
for count in range (0,5):
n[count]=random.randint(-20, 20)
m[1]=n[4]-n[2]
m[2]=n[3]-n[1]
m[3]=n[2]-n[1]
m[4]=n[4]-n[3]
if n[2]==n[4]:
letter[0]='undefined'
letter[2]=latex(Rational(-m[3],m[2]))
letter[3]=latex(Rational(-m[4],m[3]))
letter[1]=latex(Rational(m[4],m[3]))
else:
letter[0]=latex(Rational(m[1],m[2]))
letter[1]=latex(Rational(-m[1],m[2]))
letter[2]=latex(Rational(-m[2],m[1]))
letter[3]=latex(Rational(m[2],m[1]))
zz=random.randint(1,6)
if zz==1:
letter[1]=latex(Rational(m[4],m[3]))
elif zz==2:
letter[2]=latex(Rational(m[4],m[3]))
elif zz==3:
letter[3]=
|
latex(Rational(m[4],m[3]))
n[5]=random.randint(0,10)
if n[2]==n[4]:
letter[0]='undefined'
elif n[5]==8:
zz=random.randint(1,3)
letter[zz]='undefined'
if(len(letter)==4):
for z in range (0, 4):
writewrong[z]=writewrong[z]+str(letter[z])
random.shuffle(writewrong)
f.write("\n\n\n")
f.write("\\element{slope}{")
f.write("\n")
|
f.write("\\begin{question}{")
f.write(str(localid))
f.write("}")
f.write("\n")
f.write("Find the slope using points: (")
f.write(str(n[1]))
f.write(",")
f.write(str(n[2]))
f.write(") and (")
f.write(str(n[3]))
f.write(",")
f.write(str(n[4]))
f.write("):")
f.write("\n")
f.write("\\begin{choiceshoriz}")
f.write("\n")
for y in range(0, 4):
f.write("\n")
f.write(writewrong[y])
f.write("\)}")
f.write("\n")
f.write("\\end{choiceshoriz}")
f.write("\n")
f.write("\\end{question}")
f.write("\n")
f.write("}")
f.close()
|
Azure/azure-sdk-for-python
|
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/operations/_replication_recovery_services_providers_operations.py
|
Python
|
mit
| 38,715
| 0.005321
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ReplicationRecoveryServicesProvidersOperations(object):
"""ReplicationRecoveryServicesProvidersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicessiterecovery.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param
|
deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._
|
config = config
def list_by_replication_fabrics(
self,
fabric_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RecoveryServicesProviderCollection"]
"""Gets the list of registered recovery services providers for the fabric.
Lists the registered recovery services providers for the specified fabric.
:param fabric_name: Fabric name.
:type fabric_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RecoveryServicesProviderCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.RecoveryServicesProviderCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecoveryServicesProviderCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_replication_fabrics.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RecoveryServicesProviderCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_replication_fabrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationRecoveryServicesProviders'} # type: ignore
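    # Illustrative usage sketch (kept as a comment, not part of the generated
    # client): iterating the pager returned by this operation. The client
    # construction shown is an assumption -- this vault-scoped SDK generally
    # takes the resource group and vault name at client creation time; check
    # the SiteRecoveryManagementClient signature of your SDK version.
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.recoveryservicessiterecovery import SiteRecoveryManagementClient
    #
    #   client = SiteRecoveryManagementClient(
    #       credential=DefaultAzureCredential(),
    #       subscription_id="<subscription-id>",
    #       resource_group_name="<resource-group>",
    #       resource_name="<vault-name>",
    #   )
    #   for provider in client.replication_recovery_services_providers.list_by_replication_fabrics("<fabric-name>"):
    #       print(provider.name)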
def get(
self,
fabric_name, # type: str
provider_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RecoveryServicesProvider"
"""Gets the details of a recovery services provider.
Gets the details of registered recovery services provider.
:param fabric_name: Fabric name.
:type fabric_name: str
:param provider_name: Recovery services provider name.
:type provider_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RecoveryServicesProvider, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicessiterecovery.models.RecoveryServicesProvider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecoveryServicesProvider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Co
|
shelan/collectl-monitoring
|
plotter.py
|
Python
|
apache-2.0
| 3,847
| 0.00208
|
from distutils.dir_util import copy_tree
from operator import itemgetter
import pandas as pd
import sys
from jinja2 import Environment, FileSystemLoader
import os
def generate_reports(folder):
hosts = []
# get all the paths o
|
f the root folder
files = [os.path.join(folder, fn) for fn in next(os.walk(folder))[2] if not fn.startswith(".")]
for logfile in files:
try:
data = pd.read_csv(logfile, delim_whitespace=True, comment='#', header=-1, index_col='timestamp',
parse_dates={'timestamp': [0, 1]})
print "reading data from " + logfile
except Exception, err:
print
|
"duplicate index occured in " + logfile
print "There are two similar timestamps in the log." \
" To correct that error remove the duplicate entry from " + logfile
hostname = os.path.basename(logfile).replace('.tab', "")
host_data = {}
host_data['name'] = hostname
# CPU data
host_data['cpu_data'] = data.ix[:, 2].to_json(date_format='iso')
host_data['cpu_load'] = data.ix[:, 16].to_json(date_format='iso')
# Memorydata
host_data['mem_data'] = data.ix[:, 20].apply(lambda x: x / 1024000).to_json(date_format='iso')
# Disk data
host_data['disk_read'] = data.ix[:, 66].apply(lambda x: x / 1024).to_json(date_format='iso')
host_data['disk_write'] = data.ix[:, 67].apply(lambda x: x / 1024).to_json(date_format='iso')
# Network Data
host_data['net_rx'] = data.ix[:, 57].to_json(date_format='iso')
host_data['net_tx'] = data.ix[:, 58].to_json(date_format='iso')
hosts.append(host_data)
env = Environment(loader=FileSystemLoader('templates'))
env.add_extension("chartkick.ext.charts")
cpu_template = env.get_template('cpu_template.html')
memory_template = env.get_template('memory_template.html')
disk_template = env.get_template('disk_template.html')
network_template = env.get_template('network_template.html')
cpu_output = cpu_template.render(
hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
)
memory_output = memory_template.render(
hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
)
disk_output = disk_template.render(
hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
)
network_output = network_template.render(
hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
)
test_name = os.path.basename(folder)
test_name += "-report"
if not os.path.exists(test_name):
os.mkdir(test_name)
os.chdir(test_name)
# creating folder structure
if not os.path.exists('css'):
os.mkdir('css')
if not os.path.exists('js'):
os.mkdir('js')
if not os.path.exists('img'):
os.mkdir('img')
if not os.path.exists('fonts'):
os.mkdir('fonts')
copy_tree(os.path.abspath('../css'), 'css')
copy_tree(os.path.abspath('../js'), 'js')
copy_tree(os.path.abspath('../img'), 'img')
copy_tree(os.path.abspath('../fonts'), 'fonts')
with open('report_cpu.html', 'w') as f:
f.write(cpu_output)
with open('report_memory.html', 'w') as f:
f.write(memory_output)
with open('report_disk.html', 'w') as f:
f.write(disk_output)
with open('report_network.html', 'w') as f:
f.write(network_output)
def main(argv):
try:
folder = argv[1].strip()
generate_reports(folder)
print "########################################"
print "report generated successfully"
except Exception, err:
print err.message
print "should provide an input folder. ex : python plotter.py <input-folder>"
if __name__ == '__main__':
main(sys.argv)
|
pdef/pdef-python
|
python/src/pdef/tests/test_descriptors.py
|
Python
|
apache-2.0
| 7,220
| 0.000416
|
# encoding: utf-8
from __future__ import unicode_literals
import unittest
from mock import Mock
from pdef.tests.inheritance.protocol import *
from pdef.tests.interfaces.protocol import *
from pdef.tests.messages.protocol import *
class TestMessageDescriptor(unittest.TestCase):
def test(self):
descriptor = TestMessage.descriptor
assert descriptor.pyclass is TestMessage
assert descriptor.base is None
assert descriptor.discriminator is None
assert descriptor.discriminator_value is None
assert len(descriptor.subtypes) == 0
assert len(descriptor.fields) == 3
def test__nonpolymorphic_inheritance(self):
base = TestMessage.descriptor
descriptor = TestComplexMessage.descriptor
assert descriptor.pyclass is TestComplexMessage
assert descriptor.base is TestMessage.descriptor
assert descriptor.inherited_fields == base.fields
assert descriptor.fields == base.fields + descriptor.declared_fields
assert len(descriptor.subtypes) == 0
def test__polymorphic_inheritance(self):
base = Base.descriptor
subtype = Subtype.descriptor
subtype2 = Subtype2.descriptor
msubtype = MultiLevelSubtype.descriptor
discriminator = base.find_field('type')
assert base.base is None
assert subtype.base is base
assert subtype2.base is base
assert msubtype.base is subtype
assert base.discriminator is discriminator
assert subtype.discriminator is discriminator
assert subtype2.discriminator is discriminator
assert msubtype.discriminator is discriminator
assert base.discriminator_value is None
assert subtype.discriminator_value is PolymorphicType.SUBTYPE
assert subtype2.discriminator_value is PolymorphicType.SUBTYPE2
assert msubtype.discriminator_value is PolymorphicType.MULTILEVEL_SUBTYPE
assert set(base.subtypes) == {subtype, subtype2, msubtype}
assert set(subtype.subtypes) == {msubtype}
assert not subtype2.subtypes
assert not msubtype.subtypes
assert base.find_subtype(None) is base
assert base.find_subtype(PolymorphicType.SUBTYPE) is subtype
assert base.find_subtype(PolymorphicType.SUBTYPE2) is subtype2
assert base.find_subtype(PolymorphicType.MULTILEVEL_SUBTYPE) is msubtype
class TestFieldDescriptor(unittest.TestCase):
def test(self):
string0 = TestMessage.string0
bool0 = TestMessage.bool0
assert string0.name == 'string0'
assert string0.type is descriptors.string0
assert bool0.name == 'bool0'
assert bool0.type is descriptors.bool0
def test_discriminator(self):
field = Base.type
assert field.name == 'type'
assert field.type is PolymorphicType.descriptor
assert field.is_discriminator
def test_default_value(self):
message = TestMessage()
assert message.string0 == ''
assert not message.has_string0
message.string0 = 'hello'
assert message.string0 == 'hello'
assert message.has_string0
def test_default_value__set_mutable(self):
message = TestComplexMessage()
assert not message.has_list0
assert not message.has_set0
assert not message.has_map0
assert not message.has_message0
list0 = message.list0
set0 = message.set0
map0 = message.map0
message0 = message.message0
assert list0 == []
assert set0 == set()
assert map0 == {}
assert message0 == TestMessage()
assert message.list0 is list0
assert message.set0 is set0
assert message.map0 is map0
assert message.message0 is message0
def test_python_descriptor_protocol(self):
class A(object):
field = descriptors.field('field', lambda: descriptors.string0)
has_field = field.has_property
def __init__(self, field=None):
self.field = field
a = A()
assert a.field == ''
assert a.has_field is False
a.field = 'hello'
assert a.field == 'hello'
assert a.has_field
class TestInterfaceDescriptor(unittest.TestCase):
def test(self):
descriptor = TestInterface.descriptor
method = descriptor.find_method('method')
assert descriptor.pyclass is TestInterface
assert descriptor.exc is TestException.descriptor
assert len(descriptor.methods) == 13
assert method
def test_inheritance(self):
base = TestInterface.descriptor
descriptor = TestSubInterface.descriptor
assert descriptor.base is base
assert len(descriptor.methods) == (len(base.methods) + 1)
assert descriptor.find_method('subMethod')
assert descriptor.exc is TestException.descriptor
class TestMethodDescriptor(unittest.TestCase):
def test(self):
method = TestInterface.descriptor.find_method('message0')
assert method.name == 'message0'
assert method.result is TestMessage.descriptor
assert len(method.args) == 1
assert method.args[0].name == 'msg'
assert method.args[0].type is TestMessage.descrip
|
tor
def test_args(self):
method = TestInterface.descriptor.find_method('method')
assert len(method.args) == 2
assert method.args[0].name == 'arg0'
assert method.args[1].name == 'arg1'
assert method.args[0].type is descriptors.int32
assert method.args[1].type is descriptors.int32
def test_post_terminal(self):
descriptor = TestInterface.descriptor
method = descriptor.find_method('method')
post = descriptor.
|
find_method('post')
interface = descriptor.find_method('interface0')
assert method.is_terminal
assert not method.is_post
assert post.is_terminal
assert post.is_post
assert not interface.is_terminal
assert not interface.is_post
def test_invoke(self):
service = Mock()
method = TestInterface.descriptor.find_method('method')
method.invoke(service, 1, arg1=2)
service.method.assert_called_with(1, arg1=2)
class TestEnumDescriptor(unittest.TestCase):
def test(self):
descriptor = TestEnum.descriptor
assert descriptor.values == ('ONE', 'TWO', 'THREE')
def test_find_value(self):
descriptor = TestEnum.descriptor
assert descriptor.find_value('one') == TestEnum.ONE
assert descriptor.find_value('TWO') == TestEnum.TWO
class TestListDescriptor(unittest.TestCase):
def test(self):
list0 = descriptors.list0(descriptors.string0)
assert list0.element is descriptors.string0
class TestSetDescriptor(unittest.TestCase):
def test(self):
set0 = descriptors.set0(descriptors.int32)
assert set0.element is descriptors.int32
class TestMapDescriptor(unittest.TestCase):
def test(self):
map0 = descriptors.map0(descriptors.string0, descriptors.int32)
assert map0.key is descriptors.string0
assert map0.value is descriptors.int32
|
1T/harlib
|
harlib/objects/request.py
|
Python
|
lgpl-3.0
| 3,945
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# harlib
# Copyright (c) 2014-2017, Andrew Robbins, All rights reserved.
#
# This library ("it") is free software; it is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; you can redistribute it and/or
# modify it under the terms of LGPLv3 <https://www.gnu.org/licenses/lgpl.html>.
from __future__ import absolute_import
from collections import Mapping
from .metamodel import HarObject
from .messages import (
HarCookie,
HarHeader,
HarQueryStringParam,
HarPostDataParam,
HarMessageBody,
HarMessage,
)
try:
|
from typing import Any, Dict, List, NamedTuple, Optional
except ImportError:
pass
class HarRequestBody(HarMessageBody):
# <postData>
_required = [
'mimeType',
]
_optional = {
|
'_size': -1,
'text': '', # HAR-1.2 required
'comment': '',
'_compression': -1,
'_encoding': '',
'params': [],
}
_types = {
'_size': int,
'_compression': int,
'params': [HarPostDataParam],
}
def __init__(self, obj=None):
# type: (Dict) -> None
har = obj or None
if isinstance(obj, Mapping):
har = obj
elif isinstance(obj, HarObject):
har = obj.to_json()
else:
har = self.decode(obj)
super(HarRequestBody, self).__init__(har)
class HarRequest(HarMessage):
# type: NamedTuple('HarRequest', [
# ('method', str),
# ('url', str),
# ('cookies', List[HarCookie]),
# ('headers', List[HarHeader]),
# ('queryString', List[HarQueryStringParam]),
# ('httpVersion', str),
# ('headersSize', int),
# ('bodySize', int),
# ('postData', HarRequestBody),
# ('_requestLine', str),
# ('_requestLineSize', str),
# ('_endpointID', str),
# ('_originURL', str),
# ('_required', List[str]),
# ('_optional', Dict[str, Any]),
# ('_types', Dict[str, Any]),
# ('_ordered', List[str]),
# ])
_required = [
'method',
'url',
'cookies',
'headers',
'queryString', # HAR-1.2 required
]
_optional = {
'httpVersion': '', # HAR-1.2 required
'headersSize': -1,
'bodySize': -1,
'postData': {'mimeType': 'UNKNOWN'},
'comment': '',
'_requestLine': '',
'_requestLineSize': -1,
'_endpointID': '',
'_originURL': '',
}
_types = {
'cookies': [HarCookie],
'headers': [HarHeader],
'postData': HarRequestBody,
'queryString': [HarQueryStringParam],
'headersSize': int,
'bodySize': int,
'_requestLineSize': int,
}
_ordered = [
'method',
'url',
'httpVersion',
'cookies',
'headers',
'queryString',
'postData',
'headersSize',
'bodySize',
'comment',
'_requestLine',
'_requestLineSize',
]
def __init__(self, obj=None):
# type: (Dict) -> None
har = obj or None
if isinstance(obj, Mapping):
har = obj
elif isinstance(obj, HarObject):
har = obj.to_json()
else:
har = self.decode(obj)
super(HarRequest, self).__init__(har)
@property
def size(self):
# type: () -> int
return self.headersSize + self.bodySize
def get_param(self, name, default=None):
# type: (str, str) -> Optional[str]
for param in self.queryString:
if param.name == name:
return param.value
return default
def post_param(self, name, default=None):
# type: (str, str) -> Optional[str]
for param in self.postData.params:
if param.name == name:
return param
return default
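# Illustrative sketch (not part of the original module): building a request
# record from a plain mapping and reading a query parameter back. Whether the
# nested dicts are coerced into HarQueryStringParam objects is decided by
# HarObject.__init__ in metamodel.py (not shown here), so treat that as an
# assumption.
def _example_request():
    req = HarRequest({
        'method': 'GET',
        'url': 'http://example.com/search?q=har',
        'cookies': [],
        'headers': [],
        'queryString': [{'name': 'q', 'value': 'har'}],
    })
    return req.get_param('q')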
|
mmetak/streamlink
|
src/streamlink/plugins/artetv.py
|
Python
|
bsd-2-clause
| 3,541
| 0.000565
|
"""Plugin for Arte.tv, bi-lingual art and culture channel."""
import re
from itertools import chain
from streamlink.compat import urlparse
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import HDSStream, HLSStream, HTTPStream, RTMPStream
SWF_URL = "http://www.arte.tv/player/v2/jwplayer6/mediaplayer.6.6.swf"
JSON_VOD_URL = "https://api.arte.tv/api/player/v1/config/{}/{}"
JSON_LIVE_URL = "https://api.arte.tv/api/player/v1/livestream/{}"
_url_re = re.compile(r"""
https?://(?:\w+\.)?arte.tv/guide/
(?P<language>[a-z]{2})/
(?:
(?P<video_id>.+?)/.+ | # VOD
(?:direct|live) # Live TV
)
""", re.VERBOSE)
_video_schema = validate.Schema({
"videoJsonPlayer": {
"VSR": validate.any(
|
[],
{
validate.text: {
"height": int,
"mediaType": validate.text,
|
"url": validate.text,
validate.optional("streamer"): validate.text
},
},
),
"VTY": validate.text
}
})
class ArteTV(Plugin):
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
def _create_stream(self, stream, is_live):
stream_name = "{0}p".format(stream["height"])
stream_type = stream["mediaType"]
stream_url = stream["url"]
if stream_type in ("hls", "mp4"):
if urlparse(stream_url).path.endswith("m3u8"):
try:
streams = HLSStream.parse_variant_playlist(self.session, stream_url)
# TODO: Replace with "yield from" when dropping Python 2.
for stream in streams.items():
yield stream
except IOError as err:
self.logger.error("Failed to extract HLS streams: {0}", err)
else:
yield stream_name, HTTPStream(self.session, stream_url)
elif stream_type == "f4m":
try:
streams = HDSStream.parse_manifest(self.session, stream_url)
for stream in streams.items():
yield stream
except IOError as err:
self.logger.error("Failed to extract HDS streams: {0}", err)
elif stream_type == "rtmp":
params = {
"rtmp": stream["streamer"],
"playpath": stream["url"],
"swfVfy": SWF_URL,
"pageUrl": self.url,
}
if is_live:
params["live"] = True
else:
params["playpath"] = "mp4:{0}".format(params["playpath"])
stream = RTMPStream(self.session, params)
yield stream_name, stream
def _get_streams(self):
match = _url_re.match(self.url)
language = match.group('language')
video_id = match.group('video_id')
if video_id is None:
json_url = JSON_LIVE_URL.format(language)
else:
json_url = JSON_VOD_URL.format(language, video_id)
res = http.get(json_url)
video = http.json(res, schema=_video_schema)
if not video["videoJsonPlayer"]["VSR"]:
return
is_live = video["videoJsonPlayer"]["VTY"] == "LIVE"
vsr = video["videoJsonPlayer"]["VSR"].values()
streams = (self._create_stream(stream, is_live) for stream in vsr)
return chain.from_iterable(streams)
__plugin__ = ArteTV
|
dbentley/pants
|
src/python/pants/subsystem/subsystem.py
|
Python
|
apache-2.0
| 6,819
| 0.010412
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under t
|
he Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
|
unicode_literals, with_statement)
from twitter.common.collections import OrderedSet
from pants.option.optionable import Optionable
from pants.option.scope import ScopeInfo
from pants.subsystem.subsystem_client_mixin import SubsystemClientMixin, SubsystemDependency
class SubsystemError(Exception):
"""An error in a subsystem."""
class Subsystem(SubsystemClientMixin, Optionable):
"""A separable piece of functionality that may be reused across multiple tasks or other code.
Subsystems encapsulate the configuration and initialization of things like JVMs,
Python interpreters, SCMs and so on.
Subsystem instances can be global or per-optionable. Global instances are useful for representing
global concepts, such as the SCM used in the workspace. Per-optionable instances allow individual
Optionable objects (notably, tasks) to have their own configuration for things such as artifact
caches.
Each subsystem type has an option scope. The global instance of that subsystem initializes
itself from options in that scope. An optionable-specific instance initializes itself from options
in an appropriate subscope, which defaults back to the global scope.
For example, the global artifact cache options would be in scope `cache`, but the
compile.java task can override those options in scope `cache.compile.java`.
Subsystems may depend on other subsystems, and therefore mix in SubsystemClientMixin.
:API: public
"""
options_scope_category = ScopeInfo.SUBSYSTEM
class UninitializedSubsystemError(SubsystemError):
def __init__(self, class_name, scope):
super(Subsystem.UninitializedSubsystemError, self).__init__(
'Subsystem "{}" not initialized for scope "{}". '
'Is subsystem missing from subsystem_dependencies() in a task? '.format(class_name, scope))
class CycleException(Exception):
"""Thrown when a circular dependency is detected."""
def __init__(self, cycle):
message = 'Cycle detected:\n\t{}'.format(' ->\n\t'.join(
'{} scope: {}'.format(subsystem, subsystem.options_scope) for subsystem in cycle))
super(Subsystem.CycleException, self).__init__(message)
@classmethod
def scoped(cls, optionable):
"""Returns a dependency on this subsystem, scoped to `optionable`.
Return value is suitable for use in SubsystemClientMixin.subsystem_dependencies().
"""
return SubsystemDependency(cls, optionable.options_scope)
@classmethod
def get_scope_info(cls, subscope=None):
if subscope is None:
return super(Subsystem, cls).get_scope_info()
else:
return ScopeInfo(cls.subscope(subscope), ScopeInfo.SUBSYSTEM, cls)
@classmethod
def closure(cls, subsystem_types):
"""Gathers the closure of the `subsystem_types` and their transitive `dependencies`.
:param subsystem_types: An iterable of subsystem types.
:returns: A set containing the closure of subsystem types reachable from the given
`subsystem_types` roots.
:raises: :class:`pants.subsystem.subsystem.Subsystem.CycleException` if a dependency cycle is
detected.
"""
known_subsystem_types = set()
path = OrderedSet()
def collect_subsystems(subsystem):
if subsystem in path:
cycle = list(path) + [subsystem]
raise cls.CycleException(cycle)
path.add(subsystem)
if subsystem not in known_subsystem_types:
known_subsystem_types.add(subsystem)
for dependency in subsystem.subsystem_dependencies():
collect_subsystems(dependency)
path.remove(subsystem)
for subsystem_type in subsystem_types:
collect_subsystems(subsystem_type)
return known_subsystem_types
@classmethod
def subscope(cls, scope):
"""Create a subscope under this Subsystem's scope."""
return '{0}.{1}'.format(cls.options_scope, scope)
# The full Options object for this pants run. Will be set after options are parsed.
# TODO: A less clunky way to make option values available?
_options = None
@classmethod
def set_options(cls, options):
cls._options = options
# A cache of (cls, scope) -> the instance of cls tied to that scope.
_scoped_instances = {}
@classmethod
def global_instance(cls):
"""Returns the global instance of this subsystem.
:API: public
:returns: The global subsystem instance.
:rtype: :class:`pants.subsystem.subsystem.Subsystem`
"""
return cls._instance_for_scope(cls.options_scope)
@classmethod
def scoped_instance(cls, optionable):
"""Returns an instance of this subsystem for exclusive use by the given `optionable`.
:API: public
:param optionable: An optionable type or instance to scope this subsystem under.
:type: :class:`pants.option.optionable.Optionable`
:returns: The scoped subsystem instance.
:rtype: :class:`pants.subsystem.subsystem.Subsystem`
"""
if not isinstance(optionable, Optionable) and not issubclass(optionable, Optionable):
raise TypeError('Can only scope an instance against an Optionable, given {} of type {}.'
.format(optionable, type(optionable)))
return cls._instance_for_scope(cls.subscope(optionable.options_scope))
@classmethod
def _instance_for_scope(cls, scope):
if cls._options is None:
raise cls.UninitializedSubsystemError(cls.__name__, scope)
key = (cls, scope)
if key not in cls._scoped_instances:
cls._scoped_instances[key] = cls(scope, cls._options.for_scope(scope))
return cls._scoped_instances[key]
@classmethod
def reset(cls, reset_options=True):
"""Forget all option values and cached subsystem instances.
Used primarily for test isolation and to reset subsystem state for pantsd.
"""
if reset_options:
cls._options = None
cls._scoped_instances = {}
def __init__(self, scope, scoped_options):
"""Note: A subsystem has no access to options in scopes other than its own.
TODO: We'd like that to be true of Tasks some day. Subsystems will help with that.
Task code should call scoped_instance() or global_instance() to get a subsystem instance.
Tests can call this constructor directly though.
:API: public
"""
super(Subsystem, self).__init__()
self._scope = scope
self._scoped_options = scoped_options
self._fingerprint = None
@property
def options_scope(self):
return self._scope
def get_options(self):
"""Returns the option values for this subsystem's scope.
:API: public
"""
return self._scoped_options
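# Illustrative sketch (not part of the original module): a minimal Subsystem
# subclass in the pants v1 style. `register_options` is inherited from
# Optionable; the exact option registered below is an example, not part of
# this module's API.
class ExampleTool(Subsystem):
  options_scope = 'example-tool'

  @classmethod
  def register_options(cls, register):
    super(ExampleTool, cls).register_options(register)
    register('--version', default='1.0', help='Version of the example tool to use.')

  def version(self):
    # Reads from this subsystem's own scope (or a task-specific subscope).
    return self.get_options().version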
|
1905410/Misago
|
misago/users/models/rank.py
|
Python
|
gpl-2.0
| 1,966
| 0
|
from django.core.urlresolvers import reverse
from djang
|
o.db import models, transaction
from django.utils.encoding import python_2_unicode_compatible
from misago.acl import version as acl_version
from misago.core.utils import slugify
_
|
_all__ = ['Rank']
class RankManager(models.Manager):
def get_default(self):
return self.get(is_default=True)
def make_rank_default(self, rank):
with transaction.atomic():
self.filter(is_default=True).update(is_default=False)
rank.is_default = True
rank.save(update_fields=['is_default'])
@python_2_unicode_compatible
class Rank(models.Model):
name = models.CharField(max_length=255)
slug = models.CharField(unique=True, max_length=255)
description = models.TextField(null=True, blank=True)
title = models.CharField(max_length=255, null=True, blank=True)
roles = models.ManyToManyField('misago_acl.Role', blank=True)
css_class = models.CharField(max_length=255, null=True, blank=True)
is_default = models.BooleanField(default=False)
is_tab = models.BooleanField(default=False)
order = models.IntegerField(default=0)
objects = RankManager()
class Meta:
get_latest_by = 'order'
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.pk:
self.set_order()
else:
acl_version.invalidate()
return super(Rank, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
acl_version.invalidate()
return super(Rank, self).delete(*args, **kwargs)
def get_absolute_url(self):
return reverse('misago:users-rank', kwargs={'slug': self.slug})
def set_name(self, name):
self.name = name
self.slug = slugify(name)
def set_order(self):
try:
self.order = Rank.objects.latest('order').order + 1
except Rank.DoesNotExist:
self.order = 0
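# Illustrative usage sketch (not part of the original module): create a rank,
# derive its slug from the name, and promote it to the default rank. Assumes
# a configured Django environment with the misago migrations applied.
def _create_default_rank(name):
    rank = Rank()
    rank.set_name(name)  # also derives the unique slug
    rank.save()          # set_order() assigns the next order value
    Rank.objects.make_rank_default(rank)
    return rank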
|
vabs22/zulip
|
zerver/lib/test_classes.py
|
Python
|
apache-2.0
| 24,636
| 0.001624
|
from __future__ import absolute_import
from __future__ import print_function
from contextlib import contextmanager
from typing import (cast, Any, Callable, Dict, Iterable, Iterator, List, Mapping, Optional,
Sized, Tuple, Union, Text)
from django.core.urlresolvers import resolve
from django.conf import settings
from django.test import TestCase
from django.test.client import (
BOUNDARY, MULTIPART_CONTENT, encode_multipart,
)
from django.template import loader
from django.test.testcases import SerializeMixin
from django.http import HttpResponse
from django.db.utils import IntegrityError
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib.str_utils import force_text
from zerver.lib.utils import is_remote_server
from zerver.lib import cache
from zerver.tornado.handlers import allocate_handler_id
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, bulk_add_subscriptions,
get_display_recipient, bulk_remove_subscriptions
)
from zerver.lib.test_helpers import (
instrument_url, find_key_by_email,
)
from zerver.models import (
get_stream,
get_user,
get_user_profile_by_email,
get_realm,
get_realm_by_email_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
)
from zerver.lib.request import JsonableError
from zilencer.models import get_remote_server_by_uuid
import base64
import mock
import os
import re
import time
import ujson
import unittest
from six.moves import urllib
from six import binary_type
from zerver.lib.str_utils import NonBinaryStr
from contextlib import contextmanager
import six
API_KEYS = {} # type: Dict[Text, Text]
def flush_caches_for_testing():
# type: () -> None
global API_KEYS
API_KEYS = {}
class UploadSerializeMixin(SerializeMixin):
"""
    We cannot use override_settings to change the upload directory because
    settings.LOCAL_UPLOADS_DIR is used in the url pattern and urls are
    compiled only once. Otherwise, using a different upload directory for
    conflicting test cases would have provided better performance while
    still providing the required isolation.
"""
lockfile = 'var/upload_lock'
@classmethod
def setUpClass(cls, *args, **kwargs):
# type: (*Any, **Any) -> None
if not os.path.exists(cls.lo
|
ckfile):
with open(cls.lockfile, 'w'): # nocoverage -
|
rare locking case
pass
super(UploadSerializeMixin, cls).setUpClass(*args, **kwargs)
class ZulipTestCase(TestCase):
# Ensure that the test system just shows us diffs
maxDiff = None # type: Optional[int]
'''
WRAPPER_COMMENT:
We wrap calls to self.client.{patch,put,get,post,delete} for various
reasons. Some of this has to do with fixing encodings before calling
into the Django code. Some of this has to do with providing a future
    path for instrumentation. Some of it is just consistency.
The linter will prevent direct calls to self.client.foo, so the wrapper
functions have to fake out the linter by using a local variable called
    django_client to fool the regex.
'''
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
# This method should be removed when we migrate to version 3 of Python
import six
if six.PY2:
self.assertRaisesRegex = self.assertRaisesRegexp
super(ZulipTestCase, self).__init__(*args, **kwargs)
DEFAULT_REALM = Realm.objects.get(string_id='zulip')
@instrument_url
def client_patch(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
"""
We need to urlencode, since Django's function won't do it for us.
"""
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.patch(url, encoded, **kwargs)
@instrument_url
def client_patch_multipart(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
"""
Use this for patch requests that have file uploads or
that need some sort of multi-part content. In the future
Django's test client may become a bit more flexible,
so we can hopefully eliminate this. (When you post
with the Django test client, it deals with MULTIPART_CONTENT
automatically, but not patch.)
"""
encoded = encode_multipart(BOUNDARY, info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.patch(
url,
encoded,
content_type=MULTIPART_CONTENT,
**kwargs)
@instrument_url
def client_put(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.put(url, encoded, **kwargs)
@instrument_url
def client_delete(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.delete(url, encoded, **kwargs)
@instrument_url
def client_options(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.options(url, encoded, **kwargs)
@instrument_url
def client_post(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
django_client = self.client # see WRAPPER_COMMENT
return django_client.post(url, info, **kwargs)
@instrument_url
def client_post_request(self, url, req):
# type: (Text, Any) -> HttpResponse
"""
We simulate hitting an endpoint here, although we
actually resolve the URL manually and hit the view
directly. We have this helper method to allow our
instrumentation to work for /notify_tornado and
future similar methods that require doing funny
things to a request object.
"""
match = resolve(url)
return match.func(req)
@instrument_url
def client_get(self, url, info={}, **kwargs):
# type: (Text, Dict[str, Any], **Any) -> HttpResponse
django_client = self.client # see WRAPPER_COMMENT
return django_client.get(url, info, **kwargs)
example_user_map = dict(
hamlet=u'hamlet@zulip.com',
cordelia=u'cordelia@zulip.com',
iago=u'iago@zulip.com',
prospero=u'prospero@zulip.com',
othello=u'othello@zulip.com',
AARON=u'AARON@zulip.com',
aaron=u'aaron@zulip.com',
ZOE=u'ZOE@zulip.com',
)
mit_user_map = dict(
sipbtest=u"sipbtest@mit.edu",
starnine=u"starnine@mit.edu",
espuser=u"espuser@mit.edu",
)
# Non-registered test users
nonreg_user_map = dict(
test=u'test@zulip.com',
test1=u'test1@zulip.com',
alice=u'alice@zulip.com',
newuser=u'newuser@zulip.com',
bob=u'bob@zulip.com',
cordelia=u'cordelia@zulip.com',
newguy=u'newguy@zulip.com',
me=u'me@zulip.com',
)
def nonreg_user(self, name):
# type: (str) -> UserProfile
email = self.nonreg_user_map[name]
return get_user(email, get_realm_by_email_domain(email))
def example_user(self, name):
# type: (str) -> UserProfile
email = self.example_user_map[name]
return get_user(email, get_realm('zulip'))
def mit_user(self, name):
# type: (str) -> UserProfile
email = self.mit_user_map[name]
return get_user(email, get_realm('zephyr'))
def nonreg_email(self, name):
# type: (str) -> Text
return self.nonreg_user_map[name]
def example
|
GzkV/bookstore_project | bookstore_project/urls.py | Python | mit | 1,408 | 0.003551
"""book
|
store_projec
|
t URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include
from store import urls as store_urls
from django.conf import settings
from django.conf.urls.static import static
from tastypie.api import Api
from store.api import ReviewResource
v1_api=Api(api_name='v1')
v1_api.register(ReviewResource())
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^store/', include(store_urls)),
url('', include('social.apps.django_app.urls', namespace ="social")),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^api/', include(v1_api.urls)),
]+ static(settings.MEDIA_URL, document_root =settings.MEDIA_ROOT)
|
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_express_route_gateways_operations.py | Python | mit | 22,615 | 0.005218
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteGatewaysOperations:
"""ExpressRouteGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list_by_subscription(
self,
**kwargs: Any
) -> "_models.ExpressRouteGatewayList":
"""Lists ExpressRoute gateways under a given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteGatewayList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteGatewayList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteGatewayList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteGateways'} # type: ignore
async def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> "_models.ExpressRouteGatewayList":
"""Lists ExpressRoute gateways in a given resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteGatewayList, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteGatewayList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteGatewayList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
        error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
put_express_route_gateway_parameters: "_models.ExpressRouteGateway",
**kwargs: Any
) -> "_models.ExpressRouteGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_argume
|
erthalion/django-postman
|
postman/utils.py
|
Python
|
bsd-3-clause
| 4,047
| 0.003706
|
from __future
|
__ import unicode_literals
import re
import sys
from textwrap import TextWrapper
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext, ugettext_lazy as _
# make use of a favourite notifier app such as django-notification
# but if not installed or not desired, fallback will be to do basic emailing
name = getattr(settings, 'POSTMAN_NOTIFIER_APP', 'notification')
if name and name in settings.INSTALLED_APPS:
name = name + '.models'
__import__(name)
notification = sys.modules[name]
else:
notification = None
# give priority to a favourite mailer app such as django-mailer
# but if not installed or not desired, fallback to django.core.mail
name = getattr(settings, 'POSTMAN_MAILER_APP', 'mailer')
if name and name in settings.INSTALLED_APPS:
send_mail = __import__(name, globals(), locals(), [str('send_mail')]).send_mail
else:
from django.core.mail import send_mail
# to disable email notification to users
DISABLE_USER_EMAILING = getattr(settings, 'POSTMAN_DISABLE_USER_EMAILING', False)
# default wrap width; referenced in forms.py
WRAP_WIDTH = 55
def format_body(sender, body, indent=_("> "), width=WRAP_WIDTH):
"""
Wrap the text and prepend lines with a prefix.
The aim is to get lines with at most `width` chars.
But does not wrap if the line is already prefixed.
Prepends each line with a localized prefix, even empty lines.
Existing line breaks are preserved.
Used for quoting messages in replies.
"""
indent = force_unicode(indent) # join() doesn't work on lists with lazy translation objects
wrapper = TextWrapper(width=width, initial_indent=indent, subsequent_indent=indent)
# rem: TextWrapper doesn't add the indent on an empty text
quote = '\n'.join([line.startswith(indent) and indent+line or wrapper.fill(line) or indent for line in body.splitlines()])
return ugettext("\n\n{sender} wrote:\n{body}\n").format(sender=sender, body=quote)
def format_subject(subject):
"""
Prepend a pattern to the subject, unless already there.
Matching is case-insensitive.
"""
str = ugettext("Re: {subject}")
pattern = '^' + str.replace('{subject}', '.*') + '$'
return subject if re.match(pattern, subject, re.IGNORECASE) else str.format(subject=subject)
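# Editorial sketch (not part of the original module), assuming the default English
# translations: a rough illustration of what the two helpers above return.
#
#   format_subject("hello")      -> "Re: hello"
#   format_subject("Re: hello")  -> "Re: hello"   (already prefixed, left untouched)
#
#   format_body("alice", "first line\n\nsecond line", width=20)
#   -> "\n\nalice wrote:\n> first line\n> \n> second line\n"
#      (every line gets the "> " prefix; long lines are wrapped to ~20 characters)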
def email(subject_template, message_template, recipient_list, object, action, site):
"""Compose and send an email."""
ctx_dict = {'site': site, 'object': object, 'action': action}
subject = render_to_string(subject_template, ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string(message_template, ctx_dict)
# during the development phase, consider using the setting: EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True)
def email_visitor(object, action, site):
"""Email a visitor."""
email('postman/email_visitor_subject.txt', 'postman/email_visitor.txt', [object.email], object, action, site)
def notify_user(object, action, site):
"""Notify a user."""
if action == 'rejection':
user = object.sender
label = 'postman_rejection'
elif action == 'acceptance':
user = object.recipient
parent = object.parent
label = 'postman_reply' if (parent and parent.sender_id == object.recipient_id) else 'postman_message'
else:
return
if notification:
# the context key 'message' is already used in django-notification/models.py/send_now() (v0.2.0)
notification.send(users=[user], label=label, extra_context={'pm_message': object, 'pm_action': action})
else:
if not DISABLE_USER_EMAILING and user.email and user.is_active:
email('postman/email_user_subject.txt', 'postman/email_user.txt', [user.email], object, action, site)
|
kawamon/hue | desktop/core/ext-py/celery-4.2.1/t/unit/concurrency/test_concurrency.py | Python | apache-2.0 | 4,669 | 0
from __future__ import absolute_import, unicode_literals
import os
from itertools import count
import pytest
from case import Mock, patch
from celery.concurrency.base import BasePool, apply_target
from celery.exceptions import WorkerShutdown, WorkerTerminate
class test_BasePool:
def test_apply_target(self):
scratch = {}
counter = count(0)
def gen_callback(name, retval=None):
def callback(*args):
scratch[name] = (next(counter), args)
return retval
return callback
apply_target(gen_callback('target', 42),
args=(8, 16),
callback=gen_callback('callback'),
accept_callback=gen_callback('accept_callback'))
assert scratch['target'] == (1, (8, 16))
assert scratch['callback'] == (2, (42,))
pa1 = scratch['accept_callback']
assert pa1[0] == 0
assert pa1[1][0] == os.getpid()
assert pa1[1][1]
# No accept callback
scratch.clear()
apply_target(gen_callback('target', 42),
args=(8, 16),
callback=gen_callback('callback'),
accept_callback=None)
assert scratch == {
'target': (3, (8, 16)),
'callback': (4, (42,)),
}
    def test_apply_target__propagate(self):
target = Mock(name='target')
target.side_effect = KeyError()
with pytest.raises(KeyError):
apply_target(target, propagate=(KeyError,))
def test_apply_target__raises(self):
target = Mock(name='target')
        target.side_effect = KeyError()
with pytest.raises(KeyError):
apply_target(target)
def test_apply_target__raises_WorkerShutdown(self):
target = Mock(name='target')
target.side_effect = WorkerShutdown()
with pytest.raises(WorkerShutdown):
apply_target(target)
def test_apply_target__raises_WorkerTerminate(self):
target = Mock(name='target')
target.side_effect = WorkerTerminate()
with pytest.raises(WorkerTerminate):
apply_target(target)
def test_apply_target__raises_BaseException(self):
target = Mock(name='target')
callback = Mock(name='callback')
target.side_effect = BaseException()
apply_target(target, callback=callback)
callback.assert_called()
@patch('celery.concurrency.base.reraise')
def test_apply_target__raises_BaseException_raises_else(self, reraise):
target = Mock(name='target')
callback = Mock(name='callback')
reraise.side_effect = KeyError()
target.side_effect = BaseException()
with pytest.raises(KeyError):
apply_target(target, callback=callback)
callback.assert_not_called()
def test_does_not_debug(self):
x = BasePool(10)
x._does_debug = False
x.apply_async(object)
def test_num_processes(self):
assert BasePool(7).num_processes == 7
def test_interface_on_start(self):
BasePool(10).on_start()
def test_interface_on_stop(self):
BasePool(10).on_stop()
def test_interface_on_apply(self):
BasePool(10).on_apply()
def test_interface_info(self):
assert BasePool(10).info == {
'max-concurrency': 10,
}
def test_interface_flush(self):
assert BasePool(10).flush() is None
def test_active(self):
p = BasePool(10)
assert not p.active
p._state = p.RUN
assert p.active
def test_restart(self):
p = BasePool(10)
with pytest.raises(NotImplementedError):
p.restart()
def test_interface_on_terminate(self):
p = BasePool(10)
p.on_terminate()
def test_interface_terminate_job(self):
with pytest.raises(NotImplementedError):
BasePool(10).terminate_job(101)
def test_interface_did_start_ok(self):
assert BasePool(10).did_start_ok()
def test_interface_register_with_event_loop(self):
assert BasePool(10).register_with_event_loop(Mock()) is None
def test_interface_on_soft_timeout(self):
assert BasePool(10).on_soft_timeout(Mock()) is None
def test_interface_on_hard_timeout(self):
assert BasePool(10).on_hard_timeout(Mock()) is None
def test_interface_close(self):
p = BasePool(10)
p.on_close = Mock()
p.close()
assert p._state == p.CLOSE
p.on_close.assert_called_with()
def test_interface_no_close(self):
assert BasePool(10).on_close() is None
|
grpc/grpc-ios | native_src/third_party/googletest/googletest/test/googletest-env-var-test.py | Python | apache-2.0 | 4,173 | 0.005272
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
import os
from googletest.test import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-env-var-test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print('Expected: %s' % (expected,))
    print('  Actual: %s' % (actual,))
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
    environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs googletest-env-var-test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
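# Editorial note (not part of the original test): each TestFlag() call in the class
# below amounts to two runs of the helper binary, e.g. TestFlag('color', 'yes', 'auto'):
#   with GTEST_COLOR=yes   ./googletest-env-var-test_ color  -> expected output "yes"
#   with GTEST_COLOR unset ./googletest-env-var-test_ color  -> expected output "auto"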
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
SetEnvVar('TESTBRIDGE_TEST_RUNNER_FAIL_FAST', None) # For 'fail_fast' test
TestFlag('fail_fast', '1', '0')
TestFlag('filter', 'FooTest.Bar', '*')
SetEnvVar('XML_OUTPUT_FILE', None) # For 'output' test
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('brief', '1', '0')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
def testXmlOutputFile(self):
"""Tests that $XML_OUTPUT_FILE affects the output flag."""
SetEnvVar('GTEST_OUTPUT', None)
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/bar.xml', GetFlag('output'))
def testXmlOutputFileOverride(self):
"""Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT."""
SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/foo.xml', GetFlag('output'))
if __name__ == '__main__':
gtest_test_utils.Main()
|
stefpiatek/mdt-flask-app | tests/unit/test_main_forms.py | Python | mit | 10,190 | 0.000196
import pytest
from wtforms.validators import ValidationError
from mdt_app.main.forms import *
from mdt_app.models import *
@pytest.mark.usefixtures('db_session', 'populate_db')
class TestQuerySelectFunctions:
def test_get_meetings(self):
"""returns future meetings that are not cancelled in order"""
past = Meeting.query.filter_by(date='2010-11-15').first()
cancelled = Meeting.query.filter_by(is_cancelled=True).first()
meetings = get_meetings().all()
assert meetings[0].date < meetings[1].date
assert past not in meetings
assert cancelled not in meetings
def test_get_consultants(self):
"""returns only consultants in order"""
consultants = get_consultants().all()
not_consultant = User.query.filter_by(is_consultant=False).first()
assert not_consultant not in consultants
assert consultants[0].username < consultants[1].username
def test_get_users(self):
"""returns confirmed users by username"""
confirmed = get_users().all()
unconfirmed = User.query.filter_by(is_confirmed=False).first()
assert confirmed[0].username < confirmed[1].username
assert unconfirmed not in confirmed
@pytest.mark.usefixtures('db_session', 'populate_db')
class TestCaseForm:
def setup(self):
meeting = Meeting.query.filter_by(date='2050-10-16').first()
consultant = User.query.filter_by(initials='AC').first()
self.form = CaseForm(case_id=-1,
patient_id=1,
meeting=meeting,
                             consultant=consultant,
                             mdt_vcmg='MDT',
medical_history='medical history here',
question='question here')
def test_validate_meeting(self):
"""
Custom validation failures:
- Case already exists for that patient on that date
        - meeting and add_meeting both filled in
        - no meeting or add_meeting data
"""
existing_date = Meeting.query.filter_by(date='2050-10-30').first()
existing_case = CaseForm(data=self.form.data,
meeting=existing_date)
double_meeting_in = CaseForm(data=self.form.data,
add_meeting='2050-11-15',)
no_meeting = CaseForm(data=self.form.data,
meeting=None)
assert self.form.validate() is True
with pytest.raises(ValidationError):
no_meeting.validate_meeting(no_meeting.meeting)
with pytest.raises(ValidationError):
existing_case.validate_meeting(existing_case.meeting)
with pytest.raises(ValidationError):
double_meeting_in.validate_meeting(double_meeting_in.meeting)
def test_validate_add_meeting(self):
"""Validate if meeting does not already exist on that date"""
existing_date = Meeting.query.first()
new_meeting = CaseForm(data=self.form.data,
meeting=None,
add_meeting='2050-11-15')
existing_meeting = CaseForm(data=self.form.data,
meeting=None,
add_meeting=existing_date.date)
assert new_meeting.validate() is True
with pytest.raises(ValidationError):
existing_meeting.validate_add_meeting(existing_meeting.add_meeting)
@pytest.mark.usefixtures('db_session', 'populate_db')
class TestCaseEditForm:
def setup(self):
meeting = Meeting.query.filter_by(date='2050-10-16').first()
consultant = User.query.filter_by(initials='AC').first()
self.user1 = User.query.first()
self.form = CaseForm(case_id=2,
patient_id=1,
meeting=meeting,
consultant=consultant,
mdt_vcmg='MDT',
medical_history='medical history here',
question='question here')
def test_validate_no_actions(self):
"""Validate only if no actions exist or are in the form"""
no_problems = CaseEditForm(data=self.form.data,
no_actions=True)
form_actions = CaseEditForm(data=self.form.data,
no_actions=True,
action='dummy action',
action_to=self.user1)
saved_actions = CaseEditForm(data=self.form.data,
no_actions=True,
case_id=1)
assert no_problems.validate() is True
with pytest.raises(ValidationError):
form_actions.validate_no_actions(form_actions.no_actions)
with pytest.raises(ValidationError):
saved_actions.validate_no_actions(saved_actions.no_actions)
def test_validate_action(self):
user1 = User.query.first()
"""
Validate passes if
- action and action_to are blank
- action, action_to and discussion are filled
Validate fails if
- one of discussion, action or action_to are blank
"""
no_data = CaseEditForm(data=self.form.data)
no_problems = CaseEditForm(data=self.form.data,
discussion='discussed',
action='dummy action',
action_to=self.user1)
no_discussion = CaseEditForm(data=no_problems.data,
discussion=None)
no_action = CaseEditForm(data=no_problems.data,
action=None)
no_action_to = CaseEditForm(data=no_problems.data,
action_to=None)
assert no_data.validate() is True
no_data.validate_action(no_data.action)
assert no_problems.validate() is True
no_problems.validate_action(no_problems.action)
with pytest.raises(ValidationError):
no_discussion.validate_action(no_discussion.action)
with pytest.raises(ValidationError):
no_action.validate_action(no_action.action)
with pytest.raises(ValidationError):
no_action_to.validate_action(no_action_to.action)
@pytest.mark.usefixtures('db_session', 'populate_db')
class TestMeetingForm:
"""Validate if meeting on that date doesn't have the same id"""
def setup(self):
self.new_meeting = MeetingForm(id=-1,
date='2050-11-15')
def test_validate_date(self):
existing_meeting = Meeting.query.first()
last_meeting = Meeting.query.all()[-1]
create_meeting = MeetingForm(data=self.new_meeting.data)
edit_meeting = MeetingForm(data=self.new_meeting.data,
id=existing_meeting.id)
create_date_clash = MeetingForm(data=self.new_meeting.data,
date=existing_meeting.date)
edit_date_clash = MeetingForm(id=last_meeting.id + 1,
date=existing_meeting.date)
assert create_meeting.validate() is True
create_meeting.validate_date(create_meeting.date)
assert edit_meeting.validate() is True
edit_meeting.validate_date(edit_meeting.date)
with pytest.raises(ValidationError):
create_date_clash.validate_date(create_date_clash.date)
with pytest.raises(ValidationError):
edit_date_clash.validate_date(edit_date_clash.date)
@pytest.mark.usefixtures('db_session', 'populate_db')
class TestPatientForm:
def setup(self):
self.patient = PatientForm(id=-1,
hospital_number='15975346',
first_name='New',
last_name='PATIENT',
date_of_birth='1987-12-05',
sex='F')
self.existi
|
rlabbe/filterpy | filterpy/kalman/mmae.py | Python | mit | 6,370 | 0.000471
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name,too-many-instance-attributes
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import absolute_import, division
from copy import deepcopy
import numpy as np
from filterpy.common import pretty_str
class MMAEFilterBank(object):
"""
Implements the fixed Multiple Model Adaptive Estimator (MMAE). This
is a bank of independent Kalman filters. This estimator computes the
likelihood that each filter is the correct one, and blends their state
estimates weighted by their likelihood to produce the state estimate.
Parameters
----------
filters : list of Kalman filters
List of Kalman filters.
p : list-like of floats
Initial probability that each filter is the correct one. In general
you'd probably set each element to 1./len(p).
dim_x : float
number of random variables in the state X
H : Measurement matrix
Attributes
----------
x : numpy.array(dim_x, 1)
Current state estimate. Any call to update() or predict() updates
this variable.
P : numpy.array(dim_x, dim_x)
Current state covariance matrix. Any call to update() or predict()
updates this variable.
x_prior : numpy.array(dim_x, 1)
Prior (predicted) state estimate. The *_prior and *_post attributes
        are for convenience; they store the prior and posterior of the
current epoch. Read Only.
P_prior : numpy.array(dim_x, dim_x)
Prior (predicted) state covariance matrix. Read Only.
x_post : numpy.array(dim_x, 1)
Posterior (updated) state estimate. Read Only.
P_post : numpy.array(dim_x, dim_x)
Posterior (updated) state covariance matrix. Read Only.
z : ndarray
Last measurement used in update(). Read only.
filters : list of Kalman filters
List of Kalman filters.
Examples
--------
    .. code-block:: python
ca = make_ca_filter(dt, noise_factor=0.6)
cv = make_ca_filter(dt, noise_factor=0.6)
cv.F[:,2] = 0 # remove acceleration term
cv.P[2,2] = 0
cv.Q[2,2] = 0
filters = [cv, ca]
bank = MMAEFilterBank(filters, p=(0.5, 0.5), dim_x=3)
for z in zs:
bank.predict()
bank.update(z)
Also, see my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
References
----------
Zarchan and Musoff. "Fundamentals of Kalman filtering: A Practical
Approach." AIAA, third edition.
"""
def __init__(self, filters, p, dim_x, H=None):
if len(filters) != len(p):
raise ValueError('length of filters and p must be the same')
if dim_x < 1:
raise ValueError('dim_x must be >= 1')
self.filters = filters
self.p = np.asarray(p)
self.dim_x = dim_x
if H is None:
self.H = None
else:
self.H = np.copy(H)
        # try to form reasonable initial values, but good luck!
try:
self.z = np.copy(filters[0].z)
self.x = np.copy(filters[0].x)
self.P = np.copy(filters[0].P)
except AttributeError:
self.z = 0
self.x = None
self.P = None
# these will always be a copy of x,P after predict() is called
self.x_prior = self.x.copy()
self.P_prior = self.P.copy()
# these will always be a copy of x,P after update() is called
self.x_post = self.x.copy()
self.P_post = self.P.copy()
def predict(self, u=0):
"""
Predict next position using the Kalman filter state propagation
equations for each filter in the bank.
Parameters
----------
u : np.array
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
"""
for f in self.filters:
f.predict(u)
# save prior
self.x_prior = self.x.copy()
self.P_prior = self.P.copy()
def update(self, z, R=None, H=None):
"""
Add a new measurement (z) to the Kalman filter. If z is None, nothing
is changed.
Parameters
----------
z : np.array
measurement for this update.
R : np.array, scalar, or None
Optionally provide R to override the measurement noise for this
one call, otherwise self.R will be used.
H : np.array, or None
Optionally provide H to override the measurement function for this
one call, otherwise self.H will be used.
"""
if H is None:
H = self.H
# new probability is recursively defined as prior * likelihood
for i, f in enumerate(self.filters):
f.update(z, R, H)
self.p[i] *= f.likelihood
self.p /= sum(self.p) # normalize
# compute estimated state and covariance of the bank of filters.
self.P = np.zeros(self.filters[0].P.shape)
# state can be in form [x,y,z,...] or [[x, y, z,...]].T
is_row_vector = (self.filters[0].x.ndim == 1)
if is_row_vector:
self.x = np.zeros(self.dim_x)
for f, p in zip(self.filters, self.p):
self.x += np.dot(f.x, p)
else:
self.x = np.zeros((self.dim_x, 1))
for f, p in zip(self.filters, self.p):
self.x += np.dot(f.x, p)
for x, f, p in zip(self.x, self.filters, self.p):
y = f.x - x
self.P += p*(np.outer(y, y) + f.P)
# save measurement and posterior state
self.z = deepcopy(z)
self.x_post = self.x.copy()
self.P_post = self.P.copy()
def __repr__(self):
return '\n'.join([
'MMAEFilterBank object',
pretty_str('dim_x', self.dim_x),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('log-p', self.p),
])
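# Editorial sketch (not part of filterpy): the weight update and state blend that
# update() above is built around, restated as a standalone helper using the module's
# `numpy as np` import. `likelihoods` stands in for the per-filter `likelihood`
# attribute each Kalman filter exposes after its own update() call.
def mmae_blend(xs, Ps, priors, likelihoods):
    """Blend per-filter states and covariances by posterior model probability."""
    p = np.asarray(priors, dtype=float) * np.asarray(likelihoods, dtype=float)
    p = p / p.sum()  # normalize: posterior model probabilities
    x = sum(pi * np.asarray(xi, dtype=float) for pi, xi in zip(p, xs))
    P = sum(pi * (np.outer(np.asarray(xi) - x, np.asarray(xi) - x) + np.asarray(Pi))
            for pi, xi, Pi in zip(p, xs, Ps))
    return x, P, p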
|
RasaHQ/rasa_nlu | rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py | Python | apache-2.0 | 21,849 | 0.001968
from __future__ import annotations
import logging
from collections import OrderedDict
import scipy.sparse
import numpy as np
from typing import (
Any,
Dict,
Text,
List,
Tuple,
Callable,
Set,
Optional,
Type,
Union,
)
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.tokenizers.spacy_tokenizer import (
POS_TAG_KEY,
SpacyTokenizer,
)
from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer
from rasa.nlu.featurizers.sparse_featurizer.sparse_featurizer import SparseFeaturizer
from rasa.nlu.constants import TOKENS_NAMES
from rasa.shared.constants import DOCS_URL_COMPONENTS
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.constants import TEXT
from rasa.shared.exceptions import InvalidConfigException
import rasa.shared.utils.io
import rasa.utils.io
logger = logging.getLogger(__name__)
END_OF_SENTENCE = "EOS"
BEGIN_OF_SENTENCE = "BOS"
FEATURES = "features"
@DefaultV1Recipe.register(
DefaultV1Recipe.ComponentType.MESSAGE_FEATURIZER, is_trainable=True
)
class LexicalSyntacticFeaturizer(SparseFeaturizer, GraphComponent):
"""Extracts and encodes lexical syntactic features.
Given a sequence of tokens, this featurizer produces a sequence of features
where the `t`-th feature encodes lexical and syntactic information about the `t`-th
token and it's surrounding tokens.
In detail: The lexical syntactic features can be specified via a list of
configurations `[c_0, c_1, ..., c_n]` where each `c_i` is a list of names of
lexical and syntactic features (e.g. `low`, `suffix2`, `digit`).
For a given tokenized text, the featurizer will consider a window of size `n`
around each token and evaluate the given list of configurations as follows:
- It will extract the features listed in `c_m` where `m = (n-1)/2` if n is even and
      `n/2` otherwise, from token `t`
- It will extract the features listed in `c_{m-1}`,`c_{m-2}` ... , from the last,
second to last, ... token before token `t`, respectively.
    - It will extract the features listed in `c_{m+1}`, `c_{m+2}`, ... from the first,
      second, ... token after token `t`, respectively.
It will then combine all these features into one feature for position `t`.
Example:
If we specify `[['low'], ['upper'], ['prefix2']]`, then for each position `t`
the `t`-th feature will encode whether the token at position `t` is upper case,
        whether the token at position `t-1` is lower case and the first two characters
of the token at position `t+1`.
"""
FILENAME_FEATURE_TO_IDX_DICT = "feature_to_idx_dict.pkl"
# NOTE: "suffix5" of the token "is" will be "is". Hence, when combining multiple
# prefixes, short words will be represented/encoded repeatedly.
_FUNCTION_DICT: Dict[Text, Callable[[Token], Union[Text, bool, None]]] = {
"low": lambda token: token.text.islower(),
"title": lambda token: token.text.istitle(),
"prefix5": lambda token: token.text[:5],
"prefix2": lambda token: token.text[:2],
"suffix5": lambda token: token.text[-5:],
"suffix3": lambda token: token.text[-3:],
"suffix2": lambda token: token.text[-2:],
"suffix1": lambda token: token.text[-1:],
"pos": lambda token: token.data.get(POS_TAG_KEY, None),
"pos2": lambda token: token.data.get(POS_TAG_KEY, [])[:2]
if POS_TAG_KEY in token.data
else None,
"upper": lambda token: token.text.isupper(),
"digit": lambda token: token.text.isdigit(),
}
SUPPORTED_FEATURES = sorted(
set(_FUNCTION_DICT.keys()).union([END_OF_SENTENCE, BEGIN_OF_SENTENCE])
)
@classmethod
def _extract_raw_features_from_token(
cls, feature_name: Text, token: Token, token_position: int, num_tokens: int,
) -> Text:
"""Extracts a raw feature from the token at the given position.
Args:
feature_name: the name of a supported feature
token: the token from which we want to extract the feature
token_position: the position of the token inside the tokenized text
num_tokens: the total number of tokens in the tokenized text
Returns:
the raw feature value as text
"""
if feature_name not in cls.SUPPORTED_FEATURES:
raise InvalidConfigException(
f"Configured feature '{feature_name}' not valid. Please check "
f"'{DOCS_URL_COMPONENTS}' for valid configuration parameters."
)
if feature_name == END_OF_SENTENCE:
return str(token_position == num_tokens - 1)
if feature_name == BEGIN_OF_SENTENCE:
return str(token_position == 0)
return str(cls._FUNCTION_DICT[feature_name](token))
@classmethod
def required_components(cls) -> List[Type]:
"""Components that should be included in the pipeline before this component."""
return [Tokenizer]
@staticmethod
def get_default_config() -> Dict[Text, Any]:
"""Returns the component's default config."""
return {
**SparseFeaturizer.get_default_config(),
FEATURES: [
["low", "title", "upper"],
["BOS", "EOS", "low", "upper", "title", "digit"],
["low", "title", "upper"],
|
],
}
def __init__(
self,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
feature_to_idx_dict: Optional[Dict[Tuple[int, Text], Dict[Text, int]]] = None,
) -> None:
"""Instantiates a new `LexicalSyntacticFeaturizer` instance."""
        super().__init__(execution_context.node_name, config)
# graph component
self._model_storage = model_storage
self._resource = resource
self._execution_context = execution_context
# featurizer specific
self._feature_config = self._config[FEATURES]
self._set_feature_to_idx_dict(
feature_to_idx_dict or {}, check_consistency_with_config=True
)
@classmethod
def validate_config(cls, config: Dict[Text, Any]) -> None:
"""Validates that the component is configured properly."""
if FEATURES not in config:
return # will be replaced with default
feature_config = config[FEATURES]
message = (
f"Expected configuration of `features` to be a list of lists that "
f"that contain names of lexical and syntactic features "
f"(i.e. {cls.SUPPORTED_FEATURES}). "
f"Received {feature_config} instead. "
)
try:
configured_feature_names = set(
feature_name
for pos_config in feature_config
for feature_name in pos_config
)
except TypeError as e:
raise InvalidConfigException(message) from e
if configured_feature_names.difference(cls.SUPPORTED_FEATURES):
raise InvalidConfigException(message)
def _set_feature_to_idx_dict(
self,
feature_to_idx_dict: Dict[Tuple[int, Text], Dict[Text, int]],
check_consistency_with_config: bool = False,
) -> None:
"""Sets the "feature" to index mapping.
Here, "feature" denotes the combination of window position, feature name,
and feature_value.
Args:
feature_to_idx_dict: mapping from tuples of window position and feature name
to a mapping from feature values to indices
check_consistency_with_config: whether the consistency with the current
`self.config` should be checked
"""
self._feature_to_idx_dict = feature_to_idx_dict
self._number_of_features = sum(
[
len(feature_values.values())
for feature_va
|
NiuXWolf/Introduction-to-Algorithms | B/BitArray/BitArray.py | Python | mit | 2,972 | 0.023217
#BitArray
#Yu.Yang
#
class bitarray():
def __init__(self,length,defaultValue=False):
if (length < 0):
raise Exception("Length param error")
self.array=[]
self.length=length
fillValue=defaultValue
for i in range(self.length):
self.array.append(defaultValue)
self.version=0
def input_from_array(self,value):
if(isinstance(value,list)==False):
raise Exception("value is not a Array")
if (value is None or len(value)!=self.length):
raise Exception("ArgumentException if value == null or value.Length != this.Length.")
for i in range(self.length):
self.Set(i,value[i])
self.version+=1
return self
def __len__(self):
return self.length
def __str__(self):
str="["
for i in range(self.length):
str+="1" if self.array[i]==True else "0"
str+=" "
str+="]"
return str
def Get (self,index):
if (index < 0 or index >=self.length):
raise Exception("ArgumentOutOfRangeException if index < 0 or index >= GetLength()")
return self.array[index]
def Set (self,index,value):
if (index < 0 or index >=self.length):
raise Exception("ArgumentOutOfRangeException if index < 0 or index >= GetLength()")
if (value):
self.array[index]=True
else:
self.array[index]=False
self.version+=1
def SetAll(self,value):
for i in range(self.length):
self.Set(i,value)
self.version+=1
def And (self,value):
        if(isinstance(value,bitarray)==False):
raise Exception("value is not a BitArray")
if (value is None or len(value)!=self.length):
raise Exception("ArgumentException if value == null or value.Length != this.Length.")
for i in range(self.length):
self.array[i]&=value.Get(i)
self.version+=1
return self
def Or (self,value):
        if(isinstance(value,bitarray)==False):
raise Exception("value is not a BitArray")
if (value is None or len(value)!=self.length):
raise Exception("ArgumentException if value == null or value.Length != this.Length.")
        for i in range(self.length):
self.array[i]|=value.Get(i)
self.version+=1
return self
def Xor (self,value):
        if(isinstance(value,bitarray)==False):
raise Exception("value is not a BitArray")
if (value is None or len(value)!=self.length):
raise Exception("Argum
|
entException if value == null or value.Length != this.Length.")
for i in range(self.length):
self.array[i]^=value.Get(i)
self.version+=1
return self
def Not (self):
for i in range(self.length):
self.array[i] =not self.array[i]
self.version+=1
return self
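# Editorial usage sketch (not part of the original file): a quick, runnable
# self-check of the bitwise API defined above.
if __name__ == "__main__":
    a = bitarray(4)
    a.input_from_array([True, False, True, False])
    ones = bitarray(4, defaultValue=True)
    print(a)            # [1 0 1 0 ]
    print(a.Xor(ones))  # XOR with all-ones flips every bit -> [0 1 0 1 ]
    print(a.Not())      # NOT flips them back               -> [1 0 1 0 ]
    print(a.And(ones))  # AND with all-ones is the identity -> [1 0 1 0 ]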
|
jawilson/home-assistant | homeassistant/components/integration/sensor.py | Python | apache-2.0 | 7,671 | 0.001043
"""Numeric integration of data coming from a source sensor over time."""
from decimal import Decimal, DecimalException
import logging
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
PLATFORM_SCHEMA,
STATE_CLASS_TOTAL,
SensorEntity,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
CONF_METHOD,
CONF_NAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TIME_DAYS,
TIME_HOURS,
    TIME_MINUTES,
    TIME_SECONDS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ATTR_SOURCE_ID = "source"
CONF_SOURCE_SENSOR = "source"
CONF_ROUND_DIGITS = "round"
CONF_UNIT_PREFIX = "unit_prefix"
CONF_UNIT_TIME = "unit_time"
CONF_UNIT_OF_MEASUREMENT = "unit"
TRAPEZOIDAL_METHOD = "trapezoidal"
LEFT_METHOD = "left"
RIGHT_METHOD = "right"
INTEGRATION_METHOD = [TRAPEZOIDAL_METHOD, LEFT_METHOD, RIGHT_METHOD]
# SI Metric prefixes
UNIT_PREFIXES = {None: 1, "k": 10 ** 3, "M": 10 ** 6, "G": 10 ** 9, "T": 10 ** 12}
# SI Time prefixes
UNIT_TIME = {
TIME_SECONDS: 1,
TIME_MINUTES: 60,
TIME_HOURS: 60 * 60,
TIME_DAYS: 24 * 60 * 60,
}
ICON = "mdi:chart-histogram"
DEFAULT_ROUND = 3
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_UNIT_OF_MEASUREMENT),
PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_SOURCE_SENSOR): cv.entity_id,
vol.Optional(CONF_ROUND_DIGITS, default=DEFAULT_ROUND): vol.Coerce(int),
vol.Optional(CONF_UNIT_PREFIX, default=None): vol.In(UNIT_PREFIXES),
vol.Optional(CONF_UNIT_TIME, default=TIME_HOURS): vol.In(UNIT_TIME),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_METHOD, default=TRAPEZOIDAL_METHOD): vol.In(
INTEGRATION_METHOD
),
}
),
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the integration sensor."""
integral = IntegrationSensor(
config[CONF_SOURCE_SENSOR],
config.get(CONF_NAME),
config[CONF_ROUND_DIGITS],
config[CONF_UNIT_PREFIX],
config[CONF_UNIT_TIME],
config.get(CONF_UNIT_OF_MEASUREMENT),
config[CONF_METHOD],
)
async_add_entities([integral])
class IntegrationSensor(RestoreEntity, SensorEntity):
"""Representation of an integration sensor."""
def __init__(
self,
source_entity,
name,
round_digits,
unit_prefix,
unit_time,
unit_of_measurement,
integration_method,
):
"""Initialize the integration sensor."""
self._sensor_source_id = source_entity
self._round_digits = round_digits
self._state = None
self._method = integration_method
self._name = name if name is not None else f"{source_entity} integral"
self._unit_template = (
f"{'' if unit_prefix is None else unit_prefix}{{}}{unit_time}"
)
self._unit_of_measurement = unit_of_measurement
self._unit_prefix = UNIT_PREFIXES[unit_prefix]
self._unit_time = UNIT_TIME[unit_time]
self._attr_state_class = STATE_CLASS_TOTAL
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
if state := await self.async_get_last_state():
try:
self._state = Decimal(state.state)
except (DecimalException, ValueError) as err:
_LOGGER.warning("Could not restore last state: %s", err)
else:
self._attr_device_class = state.attributes.get(ATTR_DEVICE_CLASS)
if self._unit_of_measurement is None:
self._unit_of_measurement = state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
)
@callback
def calc_integration(event):
"""Handle the sensor state changes."""
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
if self._unit_of_measurement is None:
unit = new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
self._unit_of_measurement = self._unit_template.format(
"" if unit is None else unit
)
if (
self.device_class is None
and new_state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_POWER
):
self._attr_device_class = DEVICE_CLASS_ENERGY
if (
old_state is None
or new_state is None
or old_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE)
or new_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE)
):
return
try:
# integration as the Riemann integral of previous measures.
area = 0
elapsed_time = (
new_state.last_updated - old_state.last_updated
).total_seconds()
if self._method == TRAPEZOIDAL_METHOD:
area = (
(Decimal(new_state.state) + Decimal(old_state.state))
* Decimal(elapsed_time)
/ 2
)
elif self._method == LEFT_METHOD:
area = Decimal(old_state.state) * Decimal(elapsed_time)
elif self._method == RIGHT_METHOD:
area = Decimal(new_state.state) * Decimal(elapsed_time)
integral = area / (self._unit_prefix * self._unit_time)
assert isinstance(integral, Decimal)
except ValueError as err:
_LOGGER.warning("While calculating integration: %s", err)
except DecimalException as err:
_LOGGER.warning(
"Invalid state (%s > %s): %s", old_state.state, new_state.state, err
)
except AssertionError as err:
_LOGGER.error("Could not calculate integral: %s", err)
else:
if isinstance(self._state, Decimal):
self._state += integral
else:
self._state = integral
self.async_write_ha_state()
async_track_state_change_event(
self.hass, [self._sensor_source_id], calc_integration
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
if isinstance(self._state, Decimal):
return round(self._state, self._round_digits)
return self._state
@property
def native_unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_SOURCE_ID: self._sensor_source_id}
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
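# Editorial sketch (not part of Home Assistant): the three Riemann rules used in
# calc_integration() above, reduced to a standalone helper over a single sampling
# interval (the unit_prefix/unit_time scaling is left out).
def _riemann_area(method, old_value, new_value, elapsed_seconds):
    """Area contributed by one sampling interval."""
    old_v, new_v, dt = Decimal(old_value), Decimal(new_value), Decimal(elapsed_seconds)
    if method == TRAPEZOIDAL_METHOD:
        return (old_v + new_v) * dt / 2  # average of the two endpoint readings
    if method == LEFT_METHOD:
        return old_v * dt                # hold the previous reading constant
    if method == RIGHT_METHOD:
        return new_v * dt                # hold the new reading constant
    raise ValueError(f"unknown method: {method}")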
|
ZiTAL/zpy | private/lib/env.py | Python | agpl-3.0 | 439 | 0.047836
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import json, web
from lib.log import Log
class Env(object):
@staticmethod
def get(key):
if key and key in web.ctx.env:
return web.ctx.env[key]
else:
return web.ctx.env
@staticmethod
def set(key, value):
web.ctx.env[key] = value
@staticmethod
def setFromFile(file):
fenv = open(file)
jenv = json.load(fenv)
for key,value in jenv.items():
            web.ctx.env[key] = value
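# Editorial usage sketch (not part of the original module): given a JSON file such
# as {"LANG": "eu", "DEBUG": "1"}, Env.setFromFile("env.json") copies every key
# into web.ctx.env; Env.get("LANG") then returns "eu", and Env.get(None) returns
# the whole environment dict.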
|
phenoxim/cinder | cinder/tests/unit/api/v3/test_workers.py | Python | apache-2.0 | 8,104 | 0
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_serialization import jsonutils
from six.moves import http_client
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import router as router_v3
from cinder.api.v3 import workers
from cinder.common import constants
from cinder import context
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
SERVICES = (
[objects.Service(id=1, host='host1', binary=constants.VOLUME_BINARY,
cluster_name='mycluster'),
objects.Service(id=2, host='host2', binary=constants.VOLUME_BINARY,
cluster_name='mycluster')],
[objects.Service(id=3, host='host3', binary=constants.VOLUME_BINARY,
cluster_name='mycluster'),
objects.Service(id=4, host='host4', binary=constants.VOLUME_BINARY,
cluster_name='mycluster')],
)
def app():
# no auth, just let environ['cinder.context'] pass through
api = router_v3.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v3'] = api
return mapper
@ddt.ddt
class WorkersTestCase(test.TestCase):
"""Tes Case for the cleanup of Workers entries."""
def setUp(self):
super(WorkersTestCase, self).setUp()
self.context = context.RequestContext(user_id=None,
project_id=fake.PROJECT_ID,
is_admin=True,
read_deleted='no',
overwrite=False)
self.controller = workers.create_resource()
def _get_resp_post(self, body, version=mv.WORKERS_CLEANUP, ctxt=None):
"""Helper to execute a POST workers API call."""
req = webob.Request.blank('/v3/%s/workers/cleanup' % fake.PROJECT_ID)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.headers['OpenStack-API-Version'] = 'volume ' + version
req.environ['cinder.context'] = ctxt or self.context
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(app())
return res
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
def test_cleanup_old_api_version(self, rpc_mock):
res = self._get_resp_post({}, mv.get_prior_version(mv.WORKERS_CLEANUP))
self.assertEqual(http_client.NOT_FOUND, res.status_code)
rpc_mock.assert_not_called()
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
def test_cleanup_not_authorized(self, rpc_mock):
ctxt = context.RequestContext(user_id=None,
project_id=fake.PROJECT_ID,
is_admin=False,
read_deleted='no',
overwrite=False)
res = self._get_resp_post({}, ctxt=ctxt)
self.assertEqual(http_client.FORBIDDEN, res.status_code)
rpc_mock.assert_not_called()
@ddt.data({'binary': 'nova-scheduler'},
{'disabled': 'sure'}, {'is_up': 'nop'},
{'resource_type': 'service'}, {'resource_id': 'non UUID'},
{'is_up': 11}, {'disabled': 11},
              {'is_up': ' true '}, {'disabled': ' false '})
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
def test_cleanup_wrong_param(self, body, rpc_mock):
res = self._get_resp_post(body)
self.assertEqual(http_client.BAD_REQUEST, res.status_code)
expected = 'Invalid input'
self.assertIn(expected, res.json['badRequest']['message'])
rpc_mock.assert_not_called()
    @ddt.data({'fake_key': 'value'})
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
def test_cleanup_with_additional_properties(self, body, rpc_mock):
res = self._get_resp_post(body)
self.assertEqual(http_client.BAD_REQUEST, res.status_code)
expected = 'Additional properties are not allowed'
self.assertIn(expected, res.json['badRequest']['message'])
rpc_mock.assert_not_called()
def _expected_services(self, cleaning, unavailable):
def service_view(service):
return {'id': service.id, 'host': service.host,
'binary': service.binary,
'cluster_name': service.cluster_name}
return {'cleaning': [service_view(s) for s in cleaning],
'unavailable': [service_view(s) for s in unavailable]}
@ddt.data({'service_id': 10}, {'binary': 'cinder-volume'},
{'binary': 'cinder-scheduler'}, {'disabled': 'false'},
{'is_up': 'no'}, {'resource_type': 'Volume'},
{'resource_id': fake.VOLUME_ID, 'host': 'host@backend'},
{'host': 'host@backend#pool'},
{'cluster_name': 'cluster@backend'},
{'cluster_name': 'cluster@backend#pool'},
{'service_id': None},
{'cluster_name': None}, {'host': None},
{'resource_type': ''}, {'resource_type': None},
{'resource_id': None})
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup',
return_value=SERVICES)
def test_cleanup_params(self, body, rpc_mock):
res = self._get_resp_post(body)
self.assertEqual(http_client.ACCEPTED, res.status_code)
rpc_mock.assert_called_once_with(self.context, mock.ANY)
cleanup_request = rpc_mock.call_args[0][1]
for key, value in body.items():
if key in ('disabled', 'is_up'):
if value is not None:
value = value == 'true'
self.assertEqual(value, getattr(cleanup_request, key))
self.assertEqual(self._expected_services(*SERVICES), res.json)
@mock.patch('cinder.db.worker_get_all',
return_value=[mock.Mock(service_id=1, resource_type='Volume')])
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup',
return_value=SERVICES)
def test_cleanup_missing_location_ok(self, rpc_mock, worker_mock):
res = self._get_resp_post({'resource_id': fake.VOLUME_ID})
self.assertEqual(http_client.ACCEPTED, res.status_code)
rpc_mock.assert_called_once_with(self.context, mock.ANY)
cleanup_request = rpc_mock.call_args[0][1]
self.assertEqual(fake.VOLUME_ID, cleanup_request.resource_id)
self.assertEqual(1, cleanup_request.service_id)
self.assertEqual('Volume', cleanup_request.resource_type)
self.assertEqual(self._expected_services(*SERVICES), res.json)
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
def test_cleanup_missing_location_fail_none(self, rpc_mock):
res = self._get_resp_post({'resource_id': fake.VOLUME_ID})
self.assertEqual(http_client.BAD_REQUEST, res.status_code)
self.assertIn('Invalid input', res.json['badRequest']['message'])
rpc_mock.assert_not_called()
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup',
return_value=[1, 2])
def test_cleanup_missing_location_fail_multiple(self, rpc_mock):
res = self._get_resp_post({'resource_id': fake.VOLUME_ID})
self.assertEqual(http_client.BAD_REQUEST, res.status_code)
self.assertIn('Invalid input', res.json['badRequest']['message'])
rpc_mock.assert_not_called()
|
clips/pattern
|
examples/04-search/02-constraint.py
|
Python
|
bsd-3-clause
| 2,667
| 0.002625
|
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str, bytes, dict, int
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.search import search, Pattern, Constraint
from pattern.en import parsetree, parse, Sentence
# What we call a "search word" in example 01-search.py
# is actually called a constraint, because it can contain different options.
# Options are separated by "|".
# The next search pattern retrieves words that are a noun OR an adjective:
s = parsetree("big white rabbit")
print(search("NN|JJ", s))
print("")
# This pattern yields phrases containing an adjective followed by a noun.
# Consecutive constraints are separated by a space:
print(search("JJ NN", s))
print("")
# Or a noun preceded by any number of adjectives:
print(search("JJ?+ NN", s))
print("")
# Note: NN marks singular nouns, NNS marks plural nouns.
# If you want to include both, use "NN*" as a constraint.
# This works for NN*, VB*, JJ*, RB*.
s = parsetree("When I sleep the big white rabbit will stare at my feet.")
m = search("rabbit stare at feet", s)
print(s)
print(m)
print("")
# Why does this work?
# The word "will" is included in the result, even if the pattern does not define it.
# The pattern should break when it does not encounter "stare" after "rabbit."
# It works because "will stare" is one verb chunk.
# The "stare" constraint matches the head word of the chunk ("stare"),
# so "will stare" is considered an overspecified version of "stare".
# The same happens with "my feet" and the "rabbit" constraint,
# which matches the overspecified chunk "the big white rabbit".
p = Pattern.fromstring("rabbit stare at feet", s)
p.strict = True # Now it matches only what the pattern explicitly defines (=no match).
m = p.search(s)
print(m)
print("")
# Sentence chunks can be matched by tag (e.g. NP, VP, ADJP).
# The pattern below matches anything from
# "the rabbit gnaws at your fingers" to
# "the white rabbit looks at the carrots":
p = Pattern.fromstring("rabbit VP at NP", s)
m = p.search(s)
print(m)
print("")
if m:
for w in m[0].words:
print("%s\t=> %s" % (w, m[0].constraint(w)))
print("")
print("-------------------------------------------------------------")
# Finally, constraints can also include regular expressions.
# To include them we need to use the full syntax instead of the search() function:
import re
r = re.compile(r"[0-9|\.]+") # all numbers
p = Pattern()
p.sequence.append(Constraint(words=[r]))
p.sequence.append(Constraint(tags=["NN*"]))
s = Sentence(parse("I have 9.5 rabbits."))
print(s)
print(p.search(s))
print("")
|
wiredrive/wtframework
|
wtframework/wtf/testobjects/testcase.py
|
Python
|
gpl-3.0
| 8,295
| 0.001085
|
##########################################################################
# This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
import sys
from unittest.case import _ExpectedFailure, _UnexpectedSuccess, SkipTest
import warnings
from six import u
import unittest2
class WatchedTestCase(unittest2.TestCase):
'''
This test case extends the unittest.TestCase to add support for
registering TestWatchers for listening on TestEvents.
'''
def __init__(self, *args, **kwargs):
self.__wtf_test_watchers__ = []
super(WatchedTestCase, self).__init__(*args, **kwargs)
    # '_' prefix is added to hide it from nosetest
def _register_watcher(self, watcher, position=-1):
"""
Register a test watcher.
Args:
watcher: A test watcher to register.
Kwargs:
position: position in execution queue to insert this watcher.
"""
self.__wtf_test_watchers__.insert(position, watcher)
    # '_' prefix is added to hide it from nosetest
def _unregister_watcher(self, watcher):
""""
Unregister a test watcher.
Args:
watcher : Reference to TestWatcher to unregister.
"""
self.__wtf_test_watchers__.remove(watcher)
def get_log(self):
"""
Get a log of events fired.
Returns:
list - list of string names of events fired.
"""
log = []
for watcher in self.__wtf_test_watchers__:
log = watcher.get_log() + log
return log
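    # A minimal usage sketch (hypothetical watcher class; the callback names are the ones
    # invoked by run() below, and registration uses _register_watcher defined above):
    #
    #   class PrintingWatcher(object):
    #       def before_setup(self, testcase, result): print("before setUp")
    #       def before_test(self, testcase, result): print("before test")
    #       def on_test_pass(self, testcase, result): print("passed")
    #       def on_test_failure(self, testcase, result, exception): print("failed")
    #       def on_test_error(self, testcase, result, exception): print("errored")
    #       def after_test(self, testcase, result): print("after test")
    #       def after_teardown(self, testcase, result): print("after tearDown")
    #       def get_log(self): return []
    #
    #   class MyTest(WatchedTestCase):
    #       def __init__(self, *args, **kwargs):
    #           super(MyTest, self).__init__(*args, **kwargs)
    #           self._register_watcher(PrintingWatcher())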
def run(self, result=None):
"""
Overriding the run() method to insert calls to our TestWatcher call-backs.
Most of this method is a copy of the unittest.TestCase.run() method source.
Kwargs:
result: TestResult object.
"""
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
# Track if clean up was run, so we can run clean up if setup failed.
did_tear_down_execute = False
self._resultForDoCleanups = result
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
success = False
try:
# Run our test watcher actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.before_setup(self, result)
# Run test setup.
self.setUp()
            except SkipTest as e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
try:
# Run our test watcher actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.before_test(self, result)
# Run our test
testMethod()
# Run our test watcher post test actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.on_test_pass(self, result)
except self.failureException as e:
result.addFailure(self, sys.exc_info())
# Run our test watcher on fail actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.on_test_failure(self, result, e)
                except _ExpectedFailure as e:
addExpectedFailure = getattr(
result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, e.exc_info)
else:
warnings.warn(u("Use of a TestResult without an addExpectedFailure method is deprecated"),
DeprecationWarning)
result.addSuccess(self)
except _UnexpectedSuccess:
addUnexpectedSuccess = getattr(
result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn(u("Use of a TestResult without an addUnexpectedSuccess method is deprecated"),
DeprecationWarning)
result.addFailure(self, sys.exc_info())
                except SkipTest as e:
self._addSkip(result, str(e))
except Exception as e:
result.addError(self, sys.exc_info())
# Run our test watcher on error actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.on_test_error(self, result, e)
else:
success = True
try:
did_tear_down_execute = True
# Run our test watcher after test actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.after_test(self, result)
# Do tear down.
self.tearDown()
except Exception:
result.addError(self, sys.exc_info())
success = False
finally: # Run our test watcher actions for after tear down..
for test_watcher in self.__wtf_test_watchers__:
test_watcher.after_teardown(self, result)
cleanUpSuccess = self.doCleanups()
success = success and cleanUpSuccess
if success:
result.addSuccess(self)
finally:
# Execute tear down if it did not get executed.
if not did_tear_down_execute:
# Run our test watcher after test actions.
try:
for test_watcher in self.__wtf_test_watchers__:
test_watcher.after_test(self, result)
self.tearDown()
except:
                    # do nothing, the test case has already failed and the failure is
                    # already handled.
pass
finally: # Run our test watcher actions for after tear down..
for test_watcher in self.__wtf_test_watchers__:
test_watcher.after_teardown(self, result)
# Remove test watchers. For some strange reason these apply to all test
# cases, not just the currently running one. So we remove them
# here.
self.__wtf_test_watchers__ = []
result.stopTest(self)
if orig_result is None:
stopTestRun =
|
popovsn777/new_training
|
test/__init__.py
|
Python
|
apache-2.0
| 22
| 0
|
__author__ = 'Elmira'
|
holocronweaver/wanikani2anki
|
scripts/webdriver.py
|
Python
|
mpl-2.0
| 1,795
| 0.001671
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import mimetypes
from selenium import webdriver
import selenium.common.exceptions
class WebDriver:
"""Use a Selenium web driver to ease login, navigation, and access of
HTTPS sites.
WARNING: Always use a dummy Firefox profile for scraping, never
your main profile! This avoids possibility of corrupting your main
profile.
"""
def __init__(self, firefox_profile,
download_path=None, download_types=None):
self.fp = webdriver.FirefoxProfile(firefox_profile)
if download_path:
            self._configure_downloads(download_path, download_types)
self.driver = webdriver.Firefox(self.fp)
self.driver.set_page_load_timeout(5)
def _configure_downloads(self, path, types):
"""WARNING: Changes profile settings.
Examples of types include audio/mpeg and text/csv. See Firefox options for complete list."""
self.fp.set_preference('media.play-stand-alone', False)
self.fp.set_preference('browser.download.folderList', 2)
self.fp.set_preference('browser.download.manager.showWhenStarting', False)
self.fp.set_preference('browser.download.dir', path)
        # Per the docstring, types are MIME strings (e.g. 'text/csv'); Firefox expects
        # them as a comma-separated list in this preference.
        mime_types = ', '.join(types)
        self.fp.set_preference('browser.helperApps.neverAsk.saveToDisk', mime_types)
def get_html(self, url):
self.driver.get(url)
html_source = self.driver.page_source
return html_source
def __getattr__(self, name):
'''Forward undefined fields to underlying driver.'''
return getattr(self.driver, name)
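# A minimal usage sketch (hypothetical profile path, URL and download settings; not part
# of the original file):
#
#   driver = WebDriver('/path/to/dummy-firefox-profile',
#                      download_path='/tmp/downloads',
#                      download_types=['text/csv'])
#   html = driver.get_html('https://example.com/login')
#   driver.quit()  # undefined attributes are forwarded to the selenium driver via __getattr__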
|
EwanC/pyProc
|
proc_scraper/proc_maps.py
|
Python
|
mit
| 600
| 0
|
#!/usr/bin/env python3
from .proc_base import ProcBase
class ProcMaps(ProcBase):
'''Object represents the /proc/[pid]/maps file.'''
def __init__(self, pid):
'''
        Read file by calling base class constructor
which populates self.content. This file is
already ASCII printable, so no further
parsing is needed.
'''
super().__init__('/proc/{0}/maps'.format(pid))
def dump(self):
'''Print information gathered to stdout.'''
super().dump() # Print file header
if self.content:
print(self.content)
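# A minimal usage sketch (assumes the proc_scraper package layout given by this file's path):
#
#   import os
#   from proc_scraper.proc_maps import ProcMaps
#
#   maps = ProcMaps(os.getpid())  # reads /proc/<pid>/maps via the ProcBase constructor
#   maps.dump()                   # prints the file header and its raw contents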
|
andersroos/rankedftw
|
tasks/delete_data_for_sample.py
|
Python
|
agpl-3.0
| 2,383
| 0.007134
|
#!/usr/bin/env python3
# noinspection PyUnresolvedReferences
import init_django
from django.db import transaction
from common.utils import utcnow
from main.archive import DataArchiver
from main.delete import DataDeleter
from main.models import Ranking
from main.purge import purge_player_data
from tasks.base import Command
class Main(Command):
def __init__(self):
super().__init__("Delete ranking and all cache data and ranking data linked to it, used for broken "
"rankings.",
pid_file=True, stoppable=False)
self.add_argument('--delete', dest="delete", action='store_true', default=False,
help="If this is not set, deletes a dry run will be performed instead.")
self.add_argument('--keep-rankings', '-r', dest="keep_rankings", default=None,
help="Comma separated list of rankings to keep.")
def run(self, args, logger):
keep_ids = (int(id) for id in args.keep_rankings.split(","))
with transaction.atomic():
remove_ids = [r.id for r in Ranking.objects.exclude(id__in=keep_ids)]
data_deleter = DataDeleter(dry_run=not args.delete)
data_archiver = DataArchiver(utcnow(), remove=True)
# Remove rankings.
for remove_id in remove_ids:
data_deleter.delete_ranking(remove_id)
# Archive all rankings except the last.
if args.delete:
rankings = Ranking.objects.order_by("-id")[1:]
for ranking in rankings:
                    logger.info(f"archiving ranking {ranking.id}")
data_archiver.archive_ranking(ranking, self.check_stop)
else:
logger.info("DRY RUN no archiving of rankings")
# Delete ladders that are no longer needed.
keep_season_ids = {r.season_id for r in Ranking.objects.all()}
data_deleter.delete_ladders(tuple(keep_season_ids))
# Delete cache data that is unused.
data_deleter.agressive_delete_cache_data()
# Purge players and teams.
if args.delete:
purge_player_data(check_stop=self.check_stop)
else:
logger.info("DRY RUN no purge player data")
return 0
if __name__ == '__main__':
Main()()
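# Invocation sketch (hypothetical ranking ids; without --delete the run is a dry run):
#
#   ./delete_data_for_sample.py --keep-rankings 123,124
#   ./delete_data_for_sample.py --keep-rankings 123,124 --delete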
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-reprex/package.py
|
Python
|
lgpl-2.1
| 1,861
| 0.002149
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RReprex(RPackage):
"""Convenience wrapper that uses the 'rmarkdown' package to render small
snippets of code to target formats that include both code and output.
The goal is to encourage the sharing of small, reproducible, and
runnable examples on code-oriented websites, such as
<http://stackoverflow.com> and <https://github.com>, or in email.
'reprex' also extracts clean, runnable R code from various common
formats, such as copy/paste from an R session."""
homepage = "https://github.com/jennybc/reprex"
url = "https://cloud.r-project.org/src/contrib/reprex_0.1.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/reprex"
version('0.3.0', sha256='203c2ae6343f6ff887e7a5a3f5d20bae465f6e8d9745c982479f5385f4effb6c')
version('0.2.1', sha256='5d234ddfbcadc5a5194a58eb88973c51581e7e2e231f146974af8f42747b45f3')
    version('0.1.1', sha256='919ae93039b2d8fb8eace98da9376c031d734d9e75c237efb24d047f35b5ba4b')
depends_on('r@3.0.2:', when='@:0.1.2', type=('build', 'run'))
    depends_on('r@3.1:', when='@0.2.0:', type=('build', 'run'))
depends_on('r-callr@2.0.0:', type=('build', 'run'))
depends_on('r-clipr@0.4.0:', type=('build', 'run'))
depends_on('r-knitr', when='@:0.1.9', type=('build', 'run'))
depends_on('r-rmarkdown', type=('build', 'run'))
depends_on('r-whisker', type=('build', 'run'))
depends_on('r-rlang', when='@0.2.0:', type=('build', 'run'))
depends_on('r-withr', when='@0.2.0:', type=('build', 'run'))
depends_on('r-fs', when='@0.2.1:', type=('build', 'run'))
depends_on('pandoc@1.12.3:')
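# Usage sketch with standard Spack commands (not part of the package recipe itself):
#
#   spack info r-reprex          # inspect the versions and dependencies declared above
#   spack install r-reprex@0.3.0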
|
lucemia/gcloud-python
|
gcloud/datastore/test_query.py
|
Python
|
apache-2.0
| 25,434
| 0
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestQuery(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.datastore.query import Query
return Query
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults_wo_implicit_dataset_id(self):
self.assertRaises(ValueError, self._makeOne)
def test_ctor_defaults_w_implicit_dataset_id(self):
from gcloud._testing import _Monkey
from gcloud.datastore import _implicit_environ
_DATASET = 'DATASET'
with _Monkey(_implicit_environ, DATASET_ID=_DATASET):
query = self._makeOne()
self.assertEqual(query.dataset_id, _DATASET)
self.assertEqual(query.kind, None)
self.assertEqual(query.namespace, None)
self.assertEqual(query.ancestor, None)
self.assertEqual(query.filters, [])
self.assertEqual(query.projection, [])
self.assertEqual(query.order, [])
self.assertEqual(query.group_by, [])
def test_ctor_explicit(self):
from gcloud.datastore.key import Key
_DATASET = 'DATASET'
_KIND = 'KIND'
_NAMESPACE = 'NAMESPACE'
ancestor = Key('ANCESTOR', 123, dataset_id=_DATASET)
FILTERS = [('foo', '=', 'Qux'), ('bar', '<', 17)]
PROJECTION = ['foo', 'bar', 'baz']
ORDER = ['foo', 'bar']
GROUP_BY = ['foo']
query = self._makeOne(
dataset_id=_DATASET,
kind=_KIND,
namespace=_NAMESPACE,
ancestor=ancestor,
filters=FILTERS,
projection=PROJECTION,
order=ORDER,
group_by=GROUP_BY,
)
self.assertEqual(query.dataset_id, _DATASET)
self.assertEqual(query.kind, _KIND)
self.assertEqual(query.namespace, _NAMESPACE)
self.assertEqual(query.ancestor.path, ancestor.path)
self.assertEqual(query.filters, FILTERS)
self.assertEqual(query.projection, PROJECTION)
self.assertEqual(query.order, ORDER)
self.assertEqual(query.group_by, GROUP_BY)
def test_namespace_setter_w_non_string(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
def _assign(val):
query.namespace = val
self.assertRaises(ValueError, _assign, object())
def test_namespace_setter(self):
_DATASET = 'DATASET'
_NAMESPACE = 'NAMESPACE'
query = self._makeOne(_DATASET)
query.namespace = _NAMESPACE
self.assertEqual(query.dataset_id, _DATASET)
self.assertEqual(query.namespace, _NAMESPACE)
def test_kind_setter_w_non_string(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
def _assign(val):
query.kind = val
self.assertRaises(TypeError, _assign, object())
def test_kind_setter_wo_existing(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET)
query.kind = _KIND
self.assertEqual(query.dataset_id, _DATASET)
self.assertEqual(query.kind, _KIND)
def test_kind_setter_w_existing(self):
_DATASET = 'DATASET'
_KIND_BEFORE = 'KIND_BEFORE'
_KIND_AFTER = 'KIND_AFTER'
query = self._makeOne(_DATASET, _KIND_BEFORE)
self.assertEqual(query.kind, _KIND_BEFORE)
query.kind = _KIND_AFTER
self.assertEqual(query.dataset_id, _DATASET)
        self.assertEqual(query.kind, _KIND_AFTER)
def test_ancestor_setter_w_non_key(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
def _assign(val):
query.ancestor = val
self.assertRaises(TypeError, _assign, object())
self.assertRaises(TypeError, _assign, ['KIND', 'NAME'])
def test_ancestor_setter_w_key(self):
from gcloud.datastore.key import Key
_DATASET = 'DATASET'
_NAME = u'NAME'
key = Key('KIND', 123, dataset_id='DATASET')
query = self._makeOne(_DATASET)
query.add_filter('name', '=', _NAME)
query.ancestor = key
self.assertEqual(query.ancestor.path, key.path)
def test_ancestor_deleter_w_key(self):
from gcloud.datastore.key import Key
_DATASET = 'DATASET'
key = Key('KIND', 123, dataset_id='DATASET')
query = self._makeOne(_DATASET, ancestor=key)
del query.ancestor
self.assertTrue(query.ancestor is None)
def test_add_filter_setter_w_unknown_operator(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
self.assertRaises(ValueError, query.add_filter,
'firstname', '~~', 'John')
def test_add_filter_w_known_operator(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
query.add_filter('firstname', '=', u'John')
self.assertEqual(query.filters, [('firstname', '=', u'John')])
def test_add_filter_w_all_operators(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
query.add_filter('leq_prop', '<=', u'val1')
query.add_filter('geq_prop', '>=', u'val2')
query.add_filter('lt_prop', '<', u'val3')
query.add_filter('gt_prop', '>', u'val4')
query.add_filter('eq_prop', '=', u'val5')
self.assertEqual(len(query.filters), 5)
self.assertEqual(query.filters[0], ('leq_prop', '<=', u'val1'))
self.assertEqual(query.filters[1], ('geq_prop', '>=', u'val2'))
self.assertEqual(query.filters[2], ('lt_prop', '<', u'val3'))
self.assertEqual(query.filters[3], ('gt_prop', '>', u'val4'))
self.assertEqual(query.filters[4], ('eq_prop', '=', u'val5'))
def test_add_filter_w_known_operator_and_entity(self):
from gcloud.datastore.entity import Entity
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
other = Entity()
other['firstname'] = u'John'
other['lastname'] = u'Smith'
query.add_filter('other', '=', other)
self.assertEqual(query.filters, [('other', '=', other)])
def test_add_filter_w_whitespace_property_name(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
PROPERTY_NAME = ' property with lots of space '
query.add_filter(PROPERTY_NAME, '=', u'John')
self.assertEqual(query.filters, [(PROPERTY_NAME, '=', u'John')])
def test_add_filter___key__valid_key(self):
from gcloud.datastore.key import Key
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
key = Key('Foo', dataset_id='DATASET')
query.add_filter('__key__', '=', key)
self.assertEqual(query.filters, [('__key__', '=', key)])
def test_filter___key__invalid_operator(self):
from gcloud.datastore.key import Key
_DATASET = 'DATASET'
key = Key('Foo', dataset_id='DATASET')
query = self._makeOne(_DATASET)
self.assertRaises(ValueError, query.add_filter, '__key__', '<', key)
def test_filter___key__invalid_value(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
self.assertRaises(ValueError, query.add_filter, '__key__', '=', None)
def test_projection_setter_empty(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
query.projection = []
self.assertEqual(query.projection, [])
def test_projection_setter_string(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
|
Psycojoker/cocktailator
|
cocktails/urls.py
|
Python
|
gpl-3.0
| 1,074
| 0.005597
|
# encoding: utf-8
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib import admin
from data import get_ingredients, get_cocktails
admin.autodiscover()
def ingredients(request):
return render(request, "ingredients.haml", {"ingredients": get_ingredients(request.GET.get("filtering_string", []))})
def cocktails(request):
print request.GET.keys()
cocktails = get_cocktails(request.GET.keys())
if cocktails:
return render(request, "cocktails.haml", {"cocktails": cocktails})
else:
return HttpResponse(u"<p>Aucun cocktails ne peut être fait avec ce
|
s ingrédients</p>")
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name="home.html"), name='home'),
    url(r'^ingredients/$', ingredients, name='ingredients'),
url(r'^cocktails/$', cocktails, name='cocktails'),
# url(r'^cocktails/', include('cocktails.foo.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
veltzer/demos-python
|
src/exercises/basic/digits_report/solution9.py
|
Python
|
gpl-3.0
| 433
| 0
|
#!/usr/bin/env python
digits = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
found = True
while found:
input_string = input('Please give me some digits... \n')
found = False
for character in input_string:
if character not in digits:
# we have a non digit!
print('Error, you gave me non digits')
found = True
break
    print('starting real work on', input_string)
|
jk0/pyhole
|
pyhole/core/version.py
|
Python
|
apache-2.0
| 1,788
| 0
|
# Copyright 2011-2016 Josh Kearney
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Version Handling"""
import os
import sys
__VERSION__ = "0.8.9"
def current_git_hash():
"""Return the current git hash."""
git_file = ".git/refs/heads/master"
git_path = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir, os.pardir, git_file))
if not os.path.exists(git_path):
git_path = os.getcwd() + "/" + git_file
if not os.path.exists(git_path):
git_path = os.getcwd() + "/../" + git_file
if not os.path.exists(git_path):
return None
with open(git_path, "r") as git:
git_hash = git.read()
return git_hash[0:5]
def version_string():
"""Return the full version."""
git_hash = current_git_hash()
if git_hash:
return "pyhole v%s (%s) - https://github.com/jk0/pyhole" % (
__VERSION__, git_hash)
return "pyhole v%s - https://github.com/jk0/pyhole" % __VERSION__
def version_hash():
"""Return the current version with git hash."""
git_hash = current_git_hash()
return "%s-%s" % (__VERSION__, git_hash)
def version():
"""Return the current version."""
return __VERSION__
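# A minimal usage sketch (assuming the module is importable as pyhole.core.version):
#
#   from pyhole.core import version
#   print(version.version_string())  # "pyhole v0.8.9 (<hash>) - ..." inside a git checkout
#   print(version.version_hash())    # "<version>-<hash>"; the hash part is None elsewhere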
|
eufat/gender-ml
|
src/gender_last_two.py
|
Python
|
mit
| 1,327
| 0.009043
|
import nltk
from read_lineByLine_toList import read_line_by_line
from find_consonants import count_consonants
import random
male_list = read_line_by_line('datasets/male.csv')
female_list = read_line_by_line('datasets/female.csv')
# take the first word of a sentence
def get_first_word(words):
words_list = words.split()
if len(words_list) > 1 :
        # if the sentence has more than one word, return the first word
return words_list[0]
else:
        # if the sentence has only one word, return that word
return words
def gender_features_last_two(word):
    word = get_first_word(word)  # take the first word
    return {'last_two': word[-2:]}  # return the last two letters of the word
labeled_names = (
    [(name, 'male') for name in male_list] +
[(name, 'female') for name in female_list]
)
random.shuffle(labeled_names)
featuresets = [(gender_features_last_two(n), gender) for (n, gender) in labeled_names]
train_set = featuresets[500:]
test_set = featuresets[:500]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print ("ACCURACY IS: ")
print (nltk.classify.accuracy(classifier, test_set))
classifier.show_most_informative_features()
test_name = "aldey Wahyu Putra"
test_name = test_name.capitalize()  # capitalize() returns a new string; assign it back
print(test_name + " is " + classifier.classify(gender_features_last_two(test_name)))
|
EvanK/ansible
|
test/integration/targets/vault/faux-editor.py
|
Python
|
gpl-3.0
| 1,212
| 0
|
#!/usr/bin/env python
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# https://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import time
import os
def main(args):
path = os.path.abspath(args[1])
fo = open(path, 'r+')
content = fo.readlines()
content.append('faux editor added at %s\n' % time.time())
fo.seek(0)
fo.write(''.join(content))
fo.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[:]))
|
eadgarchen/tensorflow
|
tensorflow/contrib/slim/python/slim/nets/resnet_v2.py
|
Python
|
apache-2.0
| 14,548
| 0.003299
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Typical use:
from tensorflow.contrib.slim.python.slim.nets import
resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
resnet_arg_scope = resnet_utils.resnet_arg_scope
@add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with variable_scope.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = layers.batch_norm(
inputs, activation_fn=nn_ops.relu, scope='preact')
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = layers_lib.conv2d(
preact,
depth, [1, 1],
stride=stride,
normalizer_fn=None,
activation_fn=None,
scope='shortcut')
residual = layers_lib.conv2d(
preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
residual = resnet_utils.conv2d_same(
residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
residual = layers_lib.conv2d(
residual,
depth, [1, 1],
stride=1,
normalizer_fn=None,
activation_fn=None,
scope='conv3')
output = shortcut + residual
return utils.collect_named_outputs(outputs_collections, sc.name, output)
def resnet_v2(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope=None):
"""Generator for v2 (preactivation) ResNet models.
This function generates a family of ResNet v2 models. See the resnet_v2_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it. If excluded, `inputs` should be the
results of an activation-less convolution.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooli
|
Vaidyanath/tempest
|
tempest/api/volume/test_volumes_extend.py
|
Python
|
apache-2.0
| 1,453
| 0
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesV2ExtendTest(base.BaseVolumeTest):
@classmethod
def setup_clients(cls):
super(VolumesV2ExtendTest, cls).setup_clients()
cls.client = cls.volumes_client
@test.attr(type='gate')
def test_volume_extend(self):
# Extend Volume Test.
self.volume = self.create_volume()
extend_size = int(self.volume['size']) + 1
self.client.extend_volume(self.volume['id'], extend_size)
self.client.wait_for_volume_status(self.volume['id'], 'available')
volume = self.client.get_volume(self.volume['id'])
self.assertEqual(int(volume['size']), extend_size)
class VolumesV1ExtendTest(VolumesV2ExtendTest):
_api_version = 1
|
Fokko/incubator-airflow
|
tests/models/test_variable.py
|
Python
|
apache-2.0
| 3,100
| 0.00129
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from cryptography.fernet import Fernet
from airflow import settings
from airflow.models import Variable, crypto
from tests.test_utils.config import conf_vars
class TestVariable(unittest.TestCase):
def setUp(self):
crypto._fernet = None
def tearDown(self):
crypto._fernet = None
@conf_vars({('core', 'fernet_key'): ''})
def test_variable_no_encryption(self):
"""
Test variables without encryption
"""
Variable.set('key', 'value')
session = settings.Session()
test_var = session.query(Variable).filter(Variable.key == 'key').one()
self.assertFalse(test_var.is_encrypted)
self.assertEqual(test_var.val, 'value')
@conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
def test_variable_with_encryption(self):
"""
Test variables with encryption
"""
Variable.set('key', 'value')
session = settings.Session()
test_var = session.query(Variable).filter(Variable.key == 'key').one()
self.assertTrue(test_var.is_encrypted)
        self.assertEqual(test_var.val, 'value')
def test_var_with_encryption_rotate_fernet_key(self):
"""
Tests rotating encrypted variables.
"""
key1 = Fernet.generate_key()
key2 = Fernet.generate_key()
with conf_vars({('core', 'fernet_key'): key1.decode()}):
Variable.set('key', 'value')
session = settings.Session()
test_var = session.query(Variable).filter(Variable.key == 'key').one()
self.assertTrue(test_var.is_encrypted)
self.assertEqual(test_var.val, 'value')
self.assertEqual(Fernet(key1).decrypt(test_var._val.encode()), b'value')
# Test decrypt of old value with new key
with conf_vars({('core', 'fernet_key'): ','.join([key2.decode(), key1.decode()])}):
crypto._fernet = None
self.assertEqual(test_var.val, 'value')
# Test decrypt of new value with new key
test_var.rotate_fernet_key()
self.assertTrue(test_var.is_encrypted)
self.assertEqual(test_var.val, 'value')
self.assertEqual(Fernet(key2).decrypt(test_var._val.encode()), b'value')
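# Operational sketch of the rotation pattern exercised above (assumption: a standard
# airflow.cfg; the key names follow the conf_vars used in this test):
#
#   [core]
#   fernet_key = <new_key>,<old_key>   # new key first, old key kept so old values still decrypt
#
# after which each stored Variable can be re-encrypted with the new key via
# rotate_fernet_key(), as the test does on the ORM object.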
|
skladsec/facebookFriendSaver
|
facebookFriendSaver.py
|
Python
|
isc
| 3,504
| 0.041952
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys, getopt, re, os
try:
from splinter import Browser
except:
print "Please install Splinter: http://splinter.readthedocs.org/en/latest/install.html"
sys.exit();
import getpass
from splinter.request_handler.status_code import HttpResponseError
def main(argv):
email = None
txtopt = None
profile = None
self = None
socks = None
socksPort = None
try:
opts, args = getopt.getopt(argv, "ho:m:p:s:S:P:",["port=","socks=","self=","profile=","output=","mail=","help"])
except:
print "Use --help for help"
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print 'Usage %s options \n' % (os.path.basename(__file__))
print ' -h, --help This help'
print ' -m, --mail Your facebook login email'
print ' -o, --output Your output file name'
print ' -p, --profile Profile to capture friends(name after facebook.com/)'
print ' -s, --self Your profile name(name after facebook.com/)'
print ' -S, --socks Socks Proxy Address for Tor use'
print ' -P, --port Port Socks for Tor use'
sys.exit()
elif opt in ("-o","--output"):
txtopt = arg
elif opt in ("-m","--mail"):
email = arg
elif opt in ("-p","--profile"):
profile = arg
elif opt in ("-s","--self"):
self = arg
elif opt in ("-S","--socks"):
socks = arg
elif opt in ("-P","--port"):
socksPort = arg
if not email or not txtopt or not self:
print 'Use --help for help'
sys.exit()
password = getpass.getpass()
    if socks and socksPort:
proxy_settings = {
'network.proxy.type':1,
'network.proxy.socks': socks,
'network.proxy.socks_port': socksPort
}
browser = Browser('firefox',profile_preferences=proxy_settings)
else:
browser = Browser()
# with Browser() as browser:
browser.visit('https://m.facebook.com/')
browser.fill("email",email);
browser.fill("pass",password);
browser.find_by_name("login").click()
if browser.is_element_present_by_css('.login_error_box'):
print 'The email and password didn\'t work.'
sys.exit()
try:
fileopt = open(txtopt, 'a')
except:
sys.exit('Unable to open file %s' % txtopt)
if not profile:
browser.find_link_by_text("Profile").click()
print 'Accessing profile at %s\n' % browser.url
browser.find_link_by_text("Friends").click()
print 'Accessing friends at %s\n' % browser.url
else:
url = 'https://m.facebook.com/%s/friends?refid=17' % profile
print 'Accessing profile friends at %s\n' % url
browser.visit(url)
friends = browser.find_by_css('a')
notList = ["/a/mobile/friends/add_friend.php","language.php","/help/","/settings/","/pages/","/bugnub/","/policies/","/logout","/home","/friends","/messages/","/notifications.php","/buddylist.php","/menu/","/photo.php","
|
/mbasic/","%s"%profile,"%s"%self]
    for friend in friends:
if all([x not in friend['href'] for x in notList ]):
fileopt.write('%s\n' % friend['href'])
print '%s' % friend.value
while browser.is_element_present_by_css("#m_more_friends"):
browser.find_by_css('#m_more_friends a').first.click()
friends = browser.find_by_css('a')
for friend in friends:
if all([x not in friend['href'] for x in notList ]):
fileopt.write('%s\n' % friend['href'])
print '%s' % friend.value
if __name__ == "__main__":
try:
main(sys.argv[1:])
except KeyboardInterrupt:
sys.stdout.write('\nQuit by keyboard interrupt sequence!')
|
morrillo/oerp_migrator
|
oerp_migrator.py
|
Python
|
gpl-2.0
| 9,295
| 0.049828
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
import yaml
import xmlrpclib
import sys
import ConfigParser
import pdb
import logging
from datetime import date
from datetime import datetime
def get_model_id(oerp_destino,model=None):
""" Return model processed ID """
if model == None:
return_id = 0
else:
sock = oerp_destino['sock']
return_id = 0
control_ids = sock.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
'oerp_migrator.control','search',[('model_name','=',model)])
if control_ids:
data = sock.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
'oerp_migrator.control','read',control_ids,['max_id'])
for data_item in data:
return_id = data_item['max_id']
return return_id
def update_model_id(oerp_destino,model=None,maxId=0):
""" Updates control table with maximum processed ID """
if model == None:
return None
sock = oerp_destino['sock']
control_ids = sock.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
'oerp_migrator.control','search',[('model_name','=',model)])
vals_update = {
'max_id': maxId
}
vals_insert = {
'model_name': model,
'max_id': maxId
}
if control_ids:
return_id = sock.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
'oerp_migrator.control','write',control_ids,vals_update)
else:
return_id = sock.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
'oerp_migrator.control','create',vals_insert)
return None
def get_field_type(oerp_origen,model):
if not oerp_origen or not model:
import pdb;pdb.set_trace()
exit(1)
args = [('model','=',model)]
fields = ['name','ttype','relation','required']
model_search = 'ir.model.fields'
args = [('model','=',model)]
sock = oerp_origen['sock']
field_ids = sock.execute(oerp_origen['dbname'],oerp_origen['uid'],oerp_origen['pwd'],model_search,'search',args)
data = sock.execute(oerp_origen['dbname'],oerp_origen['uid'],oerp_origen['pwd'],model_search,'read',field_ids,fields)
return_dict = {}
for data_item in data:
return_dict[data_item['name']] = [data_item['ttype'],data_item['relation'],data_item['required']]
return return_dict
def get_lookup_ids(oerp_destino=None,relation_parm=None,ids_parm=None):
if not oerp_destino or not relation_parm or not ids_parm:
import pdb;pdb.set_trace()
exit(1)
sock = oerp_destino['sock']
args = [('name','=',ids_parm[1])]
obj_destino_ids = sock.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],relation_parm,'search',args)
if obj_destino_ids:
return obj_destino_ids[0]
else:
#import pdb;pdb.set_trace()
args = [('origin_id','=',ids_parm[0])]
try:
obj_destino_ids = sock.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],relation_parm,'search',args)
if obj_destino_ids:
return obj_destino_ids[0]
else:
return 0
except:
logging.error("Problem looking up id for %s. Assigning default value"%(relation_parm))
return 1
return 0
def read_models(config=None,section=None):
if not config or not section:
exit(1)
return_dict = {}
for dict_keys in config.keys():
return_dict[dict_keys] = {}
if dict_keys not in ['origin','destination']:
fields = config[dict_keys]['fields']
if 'filter' in config[dict_keys].keys():
_filter = config[dict_keys]['filter']
else:
_filter = ''
return_dict[dict_keys]['filter'] = _filter
return_dict[dict_keys]['sequence'] = config[dict_keys]['sequence']
return_dict[dict_keys]['fields'] = fields.split(',')
return return_dict
def connect_openerp(dict_parms = None):
if not dict_parms:
exit(1)
# Get the uid
dict_connection = {}
    sock_common = xmlrpclib.ServerProxy('http://'+dict_parms['hostname']+':'+str(dict_parms['port'])+'/xmlrpc/common')
# import pdb;pdb.set_trace()
uid = sock_common.login(dict_parms['dbname'], dict_parms['username'], dict_parms['password'])
#replace localhost with the address of the server
sock = xmlrpclib.ServerProxy('http://'+dict_parms['hostname']+':'+str(dict_parms['port'])+'/xmlrpc/object')
dict_connection['uid'] = uid
dict_connection['pwd'] = dict_parms['password']
    dict_connection['dbname'] = dict_parms['dbname']
dict_connection['sock'] = sock
return dict_connection
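# Sketch of the YAML configuration this script expects (inferred from main(),
# connect_openerp() and read_models(); key names come from the code, values are examples):
#
#   origin:
#     hostname: localhost
#     port: 8069
#     dbname: source_db
#     username: admin
#     password: secret
#   destination:
#     hostname: localhost
#     port: 8070
#     dbname: target_db
#     username: admin
#     password: secret
#   res.partner:
#     sequence: 1
#     fields: name,street,city
#     filter: ''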
def migrate_model(oerp_origen = None, oerp_destino = None, model = None, fields = None, filter_parm = ''):
if not oerp_origen or not oerp_destino or not model or not fields:
exit(1)
logging.info("Migrando modelo %s"%(model))
# data_obj = oerp_origen.get(model)
sock = oerp_origen['sock']
if filter_parm <> '':
data_ids = sock.execute(oerp_origen['dbname'],oerp_origen['uid'],oerp_origen['pwd'], model,'search',[])
else:
filter_id = get_model_id(oerp_destino,model)
data_ids = sock.execute(oerp_origen['dbname'],oerp_origen['uid'],oerp_origen['pwd'], model,'search',[('id','>',filter_id)])
field_types = get_field_type(oerp_origen,model)
fields.append('create_date')
data_items = sock.execute(oerp_origen['dbname'],oerp_origen['uid'],oerp_origen['pwd'], model,'read',data_ids,fields)
max_id = 0
for data in data_items:
dict_insert = {}
for field in fields:
if field in field_types:
if field_types[field][0] not in ['many2many','one2many','many2one']:
if field_types[field][0] != 'boolean' and data[field]:
# if field_types[field][0] == 'char':
dict_insert[field] = data[field]
else:
if data[field]:
dict_insert[field] = data[field]
else:
if field_types[field][0] == 'many2one':
if data[field]:
dict_insert_field = get_lookup_ids(oerp_destino,field_types[field][1],data[field])
if dict_insert_field <> 0:
dict_insert[field] = dict_insert_field
else:
dict_insert[field] = data[field][0]
else:
if field_types[field][2]:
dict_insert[field] = 1
if 'id' not in dict_insert.keys():
dict_insert['origin_id'] = data['id']
if data['id'] > max_id:
max_id = data['id']
logging.debug(dict_insert)
sock_destino = oerp_destino['sock']
destination_ids = sock_destino.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'], \
model,'search',[('origin_id','=',data['id'])])
if destination_ids:
data_items = sock_destino.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
model,'write',destination_ids,dict_insert)
else:
try:
data_items = sock_destino.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
model,'create',dict_insert)
except:
logging.error(dict_insert)
# import pdb;pdb.set_trace()
pass
update_model_id(oerp_destino,model,max_id)
logging.info("Fin migración modelo %s"%(model))
return None
def validate_setup(dict_models = {}, oerp_destino = {}):
if not dict_models:
logging.error("No dict_models parameter in validate_setup")
return False
if not oerp_destino:
logging.error("No oerp_destino parameter in validate_setup")
return False
for model in dict_models.keys():
if model not in ['origin','destination']:
args = [('model','=',model)]
fields = ['name','ttype','relation','required']
model_search = 'ir.model.fields'
args = [('model','=',model),('name','=','origin_id')]
sock = oerp_destino['sock']
origin_ids = sock.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],model_search,'search',args)
if not origin_ids:
logging.error("Model "+model+" does not have origin_id column")
return False
return True
def main(configfile_parm = ''):
logging.basicConfig(filename='migrator.log',level=logging.DEBUG)
logging.info("Comenzando la migración")
stream = file(configfile_parm,'r')
dict_yaml = yaml.safe_load(stream)
if not dict_yaml['origin'] or not dict_yaml['destination']:
logging.error('No origin/destination specified in yaml file.')
exit(1)
dict_origin = dict_yaml['origin']
logging.info("Origin host: %s port: %s database: %s"%(dict_origin['hostname'],dict_origin['port'],dict_origin['dbname']))
dict_destination = dict_yaml['destination']
logging.info("Destination host
|
npalko/uRPC
|
examples/python/client.py
|
Python
|
bsd-3-clause
| 385
| 0.038961
|
import sys
sys.path.append('../../python/')
import urpc
import randexample_pb2
if __name__ == '__main__':
client = urpc.client.SingleThreadClient()
    request = randexample_pb2.Request()
reply = randexample_pb2.Reply()
request.nMessage = 1
request.nSample = 10
client.sendRequest('some service',8,request)
reply.ParseFromString(client.getReply())
    print reply
|
kzcashteam/kzcash
|
qa/rpc-tests/multi_rpc.py
|
Python
|
mit
| 4,593
| 0.005225
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
#Append rpcauth to kzcash.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "kzcash.conf"), 'a') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
        ##################################################
url = urlparse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
        #Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
studio1247/gertrude
|
document_dialog.py
|
Python
|
gpl-3.0
| 13,966
| 0.002794
|
# -*- coding: utf-8 -*-
# This file is part of Gertrude.
#
# Gertrude is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Gertrude is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gertrude; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from builtins import str as text
import traceback
import subprocess
import wx
import wx.lib.filebrowsebutton
from ooffice import *
class DocumentDialog(wx.Dialog):
def __init__(self, parent, modifications):
self.modifications = modifications
self.document_generated = False
# Instead of calling wx.Dialog.__init__ we precreate the dialog
# so we can set an extra style that must be set before
# creation, and then we create the GUI object using the Create
# method.
pre = wx.PreDialog()
pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
pre.Create(parent, -1, "Génération de document")
# This ne
|
xt step is the most important, it turns this Python
# object into the real wrapper of the dialog (instead of pre)
# as far as the wxPython extension is concerned.
self.PostCreate(pre)
self.sizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(wx.StaticText(self, -1, "Format :"), 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
if not IsOODocument(modifications.template):
self.format = wx.Choice(self, -1, c
|
hoices=["Texte"])
elif sys.platform == 'win32':
self.format = wx.Choice(self, -1, choices=["LibreOffice", "PDF"])
else:
self.format = wx.Choice(self, -1, choices=["LibreOffice"])
self.format.SetSelection(0)
self.Bind(wx.EVT_CHOICE, self.onFormat, self.format)
sizer.Add(self.format, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
default_output = normalize_filename(modifications.default_output)
self.extension = os.path.splitext(default_output)[-1]
wildcard = "OpenDocument (*%s)|*%s|PDF files (*.pdf)|*.pdf" % (self.extension, self.extension)
self.fbb = wx.lib.filebrowsebutton.FileBrowseButton(self, -1,
size=(600, -1),
labelText="Nom de fichier :",
startDirectory=config.documents_directory,
initialValue=os.path.join(config.documents_directory, default_output),
fileMask=wildcard,
fileMode=wx.SAVE)
sizer.Add(self.fbb, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.sizer.Add(sizer, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.gauge = wx.Gauge(self, -1, size=(-1, 10))
self.gauge.SetRange(100)
self.sizer.Add(self.gauge, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.LEFT | wx.TOP, 5)
line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
self.sizer.Add(line, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.TOP | wx.BOTTOM, 5)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sauver_ouvrir = wx.Button(self, -1, "Sauver et ouvrir")
self.sauver_ouvrir.SetDefault()
self.Bind(wx.EVT_BUTTON, self.OnSauverOuvrir, self.sauver_ouvrir)
sizer.Add(self.sauver_ouvrir, 0, wx.LEFT | wx.RIGHT, 5)
self.sauver = wx.Button(self, -1, "Sauver")
self.Bind(wx.EVT_BUTTON, self.OnSauver, self.sauver)
sizer.Add(self.sauver, 0, wx.RIGHT, 5)
if modifications.multi:
button = wx.Button(self, -1, "Sauver individuellement")
self.Bind(wx.EVT_BUTTON, self.OnSauverUnitaire, button)
sizer.Add(button, 0, wx.RIGHT, 5)
if modifications.email:
self.sauver_envoyer = wx.Button(self, -1, "Sauver et envoyer par email")
self.Bind(wx.EVT_BUTTON, self.OnSauverEnvoyer, self.sauver_envoyer)
sizer.Add(self.sauver_envoyer, 0, wx.RIGHT, 5)
if modifications.multi is False and not modifications.email_to:
self.sauver_envoyer.Disable()
if database.creche.caf_email:
self.sauver_envoyer = wx.Button(self, -1, "Sauver et envoyer par email à la CAF")
self.Bind(wx.EVT_BUTTON, self.OnSauverEnvoyerCAF, self.sauver_envoyer)
sizer.Add(self.sauver_envoyer, 0, wx.LEFT | wx.RIGHT, 5)
# btnsizer.Add(self.ok)
btn = wx.Button(self, wx.ID_CANCEL)
sizer.Add(btn, 0, wx.RIGHT, 5)
self.sizer.Add(sizer, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.SetSizer(self.sizer)
self.sizer.Fit(self)
self.CenterOnScreen()
def onFormat(self, _):
filename = os.path.splitext(self.fbb.GetValue())[0]
if self.format.GetSelection() == 0:
self.fbb.SetValue(filename + self.extension, None)
else:
self.fbb.SetValue(filename + ".pdf", None)
def Sauver(self):
self.fbb.Disable()
self.sauver.Disable()
if self.sauver_ouvrir:
self.sauver_ouvrir.Disable()
self.filename = self.fbb.GetValue()
f, e = os.path.splitext(self.filename)
if e == ".pdf":
self.pdf = True
self.oo_filename = f + self.extension
else:
self.pdf = False
self.oo_filename = self.filename
config.documents_directory = os.path.dirname(self.filename)
dlg = None
try:
if self.modifications.multi is not False:
errors = {}
simple_modifications = self.modifications.get_simple_modifications(self.oo_filename)
for i, (filename, modifs) in enumerate(simple_modifications):
self.gauge.SetValue((100 * i) / len(simple_modifications))
errors.update(GenerateDocument(modifs, filename=filename))
if self.pdf:
f, e = os.path.splitext(filename)
convert_to_pdf(filename, f + ".pdf")
os.remove(filename)
else:
self.filename = self.filename.replace(" <prenom> <nom>", "")
self.oo_filename = self.oo_filename.replace(" <prenom> <nom>", "")
errors = GenerateDocument(self.modifications, filename=self.oo_filename, gauge=self.gauge)
if self.pdf:
convert_to_pdf(self.oo_filename, self.filename)
os.remove(self.oo_filename)
self.document_generated = True
if errors:
message = "Document %s généré avec des erreurs :\n" % self.filename
for label in errors.keys():
message += '\n' + label + ' :\n '
message += '\n '.join(errors[label])
dlg = wx.MessageDialog(self, message, 'Message', wx.OK | wx.ICON_WARNING)
except IOError:
print(sys.exc_info())
dlg = wx.MessageDialog(self, "Impossible de sauver le document. Peut-être est-il déjà ouvert ?", 'Erreur',
wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
return
except Exception as e:
info = sys.exc_info()
message = ' [type: %s value: %s traceback: %s]' % (info[0], info[1], traceback.extract_tb(info[2]))
dlg = wx.MessageDialog(self, message, 'Erreur', wx.OK | wx.ICON
|
rjferrier/fluidity
|
tools/optimality.py
|
Python
|
lgpl-2.1
| 32,527
| 0.017647
|
#!/usr/bin/python
# Copyright (C) 2006 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a full list
# of copyright holders.
#
# Prof. C Pain
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# amcgsoftware@imperial.ac.uk
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import os.path
import numpy
import argparse
import shlex
from subprocess import Popen, PIPE
import scipy.optimize
import string
import libspud
from fluidity_tools import stat_parser
from fluidity_tools import stat_creator
import time
import pickle
import glob
import math
import shutil
# Hack for libspud to be able to read an option from a different files.
# A better solution would be to fix libspud or use an alternative implementation like
# https://github.com/gmarkall/manycore_form_compiler/blob/master/mcfc/optionfile.py
def superspud(filename, cmd):
libspud.load_options(filename)
r = None
if hasattr(cmd, '__iter__'):
for c in cmd:
exec "try: r = " + c + "\nexcept libspud.SpudNewKeyWarning: pass"
else:
exec "try: r = " + cmd + "\nexcept libspud.SpudNewKeyWarning: pass"
libspud.clear_options()
return r
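# Usage sketch (editorial note, not part of the original Fluidity tool): a
# call such as
#   superspud("optimality_options.xml", "libspud.get_option('/model/option_file')")
# (hypothetical file name) loads the option tree, evaluates the quoted libspud
# call while swallowing SpudNewKeyWarning, clears the options again and returns
# the result; when a list of commands is passed they are executed in order and
# r holds whatever the last assignment produced.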
# Executes the model specified in the optimality option tree
# The model stdout is printed to stdout.
def run_model(m, opt_options, model_options):
update_custom_controls(m, opt_options)
if (superspud(model_options, "libspud.have_option('/adjoint/controls/load_controls')")):
# If the model is loading the default controls, we need to make sure the control files are up to date:
update_default_controls(m, opt_options, model_options)
command_line = superspud(opt_options, "libspud.get_option('/model/command_line')")
option_file = superspud(opt_options, "libspud.get_option('/model/option_file')")
args = shlex.split(command_line)
args.append(option_file)
p = Popen(args, stdout=PIPE,stderr=PIPE)
out = string.join(p.stdout.readlines() )
outerr = string.join(p.stderr.readlines() )
if p.wait()
|
!= 0:
print "Model execution failed."
print "The error
|
was:"
print outerr
exit()
if verbose:
print "Model output: "
print out
# Initialises the custom controls using the supplied python code.
def get_custom_controls(opt_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
m = {}
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
# With the custom type, the user specifies python function to initialise the controls.
if ctype == 'custom':
initial_control_code = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type::custom/initial_control')")
d = {}
exec initial_control_code in d
m[cname] = d['initial_control']()
return m
# Initialise the default controls by reading in the control files.
# This assumes that the model has been run without the "/adjoint/load_controls" option (which produced the initial control files).
def read_default_controls(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
m = {}
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype == 'default':
act_flag = False # Check that at least one control file exists
for ctrl_file in glob.iglob('control_'+simulation_name+'_'+cname+ '_[0-9]*.pkl'):
try:
timestep = int(ctrl_file.strip()[len('control_'+simulation_name+'_'+ cname+ '_'):len(ctrl_file)-4])
except:
print "Error while reading the control files."
print "The control file ", ctrl_file, " does not conform the standard naming conventions for control files."
exit()
f = open(ctrl_file, 'rb')
m[(cname, timestep)] = pickle.load(f)
f.close()
act_flag = True
if act_flag == False:
print "Warning: Found no control file for control ", cname, "."
return m
# Initialise the default control bounds by reading in the control bound files.
# This assumes that the model has been run without the "/adjoint/load_controls" option (which produced the initial control bound files).
def read_default_control_bounds(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
m_bounds = {"lower_bound": {}, "upper_bound": {}}
# Loop over controls
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype != 'default':
continue
have_bound = {}
# Loop over lower and upper bound
for k in m_bounds.keys():
have_bound[k] = superspud(model_options, "libspud.have_option('/adjoint/controls/control["+str(i)+"/bounds/"+k+"')")
if not have_bound[k]:
continue
act_flag = False # Check that at least one control bound file exists
for ctrl_file in glob.iglob('control_'+simulation_name+'_'+cname+ '_'+k+'_[0-9]*.pkl'):
try:
timestep = int(ctrl_file.strip()[len('control_'+simulation_name+'_'+ cname+ '_'+k+'_'):len(ctrl_file)-4])
except:
print "Error while reading the control bound files."
print "The control bound file ", ctrl_file, " does not conform the standard naming conventions for control files."
exit()
f = open(ctrl_file, 'rb')
m_bounds[k][(cname, timestep)] = pickle.load(f)
f.close()
act_flag = True
if act_flag == False:
print "Warning: Found no control bound file for control ", cname, "."
return m_bounds
# Completes the control bounds by adding the missing controls and filling them with nan's
def complete_default_control_bounds(m, m_bounds):
bound_types = {"lower_bound": {}, "upper_bound": {}}
for bound_type in bound_types:
for control in m.keys():
if m_bounds[bound_type].has_key(control):
continue
# We need objects as dtype because we want to keep the Nones for later
m_bounds[bound_type][control] = numpy.empty(shape = m[control].shape, dtype=object)
m_bounds[bound_type][control].fill(None)
return m_bounds
# Returns the control derivatives for both the custom and the default controls.
def read_control_derivatives(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
functional_name = superspud(opt_options, "libspud.get_option('/functional/name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
derivs = {}
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype == 'default':
act_flag = F
|
alexalv-practice/topoml
|
modules/root.py
|
Python
|
mit
| 532
| 0
|
# -*- coding: utf-8 -*-
import cherrypy
class Root(object):
exposed = True
@cherrypy.tools.json_out()
def GET(self, id=None):
return ["Hello", "world", "!"]
if __name__ == '__main__':
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
|
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
}
}
cherrypy.quick
|
start(Root(), '/', conf)
|
cgranade/qutip
|
qutip/tests/test_cy_structs.py
|
Python
|
bsd-3-clause
| 1,816
| 0
|
import pytest
import numpy as np
import scipy.sparse
import qutip
from qutip.fastsparse import fas
|
t_csr_matrix
from qutip.cy.checks import (_test_sorting, _test_
|
coo2csr_inplace_struct,
_test_csr2coo_struct, _test_coo2csr_struct)
from qutip.random_objects import rand_jacobi_rotation
def _unsorted_csr(N, density=0.5):
M = scipy.sparse.diags(np.arange(N), 0, dtype=complex, format='csr')
nvals = N**2 * density
while M.nnz < 0.95*nvals:
M = rand_jacobi_rotation(M)
M = M.tocsr()
return fast_csr_matrix((M.data, M.indices, M.indptr), shape=M.shape)
def sparse_arrays_equal(a, b):
return not (a != b).data.any()
@pytest.mark.repeat(20)
def test_coo2csr_struct():
"Cython structs : COO to CSR"
A = qutip.rand_dm(5, 0.5).data
assert sparse_arrays_equal(A, _test_coo2csr_struct(A.tocoo()))
@pytest.mark.repeat(20)
def test_indices_sort():
"Cython structs : sort CSR indices inplace"
A = _unsorted_csr(10, 0.25)
B = A.copy()
B.sort_indices()
_test_sorting(A)
assert np.all(A.data == B.data)
assert np.all(A.indices == B.indices)
@pytest.mark.repeat(20)
def test_coo2csr_inplace_nosort():
"Cython structs : COO to CSR inplace (no sort)"
A = qutip.rand_dm(5, 0.5).data
B = _test_coo2csr_inplace_struct(A.tocoo(), sorted=0)
assert sparse_arrays_equal(A, B)
@pytest.mark.repeat(20)
def test_coo2csr_inplace_sort():
"Cython structs : COO to CSR inplace (sorted)"
A = qutip.rand_dm(5, 0.5).data
B = _test_coo2csr_inplace_struct(A.tocoo(), sorted=1)
assert sparse_arrays_equal(A, B)
@pytest.mark.repeat(20)
def test_csr2coo():
"Cython structs : CSR to COO"
A = qutip.rand_dm(5, 0.5).data
B = A.tocoo()
C = _test_csr2coo_struct(A)
assert sparse_arrays_equal(B, C)
|
lugensa/js.dynatree
|
js/dynatree/__init__.py
|
Python
|
bsd-3-clause
| 362
| 0.005525
|
from fanstatic import Library, Resource
import js.jquery
import js.jqueryui
library = Library('dynatree', 'resources')
dynatree_css = Resource(library, 'src/skin-vista/ui.dynatree.css')
dynatree = Resource(library, 'src/jquery.dynatree.js',
mi
|
nified='src/jquery.dynatree.min.js',
depends=[dyn
|
atree_css, js.jquery.jquery, js.jqueryui.jqueryui])
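# Usage sketch (editorial note, not part of the original package): inside a
# fanstatic-wrapped WSGI request, calling dynatree.need() is expected to pull
# the script plus its declared dependencies (the skin CSS, jQuery and jQuery
# UI) into the rendered page, serving jquery.dynatree.min.js instead when the
# fanstatic middleware is configured to use minified resources.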
|
viveksck/author-dedupe
|
src/flln_partition.py
|
Python
|
gpl-3.0
| 4,607
| 0.001519
|
# Copyright 2008, Jeffrey Regier, jeff [at] stat [dot] berkeley [dot] edu
# This file is part of Author-Dedupe.
#
# Author-Dedupe is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Author-Dedupe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Author-Dedupe. If not, see <http://www.gnu.org/licenses/>.
import re
import author
import partition_part
from collections import defaultdict
def compatible_names(a1, a2):
def compatible_name_part(w1, w2):
w1 = re.sub(r'\W', '', w1)
w2 = re.sub(r'\W', '', w2)
l = min(len(w1), len(w2))
if not l:
return False
return w1[:l] == w2[:l]
short, long = list(a1.middle_names), a2.middle_names
if len(short) > len(long):
return compatible_names(a2, a1)
# the front first names must be compatible
# (note: last names here are always equal)
if not compatible_name_part(a1.first_name, a2.first_name):
return False
# try finding each middle name of long in short, and remove the
# middle name from short if found
for wl in long:
if not short:
break
ws = short.pop(0)
if not compatible_name_part(ws, wl):
short.insert(0, ws)
# true iff short is a compatible substring of long
return short == []
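# Illustrative behaviour (editorial note, not part of the original
# Author-Dedupe source): for two authors sharing a last name, first names
# "J." and "John" are compatible because the prefix check is applied after
# stripping non-word characters, and middle names ["A."] match ["Anthony"]
# since the single short entry is consumed by a compatible long entry;
# ["B."] against ["Anthony"] fails the prefix check, so short never empties
# and compatible_names returns False.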
class FllnPartition():
"""(first-letter-first-name, last-name) partition"""
def __init__(self, authors, info_comp):
self.info_comp = info_comp
self.load_parts(authors)
self.load_compat_mat(authors)
def load_parts(self, authors):
self.parts = set()
def singleton_part(a):
part = partition_part.PartitionPart()
part.add(a)
return part
self.parts.update([singleton_part(a) for a in authors])
def load_compat_mat(self, authors):
self.compat_map = defaultdict(set)
for a1 in authors:
for a2 in authors:
if compatible_names(a1, a2):
self.compat_map[a1].add(a2)
def get_partition_compat(self, part):
compat_maps = [self.compat_map[a] for a in part]
return reduce(set.intersection, compat_maps)
def stricter_than(self, less_p, more_p):
less_compat = self.get_partition_compat(less_p)
more_compat = self.get_partition_compat(more_p)
return less_compat < more_compat
def is_equivalent(self, p1, p2):
compat1 = self.get_partition_compat(p1)
compat2 = self.get_partition_compat(p2)
return compat1 == compat2
def target_equivalent(self, source_p):
for p in self.parts:
if p == source_p:
continue
if self.is_equivalent(source_p, p):
return p
def find_stricter(self, source_p):
stricter = []
for p in self.parts:
if p == source_p:
continue
if self.stricter_than(p, source_p):
stricter.append(p)
return stricter
def target_sole_stricter(self, source_p):
stricter = self.find_stricter(source_p)
if len(stricter) == 1:
return stricter[0]
elif len(stricter) > 1:
for s in stricter:
if self.info_comp.compare(source_p, s) < 7e-6:
return s
def merge_iter(self, get_target_f):
num_changes = 0
# copy avoids a run time error when the set changes size
for p in set.copy(self.parts):
target = get_target_f(p)
if target:
target.extend(p)
self.parts.remove(p)
|
num_changes += 1
return num_changes
def merge(self):
self.merge_iter(self.target_equivalent)
#iteratively merge the parts into the stricter parts,
#when there is only one stricter part
while self.merge_iter(self.target_sole_stricter):
pass
#TODO: why doesn't it ever help to do this more than once?
for part in self.parts:
merged_name = part.full_name()
for a in part:
|
a.merged_name = merged_name
|
ibamacsr/painelmma_api
|
restApp/tests.py
|
Python
|
mit
| 8,630
| 0.000579
|
#from django.test import TestCase
from datetime import date
from decimal import *
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from .models import *
from .mommy_recipes import *
def get_response(client, url, params):
return client.get(
url,
params,
format='json'
)
class TestDiarioAwifs(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-diario')
self.params = {'uf': 'MT', 'ano': 2015, 'mes': 10, 'tipo': 'AWIFS'}
deter_awifs_1.make(data_imagem=date(2015, 10, 10))
def test_response(self):
response = get_response(self.client, self.url, self.params)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
def test_response_diario(self):
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 1)
self.assertEqual(data_received[0]['dia'], 10)
self.assertEqual(data_received[0]['total'], Decimal('0.13'))
deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=0.29)
respon
|
se = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_rece
|
ived), 2)
self.assertEqual(data_received[1]['dia'], 12)
self.assertEqual(data_received[1]['total'], Decimal('0.29'))
deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=0.31)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 2)
self.assertEqual(data_received[1]['dia'], 12)
self.assertEqual(data_received[1]['total'], Decimal('0.60'))
deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=1)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 2)
self.assertEqual(data_received[1]['dia'], 12)
self.assertEqual(data_received[1]['total'], Decimal('1.60'))
deter_awifs_2.make(data_imagem=date(2015, 11, 1))
deter_awifs_2.make(data_imagem=date(2015, 11, 1))
deter_awifs_2.make(data_imagem=date(2015, 11, 2))
deter_awifs_2.make(data_imagem=date(2015, 11, 3), area_km2=1.2)
self.params = {'uf': 'MT', 'ano': 2015, 'mes': 11, 'tipo': 'AWIFS'}
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 3)
self.assertEqual(response.data[0]['data'][0]['dia'], 1)
self.assertEqual(response.data[0]['data'][0]['total'], Decimal('1.64'))
self.assertEqual(response.data[0]['data'][1]['dia'], 2)
self.assertEqual(response.data[0]['data'][1]['total'], Decimal('0.82'))
self.assertEqual(response.data[0]['data'][2]['dia'], 3)
self.assertEqual(response.data[0]['data'][2]['total'], Decimal('1.2'))
class TestDiarioDeter(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-diario')
self.params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
'tipo': 'DETER', 'estagio': 'Corte Raso'}
daily_deter_1.make(data_imagem=date(2015, 8, 1))
def test_response(self):
response = get_response(self.client, self.url, self.params)
self.assertEqual(response.status_code, 200)
def test_response_diario(self):
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
day = data_received[0]['dia']
area = data_received[0]['total']
self.assertEqual(len(data_received), 1)
self.assertEqual(day, 1)
self.assertEqual(area, Decimal('0.23'))
daily_deter_1.make(data_imagem=date(2015, 8, 1), area_km2=1)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
day = data_received[0]['dia']
area = data_received[0]['total']
self.assertEqual(len(data_received), 1)
self.assertEqual(day, 1)
self.assertEqual(area, Decimal('1.23'))
daily_deter_1.make(data_imagem=date(2015, 8, 9), area_km2=1.89)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
day = data_received[1]['dia']
area = data_received[1]['total']
self.assertEqual(len(data_received), 2)
self.assertEqual(day, 9)
self.assertEqual(area, Decimal('1.89'))
daily_deter_1.make(data_imagem=date(2015, 8, 10), area_km2=1)
daily_deter_1.make(data_imagem=date(2015, 8, 11), area_km2=1)
daily_deter_1.make(data_imagem=date(2015, 8, 10), area_km2=2)
daily_deter_1.make(data_imagem=date(2015, 8, 30), area_km2=2)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 5)
self.assertEqual(data_received[0]['dia'], 1)
self.assertEqual(data_received[1]['dia'], 9)
self.assertEqual(data_received[2]['dia'], 10)
self.assertEqual(data_received[3]['dia'], 11)
self.assertEqual(data_received[4]['dia'], 30)
self.assertEqual(data_received[0]['total'], Decimal('1.23'))
self.assertEqual(data_received[1]['total'], Decimal('1.89'))
self.assertEqual(data_received[2]['total'], Decimal('3'))
self.assertEqual(data_received[3]['total'], Decimal('1'))
self.assertEqual(data_received[4]['total'], Decimal('2'))
class TestDiarioQualif(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-diario')
self.params = {'uf': 'BA', 'ano': 2013, 'mes': 9,
'tipo': 'DETER', 'estagio': 'Corte Raso'}
def test_response(self):
response = get_response(self.client, self.url, self.params)
self.assertEqual(response.status_code, 200)
class TestMontly(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-mensal')
# self.user = User.objects.create_user(
# 'test', 'test@test.com', 'password'
# )
# self.token = Token.objects.get(user=self.user)
# def test_response(self):
# response = get_response(self.client, self.url, None)
# self.assertEqual(response.status_code, 200)
def test_daily_deter_response(self):
daily_deter_1.make()
daily_deter_2.make()
response = self.client.post(
revese("api:login"),
{'username': 'test', 'password': 'password'},
format='json'
)
params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
'tipo': 'DETER'}
response = get_response(self.client, self.url, params)
self.assertEqual(response.status_code, 200)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 1)
# def test_public_deter_response(self):
# public_deter_1.make()
# public_deter_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# def test_daily_deter_qualif_response(self):
# daily_deter_qualif_1.make()
# daily_deter_qualif_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# self.assertEqual(response.status_code, 200)
# self.assertEqual(response.status_code, 200)
# def test_public_deter_qualif_response(self):
# public_deter_qualif_1.make()
# public_deter_qualif_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
#
|
nonZero/demos-python
|
src/examples/long/reading_a_static_file_from_inside_a_package/mypack/mymod.py
|
Python
|
gpl-3.0
| 731
| 0.001368
|
# CHECK_WITH python2
import pkg_resources
import os.path # for dir
|
name, join
import pkgutil # for get_data
static_file_content = pkg_resources.resource_string(
'mypack', 'static_file.html').decode()
print('static_file_content is [{0}]'.format(static_file_content))
def get_real_filename(filename):
return os.path.join(os.path.dirname(__file__), filename)
def get_data(filename):
|
return open(get_real_filename(filename), 'rb').read()
static_file_content2 = get_data('static_file.html').decode()
print('static_file_content2 is [{0}]'.format(static_file_content2))
static_file_content3 = pkgutil.get_data('mypack', 'static_file.html').decode()
print('static_file_content3 is [{0}]'.format(static_file_content3))
|
turbokongen/home-assistant
|
homeassistant/components/motion_blinds/__init__.py
|
Python
|
apache-2.0
| 4,571
| 0.000875
|
"""The motion_blinds component."""
import asyncio
from datetime import timedelta
import logging
from socket import timeout
from motionblinds import MotionMulticast
from homeassistant import config_entries, core
from homeassistant.const import CONF_API_KEY, CONF_HOST, EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
DOMAIN,
KEY_COORDINATOR,
KEY_GATEWAY,
KEY_MULTICAST_LISTENER,
MANUFACTURER,
MOTION_PLATFORMS,
)
from .gateway import ConnectMotionGateway
_LOGGER = logging.getLogger(__name__)
def setup(hass: core.HomeAssistant, config: dict):
"""Set up the Motion Blinds component."""
return True
async def async_setup_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up the motion_blinds components from a config entry."""
hass.data.setdefault(DOMAIN, {})
host = entry.data[CONF_HOST]
key = entry.data[CONF_API_KEY]
# Create multicast Listener
if KEY_MULTICAST_LISTENER not in hass.data[DOMAIN]:
multicast = MotionMulticast()
hass.data[DOMAIN][KEY_MULTICAST_LISTENER] = multicast
# start listening for local pushes (only once)
await hass.async_add_executor_job(multicast.Start_listen)
# register stop callback to shutdown listening for local pushes
def stop_motion_multicast(event):
"""Stop multicast thread."""
_LOGGER.debug("Shutting down Motion Listener")
multicast.Stop_listen()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_motion_multicast)
# Connect to motion gateway
multicast = hass.data[DOMAIN][KEY_MULTICAST_LISTENER]
connect_gateway_class = ConnectM
|
otionGateway(hass, multicast)
if not await connect_gateway_class.async_connect_gateway(host, key):
raise ConfigEntryNotReady
motion_gateway = connect_gateway_class.gateway_device
def update_gateway():
"""Call all updates using one async_add_executor_job."""
motion_gateway.Update()
for blind in motion_gateway.device_list.values():
try:
blind.Update()
except ti
|
meout:
# let the error be logged and handled by the motionblinds library
pass
async def async_update_data():
"""Fetch data from the gateway and blinds."""
try:
await hass.async_add_executor_job(update_gateway)
except timeout:
# let the error be logged and handled by the motionblinds library
pass
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name=entry.title,
update_method=async_update_data,
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(seconds=600),
)
# Fetch initial data so we have data when entities subscribe
await coordinator.async_refresh()
hass.data[DOMAIN][entry.entry_id] = {
KEY_GATEWAY: motion_gateway,
KEY_COORDINATOR: coordinator,
}
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, motion_gateway.mac)},
identifiers={(DOMAIN, entry.unique_id)},
manufacturer=MANUFACTURER,
name=entry.title,
model="Wi-Fi bridge",
sw_version=motion_gateway.protocol,
)
for component in MOTION_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(
hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry
):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in MOTION_PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
if len(hass.data[DOMAIN]) == 1:
# No motion gateways left, stop Motion multicast
_LOGGER.debug("Shutting down Motion Listener")
multicast = hass.data[DOMAIN].pop(KEY_MULTICAST_LISTENER)
await hass.async_add_executor_job(multicast.Stop_listen)
return unload_ok
|
VagrantApe/flaskMicroblog
|
tests.py
|
Python
|
bsd-3-clause
| 5,133
| 0.054744
|
#!flask/bin/python
import os
import unittest
from coverage import coverage
cov = coverage(branch = True, omit = ['flask/*', 'tests.py'])
cov.start()
from config import basedir
from app import app, db
from app.models import User
from datetime import datetime, timedelta
from app.models import User, Post
class TestCase(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
app.config['CSRF_ENABLED'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'test.db')
self.app = app.test_client()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_avatar(self):
u = User(nickname = 'john', email = 'john@example.com')
avatar = u.avatar(128)
expected = 'http://www.gravatar.com/avatar/d4c74594d841139328695756648b6bd6'
assert avatar[0:len(expected)] == expected
def test_make_unique_nickname(self):
u = User(nickname = 'john', email = 'john@example.com')
db.session.add(u)
db.session.commit()
nickname = User.make_unique_nickname('susan')
assert nickname == 'susan'
nickname = User.make_unique_nickname('john')
assert nickname != 'john'
# make another user with the new nickname
u = User(nickname = nickname, email = 'susan@example.com')
db.session.add(u)
db.session.commit()
nickname2 = User.make_unique_nickname('john')
assert nickname2 != 'john'
assert nickname2 != nickname
def test_follow(self):
u1 = User(nickname = 'john', email = 'john@example.com')
u2 = User(nickname = 'susan', email = 'susan@example.com')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
assert u1.unfollow(u2) == None
u = u1.follow(u2)
db.session.add(u)
db.session.commit()
assert u1.follow(u2) == None
assert u1.is_following(u2)
assert u1.followed.count() == 1
assert u1.followed.first().nickname == 'susan'
assert u2.followers.count() == 1
assert u2.followers.first().nickname == 'john'
u = u1.unfollow(u2)
assert u != None
db.session.add(u)
db.session.commit()
assert u1.is_following(u2) == False
assert u1.followed.count() == 0
assert u2.followers.count() == 0
def test_follow_posts(self):
u1 = User(nickname = 'john', email = 'john@example.com')
u2 = User(nickname = 'susan', email = 'susan@example.com')
u3 = User(nickname = 'mary', email = 'mary@example.com')
u4 = User(nickname = 'david', email = 'david@example.com')
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.add(u4)
# make four posts
utcnow = datetime.utcnow()
p1 = Post(body = "post from john", author = u1, timestamp = utcnow + timedelta(seconds = 1))
p2 = Post(body = "post from susan", author = u2, timestamp = utcnow + timedelta(seconds = 2))
p3 = Post(body = "post from mary", author = u3, timestamp = utcnow + timedelta(seconds = 3))
p4 = Post(body = "post from david", author = u4, timestamp = utcnow + timedelta(seconds = 4))
db.session.add(p1)
db.session.add(p2)
db.session.add(p3)
db.session.add(p4)
db.session.commit()
# setup the followers
u1.follow(u1) # john follows himself
u1.follow(u2) # john follows susan
u1.follow(u4) # john follows david
u2.follow(u2) # susan follows herself
u2.follow(u3) # susan follows mary
u3.follow(u3) # mary follows herself
u3.follow(u4) # mary follows david
u4.follow(u4) # david follows himself
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.add(u4)
db.session.commit()
# check the followed posts of each user
f1 = u1.followed_posts().all()
f2 = u2.followed_posts().all()
f3 = u3.followed_posts().all()
f4 = u4.followed_posts().all()
assert len(f1) == 3
assert len(f2) == 2
assert len(f3) == 2
assert len(f4) == 1
assert f1 == [p4, p2, p1]
assert f2 == [p3, p2]
assert f3 == [p4, p3]
assert f4 == [p4]
def test_delete_post(self):
# create a user and a post
u = User(nickname = 'john', email = 'john@example.com')
p = Post(body = 'test post', author = u, timestamp = datetime.utcnow())
db.session.add(u)
db.session.add(p)
db.session.commit()
# query the post and destroy the session
p = Post.query.get(1)
db.session.remove()
# delete the post using a new session
db.session = db.create_scoped_session()
db.session.delete(p)
db.session.commit()
def test_user(self):
# make valid nicknames
n = User.make_valid_nickname('John_123')
assert n == 'John_123'
n = User.make_valid_nickname('John_[123]\n')
assert n == 'John_123'
# create a user
u = User(nickname = 'john', email
|
= 'john@example.com')
db.session.add(u)
db.session.commit()
assert u.is_authenticated() == True
assert u.is_active() == Tr
|
ue
assert u.is_anonymous() == False
assert u.id == int(u.get_id())
def __repr__(self):
return '<User %r>' % (self.nickname)
if __name__ == '__main__':
try:
unittest.main()
except:
pass
cov.stop()
cov.save()
print "\n\nCoverage Report:\n"
cov.report()
print "HTML version: " + os.path.join(basedir, "tmp/coverage/index.html")
cov.html_report(directory = 'tmp/coverage')
cov.erase()
|
andurilhuang/Movie_Income_Prediction
|
paper/historycode/toAnna/get_test.py
|
Python
|
mit
| 1,236
| 0.006472
|
import os
import requests
import json
import pandas as pd
import numpy as np
import time
|
from datetime import datetime
TMDB_KEY = "60027f35df522f00e57a79b9d35
|
68423"
"""
def get_tmdb_id_list():
#function to get all Tmdb_id between 06-16
import requests
import json
# from year 1996-2016
year = range(2006,2017)
## 50 pages
page_num = range(1,50)
id_list = []
tmdb_id_query = "https://api.themoviedb.org/3/discover/movie?" \
+ "api_key=%s" \
+ "&language=en-US&sort_by=release_date.asc" \
+ "&include_adult=false&include_video=false" \
+ "&page=%d" \
+ "&primary_release_year=%d"
for n in page_num:
for yr in year:
rq = requests.get(tmdb_id_query % (TMDB_KEY,n,yr)).json()
for item in rq['results']:
id_list.append(item['id'])
return id_list
start = time.time()
ID_LIST = get_tmdb_id_list()
stop = time.time()
print(ID_LIST)
print(stop - start)
"""
query = "https://api.themoviedb.org/3/movie/%d?" \
+"api_key=%s" \
+"&language=en-US"
movie_id = 78
request = requests.get(query %(movie_id,TMDB_KEY)).json()
|
automl/paramsklearn
|
ParamSklearn/components/data_preprocessing/balancing.py
|
Python
|
bsd-3-clause
| 4,086
| 0.000245
|
import numpy as np
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter
from ParamSklearn.components.base import \
ParamSklearnPreprocessingAlgorithm
from ParamSklearn.constants import *
class Balancing(ParamSklearnPreprocessingAlgorithm):
def __init__(self, strategy, random_state=None):
self.strategy = strategy
def fit(self, X, y=None):
return self
def transform(self, X):
return X
def get_weights(self, Y, classifier, preprocessor, init_params, fit_params):
if init_params is None:
init_params = {}
if fit_params is None:
fit_params = {}
# Classifiers which require sample weights:
# We can have adaboost in here, because in the fit method,
# the sample weights are normalized:
# https://github.com/scikit-learn/scikit-learn/blob/0.15.X/sklearn/ensemble/weight_boosting.py#L121
clf_ = ['adaboost', 'gradient_boosting']
pre_ = []
if classifier in clf_ or preprocessor in pre_:
if len(Y.shape) > 1:
offsets = [2 ** i for i in range(Y.shape[1])]
Y_ = np.sum(Y * offsets, axis=1)
else:
Y_ = Y
unique, counts = np.unique(Y_, return_counts=True)
cw = 1. / counts
cw = cw / np.mean(cw)
sample_weights = np.ones(Y_.shape)
for i, ue in enumerate(unique):
mask = Y_ == ue
sample_weights[mask] *= cw[i]
if classifier in clf_:
fit_params['classifier:sample_weight'] = sample_weights
if preprocessor in pre_:
fit_params['preprocessor:sample_weight'] = sample_weights
# Classifiers which can adjust sample weights themselves via the
# argument `class_weight`
clf_ = ['decision_tree', 'extra_trees', 'liblinear_svc',
'libsvm_svc', 'random_forest', 'sgd']
pre_ = ['liblinear_svc_preprocessor',
'extra_trees_preproc_for_classification']
if classifier in clf_:
init_params['classifier:class_weight'] = 'auto'
if preprocessor in pre_:
init_params['preprocessor:class_weight'] = 'auto'
clf_ = ['ridge']
if classifier in clf_:
class_weights = {}
unique, counts = np.unique(Y
|
, return_counts=True)
cw = 1. / counts
cw = cw / np.mean(cw)
for i, ue in enumerate(unique):
class_weights[ue] = cw[i]
if classifier in clf_:
init_params['classifier:class_weight'] = class_weights
return init_params, fit_params
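# Worked example (editorial note, not part of the original ParamSklearn
# code): for a binary problem with class counts [90, 10] the inverse
# frequencies are [1/90, 1/10]; dividing by their mean (~0.0556) yields
# weights of roughly [0.2, 1.8], so the minority class is up-weighted
# while the average weight stays at 1.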
@staticm
|
ethod
def get_properties(dataset_properties=None):
return {'shortname': 'Balancing',
'name': 'Balancing Imbalanced Class Distributions',
'handles_missing_values': True,
'handles_nominal_values': True,
'handles_numerical_features': True,
'prefers_data_scaled': False,
'prefers_data_normalized': False,
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA, SIGNED_DATA),
'output': (INPUT,),
'preferred_dtype': None}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
# TODO add replace by zero!
strategy = CategoricalHyperparameter(
"strategy", ["none", "weighting"], default="none")
cs = ConfigurationSpace()
cs.add_hyperparameter(strategy)
return cs
def __str__(self):
name = self.get_properties()['name']
return "ParamSklearn %s" % name
|
yephper/django
|
tests/template_tests/filter_tests/test_rjust.py
|
Python
|
bsd-3-clause
| 1,060
| 0.00283
|
from django.template.defaultfilters import rjust
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class
|
RjustTests(SimpleTestCase):
@setup({'rjust01': '{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}'})
def test_rjust01(self):
output = self.engine.render_to_str
|
ing('rjust01', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ". a&b. . a&b.")
@setup({'rjust02': '.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.'})
def test_rjust02(self):
output = self.engine.render_to_string('rjust02', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ". a&b. . a&b.")
class FunctionTests(SimpleTestCase):
def test_rjust(self):
self.assertEqual(rjust('test', 10), ' test')
def test_less_than_string_length(self):
self.assertEqual(rjust('test', 3), 'test')
def test_non_string_input(self):
self.assertEqual(rjust(123, 4), ' 123')
|
wavefrontHQ/python-client
|
wavefront_api_client/models/response_container_paged_maintenance_window.py
|
Python
|
apache-2.0
| 4,929
| 0.000203
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class ResponseContainerPagedMaintenanceWindow(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'response': 'PagedMaintenanceWindow',
'status': 'ResponseStatus'
}
attribute_map = {
'response': 'response',
'status': 'status'
}
def __init__(self, response=None, status=None, _configuration=None): # noqa: E501
"""ResponseContainerPagedMaintenanceWindow - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._response = None
self._status = None
self.discriminator = None
if response is not None:
self.response = response
self.status = status
@property
def response(self):
"""Gets the response of this ResponseContainerPagedMaintenanceWindow. # noqa: E501
:return: The response of this ResponseContainerPagedMaintenanceWindow. # noqa: E501
:rtype: PagedMaintenanceWindow
"""
return self._response
@response.setter
def response(self, response):
"""Sets the response of this ResponseContainerPagedMaintenanceWindow.
:param response: The response of this ResponseContainerPagedMaintenanceWindow. # noqa: E501
:type: PagedMaintenanceWindow
"""
self._response = response
@property
def status(self):
"""Gets the status of this ResponseContainerPagedMaintenanceWindow. # noqa: E501
:return: The status of this ResponseContainerPagedMaintenanceWindow. # noqa: E501
:rtype: ResponseStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ResponseContainerPagedMaintenanceWindow.
:param status: The status of this ResponseContainerPagedMaintenanceWindow. # noqa: E501
:type: ResponseStatus
"""
if self._configuration.client_side_validation and status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResponseContainerPagedMaintenanceWindow, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResponseContainerPagedMaintenanceWindow
|
):
return F
|
alse
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResponseContainerPagedMaintenanceWindow):
return True
return self.to_dict() != other.to_dict()
|
savoirfairelinux/partner-addons
|
partner_multi_relation_extended/models/res_partner.py
|
Python
|
lgpl-3.0
| 1,635
| 0
|
# -*- coding: utf-8 -*-
# © 2017 Savoir-faire Linux
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo import _, api, fields, models
class ResPartner(models.Model):
_inherit = 'res.partner'
@api.onchange('parent_id')
def onchange_parent_id(self):
res = super(ResPartner, self).onchange_parent_id()
if self.parent_id:
work_relation_type = self.env['res.partner.relation.type'].search([
('is_work_relation', '=', True),
])
if not work_relation_type:
res['warning'] = {
'title': _('Warning'),
'message':
|
_('You cannot set a parent entity, as there is '
'not any partner relation type flagged as '
'"Work Relation".')
}
self.parent_id = False
return res
@api.model
def create(self, vals):
"""
Creat
|
e a relation between a contact and its parent only when the
parent is a company.
"""
res = super(ResPartner, self).create(vals)
if res.parent_id and res.parent_id.is_company:
work_relation_type = self.env['res.partner.relation.type'].search([
('is_work_relation', '=', True),
])
self.env['res.partner.relation'].create({
'left_partner_id': res.id,
'right_partner_id': res.parent_id.id,
'type_id': work_relation_type.id,
'date_start': fields.Date.today(),
})
return res
|
python-krasnodar/python-krasnodar.ru
|
src/lessons/admin.py
|
Python
|
mit
| 248
| 0.004032
|
from d
|
jango.contrib import admin
from .models import Lesson, Series
class LessonAdmin(admin.ModelAdmin):
pass
class SeriesAdmin(admin.ModelAdmin):
pass
admin.site.register(Lesson, LessonAdmin)
admin.sit
|
e.register(Series, SeriesAdmin)
|
kjaym/pypif
|
pypif/obj/common/reference.py
|
Python
|
apache-2.0
| 8,674
| 0.001268
|
from six import string_types
from pypif.obj.common.display_item import DisplayItem
from pypif.obj.common.name import Name
from pypif.obj.common.pages import Pages
from pypif.obj.common.pio import Pio
class Reference(Pio):
"""
Information about a referenced publication.
"""
def __init__(self, doi=None, isbn=None, issn=None, url=None, title=None, publisher=None, journal=None, volume=None,
issue=None, year=None, figure=None, table=None, pages=None, authors=None, editors=None,
affiliations=None, acknowledgements=None, references=None, tags=None, **kwargs):
"""
Constructor.
:param doi: String with DOI of the published work
:param isbn: String with ISBN of the published work
:param issn: String with ISSN of the published work
:param url: String with URL to the published work
:param title: String with title of the published work.
:param publisher: String with publisher of the work.
:param journal: String with the journal in which the work was published.
:param volume: String with the volume in which the work was published.
:param issue: String with the issue in which the work was published.
:param year: String with the year in which the work was published.
:param figure: Dictionary or :class:`.DisplayItem` object with the figure to reference.
:param table: Dictionary or :class:`.DisplayItem` object with the table to reference.
:param pages: String, integer, dictionary, or :class:`.Pages` object with the starting and ending pages for
the published work.
:param authors: List of strings, dictionaries, or :class:`.Name` objects with information about the authors.
:param editors: List of strings, dictionaries, or :class:`.Name` objects with information about the editors.
:param affiliations: List of strings with affiliations.
:param acknowledgements: List of strings with acknowledgements.
:param references: List of dictionaries or :class:`.Reference` objects with works cited by this published work.
:param tags: List of strings or numbers that are tags for this object.
:param kwargs: Dictionary of fields that are not supported.
"""
super(Reference, self).__init__(tags=tags, **kwargs)
self._doi = None
self.doi = doi
self._isbn = None
self.isbn = isbn
self._issn = None
self.issn = issn
self._url = None
self.url = url
self._title = None
self.title = title
self._publisher = None
self.publisher = publisher
self._journal = None
self.journal = journal
self._volume = None
self.volume = volume
self._issue = None
self.issue = issue
self._year = None
self.year = year
self._figure = None
self.figure = figure
self._table = None
self.table = table
self._pages = None
self.pages = pages
self._authors = None
self.authors = authors
self._editors = None
self.editors = editors
self._affiliations = None
self.affiliations = affiliations
self._acknowledgements = None
self.acknowledgements = acknowledgements
self._references = None
self.references = references
@property
def doi(self):
return self._doi
@doi.setter
def doi(self, doi):
self._validate_type('doi', doi, string_types)
self._doi = doi
@doi.deleter
def doi(self):
self._doi = None
@property
def isbn(self):
return self._isbn
@isbn.setter
def isbn(self, isbn):
self._validate_type('isbn', isbn, string_types)
self._isbn = isbn
@isbn.deleter
def isbn(self):
self._isbn = None
@property
def issn(self):
return self._issn
@issn.setter
def issn(self, issn):
self._validate_type('issn', issn, string_types)
self._issn = issn
@issn.deleter
def issn(self):
self._issn = None
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._validate_type('url', url, string_types)
self._url = url
@url.deleter
def url(self):
self._url = None
@property
def title(self):
return self._title
@title.setter
def title(self, title):
self._validate_type('title', title, string_types)
self._title = title
@title.deleter
def title(self):
self._title = None
@property
def publisher(self):
return self._publisher
@publisher.setter
def publisher(self, publisher):
self._validate_type('publisher', publisher, string_types)
self._publisher = publisher
@publisher.deleter
def publisher(self):
self._publisher = None
@property
def journal(self):
return self._journal
@journal.setter
def journal(self, journal):
self._validate_type('journal', journal, string_types)
self._journal = journal
@journal.deleter
def journal(self):
self._journal = None
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, volume):
self._validate_type('volume', volume, string_types)
self._volume = volume
@volume.deleter
def volume(self):
self._volume = None
@property
def issue(self):
return self._issue
@issue.setter
def issue(self, issue):
self._validate_type('issue', issue, string_types)
self._issue = issue
@issue.deleter
def issue(self):
self._issue = None
@property
def year(self):
return self._year
@year.setter
def year(self, year):
self._validate_type('year', year, string_types)
self._year = year
@year.deleter
def year(self):
self._year = None
@property
def figure(self):
return self._figure
@figure.setter
def figure(self, figure):
self._validate_type('figure', figure, dict, DisplayItem)
self._figure = self._get_object(DisplayItem, figure)
@figure.deleter
def figure(self):
self._figure = None
@property
def table(self):
return self._table
@table.setter
def table(self, table):
self._validate_type('table', table, dict, DisplayItem)
self._table = self._get_object(DisplayItem, table)
@table.deleter
def table(self):
self._table = None
@property
def pages(self):
return self._pages
@pages.setter
def pages(self, pages):
self._validate_type('pages', pages, string_types, int, dict, Pages)
self._pages = self._get_object(Pages, pages)
@pages.deleter
def pages(self):
self._pages = None
@property
def authors(self):
return self._authors
@authors.setter
def authors(self, authors):
self._validate_list_type('authors', authors, string_types, dict, Name)
self._authors = self._get_object(Name, authors)
@authors.deleter
def authors(self):
self._authors = None
@property
def editors(self):
return self._editors
@editors.setter
def editors(self, editors):
self._validate_list_type('editors', editors, string_types, dict, Name)
self._editors = self._get_object(Name, editors)
@editors.deleter
def editors(self):
self._editors = None
@property
def affiliations(self):
return self._affiliations
@affiliatio
|
ns.setter
def affiliations(self, affiliations):
self
|
._validate_list_type('affiliations', affiliations, string_types)
self._affiliations = affiliations
@affiliations.deleter
def affiliations(self):
self._affiliations = None
@property
def acknowledgements(self):
return self._acknowledgements
@acknowledgements.setter
def acknowledgements(self, acknowledgements):
self._valida
|
hyms/academicControl
|
manage.py
|
Python
|
gpl-2.0
| 273
| 0
|
#!/usr/bin/env python
import os
import sys
import MySQLdb
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "academicControl.settings")
from django.core.m
|
anagement import execute_from_command_line
exec
|
ute_from_command_line(sys.argv)
|
endlessm/chromium-browser
|
third_party/angle/third_party/VK-GL-CTS/src/scripts/khr_util/format.py
|
Python
|
bsd-3-clause
| 3,242
| 0.029611
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import sys
from itertools import chain
INL_HEADER_TMPL = """\
/* WARNING: This is auto-generated file. Do not modify, since changes will
* be lost! Modify the generating script instead.
*
* Generated from {registryName} revision {revision}.
*/\
"""
def genInlHeader (registryName, revision):
return INL_HEADER_TMPL.format(
registryName = registryName,
revision = str(revision))
def genInlHeaderForSource (registrySource):
return genInlHeader(registrySource.getFilename(), registrySource.getRevision())
def nextMod (val, mod):
if val % mod == 0:
return val + mod
else:
return int(val/mod)*mod + mod
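# Worked example (editorial note, not part of the original script): with the
# tab size of 4 used below, nextMod(5, 4) -> 8 and nextMod(8, 4) -> 12; the
# result is always the next multiple of mod strictly greater than val, which
# is what the column-padding loop in indentLines relies on to terminate.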
def indentLines (lines):
tabSize = 4
# Split into columns
lineColumns = [line.split("\t") for line in lines if line is not None]
if len(lineColumns) == 0:
return
numColumns = max(len(line) for line in lineColumns)
# Figure out max length per column
columnLengths = [nextMod(max(len(line[ndx]) for line in lineColumns if len(line) > ndx), tabSize) for ndx in range(numColumns)]
for line in lineColumns:
indented = []
for columnNdx, col in enumerate(line[:-1]):
colLen = len(col)
while colLen < columnLengths[columnNdx]:
col += "\t"
colLen = nextMod(colLen, tabSize)
indented.append(col)
# Append last col
indented.append(line[-1])
yield "".join(indented)
def readFile (filename):
f = open(filename, 'rb')
data = f.read()
f.close()
return data
def writeFileIfChanged (filename, data):
if not os.path.exists(filename) or readFile(filename) != data:
if (sys.version_info < (3, 0)):
f = open(filename, 'wt')
else:
f = open(filename, 'wt', newline='\n')
f.write(data)
f.close()
def writeLines (filename, lines):
text = ""
for line in lines:
text += line
text += "\n"
writeFileIfChanged(filename, text)
print(filename)
def writeInlFile (filename, header, source):
writeLines(filename, chain([header], source))
def normalizeConstant (constant):
value = int(constant, base=0)
if value >= 1 << 63:
suffix = 'ull'
elif value >= 1 << 32:
|
suffix = 'll'
elif value >= 1 << 31:
suffix = 'u'
else:
suffix = ''
return constant + suffix
def commandParams (command):
if len(command.params) > 0:
return ", ".join(param.declaration for param in command.params)
|
else:
return "void"
def commandArgs (command):
return ", ".join(param.name for param in command.params)
|
colinmarc/python-spdy
|
python-spdy/context.py
|
Python
|
bsd-2-clause
| 6,954
| 0.039977
|
from spdy.frames import *
from spdy._zlib_stream import Inflater, Deflater
from bitarray import bitarray
SERVER = 'SERVER'
CLIENT = 'CLIENT'
class SpdyProtocolError(Exception):
pass
def _bitmask(length, split, mask=0):
invert = 1 if mask == 0 else 0
b = str(mask)*split + str(invert)*(length-split)
return int(b, 2)
_first_bit = _bitmask(8, 1, 1)
_last_15_bits = _bitmask(16, 1, 0)
class Context(object):
def __init__(self, side, version=2):
if side not in (SERVER, CLIENT):
raise TypeError("side must be SERVER or CLIENT")
if not version in VERSIONS:
raise NotImplementedError()
self.version = version
self.deflater = Deflater(version)
self.inflater = Inflater(version)
self.frame_queue = []
self.input_buffer = bytearray()
if side == SERVER:
self._stream_id = 2
self._ping_id = 2
else:
self._stream_id = 1
self._ping_id = 1
@property
def next_stream_id(self):
sid = self._stream_id
self._stream_id += 2
return sid
@property
def next_ping_id(self):
pid = self._ping_id
self._ping_id += 2
return pid
def incoming(self, chunk):
self.input_buffer.extend(chunk)
def get_frame(self):
frame, bytes_parsed = self._parse_frame(bytes(self.input_buffer))
if bytes_parsed:
self.input_buffer = self.input_buffer[bytes_parsed:]
return frame
def put_frame(self, frame):
if not isinstance(frame, Frame):
raise TypeError("frame must be a valid Frame object")
self.frame_queue.append(frame)
def outgoing(self):
out = bytearray()
while len(self.frame_queue) > 0:
frame = self.frame_queue.pop(0)
out.extend(self._encode_frame(frame))
return out
def _parse_header_chunk(self, compressed_data, version):
chunk = self.inflater.decompress(compressed_data)
length_size = 2 if version == 2 else 4
headers = {}
#first two bytes: number of pairs
num_values = int.from_bytes(chunk[0:length_size], 'big')
#after that...
cursor = length_size
for _ in range(num_values):
#two/four bytes: length of name
name_length = int.from_bytes(chunk[cursor:cursor+length_size], 'big')
cursor += length_size
#next name_length bytes: name
name = chunk[cursor:cursor+name_length].decode('UTF-8')
cursor += name_length
#two/four bytes: length of value
value_length = int.from_bytes(chunk[cursor:cursor+length_size], 'big')
cursor += length_size
#next value_length bytes: value
value = chunk[cursor:cursor+value_length].decode('UTF-8')
cursor += value_length
if name_length == 0 or value_length == 0:
raise SpdyProtocolError("zero-length name or value in n/v block")
if name in headers:
raise SpdyProtocolError("duplicate name in n/v block")
headers[name] = value
return headers
def _parse_frame(self, chunk):
if len(chunk) < 8:
return (None, 0)
#first bit: control or data frame?
control_frame = (chunk[0] & _first_bit == _first_bit)
if control_frame:
#second byte (and rest of first, after the first bit): spdy version
spdy_version = int.from_bytes(chunk[0:2], 'big') & _last_15_bits
if spdy_version != self.version:
raise SpdyProtocolError("incorrect SPDY version")
#third and fourth byte: frame type
frame_type = int.from_bytes(chunk[2:4], 'big')
if not frame_type in FRAME_TYPES:
raise SpdyProtocolError("invalid frame type: {0}".format(frame_type))
#fifth byte: flags
flags = chunk[4]
#sixth, seventh and eighth bytes: length
length = int.from_bytes(chunk[5:8], 'big')
frame_length = length + 8
if len(chunk) < frame_length:
return (None, 0)
#the rest is data
data = chunk[8:frame_length]
bits = bitarray()
bits.frombytes(data)
frame_cls = FRAME_TYPES[frame_type]
args = {
'version': spdy_version,
'flags': flags
}
for key, num_bits in frame_cls.definition(spdy_version):
if not key:
bits = bits[num_bits:]
continue
if num_bits == -1:
value = bits
else:
value = bits[:num_bits]
bits = bits[num_bits:]
if key == 'headers': #headers are compressed
args[key] = self._parse_header_chunk(value.tobytes(), self.version)
else:
#we have to pad values on the left, because bitarray will assume
#that you want it padded from the right
gap = len(value) % 8
if gap:
zeroes = bitarray(8 - gap)
zeroes.setall(False)
value = zeroes + value
args[key] = int.from_bytes(value.tobytes(), 'big')
if num_bits == -1:
break
frame = frame_cls(**args)
else: #data frame
#first four bytes, except the first bit: stream_id
stream_id = int.from_bytes(_ignore_first_bit(chunk[0:4]), 'big')
#fifth byte: flags
flags = chunk[4]
#sixth, seventh and eight bytes: length
length = int.from_bytes(chunk[5:8], 'big')
frame_length = 8 + length
if len(chunk) < frame_length:
return (0, None)
data = chunk[8:frame_length]
frame = DataFrame(stream_id, data)
return (frame, frame_length)
def _encode_header_chunk(self, headers):
chunk = bytearray()
#first two bytes: number of pairs
chunk.extend(len(headers).to_bytes(2, 'big'))
#after that...
for name, value in headers.items():
name = bytes(name, 'UTF-8')
value = bytes(value, 'UTF-8')
#two bytes: length of name
chunk.extend(len(name).to_bytes(2, 'big'))
#next name_length bytes: name
chunk.extend(name)
#two bytes: length of value
chunk.extend(len(value).to_bytes(2, 'big'))
#next value_length bytes: value
chunk.extend(value)
return self.deflater.compress(bytes(chunk))
def _encode_frame(self, frame):
out = bytearray()
if frame.is_control:
#first two bytes: version
out.extend(frame.version.to_bytes(2, 'big'))
#set the first bit to control
out[0] = out[0] | _first_bit
#third and fourth: frame type
out.extend(frame.frame_type.to_bytes(2, 'big'))
#fifth: flags
out.append(frame.flags)
bits = bitarray()
for key, num_bits in frame.definition(self.version):
if not key:
zeroes = bitarray(num_bits)
zeroes.setall(False)
bits += zeroes
continue
value = getattr(frame, key)
if key == 'headers':
chunk = bitarray()
chunk.frombytes(self._encode_header_chunk(value))
else:
                    chunk = bitarray(bin(value)[2:])
zeroes = bitarray(num_bits - len(chunk))
zeroes.setall(False)
                    chunk = zeroes + chunk #pad with zeroes
bits += chunk
if num_bits == -1:
break
data = bits.tobytes()
#sixth, seventh and eighth bytes: length
out.extend(len(data).to_bytes(3, 'big'))
# the rest is data
out.extend(data)
else: #data frame
#first four bytes: stream_id
out.extend(frame.stream_id.to_bytes(4, 'big'))
#fifth: flags
out.append(frame.flags)
#sixth, seventh and eighth bytes: length
data_length = len(frame.data)
out.extend(data_length.to_bytes(3, 'big'))
#rest is data
out.extend(frame.data)
return out
|
fritz0705/lglass
|
contrib/grs-import/grs-import-sqlid.py
|
Python
|
mit
| 2,771
| 0.003609
|
#!/bin/python
# coding: utf-8
import argparse
import sys
import signal
import traceback
import datetime
import lglass.object
import lglass_sql.nic
def objects(lines):
obj = []
for line in lines:
if isinstance(line, bytes):
line = line.decode("iso-8859-15")
if not line.strip() and obj:
yield lglass.object.parse_object(obj)
obj = []
elif line[0] in {'%', '#'} or not line.strip():
continue
else:
obj.append(line)
if obj:
yield lglass.object.parse_object(obj)
argparser = argparse.ArgumentParser()
argparser.add_argument("--schema", "-s")
argparser.add_argument("--encoding", "-e", default="iso-8859-15")
argparser.add_argument("database")
args = argparser.parse_args()
database = lglass_sql.nic.NicDatabase(args.database, schema=args.schema)
if hasattr(database, "session"):
session = database.session()
else:
session = database
print("Collecting
|
local objects...", end='', flush=True)
current_objects = set(session.all_ids())
print(" Done.")
stats = dict(created=0,
updated=0,
deleted=0,
ignored=0,
start=datetime.datetime.now())
def report():
global stats
global current_objects
print("Created {} / Updated {} / Deleted {} / "
"Ignored {} objects in {}".format(stats["created"],
stats["updated"],
stats["deleted"],
stats["ignored"],
datetime.datetime.now() - stats["start"]))
print("{} objects left".format(len(current_objects)))
signal.signal(signal.SIGUSR1, lambda *args: report())
print("Creating or updating local objects...", end='', flush=True)
for obj in objects(sys.stdin.buffer):
try:
obj = database.create_object(obj)
spec = database.primary_spec(obj)
try:
local_obj = session.fetch(*spec)
except KeyError:
session.save(obj)
stats["created"] += 1
continue
if local_obj.sql_id in current_objects:
current_objects.remove(local_obj.sql_id)
if local_obj == obj:
stats["ignored"] += 1
continue
session.save(obj)
stats["updated"] += 1
except Exception as e:
print("Error at object {!r}".format(obj))
traceback.print_exc()
stats["ignored"] += 1
print("Done")
print("Deleting local objects...", end='', flush=True)
for id_ in current_objects:
try:
session.delete_by_id(id_)
stats["deleted"] += 1
except Exception as e:
        traceback.print_exc()
stats["ignored"] += 1
if hasattr(session, "commit"):
session.commit()
if hasattr(session, "close"):
session.close()
print("Done")
report()
|
AMOboxTV/AMOBox.LegoBuild
|
plugin.program.super.favourites/utils.py
|
Python
|
gpl-2.0
| 18,265
| 0.010183
|
#
# Copyright (C) 2014-2015
# Sean Poyser (seanpoyser@gmail.com)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import xbmc
import xbmcaddon
import xbmcgui
import os
import re
import sfile
def GetXBMCVersion():
    #xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "method": "Application.GetProperties", "params": {"properties": ["version", "name"]}, "id": 1 }')
version = xbmcaddon.Addon('xbmc.addon').getAddonInfo('version')
version = version.split('.')
return int(version[0]), int(version[1]) #major, minor eg, 13.9.902
ADDONID = 'plugin.program.super.favourites'
ADDON = xbmcaddon.Addon(ADDONID)
HOME = ADDON.getAddonInfo('path')
ROOT = ADDON.getSetting('FOLDER')
if not ROOT:
ROOT = 'special://profile/addon_data/plugin.program.super.favourites/'
SHOWXBMC = ADDON.getSetting('SHOWXBMC') == 'true'
INHERIT = ADDON.getSetting('INHERIT') == 'true'
ALPHA_SORT = ADDON.getSetting('ALPHA_SORT') == 'true'
LABEL_NUMERIC = ADDON.getSetting('LABEL_NUMERIC') == 'true'
LABEL_NUMERIC_QL = ADDON.getSetting('LABEL_NUMERIC_QL') == 'true'
PROFILE = os.path.join(ROOT, 'Super Favourites')
VERSION = ADDON.getAddonInfo('version')
ICON = os.path.join(HOME, 'icon.png')
FANART = os.path.join(HOME, 'fanart.jpg')
SEARCH = os.path.join(HOME, 'resources', 'media', 'search.png')
GETTEXT = ADDON.getLocalizedString
TITLE = GETTEXT(30000)
DISPLAYNAME = 'Kodi'
NUMBER_SEP = ' | '
PLAYABLE = xbmc.getSupportedMedia('video') + '|' + xbmc.getSupportedMedia('music')
PLAYABLE = PLAYABLE.replace('|.zip', '')
PLAYABLE = PLAYABLE.split('|')
PLAYMEDIA_MODE = 1
ACTIVATEWINDOW_MODE = 2
RUNPLUGIN_MODE = 3
ACTION_MODE = 4
HOMESPECIAL = 'special://home/'
HOMEFULL = xbmc.translatePath(HOMESPECIAL)
DEBUG = ADDON.getSetting('DEBUG') == 'true'
KEYMAP_HOT = 'super_favourites_hot.xml'
KEYMAP_MENU = 'super_favourites_menu.xml'
MAJOR, MINOR = GetXBMCVersion()
FRODO = (MAJOR == 12) and (MINOR < 9)
GOTHAM = (MAJOR == 13) or (MAJOR == 12 and MINOR == 9)
HELIX = (MAJOR == 14) or (MAJOR == 13 and MINOR == 9)
ISENGARD = (MAJOR == 15) or (MAJOR == 14 and MINOR == 9)
KRYPTON = (MAJOR == 17) or (MAJOR == 16 and MINOR == 9)
ESTUARY = xbmc.getCondVisibility('System.HasAddon(%s)' % 'skin.estuary') == 1
FILENAME = 'favourites.xml'
FOLDERCFG = 'folder.cfg'
def Log(text):
log(text)
def log(text):
try:
output = '%s V%s : %s' % (TITLE, VERSION, str(text))
if DEBUG:
xbmc.log(output)
else:
xbmc.log(output, xbmc.LOGDEBUG)
except:
pass
def DialogOK(line1, line2='', line3=''):
d = xbmcgui.Dialog()
d.ok(TITLE + ' - ' + VERSION, line1, line2 , line3)
def DialogYesNo(line1, line2='', line3='', noLabel=None, yesLabel=None):
d = xbmcgui.Dialog()
if noLabel == None or yesLabel == None:
return d.yesno(TITLE + ' - ' + VERSION, line1, line2 , line3) == True
else:
return d.yesno(TITLE + ' - ' + VERSION, line1, line2 , line3, noLabel, yesLabel) == True
def Progress(title, line1 = '', line2 = '', line3 = ''):
dp = xbmcgui.DialogProgress()
dp.create(title, line1, line2, line3)
dp.update(0)
return dp
def generateMD5(text):
if not text:
return ''
try:
import hashlib
return hashlib.md5(text).hexdigest()
except:
pass
try:
import md5
return md5.new(text).hexdigest()
except:
pass
return '0'
def LaunchSF():
xbmc.executebuiltin('ActivateWindow(videos,plugin://%s)' % ADDONID)
def CheckVersion():
try:
prev = ADDON.getSetting('VERSION')
curr = VERSION
if prev == curr:
return
verifySuperSearch()
VerifySettinngs()
VerifyZipFiles()
src = os.path.join(ROOT, 'cache')
dst = os.path.join(ROOT, 'C')
sfile.rename(src, dst)
ADDON.setSetting('VERSION', curr)
if prev == '0.0.0' or prev == '1.0.0':
sfile.makedirs(PROFILE)
#call showChangeLog like this to workaround bug in openElec
script = os.path.join(HOME, 'showChangelog.py')
cmd = 'AlarmClock(%s,RunScript(%s),%d,True)' % ('changelog', script, 0)
xbmc.executebuiltin(cmd)
except:
pass
def VerifyZipFiles():
#cleanup corrupt zip files
sfile.remove(os.path.join('special://userdata', '_sf_temp.zip'))
sfile.remove(os.path.join('special://userdata', 'SF_Temp'))
def VerifySettinngs():
#patch any settings that have changed types or values
if ADDON.getSetting('DISABLEMOVIEVIEW') == 'true':
ADDON.setSetting('DISABLEMOVIEVIEW', 'false')
ADDON.setSetting('CONTENTTYPE', '')
def verifySuperSearch():
old = os.path.join(ROOT, 'Search')
dst = os.path.join(ROOT, 'S')
sfile.rename(old, dst)
try: sfile.makedirs(dst)
except: pass
src = os.path.join(HOME, 'resources', 'search', FILENAME)
dst = os.path.join(dst, FILENAME)
if not sfile.exists(dst):
sfile.copy(src, dst)
try:
#patch any changes
xml = sfile.read(dst)
xml = xml.replace('is/?action=movies_search&', 'is/?action=movieSearch&')
xml = xml.replace('is/?action=people_movies&', 'is/?action=moviePerson&')
xml = xml.replace('is/?action=shows_search&', 'is/?action=tvSearch&')
xml = xml.replace('is/?action=people_shows&', 'is/?action=tvPerson&')
f = sfile.file(dst, 'w')
f.write(xml)
f.close()
except:
pass
import favourite
new = favourite.getFavourites(src, validate=False)
#line1 = GETTEXT(30123)
#line2 = GETTEXT(30124)
for item in new:
fave, index, nFaves = favourite.findFave(dst, item[2])
if index < 0:
#line = line1 % item[0]
#if DialogYesNo(line1=line, line2=line2):
favourite.addFave(dst, item)
def UpdateKeymaps():
if ADDON.getSetting('HOTKEY') != GETTEXT(30111): #i.e. not programmable
DeleteKeymap(KEYMAP_HOT)
DeleteKeymap(KEYMAP_MENU)
VerifyKeymaps()
def DeleteKeymap(map):
path = os.path.join('special://profile/keymaps', map)
DeleteFile(path)
def DeleteFile(path):
tries = 5
while sfile.exists(path) and tries > 0:
tries -= 1
try:
sfile.remove(path)
except:
xbmc.sleep(500)
def verifyLocation():
#if still set to default location reset, to workaround
#Android bug in browse folder dialog
location = ADDON.getSetting('FOLDER')
profile = 'special://profile/addon_data/plugin.program.super.favourites/'
userdata = 'special://userdata/addon_data/plugin.program.super.favourites/'
if (location == profile) or (location == userdata):
ADDON.setSetting('FOLDER', '')
def verifyPlugins():
folder = os.path.join(ROOT, 'Plugins')
if sfile.exists(folder):
return
try: sfile.makedirs(folder)
except: pass
def VerifyKeymaps():
reload = False
scriptPath = ADDON.getAddonInfo('profile')
scriptPath = os.path.join(scriptPath, 'captureLauncher.py')
if not sfile.exists(scriptPath):
DeleteKeymap(KEYMAP_MENU) #ensure gets updated to launcher version
src = os.path.join(HOME, 'captureLauncher.py')
sfile.copy(src, scriptPath)
if VerifyKeymapH
|
karllessard/tensorflow
|
tensorflow/python/keras/layers/normalization_test.py
|
Python
|
apache-2.0
| 30,233
| 0.006648
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import def_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import normalization
from tensorflow.python.keras.layers import normalization_v2
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class BatchNormalizationTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_batchnorm(self):
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 2, 4, 2))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_weights(self):
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_regularization(self):
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.BatchNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1, input_shape=(3, 4, 4), momentum=0.8)
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_convnet_channel_last(self):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(4, 4, 3), momentum=0.8)
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_correctness(self):
_run_batchnorm_correctness_test(
normalization.BatchNormalization, dtype='float32')
_run_batchnorm_correctness_test(
normalization_v2.BatchNormalization, dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_batchnorm_float16(self):
_run_batchnorm_correctness_test(
normalization.BatchNormalization, dtype='float16')
_run_batchnorm_correctness_test(
normalization_v2.BatchNormalization, dtype='float16')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
@testing_utils.enable_v2_dtype_behavior
def test_batchnorm_mixed_precision(self):
norm = keras.layers.BatchNormalization(
axis=-1,
input_shape=(4, 4, 3),
momentum=0.8,
dtype=policy.Policy('mixed_float16'))
x = np.random.normal(size=(10, 4, 4, 3))
y = norm(x)
self.assertEqual(y.dtype, 'float16')
self.assertEqual(norm.beta.dtype.base_dtype, 'float32')
self.assertEqual(norm.gamma.dtype.base_dtype, 'float32')
@combinations.generate(combinations.combine(mode=['graph', 'eager'],
fused=[True, False]))
@testing_utils.enable_v2_dtype_behavior
def test_batchnorm_mixed_precision_does_not_overflow(self, fused):
norm = keras.layers.BatchNormalization(
axis=-1,
input_shape=(1, 1, 1),
fused=fused,
dtype=policy.Policy('mixed_float16'))
x = np.array([-1000., 1000.]).reshape((2, 1, 1, 1))
y = norm(x, training=True)
expected_y = np.array([-1.0, 1.0]).reshape((2, 1, 1, 1))
self.assertAllClose(keras.backend.eval(y), expected_y)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_batchnorm_non_trainable_with_fit(self):
# We use the same data shape for all the data we use in this test.
# This will prevent any used tf.functions from retracing.
# This helps us verify that changing trainable and recompiling really
# does update the training loop, rather than a different data shape
# triggering a retrace.
data_shape = (100, 3)
inputs = keras.Input((3,))
bn = normalization_v2.BatchNormalization()
outputs = bn(inputs)
model = keras.Model(
|
99designs/colorific
|
colorific/script.py
|
Python
|
isc
| 4,123
| 0
|
# -*- coding: utf-8 -*-
#
# script.py
# colorific
#
import sys
import optparse
from colorific import config
from colorific.palette import (
extract_colors, print_colors, save_palette_as_image, color_stream_mt,
color_stream_st)
class Application(object):
def __init__(self):
self.parser = self.create_option_parser()
def create_option_parser(self):
usage = '\n'.join([
"%prog [options]",
"",
"Reads a stream of image filenames from stdin, and outputs a ",
"single line for each containing hex color values."])
parser = optparse.OptionParser(usage)
parser.add_option(
'-p',
'--parallel',
action='store',
dest='n_processes',
type='int',
default=config.N_PROCESSES)
parser.add_option(
'--min-saturation',
action='store',
dest='min_saturation',
default=config.MIN_SATURATION,
type='float',
help="Only keep colors which meet this saturation "
"[%.02f]" % config.MIN_SATURATION)
parser.add_option(
'--max-colors',
action='store',
dest='max_colors',
type='int',
default=config.MAX_COLORS,
help="The maximum number of colors to output per palette "
"[%d]" % config.MAX_COLORS)
parser.add_option(
'--min-distance',
action='store',
dest='min_distance',
type='float',
default=config.MIN_DISTANCE,
help="The minimum distance colors must have to stay separate "
"[%.02f]" % config.MIN_DISTANCE)
parser.add_option(
'--min-prominence',
|
action='store',
dest='min_prominence',
            type='float',
default=config.MIN_PROMINENCE,
help="The minimum proportion of pixels needed to keep a color "
"[%.02f]" % config.MIN_PROMINENCE)
parser.add_option(
'--n-quantized',
action='store',
dest='n_quantized',
type='int',
default=config.N_QUANTIZED,
help="Speed up by reducing the number in the quantizing step "
"[%d]" % config.N_QUANTIZED)
parser.add_option(
'-o',
action='store_true',
dest='save_palette',
default=False,
help="Output the palette as an image file")
return parser
def run(self):
argv = sys.argv[1:]
(options, args) = self.parser.parse_args(argv)
if args:
# image filenames were provided as arguments
for filename in args:
try:
palette = extract_colors(
filename,
min_saturation=options.min_saturation,
min_prominence=options.min_prominence,
min_distance=options.min_distance,
max_colors=options.max_colors,
n_quantized=options.n_quantized)
except Exception as e: # TODO: it's too broad exception.
print >> sys.stderr, filename, e
continue
print_colors(filename, palette)
if options.save_palette:
save_palette_as_image(filename, palette)
sys.exit(1)
if options.n_processes > 1:
# XXX add all the knobs we can tune
color_stream_mt(n=options.n_processes)
else:
color_stream_st(
min_saturation=options.min_saturation,
min_prominence=options.min_prominence,
min_distance=options.min_distance,
max_colors=options.max_colors,
n_quantized=options.n_quantized,
save_palette=options.save_palette)
def main():
application = Application()
application.run()
if __name__ == '__main__':
main()
|
yashi/debbindiff
|
debbindiff/comparators/text.py
|
Python
|
gpl-3.0
| 1,446
| 0
|
# -*- coding: utf-8 -*-
#
# debbindiff: highlight differences between two builds of Debian packages
#
# Copyright © 2014-2015 Jérémy Bobbio <lunar@debian.org>
#
# debbindiff is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# debbindiff is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with debbindiff. If not, see <http://www.gnu.org/licenses/>.
import codecs
from debbindiff.comparators.binary import compare_binary_files
from debbindiff.difference import Difference
def compare_text_files(path1, path2, encoding, source=None):
if encoding is None:
encoding = 'utf-8'
try:
        file1 = codecs.open(path1, 'r', encoding=encoding)
file2 = codecs.open(path2, 'r', encoding=encoding)
difference = Difference.from_file(file1, file2, path1, path2, source)
except (LookupError, UnicodeDecodeError):
# unknown or misdetected encoding
return compare_binary_files(path1, path2, source)
if not difference:
return []
return [difference]
|
reverie/seddit.com
|
redditchat/core/permissions.py
|
Python
|
mit
| 2,122
| 0.004241
|
import functools
from common.tornado_cookies import get_secure_cookie, generate_secure_cookie
from core import cookies
class Perms(object):
NONE = None
READ = 'r'
WRITE = 'w'
def _permission_level(user, room):
"""
`user`'s permission level on `room`, ignoring cookies
"""
if not user.is_authenticated():
return Perms.READ
else:
return Perms.WRITE
def _get_cached_perm_level(request, cookie_name):
    perm = get_secure_cookie(request, cookie_name)
if not perm:
return
assert perm in ('r', 'w')
return perm
def _set_cached_perm_level(response, cookie_name, perm_level):
    assert perm_level in ('r', 'w')
cookie_val = generate_secure_cookie(cookie_name, perm_level)
response.set_cookie(cookie_name, cookie_val)
def _perm_level_satisfies(perm_val, perm_req):
"""
If a user has permission level `perm_val`,
and is requesting access level `perm_req`.
"""
if perm_req == perm_val:
return True
if (perm_val == Perms.WRITE) and (perm_req == Perms.READ):
return True
return False
def get_permission(request, response, room, perm_req):
"""
Returns True or False.
Sets a cookie on the response object to cache
the result, if necessary.
"""
assert perm_req in (Perms.READ, Perms.WRITE)
if cookies.has_cached_room_permission(
room.shortname,
perm_req,
functools.partial(get_secure_cookie, request),
session_key=request.session.session_key,
uid=getattr(request.user, 'id', None)):
return True
# Cached permission does not satisfy requirement.
perm_actual = _permission_level(request.user, room)
if perm_actual == Perms.NONE:
return False
assert perm_actual in (Perms.READ, Perms.WRITE)
result = _perm_level_satisfies(perm_actual, perm_req)
cookie_name = cookies.room_cookie_name(room.shortname, session_key=request.session.session_key, uid=getattr(request.user, 'id', None))
if result:
_set_cached_perm_level(response, cookie_name, perm_actual)
return result
|
salomax/livremarketplace
|
app_test/supplier_test.py
|
Python
|
apache-2.0
| 5,758
| 0.000521
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2016, Marcos Salomão.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import webtest
import endpoints
import logging
from test_utils import TestCase
from protorpc.remote import protojson
from protorpc import message_types
from google.appengine.ext import testbed
from google.appengine.api import users
from app.supplier.services import SupplierService
from app.supplier.messages import SupplierPostMessage
from app.supplier.messages import SupplierGetMessage
from app.supplier.messages import SupplierSearchMessage
from app.supplier.messages import SupplierKeyMessage
from app.supplier.messages import SupplierCollectionMessage
from app.exceptions import NotFoundEntityException
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class SupplierTestCase(TestCase):
def setUp(self):
# Call super method
super(SupplierTestCase, self).setUp()
# Create service
supplierService = endpoints.api_server(
[SupplierService], restricted=False)
# Create test
self.testapp = webtest.TestApp(supplierService)
def save(self, request):
""" Call save endpoint.
"""
response = self.testapp.post(
'/_ah/spi/SupplierService.save',
protojson.encode_message(request),
content_type='application/json')
self.assertEqual(response.status, '200 OK')
return protojson.decode_message(SupplierGetMessage, response.body)
def search(self, request):
""" Call search endpoint.
"""
response = self.testapp.post('/_ah/spi/SupplierService.search',
protojson.encode_message(request),
content_type='application/json')
self.assertEqual(response.status, '200 OK')
return protojson.decode_message(SupplierCollectionMessage, response.body)
def list(self):
""" Call list endpoint.
"""
response = self.testapp.post(
'/_ah/spi/SupplierService.list',
content_type='application/json')
self.assertEqual(response.status, '200 OK')
return protojson.decode_message(SupplierCollectionMessage, response.body)
def delete(self, id, expect_errors=False):
""" Call delete endpoint.
"""
response = self.testapp.post('/_ah/spi/SupplierService.delete',
protojson.encode_message(
SupplierKeyMessage(id=id)), content_type='application/json',
expect_errors=expect_errors)
if not expect_errors:
self.assertEqual(response.status, '200 OK')
def testSave(self):
""" Save supplier.
"""
request = SupplierPostMessage(
name='Test',
email='email@email.com',
phone='99999999',
location='Test Location')
supplier = self.save(request)
self.assertIsNotNone(supplier)
self.assertIsNotNone(supplier.id)
self.assertEqual(supplier.name, 'Test')
self.assertEqual(supplier.email, 'email@email.com')
self.assertEqual(supplier.phone, '99999999')
self.assertEqual(supplier.location, 'Test Location')
request = SupplierPostMessage(
id=supplier.id,
name='Test123',
email='email123@email.com',
phone='123123123',
location='Test Location 123')
supplier = self.save(request)
self.assertIsNotNone(supplier)
self.assertIsNotNone(supplier.id)
self.assertEqual(supplier.name, 'Test123')
self.assertEqual(supplier.email, 'email123@email.com')
self.assertEqual(supplier.phone, '123123123')
self.assertEqual(supplier.location, 'Test Location 123')
return supplier
def testSearch(self):
""" Search a supplier.
"""
        self.testSave()
request = SupplierSearchMessage(name='Test')
list = self.search(request)
self.assertIsNotNone(list)
        self.assertIsNotNone(list.items)
self.assertTrue(len(list.items) == 1)
request = SupplierSearchMessage(name='Yyy')
list = self.search(request)
self.assertIsNotNone(list)
self.assertIsNotNone(list.items)
self.assertTrue(len(list.items) == 0)
def testList(self):
""" List all suppliers.
"""
self.testSave()
list = self.list()
self.assertIsNotNone(list)
self.assertIsNotNone(list.items)
self.assertTrue(len(list.items) > 0)
def testDelete(self):
""" Delete the supplier.
"""
supplier = self.testSave()
list = self.list()
self.assertIsNotNone(list)
self.assertIsNotNone(list.items)
self.assertTrue(len(list.items) == 1)
self.delete(supplier.id)
list = self.list()
self.assertIsNotNone(list)
self.assertIsNotNone(list.items)
self.assertTrue(len(list.items) == 0)
self.assertRaises(NotFoundEntityException, self.delete(
id=supplier.id, expect_errors=True))
|
stackforge/tacker
|
tacker/objects/common.py
|
Python
|
apache-2.0
| 2,537
| 0
|
# Copyright (C) 2021 NEC Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.db.db_sqlalchemy import models
def apply_filters(query, filters):
"""Apply filters to a SQLAlchemy query.
:param query: The query object to which we apply filters.
:param filters: A dict or an iterable of dicts, where each one includes
the necesary information to create a filter to be applied
to the query. There are single query filters, such as
filters = {'model': 'Foo', 'field': 'name', 'op': '==',
'value': 'foo'}. And multiple query filters, such as
|
filters = {'and': [
{'field': 'name', 'model': 'Foo', 'value': 'foo',
'op': '=='},
{'field': 'id', 'model': 'Bar', 'value': 'bar',
'op': '=='}
]}
|
"""
def apply_filter(query, filter):
value = filter.get('value')
op = filter.get('op')
model = getattr(models, filter.get('model'))
column_attr = getattr(model, filter.get('field'))
if 'in' == op:
query = query.filter(column_attr.in_(value))
elif 'not_in' == op:
query = query.filter(~column_attr.in_(value))
elif '!=' == op:
query = query.filter(column_attr != value)
elif '>' == op:
query = query.filter(column_attr > value)
elif '>=' == op:
query = query.filter(column_attr >= value)
elif '<' == op:
query = query.filter(column_attr < value)
elif '<=' == op:
query = query.filter(column_attr <= value)
elif '==' == op:
query = query.filter(column_attr == value)
return query
if 'and' in filters:
for filter in filters.get('and'):
query = apply_filter(query, filter)
else:
query = apply_filter(query, filters)
return query
|
hwoods723/script.gamescenter
|
resources/lib/eventdetails.py
|
Python
|
gpl-2.0
| 22,677
| 0.033558
|
# -*- coding: utf-8 -*-
'''
script.matchcenter - Football information for Kodi
A program addon that can be mapped to a key on your remote to display football information.
Livescores, Event details, Line-ups, League tables, next and previous matches by team. Follow what
others are saying about the match in twitter.
Copyright (C) 2016 enen92
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import xbmcgui
import xbmc
import sys
import thesportsdb
import random
import threading
import pytz
import re
import ignoreleagues
from resources.lib.utilities import positions
from resources.lib.utilities import ssutils
from resources.lib.utilities.addonfileio import FileIO
from resources.lib.utilities.common_addon import *
api = thesportsdb.Api("7723457519235")
class detailsDialog(xbmcgui.WindowXMLDialog):
def __init__( self, *args, **kwargs ):
self.isRunning = True
self.match = kwargs["item"]
self.controls = []
def onInit(self):
self.setEventDetails()
def setEventDetails(self):
xbmc.executebuiltin("ClearProperty(has_lineups,Home)")
xbmc.executebuiltin("SetProperty(has_details,1,home)")
#livematch
if 'idEvent' not in self.match.__dict__.keys():
header = self.match.League + " - " + translate(32017) + " " + str(self.match.Round)
matchTime = ssutils.translatematch(self.match.Time)
matchHomeGoals = self.match.HomeGoals
matchAwayGoals = self.match.AwayGoals
matchpercent = 0.0
#match time
if "'" in self.match.Time.lower():
try:
matchpercent = float(int((float(self.match.Time.replace("'",""))/90)*100))
except: pass
else:
if self.match.Time.lower() == "halftime":
matchpercent = 50.0
elif self.match.Time.lower() == "postponed" or self.match.Time.lower() == "not started":
matchpercent = 0.0
elif self.match.Time.lower() == "finished":
matchpercent = 100.0
#match status
if self.match.Time.lower() == "finished": status = os.path.join(addon_path,"resources","img","redstatus.png")
elif "'" in self.match.Time.lower(): status = os.path.join(addon_path,"resources","img","greenstatus.png")
else: status = os.path.join(addon_path,"resources","img","yellowstatus.png")
stadium = self.match.Stadium
matchReferee = self.match.Referee
matchSpectators = self.match.Spectators
matchHomeGoalDetails = self.match.HomeGoalDetails
matchHomeTeamRedCardDetails = self.match.HomeTeamRedCardDetails
matchHomeTeamYellowCardDetails = self.match.HomeTeamYellowCardDetails
matchHomeSubDetails = self.match.HomeSubDetails
matchAwayGoalDetails = self.match.AwayGoalDetails
matchAwayTeamRedCardDetails = self.match.AwayTeamRedCardDetails
matchAwayTeamYellowCardDetails = self.match.AwayTeamYellowCardDetails
matchAwaySubDetails = self.match.AwaySubDetails
#past match
else:
header = self.match.strLeague + " - " + translate(32017) + " " + str(self.match.intRound)
matchTime = ssutils.translatematch("Finished")
matchHomeGoals = self.match.intHomeScore
matchAwayGoals = self.match.intAwayScore
status = os.path.join(addon_path,"resources","img","redstatus.png")
matchpercent = 100.0
stadium = self.match.HomeTeamObj.strStadium
matchReferee = ""
matchSpectators = self.match.intSpectators
matchHomeGoalDetails = self.match.strHomeGoalDetails
matchHomeTeamRedCardDetails = self.match.strHomeRedCards
matchHomeTeamYellowCardDetails = self.match.strHomeYellowCards
matchHomeSubDetails = ""
matchAwayGoalDetails = self.match.strAwayGoalDetails
matchAwayTeamRedCardDetails = self.match.strAwayRedCards
matchAwayTeamYellowCardDetails = self.match.strAwayYellowCards
matchAwaySubDetails = ""
self.getControl(32500).setLabel(header)
if self.match.HomeTeamObj:
if self.match.HomeTeamObj.strTeamBadge:
self.getControl(32501).setImage(self.match.HomeTeamObj.strTeamBadge)
else:
self.getControl(32501).setImage(os.path.join(addon_path,"resources","img","nobadge_placeholder.png"))
if self.match.HomeTeamObj.strTeamJersey:
self.getControl(32502).setImage(self.match.HomeTeamObj.strTeamJersey)
else:
                self.getControl(32502).setImage(os.path.join(addon_path,"resources","img","nokit_placeholder.png"))
else:
            self.getControl(32501).setImage(os.path.join(addon_path,"resources","img","nobadge_placeholder.png"))
self.getControl(32502).setImage(os.path.join(addon_path,"resources","img","nokit_placeholder.png"))
#Default values for team names. It depends if it is a live object or simple a past event
if ("HomeTeam" in self.match.__dict__.keys() and "AwayTeam" in self.match.__dict__.keys()):
self.getControl(32503).setLabel(self.match.HomeTeam)
self.getControl(32506).setLabel(self.match.AwayTeam)
else:
self.getControl(32503).setLabel(self.match.strHomeTeam)
self.getControl(32506).setLabel(self.match.strAwayTeam)
if show_alternative == "true":
if self.match.HomeTeamObj: self.getControl(32503).setLabel(self.match.HomeTeamObj.AlternativeNameFirst)
if self.match.AwayTeamObj: self.getControl(32506).setLabel(self.match.AwayTeamObj.AlternativeNameFirst)
if self.match.AwayTeamObj:
if self.match.AwayTeamObj.strTeamBadge:
self.getControl(32504).setImage(self.match.AwayTeamObj.strTeamBadge)
else:
self.getControl(32504).setImage(os.path.join(addon_path,"resources","img","nobadge_placeholder.png"))
if self.match.AwayTeamObj.strTeamJersey:
self.getControl(32505).setImage(self.match.AwayTeamObj.strTeamJersey)
else:
self.getControl(32505).setImage(os.path.join(addon_path,"resources","img","nokit_placeholder.png"))
else:
self.getControl(32504).setImage(os.path.join(addon_path,"resources","img","nobadge_placeholder.png"))
self.getControl(32505).setImage(os.path.join(addon_path,"resources","img","nokit_placeholder.png"))
if matchHomeGoals and matchAwayGoals:
self.getControl(32507).setLabel(str(matchHomeGoals)+"-"+str(matchAwayGoals))
if matchTime:
self.getControl(32508).setLabel(matchTime)
#Match Status (yellow,green,red)
self.getControl(32509).setImage(status)
#Match progress bar
self.getControl(32510).setPercent(matchpercent)
#Stadium and location
self.getControl(32511).setLabel(stadium)
#Spectators and Referee
if matchReferee:
self.getControl(32512).setLabel("[COLOR selected]" + translate(32023) + ": [/COLOR]" + matchReferee)
if matchSpectators:
self.getControl(32513).setLabel(matchSpectators + " " + translate(32024))
#Home Team Event Details
vars = [("goal",matchHomeGoalDetails),("redcard",matchHomeTeamRedCardDetails),("yellowcard",matchHomeTeamYellowCardDetails),("sub",matchHomeSubDetails)]
hometeamevents = {}
home_subs = {}
for key,var in vars:
if key and var:
if ";" in var:
events = var.split(";")
if events:
for event in events:
stringregex = re.findall("(\d+)'\:(.*)", event)
if stringregex:
for time,strevent in stringregex:
if key == "sub":
if time in home_subs.keys():
if strevent.strip().startswith("in"):
home_subs[time]["in"] = strevent
if "out" in home_subs[time].keys():
if not int(time) in hometeamevents.keys():
hometeamevents[int(time)] = [(key,home_subs[time]["out"] + " |" + home_subs[time]["in"])]
else:
hometeamevents[int(time)].append((key,home_subs[time]["out"] + " |" + home_subs[time]["in"]))
#Remove item from dict (we might have more than one sub associated to a given minute)
home_subs.pop(time,
|
dbbhattacharya/kitsune
|
vendor/packages/pylint/test/input/func_w0611.py
|
Python
|
bsd-3-clause
| 378
| 0.005291
|
"""check un
|
used import
"""
__revision__ = 1
import os
import sys
class NonRegr:
"""???"""
def __init__(self):
print 'initialized'
def sys(self):
"""should not get sys from there..."""
print self, sys
def dummy(self, truc):
"""yo"""
return self, truc
def blop(self):
|
"""yo"""
print self, 'blip'
|
1st1/uvloop
|
examples/bench/echoserver.py
|
Python
|
mit
| 6,317
| 0
|
import argparse
import asyncio
import gc
import os.path
import pathlib
import socket
import ssl
PRINT = 0
async def echo_server(loop, address, unix):
if unix:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(address)
sock.listen(5)
sock.setblocking(False)
if PRINT:
print('Server listening at', address)
with sock:
while True:
client, addr = await loop.sock_accept(sock)
if PRINT:
print('Connection from', addr)
loop.create_task(echo_client(loop, client))
async def echo_client(loop, client):
try:
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (OSError, NameError):
pass
with client:
while True:
data = await loop.sock_recv(client, 1000000)
if not data:
break
await loop.sock_sendall(client, data)
if PRINT:
print('Connection closed')
async def echo_client_streams(reader, writer):
sock = writer.get_extra_info('socket')
try:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (OSError, NameError):
pass
if PRINT:
print('Connection from', sock.getpeername())
while True:
data = await reader.read(1000000)
if not data:
break
writer.write(data)
if PRINT:
print('Connection closed')
writer.close()
class EchoProtocol(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
def connection_lost(self, exc):
self.transport = None
def data_received(self, data):
self.transport.write(data)
class EchoBufferedProtocol(asyncio.BufferedProtocol):
def connection_made(self, transport):
self.transport = transport
# Here the buffer is intended to be copied, so that the outgoing buffer
# won't be wrongly updated by next read
self.buffer = bytearray(256 * 1024)
def connection_lost(self, exc):
self.transport = None
def get_buffer(self, sizehint):
return self.buffer
def buffer_updated(self, nbytes):
self.transport.write(self.buffer[:nbytes])
async def print_debug(loop):
while True:
print(chr(27) + "[2J") # clear screen
loop.print_debug_info()
await asyncio.sleep(0.5)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--uvloop', default=False, action='store_true')
parser.add_argument('--streams', default=False, action='store_true')
parser.add_argument('--proto', default=False, action='store_true')
parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
parser.add_argument('--print', default=False, action='store_true')
parser.add_argument('--ssl', default=False, action='store_true')
parser.add_argument('--buffered', default=False, action='store_true')
args = parser.parse_args()
if args.uvloop:
import uvloop
loop = uvloop.new_event_loop()
print('using UVLoop')
else:
loop = asyncio.new_event_loop()
print('using asyncio loop')
asyncio.set_event_loop(loop)
loop.set_debug(False)
if args.print:
PRINT = 1
if hasattr(loop, 'print_debug_info'):
loop.create_task(print_debug(loop))
PRINT = 0
unix = False
if args.addr.startswith('file:'):
unix = True
addr = args.addr[5:]
        if os.path.exists(addr):
os.remove(addr)
else:
addr = args.addr.split(':')
addr[1] = int(addr[1])
addr = tuple(addr)
print('serving on: {}'.format(addr))
server_context = None
|
if args.ssl:
print('with SSL')
if hasattr(ssl, 'PROTOCOL_TLS'):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
else:
server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
server_context.load_cert_chain(
(pathlib.Path(__file__).parent.parent.parent /
'tests' / 'certs' / 'ssl_cert.pem'),
(pathlib.Path(__file__).parent.parent.parent /
'tests' / 'certs' / 'ssl_key.pem'))
if hasattr(server_context, 'check_hostname'):
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
if args.streams:
if args.proto:
print('cannot use --stream and --proto simultaneously')
exit(1)
if args.buffered:
print('cannot use --stream and --buffered simultaneously')
exit(1)
print('using asyncio/streams')
if unix:
coro = asyncio.start_unix_server(echo_client_streams,
addr,
ssl=server_context)
else:
coro = asyncio.start_server(echo_client_streams,
*addr,
ssl=server_context)
srv = loop.run_until_complete(coro)
elif args.proto:
if args.streams:
print('cannot use --stream and --proto simultaneously')
exit(1)
if args.buffered:
print('using buffered protocol')
protocol = EchoBufferedProtocol
else:
print('using simple protocol')
protocol = EchoProtocol
if unix:
coro = loop.create_unix_server(protocol, addr,
ssl=server_context)
else:
coro = loop.create_server(protocol, *addr,
ssl=server_context)
srv = loop.run_until_complete(coro)
else:
if args.ssl:
print('cannot use SSL for loop.sock_* methods')
exit(1)
print('using sock_recv/sock_sendall')
loop.create_task(echo_server(loop, addr, unix))
try:
loop.run_forever()
finally:
if hasattr(loop, 'print_debug_info'):
gc.collect()
print(chr(27) + "[2J")
loop.print_debug_info()
loop.close()
|
KernelAnalysisPlatform/KlareDbg
|
extra/parseida/parseidb.py
|
Python
|
gpl-3.0
| 553
| 0.009042
|
import sys
from hexdump import hexdump
# name.id0 - contains contents of B-tree style database
# name.id1 - contains flags that describe each program byte
# name.nam - contains index information related to named program locations
# name.til - contains information about local type definitions
BTREE_PAGE_SIZE = 8192
#dat = open(sys.argv[1]).read()
#id0 = dat[0x104:]
dat = open("/Users/geohot/tmp/test.id0").read()
print hex(len(dat)), len(dat)/BTREE_PAGE_SIZE
for i in range(0, len(dat), BTREE_PAGE_SIZE):
hexdump(dat[i:i+0xC0])
print ""
|
jkwill87/mapi
|
tests/endpoints/test_endpoints_tmdb.py
|
Python
|
mit
| 4,908
| 0
|
# coding=utf-8
"""Unit tests for mapi/endpoints/tmdb.py."""
import pytest
from mapi.endpoints import tmdb_find, tmdb_movies, tmdb_search_movies
from mapi.exceptions import MapiNotFoundException, MapiProviderException
from tests import JUNK_TEXT
GOONIES_IMDB_ID = "tt0089218"
GOONIES_TMDB_ID = 9340
JUNK_IMDB_ID = "tt1234567890"
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__imdb_success(tmdb_api_key):
expected_top_level_keys = {
"movie_results",
"person_results",
"tv_episode_results",
"tv_results",
"tv_season_results",
}
expected_movie_results_keys = {
"adult",
"backdrop_path",
"genre_ids",
"id",
"original_language",
"original_title",
"overview",
"poster_path",
"popularity",
"release_date",
"title",
"video",
"vote_average",
"vote_count",
}
result = tmdb_find(tmdb_api_key, "imdb_id", GOONIES_IMDB_ID)
assert isinstance(result, dict)
assert set(result.keys()) == expected_top_level_keys
assert len(result.get("movie_results", {})) > 0
assert expected_movie_results_keys == set(
result.get("movie_results", {})[0].keys()
)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__api_key_fail():
with pytest.raises(MapiProviderException):
tmdb_find(JUNK_TEXT, "imdb_id", GOONIES_IMDB_ID, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__invalid_id_imdb(tmdb_api_key):
with pytest.raises(MapiProviderException):
tmdb_find(tmdb_api_key, "imdb_id", JUNK_TEXT, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__not_found(tmdb_api_key):
with pytest.raises(MapiNotFoundException):
tmdb_find(tmdb_api_key, "imdb_id", JUNK_IMDB_ID)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_movies__success(tmdb_api_key):
expected_top_level_keys = {
"adult",
"backdrop_path",
"belongs_to_collection",
"budget",
"genres",
"homepage",
"id",
"imdb_id",
"original_language",
"original_title",
"overview",
"popularity",
"post
|
er_path",
"production_companies",
"production_countries",
"release_date",
"revenue",
"runtime",
"spoken_languages",
"status",
"tagline",
"title",
"video",
"vote_average",
"vote_count",
}
result = tmdb_movies(tmdb_api_key, GOONIES_TMDB_ID)
assert isinstance(result, dict)
assert set(result.keys()) == expected_top_level_keys
assert result.get("original_title") == "The Goonies"
def test_tmdb_movies__api_key_fail():
with pytest.raises(MapiProviderException):
tmdb_movies(JUNK_TEXT, "", cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_movies__id_tmdb_fail(tmdb_api_key):
with pytest.raises(MapiProviderException):
tmdb_movies(tmdb_api_key, JUNK_TEXT, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_movies__not_found(tmdb_api_key):
with pytest.raises(MapiNotFoundException):
tmdb_movies(tmdb_api_key, "1" * 10)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_search_movies__success(tmdb_api_key):
expected_top_level_keys = {
"page",
"results",
"total_pages",
"total_results",
}
expected_results_keys = {
"adult",
"backdrop_path",
"genre_ids",
"id",
"original_language",
"original_title",
"overview",
"popularity",
"poster_path",
"release_date",
"title",
"video",
"vote_average",
"vote_count",
}
result = tmdb_search_movies(tmdb_api_key, "the goonies", 1985)
assert isinstance(result, dict)
assert set(result.keys()) == expected_top_level_keys
assert isinstance(result["results"], list)
assert expected_results_keys == set(result.get("results", [{}])[0].keys())
assert len(result["results"]) == 1
assert result["results"][0]["original_title"] == "The Goonies"
result = tmdb_search_movies(tmdb_api_key, "the goonies")
assert len(result["results"]) > 1
def test_tmdb_search_movies__bad_api_key():
with pytest.raises(MapiProviderException):
tmdb_search_movies(JUNK_TEXT, "the goonies", cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_search_movies__bad_title(tmdb_api_key):
with pytest.raises(MapiNotFoundException):
tmdb_search_movies(tmdb_api_key, JUNK_TEXT, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_search_movies__bad_year(tmdb_api_key):
with pytest.raises(MapiProviderException):
tmdb_search_movies(
tmdb_api_key, "the goonies", year=JUNK_TEXT, cache=False
)
|
eli261/jumpserver
|
apps/common/__init__.py
|
Python
|
gpl-2.0
| 150
| 0.006667
|
from __future__ import absolute_import
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
|
|
rew4332/tensorflow
|
tensorflow/contrib/slim/python/slim/model_analyzer.py
|
Python
|
apache-2.0
| 3,218
| 0.007147
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for analyzing the operations and variables in a TensorFlow graph.
To analyze the operations in a graph:
images, labels = LoadData(...)
predictions = MyModel(images)
slim.model_analyzer.analyze_ops(tf.get_default_graph(), print_info=True)
To analyze the model variables in a graph:
variables = tf.model_variables()
slim.model_analyzer.analyze_vars(variables, print_info=False)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def tensor_description(var):
"""Returns a compact and informative string about a tensor.
Args:
var: A tensor variable.
Returns:
a string with type and size, e.g.: (float32 1x8x8x1024).
"""
description = '(' + str(var.dtype.name) + ' '
sizes = var.get_shape()
for i, size in enumerate(sizes):
description += str(size)
if i < len(sizes) - 1:
description += 'x'
description += ')'
return description
def analyze_ops(graph, print_info=False):
"""Compute the estimated size of the ops.outputs in the graph.
Args:
graph: the graph containing the operations.
print_info: Optional, if true print ops and their outputs.
Returns:
total size of the ops.outputs
"""
if print_info:
print('---------')
print('Operations: name -> (type shapes) [size]')
print('---------')
total_size = 0
for op in graph.get_operations():
op_size = 0
shapes = []
for output in op.outputs:
# if output.num_elements() is None or [] assume size 0.
output_size = output.get_shape().num_elements() or 0
if output.get_shape():
shapes.append(tensor_description(output))
op_size += output_size
if print_info:
print(op.name, '\t->', ', '.join(shapes), '[' + str(op_size) + ']')
total_size += op_size
return total_size
def analyze_vars(variables, print_info=False):
"""Prints the names and shapes of the variables.
Args:
variables: list of variables, for example tf.all_variables().
print_info: Optional, if true print variables and their shape.
Returns:
total size of the variables.
"""
if print_info:
print('---------')
print('Variables: name (type shape) [size]')
print('---------')
total_size = 0
for var in variables:
# if var.num_elements() is None or [] assume size 0.
var_size = var.get_shape().num_elements() or 0
total_size += var_size
if print_info:
print(var.name, tensor_description(var), '[' + str(var_size) + ']')
return total_size
|
saai/codingbitch
|
DP/largestRectangleArea.py
|
Python
|
mit
| 597
| 0.005025
|
class Solution:
# @param {integer[]} height
# @return {integer}
def largestRectangleArea(self, height):
n = len(height)
ma = 0
stack = [-1]
for i in xrange(n):
while(stack[-1] > -1):
if height[i]<height[stack[-1]]:
top = stack.pop()
|
ma = max(ma, height[top]*(i-1-stack[-1]))
else:
break
stack.append(i)
while(stack[-1] != -1):
top = stack.pop()
ma = max(ma, height[top]*(n-1-stack[-1]))
return ma
|