max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
TestContract.py | alexanderpfefferle/EVM-PoW | 0 | 12757451 | <reponame>alexanderpfefferle/EVM-PoW
from eth_account import Account
from web3 import Web3
import json
import os
import argparse

# ---------------------------------------------------------------------------
# CLI: deploy (or attach to) a PoW challenge contract, probe it off-chain and
# optionally submit rewarded inputs on-chain.
# ---------------------------------------------------------------------------
arg_parser = argparse.ArgumentParser(
    description="Test a given contract by deploying and executing it.")
arg_parser.add_argument(
    "contract", help="path to the contracts .abi and .bin.patched file")
mode_group = arg_parser.add_mutually_exclusive_group(required=True)
# SECURITY NOTE: eval() on a CLI argument executes arbitrary code; acceptable
# only because this is a local developer tool run on trusted input.
mode_group.add_argument("-p", "--param", help="deploy the challenge contract with given parameters for the constructor",
                        action='store', type=lambda x: eval(str(x)))  # use eval and a list to allow arbitrary number/type of parameters
mode_group.add_argument("-a", "--address", help="use already created contract with given address",
                        action='store', type=str)
arg_parser.add_argument("-t", "--target", help="set the PoW hash limit for a reward",
                        action='store', default="0x2000000000000000000000000000000000000000000000000000000000000000")
arg_parser.add_argument("-i", "--iterations", help="maximum number of different inputs given to offChain",
                        action='store', type=int, default=32)
arg_parser.add_argument("-s", "--submit", help="submit inputs on-chain if they will be rewarded (counterexample/hash below target)",
                        action='store_true')
args = arg_parser.parse_args()

# Node endpoint and signing key come from the environment; `addy` is the
# address derived from the private key and used as the transaction sender.
w3 = Web3(Web3.HTTPProvider(os.environ['WEB3PROVIDER']))
privkey = os.environ['PRIVKEY']
addy = Account.from_key(privkey).address

abi = open(args.contract+".abi").read().strip()
contract = None
if args.address:
    # Attach to an already-deployed contract.
    contract = w3.eth.contract(address=args.address, abi=json.loads(abi))
else:
    # Deploy a fresh contract: the constructor receives the user parameters
    # plus the PoW target, funded with 0.1 ether as the reward pool.
    bytecode = open(args.contract+".bin.patched").read().strip()
    contract = w3.eth.contract(bytecode=bytecode, abi=json.loads(abi))
    signed_txn = w3.eth.account.signTransaction(contract.constructor(*args.param, args.target).buildTransaction(dict(
        nonce=w3.eth.getTransactionCount(addy),
        gas=3000000,
        value=w3.toWei(0.1, 'ether'),
    )), privkey)
    txid = w3.eth.sendRawTransaction(signed_txn.rawTransaction).hex()
    print("TX submitted, waiting for confirmation")
    tx = w3.eth.waitForTransactionReceipt(txid)
    print("Contract deployed:", tx["contractAddress"])
    # Re-bind the contract object to the deployed address.
    contract = w3.eth.contract(
        address=tx["contractAddress"], abi=json.loads(abi))

# Read the current target/active state from the chain.
target = contract.functions.target().call().hex()
print("Target:", "0x"+target)
print("Active:", contract.functions.active().call())
print("Start mining:")
for i in range(args.iterations):
    # offChain() is a read-only probe: returns (is_counterexample, hash).
    is_counterexample, hash_value = contract.functions.offChain(i).call({
        'from': addy})
    hash_value = hash_value.hex()
    print(i, is_counterexample, "0x"+hash_value)
    # Submit on-chain only if the input would actually be rewarded:
    # either it is a counterexample or its hash beats the PoW target.
    if args.submit and (is_counterexample or int(hash_value, 16) < int(target, 16)):
        txid = w3.eth.sendRawTransaction(w3.eth.account.signTransaction(contract.functions.onChain(i).buildTransaction({
            'nonce': w3.eth.getTransactionCount(addy),
            'gas': 300000}), privkey).rawTransaction).hex()
        tx = w3.eth.waitForTransactionReceipt(txid)
        print("Submitted on-chain tx:", tx["transactionHash"].hex())
    if is_counterexample:
        print("Counterexample has been found!")
        break
| 2.203125 | 2 |
cea/utilities/doc_graphviz.py | architecture-building-systems/cea-toolbox | 121 | 12757452 | <gh_stars>100-1000
"""
doc_graphviz.py
Creates the graphviz output used to visualize script dependencies.
This file relies on the schemas.yml to create the graphviz plots.
"""
import os
import cea.config
import cea.schemas
from jinja2 import Template
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "2.14"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def create_graphviz_files(graphviz_data, documentation_dir):
    """
    Render one graphviz (.gv) file per script into ``<documentation_dir>/graphviz``.

    :param dict graphviz_data: maps script names to a set of
        (input/output, script, locator_method, folder_name, file_name)
    :param documentation_dir: folder with the documentation in it ($repo/docs)
    :return: None
    """
    graphviz_dir = os.path.join(documentation_dir, "graphviz")
    if os.path.exists(graphviz_dir):
        # Start from a clean slate: remove previously generated graphs.
        for fname in os.listdir(graphviz_dir):
            print("deleting {fname}".format(fname=fname))
            os.remove(os.path.join(graphviz_dir, fname))
    else:
        # BUGFIX: the directory must exist before the .gv files are written
        # below; previously a missing directory made the open() call fail.
        os.makedirs(graphviz_dir)
    for script_name in graphviz_data:
        print("Creating graph for: {script_name}".format(**locals()))
        # creating new variable to preserve original trace_data used by other methods
        trace_data = shorten_trace_data_paths(sorted(graphviz_data[script_name]))
        trace_data = unique_users_creators(trace_data)
        # set of unique scripts
        scripts = sorted(set([td[1] for td in trace_data]))
        # set of common dirs for each file accessed by the script(s)
        db_group = sorted(set(td[3] for td in trace_data))
        # fixed node width used by the template
        width = 5
        # jinja2 template setup and execution
        template_path = os.path.join(documentation_dir, "templates", "graphviz_template.gv")
        template = Template(open(template_path, 'r').read())
        digraph = template.render(tracedata=trace_data, script_name=script_name, scripts=scripts, db_group=db_group,
                                  width=width)
        digraph = remove_extra_lines(digraph)
        with open(os.path.join(graphviz_dir, "{script}.gv".format(script=script_name)), 'w') as f:
            f.write(digraph)
def unique_users_creators(trace_data):
    """
    Make sure that the data does not define the same script as producer _and_ consumer at the same time. Prefer
    producer.

    :param trace_data: list of tuples of form (0:input/output, 1:script, 2:locator_method, 3:folder_name, 4:file_name)
    :return: trace_data, filtered
    """
    # Locator methods that appear on an "input" edge win any conflict.
    consumed = {entry[2] for entry in trace_data if entry[0] == "input"}
    return [entry for entry in trace_data
            if entry[0] == "input" or entry[2] not in consumed]
def remove_extra_lines(digraph):
    """Drop blank and whitespace-only lines from the rendered digraph source."""
    meaningful = (line for line in digraph.split('\n') if line.strip())
    return "\n".join(meaningful)
def shorten_trace_data_paths(trace_data):
    """
    Shorten the paths in trace_data to max 3 components (in place).

    :param trace_data: list of (direction, script, method, path, db) tuples
    :return: the same list, with each path truncated to its last 3 components
    """
    for index, entry in enumerate(trace_data):
        direction, script, method, path, db = entry
        short_path = "/".join(path.rsplit('/')[-3:])  # keep at most the last 3 components
        trace_data[index] = (direction, script, method, short_path, db)
    return trace_data
def get_list_of_digraphs(documentation_dir, schema_scripts):
    """Collect [script, underline, digraph-source] triples for every script
    that has a rendered graphviz file under *documentation_dir*.

    Scripts without a corresponding .gv file are silently skipped.
    """
    result = []
    for script_name in schema_scripts:
        gv_path = os.path.join(documentation_dir, 'graphviz/%s.gv' % script_name)
        if not os.path.isfile(gv_path):
            continue
        with open(gv_path) as handle:
            source = handle.read()
        # The underline is used as an RST section marker in the template.
        result.append([script_name, '-' * len(script_name), source])
    return result
def main(_):
    """Build one graphviz digraph per script from schemas.yml and regenerate
    docs/script-data-flow.rst from the collected digraphs.

    :param _: unused (a cea.config.Configuration passed by the CLI framework)
    """
    schemas = cea.schemas.schemas(plugins=[])
    schema_scripts = cea.schemas.get_schema_scripts(plugins=[])
    # $repo/docs, located relative to the installed cea.config module
    documentation_dir = os.path.join(os.path.dirname(cea.config.__file__), '..', 'docs')
    graphviz_data = {}
    for script in schema_scripts:
        trace_data = set()
        for locator_method in schemas:
            file_path = schemas[locator_method]['file_path']
            file_name = os.path.basename(file_path)
            folder_name = os.path.dirname(file_path)
            # "output" edge if the script creates the file, "input" edge if it reads it
            if script in schemas[locator_method]['created_by']:
                trace_data.add(('output', script, locator_method, folder_name, file_name))
            if script in schemas[locator_method]['used_by']:
                trace_data.add(('input', script, locator_method, folder_name, file_name))
        graphviz_data[script] = trace_data
    create_graphviz_files(graphviz_data, documentation_dir)
    list_of_digraphs = get_list_of_digraphs(documentation_dir=documentation_dir, schema_scripts=schema_scripts)
    template_path = os.path.join(documentation_dir, "templates", "graphviz_template.rst")
    template = Template(open(template_path, 'r').read())
    with open(os.path.join(documentation_dir, 'script-data-flow.rst'), 'w') as fp:
        fp.write(template.render(list_of_digraphs=list_of_digraphs))


if __name__ == '__main__':
    main(cea.config.Configuration())
| 2.453125 | 2 |
agents/sonic_util.py | wassname/retro-baselines | 4 | 12757453 | """
Environments and wrappers for Sonic training.
"""
import gym
import numpy as np
import gzip
import retro
import os
from baselines.common.atari_wrappers import WarpFrame, FrameStack
# from retro_contest.local import make
import logging
import retro_contest
import pandas as pd
# Level lists from the Sonic Retro contest: (game, state) pairs used for
# training and validation level selection.
train_states = pd.read_csv('../data/sonic_env/sonic-train.csv')
validation_states = pd.read_csv('../data/sonic_env/sonic-validation.csv')

logger = logging.getLogger(__name__)
def make(game, state, discrete_actions=False, bk2dir=None, max_episode_steps=4000):
    """Make the competition environment.

    :param game: gym-retro game name
    :param state: level/save-state name within the game
    :param discrete_actions: if True, use a discrete action space instead of
        the filtered multi-binary one
    :param bk2dir: if given, record .bk2 replays into this directory
    :param max_episode_steps: hard episode cap enforced via TimeLimit
    :return: the wrapped environment
    """
    print('game:', game, 'state:', state)
    use_restricted_actions = retro.ACTIONS_FILTERED
    if discrete_actions:
        use_restricted_actions = retro.ACTIONS_DISCRETE
    try:
        # Prefer the contest scoring scenario when available.
        env = retro.make(game, state, scenario='contest', use_restricted_actions=use_restricted_actions)
    except Exception:
        # Fall back to the default scenario if 'contest' is not installed.
        env = retro.make(game, state, use_restricted_actions=use_restricted_actions)
    if bk2dir:
        env.auto_record(bk2dir)
    # Stochastic frame skip (sticky actions), as used in the Retro contest.
    env = retro_contest.StochasticFrameSkip(env, n=4, stickprob=0.25)
    env = gym.wrappers.TimeLimit(env, max_episode_steps=max_episode_steps)
    return env
def make_env(stack=True, scale_rew=True):
    """
    Create an environment with some standard wrappers.

    A random training level is sampled for the first episode; RandomGameReset
    then re-samples a level (within the same game) on every reset.

    NOTE(review): the `stack` parameter is currently unused here - FrameStack
    is imported but never applied; confirm whether stacking happens elsewhere.
    """
    start_state = train_states.sample().iloc[0]
    env = make(game=start_state.game, state=start_state.state, max_episode_steps=600)
    env = SonicDiscretizer(env)
    # env = AllowBacktracking(env)
    env = RandomGameReset(env)
    env = EpisodeInfo(env)
    if scale_rew:
        env = RewardScaler(env)
    env = WarpFrame(env)
    return env
class SonicDiscretizer(gym.ActionWrapper):
    """
    Wrap a gym-retro environment and expose a small discrete action
    space tailored to the Sonic games.
    """

    def __init__(self, env):
        super(SonicDiscretizer, self).__init__(env)
        buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT", "C", "Y", "X", "Z"]
        combos = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'], ['DOWN'],
                  ['DOWN', 'B'], ['B']]
        # Pre-compute one boolean button mask per discrete action.
        self._actions = []
        for combo in combos:
            mask = np.array([button in combo for button in buttons])
            self._actions.append(mask)
        self.action_space = gym.spaces.Discrete(len(self._actions))

    def action(self, a):  # pylint: disable=W0221
        # Return a copy so callers cannot mutate the cached mask.
        return self._actions[a].copy()
class RewardScaler(gym.RewardWrapper):
    """
    Bring rewards to a reasonable scale for PPO.

    This is incredibly important and effects performance
    drastically.
    """

    def reward(self, reward):
        # Scale raw environment rewards down by a factor of 100.
        return reward * 0.01
class AllowBacktracking(gym.Wrapper):
    """
    Use deltas in max(X) as the reward, rather than deltas
    in X. This way, agents are not discouraged too heavily
    from exploring backwards if there is no way to advance
    head-on in the level.
    """

    def __init__(self, env):
        super(AllowBacktracking, self).__init__(env)
        self._cur_x = 0   # cumulative reward so far (proxy for horizontal progress)
        self._max_x = 0   # best cumulative reward reached this episode

    def reset(self, **kwargs):  # pylint: disable=E0202
        self._cur_x = 0
        self._max_x = 0
        return self.env.reset(**kwargs)

    def step(self, action):  # pylint: disable=E0202
        obs, rew, done, info = self.env.step(action)
        self._cur_x += rew
        # Reward only new high-water-mark progress; moving backwards yields 0.
        rew = max(0, self._cur_x - self._max_x)
        self._max_x = max(self._max_x, self._cur_x)
        return obs, rew, done, info
class RandomGameReset(gym.Wrapper):
    """On each reset, load a random training level from the same game."""

    def __init__(self, env, state=None):
        """Reset game to a random level.

        :param state: optional substring to filter states by (currently the
            filtering code below is commented out and *state* is unused).
        """
        super().__init__(env)
        self.state = state

    def step(self, action):
        return self.env.step(action)

    def reset(self):
        # Reset to a random level (but don't change the game)
        try:
            game = self.env.unwrapped.gamename
        except AttributeError:
            # Not a retro env (or no game name): fall through to a plain reset.
            logger.warning('no game name')
            pass
        else:
            game_path = retro.get_game_path(game)
            # pick a random state that's in the same game
            game_states = train_states[train_states.game == game]
            # if self.state:
            #     game_states = game_states[game_states.state.str.contains(self.state)]
            # Load the chosen save state into the emulator before resetting.
            choice = game_states.sample().iloc[0]
            state = choice.state + '.state'
            logger.info('reseting env %s to %s %s', self.unwrapped.rank, game, state)
            with gzip.open(os.path.join(game_path, state), 'rb') as fh:
                self.env.unwrapped.initial_state = fh.read()
        return self.env.reset()
class EpisodeInfo(gym.Wrapper):
    """
    Add information about episode end and total final reward.

    On the terminal step, ``info["episode"]`` is populated with the episode
    length ``l`` and cumulative reward ``r`` unless those keys are already set.
    """

    def __init__(self, env):
        super(EpisodeInfo, self).__init__(env)
        self._ep_len = 0        # steps taken in the current episode
        self._ep_rew_total = 0  # cumulative reward of the current episode

    def reset(self, **kwargs):  # pylint: disable=E0202
        self._ep_len = 0
        self._ep_rew_total = 0
        return self.env.reset(**kwargs)

    def step(self, action):  # pylint: disable=E0202
        obs, rew, done, info = self.env.step(action)
        self._ep_len += 1
        self._ep_rew_total += rew
        if done:
            if not isinstance(info, dict):
                info = {"episode": {"l": self._ep_len, "r": self._ep_rew_total}}
            else:
                # BUGFIX: previously a dict without an "episode" key was
                # REPLACED wholesale, discarding all other info keys. Now we
                # merge, filling in only the missing episode statistics.
                episode = info.setdefault("episode", {})
                episode.setdefault("l", self._ep_len)
                episode.setdefault("r", self._ep_rew_total)
        return obs, rew, done, info
| 2.3125 | 2 |
pyne_xmas_elves/server/elves/game/consumers.py | pythonnortheast/xmas-elves | 0 | 12757454 | <gh_stars>0
"""Websocket consumers.
"""
from channels import Group
def ws_connect(message):
    """Handle a new websocket connection: join the "session" group and accept."""
    # Add to reader group
    Group("session").add(message.reply_channel)
    # Accept the connection request
    message.reply_channel.send({"accept": True})
def ws_disconnect(message):
    """Handle a clean websocket disconnect: leave the "session" group."""
    # Remove from reader group on clean disconnect
    Group("session").discard(message.reply_channel)
| 2.578125 | 3 |
test_project/runtests.py | mvpoland/django-datatrans | 1 | 12757455 | # This file mainly exists to allow python setup.py test to work.
import os, sys
from django.test.utils import get_runner
from django.conf import settings
os.environ["DJANGO_SETTINGS_MODULE"] = "test_project.settings"
test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, test_dir)
def runtests():
    """Initialise Django and run the full test suite, exiting the process
    with the number of test failures as the status code."""
    try:
        # Django >= 1.7 requires an explicit setup() to populate the app registry.
        from django import setup
        setup()
    except ImportError:
        # Older Django versions have no setup(); configuration is implicit.
        pass
    test_runner = get_runner(settings)
    failures = test_runner().run_tests([])
    sys.exit(failures)


if __name__ == "__main__":
    runtests()
| 1.84375 | 2 |
instruments/base_instrument.py | coder6464/PYBOR | 30 | 12757456 | # Copyright © 2017 <NAME>, All rights reserved
# http://github.com/omartinsky/pybor
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from yc_curve import *
from yc_date import *
def assert_is_set(variables):
    """Return True iff every variable in *variables* differs from the 'null' sentinel.

    BUGFIX: the original evaluated ``all([var != 'null'] for var in variables)``
    - a generator of one-element LISTS, which are always truthy, so the
    function unconditionally returned True.
    """
    return all(var != 'null' for var in variables)
def assert_is_not_set(variables):
    """Return True iff every variable in *variables* equals the 'null' sentinel.

    BUGFIX: the original evaluated ``all([var == 'null'] for var in variables)``
    - a generator of one-element LISTS, which are always truthy, so the
    function unconditionally returned True.
    """
    return all(var == 'null' for var in variables)
def get_dataframe_row_cells(row):
    """Unpack the instrument-definition cells of a dataframe *row*.

    Returns a tuple: (forecast curve left/right, discount curve left/right,
    convention left/right, start, length) - in that fixed column order.
    """
    columns = ('Forecast Curve Left', 'Forecast Curve Right',
               'Discount Curve Left', 'Discount Curve Right',
               'Convention Left', 'Convention Right',
               'Start', 'Length')
    return tuple(row[column] for column in columns)
class Instrument:
    """Abstract base for curve-calibration instruments.

    Concrete subclasses must supply a pillar date and a par-rate calculation;
    quotes are par rates expressed in percent.
    """

    def __init__(self, name):
        self.name_ = name

    def get_name(self):
        """Name identifying this instrument."""
        return self.name_

    def __str__(self):
        return self.name_

    def get_pillar_date(self):
        """Pillar (maturity) date on the curve - subclass responsibility."""
        assert False, 'method must be implemented in child class %s' % type(self)

    def calc_par_rate(self, curvemap):
        """Par rate implied by the curves in *curvemap* - subclass responsibility."""
        assert False, 'method must be implemented in child class %s' % type(self)

    def drdp(self):
        """Derivative of quoted price with respect to par rate."""
        return 100.0

    def price_from_par_rate(self, x):  # TODO rename quote_from_rate
        """Convert a par rate (fraction) into a quoted price (percent)."""
        return x * 100.0

    def par_rate_from_price(self, x):  # TODO rename rate_from_quote
        """Convert a quoted price (percent) back into a par rate (fraction)."""
        return x * 0.01
| 2.046875 | 2 |
cpgf/samples/irrlicht/05.userinterface.py | mousepawmedia/libdeps | 187 | 12757457 | cpgf._import(None, "builtin.debug");
cpgf._import(None, "builtin.core");
class SAppContext:
    """Shared UI state: the Irrlicht device, a cascade counter for new
    windows, and the logging listbox.

    BUGFIX: the original declared these attributes with trailing commas
    (``device = None,``), which made them one-element tuples; in particular
    ``Context.counter + 30`` would raise TypeError. Plain values are intended.
    """
    device = None
    counter = 0
    listbox = None


Context = SAppContext()
# GUI element ids used by the event receiver to identify which widget fired.
GUI_ID_QUIT_BUTTON = 101;
GUI_ID_NEW_WINDOW_BUTTON = 102;
GUI_ID_FILE_OPEN_BUTTON = 103;
GUI_ID_TRANSPARENCY_SCROLL_BAR = 104;
def makeMyEventReceiver(receiver) :
    """Install an OnEvent handler on *receiver* implementing the demo's GUI
    logic: the transparency scrollbar and the quit/new-window/file-open
    buttons. Returns nothing; mutates *receiver* in place."""
    def OnEvent(me, event) :
        # Only GUI events are handled; anything else falls through (False).
        if event.EventType == irr.EET_GUI_EVENT :
            id = event.GUIEvent.Caller.getID();
            env = Context.device.getGUIEnvironment();
            if event.GUIEvent.EventType == irr.EGET_SCROLL_BAR_CHANGED :
                if id == GUI_ID_TRANSPARENCY_SCROLL_BAR :
                    # Apply the scrollbar position as the alpha of every skin color.
                    pos = cpgf.cast(event.GUIEvent.Caller, irr.IGUIScrollBar).getPos();
                    skin = env.getSkin();
                    for i in range(irr.EGDC_COUNT) :
                        col = skin.getColor(i);
                        col.setAlpha(pos);
                        skin.setColor(i, col);
            elif event.GUIEvent.EventType == irr.EGET_BUTTON_CLICKED :
                if id == GUI_ID_QUIT_BUTTON :
                    Context.device.closeDevice();
                    return True;
                elif id == GUI_ID_NEW_WINDOW_BUTTON :
                    # Log, then open a new test window cascaded by the counter.
                    Context.listbox.addItem("Window created");
                    Context.counter = Context.counter + 30;
                    if Context.counter > 200 :
                        Context.counter = 0;
                    window = env.addWindow(irr.rect_s32(100 + Context.counter, 100 + Context.counter, 300 + Context.counter, 200 + Context.counter), False, "Test window");
                    env.addStaticText("Please close me", irr.rect_s32(35,35,140,50), True, False, window);
                    return True;
                elif id == GUI_ID_FILE_OPEN_BUTTON :
                    Context.listbox.addItem("File open");
                    env.addFileOpenDialog("Please choose a file.");
                    return True;
        return False;
    receiver.OnEvent = OnEvent;
def start() :
    """Run the user-interface demo: create the Irrlicht device, build the GUI
    widgets, install the event receiver and pump the render loop until the
    window is closed. Returns 0 on success, 1 on setup failure."""
    driverType = irr.driverChoiceConsole();
    if driverType == irr.EDT_COUNT :
        return 1;
    device = irr.createDevice(driverType, irr.dimension2d_u32(640, 480));
    if device == None :
        return 1;
    device.setWindowCaption("cpgf Irrlicht Python Binding - User Interface Demo");
    device.setResizable(True);
    driver = device.getVideoDriver();
    env = device.getGUIEnvironment();
    skin = env.getSkin();
    font = env.getFont("../../media/fonthaettenschweiler.bmp");
    if font :
        skin.setFont(font);
    # Tooltips keep the built-in font regardless of the custom skin font.
    skin.setFont(env.getBuiltInFont(), irr.EGDF_TOOLTIP);
    # Buttons wired to the ids handled in the event receiver.
    env.addButton(irr.rect_s32(10,240,110,240 + 32), None, GUI_ID_QUIT_BUTTON, "Quit", "Exits Program");
    env.addButton(irr.rect_s32(10,280,110,280 + 32), None, GUI_ID_NEW_WINDOW_BUTTON, "New Window", "Launches a Window");
    env.addButton(irr.rect_s32(10,320,110,320 + 32), None, GUI_ID_FILE_OPEN_BUTTON, "File Open", "Opens a file");
    env.addStaticText("Transparent Control:", irr.rect_s32(150,20,350,40), True);
    scrollbar = env.addScrollBar(True, irr.rect_s32(150, 45, 350, 60), None, GUI_ID_TRANSPARENCY_SCROLL_BAR);
    scrollbar.setMax(255);
    # Initialise the scrollbar from the current window alpha.
    scrollbar.setPos(env.getSkin().getColor(irr.EGDC_WINDOW).getAlpha());
    env.addStaticText("Logging ListBox:", irr.rect_s32(50,110,250,130), True);
    listbox = env.addListBox(irr.rect_s32(50, 140, 250, 210));
    env.addEditBox("Editable Text", irr.rect_s32(350, 80, 550, 100));
    # Publish shared state for the event receiver.
    Context.device = device;
    Context.counter = 0;
    Context.listbox = listbox;
    MyEventReceiver = cpgf.cloneClass(irr.IEventReceiverWrapper);
    makeMyEventReceiver(MyEventReceiver);
    receiver = MyEventReceiver();
    device.setEventReceiver(receiver);
    env.addImage(driver.getTexture("../../media/irrlichtlogo2.png"), irr.position2d_s32(10,10));
    # Main render loop: draw only while the window is active.
    while device.run() and driver :
        if device.isWindowActive() :
            driver.beginScene(True, True, irr.SColor(0,200,200,200));
            env.drawAll();
            driver.endScene();
    device.drop();
    return 0;

start();
| 2.109375 | 2 |
models/system_model_v3/sim_params.py | trangnv/geb-simulations-h20 | 7 | 12757458 | <filename>models/system_model_v3/sim_params.py
# Number of timesteps simulated in a single model run.
SIMULATION_TIME_STEPS = 100
# Number of Monte Carlo runs per parameter combination.
MONTE_CARLO_RUNS = 1
| 1.085938 | 1 |
mstrio/api/objects.py | LLejoly/mstrio-py | 0 | 12757459 | from mstrio.utils.helper import response_handler
def get_object_info(connection, id, type, error_msg=None):
    """Get information for a specific object in a specific project; if you do
    not specify a project ID, you get information for the object in all
    projects.

    You identify the object with the object ID and object type. You specify
    the object type as a query parameter; possible values for object type are
    provided in EnumDSSXMLObjectTypes.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        id (str): Object ID
        type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup),
            44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server
            configuration)
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        HTTP response object returned by the MicroStrategy REST server.
    """
    headers = {}
    # NOTE(review): this compares against the string "project" while the other
    # functions in this module compare against the numeric type 32 - confirm
    # which form callers actually pass here.
    if type == "project":
        headers = {'X-MSTR-ProjectID': None}
    response = connection.session.get(url=connection.base_url + '/api/objects/' + id,
                                      headers=headers,
                                      params={'type': type})
    if not response.ok:
        if error_msg is None:
            error_msg = "Error getting information for the specific object."
        # ('ERR001', 500) responses are whitelisted for upstream handling.
        response_handler(response, error_msg, whitelist=[('ERR001', 500)])
    return response
def delete_object(connection, id, type, error_msg=None):
    """Delete a specific object.

    You identify the object with the object ID and object type. You specify
    the object type as a query parameter; possible values for object type are
    provided in EnumDSSXMLObjectTypes.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        id (str): Object ID
        type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup),
            44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server
            configuration)
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        HTTP response object returned by the MicroStrategy REST server.
    """
    headers = {}
    # Deleting a project (type 32) must not be scoped to the current project.
    if type == 32:
        headers = {'X-MSTR-ProjectID': None}
    response = connection.session.delete(url=connection.base_url + '/api/objects/' + id,
                                         headers=headers,
                                         params={'type': type})
    if not response.ok:
        if error_msg is None:
            error_msg = "Error deleting object {}".format(id)
        response_handler(response, error_msg)
    return response
def update_object(connection, id, body, type, error_msg=None, verbose=True):
    """Update a specific object.

    You identify the object with the object ID and object type. You specify
    the object type as a query parameter; possible values for object type are
    provided in EnumDSSXMLObjectTypes.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        id (str): Object ID
        body: (object): body of the request
        type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup),
            44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server
            configuration)
        error_msg (string, optional): Custom Error Message for Error Handling
        verbose (bool, optional): controls error-message verbosity in
            `response_handler`

    Returns:
        HTTP response object returned by the MicroStrategy REST server.
    """
    headers = {}
    # When updating a project (type 32) the object id is itself the project id.
    if type == 32:
        headers = {'X-MSTR-ProjectID': str(id)}
    response = connection.session.put(url=connection.base_url + '/api/objects/' + id,
                                      headers=headers,
                                      params={'type': type},
                                      json=body)
    if not response.ok:
        if error_msg is None:
            error_msg = "Error updating object {}".format(id)
        response_handler(response, error_msg, verbose=verbose)
    return response
def copy_object(connection, id, name, folder_id, type, error_msg=None):
    """Create a copy of a specific object.

    You identify the object with the object ID and object type. You obtain the
    authorization token needed to execute the request using POST /auth/login;
    you obtain the project ID using GET /projects. You pass the authorization
    token and the project ID in the request header. You specify the object ID in
    the path of the request and object type as a query parameter; possible
    values for object type are provided in EnumDSSXMLObjectTypes. You specify
    the name and location (folder ID) of the new object in the body of the
    request. If you do not specify a new name, a default name is generated, such
    as 'Old Name (1)'. If you do not specify a folder ID, the object is saved in
    the same folder as the source object.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        id (str): Object ID
        name (str): name for the copy
        folder_id (str): destination folder ID
        type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup),
            44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server
            configuration)
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        HTTP response object returned by the MicroStrategy REST server.
    """
    # Copying requires a selected project on the connection.
    connection._validate_project_selected()
    body = {
        "name": name,
        "folderId": folder_id
    }
    response = connection.session.post(url=connection.base_url + '/api/objects/' + id + '/copy',
                                       params={'type': type},
                                       json=body)
    if not response.ok:
        if error_msg is None:
            error_msg = "Error creating a copy of object {}".format(id)
        response_handler(response, error_msg)
    return response
def get_vldb_settings(connection, id, type, project_id=None, error_msg=None):
    """Get vldb settings for an object.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        id (str): Object ID
        type (int): DssXmlTypeReportDefinition(3) for Dataset and
            DssXmlTypeDocumentDefinition(55) for document/dossier
        project_id: project ID; defaults to the connection's selected project
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        HTTP response object returned by the MicroStrategy REST server
    """
    headers = {}
    # Use the explicit project id when given, otherwise fall back to the
    # project currently selected on the connection (which must exist).
    if project_id:
        headers = {'X-MSTR-ProjectID': project_id}
    else:
        connection._validate_project_selected()
        headers = {'X-MSTR-ProjectID': connection.project_id}
    response = connection.session.get(url=f"{connection.base_url}/api/objects/{id}/vldb/propertySets",
                                      params={'type': type},
                                      headers=headers)
    if not response.ok:
        if error_msg is None:
            error_msg = "Error getting VLDB settings for object '{}'".format(id)
        response_handler(response, error_msg)
    return response
def delete_vldb_settings(connection, id, type, project_id=None, error_msg=None):
    """Delete all customized vldb settings in one object, this operation will
    reset all vldb settings to default.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        id (str): Object ID
        type (int): DssXmlTypeReportDefinition(3) for Dataset and
            DssXmlTypeDocumentDefinition(55) for document/dossier
        project_id: project ID; defaults to the connection's selected project
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        HTTP response object returned by the MicroStrategy REST server
    """
    headers = {}
    # Use the explicit project id when given, otherwise the selected project.
    if project_id:
        headers = {'X-MSTR-ProjectID': project_id}
    else:
        connection._validate_project_selected()
        headers = {'X-MSTR-ProjectID': connection.project_id}
    response = connection.session.delete(url=f"{connection.base_url}/api/objects/{id}/vldb/propertySets",
                                         params={'type': type},
                                         headers=headers)
    if not response.ok:
        if error_msg is None:
            error_msg = "Error resetting all custom vldb settings to default for object '{}'".format(id)
        response_handler(response, error_msg)
    return response
def set_vldb_settings(connection, id, type, name, body, project_id=None, error_msg=None):
    """Set vldb settings for one property set in one object.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        id (str): Object ID
        type (int): DssXmlTypeReportDefinition(3) for Dataset and
            DssXmlTypeDocumentDefinition(55) for document/dossier
        name: property set name
        project_id: project ID; defaults to the connection's selected project
        body: [{"name": "string",
                "value": {}}]
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        HTTP response object returned by the MicroStrategy REST server
    """
    headers = {}
    # Use the explicit project id when given, otherwise the selected project.
    if project_id:
        headers = {'X-MSTR-ProjectID': project_id}
    else:
        connection._validate_project_selected()
        headers = {'X-MSTR-ProjectID': connection.project_id}
    response = connection.session.put(url=f"{connection.base_url}/api/objects/{id}/vldb/propertySets/{name}",
                                      params={'type': type},
                                      headers=headers,
                                      json=body)
    if not response.ok:
        if error_msg is None:
            error_msg = "Error setting vldb settings for object '{}'".format(id)
        response_handler(response, error_msg)
    return response
def create_search_objects_instance(connection, name=None, pattern=4, domain=2, root=None, object_type=None, error_msg=None):
    """Create a search instance.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        name: expression used with the pattern to do the search
        pattern: specifies the nature of the search. Possible values are defined
            in the EnumDSSXMLSearchTypes javadoc
        domain: search domain. specifies the domain/scope of the search.
            Possible values are defined in the EnumDSSXMLSearchDomain javadoc
        root: folder ID of the root in which the search is done
        object_type: specifies the type of objects to be searched. Possible
            values are defined in the EnumDSSObjectType javadoc
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        HTTP response returned by the MicroStrategy REST server
    """
    # Searching requires a selected project on the connection.
    connection._validate_project_selected()
    response = connection.session.post(url=f"{connection.base_url}/api/objects",
                                       headers={'X-MSTR-ProjectID': connection.project_id},
                                       params={'name': name,
                                               'pattern': pattern,
                                               'domain': domain,
                                               'root': root,
                                               'type': object_type})
    if not response.ok:
        if error_msg is None:
            error_msg = "Error getting objects."
        response_handler(response, error_msg)
    return response
def get_objects(connection, search_id, offset=0, limit=-1, get_tree=False, error_msg=None):
    """Get list of objects from metadata.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        search_id: ID for the results of a previous search stored in I-Server
            memory
        offset: starting point within the collection of returned results. Used
            to control paging behavior.
        limit: maximum number of items returned for a single request. Used to
            control paging behavior
        get_tree: specifies that the search results should be displayed in
            a tree structure instead of a list. The ancestors of the searched
            objects are the nodes and the searched objects are the leaves of
            the tree.
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        HTTP response returned by the MicroStrategy REST server
    """
    # BUGFIX: the validation method was previously referenced without calling
    # it (`connection._validate_project_selected`), so the check never ran;
    # the async variant below calls it correctly.
    connection._validate_project_selected()
    response = connection.session.get(url=f"{connection.base_url}/api/objects",
                                      headers={'X-MSTR-ProjectID': connection.project_id},
                                      params={'searchId': search_id,
                                              'offset': offset,
                                              'limit': limit,
                                              'getTree': get_tree})
    if not response.ok:
        if error_msg is None:
            error_msg = "Error getting objects."
        response_handler(response, error_msg)
    return response
def get_objects_async(future_session, connection, search_id, offset=0, limit=-1, get_tree=False, error_msg=None):
    """Get list of objects from metadata asynchronously.

    Args:
        future_session: futures-enabled session used to issue the request
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        search_id: ID for the results of a previous search stored in I-Server
            memory
        offset: starting point within the collection of returned results. Used
            to control paging behavior.
        limit: maximum number of items returned for a single request. Used to
            control paging behavior.
        get_tree: specifies that the search results should be displayed in
            a tree structure instead of a list. The ancestors of the searched
            objects are the nodes and the searched objects are the leaves of
            the tree.
        error_msg: accepted for API symmetry with `get_objects`; currently
            unused because the future's response is handled by the caller.

    Returns:
        future wrapping the HTTP response returned by the MicroStrategy REST
        server
    """
    connection._validate_project_selected()
    url = connection.base_url + '/api/objects'
    headers = {'X-MSTR-ProjectID': connection.project_id}
    params = {'searchId': search_id,
              'offset': offset,
              'limit': limit,
              'getTree': get_tree}
    future = future_session.get(url=url, headers=headers, params=params)
    return future
| 2.46875 | 2 |
train_bjod.py | cl886699/frcnn_multigpu | 0 | 12757460 | # tf2.0目标检测之csv 2 Tfrecord
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
import random
import cv2
from tqdm import tqdm
import datetime
import os
import time
from detection.models.detectors import faster_rcnn
from bjod_data import ZiptrainDataset, Zipvaluedata
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
random.seed(234)
def save_images(image, boxes, filen, label_pre, pth=''):
    """Draw predicted boxes and labels on an image and write it as a JPEG.

    Args:
        image: image tensor (RGB); a leading batch dimension of 1 is squeezed.
        boxes: array of [x1, y1, x2, y2, score] rows; all-zero rows are skipped.
        filen: source file name; its last 4 characters are replaced by '.jpg'.
        label_pre: per-box labels, drawn at each box center.
        pth: output directory joined with the derived file name.
    """
    image = image.numpy()
    image = image.astype(np.uint8)
    if image.shape[0] == 1:
        image = np.squeeze(image, axis=0)
    # In-place RGB -> BGR conversion; OpenCV's imwrite expects BGR order.
    cv2.cvtColor(image, cv2.COLOR_RGB2BGR, image)
    n = boxes.shape[0]
    if not n:
        # Nothing to draw; the unannotated image is still written below.
        print("no instances to display ")
    for i in range(n):
        # Random color per box so adjacent detections stay distinguishable.
        color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        if not np.any(boxes[i]):
            continue
        x1, y1, x2, y2, _ = boxes[i]
        y1, x1, y2, x2 = int(y1), int(x1), int(y2), int(x2)
        cv2.rectangle(image, (x1, y1), (x2, y2), color, 2, 8, 0)
        # Label text is placed at the box center.
        cv2.putText(image, str(label_pre[i]), (int((x1 + x2) / 2), int((y1 + y2) / 2)), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    color, 1)
    filen = filen[:-4] + '.jpg'
    cv2.imwrite(os.path.join(pth, filen), image)
def voc_ap(rec, prec, use_07_metric=False):
    """Compute VOC average precision from aligned recall/precision arrays.

    Args:
        rec: 1-D array of recall values (ascending, as produced by cumsum).
        prec: 1-D array of precision values aligned with ``rec``.
        use_07_metric: if True, use the VOC2007 11-point interpolation;
            otherwise integrate the interpolated precision-recall curve.

    Returns:
        Average precision as a float.
    """
    if use_07_metric:
        # 11-point metric: average the best precision at recall >= t,
        # for t in {0.0, 0.1, ..., 1.0}.
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            mask = rec >= threshold
            p = np.max(prec[mask]) if np.sum(mask) else 0
            ap = ap + p / 11.
        return ap
    # Area-under-curve metric: pad with sentinel values at both ends.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Make precision monotonically non-increasing (the precision envelope).
    for idx in range(mpre.size - 1, 0, -1):
        mpre[idx - 1] = np.maximum(mpre[idx - 1], mpre[idx])
    # Integrate precision over the points where recall changes value.
    steps = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[steps + 1] - mrec[steps]) * mpre[steps + 1])
def eval_modle(gt_dict, pre__dict, npos, categaries):
    """Compute per-class VOC-style average precision at IoU > 0.5.

    Args:
        gt_dict: {class_id(str): {image_id(str): {'bbox': ndarray, 'det': [bool]}}}
            ground-truth boxes plus a mutable "already matched" flag per box.
            NOTE: the 'det' flags are mutated in place by this function.
        pre__dict: {class_id(str): [[image_id, x1, y1, x2, y2, score], ...]}
            predicted boxes per class.
        npos: {class_id(str): int} number of ground-truth boxes per class
            (the recall denominator).
        categaries: number of classes; class ids run from 1 to categaries.

    Returns:
        {class_id(str): AP} for every class that has at least one prediction.
    """
    result = {}
    for key in range(categaries):
        b1 = pre__dict[str(key + 1)]
        if not b1:
            # No predictions for this class: no AP entry is produced.
            continue
        image_ids = [tt[0] for tt in b1]
        confidence = np.array([tt[5] for tt in b1])
        BB = np.array([tt[1:5] for tt in b1])
        # Rank detections by descending confidence.
        sorted_ind = np.argsort(-confidence)
        BB = BB[sorted_ind, :]
        image_ids = [image_ids[x] for x in sorted_ind]
        nd = len(image_ids)
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        for d in range(nd):
            R = gt_dict[str(key + 1)][str(image_ids[d])]  # ground-truth annotation for this image/class
            bb = BB[d, :].astype(float)
            ovmax = -np.inf  # best overlap so far (starts at -infinity)
            BBGT = R['bbox'].astype(float)
            if BBGT.size > 0:
                # compute overlaps
                # intersection
                ixmin = np.maximum(BBGT[:, 0], bb[0])
                iymin = np.maximum(BBGT[:, 1], bb[1])
                ixmax = np.minimum(BBGT[:, 2], bb[2])
                iymax = np.minimum(BBGT[:, 3], bb[3])
                iw = np.maximum(ixmax - ixmin + 1., 0.)
                ih = np.maximum(iymax - iymin + 1., 0.)
                inters = iw * ih
                # union
                uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                       (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                       (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
                overlaps = inters / uni
                ovmax = np.max(overlaps)  # largest IoU against any ground truth
                jmax = np.argmax(overlaps)  # index of the best-matching ground truth
            # Count true and false positives; each ground truth may be
            # matched by at most one detection (the highest-ranked one).
            if ovmax > 0.5:
                if not R['det'][jmax]:
                    tp[d] = 1.
                    R['det'][jmax] = 1  # mark this ground truth as detected
                else:
                    fp[d] = 1.
            else:
                fp[d] = 1.
        # compute precision recall
        fp = np.cumsum(fp)  # running totals over the confidence-ranked detections
        tp = np.cumsum(tp)
        rec = tp / np.maximum(float(npos[str(key + 1)]), np.finfo(np.float64).eps)
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        ap = voc_ap(rec, prec)
        print('key+1: ', str(key + 1))
        print(ap)
        result[str(key + 1)] = ap
    return result
class DistTrainer:
    """Distributed (tf.distribute) trainer for the Faster R-CNN model.

    Runs per-replica train/test steps under the supplied strategy, SUM-reduces
    the four loss terms across replicas, logs them to TensorBoard, and
    periodically evaluates on the validation set and checkpoints the model.
    """

    def __init__(self, dis_strategy, ori_model, categaries, nu_devices, maxap=0.0, epoch=[0, 200], trian_dir=''):
        # NOTE(review): `epoch=[0, 200]` is a mutable default argument (shared
        # across instances); safe only while it is never mutated in place.
        self.dist_strategy = dis_strategy
        self.model = ori_model
        self.num_devices = nu_devices
        self.trian_dir = trian_dir
        self.epochs = epoch  # [start_epoch, end_epoch)
        # NOTE(review): `maxap` is compared against the summed validation LOSS
        # in train() (saving when loss is larger) -- the name suggests it was
        # meant to track average precision; confirm the intended behavior.
        self.maxap = maxap
        self.total_categaries = categaries
        self.optimizer = tf.keras.optimizers.SGD(1e-4, momentum=0.9, nesterov=True)

    # @tf.function
    def train_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels):
        """One per-replica forward/backward pass; returns the loss terms."""
        with tf.GradientTape() as tape:
            rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss = \
                self.model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
            loss_value = rpn_class_loss + rpn_bbox_loss + rcnn_class_loss + rcnn_bbox_loss
        grads = tape.gradient(loss_value, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss

    def dist_train_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels):
        """Run train_step on every replica and SUM-reduce each loss term."""
        per_loss_value, per_rpn_class_loss, per_rpn_bbox_loss, per_rcnn_class_loss, per_rcnn_bbox_loss = self.dist_strategy.run(
            self.train_step,
            args=(batch_imgs, batch_metas, batch_bboxes, batch_labels))
        loss_value = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_loss_value, axis=None)
        rpn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_class_loss, axis=None)
        rpn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_bbox_loss, axis=None)
        rcnn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_class_loss, axis=None)
        rcnn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_bbox_loss, axis=None)
        return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss

    # @tf.function
    def test_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels):
        """Per-replica loss evaluation (no gradient step).

        NOTE(review): the model is called with training=True even though no
        weights are updated -- presumably to obtain the loss outputs rather
        than detections; confirm this is intentional.
        """
        rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss = \
            self.model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
        loss_value = rpn_class_loss + rpn_bbox_loss + rcnn_class_loss + rcnn_bbox_loss
        return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss

    def dist_test_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels):
        """Run test_step on every replica and SUM-reduce each loss term."""
        per_loss_value, per_rpn_class_loss, per_rpn_bbox_loss, per_rcnn_class_loss, per_rcnn_bbox_loss = self.dist_strategy.run(
            self.test_step,
            args=(batch_imgs, batch_metas, batch_bboxes, batch_labels))
        loss_value = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_loss_value, axis=None)
        rpn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_class_loss, axis=None)
        rpn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_bbox_loss, axis=None)
        rcnn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_class_loss, axis=None)
        rcnn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_bbox_loss, axis=None)
        return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss

    def eval_step(self, val_dts):
        """Collect ground-truth and predicted boxes over the validation set.

        Returns (gt_bbox_dict, pre_bbox_dict, npos) in the exact shapes
        consumed by eval_modle().
        """
        gt_bbox_dict = {str(key + 1): {} for key in range(self.total_categaries)}
        pre_bbox_dict = {str(key + 1): [] for key in range(self.total_categaries)}
        # number of true positive
        npos = {str(key + 1): 0 for key in range(self.total_categaries)}
        # Synthetic image ids start at 10000 and increase per sample.
        img_ids = 10000
        for val_imgs, val_metas, val_bboxes, val_labels, val_file in tqdm(val_dts):
            # if random.randint(1, 100) > 11:
            #     continue
            val_labels = tf.squeeze(val_labels, axis=0).numpy()
            val_bboxes = tf.squeeze(val_bboxes, 0).numpy().astype(np.int)
            val_imgs = tf.squeeze(tf.cast(val_imgs, tf.float32), axis=0)
            val_metas = tf.squeeze(tf.cast(val_metas, tf.float32), axis=0)
            val_predict_bboxes = []
            # Register the ground-truth boxes of each class for this image.
            for key in range(self.total_categaries):
                tmp_box = [val_bboxes[indcc] for indcc, cc in enumerate(val_labels) if cc == key + 1]
                det = [False] * len(tmp_box)
                gt_bbox_dict[str(key + 1)][str(img_ids)] = {'bbox': np.array(tmp_box), 'det': det}
                npos[str(key + 1)] += len(tmp_box)
            # Run the two-stage detector (RPN proposals, then bbox head).
            proposals = self.model.simple_test_rpn(val_imgs, val_metas)
            res = self.model.simple_test_bboxes(val_imgs, val_metas, proposals)
            for pos in range(res['class_ids'].shape[0]):
                label_id = int(res['class_ids'][pos])
                y1, x1, y2, x2 = [int(num) for num in list(res['rois'][pos])]
                tmp_list2 = [img_ids, x1, y1, x2, y2, float(res['scores'][pos])]
                val_predict_bboxes.append([x1, y1, x2, y2, float(res['scores'][pos])])
                pre_bbox_dict[str(label_id)].append(tmp_list2)
            img_ids += 1
        return gt_bbox_dict, pre_bbox_dict, npos

    def rd_save_images(self, val_dts, img_save_path):
        """Render predictions for a ~10% random sample of the validation set."""
        for val_imgs, val_metas, _, _, val_file in tqdm(val_dts):
            if random.randint(1, 100) > 10:
                continue
            val_file = val_file.numpy()[0].decode('utf-8')
            val_imgs = tf.squeeze(tf.cast(val_imgs, tf.float32), axis=0)
            val_metas = tf.squeeze(tf.cast(val_metas, tf.float32), axis=0)
            val_predict_bboxes = []
            proposals = self.model.simple_test_rpn(val_imgs, val_metas)
            res = self.model.simple_test_bboxes(val_imgs, val_metas, proposals)
            for pos in range(res['class_ids'].shape[0]):
                y1, x1, y2, x2 = [int(num) for num in list(res['rois'][pos])]
                val_predict_bboxes.append([x1, y1, x2, y2, float(res['scores'][pos])])
            save_images(val_imgs, np.array(val_predict_bboxes), val_file, res['class_ids'], img_save_path)

    def train(self, train_ds, val_ds):
        """Main training loop: distribute datasets, log every 10 steps,
        validate and (conditionally) checkpoint every 2000 steps."""
        # train model
        train_dts = self.dist_strategy.experimental_distribute_dataset(train_ds)
        val_dts = self.dist_strategy.experimental_distribute_dataset(val_ds)
        log_dir = self.trian_dir + 'log_dir/' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        file_writer = tf.summary.create_file_writer(log_dir)
        index_step = 0
        time_start = time.time()
        input_flag = False
        for epoch in range(self.epochs[0], self.epochs[1]):
            # Accumulates [total, rpn_cls, rpn_bbox, rcnn_cls, rcnn_bbox].
            loss_history = np.zeros(5)
            for (step, inputs) in enumerate(train_dts):
                batch_imgs, batch_metas, batch_bboxes, batch_labels, filen = inputs
                # A label tensor filled with -1 marks an empty/padding batch.
                labels_tmp = tf.cast(tf.fill([1,1000], -1), tf.int32)
                if self.num_devices > 1:
                    # Skip the whole batch if any replica received padding.
                    for per_tensor in batch_labels.values:
                        if tf.equal(per_tensor, labels_tmp).numpy().all():
                            input_flag = True
                            print("skip this batch")
                            break
                        else:
                            pass
                    if input_flag:
                        input_flag = False
                        continue
                else:
                    if tf.equal(batch_labels, labels_tmp).numpy().all():
                        continue
                loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss \
                    = self.dist_train_step(batch_imgs, batch_metas, batch_bboxes, batch_labels)
                loss_history[0] += loss_value
                loss_history[1] += rpn_class_loss
                loss_history[2] += rpn_bbox_loss
                loss_history[3] += rcnn_class_loss
                loss_history[4] += rcnn_bbox_loss
                if step % 10 == 0:
                    if step:
                        # Average over the last 10 steps, print and log.
                        loss_history = loss_history / 10
                        print('time:', round(time.time() - time_start, 2), 'epoch:', epoch, ', step:', step, ', loss:',
                              loss_history)
                        time_start = time.time()
                        with file_writer.as_default():
                            tf.summary.scalar('total_loss', loss_history[0], step=index_step)
                            tf.summary.scalar('rpn_class_loss', loss_history[1], step=index_step)
                            tf.summary.scalar('rpn_bbox_loss', loss_history[2], step=index_step)
                            tf.summary.scalar('rcnn_class_loss', loss_history[3], step=index_step)
                            tf.summary.scalar('rcnn_bbox_loss', loss_history[4], step=index_step)
                        file_writer.flush()
                        index_step += 1
                        loss_history = np.zeros(5)
                    else:
                        print('epoch:', epoch, ', step:', step, ', loss:', loss_history)
                if step % 2000 == 0:
                    # Periodic validation pass over the whole val dataset.
                    weights_dir = self.trian_dir + 'weights/epoch_' + str(epoch) + '_loss_'
                    sum_loss = 0
                    for (val_step, inputs_val) in tqdm(enumerate(val_dts)):
                        batch_imgs, batch_metas, batch_bboxes, batch_labels, filen = inputs_val
                        labels_tmp = tf.cast(tf.fill([1, 1000], -1), tf.int32)
                        if self.num_devices > 1:
                            for per_tensor in batch_labels.values:
                                if tf.equal(per_tensor, labels_tmp).numpy().all():
                                    input_flag = True
                                    print("skip this batch")
                                    break
                                else:
                                    pass
                            if input_flag:
                                input_flag = False
                                continue
                        else:
                            if tf.equal(batch_labels, labels_tmp).numpy().all():
                                continue
                        loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss \
                            = self.dist_test_step(batch_imgs, batch_metas, batch_bboxes, batch_labels)
                        sum_loss += loss_value
                    print('sum_loss: ', sum_loss)
                    # NOTE(review): saves when validation loss EXCEEDS the best
                    # value so far -- looks inverted; confirm intended logic.
                    if sum_loss > self.maxap:
                        self.maxap = sum_loss
                        self.model.save_weights(weights_dir + str(tf.round(sum_loss, 2).numpy()) + '.h5')
if __name__ == '__main__':
    # Per-replica batch size; the effective batch size scales with devices.
    PER_GPU_BATCHSIZE = 1
    dist_strategy = tf.distribute.MirroredStrategy(
        cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()
    )
    num_devices = dist_strategy.num_replicas_in_sync
    print('Number of devices: {}'.format(num_devices))
    GLOBAL_BATCHSIZE = num_devices * PER_GPU_BATCHSIZE
    with dist_strategy.scope():
        if os.name == 'nt':
            tf_record_path = 'D:/datasets/bjod/'
            train_dir = './train_dir/'
        else:
            tf_record_path = '../../../../../datasets/bjod/'
            train_dir = './train_dir/'
        crop_size = [992, 992, 3]
        # NOTE(review): roi_path is hard-coded to a Windows path even on the
        # non-Windows branch above -- confirm the intended location.
        train_datasets = ZiptrainDataset(tf_record_path, 1, 96, crop_size=crop_size,
                                         roi_path='D:/datasets/bjod/roi_test/').prepare(True)
        val_train = Zipvaluedata(tf_record_path, crop_size=crop_size).prepare('train_api_97.record')
        val_test = Zipvaluedata(tf_record_path, crop_size=crop_size).prepare('val_api_19.record')
        # Build both models once with a real sample so all weights are created
        # before any weight transfer happens.
        one_imgs, one_metas, one_bboxes, one_labels, _ = next(iter(val_train))
        one_imgs = tf.expand_dims(tf.cast(one_imgs[0], tf.float32), axis=0)
        one_metas = tf.expand_dims(tf.cast(one_metas[0], tf.float32), axis=0)
        model = faster_rcnn.FasterRCNN(num_classes=2)
        _ = model((one_imgs, one_metas), training=False)
        model_ori = faster_rcnn.FasterRCNN(num_classes=81)
        _ = model_ori((one_imgs, one_metas), training=False)
        model_ori.load_weights('./weights/faster_rcnn_resnet101_fpn_coco2017_map35.h5',
                               by_name=True)
        # Transfer the COCO-pretrained weights for every stage except the
        # class-specific head (2 classes here vs 81 on COCO).
        model.backbone.set_weights(model_ori.backbone.get_weights())
        model.neck.set_weights(model_ori.neck.get_weights())
        model.rpn_head.set_weights(model_ori.rpn_head.get_weights())
        model.roi_align.set_weights(model_ori.roi_align.get_weights())
        # print(cc)
        model.summary()
        # BUG FIX: removed a stray module-scope copy of DistTrainer.__init__
        # that had been pasted here (it referenced `self` and was never called).
        # NOTE(review): nu_devices is hard-coded to 1 even though num_devices
        # replicas may be in sync -- confirm.
        trainer = DistTrainer(dis_strategy=dist_strategy,
                              ori_model=model,
                              categaries=2,
                              nu_devices=1,
                              maxap=0.0,
                              epoch=[0, 200],
                              trian_dir=train_dir
                              )
        trainer.train(train_datasets, val_test)
| 2.25 | 2 |
lyricscraper/genius.py | galenguyer/azlyrics-scraper | 0 | 12757461 | import os
import argparse
import json
import sys
import re
import requests
from json import JSONEncoder
from bs4 import BeautifulSoup
class Song:
    """Container for a single song's metadata and lyrics."""

    def __init__(self, title: str, artist: str, album: str, release: str, lyrics: str, url: str):
        """Store the given attributes on the instance, in declaration order."""
        names = ("title", "artist", "album", "release", "lyrics", "url")
        values = (title, artist, album, release, lyrics, url)
        for name, value in zip(names, values):
            setattr(self, name, value)
class SongEncoder(JSONEncoder):
    """JSON encoder that serializes arbitrary objects via their attribute dict."""

    def default(self, o):
        # Fall back to the instance's attribute dictionary instead of raising.
        return vars(o)
class SearchResult:
    """One hit from the genius.com song-search API response."""

    def __init__(self, result):
        """Extract link, title and artist from a raw search hit dict."""
        hit = result['result']

        def _ascii(text):
            # Drop any non-ASCII characters from the API strings.
            return text.encode('ascii', 'ignore').decode("utf-8")

        self.link = _ascii(hit['url'])
        self.title = _ascii(hit['title'])
        self.artist = _ascii(hit['primary_artist']['name'])

    def __str__(self):
        return f'{self.title} by {self.artist}'
def eprint(*args, **kwargs):
    """
    Print the given message to stderr instead of stdout.

    Accepts the same positional and keyword arguments as the built-in
    ``print`` (except ``file``, which is fixed to ``sys.stderr``).
    """
    print(*args, file=sys.stderr, **kwargs)
def search(term: str) -> str:
    """
    Search genius.com for a term and return the lyrics-page URL chosen by
    the user.

    Args:
        term: Free-text search query (artist, song title, ...).

    Returns:
        URL of the selected song's lyrics page, or None when the search
        request fails. Exits the process when no results are found.
    """
    original_term = term
    # Normalize the query: keep alphanumerics/spaces, then encode spaces as '+'.
    term = re.sub('[^a-zA-Z0-9 ]+', '', term).strip()
    term = re.sub(' ', '+', term)
    search_page = requests.get(f'https://genius.com/api/search/song?page=1&q={term}')
    if search_page.status_code != 200:
        eprint(f'Status code {search_page.status_code} for search term "{original_term}" indicates failure')
        return None
    parsed_page = json.loads(search_page.text)
    search_results = parsed_page['response']['sections'][0]['hits']
    results = [SearchResult(result) for result in search_results]
    if len(results) == 0:
        eprint(f'No songs found for query {original_term}')
        sys.exit(1)
    # BUG FIX: compare with '==' rather than identity ('is') against an int
    # literal -- 'is 1' only worked via CPython small-int caching and raises
    # a SyntaxWarning on Python 3.8+.
    if len(results) == 1:
        print(f'Only result found is {results[0]}')
        return results[0].link
    # Show at most the first 15 results and let the user pick one.
    for num in range(1, min(16, len(results)+1)):
        print(f'{num}. {results[num-1]}')
    result = results[int(input('Select a number: '))-1]
    return result.link
def download_url(url: str):
    """
    Retrieve the page contents and parse out the lyrics from a given url.

    Returns a Song on success, or None when the URL is not a genius.com
    lyrics page or the HTTP request fails.
    """
    if not url.startswith('https://genius.com/'):
        eprint(f'URL "{url}" does not appear to be a valid genius lyrics url')
        return None
    result = requests.get(url)
    if result.status_code != 200:
        eprint(f'Status code {result.status_code} for url "{url}" indicates failure')
        return None
    # Normalize curly quotes to plain apostrophes before parsing.
    parsed_page = BeautifulSoup(result.text.replace(u"\u2018", "'").replace(u"\u2019", "'"), 'html.parser')
    song_lyrics = parsed_page.find_all('div', attrs={'class': 'lyrics'})[0].text.strip()
    # The song metadata lives in an inline JS assignment to TRACKING_DATA;
    # grab that line, take the RHS of '=', and parse it as JSON.
    song_data = json.loads([line for line in result.text.split('\n') if 'TRACKING_DATA' in line][0].split('=')[1].strip(' ;'))
    # Strip non-ASCII characters from all metadata fields.
    song_artist = song_data['Primary Artist'].encode('ascii', 'ignore').decode("utf-8")
    song_title = song_data['Title'].encode('ascii', 'ignore').decode("utf-8")
    song_album = (song_data['Primary Album'] if song_data['Primary Album'] is not None else 'Unknown Album').encode('ascii', 'ignore').decode("utf-8")
    song_release = song_data['Release Date'].encode('ascii', 'ignore').decode("utf-8")
    song = Song(title=song_title, artist=song_artist, album=song_album, lyrics=song_lyrics, url=url, release=song_release)
    return song
def save_to_file(song: Song):
    """
    Serialize *song* as JSON to ./lyrics/genius_<title>_<artist>.json.

    Non-alphanumeric characters are dropped from the title/artist and
    spaces become dashes. The ./lyrics directory is created if needed.
    """
    filename = './lyrics/genius_'
    for c in song.title.lower():
        if c.isalpha() or c.isdigit():
            filename = filename + c
        # BUG FIX: compare characters with '==' rather than identity ('is');
        # 'is' on str literals is implementation-dependent interning behavior.
        if c == ' ':
            filename = filename + '-'
    filename = filename + '_'
    for c in song.artist.lower():
        if c.isalpha() or c.isdigit():
            filename = filename + c
        if c == ' ':
            filename = filename + '-'
    filename = filename + '.json'
    if not os.path.isdir('./lyrics'):
        os.mkdir('./lyrics')
    # Context manager guarantees the handle is closed even if dumping fails.
    with open(filename, 'w') as f:
        json.dump(song, f, indent=4, cls=SongEncoder)
    print('Lyrics saved to ' + filename)
def main():
    """CLI entry point: search or fetch a genius.com lyrics page, then save or print it."""
    parser = argparse.ArgumentParser(description='Scraper for lyrics from genius.com')
    parser.add_argument('term', metavar='TERM', help='Term to search for', nargs='+')
    # store_false: args.no_save defaults to True; passing --no-save disables saving.
    parser.add_argument('--no-save', help='Whether or not to save the data to a file', action='store_false')
    args = parser.parse_args()
    if args.term is not None:
        term = ' '.join(args.term)
        # A full lyrics URL bypasses the interactive search.
        if term.startswith('https://genius.com/'):
            song = download_url(term)
        else:
            song = download_url(search(term))
        if args.no_save:
            save_to_file(song)
        else:
            print('Title: ' + song.title)
            print('Artist: ' + song.artist)
            print('Album: ' + song.album + '\n')
            print(song.lyrics)
    else:
        eprint('No URL given, doing nothing')


if __name__ == '__main__':
    main()
| 3.03125 | 3 |
moses/vae/trainer.py | rtriangle/moses | 2 | 12757462 | <gh_stars>1-10
import numpy as np
import torch
import torch.optim as optim
import tqdm
from torch.nn.utils import clip_grad_norm_
from moses.vae.misc import CosineAnnealingLRWithRestart, KLAnnealer, \
Logger
class VAETrainer:
    """Training loop for the molecular VAE with KL annealing and a cosine
    (SGDR-style) learning-rate schedule with warm restarts."""

    def __init__(self, config):
        # config provides lr_start, lr_n_period, lr_n_mult, lr_n_restarts,
        # n_last, grad_clipping, kl annealing settings and model_save path.
        self.config = config

    def fit(self, model, data):
        """Train `model` on `data`; returns (epoch_logger, iteration_logger).

        NOTE(review): if `data` yields no batches, the epoch-level logging
        below references loop-local names and would raise NameError -- assumed
        non-empty data.
        """
        def get_params():
            # Fresh generator each call (generators are single-use).
            return (p for p in model.vae.parameters() if p.requires_grad)
        model.train()
        n_epoch = self._n_epoch()
        kl_annealer = KLAnnealer(n_epoch, self.config)
        optimizer = optim.Adam(get_params(), lr=self.config.lr_start)
        lr_annealer = CosineAnnealingLRWithRestart(optimizer, self.config)
        n_last = self.config.n_last
        elog, ilog = Logger(), Logger()
        for epoch in range(n_epoch):
            # Epoch start: KL weight for this epoch from the annealing schedule.
            kl_weight = kl_annealer(epoch)
            # Iters
            T = tqdm.tqdm(data)
            for i, x in enumerate(T):
                # Forward
                kl_loss, recon_loss = model(x)
                loss = kl_weight * kl_loss + recon_loss
                # Backward
                optimizer.zero_grad()
                loss.backward()
                clip_grad_norm_(get_params(), self.config.grad_clipping)
                optimizer.step()
                # Log the raw per-iteration values.
                lr = optimizer.param_groups[0]['lr']
                ilog.append({
                    'epoch': epoch,
                    'kl_loss': kl_loss.item(),
                    'recon_loss': recon_loss.item(),
                    'loss': loss.item(),
                    'kl_weight': kl_weight,
                    'lr': lr
                })
                # Update the progress bar with means over the last n_last iters.
                kl_loss_value = np.mean(ilog['kl_loss'][-n_last:])
                recon_loss_value = np.mean(ilog['recon_loss'][-n_last:])
                loss_value = np.mean(ilog['loss'][-n_last:])
                postfix = [f'loss={loss_value:.5f}',
                           f'(kl={kl_loss_value:.5f}',
                           f'recon={recon_loss_value:.5f})',
                           f'klw={kl_weight:.5f} lr={lr:.5f}']
                T.set_postfix_str(' '.join(postfix))
                T.set_description(f'Train (epoch #{epoch})')
                T.refresh()
            # Epoch-level log: last iteration's metadata + smoothed losses.
            elog.append({
                **{k: v for k, v in ilog[-1].items() if 'loss' not in k},
                'kl_loss': kl_loss_value,
                'recon_loss': recon_loss_value,
                'loss': loss_value
            })
            # Save model at each epoch
            torch.save(model.state_dict(), self.config.model_save)
            # Epoch end: advance the LR schedule.
            lr_annealer.step()
        return elog, ilog

    def _n_epoch(self):
        # Total epochs across all cosine restarts: sum of period * mult^i.
        return sum(
            self.config.lr_n_period * (self.config.lr_n_mult ** i)
            for i in range(self.config.lr_n_restarts)
        )
| 1.992188 | 2 |
MyAdventureHeader.py | DidymusRex/PiCraft | 0 | 12757463 | <gh_stars>0
#! /usr/bin/env python
import mcpi.minecraft as minecraft
import mcpi.block as block
# Connect to the locally running Minecraft: Pi Edition game instance.
mc = minecraft.Minecraft.create()
| 1.304688 | 1 |
python/perspective/perspective/tests/core/test_plugin.py | mcleo-d/perspective | 0 | 12757464 | ################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from pytest import raises
from perspective import PerspectiveError, PerspectiveViewer,\
PerspectiveWidget, Plugin
class TestPlugin:
    """Tests that Plugin enum members round-trip through the `plugin`
    attribute of PerspectiveWidget and PerspectiveViewer, and that invalid
    plugin names are rejected."""

    def test_plugin_widget_load_grid(self):
        """Plugin.GRID maps to the 'datagrid' plugin name."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data, plugin=Plugin.GRID)
        assert widget.plugin == "datagrid"

    def test_plugin_widget_load(self):
        """A chart plugin passed at construction is stored by its value."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data, plugin=Plugin.XBAR)
        assert widget.plugin == "X Bar"

    def test_plugin_widget_setattr(self):
        """Assigning a Plugin member after construction stores its value."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data)
        widget.plugin = Plugin.XBAR
        assert widget.plugin == "X Bar"

    def test_plugin_widget_load_invalid(self):
        """An unknown plugin name at construction raises PerspectiveError."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        with raises(PerspectiveError):
            PerspectiveWidget(data, plugin="?")

    def test_plugin_widget_setattr_invalid(self):
        """An unknown plugin name on assignment raises PerspectiveError."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data)
        with raises(PerspectiveError):
            widget.plugin = "?"

    def test_plugin_widget_init_all(self):
        """Every Plugin member is accepted at widget construction."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        for plugin in Plugin:
            widget = PerspectiveWidget(data, plugin=plugin)
            assert widget.plugin == plugin.value

    def test_plugin_widget_set_all(self):
        """Every Plugin member is accepted on widget assignment."""
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data)
        for plugin in Plugin:
            widget.plugin = plugin
            assert widget.plugin == plugin.value

    def test_plugin_viewer_load(self):
        """Viewer accepts a Plugin member at construction."""
        viewer = PerspectiveViewer(plugin=Plugin.XBAR)
        assert viewer.plugin == "X Bar"

    def test_plugin_viewer_setattr(self):
        """Viewer accepts a Plugin member on assignment."""
        viewer = PerspectiveViewer()
        viewer.plugin = Plugin.XBAR
        assert viewer.plugin == "X Bar"

    def test_plugin_viewer_init_all(self):
        """Every Plugin member is accepted at viewer construction."""
        for plugin in Plugin:
            viewer = PerspectiveViewer(plugin=plugin)
            assert viewer.plugin == plugin.value

    def test_plugin_viewer_set_all(self):
        """Every Plugin member is accepted on viewer assignment."""
        viewer = PerspectiveViewer()
        for plugin in Plugin:
            viewer.plugin = plugin
            assert viewer.plugin == plugin.value
| 2.359375 | 2 |
tantum/loss/loss.py | dmitryshendryk/tantum | 0 | 12757465 | <reponame>dmitryshendryk/tantum
import torch
from torch import nn
from torch.nn import functional as F
def compute_normalization_binary_search(activations, t, num_iters):
    """Returns the normalization value for each example (t < 1.0).

    Bisects for the log-partition Z with sum_i exp_t(a_i - Z) == 1; valid
    because that sum is monotone in Z.

    Args:
      activations: A multi-dimensional tensor with last dimension `num_classes`.
      t: Temperature 2 (< 1.0 for finite support).
      num_iters: Number of iterations to run the method.
    Return: A tensor of same rank as activation with the last dimension being 1.
    """
    # Shift by the per-example max for numerical stability.
    mu, _ = torch.max(activations, -1, keepdim=True)
    normalized_activations = activations - mu
    # Count classes whose tempered exponential is non-zero (finite support).
    effective_dim = \
        torch.sum(
            (normalized_activations > -1.0 / (1.0-t)).to(torch.int32),
            dim=-1, keepdim=True).to(activations.dtype)
    shape_partition = activations.shape[:-1] + (1,)
    # Bisection bounds on the (shifted) log-partition.
    lower = torch.zeros(shape_partition, dtype=activations.dtype, device=activations.device)
    upper = -log_t(1.0/effective_dim, t) * torch.ones_like(lower)
    for _ in range(num_iters):
        logt_partition = (upper + lower)/2.0
        sum_probs = torch.sum(
            exp_t(normalized_activations - logt_partition, t),
            dim=-1, keepdim=True)
        # Per-example: if probabilities sum below 1, the midpoint replaces
        # the lower bound, otherwise the upper bound.
        update = (sum_probs < 1.0).to(activations.dtype)
        lower = torch.reshape(
            lower * update + (1.0-update) * logt_partition,
            shape_partition)
        upper = torch.reshape(
            upper * (1.0 - update) + update * logt_partition,
            shape_partition)
    logt_partition = (upper + lower)/2.0
    # Undo the max-shift to return the normalizer in the original scale.
    return logt_partition + mu
def log_t(u, t):
    """Tempered logarithm: log_t(u) = (u^(1-t) - 1) / (1-t); t = 1 gives ln(u)."""
    if t == 1.0:
        return u.log()
    one_minus_t = 1.0 - t
    return (u.pow(one_minus_t) - 1.0) / one_minus_t
def exp_t(u, t):
    """Tempered exponential: exp_t(u) = relu(1 + (1-t)u)^(1/(1-t)); t = 1 gives e^u."""
    if t == 1:
        return u.exp()
    one_minus_t = 1.0 - t
    # The relu clips negative bases to zero (finite support for t < 1).
    base = (1.0 + one_minus_t * u).relu()
    return base.pow(1.0 / one_minus_t)
def compute_normalization_fixed_point(activations, t, num_iters):
    """Returns the normalization value for each example (t > 1.0).

    Args:
      activations: A multi-dimensional tensor with last dimension `num_classes`.
      t: Temperature 2 (> 1.0 for tail heaviness).
      num_iters: Number of iterations to run the method.
    Return: A tensor of same shape as activation with the last dimension being 1.
    """
    # Shift by the per-example max for numerical stability.
    mu, _ = torch.max(activations, -1, keepdim=True)
    normalized_activations_step_0 = activations - mu
    normalized_activations = normalized_activations_step_0
    # Fixed-point iteration: rescale the activations by the current partition
    # estimate, then recompute the partition sum.
    for _ in range(num_iters):
        logt_partition = torch.sum(
            exp_t(normalized_activations, t), -1, keepdim=True)
        normalized_activations = normalized_activations_step_0 * \
            logt_partition.pow(1.0-t)
    # One final partition evaluation, then convert the partition value to the
    # additive normalization constant and undo the max-shift.
    logt_partition = torch.sum(
        exp_t(normalized_activations, t), -1, keepdim=True)
    normalization_constants = - log_t(1.0 / logt_partition, t) + mu
    return normalization_constants
class ComputeNormalization(torch.autograd.Function):
    """
    Class implementing custom backward pass for compute_normalization.
    See compute_normalization.
    """
    @staticmethod
    def forward(ctx, activations, t, num_iters):
        # Pick the solver by temperature: bisection for t < 1 (finite
        # support), fixed-point iteration for t >= 1.
        if t < 1.0:
            normalization_constants = compute_normalization_binary_search(activations, t, num_iters)
        else:
            normalization_constants = compute_normalization_fixed_point(activations, t, num_iters)
        ctx.save_for_backward(activations, normalization_constants)
        ctx.t=t
        return normalization_constants
    @staticmethod
    def backward(ctx, grad_output):
        activations, normalization_constants = ctx.saved_tensors
        t = ctx.t
        normalized_activations = activations - normalization_constants
        probabilities = exp_t(normalized_activations, t)
        # The gradient of the normalizer w.r.t. activations is the "escort"
        # distribution p^t / sum(p^t).
        escorts = probabilities.pow(t)
        escorts = escorts / escorts.sum(dim=-1, keepdim=True)
        grad_input = escorts * grad_output
        # No gradients for the non-tensor inputs t and num_iters.
        return grad_input, None, None
def compute_normalization(activations, t, num_iters=5):
    """Returns the normalization value for each example.

    Differentiable: the backward pass is implemented by
    ComputeNormalization via the escort distribution.

    Args:
      activations: A multi-dimensional tensor with last dimension `num_classes`.
      t: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
      num_iters: Number of iterations to run the method.
    Return: A tensor of same rank as activation with the last dimension being 1.
    """
    return ComputeNormalization.apply(activations, t, num_iters)
def tempered_sigmoid(activations, t, num_iters = 5):
    """Tempered sigmoid function.
    Args:
      activations: Activations for the positive class for binary classification.
      t: Temperature tensor > 0.0.
      num_iters: Number of iterations to run the method.
    Returns:
      A probabilities tensor.
    """
    # Reduce to a two-class tempered softmax over [activation, 0] per
    # example, then take the positive-class probability.
    internal_activations = torch.stack([activations,
                                        torch.zeros_like(activations)],
                                       dim=-1)
    internal_probabilities = tempered_softmax(internal_activations, t, num_iters)
    return internal_probabilities[..., 0]
def tempered_softmax(activations, t, num_iters=5):
    """Tempered softmax function.
    Args:
      activations: A multi-dimensional tensor with last dimension `num_classes`.
      t: Temperature > 1.0.
      num_iters: Number of iterations to run the method.
    Returns:
      A probabilities tensor.
    """
    if t == 1.0:
        # Temperature 1 recovers the ordinary softmax.
        return activations.softmax(dim=-1)
    normalization_constants = compute_normalization(activations, t, num_iters)
    return exp_t(activations - normalization_constants, t)
def bi_tempered_binary_logistic_loss(activations,
                                     labels,
                                     t1,
                                     t2,
                                     label_smoothing = 0.0,
                                     num_iters=5,
                                     reduction='mean'):
    """Bi-Tempered binary logistic loss.
    Args:
      activations: A tensor containing activations for class 1.
      labels: A tensor with shape as activations, containing probabilities for class 1
      t1: Temperature 1 (< 1.0 for boundedness).
      t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
      label_smoothing: Label smoothing
      num_iters: Number of iterations to run the method.
    Returns:
      A loss tensor.
    """
    # Lift the binary problem to two classes: [activation, 0] and
    # [p, 1 - p], then delegate to the multiclass loss.
    internal_activations = torch.stack([activations,
                                        torch.zeros_like(activations)],
                                       dim=-1)
    internal_labels = torch.stack([labels.to(activations.dtype),
                                   1.0 - labels.to(activations.dtype)],
                                  dim=-1)
    return bi_tempered_logistic_loss(internal_activations,
                                     internal_labels,
                                     t1,
                                     t2,
                                     label_smoothing = label_smoothing,
                                     num_iters = num_iters,
                                     reduction = reduction)
def bi_tempered_logistic_loss(activations,
                              labels,
                              t1,
                              t2,
                              label_smoothing=0.0,
                              num_iters=5,
                              reduction = 'mean'):
    """Bi-Tempered Logistic Loss.
    Args:
      activations: A multi-dimensional tensor with last dimension `num_classes`.
      labels: A tensor with shape and dtype as activations (onehot),
        or a long tensor of one dimension less than activations (pytorch standard)
      t1: Temperature 1 (< 1.0 for boundedness).
      t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
      label_smoothing: Label smoothing parameter between [0, 1). Default 0.0.
      num_iters: Number of iterations to run the method. Default 5.
      reduction: ``'none'`` | ``'mean'`` | ``'sum'``. Default ``'mean'``.
        ``'none'``: No reduction is applied, return shape is shape of
        activations without the last dimension.
        ``'mean'``: Loss is averaged over minibatch. Return shape (1,)
        ``'sum'``: Loss is summed over minibatch. Return shape (1,)
    Returns:
      A loss tensor.
    """
    if len(labels.shape)<len(activations.shape): # class-index labels: expand to one-hot
        labels_onehot = torch.zeros_like(activations)
        labels_onehot.scatter_(1, labels[..., None], 1)
    else:
        labels_onehot = labels
    if label_smoothing > 0:
        # Standard label smoothing: shrink the hot class, spread the rest.
        num_classes = labels_onehot.shape[-1]
        labels_onehot = ( 1 - label_smoothing * num_classes / (num_classes - 1) ) \
                * labels_onehot + \
                label_smoothing / (num_classes - 1)
    probabilities = tempered_softmax(activations, t2, num_iters)
    # Tempered Bregman divergence between labels and probabilities; the
    # 1e-10 guards log_t against zero labels.
    loss_values = labels_onehot * log_t(labels_onehot + 1e-10, t1) \
            - labels_onehot * log_t(probabilities, t1) \
            - labels_onehot.pow(2.0 - t1) / (2.0 - t1) \
            + probabilities.pow(2.0 - t1) / (2.0 - t1)
    loss_values = loss_values.sum(dim = -1) # sum over classes
    if reduction == 'none':
        return loss_values
    if reduction == 'sum':
        return loss_values.sum()
    if reduction == 'mean':
        return loss_values.mean()
class BiTemperedLogisticLoss(nn.Module):
    """nn.Module wrapper around bi_tempered_logistic_loss with fixed
    temperatures and label smoothing, reduced to a batch mean."""
    def __init__(self, t1, t2, smoothing=0.0):
        super(BiTemperedLogisticLoss, self).__init__()
        self.t1 = t1  # temperature 1 (< 1.0 for boundedness)
        self.t2 = t2  # temperature 2 (> 1.0 for tail heaviness)
        self.smoothing = smoothing
    def forward(self, logit_label, truth_label):
        """Compute the per-example bi-tempered loss, averaged over the batch."""
        loss_label = bi_tempered_logistic_loss(
            logit_label, truth_label,
            t1=self.t1, t2=self.t2,
            label_smoothing=self.smoothing,
            reduction='none'
        )
        loss_label = loss_label.mean()
        return loss_label
class TaylorSoftmax(nn.Module):
    """Softmax variant that replaces exp(x) with its order-`n` Taylor polynomial.

    Autograd-compatible: built entirely from differentiable tensor ops.
    """

    def __init__(self, dim=1, n=2):
        """dim: axis to normalize over; n: Taylor expansion order (must be even)."""
        super(TaylorSoftmax, self).__init__()
        assert n % 2 == 0  # even order keeps the polynomial non-negative
        self.dim = dim
        self.n = n

    def forward(self, x):
        '''
        usage similar to nn.Softmax:
            >>> mod = TaylorSoftmax(dim=1, n=4)
            >>> inten = torch.randn(1, 32, 64, 64)
            >>> out = mod(inten)
        '''
        # Evaluate 1 + x + x^2/2! + ... + x^n/n! term by term.
        poly = torch.ones_like(x)
        factorial = 1.
        for order in range(1, self.n + 1):
            factorial *= order
            poly = poly + x.pow(order) / factorial
        # Normalize along the chosen dimension.
        return poly / poly.sum(dim=self.dim, keepdims=True)
class TaylorCrossEntropyLoss(nn.Module):
    """Cross entropy on TaylorSoftmax log-probabilities with label smoothing.

    NOTE(review): `LabelSmoothing` is neither defined nor imported in this
    module -- instantiating this class raises NameError unless it is provided
    elsewhere. `cfg` and `ignore_index` are accepted but unused here. Confirm.
    """
    def __init__(self, cfg, n=2, ignore_index=-1, reduction='mean', smoothing=0.2):
        super(TaylorCrossEntropyLoss, self).__init__()
        assert n % 2 == 0
        self.taylor_softmax = TaylorSoftmax(dim=1, n=n)
        self.reduction = reduction
        self.ignore_index = ignore_index
        self.lab_smooth = LabelSmoothing(smoothing=smoothing)
    def forward(self, logits, labels):
        """Feed Taylor-softmax log-probabilities to the smoothing criterion."""
        log_probs = self.taylor_softmax(logits).log()
        loss = self.lab_smooth(log_probs, labels)
        return loss
def mse_with_softmax(logit1, logit2):
    """Mean-squared error between the class-dim (dim=1) softmaxes of two logit tensors."""
    assert logit1.size() == logit2.size()
    probs1 = F.softmax(logit1, 1)
    probs2 = F.softmax(logit2, 1)
    return F.mse_loss(probs1, probs2)
def get_criterion(cfg, device):
    """Build the training criterion named by ``cfg.criterion`` and move it to ``device``.

    Raises:
        ValueError: if ``cfg.criterion`` names an unknown loss.  Previously the
            function fell through and raised ``UnboundLocalError`` on the
            ``return`` line, hiding the real configuration mistake.
    """
    if cfg.criterion == 'CrossEntropyLoss':
        criterion = nn.CrossEntropyLoss().to(device)
    elif cfg.criterion == 'LabelSmoothing':
        criterion = LabelSmoothing(smoothing=cfg.smoothing).to(device)
    elif cfg.criterion == 'FocalLoss':
        criterion = FocalLoss().to(device)
    elif cfg.criterion == 'FocalCosineLoss':
        criterion = FocalCosineLoss().to(device)
    elif cfg.criterion == 'SymmetricCrossEntropyLoss':
        criterion = SymmetricCrossEntropy().to(device)
    elif cfg.criterion == 'BiTemperedLoss':
        criterion = BiTemperedLogisticLoss(t1=cfg.t1, t2=cfg.t2, smoothing=cfg.smoothing).to(device)
    elif cfg.criterion == 'TaylorCrossEntropyLoss':
        criterion = TaylorCrossEntropyLoss(smoothing=cfg.smoothing).to(device)
    elif cfg.criterion == 'BCEWithLogitsLoss':
        criterion = nn.BCEWithLogitsLoss().to(device)
    else:
        raise ValueError("Unknown criterion: {!r}".format(cfg.criterion))
    return criterion
avatar2/targets/dmon_target.py | Bounti/avatar2_dmon | 0 | 12757466 | <filename>avatar2/targets/dmon_target.py
from avatar2.targets import Target, TargetStates
from avatar2.protocols.xsdb import XSDBProtocol
from avatar2.protocols.gdb import GDBProtocol
from avatar2.watchmen import watch
class DMonTarget(Target):
    '''
    dmon is a framework to perform security testing of real world
    firmware programs. The dmon target and protocol allow Avatar2
    to interact with the high performance dmon emulation platform.
    For more information, please visit:
        https://github.com/Bounti/dmon
    '''
    def __init__(self, avatar,
                 ps7_init_file,
                 hdf_file,
                 firmware,
                 processor='cortex-m3',
                 gdb_executable=None, gdb_additional_args=None,
                 gdb_ip='127.0.0.1', gdb_port=3333,
                 gdb_verbose_mi=False,
                 enable_init_files=False,
                 arguments=None,
                 **kwargs):
        # ps7_init_file: Xilinx ps7 initialisation TCL sourced into XSDB.
        # hdf_file: hardware definition loaded with ``loadhw``.
        # firmware: ELF/binary downloaded to the target with ``dow``.
        super(DMonTarget, self).__init__(avatar, **kwargs)
        self.processor = processor
        self._ps7_init_file = ps7_init_file
        self._hdf_file = hdf_file
        self._firmware = firmware
        # Fall back to the architecture's default gdb if none was given.
        self.gdb_executable = (gdb_executable if gdb_executable is not None
                               else self._arch.get_gdb_executable())
        self.gdb_additional_args = gdb_additional_args if gdb_additional_args else []
        self.gdb_ip = gdb_ip
        self.gdb_port = gdb_port
        self._arguments = arguments
        self._enable_init_files = enable_init_files
        self._verbose_gdbmi = gdb_verbose_mi
    @watch('TargetInit')
    def init(self):
        """Bring up the board over XSDB, download the firmware, then attach gdb.

        Raises:
            Exception: if the processor is unsupported or either the XSDB or
                gdb connection fails.
        """
        # NOTE(review): only 'cortex-m3' is handled, yet the commands below
        # target a Zynq APU over a hard-coded Digilent Zed cable — confirm
        # the processor/board pairing is intended.
        if self.processor == 'cortex-m3':
            dmon = XSDBProtocol(avatar=self.avatar, origin=self,
                    output_directory=self.avatar.output_directory)
            gdb = GDBProtocol(gdb_executable=self.gdb_executable,
                              arch=self._arch,
                              additional_args=self.gdb_additional_args,
                              avatar=self.avatar, origin=self,
                              enable_init_files=self._enable_init_files,
                              local_arguments=self._arguments,
                              verbose=self._verbose_gdbmi)
        else:
            dmon = None
            self.log.warning("Target board not implemented")
            raise Exception("Target board not implemented")
        if dmon.connect():
            #dmon.execute_command("connect -url tcp:127.0.0.1:3121")
            # Board bring-up sequence: source ps7 init, select the APU target,
            # load the hardware definition, run ps7_init/ps7_post_config, then
            # reset the core and download the firmware before resuming.
            dmon.execute_command("source %s" % self._ps7_init_file)
            dmon.execute_command("targets -set -nocase -filter {name =~\"APU*\" && jtag_cable_name =~ \"Digilent Zed 210248A398A9\"} -index 0")
            dmon.execute_command("loadhw -hw %s -mem-ranges [list {0x40000000 0xbfffffff}]" % self._hdf_file)
            dmon.execute_command("configparams force-mem-access 1")
            dmon.execute_command("targets -set -nocase -filter {name =~\"APU*\" && jtag_cable_name =~ \"Digilent Zed 210248A398A9\"} -index 0")
            dmon.execute_command("stop")
            dmon.execute_command("ps7_init")
            dmon.execute_command("ps7_post_config")
            dmon.execute_command("targets -set -nocase -filter {name =~ \"ARM*#0\" && jtag_cable_name =~ \"Digilent Zed 210248A398A9\"} -index 0")
            dmon.execute_command("rst -processor")
            dmon.execute_command("targets -set -nocase -filter {name =~ \"ARM*#0\" && jtag_cable_name =~ \"Digilent Zed 210248A398A9\"} -index 0")
            dmon.execute_command("dow %s" % self._firmware)
            dmon.execute_command("configparams force-mem-access 0")
            #dmon.reset()
            dmon.execute_command("con")
            self.log.info("Connected to Target")
        else:
            self.log.warning("Connecting failed")
            raise Exception("Connecting to target failed")
        if gdb.remote_connect(ip=self.gdb_ip, port=self.gdb_port):
            self.log.info("Connected to Target")
        else:
            self.log.warning("Connecting failed")
            raise Exception("Connecting to target failed")
        self.update_state(TargetStates.STOPPED)
        #if dmon.stop():
        #    self.update_state(TargetStates.STOPPED)
        #self.protocols.set_all(dmon)
        # All avatar2 protocol roles are served by gdb; XSDB was only used
        # for bring-up.
        self.protocols.set_all(gdb)
        self.protocols.monitor = gdb
        #self.wait()
    def reset(self):
        """Reset the target through the active execution protocol (gdb)."""
        return self.protocols.execution.reset()
    #@watch('TargetWait')
    #def wait(self, state=TargetStates.STOPPED):
    #    return self.protocols.execution.wait(state)
| 2.484375 | 2 |
apps/tool/views/ftp_views.py | ranyong1997/Sakura_Infinity | 1 | 12757467 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import os
from django.http import FileResponse
from django.shortcuts import render
from django.views import View
from util.loginmixin import LoginMixin
from util.ftp_download import FtpDownload, zipDir, replace_char
from util.loggers import logger
from Sakura_Infinity.settings import FTP_PATH
# FTP download view
class FtpView(LoginMixin, View):
    """Download a file (or a zipped directory) from a user-specified FTP server."""
    def get(self, request):
        """Handle GET: read FTP credentials/path from the query string and
        stream the requested content back as an attachment."""
        try:
            ftp_host = request.GET.get('ftp_host')
            ftp_port = request.GET.get('ftp_port')
            ftp_name = request.GET.get('ftp_name')
            ftp_pwd = request.GET.get('ftp_pwd')
            ftp_file = request.GET.get('ftp_file')
            if ftp_host and ftp_port and ftp_name and ftp_pwd and ftp_file is not None:
                # Tolerate paths that end with a trailing '/'.
                if isinstance(ftp_file, str) and ftp_file[-1] == '/':
                    ftp_file = replace_char(ftp_file, '', -1)
                # Log in to the FTP server.
                # NOTE(review): '<PASSWORD>' is a redaction placeholder
                # (presumably ftp_pwd); this line is not valid Python as written.
                ftp = FtpDownload(ftpserver=ftp_host, port=int(ftp_port), usrname=ftp_name, pwd=<PASSWORD>)
                filename = ftp_file.split('/')[-1]  # last path component, usually a file name
                if '.' in filename:  # treat names with a dot as single-file downloads
                    dowmload_path = ftp.independentDownload(ftp_file)
                    try:
                        if dowmload_path:
                            response = FileResponse(open(dowmload_path, 'rb'))
                            response['Content-Type'] = 'application/octet-stream'
                            response['Content-Disposition'] = 'attachment;filename={}'.format(filename)
                            return response
                    except Exception as e:
                        logger.error(e)
                        msg = {'msg': '下载文件时发生了异常,请稍后在尝试!!!'}
                        return render(request, 'tool/ftp/ftp_download.html', msg)
                else:
                    dowmload_paths = ftp.downloadFiles(ftp_file)  # mirror the FTP directory locally
                    # Zip the downloaded directory before streaming it.
                    # NOTE(review): the f-string has no placeholder — '(unknown)'
                    # looks like a redacted '{filename}'; confirm the intended name.
                    dowmload_zip = zipDir(dowmload_paths, f'(unknown).zip')
                    try:
                        if dowmload_paths:
                            response = FileResponse(open(dowmload_zip, 'rb'))
                            response['Content-Type'] = 'application/octet-stream'
                            response['Content-Disposition'] = 'attachment;filename={}.zip'.format(filename)
                            if os.path.exists(dowmload_zip):
                                os.remove(dowmload_zip)
                            return response
                    except Exception as e:
                        logger.error(e)
                        msg = {'msg': '下载文件时发生了异常,请稍后在尝试!!!'}
                        return render(request, 'tool/ftp/ftp_download.html', msg)
            return render(request, 'tool/ftp/ftp_download.html', )
        except Exception as e:
            logger.error(f'下载文件异常!{e}')
            msg = {'msg': '下载文件异常请稍后在尝试!!!'}
            return render(request, 'tool/ftp/ftp_download.html', msg)
# FTP download tool view
class FtpToolView(LoginMixin, View):
    """Serve the bundled FTP tool archive (FTPtool.zip) as a download."""
    def get(self, request):
        """Stream the FTP tool zip located at ``FTP_PATH`` relative to the CWD."""
        try:
            abspath = os.path.abspath('.')  # current working directory (original note said "two levels up" — the code resolves '.'; verify)
            ftptool_path = abspath + FTP_PATH  # join with the configured tool path
            response = FileResponse(
                open(ftptool_path, 'rb'))
            response['Content-Type'] = 'application/octet-stream'
            response['Content-Disposition'] = 'attachment;filename={}'.format('FTPtool.zip')
            return response
        except Exception as e:
            logger.error(e)
            msg = {'msg': '下载工具异常请稍后在尝试!!!'}
            return render(request, 'tool/ftp/ftp_download.html', msg)
| 2 | 2 |
gee_tools/imgtools.py | AnthonyPerez/gee_tools | 3 | 12757468 | <reponame>AnthonyPerez/gee_tools
import ee
# def addTerrain(image):
#
# terrain = ned.clip(geometry).reproject(image.projection());
#
# provterrain = terrain.updateMask(image.select(0).mask()).select([0], ['elev']);
# provaspect = ee.Terrain.aspect(provterrain).select([0], ['aspect']);
# provslope = ee.Terrain.slope(provterrain).select([0], ['slope']);
#
# return composite
# .addBands(provterrain)
# .addBands(provaspect)
# .addBands(provslope)
# Add two bands representing the lon/lat of each pixel
def add_latlon(image):
    """Append two bands ('LON', 'LAT') holding each pixel's longitude/latitude."""
    # Multiplying the first band by 0 keeps the source footprint/mask while
    # contributing nothing to the added coordinate values.
    zeroed = image.select(0).multiply(0)
    lonlat = zeroed.add(ee.Image.pixelLonLat())
    return image.addBands(lonlat.select(['longitude', 'latitude'], ['LON', 'LAT']))
def get_checkerboard(image, imgband, updmask, viz, color1, color2):
    """Build a 0/1 checkerboard on a 1-degree lon/lat grid in ``image``'s projection.

    Args:
        image: source ee.Image supplying projection and (optionally) a mask.
        imgband: band name/index whose projection and mask are used.
        updmask: if truthy, clip the checkerboard to the image footprint.
        viz: if truthy, return an RGB visualization instead of the 0/1 raster.
        color1, color2: palette colours used when ``viz`` is set.
    """
    # Create a 0/1 checkerboard on a lon/lat grid: take the floor of lon and
    # lat, add them together, and take the low-order bit
    lonlat_checks = ee.Image.pixelLonLat().floor().toInt().reduce(ee.Reducer.sum()).bitwiseAnd(1)
    # Get the image projection from one of the bands
    imgproj = image.select([imgband]).projection()
    # Now replace the projection of the lat/lon checkboard (WGS84 by default)
    # with the desired projection.
    # TODO: it would be a good idea to understand difference between changeProj and reproject.
    imgchecks = lonlat_checks.changeProj(ee.Projection('EPSG:4326'), imgproj)
    # If requested copy the footprint of the image onto the checkerboard,
    # to avoid a global image.
    if updmask:
        imgchecks = imgchecks.updateMask(image.select([imgband]).mask())
    if viz:
        # NOTE(review): the EE Python client expects keyword arguments for
        # visualize(); passing a dict positionally may not apply
        # min/max/palette — confirm against the API.
        imgchecks = imgchecks.visualize({'min': 0, 'max': 1, 'palette': [color1, color2]})
    return imgchecks
def _rename_band(val, suffix):
    """Return the server-side ee.String '<val>_<suffix>'."""
    return ee.String(val).cat(ee.String("_")).cat(ee.String(suffix))
def rename_bands(img, suffix):
    """Return ``img`` with every band renamed to '<band>_<suffix>'."""
    old_names = img.bandNames()
    new_names = old_names.map(lambda name: _rename_band(name, suffix))
    return img.select(old_names, new_names)
def addDOY(image):
    """Add a constant 'DOY' band and calendar metadata properties to ``image``.

    Sets DOY (1-based day of year, also the band value), MONTH (0-based,
    via getRelative), YEAR, a formatted DATE string, and MSTIMESTAMP
    (millisecond 'system:time_start') as image properties.
    """
    date = image.date()
    doy = date.getRelative('day', 'year').add(1)
    month = date.getRelative('month', 'year')
    return image.addBands(ee.Image(doy).select([0], ['DOY']).toInt16()).set(
        'DOY', doy,
        'MONTH', month,
        'YEAR', date.get('year'),
        'DATE', date.format(),
        'MSTIMESTAMP', ee.Number(image.get('system:time_start'))
    )
def appendBand(current, previous):
    """
    Accumulator for ee.ImageCollection.iterate: stack ``current``'s bands
    onto the result accumulated so far.

    Append it to the result (Note: only return current item on first element/iteration)
    :param current: image whose bands are appended.
    :param previous: accumulated image so far, or None on the first call.
    :return: server-side ee.Image with all bands stacked.
    """
    # Server-side conditional: on the first iteration ``previous`` is None,
    # so ``current`` seeds the accumulator.
    accum = ee.Algorithms.If(ee.Algorithms.IsEqual(previous, None), current, current.addBands(ee.Image(previous)))
    # Return the accumulation
    return accum
def getScaledImage(img, scaler):
    """
    Multiply selected bands of ``img`` by per-band scaling factors.

    The "scaler" parameter is a ee.Dictionary with band names and the
    corresponding scaling factors.
    NOTE: returns only the selected bands; input image properties are copied
    back onto the result via copyProperties.
    """
    # Build a constant image with one band per scaler entry, named to match.
    scalingimg = ee.Image.constant(scaler.values()).rename(scaler.keys())
    scaledimg = img.select(scaler.keys()).multiply(scalingimg)
    return ee.Image(scaledimg.copyProperties(img))
def getGLCMTexture(image, size, kernel, average, intscaler):
    """Compute GLCM texture metrics on integer-rescaled bands of ``image``.

    ``intscaler`` maps band names to scale factors applied before the
    int32 cast (glcmTexture requires integer input).
    """
    int_bands = getScaledImage(image, intscaler).toInt32()
    return int_bands.glcmTexture(size=size, kernel=kernel, average=average)
| 2.625 | 3 |
tests/unittest/test_songs_functions.py | Stashchen/bot-top-ranking-slack | 0 | 12757469 | <reponame>Stashchen/bot-top-ranking-slack
import unittest
from unittest.mock import patch, MagicMock, Mock
import songs_functionality.songs_functions as songs_funcs
class TestSongsFunctions(unittest.TestCase):
    """
    Unit tests for the helpers in ``songs_functionality.songs_functions``.
    """

    @patch('songs_functionality.songs_functions.system')
    def test_download_song(self, mocked_system):
        """download_song shells out to wget with the destination and URL."""
        songs_funcs.download_song('test_song', 'http://test.json', '.')
        mocked_system.assert_called_once_with(
            'wget -O ./test_song.mp3 "http://test.json"')

    @patch('songs_functionality.songs_functions.system')
    def test_delete_songs(self, mocked_system):
        """delete_songs removes every mp3 under the given directory."""
        songs_funcs.delete_songs('test/path')
        mocked_system.assert_called_once_with('rm test/path/*.mp3')

    def test_make_valid_song_name(self):
        """Artist then title, with every word dash-separated."""
        song = {
            'title': 'Song about love',
            'artist': 'ArtistName ArtistSurname'
        }
        title = songs_funcs.make_valid_song_name(song)
        self.assertEqual(title, 'ArtistName-ArtistSurname-Song-about-love')

    def test_sort_songs(self):
        """Songs come back ordered by descending number of voters."""
        def make_song(value, voted_users):
            return {
                'value': value,
                'title': 'Title%d' % value,
                'artist': 'Artist%d' % value,
                'link': 'Link%d' % value,
                'voted_users': voted_users,
            }

        all_songs = [
            make_song(1, [1]),
            make_song(2, []),
            make_song(3, [1, 2, 3]),
            make_song(4, [1, 2]),
        ]
        # Expected order: 3 voters, 2 voters, 1 voter, none.
        expected = [dict(all_songs[2]), dict(all_songs[3]),
                    dict(all_songs[0]), dict(all_songs[1])]
        self.assertEqual(expected, songs_funcs.sort_songs(all_songs))
virtual_modi/virtual_module/virtual_input_module/virtual_env.py | LUXROBO/virtual-modi | 1 | 12757470 |
from virtual_modi.virtual_module.virtual_module import VirtualModule
class VirtualEnv(VirtualModule):
    """Virtual MODI environment-sensor module.

    Exposes brightness, RGB colour, temperature and humidity readings.
    """

    # Property identifiers used by the MODI property-message protocol.
    BRIGHTNESS = 2
    RED = 3
    GREEN = 4
    BLUE = 5
    TEMPERATURE = 6
    HUMIDITY = 7

    def __init__(self, message_handler):
        super(VirtualEnv, self).__init__(message_handler)
        self.type = 'env'
        self.uuid = self.generate_uuid(0x2000)
        # Every sensor reading starts at zero.
        self.brightness = 0
        self.red = 0
        self.green = 0
        self.blue = 0
        self.temperature = 0
        self.humidity = 0
        self.attach()

    def run(self):
        """Broadcast each sensor property once, in protocol order."""
        readings = (
            (self.BRIGHTNESS, self.brightness),
            (self.RED, self.red),
            (self.GREEN, self.green),
            (self.BLUE, self.blue),
            (self.TEMPERATURE, self.temperature),
            (self.HUMIDITY, self.humidity),
        )
        for property_id, value in readings:
            self.send_property_message(property_id, value)
| 2.53125 | 3 |
client/py_client/modules/orders/endpoints.py | thefstock/FirstockPy | 1 | 12757471 | """
The api endpoint paths stored as constants
"""
# Order placement and lifecycle endpoints.
PLACE_ORDER = 'PlaceOrder'
MODIFY_ORDER = 'ModifyOrder'
CANCEL_ORDER = 'CancelOrder'
EXIT_SNO_ORDER = 'ExitSNOOrder'
# Margin calculation endpoints.
GET_ORDER_MARGIN = 'GetOrderMargin'
GET_BASKET_MARGIN = 'GetBasketMargin'
# Book / history retrieval endpoints.
ORDER_BOOK = 'OrderBook'
MULTILEG_ORDER_BOOK = 'MultiLegOrderBook'
SINGLE_ORDER_HISTORY = 'SingleOrdHist'
TRADE_BOOK = 'TradeBook'
POSITION_BOOK = 'PositionBook'
CONVERT_PRODUCT = 'ProductConversion' | 1.3125 | 1 |
liegroups/numpy/se2.py | lvzhaoyang/liegroups | 1 | 12757472 | import numpy as np
from liegroups.numpy import _base
from liegroups.numpy.so2 import SO2
class SE2(_base.SpecialEuclideanBase):
    """Homogeneous transformation matrix in :math:`SE(2)` using active (alibi) transformations.

    .. math::
        SE(2) &= \\left\\{ \\mathbf{T}=
        \\begin{bmatrix}
            \\mathbf{C} & \\mathbf{r} \\\\
            \\mathbf{0}^T & 1
        \\end{bmatrix} \\in \\mathbb{R}^{3 \\times 3} ~\\middle|~ \\mathbf{C} \\in SO(2), \\mathbf{r} \\in \\mathbb{R}^2 \\right\\} \\\\
        \\mathfrak{se}(2) &= \\left\\{ \\boldsymbol{\\Xi} =
        \\boldsymbol{\\xi}^\\wedge \\in \\mathbb{R}^{3 \\times 3} ~\\middle|~
         \\boldsymbol{\\xi}=
            \\begin{bmatrix}
                \\boldsymbol{\\rho} \\\\ \\phi
            \\end{bmatrix} \\in \\mathbb{R}^3, \\boldsymbol{\\rho} \\in \\mathbb{R}^2, \\phi \in \\mathbb{R} \\right\\}

    :cvar ~liegroups.SE2.dim: Dimension of the rotation matrix.
    :cvar ~liegroups.SE2.dof: Underlying degrees of freedom (i.e., dimension of the tangent space).
    :ivar rot: Storage for the rotation matrix :math:`\mathbf{C}`.
    :ivar trans: Storage for the translation vector :math:`\mathbf{r}`.
    """
    dim = 3
    """Dimension of the transformation matrix."""
    dof = 3
    """Underlying degrees of freedom (i.e., dimension of the tangent space)."""
    # Rotation component type; SO2 supplies exp/log/wedge/vee/left_jacobian.
    RotationType = SO2
    def adjoint(self):
        """Adjoint matrix of the transformation.

        .. math::
            \\text{Ad}(\\mathbf{T}) =
            \\begin{bmatrix}
                \\mathbf{C} & 1^\\wedge \\mathbf{r} \\\\
                \\mathbf{0}^T & 1
            \\end{bmatrix}
            \\in \\mathbb{R}^{3 \\times 3}
        """
        rot_part = self.rot.as_matrix()
        # NOTE(review): [r_y, -r_x] equals -1^wedge r (1^wedge = [[0,-1],[1,0]]),
        # while the docstring shows +1^wedge r — confirm the sign convention.
        trans_part = np.array([self.trans[1], -self.trans[0]]).reshape((2, 1))
        return np.vstack([np.hstack([rot_part, trans_part]),
                          [0, 0, 1]])
    @classmethod
    def exp(cls, xi):
        """Exponential map for :math:`SE(2)`, which computes a transformation from a tangent vector:

        .. math::
            \\mathbf{T}(\\boldsymbol{\\xi}) =
            \\exp(\\boldsymbol{\\xi}^\\wedge) =
            \\begin{bmatrix}
                \\exp(\\phi ^\\wedge) & \\mathbf{J} \\boldsymbol{\\rho} \\\\
                \\mathbf{0} ^ T & 1
            \\end{bmatrix}

        This is the inverse operation to :meth:`~liegroups.SE2.log`.
        """
        if len(xi) != cls.dof:
            raise ValueError("xi must have length {}".format(cls.dof))
        # xi = [rho_x, rho_y, phi]; translation is J(phi) @ rho.
        rho = xi[0:2]
        phi = xi[2]
        return cls(cls.RotationType.exp(phi),
                   cls.RotationType.left_jacobian(phi).dot(rho))
    @classmethod
    def inv_left_jacobian(cls, xi):
        """:math:`SE(2)` inverse left Jacobian.

        .. math::
            \\mathcal{J}^{-1}(\\boldsymbol{\\xi})
        """
        # Not implemented for SE(2) in this library.
        raise NotImplementedError
    @classmethod
    def left_jacobian(cls, xi):
        """:math:`SE(2)` left Jacobian.

        .. math::
            \\mathcal{J}(\\boldsymbol{\\xi})
        """
        # Not implemented for SE(2) in this library.
        raise NotImplementedError
    def log(self):
        """Logarithmic map for :math:`SE(2)`, which computes a tangent vector from a transformation:

        .. math::
            \\boldsymbol{\\xi}(\\mathbf{T}) =
            \\ln(\\mathbf{T})^\\vee =
            \\begin{bmatrix}
                \\mathbf{J} ^ {-1} \\mathbf{r} \\\\
                \\ln(\\boldsymbol{C}) ^\\vee
            \\end{bmatrix}

        This is the inverse operation to :meth:`~liegroups.SE2.exp`.
        """
        phi = self.rot.log()
        rho = self.RotationType.inv_left_jacobian(phi).dot(self.trans)
        return np.hstack([rho, phi])
    @classmethod
    def odot(cls, p, directional=False):
        """:math:`SE(2)` odot operator as defined by Barfoot.

        This is the Jacobian of a point

        .. math::
            \\mathbf{p} =
            \\begin{bmatrix}
                \\boldsymbol{\\epsilon} \\\\ \\eta
            \\end{bmatrix}

        with respect to a perturbation in the underlying parameters of :math:`\\mathbf{T}`.

        If :math:`\\mathbf{p}` is given in Euclidean coordinates and directional=False, the missing scale value :math:`\\eta` is assumed to be 1 and the Jacobian is 2x3. If directional=True, :math:`\\eta` is assumed to be 0:

        .. math::
            \\mathbf{p}^\\odot =
            \\begin{bmatrix}
                \\eta \\mathbf{1} & 1^\\wedge \\boldsymbol{\\epsilon}
            \\end{bmatrix}

        If :math:`\\mathbf{p}` is given in Homogeneous coordinates, the Jacobian is 3x3:

        .. math::
            \\mathbf{p}^\\odot =
            \\begin{bmatrix}
                \\eta \\mathbf{1} & 1^\\wedge \\boldsymbol{\\epsilon} \\\\
                \\mathbf{0}^T & 0
            \\end{bmatrix}
        """
        p = np.atleast_2d(p)
        result = np.zeros([p.shape[0], p.shape[1], cls.dof])
        if p.shape[1] == cls.dim - 1:
            # Assume scale parameter is 1 unless p is a direction
            # vector, in which case the scale is 0
            if not directional:
                result[:, 0:2, 0:2] = np.eye(2)
            result[:, 0:2, 2] = cls.RotationType.wedge(1).dot(p.T).T
        elif p.shape[1] == cls.dim:
            # NOTE(review): p[:, 2] has shape (N,) and np.eye(2) is (2, 2);
            # this broadcast only works for N in {1, 2} — confirm intended
            # batch handling for homogeneous points.
            result[:, 0:2, 0:2] = p[:, 2] * np.eye(2)
            result[:, 0:2, 2] = cls.RotationType.wedge(1).dot(p[:, 0:2].T).T
        else:
            raise ValueError("p must have shape ({},), ({},), (N,{}) or (N,{})".format(
                cls.dim - 1, cls.dim, cls.dim - 1, cls.dim))
        return np.squeeze(result)
    @classmethod
    def vee(cls, Xi):
        """:math:`SE(2)` vee operator as defined by Barfoot.

        .. math::
            \\boldsymbol{\\xi} = \\boldsymbol{\\Xi} ^\\vee

        This is the inverse operation to :meth:`~liegroups.SE2.wedge`.
        """
        # Promote a single matrix to a batch of one for uniform handling.
        if Xi.ndim < 3:
            Xi = np.expand_dims(Xi, axis=0)
        if Xi.shape[1:3] != (cls.dof, cls.dof):
            raise ValueError("Xi must have shape ({},{}) or (N,{},{})".format(
                cls.dof, cls.dof, cls.dof, cls.dof))
        xi = np.empty([Xi.shape[0], cls.dof])
        xi[:, 0:2] = Xi[:, 0:2, 2]
        xi[:, 2] = cls.RotationType.vee(Xi[:, 0:2, 0:2])
        return np.squeeze(xi)
    @classmethod
    def wedge(cls, xi):
        """:math:`SE(2)` wedge operator as defined by Barfoot.

        .. math::
            \\boldsymbol{\\Xi} =
            \\boldsymbol{\\xi} ^\\wedge =
            \\begin{bmatrix}
                \\phi ^\\wedge & \\boldsymbol{\\rho} \\\\
                \\mathbf{0} ^ T & 0
            \\end{bmatrix}

        This is the inverse operation to :meth:`~liegroups.SE2.vee`.
        """
        xi = np.atleast_2d(xi)
        if xi.shape[1] != cls.dof:
            raise ValueError(
                "xi must have shape ({},) or (N,{})".format(cls.dof, cls.dof))
        Xi = np.zeros([xi.shape[0], cls.dof, cls.dof])
        Xi[:, 0:2, 0:2] = cls.RotationType.wedge(xi[:, 2])
        Xi[:, 0:2, 2] = xi[:, 0:2]
        return np.squeeze(Xi)
| 2.953125 | 3 |
old_logen/pylogen/ToolBar.py | leuschel/logen | 14 | 12757473 | from Tkinter import *
class ToolBar(Frame):
    """
    The application's main toolbar: a strip of image buttons with
    balloon tooltips and status-bar hover text.
    """
    def __init__(self, master=None, app=None):
        Frame.__init__(self, master)
        self["height"] = "-1cm"
        # Keep PhotoImage references alive; Tk only holds weak references
        # to images, so dropping these would blank the buttons.
        self.images = {}
        self.app = app

    def add_button(self, text="", command=None, imagefile=None, padx=0, side="left"):
        """Create an image button, wire hover/balloon help, and return it."""
        icon = PhotoImage(file=imagefile, master=self)
        self.images[text] = icon
        button = Button(self, text=text, image=icon, command=command, state="normal")
        button.bind(sequence="<Enter>", func=self.over)
        button.pack(side=side, padx=padx, pady=2)
        self.app.balloon.bind(button, text)
        return button

    def add_separator(self, padx=4, side="left"):
        """Insert horizontal spacing between groups of buttons."""
        Label(self).pack(padx=padx, side=side)

    def over(self, sender):
        """Mirror the hovered button's text into the application status bar."""
        hovered_text = sender.widget.cget("text")
        self.app.status.set("%s" % (hovered_text))
result_linker/api/home.py | akshayAithal/result_linker | 0 | 12757474 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint,redirect,url_for, session, jsonify, request
from result_linker.models import db
from result_linker.logger import logger
from result_linker.models.share import Share
home_blueprint = Blueprint("home", __name__)
@home_blueprint.route("/")
def index(issue_id=0):
    """Serve the ReactJS-based index.html.

    Logs the client's User-Agent, authentication state and session for
    debugging.  ``issue_id`` is unused here but kept for signature
    compatibility (``issues`` redirects to this view).
    """
    from flask import render_template
    # FIX: dropped the redundant local ``from flask import request`` —
    # ``request`` is already imported at module level.
    logger.info(request.headers.get('User-Agent'))
    from flask_login import current_user
    if current_user.is_authenticated:
        logger.info("Authenticated!!")
    else:
        logger.info("Not Authenticated!!")
    logger.debug(session)
    return render_template("index.html")
@home_blueprint.route("/issue/<issue_id>")
def issues(issue_id):
    """Resolve ``issue_id``: redirect to an existing share link when one is
    recorded, otherwise remember the id in the session and show the index."""
    from flask import render_template  # NOTE(review): unused here — likely leftover
    share = Share.query.filter_by(issue=issue_id).first()
    if share:
        return redirect(url_for("share.check_share_link",token = share.link))
    session["issue_id"] = issue_id
    return redirect(url_for("home.index"))
@home_blueprint.route("/clear")
def clear(issue_id=0):
    """Return a trivial success payload.

    NOTE(review): despite its name this endpoint clears nothing; confirm
    whether ``session.pop("issue_id", None)`` was intended.  ``issue_id``
    is unused but kept for signature compatibility.

    FIX: removed the unused local ``from flask import render_template``.
    """
    return jsonify({"success": True})
#@home_blueprint.route("/get_issue")
#def get_issue(issue_id):
# """Serve the ReactJS-based index.html"""
# issue_id = session.get("issue_id",0)
# success_flag=False
# if issue_id:
# success_flag = True
# return jsonify({"success":success_flag,"issue_id": issue_id}) | 2.3125 | 2 |
development/analyze_table.py | gunny26/datalogger | 0 | 12757475 | #!/usr/bin/python
"""
Tool to analyze some datalogger raw data
"""
from __future__ import print_function
import os
import sys
import argparse
import json
parser = argparse.ArgumentParser(description="Tool to analyze some datalogger raw data")
parser.add_argument("-i", "--input-file", help="file to read from", required=True)
# FIX: arguments were hard-coded to a debug value
# ("-i /var/rrd/snmp/raw/ifTable_2017-11-15.csv".split()), silently
# ignoring the real command line.  Parse sys.argv as intended.
options = parser.parse_args()

if not os.path.isfile(options.input_file):
    print("file %s does not exist" % options.input_file)
    sys.exit(1)

# data: per-column analysis results; meta: table description to be printed.
data = {}
meta = {}
meta["delimiter"] = "\t"
meta["index_keynames"] = ("hostname", "ifDescr")
meta["ts_keyname"] = "ts"
meta["interval"] = 300
headers = None
with open(options.input_file, "rt") as infile:
    for line in infile.read().split("\n"):
        if line == "" or line == "\n":
            continue
        if headers is None:
            # First non-empty line: column headers.
            headers = line.split(meta["delimiter"])
            meta["headers"] = headers
            data["length"] = len(headers)
            for header in headers:
                data[header] = {
                    "isnumeric" : True,
                    "interval" : 0
                }
            assert meta["ts_keyname"] in headers
            assert all((index_key in headers for index_key in meta["index_keynames"]))
        else:
            columns = line.split(meta["delimiter"])
            assert len(columns) == data["length"]
            for index, column in enumerate(columns):
                # A column is numeric only if every one of its values is.
                # NOTE(review): str.isnumeric() rejects '-1' and '1.5' —
                # confirm signed/float columns should count as non-numeric.
                data[headers[index]]["isnumeric"] = all((data[headers[index]]["isnumeric"], column.isnumeric()))
            print(line)
# Numeric columns become value keys; non-numeric, non-index, non-ts columns
# are blacklisted.
meta["value_keynames"] = dict([(header, "asis") for header in headers if data[header]["isnumeric"] == True])
meta["blacklist"] = [header for header in headers if (data[header]["isnumeric"] == False) and (header not in meta["index_keynames"]) and (header != meta["ts_keyname"])]
print(json.dumps(meta, indent=4, sort_keys=True))
| 2.984375 | 3 |
docs/examples/02_description_models/files/02_robot_from_ros.py | yck011522/compas_fab | 64 | 12757476 | <filename>docs/examples/02_description_models/files/02_robot_from_ros.py
import compas
from compas_fab.backends import RosClient
# Set high precision to import meshes defined in meters
compas.PRECISION = '12f'
# Connect to ROS, load the robot description (including geometry), and
# print a summary of its kinematic model.
with RosClient() as ros:
    robot = ros.load_robot(load_geometry=True)
    print(robot.model)
| 1.890625 | 2 |
problems/fizzbuzz.py | panc86/coding-problems | 0 | 12757477 | <gh_stars>0
# FizzBuzz game in python
def solution(N):
    """Yield FizzBuzz values for the integers 1 .. N-1 (upper bound exclusive).

    Multiples of 3 yield "Fizz", of 5 "Buzz", of both "FizzBuzz";
    everything else yields the number itself.
    """
    for number in range(1, N):
        word = ("Fizz" if number % 3 == 0 else "") + \
               ("Buzz" if number % 5 == 0 else "")
        yield word if word else number
# Run via the console
if __name__ == "__main__":
    import sys
    import time
    s = time.time()
    N = int(sys.argv[1])  # upper bound (exclusive) from the command line
    r = solution(N)
    e = time.time()
    # NOTE: the generator is only consumed inside the f-string below, so the
    # elapsed time measures generator creation, not the FizzBuzz computation.
    print(f"Result: {list(r)}\ndone in {e - s:.10f} s")
| 3.765625 | 4 |
example/app.py | iksaif/flacon | 2 | 12757478 | <gh_stars>1-10
#!/usr/bin/env python
"""Flacon example."""
import argparse
import flask
from flacon import Flacon
# Optional API.
try:
import flask_restx
except ImportError:
flask_restx = None
class Error(Exception):
    """Base class for every error raised locally by this example."""
# This could be as simple as :
# flacon = Flacon(app)
# app = flacon.app
# More complicated example:
flacon = Flacon(__name__)
app = flacon.app
# Override the default index
@app.route("/")
def index():
    """Render the custom landing page, overriding Flacon's default index."""
    return flask.render_template("index.html")
# Add a new page.
@app.route("/example")
def example():
    """Render the additional example page."""
    return flask.render_template("example.html")
# Create a custom health check callbback.
def is_healthy():
    """Custom "health" check that fails randomly about half the time.

    Raises ``Error`` to simulate an unhealthy service; returns True otherwise.
    """
    import random
    roll = random.random()
    if roll > 0.5:
        raise Error()
    return True
if flask_restx:
    class HelloWorld(flask_restx.Resource):
        """Minimal REST resource returning a static JSON greeting."""
        def get(self):
            return {"hello": "world"}
def initialize_api(flask_app):
    """Initialize an API under /api, or do nothing if flask_restx is absent."""
    if not flask_restx:
        return
    api = flask_restx.Api(version="1.0", title="My Example API")
    api.add_resource(HelloWorld, "/hello")
    # Mount the API on its own blueprint so all routes live under /api.
    blueprint = flask.Blueprint("api", __name__, url_prefix="/api")
    api.init_app(blueprint)
    flask_app.register_blueprint(blueprint)
def initialize_app(flask_app, args):
    """Initialize the App: configure flacon, health check, and optional API."""
    # Setup flacon with the args.
    flacon.setup(args)
    # Register a custom health check.
    flacon.is_healthy = is_healthy
    # Add an optional API
    initialize_api(flask_app)
def main():
    """Parse the command line, configure the app, and start serving."""
    # Setup a custom parser extended with flacon's standard options.
    parser = argparse.ArgumentParser(description="Example")
    parser = Flacon.get_argparser(parser)
    args = parser.parse_args()
    initialize_app(app, args)
    # Start the application.
    flacon.run()
# Script entry point.
if __name__ == "__main__":
    main()
| 2.78125 | 3 |
Rent 6.5/record.py | girisakar365/Project-Rent | 2 | 12757479 | <gh_stars>1-10
"""
__File__: record tab
Source of import: main
Buttons import: Clear, Confrom Record,edit,delete,qr
Lable import: no_record, dor
combo import: src, ssr,
"""
from source import *
from db import db
from Tree import Tree
class Record:
    """Wires the 'Clear' and record-confirmation buttons of the record tab.

    ``args[0]``/``args[1]`` are the two button widgets; ``kwargs`` supplies
    the tab's widgets (``entries``, ``combo``, ``tab``, ``engine``, ...).
    """
    def __init__(self,*args,**kwargs):
        self.tool={key:value for key,value in kwargs.items()}
        self.clear = args[0]
        self.record = args[1]
        self.today = strftime('%d/%m/%Y')
        self.combo = self.tool['combo']
        # Attach the button callbacks.
        self.clear['command']=self._clear
        self.record['command']=self._record
    def _clear(self):
        """Reset the meter entry fields and pre-fill the previous readings."""
        self.tool['entries'][0].delete(0,END)
        self.tool['entries'][1].delete(0,END)
        self.tool['entries'][2].delete(0,END)
        self.tool['entries'][3].delete(0,END)
        results=None
        # Keep only the last row the DB returns for the current month.
        for i in db.RS(self.combo[0][1].get(),'fetch',strftime('%B')):
            results=i
        # Pre-fill the previous meter readings; ``results`` may be None
        # (TypeError on indexing) or shorter than expected (IndexError).
        try:
            try:
                self.tool['entries'][0].insert(0,results[7])
                self.tool['entries'][2].insert(0,results[8])
            except TypeError:
                pass
        except IndexError:
            pass
        self.tool['entries'][1].focus()
        db.cache('cmd',False)
        db.cache('oid',None)
    def _record(self):
        """Validate and store the current inputs, then refresh the display tab."""
        Conn = Gate(cmd=db.cache('fetch','cmd'),entries=self.tool['entries'],combo=(self.combo[0][0],self.combo[0][1]))
        if Conn.check() == True:
            # Switch to the display tab and sync its month/year selectors
            # with the record tab's current selection.
            self.tool['tab'].select(1)
            current_month = self.combo[0][2].index(self.combo[0][0].get())
            current_year = self.combo[0][3].index(int(self.combo[0][1].get()))
            self.combo[1][0].current(current_month)
            self.combo[1][1].current(current_year)
            self.tool['engine']()
            self.tool['entries'][1].delete(0,END)
            self.tool['entries'][3].delete(0,END)
            self.tool['entries'][1].focus()
            self._clear()
class Gate:
    """Validation and persistence gate for the record form (error handling)."""
    def __init__(self,cmd=0,**kwargs):
        # cmd != 0 means an existing row is being edited: it is deleted
        # before the replacement row is stored.
        self.gate={keys:values for keys,values in kwargs.items()}
        self.cmd=cmd
    def check(self):
        """Validate the form; on success persist it via store() and return True."""
        intake=[i.get() for i in self.gate['entries'] if len(i.get())!=0]
        if len(intake)<7:
            Msg('Opps! Your input(s) is missing.','!')
        elif self.gate['combo'][0].get()=='Month' or self.gate['combo'][1].get()=='Year':
            Msg('Please select an appoprite month or year.','!')
        else:
            try:
                checkalpha=[int(i) for i in intake]
            except ValueError:
                Msg('Your input(s) must always be a number NOT an alphabet','!')
            else:
                if db.cache('fetch','meter')==0:
                    Msg('Opps! You haven\'t selected unit.','!')
                else:
                    self.store()
                    return True
    def store(self):
        """Compute the bill from the meter readings and insert it into the DB."""
        if self.cmd!=0:
            db.RS(self.gate['combo'][1].get(),'delete',db.cache('fetch','oid'))
        eleclist=[int(i.get()) for i in self.gate['entries'][0:4]]
        water = int(self.gate['entries'][4].get())
        waste = int(self.gate['entries'][5].get())
        rent = int(self.gate['entries'][6].get())
        # NOTE(review): presumably (new - old) of the main meter plus half
        # the difference of a shared meter, priced at the cached per-unit
        # rate — confirm against the entry layout.
        electricity = ((eleclist[1]-eleclist[0])+((eleclist[3]-eleclist[2])/2))*db.cache('fetch','meter')
        Total = electricity+water+waste+rent
        raw = [strftime('%d/%m/%Y'),self.gate['combo'][0].get(),
               electricity,water,waste,rent,Total,
               eleclist[1],eleclist[3],eleclist[0],eleclist[2]]
        db.RS(self.gate['combo'][1].get(),'insert',raw)
| 2.609375 | 3 |
submissions/cf16-final/a.py | m-star18/atcoder | 1 | 12757480 | <reponame>m-star18/atcoder
import sys
# Fast binary stdin helpers (competitive-programming boilerplate).
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
# Grid of h rows and w whitespace-separated cells.
h, w = map(int, readline().split())
s = [readline().rstrip().decode().split() for _ in range(h)]
al = [chr(ord('A') + i) for i in range(26)]
# Find the cell containing 'snuke' and print it in spreadsheet notation:
# column letter (A-based) followed by the 1-based row number.
for i in range(h):
    for j in range(w):
        if s[i][j] == 'snuke':
            print(al[j] + str(i + 1))
| 2.5 | 2 |
src/sympais/tasks/coral.py | ethanluoyc/sympais | 5 | 12757481 | <filename>src/sympais/tasks/coral.py
"""Coral benchmark tasks."""
# pylint: disable=line-too-long
# flake8: noqa
import glob
import importlib
import os
import re
import tempfile
from typing import List, Tuple
from sympais.tasks import base
SKIPPED_FILES = ["turn.nodeq.m", "conflict.nodeq.m"]
def compile_mathematica_example_to_python(source_file, target_file, path_index):
  """Translate one Mathematica NProbability query into a Python task module.

  Reads line ``path_index`` of ``source_file``, strips the fixed
  command prefix/suffix, converts the conjunctive constraint and the
  truncated-Gaussian variable declarations, and writes a Python module
  defining ``profile``, ``constraints`` and ``domains`` to ``target_file``.
  """
  # Take subjects from 'normal', which uses Gaussian distributions.
  # To use other distributions, change the distribution_pattern at line 29
  # path = 'subjects/normal/apollo.nodeq.m'
  path = source_file
  with open(path, "r") as mathematica_input_file:
    path_conditions = mathematica_input_file.readlines()
  mathematica_input = path_conditions[path_index]
  command_prefix = "TimeConstrained[ScientificForm[NProbability["
  command_suffix = '}],NumberFormat -> (#1 <> "E" <> #3 &)],1800] //AbsoluteTiming'
  # Strip the fixed wrapper (the extra -1 also drops the trailing newline).
  mathematica_input = mathematica_input[len(command_prefix):-len(command_suffix) - 1]
  conjunctive_constraint, distributions = mathematica_input.split(
      ",{")  # Example: x1 < 10 && (x2 -1) + x3 >= -10;

  def translate_operators(mathematica_string: str) -> str:
    # Add any other replace rule if needed
    result = mathematica_string.replace("^", "**")
    return result

  # Extract individual conjuncts in a list
  conjuncts = [
      translate_operators(c.strip()) for c in conjunctive_constraint.split("&&")
  ]
  # NOTE(review): in the number groups, '(.[0-9]*)?' uses an unescaped '.'
  # which matches any character — presumably '\.' was intended; confirm.
  distribution_pattern = r"(?P<var_id>[a-zA-Z0-9_]+) \\\[Distributed\] TruncatedDistribution\[\{(?P<lb>-?[0-9]+(.[0-9]*)?),(?P<ub>-?[0-9]+(.[0-9]*)?)\},NormalDistribution\[(?P<loc>-?[0-9]+(.[0-9]*)?),(?P<stdev>-?[0-9]+(.[0-9]*)?)\]\]"
  matcher = re.compile(distribution_pattern)

  def translate_profile(
      mathematica_var_declaration: re.Match,) -> Tuple[str, str, List[str], str]:
    # Turn one matched variable declaration into the code snippets needed
    # for the generated module: symbol, profile entry, bound constraints,
    # and a domain entry.
    var_id = mathematica_var_declaration.group("var_id")
    lower_bound = float(mathematica_var_declaration.group("lb"))
    upper_bound = float(mathematica_var_declaration.group("ub"))
    gaussian_loc = float(mathematica_var_declaration.group("loc"))
    gaussian_stdev = float(mathematica_var_declaration.group("stdev"))
    symbolic_var_declaration = f'{var_id} = sympy.Symbol("{var_id}")'
    profile_specification = (
        f'"{var_id}": dist.TruncatedNormal(low={lower_bound}, high={upper_bound}, loc={gaussian_loc}, scale={gaussian_stdev})'
    )
    domain_constraints = [
        f"{var_id} >= {lower_bound}",
        f"{var_id} <= {upper_bound}",
    ]
    # domain = [float(lower_bound), float(upper_bound)]
    domain = f'"{var_id}": ({lower_bound}, {upper_bound})'
    return (
        symbolic_var_declaration,
        profile_specification,
        domain_constraints,
        domain,
    )

  python_vars_declaration = []
  python_profile_specs = []
  python_constraints = conjuncts
  python_domains = []
  for m in matcher.finditer(distributions):
    (
        symbolic_var_declaration,
        profile_specification,
        domain_constraints,
        domain,
    ) = translate_profile(m)
    python_vars_declaration.append(symbolic_var_declaration)
    python_profile_specs.append(profile_specification)
    python_constraints += domain_constraints
    python_domains.append(domain)

  # Assemble the generated module: imports, symbol declarations, then the
  # profile / constraints / domains literals.
  preamble = [
      "from sympy import Symbol",
      "import sympy",
      "from sympais import distributions as dist",
  ]
  python_code = ("\n".join(preamble) + "\n\n" + "\n".join(python_vars_declaration)
                 + "\n\nprofile = {\n\t" + ",\n\t".join(python_profile_specs)
                 + "\n}\n\n" + "constraints = [\n\t" + ",\n\t".join(python_constraints)
                 + "\n]\n" + "\n\ndomains = {\n\t" + ",\n\t".join(python_domains)
                 + "\n}\n\n")
  with open(target_file, "wt") as outfile:
    outfile.write(python_code)
def compile_mathematica_to_python_module(path, path_index):
    """Compile one path condition of ``path`` into Python and import it.

    The generated source is written to a temporary ``.py`` file, imported as
    a throw-away module named "mod", and the module object is returned.  The
    temporary file is removed when the ``with`` block exits; the loaded
    module remains usable.
    """
    with tempfile.NamedTemporaryFile(mode="wt", suffix=".py") as tmp:
        filename = tmp.name
        # NOTE(review): the file is re-opened and written by
        # compile_mathematica_example_to_python while ``tmp`` is still open;
        # this works on POSIX but would fail on Windows — confirm target OS.
        compile_mathematica_example_to_python(path, filename, path_index)
        spec = importlib.util.spec_from_file_location("mod", filename)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # pytype: disable=attribute-error
    return module
def get_num_paths(filename):
    """Return how many path conditions (lines) *filename* contains."""
    with open(filename, "r") as subject_file:
        return sum(1 for _ in subject_file)
def list_all_tasks():
    """Return the basenames of every bundled Mathematica subject file."""
    package_dir = os.path.dirname(os.path.abspath(__file__))
    pattern = os.path.join(package_dir, "subjects/normal/*.m")
    return [os.path.basename(path) for path in glob.glob(pattern)]
class Coral(base.Task):
    """A single path-condition task from the Coral benchmark suite."""

    def __init__(self, filename, path_index=0):
        # Bare filenames are resolved against the bundled subjects/normal dir.
        if not os.path.exists(filename):
            filename = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "subjects/normal", filename)
        module = compile_mathematica_to_python_module(filename, path_index)
        # Total number of path conditions available in this subject file.
        self.num_paths = get_num_paths(filename)
        super().__init__(module.profile, module.constraints, module.domains)

    @property
    def num_dimensions(self):
        # One dimension per input variable in the generated profile.
        return len(self.profile)
| 2.390625 | 2 |
models/densenet.py | Hiwyl/keras_cnn_finetune | 1 | 12757482 | # -*- encoding: utf-8 -*-
'''
@Author : lance
@Email : <EMAIL>
'''
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
import keras
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.layers import Flatten, Dense, regularizers, GlobalAveragePooling2D, Dropout
from keras.optimizers import SGD, Adagrad, Adam
from keras import backend as K, Model
import os
from model_cx.load_data import load_data
def densenet(classes, epochs, steps_per_epoch, validation_steps, input_shape):
    """Fine-tune an ImageNet-pretrained DenseNet121 classifier.

    :param classes: number of output classes; 1 selects sigmoid/binary mode,
        anything else selects softmax/categorical mode.
    :param epochs: number of training epochs.
    :param steps_per_epoch: training batches per epoch.
    :param validation_steps: validation batches per epoch.
    :param input_shape: (height, width) of the input images; a 3-channel
        dimension is appended below.
    """
    # Load the training / validation batch generators.
    train_batches, valid_batches = load_data(input_shape)
    input_shape += (3,)
    # Available variants: DenseNet121, DenseNet169, DenseNet201.
    temp_model = keras.applications.densenet.DenseNet121(include_top=False, weights='imagenet', input_shape=input_shape)
    x = temp_model.output
    x = GlobalAveragePooling2D()(x)  # GlobalAveragePooling2D turns an MxNxC tensor into 1xC (C = channels)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.2)(x)
    if classes == 1:
        print("sigmoid")
        predictions = Dense(classes, activation='sigmoid')(x)
    else:
        print("softmax")
        predictions = Dense(classes, activation='softmax')(x)
    model = Model(inputs=temp_model.input, outputs=predictions)
    if classes == 1:
        # binary classification
        print("二元分类")
        model.compile(optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
                      loss='binary_crossentropy', metrics=['accuracy'])
    else:
        # multi-class classification
        print("多分类")
        model.compile(optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08), loss='categorical_crossentropy', metrics=['accuracy'])
    # model_cx.summary()
    print('the number of layers in this model_cx:' + str(len(model.layers)))
    # Save model checkpoints.
    out_dir = "../weights/"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    filepath = "../weights/densenet_{epoch:04d}.h5"
    # Save a checkpoint after each epoch (save_best_only=False saves every
    # epoch, not only on improvement, despite monitoring val_acc).
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=False,
                                 mode='max')
    # Learning-rate schedule: halve the LR when val_loss plateaus.
    lr_reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1,
                                  min_lr=0.000005, mode="min")
    # Early stopping — NOTE(review): created but never added to
    # callbacks_list below, so it currently has no effect.
    earlystopping = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='min')
    # Persist the training history as CSV.
    log_dir = "../logs/"
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logfile = "../logs/densenet.csv"
    log = keras.callbacks.CSVLogger(logfile, separator=',', append=False)
    # NOTE(review): this TensorBoard callback is also never passed to
    # fit_generator, so it is inert.
    loggraph = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
    callbacks_list = [checkpoint, lr_reduce, log]
    # Train.
    model.fit_generator(train_batches, steps_per_epoch=steps_per_epoch, validation_data=valid_batches,
                        validation_steps=validation_steps, epochs=epochs, verbose=2,
                        callbacks=callbacks_list, workers=16, max_queue_size=20)
if __name__ == "__main__":
    # Binary classification, 200 epochs, 210 train / 15 validation steps,
    # 224x224 input images.
    densenet(1, 200, 210, 15, (224, 224))
    # densenet121 batch=16 acc:93.3 densenet(3,50,20,5 ,(224,224)) SGD
    # factor:0.5 pat:10
| 2.765625 | 3 |
tests/test_amf3_int.py | chrippa/python-flashmedia | 14 | 12757483 | <reponame>chrippa/python-flashmedia<filename>tests/test_amf3_int.py
from . import with_fd
from flashmedia.types import AMF3Integer
MIN = -268435456
MIN_BYTES = b"\xc0\x80\x80\x00"
MIN_SIZE = len(MIN_BYTES)
MAX = 268435455
MAX_BYTES = b"\xbf\xff\xff\xff"
MAX_SIZE = len(MAX_BYTES)
ZERO = 0
ZERO_BYTES = b"\x00"
ZERO_SIZE = len(ZERO_BYTES)
def test_pack_min():
    """Packing the smallest AMF3 integer yields its 4-byte encoding."""
    assert AMF3Integer.pack(MIN) == MIN_BYTES
def test_pack_max():
    """Packing the largest AMF3 integer yields its 4-byte encoding."""
    assert AMF3Integer.pack(MAX) == MAX_BYTES
def test_pack_zero():
    """Zero packs to a single 0x00 byte."""
    assert AMF3Integer.pack(ZERO) == ZERO_BYTES
def test_size_min():
    """size() reports the encoded length of the minimum value."""
    assert AMF3Integer.size(MIN) == MIN_SIZE
def test_size_max():
    """size() reports the encoded length of the maximum value."""
    assert AMF3Integer.size(MAX) == MAX_SIZE
def test_size_zero():
    """size() reports the encoded length (1 byte) of zero."""
    assert AMF3Integer.size(ZERO) == ZERO_SIZE
@with_fd(MIN_BYTES)
def test_read_min(fd):
    """Reading the minimum encoding from a file-like object round-trips."""
    assert AMF3Integer.read(fd) == MIN
@with_fd(MAX_BYTES)
def test_read_max(fd):
    """Reading the maximum encoding from a file-like object round-trips."""
    assert AMF3Integer.read(fd) == MAX
@with_fd(ZERO_BYTES)
def test_read_zero(fd):
    """Reading the single-byte zero encoding round-trips."""
    assert AMF3Integer.read(fd) == ZERO
| 2.421875 | 2 |
djedi/migrations/0002_auto_20190722_1447.py | Mojken/djedi-cms | 41 | 12757484 | # Generated by Django 2.2.3 on 2019-07-22 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make ``Node.is_published`` blank-able, default False."""

    dependencies = [
        ('djedi', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='node',
            name='is_published',
            field=models.BooleanField(blank=True, default=False),
        ),
    ]
| 1.398438 | 1 |
setup.py | apljungquist/botfights-sdk | 0 | 12757485 | <gh_stars>0
#!/usr/bin/env python
import setuptools
# Package metadata and entry points for the botfights SDK (src-layout).
setuptools.setup(
    name="botfights",
    install_requires=["fire", "requests"],
    # Sources live under src/.
    packages=setuptools.find_packages("src"),
    package_dir={"": "src"},
    include_package_data=True,
    entry_points={
        # Command-line entry point.
        "console_scripts": ["fight = botfights:main"],
        # Pluggable wordle guesser implementations.
        "botfights.wordle.guesser": [
            "sample = botfights.wordle.sample_bot:Bot",
            "assisted = botfights.wordle.wordle:Assisted",
        ],
    },
)
| 1.21875 | 1 |
pynif3d/models/idr/hyperparams.py | pfnet/pynif3d | 66 | 12757486 | <gh_stars>10-100
class SoftplusParams:
    """Hyper-parameters of the Softplus activation."""

    # Restrict instances to a single attribute; no per-instance __dict__.
    __slots__ = ["beta"]

    def __init__(self, beta=100):
        # ``beta`` controls the sharpness of the softplus curve.
        self.beta = beta
class GeometricInitParams:
    """Hyper-parameters of the geometric network initialization."""

    # Restrict instances to a single attribute; no per-instance __dict__.
    __slots__ = ["bias"]

    def __init__(self, bias=0.6):
        # Initial bias used by the geometric initialization scheme.
        self.bias = bias
class IDRHyperParams:
    """Bundle of IDR hyper-parameter groups, with defaults filled in."""

    def __init__(self, softplus=None, geometric_init=None):
        # Fall back to the default parameter groups when none are supplied.
        self.softplus = SoftplusParams() if softplus is None else softplus
        self.geometric_init = (
            GeometricInitParams() if geometric_init is None else geometric_init
        )
| 2.53125 | 3 |
tests/test_import.py | Attsun1031/schematics | 1,430 | 12757487 | # -*- coding: utf-8 -*-
from copy import deepcopy
import pytest
from schematics.models import Model
from schematics.types import *
from schematics.types.compound import *
from schematics.exceptions import *
from schematics.undefined import Undefined
@pytest.mark.parametrize('init', (True, False))
def test_import_data(init):
    """import_data overwrites the given fields; ``init`` controls whether
    untouched, undefined fields are materialised with None."""
    class M(Model):
        a, b, c, d = IntType(), IntType(), IntType(), IntType()
    m = M({
        'a': 1,
        'b': None,
        'c': 3
    }, init=init)
    m.import_data({
        'a': None,
        'b': 2
    })
    if init:
        # With init=True the never-assigned field ``d`` appears as None.
        assert m._data == {'a': None, 'b': 2, 'c': 3, 'd': None}
    else:
        assert m._data == {'a': None, 'b': 2, 'c': 3}
@pytest.mark.parametrize('init', (True, False))
def test_import_data_with_error(init):
    """A failing import_data (required field set to None) must leave the
    model's data untouched, i.e. the import is atomic."""
    class M(Model):
        a, b, c, d = IntType(), IntType(), IntType(required=True), IntType()
    m = M({
        'a': 1,
        'b': None,
        'c': 3
    }, init=init)
    with pytest.raises(DataError):
        m.import_data({
            'a': None,
            'b': 2,
            'c': None,
        })
    if init:
        assert m._data == {'a': 1, 'b': None, 'c': 3, 'd': None}
    else:
        assert m._data == {'a': 1, 'b': None, 'c': 3}
@pytest.mark.parametrize('preconvert_source, populate_source',
                         [( False, None),
                          ( True, True),
                          ( True, False)])
@pytest.mark.parametrize('recursive, populate_target, init_to_none, populated_result',
                         [( False, True, True, True),
                          ( False, False, False, False),
                          ( True, True, True, True),
                          ( True, False, True, True),
                          ( True, False, False, False)])
def test_complex_import_data(recursive, preconvert_source, populate_source, populate_target,
                             init_to_none, populated_result):
    """Exercise import_data on a nested model: recursive vs shallow merge,
    raw-dict vs pre-converted source, and init/None-population variants."""
    class M(Model):
        intfield = IntType(max_value=2)
        matrixfield = ListType(ListType(IntType))
        dictfield = DictType(IntType)
        modelfield = ModelType('M')

    # Three levels of nesting via the self-referential 'modelfield'.
    origdict = {
        'intfield': '1',
        'dictfield': dict(a=1, b=2),
        'modelfield': {
            'intfield': '2',
            'matrixfield': [[0, 0, 0], [1, 1, 1], [2, 2, 2]],
            'dictfield': dict(a=11, b=22),
            'modelfield': {
                'intfield': '3',
                'dictfield': dict(a=111, b=222)}}}

    m = M(origdict, init=populate_target)

    # Partial update: some nested fields present, some absent.
    sourcedict = {
        'intfield': '101',
        'dictfield': dict(c=3),
        'modelfield': {
            'matrixfield': [[9]],
            'modelfield': {
                'intfield': '103',
                'dictfield': dict(c=33)}}}

    sourcedata = deepcopy(sourcedict)
    if preconvert_source:
        sourcedata = M(sourcedata, init=populate_source)

    m.import_data(sourcedata, recursive=recursive, init_values=init_to_none)
    # import_data must not alias the source object.
    assert id(m) != id(sourcedata)

    if preconvert_source and populate_source:
        # A fully populated source replaces everything.
        assert m == M(sourcedict, init=True)
    elif recursive:
        # Recursive merge: untouched nested fields survive from origdict.
        assert m == M({
            'intfield': '101',
            'dictfield': dict(c=3),
            'modelfield': {
                'intfield': '2',
                'matrixfield': [[9]],
                'dictfield': dict(a=11, b=22),
                'modelfield': {
                    'intfield': '103',
                    'dictfield': dict(c=33)}}}, init=populated_result)
    else:
        # Shallow merge: the whole nested modelfield is replaced.
        assert m == M(sourcedict, init=populated_result)
| 2.390625 | 2 |
scripts/Canyon-mesh.py | gely/coseis | 7 | 12757488 | <filename>scripts/Canyon-mesh.py
#!/usr/bin/env python
"""
2d Semicircular canyon mesh
"""
import os
import math
import numpy as np
import matplotlib.pyplot as plt
# parameters
L = 11.0            # outer-edge half-width of the domain
r0 = 1.0            # canyon radius
n1, n2 = 301, 321   # grid dimensions (radial x angular)

# step sizes
# Bug fix: use integer division so ``n`` is an int — it is used below as a
# slice index (``x[-1, -n:]``), and a float index raises TypeError on Python 3.
n = (n2 - 1) // 2
dy0 = 0.5 * math.pi * r0 / (n2 - 1)
dy1 = L / n

# semicircle canyon: the innermost grid line traces the canyon surface
f = np.linspace(0.0, 0.5 * math.pi, n2)
x = np.empty([n1, n2])
y = np.empty([n1, n2])
x[0, :] = np.cos(f) * r0
y[0, :] = np.sin(f) * r0

# outer edge: the outermost grid line follows the square domain boundary
x[-1, :] = L
x[-1, -n:] = np.arange(n)[::-1] * dy1
y[-1, :] = L
y[-1, :n] = np.arange(n) * dy1

# blend: interpolate interior grid lines between the canyon surface and the
# outer edge, with radial spacing growing smoothly from 2*dy0 to dy1
w = np.cumsum(np.linspace(2.0 * dy0, dy1, n1 - 1))
w = w / w[-1]
for i in range(1, n1 - 1):
    x[i, :] = (1.0 - w[i - 1]) * x[0, :] + w[i - 1] * x[-1, :]
    y[i, :] = (1.0 - w[i - 1]) * y[0, :] + w[i - 1] * y[-1, :]

# print mesh properties (step-size / time-step guidance for the solver)
vp = 2.0
dy = y[0, 1] - y[0, 0]
dt = dy * 1.5 / vp / math.sqrt(3.0)
print('shape = ', (n1, n2))
print('nt > ', L / vp / dt)
print('dt < ', dt)
print('L = ', L)
print('L / n = ', L / n)
print('dx00 = ', (x[1, 0] - x[0, 0], y[0, 1] - y[0, 0]))
print('dx01 = ', (x[0, -2] - x[0, -1], y[1, -1] - y[0, -1]))
print('dx10 = ', (x[-1, 0] - x[-2, 0], y[-1, 1] - y[-1, 0]))
print('dx11 = ', (x[-1, -2] - x[-1, -1], y[-1, -1] - y[-2, -1]))

# write files
p = 'repo/Canyon'
# Robustness: do not fail if the output directory already exists (re-runs).
os.makedirs(p, exist_ok=True)
os.chdir(p)
x.T.astype('f').tofile('x.bin')
y.T.astype('f').tofile('y.bin')

# plot (decimated by a factor of 10 for readability; mirrored about x=0)
fig = plt.gcf()
fig.clf()
ax = fig.add_subplot(111)
d = 10
x, y = x[::d, ::d], y[::d, ::d]
ax.plot(x, y, 'k-')
ax.plot(-x, y, 'k-')
ax.plot(x.T, y.T, 'k-')
ax.plot(-x.T, y.T, 'k-')
ax.axis('scaled')
ax.axis([-2, 2, 2, -0.2])
fig.savefig('Canyon-Mesh.svg')
fig.show()
| 2.546875 | 3 |
setup.py | michael-olney/hotpatch | 4 | 12757489 | <reponame>michael-olney/hotpatch
from setuptools import setup

# Package metadata for the hotpatch library.
setup(
    name='hotpatch',
    version='0.1',
    description='',
    url='http://github.com/michael-olney/hotpatch',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=['hotpatch'],
    zip_safe=False,
    # Runtime dependency used for bytecode manipulation.
    install_requires=['bytecode'])
| 1.039063 | 1 |
src/displaythermal.py | wemakerspace/MLX90640_Serial_Processing_Python | 4 | 12757490 | <reponame>wemakerspace/MLX90640_Serial_Processing_Python
import serial
import numpy as np
import cv2
import math
import time
# Read 24x32 thermal frames (768 comma-separated floats per line) from a
# serial port and display them as a colorised, scaled-up heatmap.
ser = serial.Serial("/dev/ttyACM0", 115200)
if ser.is_open == False:
    ser.open()

nmin = 0
nmax = 255
frames = 0
index = 0
temp = 0
# NOTE(review): a negative alpha inverts the image, which contradicts the
# documented (1.0-3.0) range below — confirm this is intentional.
alpha = -1.3  # Contrast control (1.0-3.0)
beta = 92  # Brightness control (0-100)
rad = 2  # blur kernel radius, adjustable at runtime via a/z keys
start = time.time()

while True:
    frames += 1
    recv = ser.readline()
    recv = recv.rstrip()  # strip the return character
    # next job, split on , stick the data in an array
    # NOTE(review): np.fromstring with sep is deprecated in recent NumPy;
    # consider np.array(recv.split(b','), dtype=float) — confirm version.
    data = np.fromstring(recv, dtype=float, count=-1, sep=',')  # get the data
    # print(data)
    heatmap = np.zeros((24, 32, 1), np.uint8)  # create the blank image to work from
    # add to the image
    index = 0
    if len(data) == 768:  # Check we have good data!
        for y in range(0, 24):
            for x in range(0, 32):
                # Map temperature to a pixel value: pixel = temp*10 - 100.
                val = (data[index] * 10) - 100
                # print(val)
                if math.isnan(val):
                    val = 0
                if val > 255:
                    val = 255
                heatmap[y, x] = (val)
                # Remember the centre pixel's raw value for the text overlay.
                if (y == 12) and (x == 16):
                    temp = data[index]
                index += 1
    heatmap = cv2.rotate(heatmap, cv2.ROTATE_90_CLOCKWISE)  # rotate
    heatmap = cv2.flip(heatmap, 1)  # flip heatmap
    # heatmap = cv2.normalize(heatmap,None,nmin,nmax,cv2.NORM_MINMAX)
    heatmap = cv2.convertScaleAbs(heatmap, alpha=alpha, beta=beta)  # Bright/Contrast
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)  # apply colormap
    heatmap = cv2.resize(heatmap, (240, 320), interpolation=cv2.INTER_CUBIC)  # Scale up!
    heatmap = cv2.blur(heatmap, (rad, rad))

    # Display the resulting frame
    cv2.namedWindow('Thermal', cv2.WINDOW_NORMAL)
    # display black box for our data
    cv2.rectangle(heatmap, (0, 0), (90, 15), (0, 0, 0), -1)
    # put text in the box
    cv2.putText(heatmap, 'Temp: ' + str(temp), (10, 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 255, 255), 1, cv2.LINE_AA)
    # draw crosshairs
    cv2.line(heatmap, (120, 150), (120, 170), (0, 0, 0), 1)  # vline
    cv2.line(heatmap, (110, 160), (130, 160), (0, 0, 0), 1)  # hline
    cv2.imshow('Thermal', heatmap)

    # Keyboard controls: q quit, a/z blur radius up/down (min 1),
    # d/c contrast up/down, s/x brightness up/down.
    res = cv2.waitKey(1)
    # print(res)
    if res == 113:  # q
        break
    if res == 97:  # a
        rad += 1
    if res == 122:  # z
        if rad == 1:
            rad = 1
        else:
            rad -= 1
    if res == 100:  # d
        alpha += 0.1
    if res == 99:  # c
        alpha -= 0.1
    if res == 115:  # s
        beta += 1
    if res == 120:  # x
        beta -= 1
    end = time.time()
    elapsed = end - start
    print(str(frames / elapsed) + " FPS")
cv2.destroyAllWindows()
| 2.640625 | 3 |
ProjectEuler/7_10001st_prime.py | DmitryTsybin/Study | 0 | 12757491 | <reponame>DmitryTsybin/Study<gh_stars>0
import math
def calculateDividers(number):
    """Return every divisor of *number* in the range [2, sqrt(number)]."""
    limit = int(math.sqrt(number))
    return [candidate for candidate in range(2, limit + 1)
            if number % candidate == 0]
# Collect primes by trial division until we have the 10001st one.
simpleNumbers = [2]
i = 3
while (len(simpleNumbers) < 10001):
    # A number with no divisors in [2, sqrt(n)] is prime.
    if len(calculateDividers(i)) == 0:
        simpleNumbers.append(i)
    # Even numbers greater than 2 are never prime, so step by 2.
    i += 2
print(simpleNumbers)
# The answer: the 10001st prime.
print(simpleNumbers[-1])
| 3.53125 | 4 |
MultiSC/MultiServer/Exceptions/TestException.py | hvuhsg/MultiServer | 8 | 12757492 | from __config__.protocol_code_config import server_error
from .MainException import MainException
class TestException(MainException):
    """Exception used for testing, reported with the generic server_error code."""

    def __init__(self, msg=""):
        # Severity level passed to the MainException base class.
        self.level = 1
        # Protocol status code taken from the protocol-code config.
        self.error_code = server_error
        super().__init__(msg, self.level)
| 2.296875 | 2 |
vae/activations.py | lleonart1984/VAE-modeling | 0 | 12757493 | import torch.nn as nn
import torch.functional as F
class ShiftedReLU(nn.Module):
    """ReLU applied to the input shifted up by a constant offset.

    Computes ``relu(x + offset)``, i.e. the activation threshold moves
    from 0 to ``-offset``.
    """

    def __init__(self, offset=1):
        super().__init__()
        # Amount added to the input before applying ReLU.
        self.offset = offset

    def forward(self, x):
        # Bug fix: the module-level alias ``F`` is ``torch.functional``,
        # which does not expose ``relu``; use ``torch.nn.functional``.
        return nn.functional.relu(x + self.offset)
class ShiftedSoftplus(nn.Module):
    """Softplus applied to the input shifted down by a constant offset.

    Computes ``softplus(x - offset)``, moving the soft threshold from 0
    to ``offset``.
    """

    def __init__(self, offset=1):
        super().__init__()
        # Amount subtracted from the input before applying softplus.
        self.offset = offset

    def forward(self, x):
        # Bug fix: the module-level alias ``F`` is ``torch.functional``,
        # which does not expose ``softplus``; use ``torch.nn.functional``.
        return nn.functional.softplus(x - self.offset)
| 2.734375 | 3 |
tests/algorithms/memory/test_bam.py | FrostByte266/neupy | 801 | 12757494 | import pickle
import numpy as np
from neupy import algorithms
from neupy.exceptions import NotTrained
from algorithms.memory.data import zero, one, half_one, half_zero
from base import BaseTestCase
from helpers import vectors_for_testing
zero_hint = np.array([[0, 1, 0, 0]])
one_hint = np.array([[1, 0, 0, 0]])
class BAMTestCase(BaseTestCase):
    """Unit tests for the discrete Bidirectional Associative Memory network."""

    def setUp(self):
        super(BAMTestCase, self).setUp()
        # Stacked input patterns and their associated hint (output) patterns.
        self.data = np.concatenate([zero, one], axis=0)
        self.hints = np.concatenate([zero_hint, one_hint], axis=0)

    def test_bam_exceptions(self):
        """Prediction before training raises NotTrained; shape mismatch raises ValueError."""
        with self.assertRaises(NotTrained):
            dbnet = algorithms.DiscreteBAM()
            dbnet.predict(np.array([0, 1]))
        with self.assertRaises(NotTrained):
            dbnet = algorithms.DiscreteBAM()
            dbnet.predict_input(np.array([0, 1]))
        with self.assertRaises(ValueError):
            dbnet = algorithms.DiscreteBAM()
            dbnet.weight = np.array([[0, 1], [1, 0]])
            dbnet.train(np.array([0, 1, 1]), np.array([0, 1]))

    def test_bam_X_validation(self):
        """Non-binary values (e.g. -1) are rejected by train/energy/predict."""
        dbnet = algorithms.DiscreteBAM()
        dbnet.weight = np.array([[0, 1], [1, 0]])
        with self.assertRaises(ValueError):
            # Invalid discrete input values
            dbnet.train(np.array([-1, 1]), np.array([0, 1]))
        with self.assertRaises(ValueError):
            dbnet.train(np.array([0, 1]), np.array([-1, 1]))
        with self.assertRaises(ValueError):
            dbnet.energy(np.array([-1, 1]), np.array([0, 1]))
        with self.assertRaises(ValueError):
            dbnet.energy(np.array([0, 1]), np.array([-1, 1]))
        with self.assertRaises(ValueError):
            dbnet.predict(np.array([-1, 1]))

    def test_discrete_bam_storage(self):
        """A trained network must survive a pickle round-trip unchanged."""
        network = algorithms.DiscreteBAM(mode='sync')
        network.train(self.data, self.hints)
        stored_network = pickle.dumps(network)
        loaded_network = pickle.loads(stored_network)
        network_prediction = network.predict(self.data)
        loaded_network_prediction = loaded_network.predict(self.data)
        np.testing.assert_array_almost_equal(
            loaded_network_prediction[0], network_prediction[0])
        np.testing.assert_array_almost_equal(
            loaded_network_prediction[1], network_prediction[1])

    def test_discrete_bam_sync(self):
        """Synchronous mode recalls patterns in both directions, without
        mutating the inputs, for 1d, 2d and stacked arrays."""
        bamnet = algorithms.DiscreteBAM(mode='sync')
        bamnet.train(self.data, self.hints)
        data_before = self.data.copy()
        hints_before = self.hints.copy()
        np.testing.assert_array_almost_equal(
            bamnet.predict(half_zero)[1],
            zero_hint
        )
        np.testing.assert_array_almost_equal(
            bamnet.predict_output(half_one)[1],
            one_hint
        )
        np.testing.assert_array_almost_equal(
            bamnet.predict_input(zero_hint)[0],
            zero
        )
        np.testing.assert_array_almost_equal(
            bamnet.predict_input(one_hint)[0],
            one
        )
        # Test 1d input array prediction
        np.testing.assert_array_almost_equal(
            bamnet.predict_input(one_hint.ravel())[0],
            one
        )
        # Test 1d output array input prediction
        np.testing.assert_array_almost_equal(
            bamnet.predict_output(half_one.ravel())[1],
            one_hint
        )
        # Test multiple input values prediction
        input_matrix = np.vstack([one, zero])
        output_matrix = np.vstack([one_hint, zero_hint])
        output_matrix_before = output_matrix.copy()
        input_matrix_before = input_matrix.copy()
        np.testing.assert_array_almost_equal(
            bamnet.predict_input(output_matrix)[0],
            input_matrix
        )
        np.testing.assert_array_almost_equal(
            bamnet.predict(input_matrix)[1],
            output_matrix
        )
        # Prediction must not modify its inputs.
        np.testing.assert_array_equal(self.data, data_before)
        np.testing.assert_array_equal(self.hints, hints_before)
        np.testing.assert_array_equal(output_matrix, output_matrix_before)
        np.testing.assert_array_equal(input_matrix, input_matrix_before)

    def test_discrete_bam_async(self):
        """Asynchronous mode (400 update steps) recalls stacked patterns."""
        bamnet = algorithms.DiscreteBAM(mode='async', n_times=400)
        data_before = self.data.copy()
        hints_before = self.hints.copy()
        bamnet.train(self.data, self.hints)
        input_matrix = np.vstack([one, zero])
        output_matrix = np.vstack([one_hint, zero_hint])
        output_matrix_before = output_matrix.copy()
        input_matrix_before = input_matrix.copy()
        np.testing.assert_array_almost_equal(
            bamnet.predict_input(output_matrix)[0],
            input_matrix
        )
        np.testing.assert_array_almost_equal(
            bamnet.predict_output(input_matrix)[1],
            output_matrix
        )
        # Prediction must not modify its inputs.
        np.testing.assert_array_equal(self.data, data_before)
        np.testing.assert_array_equal(self.hints, hints_before)
        np.testing.assert_array_equal(output_matrix, output_matrix_before)
        np.testing.assert_array_equal(input_matrix, input_matrix_before)

    def test_bam_argument_in_predict_method(self):
        """The n_times argument of predict_output overrides the constructor's."""
        dbnet = algorithms.DiscreteBAM(mode='async', n_times=1)
        dbnet.train(self.data, self.hints)
        # One async step is not enough to converge...
        self.assertTrue(np.any(one != dbnet.predict_output(half_one)[0]))
        # ...but 100 steps are.
        np.testing.assert_array_almost_equal(
            one, dbnet.predict_output(half_one, n_times=100)[0])

    def test_bam_energy_function(self):
        """Energy values for trained, zero and complement patterns."""
        input_vector = np.array([[1, 0, 0, 1, 1, 0, 0]])
        output_vector = np.array([[1, 0]])
        dbnet = algorithms.DiscreteBAM()
        dbnet.train(input_vector, output_vector)
        self.assertEqual(-7, dbnet.energy(input_vector, output_vector))
        self.assertEqual(0, dbnet.energy(
            np.array([[0, 0, 0, 0, 0, 0, 0]]),
            np.array([[0, 0]])
        ))
        self.assertEqual(-7, dbnet.energy(
            np.array([[0, 1, 1, 0, 0, 1, 1]]),
            np.array([[0, 1]])
        ))
        # Test 1d array
        self.assertEqual(-7, dbnet.energy(
            np.array([0, 1, 1, 0, 0, 1, 1]),
            np.array([0, 1])
        ))
        # Test multiple input values energy calculation
        np.testing.assert_array_almost_equal(
            np.array([-7, 0]),
            dbnet.energy(
                np.array([
                    [0, 1, 1, 0, 0, 1, 1],
                    [0, 0, 0, 0, 0, 0, 0],
                ]),
                np.array([
                    [0, 1],
                    [0, 0],
                ])
            )
        )

    def test_bam_train_different_inputs(self):
        """Training accepts lists, 1d and 2d arrays interchangeably."""
        self.assertInvalidVectorTrain(
            algorithms.DiscreteBAM(),
            np.array([1, 0, 0, 1]),
            np.array([1, 0]),
            is_feature1d=False)

    def test_bam_predict_different_inputs(self):
        """Prediction gives the same target for every equivalent input shape."""
        bamnet = algorithms.DiscreteBAM()
        data = np.array([[1, 0, 0, 1]])
        target = np.array([[1, 0]])
        bamnet.train(data, target)
        test_vectors = vectors_for_testing(
            data.reshape(data.size), is_feature1d=False)
        for test_vector in test_vectors:
            np.testing.assert_array_almost_equal(
                bamnet.predict(test_vector)[1], target)
| 2.375 | 2 |
page/templatetags/base_tags.py | fourfridays-org/wagtail-batteries-included | 0 | 12757495 | from django import template
from page.models import ExternalAccount
register = template.Library()
...
# ExternalAccount snippets
@register.inclusion_tag('tags/external_account.html', takes_context=True)
def external_accounts(context):
    """Render every ExternalAccount snippet, passing through the request."""
    return {
        'external_accounts': ExternalAccount.objects.all(),
        'request': context['request'],
    }
guess_my_number.py | MitziRobertson/guessmynumber | 0 | 12757496 | <gh_stars>0
# Guessing game based on an idea from Teach Your Kids to Code by <NAME>
import random
# Ask the user for the inclusive range of the secret number.
lowest = int(input("please enter the lowest number: "))  # typo fix: "plesas"
biggest = int(input("please enter the biggest number: "))
# Robustness: swap the bounds if given in the wrong order, so that
# random.randint() cannot raise ValueError.
if lowest > biggest:
    lowest, biggest = biggest, lowest
the_number = random.randint(lowest, biggest)
print("Guess a number between ", lowest, " and ", biggest)
guess = int(input(": "))
while guess != the_number:
    # The two branches are mutually exclusive, so use elif.
    if guess > the_number:
        print(guess, "was too high. try again.")
    elif guess < the_number:
        print(guess, "was too low. try again:")
    guess = int(input("Guess again: "))
print(guess, "was the number! you win!")
| 4.15625 | 4 |
cnn_model.py | FedericaLucia/CNN_model | 0 | 12757497 | <filename>cnn_model.py<gh_stars>0
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Input, merge, Reshape, Dropout
from keras.layers.merge import Dot
from keras.constraints import max_norm
FILEPATH = '../models/model.weights.best.hdf5'
class CnnModel(object):
    ''' Convolutional neural network model for question pair similarity.

    Two input sentences pass through shared convolution + max-pooling
    towers; the flattened feature vectors are compared with cosine
    similarity to produce the similarity score.
    '''

    def __init__(self, filter_size, strides, padding, embedding_len, activation, filters, k_initialization, b_initialization, input_shape, bias):
        # initialize the model; weights are checkpointed to self.filepath
        self.filepath = FILEPATH
        self.cnn_model = self._build_model(filter_size, strides, padding, embedding_len, activation, filters, k_initialization,
                                           b_initialization, input_shape, bias)

    @classmethod
    def from_weights(cls, filepath, filter_size, strides, padding, embedding_len, activation, filters, k_initialization,
                     b_initialization, input_shape, bias):
        # Alternate constructor: build the model, then load saved weights.
        self = cls(filter_size, strides, padding, embedding_len, activation, filters,
                   k_initialization, b_initialization, input_shape, bias)
        self.filepath = filepath
        self.cnn_model.load_weights(filepath)
        return self

    def _convolutional_layer(self, filter_size, strides, padding,
                             embedding_len, activation,
                             bias, k_initialization, b_initialization, filters):
        '''
        Defines a list of 2D convolution operations.
        :param filter_size: Integer, the dimensionality of the output space.
        :param strides: An integer or tuple/list of 2 integers, specifying the stride of the convolution.
        :param padding: Type of padding, one of 'valid' or 'same'.
        :param embedding_len: An integer, specifying the width of the 2D convolution window.
        :param activation: Activation function to use.
        :param bias: Boolean, whether the layer uses a bias vector.
        :param k_initialization: Initializer for the kernel weight matrix.
        :param b_initialization: Initializer for the bias vector.
        :param filters: A list of integer, specifying the different heights of the 2D convolution window.
        :return: A list of 2D convolution operations.
        '''
        # One convolution per filter height; kernels span the full embedding
        # width, and kernel weights are clipped with a max-norm constraint.
        cnns = [Conv2D(filters=filter_size, kernel_size=(filter_len, embedding_len),
                       strides=strides, padding=padding, activation=activation,
                       use_bias=bias, kernel_initializer=k_initialization,
                       bias_initializer=b_initialization, kernel_constraint=max_norm(4.)) for filter_len in filters]
        return cnns

    def _input_sentence(self, shape):
        '''
        Defines the input shape.
        :param shape: Tuple of input shape
        :return: A tensor with shape (None, shape)
        '''
        return Input(shape=shape)

    def _cnn_sentences_layer(self, cnns, sentence):
        '''
        Computes a list of 2D convolution operations on an input sentence.
        :param cnns: a list of 2D convolution operations.
        :param sentence: input sentence.
        :return: A list of 2D convolution layer.
        '''
        return [cnn(sentence) for cnn in cnns]

    def _max_pool_sentences_layer(self, models, sentence_len, filters):
        '''
        Computes 2D max pooling operation.
        :param models: List of input tensors.
        :param sentence_len: Integer, the length of the sentence.
        :param filters: List of filters.
        :return: A list of tensor from the 2D max pooling operation.
        '''
        # Pool over the full valid-convolution output length per filter height.
        return [MaxPooling2D(pool_size=(sentence_len - filter_len + 1, 1))(model) for model, filter_len in zip(models, filters)]

    def _merge_concat_layer(self, model):
        '''
        Concatenates a list of tensors.
        :param model: Tensors to concatenate
        :return: A tensor from the concatenate operation.
        '''
        return merge(model, mode='concat')

    def _merge_cosim_layer(self, model_1, model_2):
        '''
        Computes the cosine similarity between two tensors.
        :param model_1: The first tensor.
        :param model_2: The second tensor.
        :return: The cosine similarity value between model_1 and model_2.
        '''
        # normalize=True makes the dot product a cosine similarity.
        return Dot(axes=1, normalize=True)([model_1, model_2])

    def _build_model(self, filter_size, strides, padding, embedding_len, activation, filters, k_initialization, b_initialization, input_shape, bias, dropout=0.3):
        '''
        Defines the convolutional neural network model.
        :param filter_size: Number of output.
        :param strides: Stride.
        :param padding: Padding value.
        :param embedding_len: Filter width.
        :param activation: Activation function.
        :param filters: List of integer - filters heights.
        :param k_initialization: Kernel initialization value.
        :param b_initialization: Bias initialization values
        :param input_shape: Input shape
        :param bias: Boolean, whether to use bias.
        :param dropout: Dropout value.
        :return: Convolutional neural network model.
        '''
        sentence_len = input_shape[0]
        # define input
        sentence_1_input = self._input_sentence(input_shape)
        sentence_2_input = self._input_sentence(input_shape)
        # convolutional layer — the same cnns list is applied to both inputs,
        # so the convolution weights are shared between the two sentences.
        cnns = self._convolutional_layer(filter_size, strides, padding,
                                         embedding_len, activation,
                                         bias, k_initialization, b_initialization, filters)
        ## sentence 1 convolutional layer
        sentence_1_cnn_layer = self._cnn_sentences_layer(cnns, sentence_1_input)
        ## add dropout regularization parameter
        sentence_1_cnn_layer = [Dropout(dropout)(cnn) for cnn in sentence_1_cnn_layer]
        ## sentence 2 convolutional layer
        sentence_2_cnn_layer = self._cnn_sentences_layer(cnns, sentence_2_input)
        ## add dropout regularization parameter
        sentence_2_cnn_layer = [Dropout(dropout)(cnn) for cnn in sentence_2_cnn_layer]
        # Max pooling layer
        ## sentence 1 max pooling layer
        sentence_1_max_pool = self._max_pool_sentences_layer(sentence_1_cnn_layer, sentence_len, filters)
        ## Sentence 2 max pooling layer
        sentence_2_max_pool = self._max_pool_sentences_layer(sentence_2_cnn_layer, sentence_len, filters)
        # concat layer
        ## Sentence 1 concat layer
        sentence_1_concat = self._merge_concat_layer(sentence_1_max_pool)
        ## sentence 2 concat layer
        sentence_2_concat = self._merge_concat_layer(sentence_2_max_pool)
        # Flatten layer
        ## sentence 1 flatten layer
        sentence_1_flatten = Reshape((-1, ))(sentence_1_concat)
        ## Sentence 2 Flatten layer
        sentence_2_flatten = Reshape((-1, ))(sentence_2_concat)
        # Merge with cosine similarity layer
        dot = self._merge_cosim_layer(sentence_1_flatten, sentence_2_flatten)
        model = Model([sentence_1_input, sentence_2_input], [dot])
        return model

    def compile(self, loss, optimizer, metrics):
        '''
        Configures the model for training.
        :param loss: String (name of objective function) or objective function.
        :param optimizer: String (name of optimizer) or optimizer instance.
        :param metrics: list of metrics to be evaluated by the model during training and testing.
        '''
        self.cnn_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    def train(self, X_train, y_train, batch_size, epochs, validation_data, verbose=2, shuffle=True):
        '''
        Trains the model for a fixed number of epochs.
        :param X_train: List of Numpy arrays of training data.
        :param y_train: List of Numpy arrays of target data.
        :param batch_size: Number of samples per gradient update.
        :param epochs: Number of epochs to train the model.
        :param validation_data: Tuple on which to evaluate the loss and any model metric at the end of each epoch.
        :param verbose: Verbosity mode - 0, 1, 2.
        :param shuffle: Boolean (True or False)- whether to shuffle the training data before each epoch.
        '''
        # Checkpoint only the best weights (by validation loss) to filepath.
        checkpointer = ModelCheckpoint(filepath=self.filepath, verbose=1,
                                       save_best_only=True)
        self.cnn_model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
                           validation_data=validation_data,
                           callbacks=[checkpointer],
                           verbose=verbose, shuffle=shuffle)

    def evaluate(self, X_test, y_test, verbose=0):
        '''
        Returns the loss value and metrics values for the model in test mode.
        :param X_test: List of Numpy array of test data.
        :param y_test: List of Numpy array of target data.
        :param verbose: Verbosity mode 0 or 1.
        :return: List of scalar - test loss and metrics values.
        '''
        return self.cnn_model.evaluate(X_test, y_test, verbose=verbose)

    def predict(self, x):
        '''
        Generates output predictions for the input samples.
        :param x: List of Numpy array of the input data.
        :return: Numpy array of predictions.
        '''
        return self.cnn_model.predict(x)

    def summary(self):
        '''
        Prints the summary representation of the model.
        '''
        self.cnn_model.summary()
| 2.765625 | 3 |
c2cwsgiutils/pyramid_logging.py | arnaud-morvan/c2cwsgiutils | 0 | 12757498 | <gh_stars>0
"""
Install a filter on the logging handler to add some info about requests:
* client_addr
* method
* matched_route
* path
A pyramid event handler is installed to setup this filter for the current request.
"""
import json
import logging
import logging.config
import os
import socket
import sys
from typing import Any, MutableMapping, Mapping, IO, Optional
import cee_syslog_handler
from pyramid.threadlocal import get_current_request
LOG = logging.getLogger(__name__)
class _PyramidFilter(logging.Filter):
    """
    A logging filter that adds request information to CEE logs.
    """
    def filter(self, record: Any) -> bool:
        # Thread-local lookup; returns None when called outside a request.
        request = get_current_request()
        if request is not None:
            record.client_addr = request.client_addr
            record.method = request.method
            if request.matched_route is not None:
                record.matched_route = request.matched_route.name
            record.path = request.path
            record.request_id = request.c2c_request_id
        # Expose the level under a stable field name for downstream consumers.
        record.level_name = record.levelname
        # Never drop records; this filter only annotates them.
        return True
# Single shared filter instance attached to every handler defined below.
_PYRAMID_FILTER = _PyramidFilter()
def _un_underscore(message: MutableMapping[str, Any]) -> Mapping[str, Any]:
"""
Elasticsearch is not indexing the fields starting with underscore and cee_syslog_handler is starting
a lot of interesting fields with underscore. Therefore, it's a good idea to remove all those underscore
prefixes.
"""
for key, value in list(message.items()):
if key.startswith('_'):
new_key = key[1:]
if new_key not in message:
del message[key]
message[new_key] = value
return message
def _rename_field(dico: MutableMapping[str, Any], source: str, dest: str) -> None:
if source in dico:
dico[dest] = dico[source]
del dico[source]
def _make_message_dict(*args: Any, **kargv: Any) -> Mapping[str, Any]:
    """
    patch cee_syslog_handler to rename message->full_message otherwise this part is dropped by syslog.

    Also renames a few fields for consistency and strips leading underscores
    so Elasticsearch will index them.
    """
    msg = cee_syslog_handler.make_message_dict(*args, **kargv)
    if msg['message'] != msg['short_message']:
        # only output full_message if it's different from short message
        msg['full_message'] = msg['message']
        del msg['message']
    # NOTE(review): when message == short_message, the 'message' key is kept and
    # duplicates 'msg' below -- presumably harmless; confirm against consumers.
    # make the output more consistent with the one from java
    _rename_field(msg, 'short_message', 'msg')
    _rename_field(msg, 'facility', 'logger_name')
    return _un_underscore(msg)
class PyramidCeeSysLogHandler(cee_syslog_handler.CeeSysLogHandler):  # type: ignore
    """
    A CEE (JSON format) log handler with additional information about the current request.
    """
    def __init__(self, *args: Any, **kargv: Any) -> None:
        super().__init__(*args, **kargv)
        # Annotate every record with request info (client_addr, path, ...).
        self.addFilter(_PYRAMID_FILTER)

    def format(self, record: Any) -> str:
        # _fqdn/_debugging_fields/... are set by the CeeSysLogHandler base class.
        message = _make_message_dict(record, self._fqdn, self._debugging_fields, self._extra_fields,
                                     self._facility, self._static_fields)
        # "@cee:" is the structured-syslog cookie marking a JSON payload.
        return ": @cee: %s" % json.dumps(message)
class JsonLogHandler(logging.StreamHandler):
    """
    Log to stdout in JSON.
    """
    def __init__(self, stream: Optional[IO[str]] = None):
        super().__init__(stream)
        # Annotate every record with request info (client_addr, path, ...).
        self.addFilter(_PYRAMID_FILTER)
        self._fqdn = socket.getfqdn()

    def format(self, record: Any) -> str:
        # Always include debugging/extra fields; no syslog facility in this mode.
        message = _make_message_dict(record, self._fqdn, debugging_fields=True, extra_fields=True,
                                     facility=None, static_fields={})
        return json.dumps(message)
def init(configfile: Optional[str] = None) -> Optional[str]:
    """
    Initialize the logging subsystem.

    Uses the given config file, or the one named by the C2CWSGIUTILS_CONFIG
    environment variable (default /app/production.ini). Environment variables
    are made available as defaults for the config file's interpolation.

    :return: the path of the config file actually used, or None when it does
             not exist and a basic stderr configuration was installed instead.
    """
    logging.captureWarnings(True)
    if configfile is not None:
        config_path = configfile
    else:
        config_path = os.environ.get('C2CWSGIUTILS_CONFIG', "/app/production.ini")
    if not os.path.isfile(config_path):
        logging.basicConfig(level=logging.DEBUG,
                            format="%(asctime)-15s %(levelname)5s %(name)s %(message)s",
                            stream=sys.stderr)
        return None
    logging.config.fileConfig(config_path, defaults=dict(os.environ))
    return config_path
| 2.15625 | 2 |
airlock_processor/tests/test_copy_data.py | chboudry/AzureTRE | 0 | 12757499 | from json import JSONDecodeError
import unittest
from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy
class TestPropertiesExtraction(unittest.TestCase):
    """Unit tests for StatusChangedQueueTrigger.extract_properties."""

    def test_extract_prop_valid_body_return_all_values(self):
        # A payload containing every required field parses into a properties object.
        msg = "{ \"data\": { \"request_id\":\"123\",\"status\":\"456\" , \"type\":\"789\", \"workspace_id\":\"ws1\" }}"
        req_prop = extract_properties(msg)
        self.assertEqual(req_prop.request_id, "123")
        self.assertEqual(req_prop.status, "456")
        self.assertEqual(req_prop.type, "789")
        self.assertEqual(req_prop.workspace_id, "ws1")

    def test_extract_prop_missing_arg_throws(self):
        # Omitting any one of the four required fields must raise.
        msg = "{ \"data\": { \"status\":\"456\" , \"type\":\"789\", \"workspace_id\":\"ws1\" }}"  # no request_id
        self.assertRaises(Exception, extract_properties, msg)
        msg = "{ \"data\": { \"request_id\":\"123\", \"type\":\"789\", \"workspace_id\":\"ws1\" }}"  # no status
        self.assertRaises(Exception, extract_properties, msg)
        msg = "{ \"data\": { \"request_id\":\"123\",\"status\":\"456\" , \"workspace_id\":\"ws1\" }}"  # no type
        self.assertRaises(Exception, extract_properties, msg)
        msg = "{ \"data\": { \"request_id\":\"123\",\"status\":\"456\" , \"type\":\"789\" }}"  # no workspace_id
        self.assertRaises(Exception, extract_properties, msg)

    def test_extract_prop_invalid_json_throws(self):
        # Non-JSON input surfaces the underlying decode error.
        msg = "Hi"
        self.assertRaises(JSONDecodeError, extract_properties, msg)
class TestDataCopyProperties(unittest.TestCase):
    """Unit tests for the data-copy decision helpers."""

    def test_only_specific_status_are_triggering_copy(self):
        # Statuses that must NOT trigger a data copy.
        self.assertEqual(is_require_data_copy("Mitzi"), False)
        self.assertEqual(is_require_data_copy(""), False)
        self.assertEqual(is_require_data_copy("submit"), False)
        # Testing all values that should return true (matching is case-insensitive)
        self.assertEqual(is_require_data_copy("submITted"), True)
        self.assertEqual(is_require_data_copy("submitted"), True)
        self.assertEqual(is_require_data_copy("approved"), True)
        self.assertEqual(is_require_data_copy("REJected"), True)
        self.assertEqual(is_require_data_copy("blocked"), True)

    def test_wrong_status_raises_when_getting_storage_account_properties(self):
        # Unknown status must raise regardless of request type.
        self.assertRaises(Exception, get_source_dest_env_vars, "Miaow", "import")

    def test_wrong_type_raises_when_getting_storage_account_properties(self):
        # Unknown request type must raise even for a valid status.
        self.assertRaises(Exception, get_source_dest_env_vars, "accepted", "somethingelse")
| 2.578125 | 3 |
tidepool_data_science_simulator/models/controller.py | tidepool-org/data-science-simulator | 3 | 12757500 | <reponame>tidepool-org/data-science-simulator
__author__ = "<NAME>"
import pickle as pk
import datetime
import copy
import numpy as np
from tidepool_data_science_simulator.models.simulation import SimulationComponent
from tidepool_data_science_simulator.models.measures import GlucoseTrace
from pyloopkit.loop_data_manager import update
from pyloopkit.dose import DoseType
class DoNothingController(SimulationComponent):
    """
    A controller that does nothing, which means that pump schedules
    are the only modulation.
    """
    def __init__(self, time, controller_config):
        self.name = "Do Nothing"
        self.time = time
        self.controller_config = controller_config

    def get_state(self):
        # This controller never produces recommendations.
        return None

    def update(self, time, **kwargs):
        # Do nothing
        pass
class LoopController(SimulationComponent):
    """
    A controller class for the Pyloopkit algorithm.

    Wraps pyloopkit's update() call: at each simulation step it assembles the
    algorithm inputs from the patient state, runs the algorithm, and applies
    the recommended temp basal to the patient's pump.
    """
    def __init__(self, time, loop_config, simulation_config):
        self.name = "PyLoopkit v0.1"
        self.time = time
        self.loop_config = copy.deepcopy(loop_config)
        self.recommendations = None

        # This is a hack to get this working quickly, it's too coupled to the input file format
        # Future: Collect the information for the various simulation components
        self.simulation_config = copy.deepcopy(simulation_config)

        # These are not used at the moment, but will be once we decouple from simulation config.
        self.model = loop_config["model"]
        self.momentum_data_interval = loop_config["momentum_data_interval"]
        self.suspend_threshold = loop_config["suspend_threshold"]
        self.dynamic_carb_absorption_enabled = loop_config[
            "dynamic_carb_absorption_enabled"
        ]
        self.retrospective_correction_integration_interval = loop_config[
            "retrospective_correction_integration_interval"
        ]
        self.recency_interval = loop_config["recency_interval"]
        self.retrospective_correction_grouping_interval = loop_config[
            "retrospective_correction_grouping_interval"
        ]
        self.rate_rounder = loop_config["rate_rounder"]
        self.insulin_delay = loop_config["insulin_delay"]
        self.carb_delay = loop_config["carb_delay"]
        self.default_absorption_times = loop_config["default_absorption_times"]
        self.max_basal_rate = loop_config["max_basal_rate"]
        self.max_bolus = loop_config["max_bolus"]
        self.retrospective_correction_enabled = loop_config[
            "retrospective_correction_enabled"
        ]

        self.ctr = -5  # TODO remove once we feel refactor is good

    def get_state(self):
        # TODO: make this a class with convenience functions
        return self.recommendations

    def prepare_inputs(self, virtual_patient):
        """
        Collect inputs to the loop update call for the current time.

        Note: TODO: MVP needs to conform to the current pyloopkit interface which
        needs a lot of info. In the future, expose pyloopkit interface
        that takes minimal state info for computing at time

        Parameters
        ----------
        virtual_patient: VirtualPatient
            Patient whose glucose history is fed to the Loop algorithm.

        Returns
        -------
        dict
            Keyword inputs for pyloopkit's update().
        """
        glucose_dates, glucose_values = virtual_patient.bg_history.get_loop_format()
        loop_inputs_dict = copy.deepcopy(self.simulation_config)
        loop_update_dict = {
            "time_to_calculate_at": self.time,
            "glucose_dates": glucose_dates,
            "glucose_values": glucose_values,
        }
        loop_inputs_dict.update(loop_update_dict)
        return loop_inputs_dict

    def update(self, time, **kwargs):
        """
        Using the virtual patient state, get the next action and apply it to patient,
        e.g. via pump.
        """
        self.time = time
        virtual_patient = kwargs["virtual_patient"]
        loop_inputs_dict = self.prepare_inputs(virtual_patient)
        # TODO remove once we feel refactor is good
        # Debugging Code for refactor
        # import os
        # from src.utils import findDiff
        # save_dir = "/Users/csummers/tmp"
        # in_fp = os.path.join(save_dir, "tmp_inputs_{}.pk".format(self.ctr))
        # other_inputs = pk.load(open(in_fp, "rb"))
        # print(findDiff(loop_inputs_dict, other_inputs))
        # assert other_inputs == loop_inputs_dict
        # out_fp = os.path.join(save_dir, "tmp_outputs_{}.pk".format(self.ctr))
        # other_outputs = pk.load(open(out_fp, "rb"))
        loop_algorithm_output = update(loop_inputs_dict)
        loop_algorithm_output.get("recommended_temp_basal")
        # TODO remove once we feel refactor is good
        # assert other_outputs == loop_algorithm_output
        # self.ctr += 5
        self.modulate_temp_basal(virtual_patient, loop_algorithm_output)
        self.recommendations = loop_algorithm_output

    def modulate_temp_basal(self, virtual_patient, loop_algorithm_output):
        """
        Set temp basal on the virtual patient's pump.

        Parameters
        ----------
        virtual_patient
        loop_algorithm_output
        """
        # Update the virtual_patient with any recommendations from loop
        if loop_algorithm_output.get("recommended_temp_basal") is not None:
            loop_temp_basal, duration = loop_algorithm_output.get(
                "recommended_temp_basal"
            )
            virtual_patient.pump.set_temp_basal(loop_temp_basal, "U")
            self.simulation_config["dose_values"].append(
                virtual_patient.pump.active_temp_basal.value
            )
        else:
            # If no recommendations, set a temp basal to the scheduled basal rate
            scheduled_basal_rate = virtual_patient.pump.get_state().scheduled_basal_rate
            virtual_patient.pump.set_temp_basal(scheduled_basal_rate.value, "U")
            self.simulation_config["dose_values"].append(
                virtual_patient.pump.get_state().scheduled_basal_rate.value
            )
        # Append dose info to simulation config.
        self.simulation_config["dose_types"].append(DoseType.tempbasal)
        self.simulation_config["dose_start_times"].append(self.time)
        next_time = self.time + datetime.timedelta(minutes=5)
        self.simulation_config["dose_end_times"].append(
            next_time
        )  # TODO: is this supposed to be 5 or 30 minutes?
class LoopControllerDisconnector(LoopController):
    """
    Loop controller that probabilistically loses connection, disallowing
    setting of temp basals while disconnected.
    """
    def __init__(self, time, loop_config, simulation_config, connect_prob):
        super().__init__(time, loop_config, simulation_config)
        self.name = "PyLoopkit v0.1, P(Connect)={}".format(connect_prob)
        self.original_time = copy.copy(time)
        self.connect_prob = connect_prob

    def is_connected(self):
        """
        Determine probabilistically if Loop is connected.

        Draws once from U(0, 1); connected iff the draw is below connect_prob.

        Returns
        -------
        bool
        """
        return bool(np.random.random() < self.connect_prob)

    def update(self, time, **kwargs):
        """
        Update the state of the controller and do actions, but only while
        the controller is connected.

        Parameters
        ----------
        time: datetime
        kwargs: VirtualPatient
        """
        self.time = time
        if not self.is_connected():
            # Disconnected: leave the pump untouched this cycle.
            return
        patient = kwargs["virtual_patient"]
        algorithm_inputs = self.prepare_inputs(patient)
        algorithm_output = update(algorithm_inputs)
        algorithm_output.get("recommended_temp_basal")
        self.modulate_temp_basal(patient, algorithm_output)
        self.recommendations = algorithm_output
| 2.203125 | 2 |
py/mcdl.py | bueler/mg-glaciers | 0 | 12757501 | <gh_stars>0
'''Module implementing the multilevel constraint decomposition (MCD) method
for the classical obstacle problem, i.e. for a linear interior PDE like the
Poisson equation.'''
__all__ = ['mcdlvcycle', 'mcdlfcycle', 'mcdlsolver']
import numpy as np
from monitor import indentprint, ObstacleMonitor
def _levelreport(indent, j, m, sweeps):
    """Print a per-level progress line, indented according to depth."""
    message = 'level %d: %d sweeps over m=%d nodes' % (j, sweeps, m)
    indentprint(indent - j, message)
def _coarsereport(indent, m, sweeps):
    """Print the coarsest-level progress line."""
    message = 'coarsest: %d sweeps over m=%d nodes' % (sweeps, m)
    indentprint(indent, message)
def mcdlvcycle(args, obsprob, J, hierarchy, ell, levels=None):
    '''Apply one V-cycle of the multilevel constraint decomposition method of
    Tai (2003).  This is stated in Alg. 4.7 in Graeser & Kornhuber (2009)
    as a down-slash V(1,0) cycle.  Our implementation allows any V(down,up)
    cycle.  Input args is a dictionary with parameters.  Input obsprob is
    of type SmootherObstacleProblem.  The smoother is projected Gauss-Seidel
    or projected Jacobi according to args.jacobi.
    Note hierarchy[j] is of type MeshLevel1D.  This method generates all defect
    constraints hierarchy[j].chi for j < J, but it uses hierarchy[J].chi, which
    must be set in advance.  The input linear functional ell is in V^J'.
    The coarse solver is the same as the smoother, thus not exact.'''
    # set up
    assert args.down >= 0 and args.up >= 0 and args.coarse >= 0
    hierarchy[J].checklen(ell)
    hierarchy[J].ell = ell
    # downward: smooth on each level and restrict residual and obstacle
    for k in range(J, 0, -1):
        # compute defect constraint using monotone restriction
        hierarchy[k-1].chi = hierarchy[k].mR(hierarchy[k].chi)
        # define down-obstacle
        phi = hierarchy[k].chi - hierarchy[k].cP(hierarchy[k-1].chi)
        # down smoother
        if args.mgview:
            _levelreport(levels-1, k, hierarchy[k].m, args.down)
        hierarchy[k].y = hierarchy[k].zeros()
        obsprob.smoother(args.down, hierarchy[k], hierarchy[k].y,
                         hierarchy[k].ell, phi)
        # update and canonically-restrict the residual
        hierarchy[k-1].ell = - hierarchy[k].cR(obsprob.residual(hierarchy[k],
                                                                hierarchy[k].y,
                                                                hierarchy[k].ell))
    # coarse mesh solver = smoother sweeps (thus not exact)
    if args.mgview:
        _coarsereport(levels-1, hierarchy[0].m, args.coarse)
    hierarchy[0].y = hierarchy[0].zeros()
    obsprob.smoother(args.coarse, hierarchy[0], hierarchy[0].y,
                     hierarchy[0].ell, hierarchy[0].chi)
    # upward: prolong and accumulate the level corrections
    z = hierarchy[0].y
    for k in range(1, J+1):
        # accumulate corrections
        z = hierarchy[k].cP(z) + hierarchy[k].y
        if args.up > 0:
            # up smoother; up-obstacle is chi[k] not phi (see paper)
            if args.mgview:
                _levelreport(levels-1, k, hierarchy[k].m, args.up)
            obsprob.smoother(args.up, hierarchy[k], z,
                             hierarchy[k].ell, hierarchy[k].chi)
    return z
def mcdlfcycle(args, obsprob, J, hierarchy):
    '''Apply an F-cycle, i.e. nested iteration, of the multilevel constraint
    decomposition method of Tai (2003).  This method calls mcdlvcycle().
    Returns the iterate w on the finest mesh hierarchy[J].'''
    assert args.ni
    phi = obsprob.phi(hierarchy[0].xx())
    w = phi.copy()
    for j in range(J+1):
        mesh = hierarchy[j]
        # create monitor on this mesh using exact solution if available
        uex = None
        if obsprob.exact_available():
            uex = obsprob.exact(mesh.xx())
        mon = ObstacleMonitor(obsprob, mesh, uex=uex,
                              printresiduals=args.monitor, printerrors=args.monitorerr)
        # how many cycles?
        iters = args.nicycles
        if args.nicascadic:
            # very simple model for number of cycles; compare Blum et al 2004
            iters *= int(np.ceil(1.5**(J-j)))
        # do V cycles
        ellf = mesh.ellf(obsprob.source(mesh.xx()))  # source functional ell[v] = <f,v>
        for s in range(iters):
            mon.irerr(w, ellf, phi, indent=J-j)  # print norms at stdout
            mesh.chi = phi - w  # defect obstacle
            ell = - obsprob.residual(mesh, w, ellf)  # starting source
            w += mcdlvcycle(args, obsprob, j, hierarchy, ell, levels=j+1)
        mon.irerr(w, ellf, phi, indent=J-j)
        # obstacle and initial iterate for next level; prolong and truncate current solution
        if j < J:
            phi = obsprob.phi(hierarchy[j+1].xx())
            w = np.maximum(phi, hierarchy[j+1].cP(w))
    return w
def mcdlsolver(args, obsprob, J, hierarchy, ellf, phi, w, monitor,
               iters=100, irnorm0=None):
    '''Apply V-cycles of the multilevel constraint decomposition method of
    Tai (2003) until convergence by an inactive residual norm tolerance.
    This method calls mcdlvcycle().

    The iterate w (a numpy array on hierarchy[J]) is updated in place.  At
    most iters cycles are run; iteration stops once the inactive residual
    norm drops below args.irtol times the initial norm irnorm0 (computed
    from monitor if not supplied).'''
    mesh = hierarchy[J]
    # fix: identity comparison with None (was "irnorm0 == None")
    if irnorm0 is None:
        irnorm0, _ = monitor.irerr(w, ellf, phi, indent=0)
    if irnorm0 == 0.0:
        # already exactly solved; nothing to do
        return
    for _ in range(iters):
        mesh.chi = phi - w                       # defect obstacle
        ell = - obsprob.residual(mesh, w, ellf)  # starting source
        w += mcdlvcycle(args, obsprob, J, hierarchy, ell, levels=J+1)
        irnorm, errnorm = monitor.irerr(w, ellf, phi, indent=0)
        if irnorm > 100.0 * irnorm0:
            # divergence warning; keep iterating so the monitor shows history
            print('WARNING: irnorm > 100 irnorm0')
        if irnorm <= args.irtol * irnorm0:
            break
| 2.71875 | 3 |
dace/codegen/instrumentation/__init__.py | tbennun/dace | 1 | 12757502 | from dace.dtypes import InstrumentationType
from .provider import InstrumentationProvider
from .papi import PAPIInstrumentation
from .timer import TimerProvider
from .cuda_events import CUDAEventProvider
# Maps each InstrumentationType to the provider class implementing it
# (None means no instrumentation is performed).
INSTRUMENTATION_PROVIDERS = {
    InstrumentationType.No_Instrumentation: None,
    InstrumentationType.PAPI_Counters: PAPIInstrumentation,
    InstrumentationType.Timer: TimerProvider,
    InstrumentationType.CUDA_Events: CUDAEventProvider
}
AI-City-Vehicle-Reid/train/data/samplers/triplet_sampler.py | he010103/Traffic-Brain | 15 | 12757503 | <reponame>he010103/Traffic-Brain
# encoding: utf-8
"""
@author: liaoxingyu
@contact: <EMAIL>
"""
import copy
import random
import torch
from collections import defaultdict, Counter
import numpy as np
from torch.utils.data.sampler import Sampler
class RandomIdentitySampler(Sampler):
    """
    Randomly sample N identities, then for each identity,
    randomly sample K instances, therefore batch size is N*K.

    Args:
    - data_source (list): list of (img_path, pid, camid).
    - num_instances (int): number of instances per identity in a batch.
    - batch_size (int): number of examples in a batch.
    """

    def __init__(self, data_source, batch_size, num_instances):
        self.data_source = data_source
        self.batch_size = batch_size
        self.num_instances = num_instances
        self.num_pids_per_batch = self.batch_size // self.num_instances
        # pid -> list of dataset indices belonging to that identity
        self.index_dic = defaultdict(list)
        for index, (_, pid, _, _) in enumerate(self.data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())

        # estimate number of examples in an epoch
        # NOTE(review): this is an estimate; __iter__ can yield fewer items
        # because remaining identities are dropped once fewer than
        # num_pids_per_batch are available -- confirm if exact length matters.
        self.length = 0
        for pid in self.pids:
            idxs = self.index_dic[pid]
            num = len(idxs)
            if num < self.num_instances:
                num = self.num_instances
            self.length += num - num % self.num_instances

    def __iter__(self):
        # Pre-chunk each identity's (shuffled) indices into groups of num_instances.
        batch_idxs_dict = defaultdict(list)

        for pid in self.pids:
            idxs = copy.deepcopy(self.index_dic[pid])
            if len(idxs) < self.num_instances:
                # oversample with replacement so the identity can fill one chunk
                idxs = list(np.random.choice(idxs, size=self.num_instances, replace=True))
            random.shuffle(idxs)
            batch_idxs = []
            for idx in idxs:
                batch_idxs.append(idx)
                if len(batch_idxs) == self.num_instances:
                    batch_idxs_dict[pid].append(batch_idxs)
                    batch_idxs = []

        # Repeatedly pick num_pids_per_batch identities and emit one chunk each.
        avai_pids = copy.deepcopy(self.pids)
        final_idxs = []

        while len(avai_pids) >= self.num_pids_per_batch:
            selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
            for pid in selected_pids:
                batch_idxs = batch_idxs_dict[pid].pop(0)
                final_idxs.extend(batch_idxs)
                if len(batch_idxs_dict[pid]) == 0:
                    avai_pids.remove(pid)

        return iter(final_idxs)

    def __len__(self):
        return self.length
class RandomTargetIdentitySampler(Sampler):
    """
    Randomly sample N identities, then for each identity sample num_instances
    images spread over num_cameras camera slots, therefore batch size is
    N * num_instances.

    Args:
    - data_source (list): list of (img_path, pid, camid, ...) records.
    - batch_size (int): number of examples in a batch.
    - num_instances (int): number of instances per identity in a batch.
    - num_cameras (int): number of camera slots drawn per identity.
    """

    def __init__(self, data_source, batch_size, num_instances, num_cameras):
        self.data_source = data_source
        self.batch_size = batch_size
        self.num_instances = num_instances
        self.num_pids_per_batch = self.batch_size // self.num_instances
        self.num_cids = num_cameras
        self.num_imgs_per_cid = self.num_instances // self.num_cids
        # pid -> camid -> list of dataset indices
        self.index_dic = defaultdict(lambda: defaultdict(list))
        for index, (_, pid, camid, _) in enumerate(self.data_source):
            self.index_dic[pid][camid].append(index)
        self.pids = list(self.index_dic.keys())

        # estimate number of examples in an epoch: each (pid, camid) bucket is
        # padded up to a multiple of num_instances
        self.length = 0
        for pid in self.pids:
            cids = self.index_dic[pid].keys()
            for cid in cids:
                num = len(self.index_dic[pid][cid])
                if num < self.num_instances:
                    num = self.num_instances
                self.length += int(np.ceil(num / self.num_instances) * self.num_instances)

    def __iter__(self):
        # Pre-chunk each (pid, camid) bucket into groups of num_imgs_per_cid.
        batch_idxs_dict = defaultdict(lambda: defaultdict(list))
        for pid in self.pids:
            cids = self.index_dic[pid].keys()
            for cid in cids:
                idxs = copy.deepcopy(self.index_dic[pid][cid])
                need_num = int(np.ceil(len(idxs) / self.num_instances) * self.num_instances)
                if len(idxs) < self.num_instances:
                    idxs = list(np.random.choice(idxs, size=self.num_instances, replace=True))
                random.shuffle(idxs)
                # pad with resampled indices so len(idxs) == need_num
                rem_num = 0
                if len(idxs) < need_num:
                    rem_num = need_num - len(idxs)
                add_idxs = list(np.random.choice(idxs, size=rem_num, replace=True))
                idxs += add_idxs
                batch_idxs_dict[pid][cid] = [idxs[i * self.num_imgs_per_cid: (i + 1) * self.num_imgs_per_cid]
                                             for i in range(int(len(idxs) / self.num_imgs_per_cid))]

        avai_pids = copy.deepcopy(self.pids)
        final_idxs = []
        while len(avai_pids) >= self.num_pids_per_batch:
            selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
            for pid in selected_pids:
                cids = self.index_dic[pid].keys()
                avai_cids = [cid for cid in cids if len(self.index_dic[pid][cid]) != 0]
                # rejection-sample camera ids until every selected camera still
                # has enough prepared chunks left
                while True:
                    # fix: removed the dead "idxs =" alias that shadowed the
                    # loop variable above without ever being read
                    selected_cids = np.random.choice(avai_cids, size=self.num_cids, replace=True)
                    cid_count = Counter(selected_cids)
                    check = True
                    for cid in cid_count:
                        if cid_count[cid] > len(batch_idxs_dict[pid][cid]):
                            check = False
                    if check:
                        break
                for cid in selected_cids:
                    batch_idxs = batch_idxs_dict[pid][cid].pop(0)
                    final_idxs.extend(batch_idxs)
                # retire the pid once all of its chunks have been consumed
                rem_num = 0
                for cid in batch_idxs_dict[pid]:
                    rem_num += len(batch_idxs_dict[pid][cid])
                if rem_num == 0:
                    avai_pids.remove(pid)
        return iter(final_idxs)

    def __len__(self):
        return self.length
# New add by gu
class RandomIdentitySampler_alignedreid(Sampler):
    """
    Sample num_instances examples for each identity, visiting the identities
    in a fresh random order on every epoch.

    Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/data/sampler.py.

    Args:
        data_source (Dataset): dataset to sample from.
        num_instances (int): number of instances per identity.
    """
    def __init__(self, data_source, num_instances):
        self.data_source = data_source
        self.num_instances = num_instances
        self.index_dic = defaultdict(list)
        for idx, (_, pid, _) in enumerate(data_source):
            self.index_dic[pid].append(idx)
        self.pids = list(self.index_dic.keys())
        self.num_identities = len(self.pids)

    def __iter__(self):
        chosen = []
        for perm_idx in torch.randperm(self.num_identities):
            candidates = self.index_dic[self.pids[perm_idx]]
            # sample with replacement only when the identity has too few images
            with_replacement = len(candidates) < self.num_instances
            picked = np.random.choice(candidates, size=self.num_instances,
                                      replace=with_replacement)
            chosen.extend(picked)
        return iter(chosen)

    def __len__(self):
        return self.num_identities * self.num_instances
| 2.3125 | 2 |
nyc_taxi_test.py | oliversu35/pyawsrcf | 0 | 12757504 | #!/usr/bin/env python3
import urllib.request
import pandas as pd
import rcf
if __name__ == "__main__":
    # Download the NAB NYC taxi dataset (known-cause real data).
    data_filename = 'nyc_taxi.csv'
    data_source = 'https://raw.githubusercontent.com/numenta/NAB/master/data/realKnownCause/nyc_taxi.csv'
    urllib.request.urlretrieve(data_source, data_filename)
    taxi_data = pd.read_csv(data_filename, delimiter=',')
    # rcf expects a 2-D array of shape (n_samples, n_features).
    taxi_input = taxi_data.value.to_numpy().reshape(-1,1)
    anomaly_scores = rcf.rcf(taxi_input)
    # Print a fixed slice of the scores as a quick sanity check.
    print(anomaly_scores[5600:5620])
| 3.046875 | 3 |
rap/server/registry.py | so1n/rap | 3 | 12757505 | import importlib
import inspect
import logging
import os
from collections import OrderedDict
from types import FunctionType
from typing import Any, Callable, Dict, List, Optional, Type, Union
from rap.common.channel import UserChannel
from rap.common.exceptions import FuncNotFoundError, RegisteredError
from rap.common.types import is_json_type
from rap.common.utils import constant
from rap.server.model import Request
logger: logging.Logger = logging.getLogger(__name__)
class FuncModel(object):
    """Metadata wrapper around a function registered with the RPC server.

    Captures the function's signature, group, type (normal/channel),
    visibility and documentation, and splits its parameters into
    positional (arg_list) and keyword-with-default (kwarg_dict) names.
    """

    def __init__(
        self,
        group: str,
        func_type: str,
        func: Callable,
        is_private: bool,
        doc: Optional[str] = None,
        func_name: Optional[str] = None,
    ) -> None:
        self.func_sig = inspect.signature(func)
        self.group: str = group
        self.func_type: str = func_type
        self.func: Callable = func
        # fix: use isgeneratorfunction (tests whether *func* is a generator
        # function); isgenerator only matches already-created generator
        # objects, so sync generator functions were never detected before.
        self.is_gen_func: bool = inspect.isgeneratorfunction(func) or inspect.isasyncgenfunction(func)
        self.is_private: bool = is_private
        self.doc: str = doc or func.__doc__ or ""
        self.func_name: str = func_name or func.__name__
        self.return_type: Type = self.func_sig.return_annotation
        self.arg_list: List[str] = []
        self.kwarg_dict: OrderedDict = OrderedDict()

        # A channel handler must not be a generator function.
        if self.func_type == constant.CHANNEL_TYPE and self.is_gen_func:
            raise RegisteredError("Is not a legal function. is channel or gen func?")

        for name, parameter in self.func_sig.parameters.items():
            if parameter.default is parameter.empty:
                self.arg_list.append(name)
            else:
                self.kwarg_dict[name] = parameter.default

    def to_dict(self) -> Dict[str, Any]:
        """Return the JSON-serializable public metadata of this function."""
        return {
            "group": self.group,
            "func_type": self.func_type,
            "is_gen_func": self.is_gen_func,
            "is_private": self.is_private,
            "doc": self.doc,
            "func_name": self.func_name,
        }
class RegistryManager(object):
    """server func manager

    Stores registered functions keyed by "<type>:<group>:<name>" and exposes
    built-in management endpoints (load/reload/list) in the private
    "registry" group.
    """

    def __init__(self) -> None:
        self._cwd: str = os.getcwd()
        self.func_dict: Dict[str, FuncModel] = dict()
        # built-in management endpoints, reserved in the private "registry" group
        self.register(self._load, "load", group="registry", is_private=True)
        self.register(self._reload, "reload", group="registry", is_private=True)
        self.register(self.get_register_func_list, "list", group="registry", is_private=True)

    @staticmethod
    def gen_key(group: str, name: str, type_: str) -> str:
        """gen func key"""
        return f"{type_}:{group}:{name}"

    @staticmethod
    def _get_func_type(func: Callable) -> str:
        """get func type, normal or channel"""
        sig: "inspect.Signature" = inspect.signature(func)
        func_arg_parameter: List[inspect.Parameter] = [i for i in sig.parameters.values() if i.default == i.empty]
        func_type: str = constant.NORMAL_TYPE
        try:
            # a channel func takes exactly one required arg annotated as UserChannel
            if len(func_arg_parameter) == 1 and issubclass(func_arg_parameter[0].annotation, UserChannel):
                func_type = constant.CHANNEL_TYPE
        except TypeError:
            # ignore error TypeError: issubclass() arg 1 must be a class
            pass
        return func_type

    def get_func_model(self, request: Request, func_type: str) -> FuncModel:
        """Look up the FuncModel matching the request, raising if absent."""
        func_key: str = self.gen_key(request.group, request.func_name, func_type)
        if func_key not in self.func_dict:
            raise FuncNotFoundError(extra_msg=f"name: {request.func_name}")
        func_model: FuncModel = self.func_dict[func_key]
        return func_model

    def register(
        self,
        func: Callable,
        name: Optional[str] = None,
        group: Optional[str] = None,
        is_private: bool = False,
        doc: Optional[str] = None,
    ) -> None:
        """
        register func to manager
        :param func: Function that need to be registered
        :param name: If the function name is not specified,
            the system will obtain its own name according to the function,
            otherwise it will be replaced by the specified function name
        :param group: Specify the correlation_id to which the function to be registered belongs.
            The same function can be registered to different groups.
            The root correlation_id is generally used for system components, and there are restrictions when calling.
        :param is_private: If the function is private, it will be restricted to call and cannot be overloaded
        :param doc: func doc, if not set, auto use python func doc
        """
        if inspect.isfunction(func) or inspect.ismethod(func):
            name = name if name else func.__name__
        else:
            raise RegisteredError("func must be func or method")
        sig: "inspect.Signature" = inspect.signature(func)
        func_type: str = self._get_func_type(func)

        if func_type == constant.NORMAL_TYPE:
            # check func param&return value type hint
            if sig.return_annotation is sig.empty:
                raise RegisteredError(f"{func.__name__} must use TypeHints")
            if not is_json_type(sig.return_annotation):
                raise RegisteredError(f"{func.__name__} return type:{sig.return_annotation} is not json type")
            for param in sig.parameters.values():
                if param.annotation is sig.empty:
                    raise RegisteredError(f"{func.__name__} param:{param.name} must use TypeHints")
                if not is_json_type(param.annotation):
                    raise RegisteredError(
                        f"{func.__name__} param:{param.name} type:{param.annotation} is not json type"
                    )
        if group is None:
            group = constant.DEFAULT_GROUP
        func_key: str = self.gen_key(group, name, func_type)
        if func_key in self.func_dict:
            raise RegisteredError(f"`{func_key}` Already register")
        self.func_dict[func_key] = FuncModel(
            group=group, func_type=func_type, func_name=name, func=func, is_private=is_private, doc=doc
        )
        logger.debug(f"register `{func_key}` success")

    @staticmethod
    def _load_func(path: str, func_str: str) -> FunctionType:
        """Dynamic loading function
        :param path: func file path
        :param func_str: func name
        """
        reload_module = importlib.import_module(path)
        func = getattr(reload_module, func_str)
        if not hasattr(func, "__call__"):
            raise RegisteredError(f"{func_str} is not a callable object")
        return func

    def _load(
        self,
        path: str,
        func_str: str,
        name: Optional[str] = None,
        group: Optional[str] = None,
        is_private: bool = False,
        doc: Optional[str] = None,
    ) -> str:
        """load func to registry"""
        try:
            func = self._load_func(path, func_str)
            if not name:
                name = func.__name__
            if group is None:
                group = constant.DEFAULT_GROUP
            func_type: str = self._get_func_type(func)
            func_key: str = self.gen_key(group, name, func_type)
            if func_key in self.func_dict:
                raise RegisteredError(f"`{func_key}` already exists")
            self.register(func, name, group, is_private, doc)
            return f"load {func_str} from {path} success"
        except Exception as e:
            raise RegisteredError(f"load {func_str} from {path} fail, {str(e)}")

    def _reload(
        self,
        path: str,
        func_str: str,
        name: Optional[str] = None,
        group: Optional[str] = None,
        doc: Optional[str] = None,
    ) -> str:
        """reload func by registry"""
        try:
            func = self._load_func(path, func_str)
            if not name:
                name = func.__name__
            if group is None:
                group = constant.DEFAULT_GROUP
            func_type: str = self._get_func_type(func)
            func_key: str = self.gen_key(group, name, func_type)
            if func_key not in self.func_dict:
                raise RegisteredError(f"`{func_key}` not exists")
            func_model: FuncModel = self.func_dict[func_key]
            # private (system) funcs may never be hot-swapped
            if func_model.is_private:
                raise RegisteredError(f"{func_key} reload fail, private func can not reload")
            self.func_dict[func_key] = FuncModel(
                group=group, func_type=func_type, func_name=name, func=func, is_private=func_model.is_private, doc=doc
            )
            return f"reload {func_str} from {path} success"
        except Exception as e:
            raise RegisteredError(f"reload {func_str} from {path} fail, {str(e)}")

    def get_register_func_list(self) -> List[Dict[str, Union[str, bool]]]:
        """get func info which in registry"""
        register_list: List[Dict[str, Union[str, bool]]] = []
        for key, value in self.func_dict.items():
            module = inspect.getmodule(value.func)
            if not module:
                continue
            func_info_dict: Dict[str, Any] = value.to_dict()
            func_info_dict.update({"module_name": module.__name__, "module_file": module.__file__})
            register_list.append(func_info_dict)
        return register_list

    def __contains__(self, key: str) -> bool:
        return key in self.func_dict

    def __getitem__(self, key: str) -> FuncModel:
        return self.func_dict[key]
| 2.125 | 2 |
venv/Lib/site-packages/IPython/core/magics/auto.py | ajayiagbebaku/NFL-Model | 6,989 | 12757506 | """Implementation of magic functions that control various automatic behaviors.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from IPython.core.magic import Bunch, Magics, magics_class, line_magic
from IPython.testing.skipdoctest import skip_doctest
from logging import error
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class AutoMagics(Magics):
    """Magics that control various autoX behaviors."""

    def __init__(self, shell):
        super(AutoMagics, self).__init__(shell)
        # namespace for holding state we may need
        self._magic_state = Bunch()

    @line_magic
    def automagic(self, parameter_s=''):
        """Make magic functions callable without having to type the initial %.

        Without arguments toggles on/off (when off, you must call it as
        %automagic, of course). With arguments it sets the value, and you can
        use any of (case insensitive):

        - on, 1, True: to activate

        - off, 0, False: to deactivate.

        Note that magic functions have lowest priority, so if there's a
        variable whose name collides with that of a magic fn, automagic won't
        work for that function (you get the variable instead). However, if you
        delete the variable (del var), the previously shadowed magic function
        becomes visible to automagic again."""

        arg = parameter_s.lower()
        mman = self.shell.magics_manager
        if arg in ('on', '1', 'true'):
            val = True
        elif arg in ('off', '0', 'false'):
            val = False
        else:
            # no (or unrecognized) argument: toggle the current state
            val = not mman.auto_magic
        mman.auto_magic = val
        print('\n' + self.shell.magics_manager.auto_status())

    @skip_doctest
    @line_magic
    def autocall(self, parameter_s=''):
        """Make functions callable without having to type parentheses.

        Usage:

           %autocall [mode]

        The mode can be one of: 0->Off, 1->Smart, 2->Full.  If not given, the
        value is toggled on and off (remembering the previous state).

        In more detail, these values mean:

        0 -> fully disabled

        1 -> active, but do not apply if there are no arguments on the line.

        In this mode, you get::

          In [1]: callable
          Out[1]: <built-in function callable>

          In [2]: callable 'hello'
          ------> callable('hello')
          Out[2]: False

        2 -> Active always.  Even if no arguments are present, the callable
        object is called::

          In [2]: float
          ------> float()
          Out[2]: 0.0

        Note that even with autocall off, you can still use '/' at the start of
        a line to treat the first argument on the command line as a function
        and add parentheses to it::

          In [8]: /str 43
          ------> str(43)
          Out[8]: '43'

        # all-random (note for auto-testing)
        """

        if parameter_s:
            # guard against non-numeric arguments instead of letting
            # int() raise an uncaught ValueError at the user
            try:
                arg = int(parameter_s)
            except ValueError:
                error('Valid modes: (0->Off, 1->Smart, 2->Full)')
                return
        else:
            arg = 'toggle'

        if arg not in (0, 1, 2, 'toggle'):
            error('Valid modes: (0->Off, 1->Smart, 2->Full)')
            return

        if arg in (0, 1, 2):
            self.shell.autocall = arg
        else:  # toggle
            if self.shell.autocall:
                # remember the current mode so a later toggle restores it
                self._magic_state.autocall_save = self.shell.autocall
                self.shell.autocall = 0
            else:
                try:
                    self.shell.autocall = self._magic_state.autocall_save
                except AttributeError:
                    # nothing saved yet: default back to Smart mode
                    self.shell.autocall = self._magic_state.autocall_save = 1

        print("Automatic calling is:", ['OFF', 'Smart', 'Full'][self.shell.autocall])
| 2.046875 | 2 |
example/server/server.py | jmahmood/pytestsocket | 1 | 12757507 | <filename>example/server/server.py
__author__ = 'jawaad'

# Explicit stdlib import: previously `logging` was only available via the
# star import from pytestsocket.wsserver, which is fragile.
import logging

from pytestsocket.wsserver import *

import tornado.httpserver
import tornado.web
import tornado.ioloop
import tornado.options

tornado.options.define("port", default=1690, help="run on the given port", type=int)
tornado.options.parse_config_file("test_server.conf")
tornado.options.parse_command_line()

# Route websocket test requests to the handler provided by pytestsocket.
application = tornado.web.Application([
    (r'/tests', unittestWebSocketHandler),
])

# Resolve the port once instead of looking the option up repeatedly.
port = int(tornado.options.options['port'])
logging.info("starting Tornado web server on port %d" % port)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port)
tornado.ioloop.IOLoop.instance().start()
| 2.25 | 2 |
scripts/appctl.py | ljlu1504/EHPC | 0 | 12757508 | <filename>scripts/appctl.py
#!/usr/bin/env python
import sys
from os import path
from constants import (
ACTION_APP_INIT,
ACTION_APP_START,
ACTION_APP_STOP,
ACTION_APP_RESTART,
ACTION_HEALTH_CHECK,
ACTION_METADATA_RELOAD,
ROLE_CONTROLLER,
ROLE_COMPUTE,
ROLE_LOGIN,
SLURM_CONF,
MASTER_CONTROLLER_SID,
)
from common import (
logger,
run_shell,
get_role,
ArgsParser,
get_cluster_info,
get_nas_mount_point,
get_cluster_name,
)
from host_utils import generate_hosts, set_hostname, generate_hpcmodulefiles
from slurm_utils import (
generate_slurm_conf,
update_slurm_conf,
)
from softwarectl import init_software
from userctl import add_admin_user
# Services that must be managed (start/stop/restart/check) for each role.
ROLE_SERVICES = {
    ROLE_CONTROLLER: ["slurmctld", "slapd"],
    ROLE_COMPUTE: ["slurmd", "nslcd"],
    ROLE_LOGIN: ["nslcd"],
}
# Controller-only files removed from non-controller nodes before start.
clear_files = {
    ROLE_COMPUTE: ["/usr/sbin/userctl", SLURM_CONF],
    ROLE_LOGIN: ["/usr/sbin/userctl"]
}
def setup():
    """One-time node initialization: hosts file, hostname, HPC modulefiles.

    Returns 0 (shell-style exit code) on success.
    """
    logger.info("Generating hosts...")
    generate_hosts()
    logger.info("Setup hostname...")
    set_hostname()
    logger.info("Setup hpc module-files...")
    generate_hpcmodulefiles()
    logger.info("setup done.")
    return 0
def start():
    """Prepare shared slurm state and start this role's services.

    Controllers (re)generate slurm.conf first; other roles remove stale
    controller-only files. The master controller additionally bootstraps the
    shared admin directories, the admin user and software installation.

    Returns 0 on success, 1 for an unknown role, or the exit code of
    init_software() on the master controller (shell-style).
    """
    role = get_role()
    nas_mount_point = get_nas_mount_point()
    cluster_name = get_cluster_name()
    # mkdir /nas_mount_point/opt/slurm/state_save_loc for StateSaveLocation
    run_shell("mkdir -p {}/opt/slurm/state_save_loc/{}/".format(nas_mount_point,
                                                                cluster_name))
    run_shell("ln -sf {}/opt/slurm/ /opt/slurm".format(nas_mount_point))

    # start before: role-specific preparation
    if role == ROLE_CONTROLLER:
        logger.info("Generating slurm configurations...")
        generate_slurm_conf()
    else:
        # .get() so an unknown role reaches the error branch below instead
        # of raising KeyError here
        for stale in clear_files.get(role, ()):
            if path.exists(stale):
                run_shell("rm {}".format(stale))

    # start service
    if role in ROLE_SERVICES:
        for service in ROLE_SERVICES[role]:
            logger.info("Start service {}".format(service))
            run_shell("systemctl start {}".format(service))
    else:
        logger.error("Un-support role[%s].", role)
        return 1

    # start post: only the master controller bootstraps the shared env
    cluster_info = get_cluster_info()
    if role == ROLE_CONTROLLER and \
            int(cluster_info["sid"]) == MASTER_CONTROLLER_SID:
        logger.info("create admin dirs..")
        run_shell("mkdir -p {}/opt".format(nas_mount_point))
        run_shell("mkdir -p {}/home/".format(nas_mount_point))
        run_shell("mkdir -p {}/data/".format(nas_mount_point))
        # create admin user
        add_admin_user()
        # install software; its exit code becomes ours
        return init_software()
    logger.info("%s started.", role)
    return 0
def stop():
    """Stop every service belonging to this node's role.

    Returns 0 on success, 1 for an unknown role (shell-style exit code).
    """
    role = get_role()
    services = ROLE_SERVICES.get(role)
    if services is None:
        logger.error("Un-support role[%s].", role)
        return 1
    for svc in services:
        run_shell("systemctl stop {}".format(svc))
    return 0
def restart():
    """Restart every service belonging to this node's role.

    Returns 0 on success, 1 for an unknown role (shell-style exit code).
    """
    role = get_role()
    services = ROLE_SERVICES.get(role)
    if services is None:
        logger.error("Un-support role[%s].", role)
        return 1
    for svc in services:
        run_shell("systemctl restart {}".format(svc))
    logger.info("%s re-started.", role)
    return 0
def check_service_status(service):
    """Return 0 when *service* is active per systemd, else its exit code."""
    retcode = run_shell("systemctl is-active {}".format(service), without_log=True)
    if retcode:
        logger.error("the service[%s] is not health[code: %s].", service, retcode)
    return retcode
def health_check():
    """Check every service of this role; return 0 if all active.

    Stops at the first failing service and returns its non-zero exit code.
    Unknown roles have no services and therefore report healthy (0).
    """
    role = get_role()
    # default was previously the empty *string* ""; use an empty tuple to
    # match the list-valued ROLE_SERVICES entries (iteration is the same)
    for service in ROLE_SERVICES.get(role, ()):
        ret = check_service_status(service)
        if ret != 0:
            return ret
    return 0
def metadata_reload():
    """Refresh hosts and slurm configuration after cluster metadata changed.

    Returns 0 (shell-style exit code).
    """
    logger.info("generate hosts for reloading..")
    generate_hosts()
    role = get_role()
    if role == ROLE_CONTROLLER:
        logger.info("update slurm conf for reloading metadata..")
        update_slurm_conf()
        # TODO: with multiple controller nodes, run this command on only one
        # master node
        logger.info("re-config slurm configuration for cluster..")
        run_shell("scontrol reconfigure")
    return 0
def help():
    """Print CLI usage.

    NOTE: the name shadows the builtin ``help`` but is referenced by
    ACTION_MAP, so it is kept for compatibility.
    """
    # The original used the Python-2-only `print` statement; writing to
    # sys.stdout produces identical output and works on both 2 and 3.
    sys.stdout.write("usage: appctl init/start/stop/restart/check/reload\n")
# Dispatch table: CLI action name -> handler function (each returns an
# exit code, except help which returns None).
ACTION_MAP = {
    "help": help,
    "--help": help,
    ACTION_APP_INIT: setup,
    ACTION_APP_START: start,
    ACTION_APP_STOP: stop,
    ACTION_APP_RESTART: restart,
    ACTION_HEALTH_CHECK: health_check,
    ACTION_METADATA_RELOAD: metadata_reload,
}
def main(argv):
    """Parse the command line and exit with the chosen action's code.

    Exits 40 when parsing fails or the action is unknown.
    """
    parser = ArgsParser()
    if not parser.parse(argv):
        sys.exit(40)
    handler = ACTION_MAP.get(parser.action)
    if handler is None:
        logger.error("Un-support action:[%s], exit!", parser.action)
        sys.exit(40)
    sys.exit(handler())
# Script entry point: dispatch the parsed CLI action.
if __name__ == "__main__":
    main(sys.argv)
| 1.648438 | 2 |
server/data_fixer.py | Peace-Data/Peace-Data | 0 | 12757509 | import pandas as pd
import numpy as np
class DataFixer:
    """One-hot encodes categorical columns of a dataset via pandas."""

    def __init__(self):
        pass

    def get_fix(self, data):
        """Return *data* with categorical columns one-hot encoded.

        The encoded frame is also cached on ``self.data``.
        """
        self.data = pd.get_dummies(data)
        return self.data
| 2.890625 | 3 |
lib/dynamic_screening_solutions/utils.py | goztrk/django-htk | 206 | 12757510 | # Python Standard Library Imports
import base64
import hashlib
import hmac
import json
# HTK Imports
from htk.utils import htk_setting
from htk.utils.general import resolve_method_dynamically
def validate_webhook_request(request):
    """Validates a 321Forms webhook request

    Recomputes the HMAC-SHA1 signature of the raw request body using the
    company's hash key and compares it to the X-Onboarding-Signature header.

    Returns a JSON request body if it is valid
    Otherwise, returns None
    """
    webhook_data = json.loads(request.body)
    company_id = webhook_data.get('company', {}).get('id')

    headers = request.META
    expected_signature = headers.get('HTTP_X_ONBOARDING_SIGNATURE', '')

    hash_key_retriever = resolve_method_dynamically(htk_setting('HTK_321FORMS_WEBHOOK_HASH_KEY_RETRIEVER'))
    hash_key = hash_key_retriever(company_id)
    # hmac.new requires a bytes key; bytes(str) fails on Python 3, so encode
    # text keys explicitly
    key = hash_key if isinstance(hash_key, bytes) else hash_key.encode('utf-8')
    signature = base64.b64encode(
        hmac.new(
            key,
            request.body,
            digestmod=hashlib.sha1
        ).digest()
    )
    # normalize the header to bytes so the comparison works on both 2 and 3
    expected = (expected_signature
                if isinstance(expected_signature, bytes)
                else expected_signature.encode('utf-8'))
    # constant-time comparison to avoid leaking signature prefixes via timing
    if hmac.compare_digest(signature, expected):
        return webhook_data
    return None
def handle_webhook_request(webhook_data):
    """Dispatch *webhook_data* to the handler registered for its topic.

    Unknown or missing topics are silently ignored.
    """
    handlers = htk_setting('HTK_321FORMS_WEBHOOK_EVENT_HANDLERS')
    method_path = handlers.get(webhook_data.get('topic', None))
    handler = resolve_method_dynamically(method_path) if method_path else None
    if handler is not None:
        handler(webhook_data)
| 2.3125 | 2 |
server.py | Totsui/Voice_cloner | 113 | 12757511 | import socket
import os
from playsound import playsound
from pydub import AudioSegment
def sendToClient(msg):
    """Synthesize speech for *msg* and stream the resulting WAV to the client.

    *msg* is raw bytes: a 3-letter language code ("ITA" or "ENG") followed by
    the text to speak. The text is written to harvard_sentences.txt, rendered
    to samples/<n>.wav by synthesize.py, concatenated and boosted by 10 dB,
    then sent over the module-level `clientsocket` created in the accept loop.
    """
    msg = msg.decode('utf-8')
    lang = msg[:3] # ITA or ENG
    msg = msg[3:] # actual message
    words = msg.split(" ")
    if len(words) > 18:
        # split long texts into chunks of roughly 12 words so the
        # synthesizer processes them sentence by sentence
        sentences = []
        sentence = ""
        for i in range(len(words)):
            sentence += words[i] + " "
            if i%12 == 0 and i != 0:
                sentences.append(sentence)
                sentence = ""
            elif i == len(words)-1:
                sentences.append(sentence)
        # synthesize.py skips the first line, hence the "first line" filler
        with open('harvard_sentences.txt','w') as f:
            first = True
            for i, sentence in enumerate(sentences, start=1):
                if first:
                    f.write("first line\n1. "+str(sentence)+"\n")
                    first = False
                else:
                    f.write(f"{i}. {str(sentence)}\n")
        num_sentences = len(sentences)
    else:
        with open('harvard_sentences.txt','w') as f:
            f.write("first line\n1. "+str(msg)+"\n")
        num_sentences = 1
    # run the synthesizer; it writes one WAV per sentence into samples/
    os.system('python synthesize.py '+lang)
    # NOTE(review): starts from int 0, so the first += relies on pydub
    # supporting `0 + AudioSegment` -- confirm against the pydub version used
    sounds = 0
    for i in range(0, num_sentences):
        sounds += AudioSegment.from_wav(f"samples/{i+1}.wav")
    # increase volume by 10dB
    sounds += 10
    sounds.export("backup/final.wav", format="wav")
    # NOTE(review): f was already closed by the `with` block above; this
    # close is redundant
    f.close()
    with open('backup/final.wav', 'rb') as f:
        audiob = f.read()
    clientsocket.send(audiob)
    clientsocket.close()
    # NOTE(review): redundant close (the `with` block already closed f)
    f.close()
if __name__ == '__main__':
    # Accept loop: one 2 KB request per connection; sendToClient() synthesizes
    # the audio and streams it back over the (module-level) clientsocket.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("0.0.0.0", 1234))
    s.listen(5)
    while True:
        print("Waiting for connection...")
        clientsocket, address = s.accept()
        print(f"Connection from {address} has been established")
        msg = clientsocket.recv(2048)
        print(msg)
        sendToClient(msg)
| 2.875 | 3 |
src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/commands.py | v-Ajnava/azure-cli | 0 | 12757512 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import OrderedDict
from azure.cli.command_modules.vm._client_factory import (cf_vm, cf_avail_set, cf_ni,
cf_vm_ext,
cf_vm_ext_image, cf_vm_image, cf_usage,
cf_vmss, cf_vmss_vm,
cf_vm_sizes, cf_disks, cf_snapshots,
cf_images, cf_run_commands,
cf_rolling_upgrade_commands)
from azure.cli.core.commands import DeploymentOutputLongRunningOperation, cli_command
from azure.cli.core.commands.arm import \
(cli_generic_update_command, cli_generic_wait_command, deployment_validate_table_format)
from azure.cli.core.util import empty_on_404
from azure.cli.core.profiles import supported_api_version, ResourceType
# pylint: disable=line-too-long
# Format templates used to build dotted handler paths for cli_command().
custom_path = 'azure.cli.command_modules.vm.custom#{}'
mgmt_path = 'azure.mgmt.compute.operations.{}#{}.{}'
# VM
def transform_ip_addresses(result):
    """Flatten per-VM network info into rows of name/public/private IPs."""
    rows = []
    for item in result:
        network = item['virtualMachine']['network']
        pubs = network.get('publicIpAddresses')
        # join non-empty public addresses, or None when the VM has none
        pub_str = ','.join(p['ipAddress'] for p in pubs if p['ipAddress']) if pubs else None
        privs = network.get('privateIpAddresses')
        priv_str = ','.join(privs) if privs else None
        rows.append(OrderedDict([
            ('virtualMachine', item['virtualMachine']['name']),
            ('publicIPAddresses', pub_str),
            ('privateIPAddresses', priv_str),
        ]))
    return rows
def transform_vm(vm):
    """Flatten a VM dict to the columns shown in table output."""
    row = OrderedDict()
    row['name'] = vm['name']
    row['resourceGroup'] = vm['resourceGroup']
    row['powerState'] = vm.get('powerState')
    row['publicIps'] = vm.get('publicIps')
    row['fqdns'] = vm.get('fqdns')
    row['location'] = vm['location']
    if 'zones' in vm:
        # zones column shown as comma-separated string (empty when unset)
        row['zones'] = ','.join(vm['zones']) if vm['zones'] else ''
    return row
def transform_vm_create_output(result):
    """Summarize a VM create result; fall back to the raw result object."""
    from msrestazure.tools import parse_resource_id
    try:
        resource_group = (getattr(result, 'resource_group', None) or
                          parse_resource_id(result.id)['resource_group'])
        output = OrderedDict([
            ('id', result.id),
            ('resourceGroup', resource_group),
            ('powerState', result.power_state),
            ('publicIpAddress', result.public_ips),
            ('fqdns', result.fqdns),
            ('privateIpAddress', result.private_ips),
            ('macAddress', result.mac_addresses),
            ('location', result.location)])
        if getattr(result, 'identity', None):
            output['identity'] = result.identity
        if hasattr(result, 'zones'):  # emit 'zones' even when the value is None
            output['zones'] = result.zones[0] if result.zones else ''
        return output
    except AttributeError:
        from msrest.pipeline import ClientRawResponse
        return None if isinstance(result, ClientRawResponse) else result
def transform_vm_usage_list(result):
    """Stringify usage numbers and expose the localized quota name in-place."""
    usages = list(result)
    for usage in usages:
        usage.current_value = str(usage.current_value)
        usage.limit = str(usage.limit)
        usage.local_name = usage.name.localized_value
    return usages
def transform_vm_list(vm_list):
    """Apply the per-VM table transform to every entry of *vm_list*."""
    return list(map(transform_vm, vm_list))
# flatten out important fields (single member arrays) to be displayed in the table output
def transform_sku_for_table_output(skus):
    """Flatten SKU entries, collapsing single-element lists to scalars."""
    rows = []
    for sku in skus:
        row = OrderedDict()
        row['resourceType'] = sku['resourceType']
        locations = sku['locations']
        row['locations'] = str(locations) if len(locations) > 1 else locations[0]
        row['name'] = sku['name']
        row['size'] = sku['size']
        row['tier'] = sku['tier']
        capabilities = sku['capabilities']
        if capabilities:
            pairs = ['{}={}'.format(c['name'], c['value']) for c in capabilities]
            row['capabilities'] = str(pairs) if len(pairs) > 1 else pairs[0]
        else:
            row['capabilities'] = None
        restrictions = sku['restrictions']
        if restrictions:
            reasons = [r['reasonCode'] for r in restrictions]
            row['restrictions'] = str(reasons) if len(reasons) > 1 else reasons[0]
        else:
            row['restrictions'] = None
        rows.append(row)
    return rows
op_var = 'virtual_machines_operations'
op_class = 'VirtualMachinesOperations'
cli_command(__name__, 'vm create', custom_path.format('create_vm'), transform=transform_vm_create_output, no_wait_param='no_wait', table_transformer=deployment_validate_table_format)
cli_command(__name__, 'vm delete', mgmt_path.format(op_var, op_class, 'delete'), cf_vm, confirmation=True, no_wait_param='raw')
cli_command(__name__, 'vm deallocate', mgmt_path.format(op_var, op_class, 'deallocate'), cf_vm, no_wait_param='raw')
cli_command(__name__, 'vm generalize', mgmt_path.format(op_var, op_class, 'generalize'), cf_vm, no_wait_param='raw')
cli_command(__name__, 'vm show', custom_path.format('show_vm'), table_transformer=transform_vm, exception_handler=empty_on_404)
cli_command(__name__, 'vm list-vm-resize-options', mgmt_path.format(op_var, op_class, 'list_available_sizes'), cf_vm)
cli_command(__name__, 'vm stop', mgmt_path.format(op_var, op_class, 'power_off'), cf_vm, no_wait_param='raw')
cli_command(__name__, 'vm restart', mgmt_path.format(op_var, op_class, 'restart'), cf_vm, no_wait_param='raw')
cli_command(__name__, 'vm start', mgmt_path.format(op_var, op_class, 'start'), cf_vm, no_wait_param='raw')
cli_command(__name__, 'vm redeploy', mgmt_path.format(op_var, op_class, 'redeploy'), cf_vm, no_wait_param='raw')
cli_command(__name__, 'vm list-ip-addresses', custom_path.format('list_ip_addresses'), table_transformer=transform_ip_addresses)
cli_command(__name__, 'vm get-instance-view', custom_path.format('get_instance_view'),
table_transformer='{Name:name, ResourceGroup:resourceGroup, Location:location, ProvisioningState:provisioningState, PowerState:instanceView.statuses[1].displayStatus}')
cli_command(__name__, 'vm list', custom_path.format('list_vm'), table_transformer=transform_vm_list)
cli_command(__name__, 'vm resize', custom_path.format('resize_vm'), no_wait_param='no_wait')
cli_command(__name__, 'vm capture', custom_path.format('capture_vm'))
cli_command(__name__, 'vm open-port', custom_path.format('vm_open_port'))
cli_command(__name__, 'vm format-secret', custom_path.format('get_vm_format_secret'), deprecate_info='az vm secret format')
cli_command(__name__, 'vm secret format', custom_path.format('get_vm_format_secret'))
cli_command(__name__, 'vm secret add', custom_path.format('add_vm_secret'))
cli_command(__name__, 'vm secret list', custom_path.format('list_vm_secrets'))
cli_command(__name__, 'vm secret remove', custom_path.format('remove_vm_secret'))
cli_generic_update_command(__name__, 'vm update',
mgmt_path.format(op_var, op_class, 'get'),
mgmt_path.format(op_var, op_class, 'create_or_update'),
cf_vm,
no_wait_param='raw')
cli_generic_wait_command(__name__, 'vm wait', 'azure.cli.command_modules.vm.custom#get_instance_view')
if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2017-03-30'):
cli_command(__name__, 'vm perform-maintenance', mgmt_path.format(op_var, op_class, 'perform_maintenance'), cf_vm)
if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2016-04-30-preview'):
cli_command(__name__, 'vm convert', mgmt_path.format(op_var, op_class, 'convert_to_managed_disks'), cf_vm)
# VM encryption
cli_command(__name__, 'vm encryption enable', 'azure.cli.command_modules.vm.disk_encryption#encrypt_vm')
cli_command(__name__, 'vm encryption disable', 'azure.cli.command_modules.vm.disk_encryption#decrypt_vm')
cli_command(__name__, 'vm encryption show', 'azure.cli.command_modules.vm.disk_encryption#show_vm_encryption_status', exception_handler=empty_on_404)
# VMSS encryption
if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2017-03-30'):
cli_command(__name__, 'vmss encryption enable', 'azure.cli.command_modules.vm.disk_encryption#encrypt_vmss')
cli_command(__name__, 'vmss encryption disable', 'azure.cli.command_modules.vm.disk_encryption#decrypt_vmss')
cli_command(__name__, 'vmss encryption show', 'azure.cli.command_modules.vm.disk_encryption#show_vmss_encryption_status', exception_handler=empty_on_404)
# VM NIC
cli_command(__name__, 'vm nic add', custom_path.format('vm_add_nics'))
cli_command(__name__, 'vm nic remove', custom_path.format('vm_remove_nics'))
cli_command(__name__, 'vm nic set', custom_path.format('vm_set_nics'))
cli_command(__name__, 'vm nic show', custom_path.format('vm_show_nic'), exception_handler=empty_on_404)
cli_command(__name__, 'vm nic list', custom_path.format('vm_list_nics'))
# VMSS NIC
cli_command(__name__, 'vmss nic list', 'azure.mgmt.network.operations.network_interfaces_operations#NetworkInterfacesOperations.list_virtual_machine_scale_set_network_interfaces', cf_ni)
cli_command(__name__, 'vmss nic list-vm-nics', 'azure.mgmt.network.operations.network_interfaces_operations#NetworkInterfacesOperations.list_virtual_machine_scale_set_vm_network_interfaces', cf_ni)
cli_command(__name__, 'vmss nic show', 'azure.mgmt.network.operations.network_interfaces_operations#NetworkInterfacesOperations.get_virtual_machine_scale_set_network_interface', cf_ni, exception_handler=empty_on_404)
# VM Access
cli_command(__name__, 'vm user update', custom_path.format('set_user'), no_wait_param='no_wait')
cli_command(__name__, 'vm user delete', custom_path.format('delete_user'), no_wait_param='no_wait')
cli_command(__name__, 'vm user reset-ssh', custom_path.format('reset_linux_ssh'), no_wait_param='no_wait')
# # VM Availability Set
cli_command(__name__, 'vm availability-set create', custom_path.format('create_av_set'), table_transformer=deployment_validate_table_format, no_wait_param='no_wait')
op_var = 'availability_sets_operations'
op_class = 'AvailabilitySetsOperations'
cli_command(__name__, 'vm availability-set delete', mgmt_path.format(op_var, op_class, 'delete'), cf_avail_set)
cli_command(__name__, 'vm availability-set show', mgmt_path.format(op_var, op_class, 'get'), cf_avail_set, exception_handler=empty_on_404)
cli_command(__name__, 'vm availability-set list', mgmt_path.format(op_var, op_class, 'list'), cf_avail_set)
cli_command(__name__, 'vm availability-set list-sizes', mgmt_path.format(op_var, op_class, 'list_available_sizes'), cf_avail_set)
if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2016-04-30-preview'):
cli_command(__name__, 'vm availability-set convert', custom_path.format('convert_av_set_to_managed_disk'))
cli_generic_update_command(__name__, 'vm availability-set update',
custom_path.format('availset_get'),
custom_path.format('availset_set'))
cli_generic_update_command(__name__, 'vmss update',
custom_path.format('vmss_get'),
custom_path.format('vmss_set'),
no_wait_param='no_wait')
cli_generic_wait_command(__name__, 'vmss wait', custom_path.format('vmss_get'))
# VM Boot Diagnostics
cli_command(__name__, 'vm boot-diagnostics disable', custom_path.format('disable_boot_diagnostics'))
cli_command(__name__, 'vm boot-diagnostics enable', custom_path.format('enable_boot_diagnostics'))
cli_command(__name__, 'vm boot-diagnostics get-boot-log', custom_path.format('get_boot_log'))
# VM Diagnostics
cli_command(__name__, 'vm diagnostics set', custom_path.format('set_diagnostics_extension'))
cli_command(__name__, 'vm diagnostics get-default-config', custom_path.format('show_default_diagnostics_configuration'))
# VMSS Diagnostics
cli_command(__name__, 'vmss diagnostics set', custom_path.format('set_vmss_diagnostics_extension'))
cli_command(__name__, 'vmss diagnostics get-default-config', custom_path.format('show_default_diagnostics_configuration'))
if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2017-03-30'):
cli_command(__name__, 'vm disk attach', custom_path.format('attach_managed_data_disk'))
cli_command(__name__, 'vm disk detach', custom_path.format('detach_data_disk'))
if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2017-03-30'):
cli_command(__name__, 'vmss disk attach', custom_path.format('attach_managed_data_disk_to_vmss'))
cli_command(__name__, 'vmss disk detach', custom_path.format('detach_disk_from_vmss'))
cli_command(__name__, 'vm unmanaged-disk attach', custom_path.format('attach_unmanaged_data_disk'))
cli_command(__name__, 'vm unmanaged-disk detach', custom_path.format('detach_data_disk'))
cli_command(__name__, 'vm unmanaged-disk list', custom_path.format('list_unmanaged_disks'))
# VM Extension
op_var = 'virtual_machine_extensions_operations'
op_class = 'VirtualMachineExtensionsOperations'
cli_command(__name__, 'vm extension delete', mgmt_path.format(op_var, op_class, 'delete'), cf_vm_ext)
_extension_show_transform = '{Name:name, ProvisioningState:provisioningState, Publisher:publisher, Version:typeHandlerVersion, AutoUpgradeMinorVersion:autoUpgradeMinorVersion}'
cli_command(__name__, 'vm extension show', mgmt_path.format(op_var, op_class, 'get'), cf_vm_ext, exception_handler=empty_on_404,
table_transformer=_extension_show_transform)
cli_command(__name__, 'vm extension set', custom_path.format('set_extension'))
cli_command(__name__, 'vm extension list', custom_path.format('list_extensions'),
table_transformer='[].' + _extension_show_transform)
# VMSS Extension
cli_command(__name__, 'vmss extension delete', custom_path.format('delete_vmss_extension'))
cli_command(__name__, 'vmss extension show', custom_path.format('get_vmss_extension'), exception_handler=empty_on_404)
cli_command(__name__, 'vmss extension set', custom_path.format('set_vmss_extension'))
cli_command(__name__, 'vmss extension list', custom_path.format('list_vmss_extensions'))
# VM Extension Image
op_var = 'virtual_machine_extension_images_operations'
op_class = 'VirtualMachineExtensionImagesOperations'
cli_command(__name__, 'vm extension image show', mgmt_path.format(op_var, op_class, 'get'), cf_vm_ext_image, exception_handler=empty_on_404)
cli_command(__name__, 'vm extension image list-names', mgmt_path.format(op_var, op_class, 'list_types'), cf_vm_ext_image)
cli_command(__name__, 'vm extension image list-versions', mgmt_path.format(op_var, op_class, 'list_versions'), cf_vm_ext_image)
cli_command(__name__, 'vm extension image list', custom_path.format('list_vm_extension_images'))
# VMSS Extension Image (convenience copy of VM Extension Image)
cli_command(__name__, 'vmss extension image show', mgmt_path.format(op_var, op_class, 'get'), cf_vm_ext_image, exception_handler=empty_on_404)
cli_command(__name__, 'vmss extension image list-names', mgmt_path.format(op_var, op_class, 'list_types'), cf_vm_ext_image)
cli_command(__name__, 'vmss extension image list-versions', mgmt_path.format(op_var, op_class, 'list_versions'), cf_vm_ext_image)
cli_command(__name__, 'vmss extension image list', custom_path.format('list_vm_extension_images'))
# VM Image
op_var = 'virtual_machine_images_operations'
op_class = 'VirtualMachineImagesOperations'
cli_command(__name__, 'vm image show', mgmt_path.format(op_var, op_class, 'get'), cf_vm_image, exception_handler=empty_on_404)
cli_command(__name__, 'vm image list-offers', mgmt_path.format(op_var, op_class, 'list_offers'), cf_vm_image)
cli_command(__name__, 'vm image list-publishers', mgmt_path.format(op_var, op_class, 'list_publishers'), cf_vm_image)
cli_command(__name__, 'vm image list-skus', mgmt_path.format(op_var, op_class, 'list_skus'), cf_vm_image)
cli_command(__name__, 'vm image list', custom_path.format('list_vm_images'))
# VM Usage
cli_command(__name__, 'vm list-usage', mgmt_path.format('usage_operations', 'UsageOperations', 'list'), cf_usage, transform=transform_vm_usage_list,
table_transformer='[].{Name:localName, CurrentValue:currentValue, Limit:limit}')
# VMSS
vmss_show_table_transform = '{Name:name, ResourceGroup:resourceGroup, Location:location, $zone$Capacity:sku.capacity, Overprovision:overprovision, UpgradePolicy:upgradePolicy.mode}'
vmss_show_table_transform = vmss_show_table_transform.replace('$zone$', 'Zones: (!zones && \' \') || join(` `, zones), ' if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2017-03-30') else ' ')
cli_command(__name__, 'vmss delete', mgmt_path.format('virtual_machine_scale_sets_operations', 'VirtualMachineScaleSetsOperations', 'delete'), cf_vmss, no_wait_param='raw')
cli_command(__name__, 'vmss list-skus', mgmt_path.format('virtual_machine_scale_sets_operations', 'VirtualMachineScaleSetsOperations', 'list_skus'), cf_vmss)
cli_command(__name__, 'vmss list-instances', mgmt_path.format('virtual_machine_scale_set_vms_operations', 'VirtualMachineScaleSetVMsOperations', 'list'), cf_vmss_vm)
cli_command(__name__, 'vmss create', custom_path.format('create_vmss'), transform=DeploymentOutputLongRunningOperation('Starting vmss create'), no_wait_param='no_wait', table_transformer=deployment_validate_table_format)
cli_command(__name__, 'vmss deallocate', custom_path.format('deallocate_vmss'), no_wait_param='no_wait')
cli_command(__name__, 'vmss delete-instances', custom_path.format('delete_vmss_instances'), no_wait_param='no_wait')
cli_command(__name__, 'vmss get-instance-view', custom_path.format('get_vmss_instance_view'),
table_transformer='{ProvisioningState:statuses[0].displayStatus, PowerState:statuses[1].displayStatus}')
cli_command(__name__, 'vmss show', custom_path.format('show_vmss'), exception_handler=empty_on_404,
table_transformer=vmss_show_table_transform)
cli_command(__name__, 'vmss list', custom_path.format('list_vmss'), table_transformer='[].' + vmss_show_table_transform)
cli_command(__name__, 'vmss stop', custom_path.format('stop_vmss'), no_wait_param='no_wait')
cli_command(__name__, 'vmss restart', custom_path.format('restart_vmss'), no_wait_param='no_wait')
cli_command(__name__, 'vmss start', custom_path.format('start_vmss'), no_wait_param='no_wait')
cli_command(__name__, 'vmss update-instances', custom_path.format('update_vmss_instances'), no_wait_param='no_wait')
if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2017-03-30'):
cli_command(__name__, 'vmss reimage', custom_path.format('reimage_vmss'), no_wait_param='no_wait')
cli_command(__name__, 'vmss scale', custom_path.format('scale_vmss'), no_wait_param='no_wait')
cli_command(__name__, 'vmss list-instance-connection-info', custom_path.format('list_vmss_instance_connection_info'))
cli_command(__name__, 'vmss list-instance-public-ips', custom_path.format('list_vmss_instance_public_ips'))
# VM Size
cli_command(__name__, 'vm list-sizes', mgmt_path.format('virtual_machine_sizes_operations', 'VirtualMachineSizesOperations', 'list'), cf_vm_sizes)
if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2017-03-30'):
# VM Disk
disk_show_table_transform = '{Name:name, ResourceGroup:resourceGroup, Location:location, $zone$Sku:sku.name, OsType:osType, SizeGb:diskSizeGb, ProvisioningState:provisioningState}'
disk_show_table_transform = disk_show_table_transform.replace('$zone$', 'Zones: (!zones && \' \') || join(` `, zones), ' if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2017-03-30') else ' ')
op_var = 'disks_operations'
op_class = 'DisksOperations'
cli_command(__name__, 'disk create', custom_path.format('create_managed_disk'), no_wait_param='no_wait', table_transformer=disk_show_table_transform)
cli_command(__name__, 'disk list', custom_path.format('list_managed_disks'), table_transformer='[].' + disk_show_table_transform)
cli_command(__name__, 'disk show', mgmt_path.format(op_var, op_class, 'get'), cf_disks, exception_handler=empty_on_404, table_transformer=disk_show_table_transform)
cli_command(__name__, 'disk delete', mgmt_path.format(op_var, op_class, 'delete'), cf_disks, no_wait_param='raw', confirmation=True)
cli_command(__name__, 'disk grant-access', custom_path.format('grant_disk_access'))
cli_command(__name__, 'disk revoke-access', mgmt_path.format(op_var, op_class, 'revoke_access'), cf_disks)
cli_generic_update_command(__name__, 'disk update', 'azure.mgmt.compute.operations.{}#{}.get'.format(op_var, op_class),
'azure.mgmt.compute.operations.{}#{}.create_or_update'.format(op_var, op_class),
custom_function_op=custom_path.format('update_managed_disk'),
setter_arg_name='disk', factory=cf_disks, no_wait_param='raw')
cli_generic_wait_command(__name__, 'disk wait', 'azure.mgmt.compute.operations.{}#{}.get'.format(op_var, op_class), cf_disks)
op_var = 'snapshots_operations'
op_class = 'SnapshotsOperations'
cli_command(__name__, 'snapshot create', custom_path.format('create_snapshot'))
cli_command(__name__, 'snapshot list', custom_path.format('list_snapshots'))
cli_command(__name__, 'snapshot show', mgmt_path.format(op_var, op_class, 'get'), cf_snapshots, exception_handler=empty_on_404)
cli_command(__name__, 'snapshot delete', mgmt_path.format(op_var, op_class, 'delete'), cf_snapshots)
cli_command(__name__, 'snapshot grant-access', custom_path.format('grant_snapshot_access'))
cli_command(__name__, 'snapshot revoke-access', mgmt_path.format(op_var, op_class, 'revoke_access'), cf_snapshots)
cli_generic_update_command(__name__, 'snapshot update', 'azure.mgmt.compute.operations.{}#{}.get'.format(op_var, op_class),
'azure.mgmt.compute.operations.{}#{}.create_or_update'.format(op_var, op_class),
custom_function_op=custom_path.format('update_snapshot'),
setter_arg_name='snapshot', factory=cf_snapshots)
op_var = 'images_operations'
op_class = 'ImagesOperations'
cli_command(__name__, 'image create', custom_path.format('create_image'))
cli_command(__name__, 'image list', custom_path.format('list_images'))
cli_command(__name__, 'image show', mgmt_path.format(op_var, op_class, 'get'), cf_images, exception_handler=empty_on_404)
cli_command(__name__, 'image delete', mgmt_path.format(op_var, op_class, 'delete'), cf_images)
if supported_api_version(ResourceType.MGMT_COMPUTE, min_api='2017-03-30'):
cli_command(__name__, 'vm list-skus', custom_path.format('list_skus'), table_transformer=transform_sku_for_table_output)
op_var = 'virtual_machine_run_commands_operations'
op_class = 'VirtualMachineRunCommandsOperations'
cli_command(__name__, 'vm run-command show', mgmt_path.format(op_var, op_class, 'get'), cf_run_commands)
cli_command(__name__, 'vm run-command list', mgmt_path.format(op_var, op_class, 'list'), cf_run_commands)
cli_command(__name__, 'vm run-command invoke', custom_path.format('run_command_invoke'))
op_var = 'virtual_machine_scale_set_rolling_upgrades_operations'
op_class = 'VirtualMachineScaleSetRollingUpgradesOperations'
cli_command(__name__, 'vmss rolling-upgrade cancel', mgmt_path.format(op_var, op_class, 'cancel'), cf_rolling_upgrade_commands)
cli_command(__name__, 'vmss rolling-upgrade get-latest', mgmt_path.format(op_var, op_class, 'get_latest'), cf_rolling_upgrade_commands)
cli_command(__name__, 'vmss rolling-upgrade start', mgmt_path.format(op_var, op_class, 'start_os_upgrade'), cf_rolling_upgrade_commands)
# MSI
cli_command(__name__, 'vm assign-identity', custom_path.format('assign_vm_identity'))
cli_command(__name__, 'vmss assign-identity', custom_path.format('assign_vmss_identity'))
| 1.546875 | 2 |
src/stateserver.py | Mukeshkanna19/sbsim-19 | 0 | 12757513 | #!/usr/bin/env python
import sys
import physics as p
import pygame as pg
import pid
import rospy
import math as m
from geometry_msgs.msg import Pose, Twist
from sbsim.msg import goalmsg
import controller as c
from std_msgs.msg import Int32
from std_msgs.msg import Float64
from sbsim.msg import dribble
# --- module-wide mutable state shared between the ROS callbacks and game() ---
d = 0               # id (1-4) of the robot currently dribbling; 0 = nobody
r10msg = goalmsg()  # latest point-to-goal message for robot 1-0
r11msg = goalmsg()  # latest point-to-goal message for robot 1-1
r20msg = goalmsg()  # latest point-to-goal message for robot 2-0
r21msg = goalmsg()  # latest point-to-goal message for robot 2-1
r1f = 1             # freshness flag published on robot1n0/reached
r2f = 1             # freshness flag published on robot1n1/reached
r3f = 1             # freshness flag published on robot2n0/reached
r4f = 1             # freshness flag published on robot2n1/reached
gs = 0              # game status received on game/status (0 = play)
def dribbletest(r1, r2, r3, r4):
    """Update the global dribble indicator ``d``.

    ``d`` becomes the 1-based index of the first robot (in argument order)
    whose ``dribble`` attribute equals 1, or 0 when no robot is dribbling.
    Always returns 0.
    """
    global d
    d = 0
    for index, robot in enumerate((r1, r2, r3, r4), start=1):
        if robot.dribble == 1:
            d = index
            break
    return 0
def robotpubinit(t, n):
    """Create the pose and reached publishers for robot *n* of team *t*.

    Returns a ``(pose_publisher, reached_publisher)`` tuple for the topics
    ``robot<t>n<n>/pose`` and ``robot<t>n<n>/reached``.
    """
    # The original also built a '/twist' topic name that was never used;
    # that dead local has been removed.
    namepose = 'robot' + str(t) + 'n' + str(n) + '/pose'
    namer = 'robot' + str(t) + 'n' + str(n) + '/reached'
    return (rospy.Publisher(namepose, Pose, queue_size=10),
            rospy.Publisher(namer, Int32, queue_size=10))
def robotsubinit():
    """Subscribe to every robot's point-to-goal (``ptg``) topic."""
    topic_callbacks = (
        ('robot1n0/ptg', r10callback),
        ('robot1n1/ptg', r11callback),
        ('robot2n0/ptg', r20callback),
        ('robot2n1/ptg', r21callback),
    )
    for topic, callback in topic_callbacks:
        rospy.Subscriber(topic, goalmsg, callback)
def r10callback(msg):
    """Store the newest goal message for robot 1-0 and mark it fresh."""
    global r10msg, r1f
    r10msg, r1f = msg, 1
    return 0
def r11callback(msg):
    """Store the newest goal message for robot 1-1 and mark it fresh."""
    global r11msg, r2f
    r11msg, r2f = msg, 1
    return 0
def r20callback(msg):
    """Store the newest goal message for robot 2-0 and mark it fresh."""
    global r20msg, r3f
    r20msg, r3f = msg, 1
    return 0
def r21callback(msg):
    """Store the newest goal message for robot 2-1 and mark it fresh."""
    global r21msg, r4f
    r21msg, r4f = msg, 1
    return 0
# Callback of subscriber to intelligence.
# TODO: implement rule-event handlers -- free kick, throw-in, and goal.
def updaterpose(a, b):
    """Copy a robot's position and heading into a Pose message.

    The heading is encoded as ``tan(theta / 2)`` in ``orientation.z`` with
    ``orientation.w`` fixed at 1.  Returns the robot's dribbled distance
    when it is non-zero, otherwise None.
    """
    a.position.x = b.x
    a.position.y = b.y
    half_angle = b.theta / 2
    a.orientation.z = m.tan(half_angle)
    a.orientation.w = 1
    return b.distdribbled if b.distdribbled != 0 else None
def updatebtwist(a, b):
    """Copy the ball's planar velocity into a Twist message (z is always 0)."""
    a.linear.x, a.linear.y, a.linear.z = b.xd, b.yd, 0
def updatebpose(a, b):
    """Copy the ball's position into a Pose message with unit quaternion w."""
    a.position.x, a.position.y = b.x, b.y
    a.orientation.w = 1
def rulecheck(msg):
    """ROS callback: record the latest game-status value in the global ``gs``."""
    global gs
    gs = msg.data
    return 0
def reset(t1, t2, r1, r2, r3, r4, ball):
    """Re-create the robots and ball at kickoff positions and clear game status.

    NOTE(review): the parameters are only rebound locally -- the caller's
    robot and ball objects are NOT replaced by these assignments, so apart
    from resetting the global ``gs`` this function has no externally visible
    effect.  Verify whether it is dead code or should return the new objects.
    """
    global gs
    r1 = p.robot(x=t1[0][0], y=t1[0][1], yaw=0, ball=ball)
    r2 = p.robot(x=t1[1][0], y=t1[1][1], yaw=0, ball=ball)
    r3 = p.robot(x=t2[0][0], y=t2[0][1], yaw=3.14, ball=ball)
    r4 = p.robot(x=t2[1][0], y=t2[1][1], yaw=3.14, ball=ball)
    ball = p.ball(x=0, y=0)
    gs = 0
def game(t1, t2):
    """Run the state-server simulation loop at 60 Hz.

    ``t1`` and ``t2`` are lists of two ``[x, y]`` kickoff positions, one per
    robot, for team 1 and team 2 respectively.  The loop applies controller
    commands received on the ``*/ptg`` topics, resolves robot collisions,
    and publishes ball/robot poses, dribbler state, and dribbled distance.
    """
    global r10msg
    global r11msg
    global r20msg
    global r21msg
    global d
    global gs
    global r1f
    global r2f
    global r3f
    global r4f
    # Subscriptions: game status plus each robot's point-to-goal topic.
    rospy.Subscriber('game/status', Int32, rulecheck)
    robotsubinit()
    # Publishers for the ball, dribbler owner and dribbled distance.
    pubball = rospy.Publisher('ballpose', Pose, queue_size=10)
    pubbtwist = rospy.Publisher('balltwist', Twist, queue_size=10)
    drib = rospy.Publisher('game/dribbler', Int32, queue_size=10)
    yis = rospy.Publisher('game/dribdist', Float64, queue_size=10)
    pr1 = []  # pose publishers for team 1 robots
    pr2 = []  # pose publishers for team 2 robots
    a, r1r = robotpubinit(1, 0)
    pr1.append(a)
    a, r2r = robotpubinit(1, 1)
    pr1.append(a)
    a, r3r = robotpubinit(2, 0)
    pr2.append(a)
    a, r4r = robotpubinit(2, 1)
    pr2.append(a)
    btwist = Twist()
    rate = rospy.Rate(60)  # simulation tick rate in Hz
    # NOTE(review): this outer loop runs exactly once -- it ends with `break`
    # after the inner loop; presumably scaffolding for a future match reset.
    while True:
        # Fresh physics objects at their kickoff poses.
        ball = p.ball(x=0, y=0)
        bpose = Pose()
        r1 = []
        r2 = []
        r1.append(p.robot(x=t1[0][0], y=t1[0][1], yaw=0, ball=ball))
        r1.append(p.robot(x=t1[1][0], y=t1[1][1], yaw=0, ball=ball))
        r2.append(p.robot(x=t2[0][0], y=t2[0][1], yaw=3.14, ball=ball))
        r2.append(p.robot(x=t2[1][0], y=t2[1][1], yaw=3.14, ball=ball))
        rpose = [Pose(), Pose(), Pose(), Pose()]
        # Publish the initial state once before entering the main loop.
        updatebpose(bpose, ball)
        updatebtwist(btwist, ball)
        updaterpose(rpose[0], r1[0])
        updaterpose(rpose[1], r1[1])
        updaterpose(rpose[2], r2[0])
        updaterpose(rpose[3], r2[1])
        pr1[0].publish(rpose[0])
        pr1[1].publish(rpose[1])
        pr2[0].publish(rpose[2])
        pr2[1].publish(rpose[3])
        pubball.publish(bpose)
        while not rospy.is_shutdown():
            if gs == 0:
                # Game is running: apply controller commands and resolve
                # collisions for every robot pair.
                c.control(r10msg, r1[0], ball)
                c.control(r11msg, r1[1], ball)
                c.control(r20msg, r2[0], ball)
                c.control(r21msg, r2[1], ball)
                p.collRR(r1[0], r2[0])
                p.collRR(r1[0], r2[1])
                p.collRR(r1[0], r1[1])
                p.collRR(r1[1], r2[0])
                p.collRR(r1[1], r2[1])
                p.collRR(r2[0], r2[1])
                dribbletest(r1[0], r1[1], r2[0], r2[1])
                updatebpose(bpose, ball)
                updatebtwist(btwist, ball)
                # updaterpose returns a non-zero dribbled distance or None.
                x1 = updaterpose(rpose[0], r1[0])
                x2 = updaterpose(rpose[1], r1[1])
                x3 = updaterpose(rpose[2], r2[0])
                x4 = updaterpose(rpose[3], r2[1])
                x = [x1, x2, x3, x4]
                # NOTE(review): entries may be None; max() over mixed
                # None/number only works under Python 2 ordering rules.
                y = max(x)
                yis.publish(y)
                r1r.publish(r1f)
                r2r.publish(r2f)
                r3r.publish(r3f)
                r4r.publish(r4f)
                pr1[0].publish(rpose[0])
                pr1[1].publish(rpose[1])
                pr2[0].publish(rpose[2])
                pr2[1].publish(rpose[3])
                pubball.publish(bpose)
                pubbtwist.publish(btwist)
                drib.publish(d)
                rate.sleep()
            else:
                # Game halted: skip control/collision but keep publishing
                # the current state so listeners stay in sync.
                dribbletest(r1[0], r1[1], r2[0], r2[1])
                updatebpose(bpose, ball)
                updatebtwist(btwist, ball)
                x1 = updaterpose(rpose[0], r1[0])
                x2 = updaterpose(rpose[1], r1[1])
                x3 = updaterpose(rpose[2], r2[0])
                x4 = updaterpose(rpose[3], r2[1])
                x = [x1, x2, x3, x4]
                y = max(x)
                yis.publish(y)
                r1r.publish(r1f)
                r2r.publish(r2f)
                r3r.publish(r3f)
                r4r.publish(r4f)
                pr1[0].publish(rpose[0])
                pr1[1].publish(rpose[1])
                pr2[0].publish(rpose[2])
                pr2[1].publish(rpose[3])
                pubball.publish(bpose)
                pubbtwist.publish(btwist)
                drib.publish(d)
                rate.sleep()
        break
if __name__ == '__main__':
    # NOTE: Python 2 script -- uses print statements, and input() eval's
    # the reply (so entering "1" yields the int 1).
    rospy.init_node('state_server', anonymous=True)
    print 'Select Formation for team 1'
    print '1. Striker + Defender'
    print '2. Dynamic Duo'
    a = input('Enter 1 or 2')
    print 'Select Formation for team 2'
    print '1. Striker + Defender'
    print '2. Dynamic Duo'
    print '3. Soyboy + GK'
    b = input('Enter 1 or 2')
    # Kickoff positions per formation choice (team 1 on the negative x side).
    if a == 1:
        posa = [[-50, 0], [-250, 0]]
    elif a == 2:
        posa = [[-125, 100], [-125, -100]]
    # NOTE(review): team 1 has no `else` branch -- any answer other than
    # 1 or 2 leaves `posa` unbound and game() raises NameError.
    if b == 1:
        posb = [[50, 0], [250, 0]]
    elif b == 2:
        posb = [[125, 100], [125, -100]]
    else:
        posb = [[125, 240], [400, 0]]
    try:
        game(posa, posb)
    except rospy.ROSInterruptException:
        pass
jchart/tests.py | monasysinfo/django-jchart | 125 | 12757514 | <reponame>monasysinfo/django-jchart<filename>jchart/tests.py
import json
from django.test import TestCase, RequestFactory
from django.utils import six
from django.core.exceptions import ImproperlyConfigured
from .views import ChartView
from . import Chart
from .config import (Title, Legend, Tooltips, Hover,
InteractionModes, Animation, Element,
ElementArc, Axes, ScaleLabel, Tick, rgba)
class LineChart(Chart):
    """Fully configured line-chart fixture exercised throughout the suite."""

    chart_type = 'line'
    title = Title(text='Test Title Line')
    legend = Legend(display=False)
    tooltips = Tooltips(enabled=False)
    hover = Hover(mode='default')
    animation = Animation(duration=1.0)
    scales = {
        'xAxes': [Axes(display=False, type='time', position='bottom')],
        'yAxes': [Axes(type='linear',
                       position='left',
                       scaleLabel=ScaleLabel(fontColor='#fff'),
                       ticks=Tick(fontColor='#fff'))],
    }

    def get_datasets(self, *args, **kwargs):
        """Return a single dataset holding the integers 1 through 9."""
        return [dict(label='Test Line Chart', data=list(range(1, 10)))]
class LineChartParameterized(LineChart):
    """Line chart whose dataset is selected by a ``currency_type`` URL kwarg."""

    def get_datasets(self, currency_type):
        """Return the euro or dollar dataset.

        Raises ValueError for any unrecognised currency type.
        """
        eur_data = list(range(10))
        do_data = list(range(10, 20))
        if currency_type == 'euro':
            return [dict(label='Euro Chart', data=eur_data)]
        elif currency_type == 'dollar':
            return [dict(label='Dollar Chart', data=do_data)]
        # Fixed misspelled error message ("Unkown" -> "Unknown").
        raise ValueError('Unknown currency type: {}'.format(currency_type))
class LineChartUnresponsive(LineChart):
    # Non-responsive variant: permits explicit width/height (see ChartTestCase).
    responsive = False
class BarChart(Chart):
    # NOTE(review): despite its name this fixture declares chart_type 'radar'
    # and a 'Test Radar Chart' label -- apparently swapped with RadarChart
    # below.  The response tests pass either way; confirm intent.
    chart_type = 'radar'
    title = Title(text='Test Title')

    def get_datasets(self, *args, **kwargs):
        # Empty dataset: the response tests only inspect the config shape.
        data = []
        return [dict(label='Test Radar Chart', data=data)]
class PolarChart(Chart):
    """Polar-area chart fixture with an empty dataset."""

    chart_type = 'polarArea'
    title = Title(text='Test Title')

    def get_datasets(self, *args, **kwargs):
        return [dict(label='Test Polar Chart', data=[])]
class RadarChart(Chart):
    # NOTE(review): despite its name this fixture declares chart_type 'bar'
    # and a 'Test Line Chart' label -- apparently swapped with BarChart
    # above.  The response tests pass either way; confirm intent.
    chart_type = 'bar'
    title = Title(text='Test Title')

    def get_datasets(self, *args, **kwargs):
        # Empty dataset: the response tests only inspect the config shape.
        data = []
        return [dict(label='Test Line Chart', data=data)]
class PieChart(Chart):
    """Pie chart fixture with an empty dataset."""

    chart_type = 'pie'
    title = Title(text='Test Title')

    def get_datasets(self, *args, **kwargs):
        return [dict(label='Test Pie Chart', data=[])]
class BubbleChart(Chart):
    """Bubble chart fixture with an empty dataset."""

    chart_type = 'bubble'
    title = Title(text='Test Title')

    def get_datasets(self, *args, **kwargs):
        return [dict(label='Test Bubble Chart', data=[])]
class OptionsChart(Chart):
    """Fixture asserting class attributes win over the ``options`` dict.

    (The 'Precendence'/'Overriden' spellings are intentional -- the test
    below matches them verbatim.)
    """

    chart_type = 'line'
    title = Title(text='Precendence')
    options = {
        'title': Title(text='Overriden'),
        'responsive': True,
        'maintainAspectRatio': True,
    }

    def get_datasets(self, *args, **kwargs):
        """Return a single dataset holding the integers 1 through 9."""
        return [dict(label='Test Line Chart', data=list(range(1, 10)))]
class ChartViewTestToolkit(TestCase):
    """Base helper: builds GET requests and yields a view response per chart
    class listed in ``classes``."""

    classes = None
    url_kwargs = {}

    @property
    def request(self):
        return RequestFactory().get('/test-url')

    @property
    def responses(self):
        for chart_class in self.classes:
            view = ChartView.from_chart(chart_class())
            yield view(self.request, **self.url_kwargs)
class ChartViewTestToolkitSolo(ChartViewTestToolkit):
    """Toolkit variant that drives a single chart class (``klass``)."""

    klass = None
    url_kwargs = {}

    @property
    def response(self):
        """Call the chart view once and return its HTTP response.

        The original had a second, unreachable ``return`` statement after
        this one; it has been removed.
        """
        return ChartView.from_chart(self.klass())(self.request, **self.url_kwargs)

    @property
    def data(self):
        """Decode and JSON-parse the response body (one request per access)."""
        response = self.response
        charset = getattr(response, 'charset', 'utf-8')
        return json.loads(response.content.decode(charset))
class ChartResponseTestCase(ChartViewTestToolkit):
    """Smoke-tests every chart type end-to-end through the async view."""

    classes = (
        LineChart,
        BarChart,
        PolarChart,
        RadarChart,
        PieChart,
        BubbleChart,
    )

    def test_status_code(self):
        for response in self.responses:
            self.assertEquals(response.status_code, 200)

    def test_content_type(self):
        for response in self.responses:
            self.assertEquals(response.get('content-type'), 'application/json')

    def test_chart_config(self):
        for response in self.responses:
            charset = getattr(response, 'charset', 'utf-8')
            config = json.loads(response.content.decode(charset))
            for key in ('data', 'options', 'type'):
                self.assertIn(key, config)
            self.assertTrue(isinstance(config['data'], dict))
            self.assertTrue(isinstance(config['options'], dict))
            self.assertTrue(isinstance(config['type'], (six.string_types, six.text_type)))
            self.assertIn(config['type'], ['bar', 'line', 'radar', 'polarArea', 'pie', 'bubble'])
            self.assertIn('title', config['options'])
class LineChartTestCase(ChartViewTestToolkitSolo):
    """Checks LineChart's configuration survives the view round-trip."""

    klass = LineChart

    def test_title(self):
        self.assertEquals(self.data['options']['title']['text'], 'Test Title Line')

    def test_legend(self):
        self.assertEquals(self.data['options']['legend']['display'], False)

    def test_tooltips(self):
        self.assertEquals(self.data['options']['tooltips']['enabled'], False)

    def test_hover(self):
        self.assertEquals(self.data['options']['hover']['mode'], 'default')

    def test_animation(self):
        self.assertEquals(self.data['options']['animation']['duration'], 1.0)

    def test_dataset(self):
        chart_data = self.data['data']
        self.assertEquals(len(chart_data['datasets']), 1)
        self.assertEquals(len(chart_data['labels']), 0)
        self.assertEquals(chart_data['datasets'][0]['data'], list(range(1, 10)))
class TestConfigADTS(TestCase):
    """Each config ADT must behave like a dict and reject unknown keys."""

    def _check(self, factory, **kwargs):
        # Valid keywords produce a dict; an unknown keyword raises ValueError.
        self.assertTrue(isinstance(factory(**kwargs), dict))
        self.assertRaises(ValueError, lambda: factory(nonsense='something'))

    def test_rgba(self):
        self.assertEquals(rgba(255, 255, 255), 'rgba(255,255,255,1.0)')
        self.assertEquals(rgba(255, 255, 255, 0.0), 'rgba(255,255,255,0.0)')

    def test_title(self):
        self._check(Title, text='Hello World')

    def test_legend(self):
        self._check(Legend, display=False)

    def test_tooltips(self):
        self._check(Tooltips, enabled=True)

    def test_hover(self):
        self._check(Hover, mode='default')

    def test_interaction_modes(self):
        self._check(InteractionModes, label='Hello World')

    def test_animation(self):
        self._check(Animation, duration=1.0)

    def test_element(self):
        arc = ElementArc(borderColor=rgba(255, 255, 255, 1))
        self._check(Element, arc=arc)

    def test_scales(self):
        self._check(Axes,
                    type='linear',
                    position='left',
                    scaleLabel=ScaleLabel(fontColor='#fff'),
                    ticks=Tick(fontColor='#fff'))
class ChartViewTestCase(TestCase):
    """Exercises ChartView construction rules and its ``from_chart`` factory."""

    def test_chart_view(self):
        # ChartView exposes from_chart and refuses direct instantiation.
        self.assertTrue(getattr(ChartView, 'from_chart', False))
        self.assertRaises(ImproperlyConfigured,
                          lambda: ChartView())

    def test_chart_view_from_chart_classonly(self):
        # from_chart must not be reachable from an *instance*.
        ChartViewSubClass = type('ChartViewSubClass', (ChartView, ), {
            'chart_instance': LineChart()
        })
        chart_view = ChartViewSubClass()
        self.assertRaises(AttributeError,
                          lambda: chart_view.from_chart(LineChart()))

    def test_chart_view_from_chart(self):
        # from_chart accepts only Chart *instances* -- not dicts, not classes.
        self.assertRaises(ImproperlyConfigured,
                          lambda: ChartView.from_chart(dict()))
        self.assertRaises(ImproperlyConfigured,
                          lambda: ChartView.from_chart(LineChart))
        ChartView.from_chart(LineChart())

    def test_chart_view_get(self):
        # A GET on a chart-bound view returns the JSON chart configuration.
        ChartViewSubClass = type('ChartViewSubClass', (ChartView, ), {
            'chart_instance': LineChart()
        })
        chart_view = ChartViewSubClass()
        request_factory = RequestFactory()
        request = request_factory.get('/test-url')
        response = chart_view.get(request)
        self.assertEquals(response.status_code, 200)
        charset = getattr(response, 'charset', 'utf-8')
        content = response.content.decode(charset)
        data = json.loads(content)
        self.assertIn('data', data)
        self.assertIn('options', data)
        self.assertIn('type', data)
        self.assertTrue(isinstance(data['data'], dict))
        self.assertTrue(isinstance(data['options'], dict))
        self.assertTrue(isinstance(data['type'], (six.string_types, six.text_type)))
        self.assertIn(data['type'], ['bar', 'line', 'radar', 'polarArea', 'pie', 'bubble'])
        self.assertIn('title', data['options'])
class ChartTestCase(TestCase):
    """Covers Chart HTML/JS rendering, dimensions and parameterization."""

    def test_chart_dimension(self):
        # Explicit dimensions are only legal on a non-responsive chart.
        line_chart = LineChartUnresponsive(width=1000, height=500)
        self.assertEquals(line_chart.width, 1000)
        self.assertEquals(line_chart.height, 500)
        self.assertIn('height: 500px', line_chart.as_html())
        self.assertIn('width: 1000px', line_chart.as_html())

    def test_chart_no_dimension(self):
        # Without dimensions no inline height/width style is emitted.
        line_chart = LineChart()
        self.assertEquals(line_chart.width, None)
        self.assertEquals(line_chart.height, None)
        self.assertNotIn('height:', line_chart.as_html())
        self.assertNotIn('width:', line_chart.as_html())

    def test_chart_html_id(self):
        # A caller-supplied html_id ends up on the rendered element.
        line_chart = LineChart(html_id='test-id')
        self.assertIn('id="test-id"', line_chart.as_html())

    def test_chart_render_html(self):
        # render_html yields only markup, never the <script> payload.
        line_chart = LineChart()
        context = {
            'html_id': 'test-id',
            'chart': line_chart,
            'chart_configuration': line_chart.get_configuration(),
        }
        html = line_chart.render_html(context)
        self.assertNotIn('<script', html)

    def test_chart_render_js(self):
        # render_js yields only script, never the <canvas> element.
        line_chart = LineChart()
        context = {
            'html_id': 'test-id',
            'chart': line_chart,
            'chart_configuration': line_chart.get_configuration(),
        }
        js = line_chart.render_js(context)
        self.assertNotIn('<canvas', js)

    def test_responsive_height_width(self):
        # Dimensions on a responsive chart must raise ImproperlyConfigured.
        LineChartUnresponsive(height=500)
        self.assertRaises(ImproperlyConfigured,
                          lambda: LineChart(height=500))

    def test_chart_parameterization(self):
        # as_html forwards its arguments to get_datasets.
        chart = LineChartParameterized()
        self.assertNotIn('Dollar Chart', chart.as_html('euro'))
        self.assertIn('Euro Chart', chart.as_html('euro'))
        self.assertNotIn('Euro Chart', chart.as_html('dollar'))
        self.assertIn('Dollar Chart', chart.as_html('dollar'))
class AsyncChartParameterization(ChartViewTestToolkitSolo):
    """The async view must forward URL kwargs to ``get_datasets``."""

    klass = LineChartParameterized

    def test_euro(self):
        self.url_kwargs = {'currency_type': 'euro'}
        self.assertEquals('Euro Chart',
                          self.data['data']['datasets'][0]['label'])

    def test_dollar(self):
        self.url_kwargs = {'currency_type': 'dollar'}
        self.assertEquals('Dollar Chart',
                          self.data['data']['datasets'][0]['label'])
class OptionsChartTestCase(ChartViewTestToolkitSolo):
    """Class attributes must take precedence over keys in ``options``."""

    klass = OptionsChart

    def test_precedence(self):
        options = self.data['options']
        self.assertEquals('Precendence', options['title']['text'])
        self.assertTrue(options['responsive'])
        self.assertTrue(options['maintainAspectRatio'])
| 2.046875 | 2 |
data/tools/mouse_picker.py | westernesque/a-history-of-birds | 1 | 12757515 | import data.tools.maths as m
import pygame, numpy
class MousePicker:
    """Converts the 2-D mouse position into a 3-D world-space picking ray.

    The ray is obtained by running the projection pipeline in reverse:
    screen pixels -> normalized device coordinates -> clip -> eye -> world.
    """

    # Class-level defaults.  RAY_RANGE and RECURSION_COUNT are not used by
    # any method below -- presumably reserved for a terrain binary search.
    current_ray = None
    RAY_RANGE = 600.0
    RECURSION_COUNT = 200

    def __init__(self, camera, projection_matrix, display, terrain):
        self.camera = camera
        self.projection_matrix = projection_matrix
        self.display = display
        self.terrain = terrain
        self.view_matrix = m.Maths().create_view_matrix(camera)
        self.current_terrain_point = None
        self.count = 0

    def get_current_ray(self):
        """Return the ray computed by the most recent update()."""
        return self.current_ray

    def update(self):
        """Refresh the view matrix from the camera and recompute the ray."""
        self.view_matrix = m.Maths().create_view_matrix(self.camera)
        self.current_ray = self.calculate_mouse_ray()

    def calculate_mouse_ray(self):
        """Unproject the current mouse position into a world-space ray."""
        mouse_x, mouse_y = float(pygame.mouse.get_pos()[0]), float(pygame.mouse.get_pos()[1])
        normalized_device_coordinates = self.get_normalized_device_coordinates(mouse_x, mouse_y)
        # z = -1.0 points into the screen; w = 1.0 marks a clip-space position.
        clip_coordinates = (normalized_device_coordinates[0], normalized_device_coordinates[1], -1.0, 1.0)
        eye_coordinates = self.to_eye_coordinates(clip_coordinates)
        world_ray = self.to_world_coordinates(eye_coordinates)
        return world_ray

    def to_world_coordinates(self, eye_coordinates):
        """Transform an eye-space direction into world space.

        NOTE(review): the x and z components are negated after the inverse
        view transform and the resulting ray is not normalized -- confirm
        this matches the engine's coordinate handedness.
        """
        inverted_view_matrix = numpy.linalg.inv(self.view_matrix)
        ray_world_coordinates = numpy.dot(inverted_view_matrix, eye_coordinates)
        mouse_ray = (-ray_world_coordinates[0], ray_world_coordinates[1], -ray_world_coordinates[2])
        return mouse_ray

    def to_eye_coordinates(self, clip_coordinates):
        """Apply the inverse projection, forcing a forward direction (w = 0)."""
        inverted_projection_matrix = numpy.linalg.inv(self.projection_matrix)
        eye_coordinates = numpy.dot(inverted_projection_matrix, clip_coordinates)
        return eye_coordinates[0], eye_coordinates[1], -1.0, 0.0

    def get_normalized_device_coordinates(self, mouse_x, mouse_y):
        """Map window pixel coordinates onto [-1, 1] NDC.

        NOTE(review): the y axis is not flipped here, although window
        coordinates usually grow downward -- verify against the projection
        setup.
        """
        x = (2.0 * mouse_x) / self.display.get_width() - 1.0
        y = (2.0 * mouse_y) / self.display.get_height() - 1.0
        return (x, y)

    def intersect_with_y(self):
        """Intersect the current ray, cast from the camera, with a horizontal
        plane.

        NOTE(review): x and z are solved for the plane y = 0, yet the
        returned y is terrain.height when set -- confirm the intended plane.
        """
        a = self.camera.position[0]
        b = self.camera.position[1]
        c = self.camera.position[2]
        alpha = self.current_ray[0]
        beta = self.current_ray[1]
        gamma = self.current_ray[2]
        # Solve camera + t * ray = 0 for the y component.
        x = a - (alpha * b) / beta
        if self.terrain.height is not None:
            y = self.terrain.height
        else:
            y = 0.0
        z = c - (gamma * b) / beta
        return (x, y, z)
| 2.6875 | 3 |
real_time_ids.py | aspera1631/hs_logreader | 0 | 12757516 | ## Reads the log file in real time and fills the dataframe with card info
__author__ = 'bdeutsch'
import time
import numpy as np
import pandas as pd
import re
# Follow the log and output lines
def follow(filename):
    """Generator that tails an open file object, yielding lines as they appear.

    Seeks to the end of the file first, then polls every 0.1 s for new lines.
    """
    filename.seek(0, 2)  # jump to end-of-file
    while True:
        new_line = filename.readline()
        if new_line:
            yield new_line
        else:
            time.sleep(0.1)
# get the id
def get_id(line):
    """Extract the numeric entity id from a log line, or None if absent."""
    match = re.search(r'[\[ ]id=(\d+) ', line)
    return int(match.group(1)) if match else None
# If there's an id in the line, add it to the list if we haven't seen it already.
# Also, check for nested brackets. If we find them, create a stack of updates.
# For each update, run individual functions to find id, name, cardid, player
# For each value, update the df

# Tail the Hearthstone client log and print the entity id of each new line.
# NOTE: Python 2 script (print statement below).
filename = "/Users/bdeutsch/Library/Logs/Unity/Player.log"
logfile = open(filename, "r")
loglines = follow(logfile)
for line in loglines:
    id = get_id(line)
    # Skip lines that start with '(' and lines carrying no id.
    if line[0] != '(' and id:
        print id
seahub/api2/endpoints/via_repo_token.py | samuelduann/seahub | 0 | 12757517 | <reponame>samuelduann/seahub
import os
import json
import logging
import posixpath
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from urllib.parse import quote
from seahub.api2.authentication import RepoAPITokenAuthentication
from seahub.repo_api_tokens.utils import get_dir_file_info_list
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error, to_python_boolean
from seaserv import seafile_api, get_repo, check_quota
from pysearpc import SearpcError
import seahub.settings as settings
from seahub.repo_api_tokens.utils import get_dir_file_recursively
from seahub.constants import PERMISSION_READ
from seahub.utils import normalize_dir_path, check_filename_with_rename, gen_file_upload_url, is_valid_dirent_name, \
normalize_file_path, render_error, gen_file_get_url, is_pro_version
from seahub.utils.timeutils import timestamp_to_isoformat_timestr
logger = logging.getLogger(__name__)

json_content_type = 'application/json; charset=utf-8'  # default JSON content type
HTTP_443_ABOVE_QUOTA = 443        # non-standard status: library quota exceeded
HTTP_520_OPERATION_FAILED = 520   # non-standard status: generic operation failure
def check_folder_permission_by_repo_api(request, repo_id, path):
    """Resolve the access permission granted by a repo API token.

    :param request: request obj (carrying ``repo_api_token_obj``)
    :param repo_id: repo's id
    :param path: repo path (accepted for interface symmetry; not consulted)
    :return: permission string

    A repo whose status is 1 always yields read permission; otherwise the
    permission stored on the token itself is returned.
    """
    if seafile_api.get_repo_status(repo_id) == 1:
        return PERMISSION_READ
    return request.repo_api_token_obj.permission
class ViaRepoDirView(APIView):
    """List (GET) and create/rename/revert (POST) folders in the library
    bound to the repo API token."""

    authentication_classes = (RepoAPITokenAuthentication, SessionAuthentication)
    throttle_classes = (UserRateThrottle,)

    def get_dir_info(self, repo_id, dir_path):
        """Return a serializable description of the folder at ``dir_path``."""
        dir_obj = seafile_api.get_dirent_by_path(repo_id, dir_path)
        dir_info = {
            'type': 'dir',
            'repo_id': repo_id,
            'parent_dir': os.path.dirname(dir_path.rstrip('/')),
            'obj_name': dir_obj.obj_name,
            'obj_id': dir_obj.obj_id,
            'mtime': timestamp_to_isoformat_timestr(dir_obj.mtime),
        }
        return dir_info

    def get(self, request, format=None):
        """List folder contents.

        Query parameters: ``path`` (default '/'), ``recursive`` ('0'/'1'),
        ``type`` ('f' files only / 'd' dirs only), ``with_thumbnail``,
        ``thumbnail_size``, ``with_parents``.
        """
        repo_id = request.repo_api_token_obj.repo_id
        # argument check
        recursive = request.GET.get('recursive', '0')
        if recursive not in ('1', '0'):
            error_msg = "If you want to get recursive dir entries, you should set 'recursive' argument as '1'."
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        request_type = request.GET.get('type', '')
        if request_type and request_type not in ('f', 'd'):
            error_msg = "'type should be 'f' or 'd'."
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        with_thumbnail = request.GET.get('with_thumbnail', 'false')
        if with_thumbnail not in ('true', 'false'):
            error_msg = 'with_thumbnail invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        with_thumbnail = to_python_boolean(with_thumbnail)
        thumbnail_size = request.GET.get('thumbnail_size', 48)
        try:
            thumbnail_size = int(thumbnail_size)
        except ValueError:
            error_msg = 'thumbnail_size invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        with_parents = request.GET.get('with_parents', 'false')
        if with_parents not in ('true', 'false'):
            error_msg = 'with_parents invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        with_parents = to_python_boolean(with_parents)
        # recource check
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        parent_dir = request.GET.get('path', '/')
        parent_dir = normalize_dir_path(parent_dir)
        dir_id = seafile_api.get_dir_id_by_path(repo_id, parent_dir)
        if not dir_id:
            error_msg = 'Folder %s not found.' % parent_dir
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        # permission check
        permission = check_folder_permission_by_repo_api(request, repo_id, parent_dir)
        if not permission:
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)
        # get dir/file list recursively
        # username = request.user.username
        # Get username by repo_id. Can not use is_org_context, because 'AnonymousUser' object has no attribute 'org'.
        username = seafile_api.get_repo_owner(repo_id) or seafile_api.get_org_repo_owner(repo_id)
        if recursive == '1':
            try:
                dir_file_info_list = get_dir_file_recursively(repo_id, parent_dir, [])
            except Exception as e:
                logger.error(e)
                error_msg = 'Internal Server Error'
                return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
            # Filter by requested entry type ('f' files, 'd' dirs, else all).
            response_dict = {}
            response_dict['dirent_list'] = []
            if request_type == 'f':
                for item in dir_file_info_list:
                    if item['type'] == 'file':
                        response_dict['dirent_list'].append(item)
            elif request_type == 'd':
                for item in dir_file_info_list:
                    if item['type'] == 'dir':
                        response_dict['dirent_list'].append(item)
            else:
                response_dict['dirent_list'] = dir_file_info_list
            return Response(response_dict)
        parent_dir_list = []
        if not with_parents:
            # only return dirent list in current parent folder
            parent_dir_list.append(parent_dir)
        else:
            # if value of 'path' parameter is '/a/b/c' add with_parents's is 'true'
            # then return dirent list in '/', '/a', '/a/b' and '/a/b/c'.
            if parent_dir == '/':
                parent_dir_list.append(parent_dir)
            else:
                tmp_parent_dir = '/'
                parent_dir_list.append(tmp_parent_dir)
                for folder_name in parent_dir.strip('/').split('/'):
                    tmp_parent_dir = posixpath.join(tmp_parent_dir, folder_name)
                    tmp_parent_dir = normalize_dir_path(tmp_parent_dir)
                    parent_dir_list.append(tmp_parent_dir)
        all_dir_info_list = []
        all_file_info_list = []
        try:
            for parent_dir in parent_dir_list:
                # get dir file info list
                dir_info_list, file_info_list = get_dir_file_info_list(username,
                        request_type, repo, parent_dir, with_thumbnail,
                        thumbnail_size)
                all_dir_info_list.extend(dir_info_list)
                all_file_info_list.extend(file_info_list)
        except Exception as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        response_dict = {}
        response_dict["user_perm"] = permission
        response_dict["dir_id"] = dir_id
        response_dict["repo_name"] = repo.repo_name
        if request_type == 'f':
            response_dict['dirent_list'] = all_file_info_list
        elif request_type == 'd':
            response_dict['dirent_list'] = all_dir_info_list
        else:
            response_dict['dirent_list'] = all_dir_info_list + all_file_info_list
        return Response(response_dict)

    def post(self, request, format=None):
        """Perform a folder operation: ``mkdir``, ``rename`` or ``revert``.

        ``path`` query parameter names the target folder; the operation and
        its extra arguments (``newname``, ``commit_id``) come from the body.
        """
        repo_id = request.repo_api_token_obj.repo_id
        # argument check
        path = request.GET.get('path', None)
        if not path or path[0] != '/':
            error_msg = 'path invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        if path == '/':
            error_msg = 'Can not operate root dir.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        operation = request.data.get('operation', None)
        if not operation:
            error_msg = 'operation invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        operation = operation.lower()
        if operation not in ('mkdir', 'rename', 'revert'):
            error_msg = "operation can only be 'mkdir', 'rename' or 'revert'."
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        # resource check
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        path = path.rstrip('/')
        # NOTE(review): unlike get(), this uses request.user.username, which
        # may be empty under pure token authentication -- confirm intended.
        username = request.user.username
        parent_dir = os.path.dirname(path)
        if operation == 'mkdir':
            # resource check
            parent_dir_id = seafile_api.get_dir_id_by_path(repo_id, parent_dir)
            if not parent_dir_id:
                error_msg = 'Folder %s not found.' % parent_dir
                return api_error(status.HTTP_404_NOT_FOUND, error_msg)
            # permission check
            if check_folder_permission_by_repo_api(request, repo_id, parent_dir) != 'rw':
                error_msg = 'Permission denied.'
                return api_error(status.HTTP_403_FORBIDDEN, error_msg)
            new_dir_name = os.path.basename(path)
            if not is_valid_dirent_name(new_dir_name):
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'name invalid.')
            # Retry up to 10 times with a deduplicated name on collision.
            retry_count = 0
            while retry_count < 10:
                new_dir_name = check_filename_with_rename(repo_id,
                                                          parent_dir, new_dir_name)
                try:
                    seafile_api.post_dir(repo_id,
                                         parent_dir, new_dir_name, username)
                    break
                except SearpcError as e:
                    if str(e) == 'file already exists':
                        retry_count += 1
                    else:
                        logger.error(e)
                        error_msg = 'Internal Server Error'
                        return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
                                         error_msg)
            new_dir_path = posixpath.join(parent_dir, new_dir_name)
            dir_info = self.get_dir_info(repo_id, new_dir_path)
            resp = Response(dir_info)
            return resp
        if operation == 'rename':
            # resource check
            dir_id = seafile_api.get_dir_id_by_path(repo_id, path)
            if not dir_id:
                error_msg = 'Folder %s not found.' % path
                return api_error(status.HTTP_404_NOT_FOUND, error_msg)
            # permission check
            if check_folder_permission_by_repo_api(request, repo_id, path) != 'rw':
                error_msg = 'Permission denied.'
                return api_error(status.HTTP_403_FORBIDDEN, error_msg)
            old_dir_name = os.path.basename(path)
            new_dir_name = request.data.get('newname', None)
            if not new_dir_name:
                error_msg = 'newname invalid.'
                return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
            if not is_valid_dirent_name(new_dir_name):
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'name invalid.')
            # Renaming to the same name is a no-op: return current info.
            if new_dir_name == old_dir_name:
                dir_info = self.get_dir_info(repo_id, path)
                resp = Response(dir_info)
                return resp
            try:
                # rename duplicate name
                new_dir_name = check_filename_with_rename(repo_id, parent_dir, new_dir_name)
                # rename dir
                seafile_api.rename_file(repo_id, parent_dir, old_dir_name,
                                        new_dir_name, username)
                new_dir_path = posixpath.join(parent_dir, new_dir_name)
                dir_info = self.get_dir_info(repo_id, new_dir_path)
                resp = Response(dir_info)
                return resp
            except SearpcError as e:
                logger.error(e)
                error_msg = 'Internal Server Error'
                return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        if operation == 'revert':
            commit_id = request.data.get('commit_id', None)
            if not commit_id:
                error_msg = 'commit_id invalid.'
                return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
            if seafile_api.get_dir_id_by_path(repo_id, path):
                # dir exists in repo
                if check_folder_permission_by_repo_api(request, repo_id, path) != 'rw':
                    error_msg = 'Permission denied.'
                    return api_error(status.HTTP_403_FORBIDDEN, error_msg)
            else:
                # dir NOT exists in repo
                if check_folder_permission_by_repo_api(request, repo_id, '/') != 'rw':
                    error_msg = 'Permission denied.'
                    return api_error(status.HTTP_403_FORBIDDEN, error_msg)
            try:
                seafile_api.revert_dir(repo_id, commit_id, path, username)
            except Exception as e:
                logger.error(e)
                error_msg = 'Internal Server Error'
                return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
            return Response({'success': True})
class ViaRepoUploadLinkView(APIView):
    """Return a fileserver upload URL for a folder in the token-bound library."""

    authentication_classes = (RepoAPITokenAuthentication, SessionAuthentication)
    throttle_classes = (UserRateThrottle,)

    def get(self, request, format=None):
        repo_id = request.repo_api_token_obj.repo_id
        # recourse check
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        parent_dir = request.GET.get('path', '/')
        dir_id = seafile_api.get_dir_id_by_path(repo_id, parent_dir)
        if not dir_id:
            error_msg = 'Folder %s not found.' % parent_dir
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        # permission check
        if check_folder_permission_by_repo_api(request, repo_id, parent_dir) != 'rw':
            return api_error(status.HTTP_403_FORBIDDEN,
                             'You do not have permission to access this folder.')
        # Refuse uploads once the library is over quota.
        if check_quota(repo_id) < 0:
            return api_error(HTTP_443_ABOVE_QUOTA, "Out of quota.")
        obj_data = {'parent_dir': parent_dir}
        # Pro edition records the app name as the acting (anonymous) user.
        if is_pro_version():
            obj_data['anonymous_user'] = request.repo_api_token_obj.app_name
        obj_id = json.dumps(obj_data)
        # Reusable (non-onetime) upload token for the fileserver.
        token = seafile_api.get_fileserver_access_token(repo_id,
                                                        obj_id, 'upload', '',
                                                        use_onetime=False)
        if not token:
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        # 'api' callers may request replace-on-conflict; 'web' uses the
        # ajax-style upload endpoint.
        req_from = request.GET.get('from', 'api')
        if req_from == 'api':
            try:
                replace = to_python_boolean(request.GET.get('replace', '0'))
            except ValueError:
                replace = False
            url = gen_file_upload_url(token, 'upload-api', replace)
        elif req_from == 'web':
            url = gen_file_upload_url(token, 'upload-aj')
        else:
            error_msg = 'from invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        return Response(url)
class ViaRepoDownloadLinkView(APIView):
    """Return a temporary fileserver download URL for a file in the
    token-bound library."""

    authentication_classes = (RepoAPITokenAuthentication, SessionAuthentication)
    throttle_classes = (UserRateThrottle,)

    def get(self, request):
        raw_path = request.GET.get('path')
        if not raw_path:
            return api_error(status.HTTP_400_BAD_REQUEST, 'path invalid.')
        repo_id = request.repo_api_token_obj.repo_id
        file_path = normalize_file_path(raw_path)
        filename = os.path.basename(file_path)
        file_id = seafile_api.get_file_id_by_path(repo_id, file_path)
        if not file_id:
            return api_error(status.HTTP_404_NOT_FOUND, 'File not found')
        token = seafile_api.get_fileserver_access_token(
            repo_id, file_id, 'download', request.repo_api_token_obj.app_name,
            use_onetime=settings.FILESERVER_TOKEN_ONCE_ONLY)
        return Response(gen_file_get_url(token, filename))
class RepoInfoView(APIView):
    """Expose the id and name of the repo bound to the API token."""

    authentication_classes = (RepoAPITokenAuthentication, SessionAuthentication)
    throttle_classes = (UserRateThrottle,)

    def get(self, request):
        """Return ``{'repo_id': ..., 'repo_name': ...}`` for the token's library."""
        repo_id = request.repo_api_token_obj.repo_id
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %(repo_id)s not found.' % {'repo_id': repo_id}
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        return Response({
            'repo_id': repo.id,
            'repo_name': repo.name,
        })
| 1.929688 | 2 |
trftools/dictionaries/_subtlex.py | christianbrodbeck/TRF-Tools | 0 | 12757518 | # Author: <NAME> <<EMAIL>>
from math import log
from pathlib import Path
from ._utils import download
# Fallback statistics used for words absent from the SUBTLEXus table
# (treated as if seen once in one film).
MINIMAL_ENTRY = {
    'FREQcount': 1,
    'CDcount': 1,
    'Lg10WF': log(2, 10),  # log10(FREQcount + 1)
    'Lg10CD': log(2, 10),
}
# Approximate total token count of the SUBTLEXus corpus (~51 million words).
TOTAL_COUNT = 51e6
def read_subtlex(lower=False):
    """Read the SUBTLEXus word-frequency table.

    Parameters
    ----------
    lower : bool
        Use lower case keys (default is upper case).

    Notes
    -----
    http://www.ugent.be/pp/experimentele-psychologie/en/research/documents/subtlexus

    Columns:
    Word, FREQcount, CDcount, FREQlow, Cdlow, SUBTLWF, Lg10WF, SUBTLCD, Lg10CD
    """
    path = download('https://www.ugent.be/pp/experimentele-psychologie/en/research/documents/subtlexus/subtlexus2.zip/at_download/file', 'SUBTLEXus74286wordstextversion.txt', unzip=True)
    transform = str.lower if lower else str.upper
    table = {}
    with path.open() as fid:
        # First row holds the column names; the word itself becomes the dict key.
        header = fid.readline().split()
        key_index = header.index('Word')
        header.pop(key_index)
        for line in fid:
            fields = line.split()
            word = transform(fields.pop(key_index))
            if word in table:
                raise RuntimeError(f"Duplicate key: {word}")
            table[word] = dict(zip(header, map(float, fields)))
    return table
def read_subtlex_pos():
    """Read SUBTLEXus with part-of-speech tags.

    Returns a dict mapping each word to a ``{pos_tag: frequency}`` dict,
    built from the dot-separated tag and frequency columns.
    """
    path = download('http://crr.ugent.be/papers/SUBTLEX-US_frequency_list_with_PoS_information_final_text_version.zip', 'SUBTLEX-US-PoS.txt', unzip=True)
    with path.open() as fid:
        keys = next(fid).split()
        i_word = keys.index('Word')
        i_class = keys.index('All_PoS_SUBTLEX')
        i_freq = keys.index('All_freqs_SUBTLEX')
        d = {}
        for line in fid:
            line = line.split()
            # Tags and their per-tag frequencies are parallel dot-separated
            # lists; '#N/A' frequency entries are dropped.
            d[line[i_word]] = {k: int(v) for k, v in
                               zip(line[i_class].split('.'),
                                   line[i_freq].split('.')) if v != '#N/A'}
    return d
| 2.65625 | 3 |
solutions/518.py | pacokwon/leetcode | 2 | 12757519 | <reponame>pacokwon/leetcode<filename>solutions/518.py
# Coin Change 8
class Solution:
    def change(self, amount, coins):
        """Count the combinations of coins that sum to amount.

        Unbounded-knapsack DP: ways[t] is the number of combinations
        reaching total t using the coins processed so far.
        """
        ways = [1] + [0] * amount  # one way to make 0: pick nothing
        for coin in coins:
            for total in range(coin, amount + 1):
                ways[total] += ways[total - coin]
        return ways[amount]
if __name__ == "__main__":
    # Demo: 5 can be formed from {1, 2, 5} in 4 ways -> prints 4.
    sol = Solution()
    amount = 5
    coins = [1, 2, 5]
    print(sol.change(amount, coins))
| 3.4375 | 3 |
FET_Segm_Stats.py | cbri92/FETsegmenter | 0 | 12757520 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 12:21:00 2020
@author: cbri3325
"""
#%% Import functions
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
import datetime
import os
import glob
import shutil
import xlsxwriter
import time
from scipy.stats.stats import pearsonr
def createList(r1, r2):
    """Return the list of integers from r1 to r2 inclusive (empty if r1 > r2)."""
    # list(range(...)) replaces the equivalent hand-written comprehension.
    return list(range(r1, r2 + 1))
#%% Set Working directory
data_supradir = 'path to directory containing analysis results xlsx files'  # Set working directory (edit before running)
users_path = [ f.path for f in os.scandir(data_supradir) if f.is_dir() ]  # Paths to the per-user directories
users_name = [ f.name for f in os.scandir(data_supradir) if f.is_dir() ]  # Names of the per-user directories
n_users = len(users_name)  # Total number of users
print(users_path)
# Hard-coded subject IDs expected in every Results_*.xlsx workbook.
subj_list = ['AB001', 'DA003', 'FF004', 'GF002', 'GW005', 'KF006']
# One accumulator DataFrame per subject; rows are appended user-by-user below.
storeResults = {subj: pd.DataFrame(columns=['User', 'Repeats', 'Method', 'BTV Volume [mm3]', 'CTRL VOI Mean intensity [SUV]']) for subj in subj_list}
for current in users_name:
    user_dir = data_supradir+current
    user_name = current
    # Set paths to subfolders (one per segmentation method)
    MI_dir = user_dir +'/MI method'
    CS_dir = user_dir +'/CS method'
    # First read without an index only to count sheets ("repeats") and rows.
    MI_results_df = pd.read_excel(MI_dir+'/Results_MI.xlsx', sheet_name=None)
    CS_results_df = pd.read_excel(CS_dir+'/Results_CS.xlsx', sheet_name=None)
    n_repeats = len(MI_results_df)
    n_subjs = len(MI_results_df.get('Repeat1')['Subject_ID'])
    # subj_list = MI_results_df.get('Repeat1')['Subject_ID'].tolist()
    # Re-read indexed by Subject_ID so rows can be looked up with .loc[subj, ...].
    MI_results_df = pd.read_excel(MI_dir+'/Results_MI.xlsx', sheet_name=None, index_col='Subject_ID')
    CS_results_df = pd.read_excel(CS_dir+'/Results_CS.xlsx', sheet_name=None, index_col='Subject_ID')
    # Create empty dataframes to populate as going through the loop
    # storeResults = {subj: pd.DataFrame(columns=['User', 'Repeats', 'Method', 'BTV Volume [mm3]', 'CTRL VOI Mean intensity [SUV]']) for subj in subj_list}
    for repeat in range(1, n_repeats+1):
        # Repeats_ns = Repeats_ns.append([repeat])
        MI_BTV_CTRL = MI_results_df.get('Repeat'+str(repeat)).loc[:, ['BTV Volume [mm3]','CTRL VOI Mean intensity [SUV]']]
        CS_BTV_CTRL = CS_results_df.get('Repeat'+str(repeat)).loc[:, ['BTV Volume [mm3]','CTRL VOI Mean intensity [SUV]']]
        method1, method2 = 'MI', 'CS'
        for subj in subj_list:
            # NOTE(review): DataFrame.append was removed in pandas 2.0 —
            # this script requires pandas < 2 (use pd.concat to modernize).
            storeResults[subj] = storeResults[subj].append({'User': current,
                                                            'Repeats': repeat,
                                                            'Method': method1,
                                                            'BTV Volume [mm3]': float(MI_BTV_CTRL.loc[subj, 'BTV Volume [mm3]']),
                                                            'CTRL VOI Mean intensity [SUV]': float(MI_BTV_CTRL.loc[subj, 'CTRL VOI Mean intensity [SUV]'])
                                                            }, ignore_index=True)
            storeResults[subj] = storeResults[subj].append({'User': current,
                                                            'Repeats': repeat,
                                                            'Method': method2,
                                                            'BTV Volume [mm3]': float(CS_BTV_CTRL.loc[subj, 'BTV Volume [mm3]']),
                                                            'CTRL VOI Mean intensity [SUV]': float(CS_BTV_CTRL.loc[subj, 'CTRL VOI Mean intensity [SUV]'])
                                                            }, ignore_index=True)
# new_df = df.groupby(['User', 'Method']).['BTV Volume [mm3]'].agg({'mean','std'})
# df = df.merge(new_df, left_on=['User', 'Method'], right_index=True)
# Write one sheet per subject, with per-(User, Method) coefficients of variation.
writer = pd.ExcelWriter(data_supradir +'OverallResults.xlsx', engine='xlsxwriter')
for name, df in storeResults.items():
    # Calculate CoV values for BTV and CTRL for each group of User, Method for each Subject
    # NOTE(review): .agg with a set literal and writer.save() are deprecated in
    # recent pandas — confirm the pinned pandas version before reuse.
    BTV_df = df.groupby(['User', 'Method'])['BTV Volume [mm3]'].agg({'mean','std'})
    CTRL_df = df.groupby(['User', 'Method'])['CTRL VOI Mean intensity [SUV]'].agg({'mean','std'})
    BTV_df['BTV_CoV'] = BTV_df['std']/BTV_df['mean']  # CoV = std / mean
    CTRL_df['CTRL_CoV'] = CTRL_df['std']/CTRL_df['mean']
    df = df.merge(BTV_df['BTV_CoV'], left_on=['User', 'Method'], right_index=True)
    df = df.merge(CTRL_df['CTRL_CoV'], left_on=['User', 'Method'], right_index=True)
    df.to_excel(writer, sheet_name=name, index=False)
writer.save()
| 2.390625 | 2 |
cheap_orchestra/__init__.py | fpftech/cheap_orchestra | 0 | 12757521 | # -*- coding: utf-8 -*-
from .cheap_orchestra import *
"""Top-level package for Cheap Orchestra."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| 0.929688 | 1 |
data.py | rozim/ChessAtAGlance | 0 | 12757522 | import sys, os
import time
import tensorflow as tf
import functools
from absl import app
from plan import *
from snappy_io import unsnappy
# Size of the move-label space — presumably the AlphaZero-style 8x8x73 move
# encoding (4672 classes); TODO confirm against the encoder that writes labels.
NUM_CLASSES = 4672
# Board tensor layout: 20 feature planes over an 8x8 board.
BOARD_SHAPE = (20, 8, 8)
BOARD_FLOATS = 1280  # = 20 * 8 * 8, flattened board size
AUTOTUNE = tf.data.AUTOTUNE

# TFRecord schema: board planes plus the move label.
FEATURES = {
    'board': tf.io.FixedLenFeature(BOARD_SHAPE, tf.float32),
    'label': tf.io.FixedLenFeature([], tf.int64)
}
# Extended schema that also carries the variable-length legal-move list.
FEATURES2 = {
    'board': tf.io.FixedLenFeature(BOARD_SHAPE, tf.float32),
    'label': tf.io.FixedLenFeature([], tf.int64),
    'legal_moves': tf.io.VarLenFeature(tf.int64)
}
def legal_moves_mask(legal_moves_sparse):
    """Convert a sparse list of legal-move indices into a dense 0/1 mask over NUM_CLASSES."""
    # Pad with -1 so absent slots one-hot to an all-zero row.
    dense = tf.sparse.to_dense(legal_moves_sparse, default_value=-1)
    hot = tf.one_hot(dense, on_value=1.0, off_value=0.0, depth=NUM_CLASSES)
    # Sum the per-move one-hot rows into a single mask per example.
    hot2 = tf.math.reduce_sum(hot, axis=[-2])
    return hot2
def _extract(blob):
    """Parse a serialized Example into a (board, label) pair."""
    t = tf.io.parse_example(blob, features=FEATURES)
    return t['board'], t['label']
def _extract2(blob):
    """Parse a serialized Example into ({board, legal_moves}, label)."""
    t = tf.io.parse_example(blob, features=FEATURES2)
    return ({'board': t['board'],
             'legal_moves': t['legal_moves'],
             #'legal_moves_mask': legal_moves_mask(t['legal_moves']) # very slow - try to do in model and maybe runs on gpu?
             },
            t['label'])
# def gen_snappy(fn):
# for ex in unsnappy(fn):
# board = tf.convert_to_tensor(ex.features.feature['board'].float_list.value,
# dtype=tf.float32)
# board = tf.reshape(board, BOARD_SHAPE)
# action = tf.convert_to_tensor(ex.features.feature['label'].int64_list.value[0],
# dtype=tf.int64)
# yield (board, action)
def create_input_generator_rio(dplan, fns, is_train=True, verbose=True, do_repeat=True, return_legal_moves=False):
    """Build a tf.data pipeline over zlib-compressed .recordio TFRecord files.

    dplan supplies shuffle/batch/prefetch sizes (and the optional
    'swap_batch_map_order' flag). Returns a batched, prefetched dataset of
    parsed (board, label) — or ({board, legal_moves}, label) — examples.
    """
    if isinstance(fns, str):  # accept a single path or a list of paths
        fns = [fns]
    if verbose:
        print(f'Open {fns}')

    # Validate inputs up front so a failure names the offending file.
    # (The original also built an unused `datasets` list here — removed.)
    for fn in fns:
        assert os.path.isfile(fn), fn
        assert fn.endswith('.recordio'), fn

    ds = tf.data.TFRecordDataset(fns, 'ZLIB', num_parallel_reads=len(fns))
    if is_train:
        ds = ds.shuffle(dplan.shuffle)
    if do_repeat:
        ds = ds.repeat()

    parse_fn = _extract2 if return_legal_moves else _extract
    if dplan.get('swap_batch_map_order', False):
        ds = ds.batch(dplan.batch,
                      num_parallel_calls=AUTOTUNE,
                      deterministic=False)
        ds = ds.map(parse_fn, num_parallel_calls=AUTOTUNE)
    else:
        ds = ds.batch(dplan.batch,
                      num_parallel_calls=AUTOTUNE,
                      deterministic=False)  # deterministic=False for performance
        ds = ds.map(parse_fn)
    return ds.prefetch(dplan.prefetch)
def create_input_generator(dplan, fns, is_train=True, verbose=True, do_repeat=True, return_legal_moves=False):
    """Dispatch to the .recordio pipeline; the old .snappy generator path is gone.

    Same signature and semantics as create_input_generator_rio for
    .recordio inputs; any other extension is an error.
    """
    if isinstance(fns, str):
        fns = [fns]
    if fns[0].endswith('.recordio'):
        return create_input_generator_rio(dplan, fns, is_train, verbose,
                                          do_repeat, return_legal_moves)
    # The generator-based .snappy path was removed. Raise instead of
    # `assert False` so the failure survives python -O.
    raise NotImplementedError('obsolete code path: only .recordio inputs are supported')
def main(argv):
    """Smoke test: print the first batch from a sample dataset."""
    plan = load_plan('v0.toml')
    print(next(iter(create_input_generator(plan.data, 'mega-v2-9.snappy'))))


if __name__ == '__main__':
    app.run(main)
| 2.328125 | 2 |
gtts_token/tests/test_token.py | BoseCorp/gTTS-token | 43 | 12757523 | <gh_stars>10-100
# coding=UTF-8
import unittest
import requests
from gtts_token import gtts_token
class TestToken(unittest.TestCase):
    """Test gToken"""

    def setUp(self):
        self.tokenizer = gtts_token.Token()

    def test_token(self):
        # Deterministic check: a fixed seed must yield a fixed token.
        text = 'test'
        self.assertEqual('278125.134055', self.tokenizer.calculate_token(text, seed="406986.2817744745"))

    def test_real(self):
        # Integration test: hits the live translate_tts endpoint, so it
        # requires network access and may break if Google changes the API.
        text = "Hello"
        token = self.tokenizer.calculate_token(text)
        payload = {
            'q' : text,
            'tl' : "en",
            'client' : 't',
            'tk' : token
        }
        r = requests.get('https://translate.google.com/translate_tts', params=payload)
        self.assertEqual(200, r.status_code)


if __name__ == '__main__':
    unittest.main()
| 2.953125 | 3 |
AutotestWebD/apps/common/func/send_mail.py | yangjourney/sosotest | 422 | 12757524 | from AutotestWebD.settings import EMAIL_SENDER,EMAIL_PASSWORD,EMAIL_SERVER,EMAIL_USERNAME
import smtplib
import traceback
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.header import Header
import os
import email.encoders
import time
import traceback
import re
def send_mail(email_list, subject, email_text, filepath="", sub_type="text"):
    """Send an email to a ';'-separated recipient list, with optional attachment.

    sub_type selects 'plain' ("text") or 'html' body. Sending is retried up
    to 3 times with a 10 s pause. Returns True on success, False otherwise.
    """
    try:
        receiver = list(set(email_list.split(';')))  # de-duplicate recipients
        sender = EMAIL_SENDER
        smtpserver = EMAIL_SERVER
        username = EMAIL_USERNAME
        # Credentials come from settings (the original had an unfilled
        # `<PASSWORD>` placeholder, which is a syntax error).
        password = EMAIL_PASSWORD

        msg = MIMEMultipart()
        if sub_type == "html":
            text_msg = MIMEText(email_text, _subtype='html', _charset='utf-8')
        else:
            # Anything other than "html" (including "text") falls back to plain text.
            text_msg = MIMEText(email_text, 'plain', 'utf-8')
        msg.attach(text_msg)
        msg['From'] = sender
        msg['To'] = ";".join(receiver)
        msg['Subject'] = Header(subject, 'utf-8')

        # Attach the file (base64-encoded octet-stream) if a valid path was given.
        filepath = filepath.strip()
        if os.path.isfile(filepath):
            contype = 'application/octet-stream'
            maintype, subtype = contype.split('/', 1)
            with open(filepath, 'rb') as data:
                file_msg = MIMEBase(maintype, subtype)
                file_msg.set_payload(data.read())
            email.encoders.encode_base64(file_msg)
            basename = os.path.basename(filepath)
            file_msg.add_header('Content-Disposition', 'attachment', filename=basename)
            msg.attach(file_msg)

        is_send_success = False
        resend_times = 0
        for _ in range(3):
            smtp = None  # sentinel: only quit() a connection we actually opened
            try:
                smtp = smtplib.SMTP(smtpserver)
                smtp.login(username, password)
                smtp.sendmail(sender, receiver, msg.as_string())
                is_send_success = True
                break
            except Exception:
                resend_times += 1
                # The original referenced an undefined `user_logger` here
                # (NameError on every retry); report via stdout instead.
                print("Send attempt %s failed; retrying in 10 seconds." % resend_times)
                print(traceback.format_exc())
                time.sleep(10)  # wait before retrying
                if len(receiver) == 0:
                    return False
            finally:
                if smtp is not None:
                    smtp.quit()
        return is_send_success
    except Exception:
        print(traceback.format_exc())
        return False
def whether_display_name(namestr):
    """Return True if *namestr* looks like a display name: 2-4 CJK characters
    followed by a parenthesized lowercase account name with optional digits,
    e.g. ``王蕾(wanglei05)``.
    """
    # Raw string for the regex; bool() collapses the Match/None result.
    return bool(re.match(r"^[\u4e00-\u9fa5]{2,4}\([a-z]{1,}[0-9]{0,}\)$", namestr))
if __name__ == "__main__":
    # NOTE(review): get_email_list is not defined anywhere in this module,
    # so running this demo raises NameError — confirm which module it was
    # meant to be imported from.
    retstr = get_email_list("<EMAIL>;<EMAIL>;王蕾(wanglei05);,<EMAIL>,")
    print(retstr)
    print(type(retstr))
| 2.140625 | 2 |
4dml/nspace.py | ssb22/4dml | 0 | 12757525 | <filename>4dml/nspace.py
#!/usr/bin/env python2
# 4DML Transformation Utility
#
# (C) 2002-2006 <NAME> (University of Cambridge Computer Laboratory,
# Cambridge, UK, http://ssb22.user.srcf.net )
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
# NB a lot of the inline expansions here didn't get
# significant speedups
import os
# Optionally enable psyco JIT specialization of classes (Python 2 only);
# the env var "no_psyco" opts out, and any import failure (e.g. Jython,
# psyco not installed) is silently ignored.
try:
    if os.environ.has_key("no_psyco"): raise ImportError
    from psyco.classes import *
# except ImportError: pass
except: pass # jython hack
class N_Space:
    """A sparse set of points in an n-dimensional discrete space.

    Points are fixed-length tuples stored as keys of ``thePoints`` (the
    values are always 1, i.e. the dict is used as a set — this is
    Python 2 code, predating the modern ``set`` type). ``hasChanged`` is
    set to 1 by every mutating operation so callers can cache derived data.
    The ``optimise*`` methods build lookup dictionaries that accelerate
    repeated cross-sections/sub-sections along one dimension.
    """
    def __init__(self,numDimensions):
        self.thePoints = {}
        self.theNumDimensions = numDimensions
        self.hasChanged = 1
    def isEmpty(self):
        """Return true if the space contains no points."""
        return not self.thePoints
    def getPoints(self): return self.thePoints.keys()
    def getAPoint(self):
        # one point only (arbitrary); exception if empty
        # return self.thePoints.popitem()[0]
        # (NO! it erases the item!)
        return self.thePoints.keys()[0] # back to the inefficient one
    def addPoint(self,newPoint):
        # newPoint is a tuple
        assert len(newPoint) == self.theNumDimensions, \
               "Wrong number of dimensions"
        assert not self.thePoints.has_key(newPoint), \
               "Duplicate point %s" % (newPoint,)
        self.thePoints[newPoint]=1
        self.hasChanged = 1
    def addPoints(self,newPoints):
        """Add many points at once (same checks as addPoint, inlined for speed)."""
        for i in newPoints:
            # self.addPoint(i)
            # Expand inline for optimisation
            assert len(i) == self.theNumDimensions, \
                   "Wrong number of dimensions"
            assert not self.thePoints.has_key(i), \
                   "Duplicate point %s" % (i,)
            self.thePoints[i]=1
        self.hasChanged = 1
    def removePoint(self,pointToRemove):
        try:
            del self.thePoints[pointToRemove]
            self.hasChanged = 1
        except KeyError:
            assert 0, "Tried to remove a non-existent point"
    def removePoints(self,pointsToRemove):
        # for i in pointsToRemove: self.removePoint(i)
        # return
        try:
            for i in pointsToRemove: del self.thePoints[i]
        except KeyError:
            assert 0, "Tried to remove a non-existent point"
        self.hasChanged = 1
    def addCursorPoints(self,cursor,cursorDimens,cursorPos):
        # inserts value 'cursorPos' BEFORE cursorDimens (0+)
        # i.e. lifts each (n-1)-dimensional point of 'cursor' into this
        # space by splicing in a fixed coordinate.
        assert 0 <= cursorDimens < self.theNumDimensions
        # < not <=, because theNumDimensions is the NEW
        # number (we're adding a dimension)
        for i in cursor.getPoints():
            assert len(i) == self.theNumDimensions-1, \
                   "Wrong number of dimensions"
            newPt = i[0:cursorDimens] + \
                    (cursorPos,) + \
                    i[cursorDimens:]
            # self.addPoint(newPt)
            # expanding for optimisation
            self.thePoints[newPt] = 1
        self.hasChanged = 1
    def crossSection(self,dimension,value):
        # Opposite to addCursorPoints
        # Takes points where dimension=value and removes
        # that dimension; returns result as a new N_Space
        assert 0 <= dimension < self.theNumDimensions
        section = N_Space(self.theNumDimensions-1)
        for i in self.getPoints():
            if i[dimension] == value:
                newPt=i[0:dimension] + \
                       i[dimension+1:]
                # section.addPoint(newPt)
                # expanding for optimisation
                section.thePoints[newPt] = 1
        return section
    def crossSection2(self,values):
        # for optimisation
        # Cross-section on a PREFIX of coordinates: keeps points whose
        # leading coordinates equal 'values' and strips that prefix.
        l = len(values)
        if self.__dict__.has_key("optimiseDict1") and l==self.theNumDimensions-1: return self.optimisedCrossSection2(values)
        assert l < self.theNumDimensions
        section = N_Space(self.theNumDimensions-l)
        for i in self.getPoints():
            if i[0:l] == values:
                # section.addPoint(i[l:])
                # expanding for optimisation
                section.thePoints[i[l:]] = 1
        return section
    def optimiseForCrossSectioningLastDimension(self):
        """Pre-index the points by all-but-last coordinates for fast crossSection2."""
        optimiseDict1 = {}
        for i in self.getPoints():
            key = i[:-1]
            if not optimiseDict1.has_key(key): optimiseDict1[key] = N_Space(1)
            optimiseDict1[key].thePoints[i[-1:]] = 1
        self.optimiseDict1 = optimiseDict1
    def optimisedCrossSection2(self,values):
        try:
            return self.optimiseDict1 [ values ]
        except: return N_Space(1)
    def optimiseForSubSectionSingleValue(self,dimension):
        """Pre-index the points by their value in 'dimension' for fast subSection."""
        optimiseDict2 = {}
        for i in self.getPoints():
            key = i[dimension]
            if not optimiseDict2.has_key(key): optimiseDict2[key] = N_Space(self.theNumDimensions)
            optimiseDict2[key].thePoints[i] = 1
        self.optimiseDict2 = optimiseDict2
        self.ssOptimisedFor = dimension
    def optimisedSubSectionSingleValue(self,dimension,value):
        try:
            return self.optimiseDict2 [value]
        except: return N_Space(self.theNumDimensions)
    def subSection(self,dimension,values):
        # Removes all points where dimension != values
        # Returns new N_Space with same number of dimensions
        assert isinstance(values, types.ListType), \
               "Argument of subSection %s must be a LIST of values" % (values,)
        if self.__dict__.has_key("optimiseDict2") and len(values)==1 and self.ssOptimisedFor==dimension: return self.optimisedSubSectionSingleValue(dimension,values[0])
        # Convert 'values' to a dictionary because sometimes
        # it's quite long (e.g. as used in scopeOfElement)
        valuesDict = {}
        for i in values: valuesDict[i]=1
        return self.subSectionFromDict(dimension,valuesDict)
    def subSectionFromDict(self,dimension,values):
        """As subSection, but 'values' is already a dict used as a set."""
        if self.__dict__.has_key("optimiseDict2") and self.ssOptimisedFor==dimension:
            r = N_Space(self.theNumDimensions)
            for v in values.keys(): r.addPoints(self.optimisedSubSectionSingleValue(dimension,v).getPoints())
            return r
        # Non-optimised version:
        assert 0 <= dimension < self.theNumDimensions
        section = N_Space(self.theNumDimensions)
        for i in self.getPoints():
            if values.has_key(i[dimension]):
                # section.addPoint(i)
                section.thePoints[i] = 1
        return section
    def minimum(self,dimension):
        # Returns new N_Space only including the points
        # where 'dimension' is at its minimum (the datatype
        # of that dimension has to be sortable for this to
        # work)
        vals = self.axisProjection(dimension)
        if vals == []: return N_Space(self.theNumDimensions)
        # (not just return self, in case later modified)
        vals.sort()
        return self.subSection(dimension,vals[0:1])
    def axisProjectionAsDict(self,dimension):
        """Return {value: 1} for every value taken in 'dimension'."""
        assert 0 <= dimension < self.theNumDimensions
        projection = {}
        for i in self.getPoints():
            projection[i[dimension]] = 1
        return projection
    def axisProjection(self,dimension):
        return self.axisProjectionAsDict(dimension).keys()
| 2.328125 | 2 |
prettify.py | TechnoIris/python_Effizs | 0 | 12757526 | <filename>prettify.py<gh_stars>0
import requests
from bs4 import BeautifulSoup
# Product page to scrape (Flipkart trimmer listing).
URL="https://www.flipkart.com/nova-prime-series-nht-1085-runtime-45-min-trimmer-men/p/itmenaz5kbhceffm?gclid=CjwKCAiAluLvBRASEiwAAbX3GQNX3ebUtqmybVDGfJVV6uOPB0f8gfztPYRQn5uHSEngrq3njdt6vhoCfBgQAvD_BwE&pid=SHVENAZ5AQNMDGYA&lid=LSTSHVENAZ5AQNMDGYAKNSAM4&marketplace=FLIPKART&cmpid=content_trimmer_8965229628_gmc_pla&tgi=sem,1,G,11214002,g,search,,330072689017,1o2,,,c,,,,,,,&ef_id=CjwKCAiAluLvBRASEiwAAbX3GQNX3ebUtqmybVDGfJVV6uOPB0f8gfztPYRQn5uHSEngrq3njdt6vhoCfBgQAvD_BwE:G:s&s_kwcid=AL!739!3!330072689017!!!g!312958784358!"
# Browser-like User-Agent so the request is not rejected as a bot.
headers={"User-Agent":'Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0'}
def check():
    """Fetch the product page and dump the prettified HTML to soupw.txt."""
    page = requests.get(URL, headers=headers)
    soup = BeautifulSoup(page.content, 'html.parser')
    # ----------------------------------------------- TO CHECK WHETHER soup IS WORKING OR NOT
    # The original opened the file manually and referenced `m.close` without
    # calling it, leaking the handle; a context manager closes it reliably.
    with open('soupw.txt', "wb") as m:
        m.write(soup.prettify().encode("utf-8"))
    print("executed!!!")
# Module runs the scrape immediately on import/execution.
print("executing...")
check()
| 2.8125 | 3 |
testing/test_type_comment_arg_injection.py | python-discord/flake8-annotations | 49 | 12757527 | <reponame>python-discord/flake8-annotations<gh_stars>10-100
from typing import Tuple
import pytest
import pytest_check as check
from flake8_annotations.checker import FORMATTED_ERROR
from testing.helpers import check_source
from testing.test_cases.type_comment_arg_injection_test_cases import (
TypeCommentArgInjectTestCase,
type_comment_arg_inject_test_cases,
)
class TestTypeCommentArgInject:
    """Test injection of `self` or `cls` into class methods."""

    @pytest.fixture(
        params=type_comment_arg_inject_test_cases.items(),
        ids=type_comment_arg_inject_test_cases.keys(),
    )
    def yielded_errors(
        self, request  # noqa: ANN001
    ) -> Tuple[str, TypeCommentArgInjectTestCase, Tuple[FORMATTED_ERROR]]:
        """
        Build a fixture for the error codes emitted from parsing the test code.

        Fixture provides a tuple of: test case name, its corresponding TypeCommentArgInjectTestCase
        instance, and a tuple of the errors yielded by the checker.
        """
        test_case_name, test_case = request.param
        return (
            test_case_name,
            test_case,
            tuple(check_source(test_case.src)),
        )

    def test_type_comment_arg_injection(
        self, yielded_errors: Tuple[str, TypeCommentArgInjectTestCase, Tuple[FORMATTED_ERROR]]
    ) -> None:
        """Test that ANN100 errors are yielded appropriately for type comment annotated defs."""
        failure_msg = f"Check failed for case '{yielded_errors[0]}'"
        # Any emitted code starting with "ANN1" counts as an ANN100-family error.
        yielded_ANN100 = any("ANN1" in error[2] for error in yielded_errors[2])
        check.equal(yielded_errors[1].should_yield_ANN100, yielded_ANN100, msg=failure_msg)
| 2.671875 | 3 |
qcelemental/tests/test_molparse_parse_nucleus_label.py | PeterKraus/QCElemental | 1 | 12757528 | <filename>qcelemental/tests/test_molparse_parse_nucleus_label.py
import pytest
from utils import *
import qcelemental
# Each case: a nucleus label string and the expected parse — element symbol E
# or atomic number Z, user suffix, mass number A, real/ghost flag, and mass.
@pytest.mark.parametrize("inp,expected", [
    ('@ca_miNe', {'E': 'ca', 'Z': None, 'user': '_miNe', 'A': None, 'real': False, 'mass': None}),
    ('Gh(Ca_mine)', {'E': 'Ca', 'Z': None, 'user': '_mine', 'A': None, 'real': False, 'mass': None}),
    ('@Ca_mine@1.07', {'E': 'Ca', 'Z': None, 'user': '_mine', 'A': None, 'real': False, 'mass': 1.07}),
    ('Gh(cA_MINE@1.07)', {'E': 'cA', 'Z': None, 'user': '_MINE', 'A': None, 'real': False, 'mass': 1.07}),
    ('@40Ca_mine@1.07', {'E': 'Ca', 'Z': None, 'user': '_mine', 'A': 40, 'real': False, 'mass': 1.07}),
    ('Gh(40Ca_mine@1.07)', {'E': 'Ca', 'Z': None, 'user': '_mine', 'A': 40, 'real': False, 'mass': 1.07}),
    ('444lu333@4.0', {'E': 'lu', 'Z': None, 'user': '333', 'A': 444, 'real': True, 'mass': 4.0}),
    ('@444lu333@4.4', {'E': 'lu', 'Z': None, 'user': '333', 'A': 444, 'real': False, 'mass': 4.4}),
    ('8i', {'E': 'i', 'Z': None, 'user': None, 'A': 8, 'real': True, 'mass': None}),
    ('53_mI4', {'Z': 53, 'E': None, 'user': '_mI4', 'A': None, 'real': True, 'mass': None}),
    ('@5_MINEs3@4.4', {'Z': 5, 'E': None, 'user': '_MINEs3', 'A': None, 'real': False, 'mass': 4.4}),
    ('Gh(555_mines3@0.1)', {'Z': 555, 'E': None, 'user': '_mines3', 'A': None, 'real': False, 'mass': 0.1}),
]) # yapf: disable
def test_parse_nucleus_label(inp, expected):
    """Check every parsed component of a nucleus label against the expectation."""
    lbl_A, lbl_Z, lbl_E, lbl_mass, lbl_real, lbl_user = qcelemental.molparse.nucleus.parse_nucleus_label(inp)

    assert compare_integers(expected['real'], lbl_real, inp + " real")
    assert compare_integers(expected['A'], lbl_A, inp + " A")
    assert compare_integers(expected['Z'], lbl_Z, inp + " Z")
    assert compare_strings(expected['E'], lbl_E, inp + " symbol")
    assert compare_strings(expected['user'], lbl_user, inp + " user")
    assert compare_values(expected['mass'], lbl_mass, 6, inp + " mass", passnone=True)
# Malformed labels must raise ValidationError rather than parse.
@pytest.mark.parametrize("inp", [
    '1L1A1B1',
])
def test_parse_nucleus_label_error(inp):
    with pytest.raises(qcelemental.ValidationError):
        ans = qcelemental.molparse.nucleus.parse_nucleus_label(inp)
| 2.078125 | 2 |
output/models/ibm_data/valid/d4_3_16/d4_3_16v01_xsd/d4_3_16v01.py | tefra/xsdata-w3c-tests | 1 | 12757529 | from dataclasses import dataclass, field
from typing import List
from xsdata.models.datatype import XmlDateTime
__NAMESPACE__ = "http://xstest-tns/schema11_F4_3_16_v01"
@dataclass
class Root:
    """Generated binding for the schema's root element; each field is a list of
    xs:dateTime values with a different explicitTimezone facet."""

    class Meta:
        name = "root"
        namespace = "http://xstest-tns/schema11_F4_3_16_v01"

    # Plain dateTime list, no timezone facet.
    el_dtime_type: List[XmlDateTime] = field(
        default_factory=list,
        metadata={
            "name": "elDTimeType",
            "type": "Element",
            "namespace": "",
        }
    )
    # Timezone must NOT be present on values.
    el_dtime_etprohibited: List[XmlDateTime] = field(
        default_factory=list,
        metadata={
            "name": "elDTimeETProhibited",
            "type": "Element",
            "namespace": "",
            "min_occurs": 1,
            "explicit_timezone": "prohibited",
        }
    )
    # Timezone is mandatory on values.
    el_dtime_etrequired: List[XmlDateTime] = field(
        default_factory=list,
        metadata={
            "name": "elDTimeETRequired",
            "type": "Element",
            "namespace": "",
            "min_occurs": 1,
            "explicit_timezone": "required",
        }
    )
    # Timezone may or may not be present.
    el_dtime_etoptional: List[XmlDateTime] = field(
        default_factory=list,
        metadata={
            "name": "elDTimeETOptional",
            "type": "Element",
            "namespace": "",
            "min_occurs": 1,
            "explicit_timezone": "optional",
        }
    )
| 2.421875 | 2 |
djpagan/czech/views.py | carthage-college/django-djpagan | 0 | 12757530 | from django.conf import settings
from django.shortcuts import render
from djpagan.czech.forms import ReimbursementForm
def reimbursement(request):
    """Render an empty reimbursement request form."""
    context = {'form': ReimbursementForm()}
    return render(request, 'czech/reimbursement/form.html', context)
| 1.40625 | 1 |
sdk/customproviders/azure-mgmt-customproviders/azure/mgmt/customproviders/models/__init__.py | rsdoherty/azure-sdk-for-python | 2,728 | 12757531 | <reponame>rsdoherty/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Association
from ._models_py3 import AssociationsList
from ._models_py3 import CustomRPActionRouteDefinition
from ._models_py3 import CustomRPManifest
from ._models_py3 import CustomRPResourceTypeRouteDefinition
from ._models_py3 import CustomRPRouteDefinition
from ._models_py3 import CustomRPValidations
from ._models_py3 import ErrorDefinition
from ._models_py3 import ErrorResponse
from ._models_py3 import ListByCustomRPManifest
from ._models_py3 import Resource
from ._models_py3 import ResourceProviderOperation
from ._models_py3 import ResourceProviderOperationDisplay
from ._models_py3 import ResourceProviderOperationList
from ._models_py3 import ResourceProvidersUpdate
except (SyntaxError, ImportError):
from ._models import Association # type: ignore
from ._models import AssociationsList # type: ignore
from ._models import CustomRPActionRouteDefinition # type: ignore
from ._models import CustomRPManifest # type: ignore
from ._models import CustomRPResourceTypeRouteDefinition # type: ignore
from ._models import CustomRPRouteDefinition # type: ignore
from ._models import CustomRPValidations # type: ignore
from ._models import ErrorDefinition # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import ListByCustomRPManifest # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceProviderOperation # type: ignore
from ._models import ResourceProviderOperationDisplay # type: ignore
from ._models import ResourceProviderOperationList # type: ignore
from ._models import ResourceProvidersUpdate # type: ignore
from ._customproviders_enums import (
ActionRouting,
ProvisioningState,
ResourceTypeRouting,
ValidationType,
)
__all__ = [
'Association',
'AssociationsList',
'CustomRPActionRouteDefinition',
'CustomRPManifest',
'CustomRPResourceTypeRouteDefinition',
'CustomRPRouteDefinition',
'CustomRPValidations',
'ErrorDefinition',
'ErrorResponse',
'ListByCustomRPManifest',
'Resource',
'ResourceProviderOperation',
'ResourceProviderOperationDisplay',
'ResourceProviderOperationList',
'ResourceProvidersUpdate',
'ActionRouting',
'ProvisioningState',
'ResourceTypeRouting',
'ValidationType',
]
| 1.398438 | 1 |
Dataset-Generation/Generating-Quantized-frames-from-videos/L6_UV_5 _Single_Step.py | akshay-kap/Meng-699-Image-Banding-detection | 2 | 12757532 | import os
import subprocess
from subprocess import check_output
import cv2
import numpy as np
class VideoCaptureYUV:
    """Frame reader for raw YUV420 (I420) video files with 6/5-bit requantization.

    Each read() returns the next frame with the luma plane requantized to
    6 bits and the chroma planes to 5 bits (then scaled back to 8 bits),
    converted to an RGB numpy array.
    """

    def __init__(self, filename, size):
        # size is (height, width); one I420 frame is width*height*3/2 bytes.
        self.height, self.width = size
        self.frame_len = int(self.width * self.height * 3 / 2)
        self.f = open(filename, 'rb')
        # Stacked plane layout: luma rows followed by half-height chroma rows.
        self.shape = (int(self.height * 1.5), self.width)

    def quantize(self, y_comp, bits, bdepth=8):
        """Rescale bdepth-bit samples down to `bits` bits (rounded)."""
        y_comp = np.uint8(np.rint(y_comp * ((pow(2, bits) - 1) / (pow(2, bdepth) - 1))))
        return y_comp

    def quantize_inverse(self, y_comp, bits, bdepth=8):
        """Rescale `bits`-bit samples back up to bdepth bits (rounded)."""
        y_comp = np.uint8(np.rint(y_comp * ((pow(2, bdepth) - 1) / (pow(2, bits) - 1))))
        return y_comp

    def adjust_luminance(self, y_comp, step):
        # Shift luma by `step`, clamped to [2, 255].
        y_comp = np.clip(y_comp + step, a_min=2, a_max=255)
        return y_comp

    def read_raw(self):
        """Read one raw frame; returns (ok, yuv_plane_stack)."""
        try:
            raw = self.f.read(self.frame_len)
            yuv = np.frombuffer(raw, dtype=np.uint8)
            yuv = yuv.reshape(self.shape)
        except Exception as e:
            # Short/empty read at EOF fails the reshape; report end of stream.
            print(str(e))
            return False, None
        return True, yuv

    def read(self, lum_step=0):
        """Return (ok, rgb_frame) for the next frame, requantized to 6b luma / 5b chroma."""
        ret, yuv = self.read_raw()
        if not ret:
            return ret, yuv
        # NOTE(review): plane split assumes a 1080-row luma plane — should
        # probably use self.height; confirm before reusing with other sizes.
        y = yuv[:1080, :]
        uv = yuv[1080:, :]
        y = self.quantize(y, 6, 8)
        uv = self.quantize(uv, 5, 8)
        y = self.quantize_inverse(y, 6, 8)
        uv = self.quantize_inverse(uv, 5, 8)
        yuv = np.concatenate((y, uv), axis=0)
        # (Removed an unused flattened copy of the frame made here.)
        bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB_I420)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        return ret, rgb
def step1():
    """Convert every ./mp4 video to raw yuv420p in ./yuv, then delete the sources."""
    inputfilepath = "./mp4"
    outputfilepath = "./yuv"
    files = os.listdir(inputfilepath)
    for name in files:
        src = inputfilepath + "/" + name
        dst = outputfilepath + "/" + name[:-3] + "yuv"
        # Argument-list invocation avoids shell interpolation of arbitrary
        # file names (the original built a shell=True command string).
        cmd = ["ffmpeg", "-i", src, "-c:v", "rawvideo", "-pix_fmt", "yuv420p", dst]
        check_output(cmd)
        print(" ".join(cmd))
        print("shukar hai!")
    print("Step 1 completed")
    # Remove the converted sources only after every conversion succeeded.
    for name in files:
        os.remove(inputfilepath + "/" + name)
def step2():
    """Requantize every ./yuv file to 6-bit luma / 5-bit chroma and write ./mp4/<name>_6b.mp4."""
    path = "./yuv"
    files = os.listdir(path)
    for name in files:
        filename = path + '\\' + name
        print(filename)
        for lum_step in range(0, 1):
            size = (1080, 1920)  # (height, width)
            cap = VideoCaptureYUV(filename, size)
            fourcc = 0x7634706d  # 'mp4v' FourCC (dropped the overwritten VideoWriter_fourcc call)
            # Frame rate is encoded in the last two characters of the stem, e.g. "...30.yuv".
            fps = int(name[-6:-4])
            out = cv2.VideoWriter('./mp4/{0}_6b.mp4'.format(name[:-4]), fourcc, fps, (1920, 1080))
            while True:
                ret, frame = cap.read(lum_step)
                if not ret:
                    # The original wrote the frame before checking ret, so the
                    # final failed read passed None to VideoWriter.write.
                    break
                out.write(frame)
            out.release()  # flush/close the writer (previously leaked)
    print("step 2 completed")
def step3():
    """Delete all intermediate raw files from ./yuv."""
    directory = "./yuv"
    for entry in os.listdir(directory):
        os.remove(directory + '\\' + entry)
    print("step3 completed")
def step4():
    """Extract one frame every 5 seconds (fps=0.2) from each ./mp4 video into ./png."""
    inputfilepath = "./mp4"
    outputfilepath = "./png"
    files = os.listdir(inputfilepath)
    for name in files:
        cmd = ["ffmpeg", "-i", inputfilepath + "/" + name,
               "-vf", "fps=0.2",
               outputfilepath + "/%06d_" + name[:4] + ".png"]
        try:
            check_output(cmd)
        except (subprocess.CalledProcessError, OSError) as err:
            # Narrowed from a bare `except: pass` so real failures are visible
            # but one bad file still doesn't abort the batch.
            print("ffmpeg failed for %s: %s" % (name, err))
    # The original printed "step 3 completed" here — wrong step number.
    print("step 4 completed")
if __name__ == "__main__":
    # Full pipeline: mp4 -> raw yuv -> requantized mp4 -> cleanup -> frame PNGs.
    step1()
    step2()
    step3()
    step4()
| 2.65625 | 3 |
katas/beta/easy_kata.py | the-zebulan/CodeWars | 40 | 12757533 | <reponame>the-zebulan/CodeWars<filename>katas/beta/easy_kata.py
def print_x(x):
    """Return the argument unchanged (identity; the name is a kata artifact)."""
    result = x
    return result
| 1.15625 | 1 |
examples/shred.py | robes/chisel | 1 | 12757534 | """Example of using the 'shred' transformation.
You will need a copy of 'zfs.owl' and specify its location at 'CHISEL_EXAMPLES_ZFS_OWL'.
"""
import os
from deriva.core import DerivaServer
from deriva.chisel import Model
from deriva.chisel import shred
# Configuration comes from the environment so the example can run against any host.
# NOTE(review): os.getenv returns a *string* when the variable is set, so any
# non-empty value (even "0" or "False") is truthy for __dry_run__ -- confirm intended.
__dry_run__ = os.getenv('CHISEL_EXAMPLE_DRY_RUN', True)
__host__ = os.getenv('CHISEL_EXAMPLES_HOSTNAME', 'localhost')
__catalog_id__ = os.getenv('CHISEL_EXAMPLES_CATALOG', '1')
zfs_filename = os.getenv('CHISEL_EXAMPLES_ZFS_OWL')

# The ontology file is mandatory; bail out early with a clear message otherwise.
if not zfs_filename:
    print("ERROR: env var 'CHISEL_EXAMPLES_ZFS_OWL' not defined")
    exit(1)

# Connect to the ERMrest catalog and load its schema model.
server = DerivaServer('https', __host__)
catalog = server.connect_ermrest(__catalog_id__)
model = Model.from_catalog(catalog)

# SPARQL expression to extract the id (i.e., short identifier) and name (i.e., preferred readable name) from the graph
sparql_class_and_props = """
SELECT DISTINCT ?id (?label AS ?name)
WHERE {
?s oboInOwl:id ?id .
?s rdfs:label ?label .
}"""

# Create a new relation computed from the shredded graph
with model.begin(dry_run=__dry_run__) as session:
    session.create_table_as(
        'vocab', 'zebrafish_stage_terms',
        shred(zfs_filename, sparql_class_and_props)
    )
| 2.46875 | 2 |
dimod/generators/anti_crossing.py | randomir/dimod | 0 | 12757535 | <reponame>randomir/dimod<gh_stars>0
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
from dimod import BinaryQuadraticModel
__all__ = ['anti_crossing_clique', 'anti_crossing_loops']
def anti_crossing_clique(num_variables):
    """Anti crossing problems with a single clique.

    Builds a ferromagnetic clique over the first half of the variables; each
    clique spin is coupled to a partner spin carrying the opposite bias, and
    the bias on variable 1 is zeroed out.

    Args:
        num_variables (int):
            Number of variables used to generate the problem. Must be an even number
            greater than 6.

    Returns:
        :obj:`.BinaryQuadraticModel`.
    """
    # NOTE(review): the guard admits num_variables == 6 even though the message
    # says "> 6"; preserved as-is to keep behavior identical.
    if num_variables % 2 != 0 or num_variables < 6:
        raise ValueError('num_variables must be an even number > 6')

    bqm = BinaryQuadraticModel({}, {}, 0, 'SPIN')

    half = num_variables // 2
    for u in range(half):
        # Ferromagnetic clique over the first half of the variables.
        for v in range(u + 1, half):
            bqm.add_interaction(u, v, -1)
        # Tie each clique spin to a partner with the opposite field.
        bqm.add_interaction(u, u + half, -1)
        bqm.add_variable(u, 1)
        bqm.add_variable(u + half, -1)

    bqm.set_linear(1, 0)
    return bqm
def anti_crossing_loops(num_variables):
    """Anti crossing problems with two loops. These instances are copies of the
    instance studied in [DJA]_.

    Args:
        num_variables (int):
            Number of variables used to generate the problem. Must be an even number
            greater than 8.

    Returns:
        :obj:`.BinaryQuadraticModel`.

    .. [DJA] <NAME>., <NAME>., <NAME>. et al. Thermally assisted
        quantum annealing of a 16-qubit problem. Nat Commun 4, 1903 (2013).
        https://doi.org/10.1038/ncomms2920
    """
    bqm = BinaryQuadraticModel({}, {}, 0, 'SPIN')

    # NOTE(review): as with the clique generator, the guard admits
    # num_variables == 8 while the message says "> 8"; kept unchanged.
    if num_variables % 2 != 0 or num_variables < 8:
        raise ValueError('num_variables must be an even number > 8')

    quarter = num_variables // 4
    for n in range(quarter):
        if n % 2 == 1:
            # Rungs between the two loops at odd sites.
            bqm.set_quadratic(n, n + quarter, -1)
        # Couplings around each loop (wrapping at `quarter`).
        bqm.set_quadratic(n, (n + 1) % quarter, -1)
        bqm.set_quadratic(n + quarter, (n + 1) % quarter + quarter, -1)
        # Pendant spins with opposite bias attached to each loop site.
        bqm.set_quadratic(n, n + 2 * quarter, -1)
        bqm.set_quadratic(n + quarter, n + 3 * quarter, -1)
        bqm.add_variable(n, 1)
        bqm.add_variable(n + quarter, 1)
        bqm.add_variable(n + 2 * quarter, -1)
        bqm.add_variable(n + 3 * quarter, -1)

    bqm.set_linear(0, 0)
    bqm.set_linear(quarter, 0)
    return bqm
| 2.484375 | 2 |
explorer_pyaudio/scripts/client_local.py | NaiveCoder1999/nwpu_explorer_2019 | 1 | 12757536 | <filename>explorer_pyaudio/scripts/client_local.py
#!/usr/bin/env python
import socket
import sys
import pyaudio
import wave
import rospy
#address = ('192.168.188.123', 12345)
rospy.init_node("local_talker")

# Target UDP endpoint comes from ROS private parameters.
ip_address = rospy.get_param('~address')
ip_port = rospy.get_param('~port')
address = (ip_address, ip_port)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# Capture settings: 16-bit stereo at 44.1 kHz, sent in 256-frame chunks.
CHUNK = 256
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)
rospy.loginfo("stream start")

try:
    # Stream microphone chunks over UDP until the node shuts down.
    # (The original unconditional `while True:` made every cleanup line
    # below it unreachable; the dead per-iteration `frames` list is gone too.)
    while not rospy.is_shutdown():
        data = stream.read(CHUNK)
        s.sendto(data, address)
except KeyboardInterrupt:
    pass
finally:
    rospy.loginfo("stopped")
    stream.stop_stream()
    stream.close()
    p.terminate()
    s.close()
| 2.515625 | 3 |
setup.py | ctberthiaume/tsdataformat-python | 0 | 12757537 | <gh_stars>0
from setuptools import setup, find_packages
import versioneer
# Read the long description up front with a context manager so the file
# handle is closed promptly (the original inline open().read() leaked it).
with open('README.rst', 'r') as _readme:
    _long_description = _readme.read()

setup(
    name='tsdataformat',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    description='A Python project to manage time series data',
    long_description=_long_description,
    url='https://github.com/ctberthiaume/tsdataformat-python',
    # Package sources live under src/ (the "src layout").
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    include_package_data=True,
    platforms='any',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3 :: Only'
    ],
    keywords = ['csv', 'command-line', 'time series', 'tsdata'],
    python_requires='>=3.7, <4',
    install_requires=[
        'ciso8601',
        'click',
        'pandas'
    ],
    zip_safe=True,
    entry_points={
        'console_scripts': [
            'tsdataformat=tsdataformat.cli:cli'
        ]
    }
)
| 1.351563 | 1 |
reaver/agents/base/actor_critic.py | XinyiYS/reaver | 0 | 12757538 | <reponame>XinyiYS/reaver
import copy
import gin.tf
import numpy as np
import tensorflow.compat.v1 as tf
from abc import abstractmethod
from reaver.envs.base import Spec
from reaver.agents.base import MemoryAgent
from reaver.utils import Logger
from reaver.utils.tensorflow import SessionManager
from reaver.utils.typing import ModelBuilder, PolicyType
# Default hyperparameters for ActorCriticAgent; gin configuration can override these.
DEFAULTS = dict(
    model_fn=None,
    policy_cls=None,
    optimizer=None,
    learning_rate=0.0003,
    value_coef=0.5,       # weight of the value loss term
    entropy_coef=0.01,    # weight of the entropy bonus
    traj_len=16,
    batch_sz=16,
    discount=0.99,
    gae_lambda=0.95,
    clip_rewards=0.0,     # 0 disables reward clipping
    clip_grads_norm=0.0,  # 0 disables gradient-norm clipping
    normalize_returns=False,
    normalize_advantages=False,
    model_variable_scope=None,
)

# Prefix used to tag this module's console diagnostics.
LOGGING_MSG_HEADER = "LOGGING FROM <reaver.reaver.agents.base.actor_critic> "
@gin.configurable('ACAgent')
class ActorCriticAgent(MemoryAgent):
    """
    Abstract class, unifies deep actor critic functionality

    Handles on_step callbacks, either updating current batch
    or executing one training step if the batch is ready

    Extending classes only need to implement loss_fn method
    """
    def __init__(
        self,
        obs_spec: Spec,
        act_spec: Spec,
        model_variable_scope=DEFAULTS['model_variable_scope'],
        model_fn: ModelBuilder = None,
        policy_cls: PolicyType = None,
        sess_mgr: SessionManager = None,
        optimizer: tf.train.Optimizer = None,
        value_coef=DEFAULTS['value_coef'],
        entropy_coef=DEFAULTS['entropy_coef'],
        traj_len=DEFAULTS['traj_len'],
        batch_sz=DEFAULTS['batch_sz'],
        discount=DEFAULTS['discount'],
        gae_lambda=DEFAULTS['gae_lambda'],
        clip_rewards=DEFAULTS['clip_rewards'],
        clip_grads_norm=DEFAULTS['clip_grads_norm'],
        normalize_returns=DEFAULTS['normalize_returns'],
        normalize_advantages=DEFAULTS['normalize_advantages'],
        **kwargs,
    ):
        # Trajectory buffers (obs/acts/rewards/dones/values) come from the base class.
        MemoryAgent.__init__(self, obs_spec, act_spec, traj_len, batch_sz)
        print(LOGGING_MSG_HEADER +": the traj_len is {} and batch_sz is {}".format(traj_len, batch_sz))
        if not sess_mgr:
            sess_mgr = SessionManager()
        # Optional list of sub-environments (HRL); when present, one
        # model/policy/optimizer triple is built per subenv instead of a
        # single shared model.
        self.subenvs = subenvs = kwargs['subenvs'] if 'subenvs' in kwargs else []
        if optimizer:
            # One independent optimizer copy per subenv.
            optimizers = [copy.deepcopy(optimizer) for subenv in subenvs]
        else:
            optimizer = tf.train.AdamOptimizer(
                learning_rate=DEFAULTS['learning_rate'])
            optimizers = [tf.train.AdamOptimizer(learning_rate=DEFAULTS['learning_rate']) for subenv in subenvs]
        self.sess_mgr = sess_mgr
        self.model_variable_scope = self.sess_mgr.model_variable_scope
        self.value_coef = value_coef
        self.entropy_coef = entropy_coef
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.clip_rewards = clip_rewards
        self.normalize_returns = normalize_returns
        self.normalize_advantages = normalize_advantages
        self.traj_len = traj_len
        self.batch_sz = batch_sz
        print(LOGGING_MSG_HEADER + " : the current model_variable_scope is", self.model_variable_scope)
        # implement the a2c to support multiple subagents
        # self.model = model_fn(obs_spec, act_spec)
        with sess_mgr.sess.graph.as_default():
            # note this is name_scope as opposed to variable_scope, important
            with tf.name_scope(self.sess_mgr.main_tf_vs.original_name_scope):
                if subenvs:
                    from collections import defaultdict
                    self.subenv_dict = defaultdict(list)
                    print(LOGGING_MSG_HEADER + ": Creating models for each individual subenvs: ", subenvs)
                    for i, subenv in enumerate(subenvs):
                        # Per-subenv network: last output is the value head,
                        # the remaining outputs parameterize the policy.
                        subenv_model = model_fn(obs_spec, act_spec)
                        self.subenv_dict['models'].append(subenv_model)
                        subenv_value = subenv_model.outputs[-1]
                        self.subenv_dict['values'].append(subenv_value)
                        subenv_policy = policy_cls(act_spec, subenv_model.outputs[:-1])
                        self.subenv_dict['policies'].append(subenv_policy)
                        subenv_loss_op, subenv_loss_terms, subenv_loss_inputs = self.loss_fn(policy=subenv_policy, value=subenv_value)
                        self.subenv_dict['loss_ops'].append(subenv_loss_op)
                        self.subenv_dict['loss_terms'].append(subenv_loss_terms)
                        self.subenv_dict['loss_inputs'].append(subenv_loss_inputs)
                        subenv_optimizer = optimizers[i]
                        grads, vars = zip(*subenv_optimizer.compute_gradients(subenv_loss_op))
                        subenv_grads_norm = tf.global_norm(grads)
                        self.subenv_dict['grads_norms'].append(subenv_grads_norm)
                        if clip_grads_norm > 0 :
                            grads, _ = tf.clip_by_global_norm(grads, clip_grads_norm, subenv_grads_norm)
                        self.subenv_dict['train_ops'].append(subenv_optimizer.apply_gradients(
                            zip(grads, vars), global_step=sess_mgr.global_step))
                        self.subenv_dict['minimize_ops'].append(self.make_minimize_ops( subenv_id=i) )
                    print(LOGGING_MSG_HEADER + ": Successfully created models for each individual subenvs")
                else:
                    print(LOGGING_MSG_HEADER + ": Creating single model for the environment.")
                    self.model = model_fn(obs_spec, act_spec)
                    self.value = self.model.outputs[-1]
                    self.policy = policy_cls(act_spec, self.model.outputs[:-1])
                    self.loss_op, self.loss_terms, self.loss_inputs = self.loss_fn()
                    grads, vars = zip(*optimizer.compute_gradients(self.loss_op))
                    self.grads_norm = tf.global_norm(grads)
                    if clip_grads_norm > 0.:
                        grads, _ = tf.clip_by_global_norm(
                            grads, clip_grads_norm, self.grads_norm)
                    self.train_op = optimizer.apply_gradients(
                        zip(grads, vars), global_step=sess_mgr.global_step)
                    self.minimize_ops = self.make_minimize_ops()
        print(LOGGING_MSG_HEADER + " : main_model setup on sess and graph complete")
        sess_mgr.restore_or_init()
        print(LOGGING_MSG_HEADER + " : main_model weights restore/init complete")
        # Resume bookkeeping from a restored checkpoint, if any.
        self.n_batches = sess_mgr.start_step
        self.start_step = sess_mgr.start_step * traj_len
        self.logger = Logger()

    def get_action_and_value(self, obs, subenv_id=None):
        """Sample an action and fetch the value estimate for `obs`,
        routed to the subenv-specific model when `subenv_id` is given."""
        if self.subenvs and subenv_id is not None:
            return self.sess_mgr.run([self.subenv_dict['policies'][subenv_id].sample, self.subenv_dict['values'][subenv_id]], self.subenv_dict['models'][subenv_id].inputs, obs)
        else:
            return self.sess_mgr.run([self.policy.sample, self.value], self.model.inputs, obs)

    def get_action(self, obs, subenv_id=None):
        """Sample an action only (no value estimate) for `obs`."""
        if self.subenvs and subenv_id is not None:
            return self.sess_mgr.run(self.subenv_dict['policies'][subenv_id].sample, self.subenv_dict['models'][subenv_id].inputs, obs)
        else:
            return self.sess_mgr.run(self.policy.sample, self.model.inputs, obs)

    def on_step(self, step, obs, action, reward, done, value=None, subenv_id=None):
        """Record one transition; once a full batch is buffered, bootstrap the
        value of the last observation, compute advantages/returns and train."""
        MemoryAgent.on_step(self, step, obs, action, reward, done, value)
        self.logger.on_step(step, reward, done)
        if not self.batch_ready():
            return
        if self.subenvs and subenv_id is not None:
            assert self.subenv_dict, "Missing subenv_dict implementation"
            next_values = self.sess_mgr.run(
                self.subenv_dict['values'][subenv_id], self.subenv_dict['models'][subenv_id].inputs, self.last_obs)
        else:
            next_values = self.sess_mgr.run(
                self.value, self.model.inputs, self.last_obs)
        adv, returns = self.compute_advantages_and_returns(next_values)
        loss_terms, grads_norm = self.minimize(adv, returns, subenv_id=subenv_id)
        self.sess_mgr.on_update(self.n_batches)
        logs = self.logger.on_update(self.n_batches, loss_terms,
                                     grads_norm, returns, adv, next_values)
        return logs

    def minimize(self, advantages, returns, subenv_id=None):
        """Flatten the buffered batch and run a single optimization step."""
        inputs = self.obs + self.acts + [advantages, returns]
        # Merge the (traj_len, batch_sz, ...) leading axes into one flat batch axis.
        inputs = [a.reshape(-1, *a.shape[2:]) for a in inputs]
        if self.subenvs and subenv_id is not None:
            assert self.subenv_dict, "Missing subenv_dict implementation"
            tf_inputs = self.subenv_dict['models'][subenv_id].inputs + self.subenv_dict['policies'][subenv_id].inputs + self.subenv_dict['loss_inputs'][subenv_id]
            loss_terms, grads_norm, * \
                _ = self.sess_mgr.run(self.subenv_dict['minimize_ops'][subenv_id], tf_inputs, inputs)
        else:
            tf_inputs = self.model.inputs + self.policy.inputs + self.loss_inputs
            loss_terms, grads_norm, * \
                _ = self.sess_mgr.run(self.minimize_ops, tf_inputs, inputs)
        return loss_terms, grads_norm

    def compute_advantages_and_returns(self, bootstrap_value):
        """
        GAE can help with reducing variance of policy gradient estimates

        Returns (advantages, returns); both optionally normalized.
        """
        if self.clip_rewards > 0.0:
            # In-place clipping of the stored reward buffer.
            np.clip(self.rewards, -self.clip_rewards,
                    self.clip_rewards, out=self.rewards)
        rewards = self.rewards.copy()
        # Bootstrap the final step with the critic's estimate, unless terminal.
        rewards[-1] += (1-self.dones[-1]) * self.discount * bootstrap_value
        # Zero out the discount across episode boundaries.
        masked_discounts = self.discount * (1-self.dones)
        returns = self.discounted_cumsum(rewards, masked_discounts)
        if self.gae_lambda > 0.:
            values = np.append(self.values, np.expand_dims(
                bootstrap_value, 0), axis=0)
            # d_t = r_t + g * V(s_{t+1}) - V(s_t)
            deltas = self.rewards + masked_discounts * values[1:] - values[:-1]
            adv = self.discounted_cumsum(
                deltas, self.gae_lambda * masked_discounts)
        else:
            # Plain advantage estimate when GAE is disabled.
            adv = returns - self.values
        if self.normalize_advantages:
            adv = (adv - adv.mean()) / (adv.std() + 1e-10)
        if self.normalize_returns:
            returns = (returns - returns.mean()) / (returns.std() + 1e-10)
        return adv, returns

    def on_start(self):
        """Forward run-start notification to the logger."""
        self.logger.on_start()

    def on_finish(self):
        """Forward run-end notification to the logger."""
        self.logger.on_finish()

    def reset(self):
        """
        Introduced for HRL with multiple subenvs trained in sequence
        So need to reset some auxiliary logging book-keeping information
        """
        # Re-initializing MemoryAgent clears the trajectory buffers in place.
        MemoryAgent.__init__(self, obs_spec=self.obs_spec, act_spec=self.act_spec, traj_len=self.traj_len, batch_sz=self.batch_sz)
        self.logger.reset()

    def make_minimize_ops(self, subenv_id=None):
        """Bundle [loss_terms, grads_norm(, train_op, ...)] for one sess run."""
        if self.subenvs and subenv_id is not None:
            assert self.subenv_dict, "self.subenv_dict is None or empty"
            loss_terms = self.subenv_dict['loss_terms'][subenv_id]
            grads_norm = self.subenv_dict['grads_norms'][subenv_id]
            ops = [loss_terms, grads_norm]
            if self.sess_mgr.training_enabled:
                ops.append(self.subenv_dict['train_ops'][subenv_id])
            return ops
        else:
            ops = [self.loss_terms, self.grads_norm]
            if self.sess_mgr.training_enabled:
                ops.append(self.train_op)
            # appending extra model update ops (e.g. running stats)
            # note: this will most likely break if model.compile() is used
            ops.extend(self.model.get_updates_for(None))
            return ops

    @staticmethod
    def discounted_cumsum(x, discount):
        """Right-to-left discounted cumulative sum with a per-step discount vector."""
        y = np.zeros_like(x)
        y[-1] = x[-1]
        for t in range(x.shape[0]-2, -1, -1):
            y[t] = x[t] + discount[t] * y[t+1]
        return y

    @abstractmethod
    def loss_fn(self): ...
danceschool/core/handlers.py | django-danceschool/django-danceschool | 32 | 12757539 | <filename>danceschool/core/handlers.py
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.dispatch import receiver
from allauth.account.signals import email_confirmed
from allauth.account.models import EmailAddress
import logging
from .signals import post_registration
from .models import Registration, EventRegistration
# Define logger for this file
logger = logging.getLogger(__name__)
@receiver(email_confirmed)
def linkUserToMostRecentCustomer(sender, **kwargs):
    '''
    When a primary email address is confirmed, and the owning user has no
    Customer yet, attach the most recently finalized, still-unclaimed Customer
    registered with that email. If the user has no name set, copy the
    Customer's name over. This links a new user account to its existing
    registrations at email-verification time.
    '''
    address = kwargs.get('email_address', None)

    # Only act on a verified primary address.
    if not (address and address.primary and address.verified):
        return

    user = address.user
    if hasattr(user, 'customer'):
        # Already linked; nothing to do.
        return

    most_recent = EventRegistration.objects.filter(
        customer__email=address.email,
        customer__user__isnull=True,
        dateTime__isnull=False,
        final=True
    ).order_by('-registration__dateTime').first()
    if most_recent is None:
        return

    customer = most_recent.customer
    customer.user = user
    customer.save()

    # Fill in the user's name only when it is entirely empty.
    if user.first_name or user.last_name:
        return
    user.first_name = customer.first_name
    user.last_name = customer.last_name
    user.save()
@receiver(post_registration)
def linkCustomerToVerifiedUser(sender, **kwargs):
    """
    If a Registration is processed in which the associated Customer does not yet
    have a User, then check to see if the Customer's email address has been
    verified as belonging to a specific User, and if that User has an associated
    Customer. If such a User is found, then associated this Customer with that
    User. This way, if a new User verifies their email account before they have
    submitted any Registrations, their Customer account is seamlessly linked when
    they do complete their first Registration.
    """
    invoice = kwargs.get('invoice', None)

    # Registrations on this invoice whose customer is not yet tied to a User.
    eventregs = EventRegistration.objects.filter(
        invoiceItem__invoice=invoice, customer__isnull=False,
        customer__user__isnull=True
    )
    if not eventregs:
        return

    logger.debug('Checking for User for Customer with no associated registration.')

    for er in eventregs:
        customer = er.customer
        try:
            # A verified primary address whose user has no Customer yet.
            verified_email = EmailAddress.objects.get(
                email=customer.email,
                verified=True,
                primary=True,
                user__customer__isnull=True
            )
            logger.info("Found user %s to associate with customer %s.", verified_email.user.id, customer.id)
            customer.user = verified_email.user
            customer.save()
            # Copy the customer's name over only when the user's is empty.
            if not customer.user.first_name and not customer.user.last_name:
                customer.user.first_name = customer.first_name
                customer.user.last_name = customer.last_name
                customer.user.save()
        except ObjectDoesNotExist:
            logger.info("No user found to associate with customer %s.", customer.id)
        except MultipleObjectsReturned:
            # This should never happen, as email should be unique in the db table account_emailaddress.
            # If it does, something's broken in the database or Django.
            errmsg = "Something's not right with the database: more than one entry found on the database for the email %s. \
This duplicate key value violates unique constraint \"account_emailaddress_email_key\". \
The email field should be unique for each account.\n"
            logger.exception(errmsg, customer.email)
| 2.125 | 2 |
solutions/day04/solution.py | dbjohnson/advent-of-code-2021 | 0 | 12757540 | import pandas as pd
# Read the puzzle input once; line 0 is the draw sequence, the boards follow.
with open('input.txt') as fh:
    lines = fh.readlines()
class Board:
    """One 5x5 bingo board.

    ``values`` holds the still-unmarked numbers with their grid positions,
    ``board`` is a 0/1 marker grid, and ``last_num`` is the most recent hit.
    """

    def __init__(self, lines):
        self.values = [
            (int(cell), row_idx, col_idx)
            for row_idx, row in enumerate(lines)
            for col_idx, cell in enumerate(row.split())
        ]
        self.board = pd.DataFrame([[0] * 5] * 5)
        self.last_num = 0

    def update(self, number):
        """Mark ``number`` if present and drop it from the unmarked list."""
        remaining = []
        for entry in self.values:
            cell, row_idx, col_idx = entry
            if cell == number:
                self.board.iloc[row_idx, col_idx] = 1
                self.last_num = number
            else:
                remaining.append(entry)
        self.values = remaining

    @property
    def score(self):
        """Last drawn number times the sum of all unmarked cells."""
        unmarked_total = sum(cell for cell, _, _ in self.values)
        return self.last_num * unmarked_total

    @property
    def winner(self):
        """True once any full row or full column is marked."""
        full_column = self.board.all().any()
        full_row = self.board.all(axis=1).any()
        return bool(full_column or full_row)
# First input line: the comma-separated draw order.
sequence = list(map(int, lines[0].split(',')))
# Boards start at line 2 and repeat every 6 lines (5 board rows + 1 blank).
boards = [
    Board(lines[offs:offs + 5])
    for offs in range(2, len(lines) + 1, 6)
]
# part 1
def find_first_winner(sequence, boards):
    """Play the draw order; return (1-based board number, score) of the first win."""
    for drawn in sequence:
        for idx, board in enumerate(boards, start=1):
            board.update(drawn)
            if board.winner:
                return (idx, board.score)
# Answer for part 1. Note: this mutates the shared `boards`, whose state
# part 2 then continues from.
print('first winner:', find_first_winner(sequence, boards))
# part 2
def find_last_winner(sequence, boards):
    """Keep playing until every board has won; return (1-based number, score) of the last."""
    for drawn in sequence:
        for idx, board in enumerate(boards, start=1):
            if board.winner:
                # Finished boards are frozen; only update the rest.
                continue
            board.update(drawn)
            if board.winner and all(b.winner for b in boards):
                return (idx, board.score)
# Answer for part 2 (continues from the board state left by part 1).
print('last winner:', find_last_winner(sequence, boards))
| 3.34375 | 3 |
autodidaqt/examples/minimal_app.py | chstan/autodiDAQt | 1 | 12757541 | <gh_stars>1-10
"""
The absolute, bare minimum. Open an application with no panels.
"""
from autodidaqt import AutodiDAQt
# An AutodiDAQt application with an empty panel definition dict: opens no panels.
app = AutodiDAQt(__name__, {})

if __name__ == "__main__":
    app.start()
| 1.453125 | 1 |
wakatime/main.py | sklirg/wakatime | 220 | 12757542 | # -*- coding: utf-8 -*-
"""
wakatime.main
~~~~~~~~~~~~~
Module entry point.
:copyright: (c) 2013 <NAME>.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import logging
import os
import sys
import time
import traceback
pwd = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(pwd))
sys.path.insert(0, os.path.join(pwd, 'packages'))
from .__about__ import __version__
from .api import send_heartbeats
from .arguments import parse_arguments
from .compat import u, json
from .constants import SUCCESS, UNKNOWN_ERROR, HEARTBEATS_PER_REQUEST
from .logger import setup_logging
log = logging.getLogger('WakaTime')
from .heartbeat import Heartbeat
from .offlinequeue import Queue
def execute(argv=None):
    """Parse CLI arguments, build heartbeats (the current one plus any extras
    read from stdin), send them in batches, then flush the offline queue.

    Args:
        argv: optional list of CLI arguments; when given, replaces sys.argv.

    Returns:
        SUCCESS, an API error constant from ``send_heartbeats``, or
        UNKNOWN_ERROR on an unexpected exception.
    """
    if argv:
        sys.argv = ['wakatime'] + argv

    args, configs = parse_arguments()

    setup_logging(args, __version__)

    try:
        heartbeats = []

        hb = Heartbeat(vars(args), args, configs)
        if hb:
            heartbeats.append(hb)
        else:
            log.debug(hb.skip)

        if args.extra_heartbeats:
            try:
                # One JSON array of heartbeat dicts is expected on stdin.
                for extra_data in json.loads(sys.stdin.readline()):
                    hb = Heartbeat(extra_data, args, configs)
                    if hb:
                        heartbeats.append(hb)
                    else:
                        log.debug(hb.skip)
            except json.JSONDecodeError as ex:
                log.warning(u('Malformed extra heartbeats json: {msg}').format(
                    msg=u(ex),
                ))

        retval = SUCCESS
        # Send in API-sized chunks; stop at the first failure.
        while heartbeats:
            retval = send_heartbeats(heartbeats[:HEARTBEATS_PER_REQUEST], args, configs)
            heartbeats = heartbeats[HEARTBEATS_PER_REQUEST:]
            if retval != SUCCESS:
                break

        # Anything unsent is queued for a later offline sync.
        if heartbeats:
            Queue(args, configs).push_many(heartbeats)

        if retval == SUCCESS:
            # Only after a successful send, sync previously queued offline
            # activity, rate-limited to one request per second.
            queue = Queue(args, configs)
            for offline_heartbeats in queue.pop_many(args.sync_offline_activity):
                time.sleep(1)
                retval = send_heartbeats(offline_heartbeats, args, configs)
                if retval != SUCCESS:
                    break

        return retval

    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and KeyboardInterrupt
        # propagate instead of being swallowed as UNKNOWN_ERROR.
        log.traceback(logging.ERROR)
        print(traceback.format_exc())
        return UNKNOWN_ERROR
| 2.09375 | 2 |
project/networking/displayConnections.py | flyflyinit/GUI-admin-tool | 3 | 12757543 | import subprocess
def displayConnection():
    """Return NetworkManager connection names via ``nmcli connection show``.

    The raw table is dumped to /tmp/listConnection, the header row is dropped,
    the last 56 fixed-width characters of each row (the UUID/TYPE/DEVICE
    columns) are cut off, and a chain of replacements strips the padding.

    Returns:
        list[str]: connection names, or [] when nmcli could not be run.
    """
    lisData = []
    lisData2 = []
    command = 'nmcli connection show > /tmp/listConnection '
    try:
        subprocess.run(command, check=True, shell=True)
    except subprocess.CalledProcessError:
        print("Error While fetching Connections ")
        # Without a fresh dump we would otherwise read stale (or missing)
        # data from /tmp, so fail fast with an empty result instead.
        return lisData2
    with open('/tmp/listConnection') as f:
        lineList = f.readlines()
    lineList.pop(0)  # drop the NAME/UUID/TYPE/DEVICE header row
    for i in lineList:
        # Strip the trailing 56 fixed-width columns (reverse, cut, reverse).
        i = i[::-1]
        i = i[56:]
        i = i[::-1]
        lisData.append(i)
    for i in lisData:
        # NOTE(review): this replacement chain appears to collapse the column
        # padding while keeping spaces inside names -- preserved verbatim;
        # confirm against real nmcli output before simplifying.
        i = i.replace('\n', '')
        i = i.replace(' ', '\\ ')
        i = i.replace('\\', ',')
        i = i.replace(', ,', '')
        i = i.replace('\n', '')
        i = i.replace(',', '\\')
        i = i.replace(' \\', '')
        lisData2.append(i)
    return lisData2
| 2.6875 | 3 |
0901-1000/0998-Maximum Binary Tree II/0998-Maximum Binary Tree II.py | jiadaizhao/LeetCode | 49 | 12757544 | # Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    def insertIntoMaxTree(self, root: TreeNode, val: int) -> TreeNode:
        """Recursive insert into a max-tree: walk down the right spine while the
        current root is >= val, then wrap the smaller subtree under a new node."""
        if root is not None and root.val >= val:
            root.right = self.insertIntoMaxTree(root.right, val)
            return root
        node = TreeNode(val)
        node.left = root
        return node
class Solution2:
    def insertIntoMaxTree(self, root: TreeNode, val: int) -> TreeNode:
        """Iterative variant: descend the right spine past larger values,
        then splice the new node in at the insertion point."""
        parent, child = None, root
        while child is not None and child.val > val:
            parent, child = child, child.right
        node = TreeNode(val)
        node.left = child
        if parent is None:
            # New node becomes the root (val larger than everything on the spine).
            return node
        parent.right = node
        return root
| 4 | 4 |
mapmint-services/mmowssp.py | fenilgmehta/mapmint | 41 | 12757545 | <filename>mapmint-services/mmowssp.py
from pprint import pformat
from twisted.python import log
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.protocol import Protocol
from twisted.web import server, resource
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
import sys, json, urllib.parse
import urllib.request, urllib.error, urllib.parse
import cgi
class HelloResource(resource.Resource):
    """Twisted web resource that proxies incoming OWS requests through the
    ows-security.SecureAccess WPS service (token/user/password/client-IP are
    wrapped into a WPS Execute document and forwarded)."""
    isLeaf = True
    # Class-level counter; `self.numberRequests += 1` below creates a
    # per-instance shadow on first increment.
    numberRequests = 0
    # Backend ZOO-Kernel endpoint that hosts the SecureAccess service.
    SecureAccessUrl = "http://host1/cgi-bin/mm/zoo_loader.cgi"
    # WPS Execute template; the [server]/[token]/[user]/[password]/[ip_address]/
    # [query] placeholders are substituted per request.
    req_tmpl = \
        '<wps:Execute service="WPS" version="1.0.0" xmlns:wps="http://www.opengis.net/wps/1.0.0" xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 ../wpsExecute_request.xsd">' + \
        '<ows:Identifier>ows-security.SecureAccess</ows:Identifier>' + \
        '<wps:DataInputs>' + \
        '<wps:Input>' + \
        '<ows:Identifier>server</ows:Identifier>' + \
        '<wps:Data>' + \
        '<wps:LiteralData>[server]</wps:LiteralData>' + \
        '</wps:Data>' + \
        '</wps:Input>' + \
        '<wps:Input>' + \
        '<ows:Identifier>token</ows:Identifier>' + \
        '<wps:Data>' + \
        '<wps:LiteralData>[token]</wps:LiteralData>' + \
        '</wps:Data>' + \
        '</wps:Input>' + \
        '<wps:Input>' + \
        '<ows:Identifier>user</ows:Identifier>' + \
        '<wps:Data>' + \
        '<wps:LiteralData>[user]</wps:LiteralData>' + \
        '</wps:Data>' + \
        '</wps:Input>' + \
        '<wps:Input>' + \
        '<ows:Identifier>password</ows:Identifier>' + \
        '<wps:Data>' + \
        '<wps:LiteralData>[password]</wps:LiteralData>' + \
        '</wps:Data>' + \
        '</wps:Input>' + \
        '<wps:Input>' + \
        '<ows:Identifier>ip</ows:Identifier>' + \
        '<wps:Data>' + \
        '[ip_address]' + \
        '</wps:Data>' + \
        '</wps:Input>' + \
        '<wps:Input>' + \
        '<ows:Identifier>Query</ows:Identifier>' + \
        '<wps:Data>' + \
        '[query]' + \
        '</wps:Data>' + \
        '</wps:Input>' + \
        '</wps:DataInputs>' + \
        '<wps:ResponseForm>' + \
        '<wps:RawDataOutput>' + \
        '<ows:Identifier>Result</ows:Identifier>' + \
        '</wps:RawDataOutput>' + \
        '</wps:ResponseForm>' + \
        '</wps:Execute>'

    def setDefaultHeaders(self, request):
        """Stamp every response with the proxy's identity headers."""
        request.setHeader('Server', 'MapMint OWS-Security')
        request.setHeader('X-Fowarded-By', 'MapMint OWS-Security')

    def reparse(self, parsed_path):
        """Duplicate mixed-case query keys under their lowercase form (in place)."""
        # NOTE(review): mutating the dict while iterating its keys() view
        # raises RuntimeError in Python 3 when a key is added -- should
        # iterate over list(parsed_path.keys()) instead.
        # TODO: confirm assumption: "parsed_path" is a Python 3 dictionary object
        # for i in list(parsed_path.keys()):
        for i in parsed_path.keys():
            if i.lower() != i:
                parsed_path[i.lower()] = parsed_path[i]

    def render_GET(self, request):
        """Wrap GET query parameters into a SecureAccess Execute request and
        relay the backend's response (headers included)."""
        self.setDefaultHeaders(request)
        import json
        # ip, port = self.transport.socket.getpeername()
        print(dir(request), file=sys.stderr)  # protocol.transport.getPeer()
        print(request.getHeader("x-real-ip"), file=sys.stderr)
        print(request.path, file=sys.stderr)
        print(request.transport.getPeer().host, file=sys.stderr)
        print(request.getUser(), file=sys.stderr)
        print(request.getPassword(), file=sys.stderr)
        # Path layout assumed to be /<...>/<token>/<server>; confirm with deployment.
        rcontent = request.path.split('/')
        print(rcontent, file=sys.stderr)
        parsed_path = request.args
        self.reparse(parsed_path)
        # NOTE(review): `"token" in parsed_path > 0` is a *chained* comparison;
        # the second half compares a dict with 0 and raises TypeError on
        # Python 3 whenever "token" is present. Likely meant just
        # `"token" in parsed_path and "server" in parsed_path`.
        # TODO: confirm assumption: "parsed_path" is a Python 3 dictionary object
        # if list(parsed_path.keys()).count("token") > 0 and list(parsed_path.keys()).count("server"):
        if "token" in parsed_path > 0 and "server" in parsed_path:
            params = [parsed_path["server"][0], parsed_path["token"][0]]
        else:
            params = [rcontent[3], rcontent[2]]
        log.msg(parsed_path)
        # Prefer the reverse proxy's client IP header; fall back to the socket peer.
        clientIp = request.getHeader("x-real-ip")
        if clientIp is None:
            clientIp = request.transport.getPeer().host
        # TODO: confirm assumption: "parsed_path" is a Python 3 dictionary object
        # if list(parsed_path.keys()).count('token') == 0:
        if 'token' not in parsed_path:
            parsed_path["token"] = "-1"
        # TODO: confirm assumption: "parsed_path" is a Python 3 dictionary object
        # if list(parsed_path.keys()).count('request') > 0:
        if 'request' in parsed_path:
            # Forward every query parameter except the routing ones.
            query = {}
            # TODO: confirm assumption: "parsed_path" is a Python 3 dictionary object
            # for i in list(parsed_path.keys()):
            for i in parsed_path.keys():
                if i != "server" and i != "token":
                    query[i] = parsed_path[i][0]
            res = self.req_tmpl.replace("[server]", params[0]).replace("[token]", params[1]).replace("[query]", '<wps:ComplexData mimeType="application/json">' + json.dumps(query) + '</wps:ComplexData>').replace("[ip_address]", "<wps:LiteralData>" + clientIp + "</wps:LiteralData>").replace("[user]",
                                                                                                                                                                                                                                                                                              request.getUser()).replace(
                "[password]", request.getPassword())
            log.msg(res)
            # NOTE(review): urllib on Python 3 requires `data` to be bytes;
            # `res` is str here, so this raises TypeError -- needs .encode().
            req = urllib.request.Request(url=self.SecureAccessUrl,
                                         data=res,
                                         headers={'Content-Type': 'application/xml'})
            try:
                response = urllib.request.urlopen(req)
            except Exception as e:
                # NOTE(review): only HTTPError has .code/.read(); URLError and
                # other exceptions will raise AttributeError here.
                request.setResponseCode(e.code)
                request.setHeader("WWW-Authenticate", 'Basic realm="MapMint OWS-Security", charset="UTF-8"')
                request.setHeader("content-type", "text/xml")
                return e.read()
            log.msg(response.info())
            # Mirror the backend's headers, except hop-by-hop transfer-encoding.
            lkeys = list(response.headers.keys())
            lvalues = list(response.headers.values())
            print(lkeys, file=sys.stderr)
            print(lvalues, file=sys.stderr)
            for i in range(0, len(lkeys)):
                if "transfer-encoding" != lkeys[i]:
                    request.setHeader(lkeys[i], lvalues[i])
            return response.read()
        self.numberRequests += 1
        request.setHeader("content-type", "text/plain")
        # NOTE(review): twisted on Python 3 expects bytes from render_*;
        # returning str is a latent error here.
        return "I am request #" + str(self.numberRequests) + "\n"

    def render_POST(self, request):
        """Wrap a POSTed XML body into a SecureAccess Execute request and relay
        the backend's raw response; on any failure return an HTML error page."""
        self.setDefaultHeaders(request)
        try:
            query = {}
            rcontent = request.path.split('/')
            log.msg(request.args)
            pquery = request.content.read()
            log.msg(pquery)
            # TODO: confirm assumption: "request.args" is a Python 3 dictionary object
            # if list(request.args.keys()).count("token") > 0 and list(request.args.keys()).count("server"):
            if "token" in request.args and "server" in request.args:
                params = [request.args["server"][0], request.args["token"][0]]
            else:
                params = [rcontent[3], rcontent[2]]
            clientIp = request.getHeader("x-real-ip")
            if clientIp is None:
                clientIp = request.transport.getPeer().host
            # NOTE(review): uses rcontent[3]/rcontent[2] directly, ignoring the
            # `params` computed just above -- confirm which is intended.
            res = self.req_tmpl.replace("[server]", rcontent[3]).replace("[token]", rcontent[2]).replace("[query]", '<wps:ComplexData mimeType="text/xml">' + pquery + '</wps:ComplexData>').replace("[ip_address]", "<wps:LiteralData>" + clientIp + "</wps:LiteralData>").replace("[user]", request.getUser()).replace(
                "[password]", request.getPassword())
            req = urllib.request.Request(url=self.SecureAccessUrl,
                                         data=res,
                                         headers={'Content-Type': 'text/xml'})
            print(res, file=sys.stderr)
            response = urllib.request.urlopen(req)
            print(request.headers, file=sys.stderr)
            log.msg(response.info())
            log.msg(res)
            return response.read()
        except Exception as e:
            print("ERROR: " + str(e), file=sys.stderr)
            # NOTE(review): `req` (and `pquery`) may be unbound if the failure
            # happened before their assignment, masking the original error.
            log.msg(req)
            return '<html><body>You submitted the following request which is not supported: %s</body></html>\n' % (pquery,)
# Wire everything up: log to stderr and serve the proxy resource on port 8080.
log.startLogging(sys.stderr)
reactor.listenTCP(8080, server.Site(HelloResource()))
reactor.run()
| 2.15625 | 2 |
python/8Kyu/Semi-Optional.py | athasv/Codewars-data | 0 | 12757546 | <gh_stars>0
def wrap(value):
    """Wrap *value* in a dict under the key "value"."""
    return {"value": value}
python_packages/pytorch/4_extending_with_python/CUDACpp_exts/2_PMTS_custom_cuda_kernel/setup.py | trisct/Software-Tutorials | 2 | 12757547 | from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# [!!!] be sure to use different file names for cpp and cu files
# because `setuptools` does not see the filename extension
# Build a single extension module named PMTS_cuda from the C++ binding file
# and the CUDA kernel file; BuildExtension drives the mixed nvcc/C++ build.
setup(
    name='PMTS_cuda',
    ext_modules=[
        CUDAExtension('PMTS_cuda', [
            'PMTS_cuda.cpp',
            'PMTS_cuda_kernels.cu',
        ])
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
| 1.367188 | 1 |
stable/ckan/conf/psql-init/psql-init.py | menmarc/charts | 6 | 12757548 | """
Copyright (c) 2020 Keitaro AB
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import subprocess
import re
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from psycopg2.extensions import AsIs
# Connection strings and master credentials are supplied via environment
# variables (set by the deployment).
ckan_conn_str = os.environ.get('CKAN_SQLALCHEMY_URL', '')
datastorerw_conn_str = os.environ.get('CKAN_DATASTORE_WRITE_URL', '')
datastorero_conn_str = os.environ.get('CKAN_DATASTORE_READ_URL', '')
master_user = os.environ.get('PSQL_MASTER', '')
# NOTE(review): this line was corrupted in the source ("<PASSWORD>" residue);
# reconstructed by analogy with the surrounding PSQL_* variables -- confirm
# the exact environment variable name against the chart templates.
master_passwd = os.environ.get('PSQL_PASSWD', '')
master_database = os.environ.get('PSQL_DB', '')
master_host = os.environ.get('PSQL_HOST', '')
class DB_Params:
    """Parsed components of a SQLAlchemy-style connection string.

    Splits ``protocol://user:password@host/dbname`` into attributes.
    Note: splitting is naive -- a password containing ':', '@' or '/'
    will be mis-parsed.
    """

    def __init__(self, conn_str):
        conn_protocol, conn_info = conn_str.split('://')
        db_user, db_passwd = conn_info.split(':')
        db_passwd, db_host = db_passwd.split('@')
        db_host, db_name = db_host.split('/')
        self.db_protocol = conn_protocol
        self.db_user = db_user
        # The source was corrupted here ("<PASSWORD>" residue); the parsed
        # password from the connection string is the obvious intended value.
        self.db_passwd = db_passwd
        self.db_host = db_host
        self.db_name = db_name
def check_db_connection(db_params, retry=None):
    """Block until the master database accepts connections.

    Retries every 30 seconds, up to *retry* attempts (default 20), and
    exits the process with status 1 when the attempts are exhausted.
    *db_params* is only threaded through the recursive retry call; the
    connection itself uses the module-level master credentials.
    """
    print('Checking whether database is up...')
    if retry is None:
        retry = 20
    elif retry == 0:
        print('Giving up...')
        sys.exit(1)
    try:
        con = psycopg2.connect(user=master_user,
                               host=master_host,
                               # Reconstructed from corrupted "<PASSWORD>"
                               # residue in the source.
                               password=master_passwd,
                               database=master_database)
    except psycopg2.Error as e:
        print(str(e))
        print('Unable to connect to the database...try again in a while.')
        import time
        time.sleep(30)
        check_db_connection(db_params, retry=retry - 1)
    else:
        con.close()
def create_user(db_params):
    """Create the database role described by *db_params*.

    Errors are printed rather than raised so a re-run against an
    already-initialized cluster does not abort the script.
    """
    con = None
    cur = None
    try:
        con = psycopg2.connect(user=master_user,
                               host=master_host,
                               # Reconstructed from corrupted "<PASSWORD>"
                               # residue in the source.
                               password=master_passwd,
                               database=master_database)
        con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cur = con.cursor()
        print("Creating user " + db_params.db_user)
        # AsIs interpolates the role name unquoted (identifiers cannot be
        # bound parameters); the password is passed as a proper literal.
        cur.execute('CREATE USER %s WITH PASSWORD %s',
                    (AsIs(db_params.db_user), db_params.db_passwd,))
    except(Exception, psycopg2.DatabaseError) as error:
        print("ERROR DB: ", error)
    finally:
        # Guard: if connect() failed, cur/con were never created; the
        # original unconditional close raised UnboundLocalError here,
        # masking the real connection error.
        if cur is not None:
            cur.close()
        if con is not None:
            con.close()
def create_db(db_params):
    """Create the database described by *db_params*, owned by its user.

    Errors are printed rather than raised (idempotent re-runs).
    """
    con = None
    cur = None
    try:
        con = psycopg2.connect(user=master_user,
                               host=master_host,
                               # Reconstructed from corrupted "<PASSWORD>"
                               # residue in the source.
                               password=master_passwd,
                               database=master_database)
        con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cur = con.cursor()
        # Grant the new role to the master user so CREATE DATABASE ... OWNER
        # is permitted even when the master user is not a superuser.
        # NOTE: identifiers are concatenated into the SQL; they originate
        # from deployment configuration, not end-user input.
        cur.execute('GRANT ' + db_params.db_user + ' TO ' + master_user)
        print("Creating database " + db_params.db_name + " with owner " +
              db_params.db_user)
        cur.execute('CREATE DATABASE ' + db_params.db_name + ' OWNER ' +
                    db_params.db_user)
    except(Exception, psycopg2.DatabaseError) as error:
        print("ERROR DB: ", error)
    finally:
        # Guard against connect() having failed before cur/con existed.
        if cur is not None:
            cur.close()
        if con is not None:
            con.close()
def set_db_permissions(db_params, sql):
    """Execute the datastore permissions *sql* against db_params.db_name.

    Errors are printed rather than raised.
    """
    con = None
    cur = None
    try:
        con = psycopg2.connect(user=master_user,
                               host=master_host,
                               # Reconstructed from corrupted "<PASSWORD>"
                               # residue in the source.
                               password=master_passwd,
                               database=db_params.db_name)
        con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cur = con.cursor()
        print("Setting datastore permissions\n")
        print(sql)
        cur.execute(sql)
        print("Datastore permissions applied.")
    except Exception as error:
        print("ERROR DB: ", error)
    finally:
        # Guard against connect() having failed before cur/con existed.
        if cur is not None:
            cur.close()
        if con is not None:
            con.close()
# Abort early when the master credentials were not provided.
if master_user == '' or master_passwd == '' or master_database == '':
    print("No master postgresql user provided.")
    print("Cannot initialize default CKAN db resources. Exiting!")
    sys.exit(1)

print("Master DB: " + master_database + " Master User: " + master_user)

ckan_db = DB_Params(ckan_conn_str)
datastorerw_db = DB_Params(datastorerw_conn_str)
datastorero_db = DB_Params(datastorero_conn_str)

# Check to see whether we can connect to the database, exit after 10 mins
check_db_connection(ckan_db)

# Create the three roles, then the two databases, in the same order as
# before; each step logs and continues on failure so re-runs are safe.
for role_params in (ckan_db, datastorerw_db, datastorero_db):
    try:
        create_user(role_params)
    except(Exception, psycopg2.DatabaseError) as error:
        print("ERROR DB: ", error)

for database_params in (ckan_db, datastorerw_db):
    try:
        create_db(database_params)
    except(Exception, psycopg2.DatabaseError) as error:
        print("ERROR DB: ", error)

# replace ckan.plugins so that ckan cli can run and apply datastore permissions
# sed_string = "s/ckan.plugins =.*/ckan.plugins = envvars image_view text_view recline_view datastore/g" # noqa
# subprocess.Popen(["/bin/sed", sed_string, "-i", "/srv/app/production.ini"])
sql = subprocess.check_output(["ckan",
                               "-c", "/srv/app/production.ini",
                               "datastore",
                               "set-permissions"],
                              stderr=subprocess.PIPE)
print(sql)
# Remove the connect clause from the output
sql = re.sub("\\\connect.*", "", sql.decode('utf-8'))
set_db_permissions(datastorerw_db, sql)
| 1.84375 | 2 |
vxt/view/config.py | veeso/voice-xtractor | 1 | 12757549 | # VXT
# Developed by <NAME>
#
# MIT License
# Copyright (c) 2021 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from locale import getdefaultlocale
from typing import Optional
from vxt.misc.track_fmt import TrackFmt
from vxt.speech2text.engine import Speech2TextEngine
from vxt.speech2text.bing import BingSpeech2TextEngine
from vxt.speech2text.google import GoogleSpeech2TextEngine
from vxt.speech2text.google_cloud import GoogleCloudSpeech2TextEngine
from vxt.speech2text.houndify import HoundifySpeech2TextEngine
from vxt.speech2text.ibm import IbmSpeech2TextEngine
from vxt.speech2text.sphinx import SphinxSpeech2TextEngine
class Config(object):
    """Mutable application configuration.

    Holds the selected speech-to-text engine, the recognition language,
    the silence-based audio splitting parameters, and the output track
    naming/location settings.
    """

    def __init__(self) -> None:
        super().__init__()
        # Default engine: Google's speech API without an API key.
        self.__engine: Speech2TextEngine = GoogleSpeech2TextEngine(None)
        locale = getdefaultlocale()[0]
        if locale:
            self.__language: str = locale
        else:
            self.__language: str = "en_US"
        # Audio chunking defaults (durations in ms, threshold in dBFS).
        self.__min_silence_len: int = 500
        self.__silence_threshold: int = -16
        self.__keep_silence: int = 500
        self.__output_fmt: TrackFmt = TrackFmt("%t-%s.64")
        self.__output_dir: Optional[str] = None

    @property
    def engine(self) -> Speech2TextEngine:
        """Currently selected speech-to-text engine."""
        return self.__engine

    @property
    def language(self) -> str:
        """Recognition language (locale string such as ``en_US``)."""
        return self.__language

    @language.setter
    def language(self, language: str) -> None:
        self.__language = language

    @property
    def min_silence_len(self) -> int:
        """Minimum silence duration (ms) used to split the audio."""
        return self.__min_silence_len

    @min_silence_len.setter
    def min_silence_len(self, length: int) -> None:
        # Parameter renamed from `len` to avoid shadowing the builtin.
        self.__min_silence_len = length

    @property
    def silence_threshold(self) -> int:
        """Silence threshold (must be a negative dBFS value)."""
        return self.__silence_threshold

    @silence_threshold.setter
    def silence_threshold(self, t: int) -> None:
        if t > 0:
            raise InvalidConfigError("Silence threshold should be a negative number")
        self.__silence_threshold = t

    @property
    def keep_silence(self) -> int:
        """Milliseconds of silence kept at chunk boundaries (>= 0)."""
        return self.__keep_silence

    @keep_silence.setter
    def keep_silence(self, how_much: int) -> None:
        if how_much < 0:
            raise InvalidConfigError(
                "Keep silence should be a positive integer bigger than or equal to 0"
            )
        self.__keep_silence = how_much

    @property
    def output_fmt(self) -> TrackFmt:
        """Output track naming format."""
        return self.__output_fmt

    @output_fmt.setter
    def output_fmt(self, fmt: str) -> None:
        try:
            self.__output_fmt = TrackFmt(fmt)
        except Exception:
            raise InvalidConfigError("Invalid fmt syntax")

    @property
    def output_dir(self) -> Optional[str]:
        # Annotation fixed: the directory is None until explicitly set.
        return self.__output_dir

    @output_dir.setter
    def output_dir(self, d: str) -> None:
        self.__output_dir = d

    # -- speech 2 text setters

    def use_bing_speech2text(self, api_key: str) -> None:
        """Select the Bing engine with the given API key."""
        self.__engine = BingSpeech2TextEngine(api_key)

    def use_google_speech2text(self, api_key: Optional[str]) -> None:
        """Select the Google engine (keyless API when *api_key* is None)."""
        self.__engine = GoogleSpeech2TextEngine(api_key)

    def use_google_cloud_speech2text(self, credentials: Optional[str]) -> None:
        """Select the Google Cloud engine with optional credentials."""
        self.__engine = GoogleCloudSpeech2TextEngine(credentials)

    def use_houndify_speech2text(self, client_id: str, client_key: str) -> None:
        """Select the Houndify engine."""
        self.__engine = HoundifySpeech2TextEngine(client_id, client_key)

    def use_ibm_speech2text(self, username: str, password: str) -> None:
        """Select the IBM engine."""
        self.__engine = IbmSpeech2TextEngine(username, password)

    def use_sphinx_speech2text(
        self, keyword_entries: Optional[str], grammar_file: Optional[str]
    ) -> None:
        """Select the offline CMU Sphinx engine."""
        self.__engine = SphinxSpeech2TextEngine(keyword_entries, grammar_file)
class InvalidConfigError(Exception):
    """Raised when a configuration value is rejected."""

    def __init__(self, message: str):
        # Pass the message to Exception so args/str() behave conventionally.
        super().__init__(message)
        self.message = message

    def __str__(self):
        # Fixed: the original returned repr(message) from __str__ and
        # str(message) from __repr__ (semantics were inverted).
        return str(self.message)

    def __repr__(self):
        return "InvalidConfigError(%r)" % (self.message,)
| 1.828125 | 2 |
topo/sparse/sparsecf.py | ceball/topographica | 0 | 12757550 | <reponame>ceball/topographica<gh_stars>0
"""
Basic SparseCFProjection with associated sparse CFs and output,
response, and learning function. If sparse component cannot be imported,
SparseCFProjection will fall back to a basic dense CFProjection.
CFSOF and CFSLF Plugin function allow any single CF output function to
be applied to the sparse CFs, but may suffer a serious performance
loss. For real work, such functions should be implemented at the
Cython or C++ level.
"""
import numpy as np
import math
from scipy.ndimage.filters import gaussian_filter
import param
from copy import copy
import topo
from topo.base.cf import CFProjection, NullCFError, _create_mask, simple_vectorize
from topo import pattern
from imagen import patterngenerator
from imagen.patterngenerator import PatternGenerator
from topo.base.functionfamily import TransferFn, IdentityTF
from topo.base.functionfamily import LearningFn, Hebbian
from topo.base.functionfamily import ResponseFn, DotProduct
from topo.base.sheetcoords import Slice
# Try to import the optimized sparse-matrix extension; when unavailable,
# SparseCFProjection falls back to the dense CFProjection implementation.
use_sparse = True
try:
    import sparse
except ImportError:  # narrowed from a bare `except:` (which also caught
    use_sparse = False  # KeyboardInterrupt/SystemExit)

# dtype used for all sparse CF weight storage.
sparse_type = np.float32
class CFSPLF_Plugin(param.Parameterized):
    """CFSPLearningFunction applying the specified single_cf_fn to each Sparse CF."""

    single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),doc="""
        Accepts a LearningFn that will be applied to each CF individually.""")

    def constant_sum_connection_rate(self,n_units,learning_rate):
        """
        Return the learning rate for a single connection assuming that
        the total rate is to be divided evenly among all the units in
        the connection field.
        """
        return float(learning_rate)/n_units

    def __call__(self, projection, **params):
        """Apply the specified single_cf_fn to every sparse CF."""
        per_conn_rate = self.constant_sum_connection_rate(
            projection.n_units, projection.learning_rate)
        # Hoist attribute lookups out of the per-CF loop.
        cf_fn = self.single_cf_fn
        src_activity = projection.src.activity
        dest_activity = projection.dest.activity
        for cf in projection.flatcfs:
            dense_w = cf.weights
            cf_fn(cf.get_input_matrix(src_activity),
                  dest_activity.flat[cf.oned_idx], dense_w,
                  per_conn_rate)
            # Zero out entries outside the CF's connection mask before
            # writing the dense copy back into the sparse store.
            dense_w *= cf.mask
            cf.weights = dense_w
class CFSPOF_Plugin(param.Parameterized):
    """
    Applies the specified single_cf_fn to each SparseCF in the SparseCFProjection.
    """
    single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF(),
        doc="Accepts a TransferFn that will be applied to each CF individually.")

    def __call__(self, projection, **params):
        # IdentityTF is a no-op, so skip the expensive dense round-trip.
        if type(self.single_cf_fn) is not IdentityTF:
            single_cf_fn = self.single_cf_fn
            for cf in projection.flatcfs:
                temp_weights = cf.weights
                # BUG FIX: apply the transfer function to the dense copy that
                # is written back.  `cf.weights` is a property whose getter
                # returns a *fresh* dense array each access, so the original
                # `single_cf_fn(cf.weights)` mutated a throwaway copy and the
                # transformation was silently discarded.
                single_cf_fn(temp_weights)
                cf.weights = temp_weights
                # Invalidate the cached normalization total for this CF.
                del cf.norm_total
class CFSPOF_Prune(CFSPOF_Plugin):
    """
    Prunes specified percentage of connections from CFs in SparseCFProjection
    at specified interval.
    """

    interval = param.Number(default=1000,bounds=(0,None),doc="""
        Time interval at which pruning step will be applied.""")

    percentile = param.Number(default=10.0,bounds=(0,100),doc="""
        Percentile boundary below which connections will be pruned.""")

    def __call__(self, projection, **params):
        now = math.ceil(topo.sim.time())
        if now == 0:
            # Record the initial connection count once, for reporting.
            if not hasattr(self, "initial_conns"):
                self.initial_conns = {}
            self.initial_conns[projection.name] = projection.n_conns()
        elif now % self.interval == 0:
            for cf in projection.flatcfs:
                dense_w = cf.weights
                # Threshold at the requested percentile of the *nonzero*
                # weights, then zero everything at or below it.
                cutoff = np.percentile(dense_w[dense_w.nonzero()],
                                       self.percentile)
                dense_w[np.where(dense_w <= cutoff)] = 0.0
                cf.weights = dense_w
            # Drop the zeroed entries from the sparse storage.
            projection.weights.prune()
            self.message("%s has %f%% of initial connections" % (projection.name, (float(projection.n_conns())/self.initial_conns[projection.name])*100))
class CFSPOF_SproutRetract(CFSPOF_Plugin):
    """
    Sprouting and retraction weights output function. At a preset time
    interval, the function removes and adds connections based on a
    piecewise function, which determines the number of connections to
    alter and the sprouting and retraction ratios, eventually allowing
    connections to converge on the target_sparsity. The function
    ensures the full turnover_rate is applied at the maximal distances
    from the target sparsity, i.e. at 0% and 100% density. As the
    projection approaches the target sparsity, it will asymptote, but a
    residual turnover will ensure that a fixed amount of connections
    will continue to sprout and retract.
    Retraction deletes the x lowest weights, while sprouting applies a
    convolution with a Gaussian kernel to the existing connections,
    growing connections at locations with the highest probabilities.
    Still experimental and not scientifically validated.
    """

    interval = param.Number(default=1000,bounds=(0,None),doc="""
        Time interval between sprout/retract steps.""")

    residual_turnover = param.Number(default=0.01,bounds=(0,1.0),doc="""
        Constant turnover rate independent of current sparsity.""")

    turnover_rate = param.Number(default=0.1,bounds=(0,1.0),doc="""
        Percentage of weights to change per interval, assuming
        currently fully dense and target is fully sparse.""")

    target_sparsity = param.Number(default=0.15,bounds=(0,1.0),doc="""
        Sparsity level at which sprouting and retraction cancel out.""")

    kernel_sigma = param.Number(default=1.0,bounds=(0.0,10.0),doc="""
        Gaussian spatial variance for weights to diffuse per interval.""")

    disk_mask = param.Boolean(default=True,doc="""
        Limits connection sprouting to a disk.""")

    def __call__(self, projection, **params):
        # Simulation time rounded up to the next integer step.
        time = math.ceil(topo.sim.time())
        if self.disk_mask:
            self.disk = pattern.Disk(size=1.0,smoothing=0.0)
        # Get CF and src sheet shapes
        cf_x,cf_y = projection.dest.activity.shape
        src_x,src_y = projection.src.activity.shape
        # Initialize sparse triplet arrays
        # (sized to hold one full column of CFs at a time; refilled and
        # flushed into `sum_sparse` every cf_y CFs below)
        y_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
        x_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
        val_array = np.zeros((src_x*src_y*cf_y),dtype=sparse_type)
        # Create new sparse matrix to accumulate into
        sum_sparse = sparse.csarray_float(projection.src.activity.shape,projection.dest.activity.shape)
        # Counters for logging
        sprout_sum = 0; prune_sum = 0; unit_total = 0
        self.mask_total = 0
        if (time == 0):
            # First step: just record the initial connection count.
            if not hasattr(self,"initial_conns"):
                self.initial_conns = {}
            self.initial_conns[projection.name] = projection.n_conns()
        elif (time % self.interval) == 0:
            idx=0
            for cidx,cf in enumerate(projection.flatcfs):
                # Dense copy of this CF; mask marks currently-zero entries
                # (candidate locations for sprouting).
                temp_weights = cf.weights
                dense_unit_mask = (1.0 - (temp_weights>0.0))
                dim1,dim2 = temp_weights.shape
                sprout_count,prune_idx,nnz = self.calc_ratios(temp_weights)
                self.prune(temp_weights,prune_idx)
                nnz_pp = np.count_nonzero(temp_weights)
                prune_sum += (nnz_pp-nnz)
                self.sprout(temp_weights,dense_unit_mask,sprout_count)
                nnz_ps = np.count_nonzero(temp_weights)
                sprout_sum += nnz_ps - nnz_pp
                unit_total += nnz_ps
                # Populate sparse array chunk
                temp_sparse = sparse.csarray_float(projection.src.activity.shape,projection.dest.activity.shape)
                x1,x2,y1,y2 = cf.input_sheet_slice.tolist()
                for cnx in range(dim1):
                    # Flatten this CF row into the triplet buffers, offset
                    # by the CF's position on the source sheet.
                    val_array[idx:idx+dim2] = temp_weights[cnx,:]
                    x_val = (x1+cnx) * src_y + y1
                    x_array[idx:idx+dim2] = range(x_val,x_val+dim2)
                    y_array[idx:idx+dim2] = cidx
                    idx += dim2
                # Populate combined sparse array with sparse array chunk
                if (cidx+1)%cf_y == 0:
                    nnz_idx = val_array.nonzero()
                    temp_sparse.setTriplets(x_array[nnz_idx],y_array[nnz_idx],val_array[nnz_idx])
                    sum_sparse += temp_sparse
                    x_array *= 0; y_array *= 0; val_array *= 0.0
                    idx=0
            # Swap in the rebuilt sparse weight matrix.
            projection.weights = sum_sparse
            del temp_sparse, sum_sparse
            projection.weights.compress()
            self.message("%s pruned by %d and sprouted %d, connection is now %f%% dense" % (projection.name,prune_sum,sprout_sum,(float(unit_total)/self.mask_total)*100))

    def sprout(self, temp_weights, mask, sprout_count):
        """
        Applies a Gaussian blur to the existing connection field,
        selecting the n units with the highest probabilities to sprout
        new connections, where n is set by the sprout_count. New
        connections are initialized at the minimal strength of the
        current CF.
        """
        dim1,dim2 = temp_weights.shape
        init_weight = temp_weights[temp_weights.nonzero()].min()
        # Normalize the blurred field to [0,1] before randomizing.
        blurred_weights = gaussian_filter(temp_weights, sigma=self.kernel_sigma)
        blurred_weights = (blurred_weights - blurred_weights.min()) / blurred_weights.max()
        # `mask` restricts sprouting to currently-unconnected locations.
        sprout_prob_map = (blurred_weights * np.random.rand(dim1,dim2)) * mask
        if self.disk_mask:
            sprout_prob_map *= self.disk(xdensity=dim2,ydensity=dim1)
        sprout_inds = np.unravel_index(np.argsort(sprout_prob_map.flatten())[-sprout_count:],(dim1,dim2))
        temp_weights[sprout_inds] = init_weight

    def prune(self, temp_weights, prune_idx):
        """
        Retracts n connections with the lowest weights, where n is
        determined by the piecewise linear function in the calc_ratios
        method.
        """
        # prune_idx indexes into the ascending sort of *all* entries
        # (zeros included), as computed by calc_ratios.
        sorted_weights = np.sort(temp_weights.flatten())
        threshold = sorted_weights[prune_idx]
        temp_weights[temp_weights < threshold] = 0.0

    def calc_ratios(self,temp_weights):
        """
        Uses a piecewise linear function to determine the unit
        proportion of sprouting and retraction and the associated
        turnover rates.
        Above the target sparsity the sprout/retract ratio scales
        linearly up to maximal density, i.e. at full density 100% of
        the turnover is put into retraction while at full sparsity
        all the turnover is put into sprouting new connections. At
        the target density sprouting and retraction are equal.
        The turnover is determined also determined by the piecewise
        linear function. At maximal distance from the target sparsity,
        i.e. at full sparsity or density, the full turnover rate will
        be used and as the target sparsity is approached from either
        side this term decays to zero. Therefore, a residual turnover
        is introduced to ensure that even at the target sparsity some
        connections continue to sprout and retract.

        Returns (sprout_count, prune_idx, nnz).
        """
        dim1,dim2 = temp_weights.shape
        if self.disk_mask:
            masked_units = len(self.disk(xdensity=dim2,ydensity=dim1).nonzero()[0])
        else:
            masked_units = dim1*dim2
        self.mask_total += masked_units
        max_units = dim1*dim2
        nnz = np.count_nonzero(temp_weights)
        cf_sparsity = nnz / float(masked_units)
        delta_sparsity = cf_sparsity - self.target_sparsity
        # relative_sparsity is in [-1, 1]: distance from target, scaled
        # separately on each side of the target.
        if delta_sparsity > 0:
            relative_sparsity = delta_sparsity/(1.0 - self.target_sparsity)
        else:
            relative_sparsity = delta_sparsity/self.target_sparsity
        # Total number of units to modify, broken down into units for pruning and sprouting
        delta_units = (abs(self.turnover_rate * relative_sparsity) + self.residual_turnover) * masked_units
        prune_factor = 0.5 + (0.5*relative_sparsity)
        prune_count = int(delta_units * prune_factor)
        # Offset past the (max_units - nnz) zero entries in the full sort.
        prune_idx = (max_units-nnz)+prune_count
        sprout_count = int(delta_units * (1-prune_factor))
        return sprout_count, prune_idx, nnz
class CFSPRF_Plugin(param.Parameterized):
    """
    Generic large-scale response function based on a simple single-CF function.
    Applies the single_cf_fn to each CF in turn. For the default single_cf_fn
    of DotProduct(), does a basic dot product of each CF with the corresponding
    slice of the input array. This function is likely to be slow to run, but
    it is easy to extend with any arbitrary single-CF response function.
    The single_cf_fn must be a function f(X,W) that takes two identically
    shaped matrices X (the input) and W (the CF weights) and computes a scalar
    activation value based on those weights.
    """

    single_cf_fn = param.ClassSelector(ResponseFn,default=DotProduct(),doc="""
        Accepts a ResponseFn that will be applied to each CF individually.""")

    def __call__(self, projection, **params):
        cf_fn = self.single_cf_fn
        src_activity = projection.src.activity
        out = projection.activity
        for idx, cf in enumerate(projection.flatcfs):
            # Input patch corresponding to this CF on the source sheet.
            patch = cf.input_sheet_slice.submatrix(src_activity)
            out.flat[idx] = cf_fn(patch, cf.weights)
        out *= projection.strength
def compute_sparse_joint_norm_totals(projlist, active_units_mask=True):
    """
    Compute norm_total for each CF in each projection from a group to be
    normalized jointly.
    """
    # Assumes that all Projections in the list have the same r,c size
    assert len(projlist) >= 1
    # Refresh per-projection totals where they are stale.
    for proj in projlist:
        if not proj.has_norm_total:
            proj.norm_total *= 0.0
            proj.weights.CFWeightTotals(proj.norm_total)
            proj.has_norm_total = True
    # Sum the totals across the group and write the joint sum back to
    # every projection (each gets its own copy).
    joint_sum = np.add.reduce([proj.norm_total for proj in projlist],
                              dtype=np.float64)
    for proj in projlist:
        proj.norm_total = joint_sum.copy()
def CFPOF_DivisiveNormalizeL1_Sparse(projection):
    """
    Sparse CF Projection output function applying L1 divisive normalization
    to individual CFs.
    """
    # Recompute per-CF weight totals unless a prior step (e.g. a joint
    # normalization) already filled them in.
    needs_totals = not projection.has_norm_total
    if needs_totals:
        projection.norm_total *= 0.0
        projection.weights.CFWeightTotals(projection.norm_total)
    projection.weights.DivisiveNormalizeL1(projection.norm_total)
    # Totals are consumed by the normalization; mark them stale.
    projection.has_norm_total = False
def CFPLF_Hebbian_Sparse(projection):
    """
    Sparse CF Projection learning function applying Hebbian learning
    to the weights in a projection.
    """
    # Learning rate per individual connection.
    per_conn_rate = projection.learning_rate / projection.n_units
    projection.norm_total *= 0.0
    projection.weights.Hebbian(projection.src.activity,
                               projection.dest.activity,
                               projection.norm_total,
                               per_conn_rate)
    # The C++ routine refreshes norm_total as a side effect.
    projection.has_norm_total = True
def CFPLF_Hebbian_Sparse_opt(projection):
    """
    Sparse CF Projection learning function, which calls an optimized Hebbian
    learning function while skipping over inactive units.
    """
    # Learning rate per individual connection.
    per_conn_rate = projection.learning_rate / projection.n_units
    projection.norm_total *= 0.0
    projection.weights.Hebbian_opt(projection.src.activity,
                                   projection.dest.activity,
                                   projection.norm_total,
                                   per_conn_rate,
                                   projection.initialized)
    # The C++ routine refreshes norm_total as a side effect.
    projection.has_norm_total = True
def CFPRF_DotProduct_Sparse(projection):
    """
    Sparse CF Projection response function calculating the dot-product
    between incoming activities and CF weights.
    """
    # Delegates entirely to the C++ sparse-matrix implementation; the result
    # (scaled by `strength`) is written into `projection.activity` in place.
    # NOTE(review): this variant reads `projection.input_buffer`, while the
    # `_opt` variant below reads `projection.src.activity` -- and
    # SparseCFProjection.__init__ sets input_buffer = None.  Confirm which
    # buffer is intended here.
    projection.weights.DotProduct(projection.strength, projection.input_buffer, projection.activity)
def CFPRF_DotProduct_Sparse_opt(projection):
    """
    Sparse CF Projection response function calculating the dot-product
    between incoming activities and CF weights.  Optimization skips
    inactive units when fewer than 10% of the input units are active.
    """
    activity = projection.src.activity
    # `.size` avoids flattening (copying) the whole array just to count
    # its elements, as `len(activity.flatten())` did.
    nnz_ratio = np.count_nonzero(activity) / activity.size
    if nnz_ratio < 0.1:
        projection.weights.DotProduct_opt(projection.strength, activity, projection.activity)
    else:
        projection.weights.DotProduct(projection.strength, activity, projection.activity)
class SparseConnectionField(param.Parameterized):
    """
    A set of weights on one input Sheet.
    Each ConnectionField contributes to the activity of one unit on
    the output sheet, and is normally used as part of a Projection
    including many other ConnectionFields.

    The weights themselves live in the owning SparseCFProjection's
    shared sparse matrix; the `weights`/`mask` properties convert this
    CF's column to and from dense form on demand.
    """
    # ALERT: need bounds, more docs

    x = param.Number(default=0.0,doc="Sheet X coordinate of CF")
    y = param.Number(default=0.0,doc="Sheet Y coordinate of CF")

    weights_generator = param.ClassSelector(PatternGenerator,
        default=patterngenerator.Constant(),constant=True,doc="""
        Generates initial weights values.""")

    min_matrix_radius=param.Integer(default=1)

    output_fns = param.HookList(default=[],class_=TransferFn,precedence=0.08,doc="""
        Optional function(s) to apply to the pattern array after it has been created.
        Can be used for normalization, thresholding, etc.""")

    # Class attribute to switch to legacy weight generation if False
    independent_weight_generation = True

    def get_bounds(self,input_sheet=None):
        """Return this CF's bounds on *input_sheet* (default: its own sheet)."""
        if input_sheet is not None:
            return self.input_sheet_slice.compute_bounds(input_sheet)
        else:
            return self.input_sheet_slice.compute_bounds(self.input_sheet)

    def __get_shape_mask(self):
        # Evaluate the projection's cf_shape pattern centered on the source
        # sheet's (0,0) to obtain the generic CF shape mask.
        cf_shape = self.projection.cf_shape
        bounds = self.projection.bounds_template
        xdensity = self.projection.src.xdensity
        ydensity = self.projection.src.xdensity
        center_r,center_c = self.projection.src.sheet2matrixidx(0,0)
        center_x,center_y = self.projection.src.matrixidx2sheet(center_r,center_c)
        cf_mask = cf_shape(x=center_x,y=center_y,bounds=bounds,xdensity=xdensity,ydensity=ydensity)
        return cf_mask

    shape_mask = property(__get_shape_mask)

    # norm_total is stored per-unit on the projection; these accessors index
    # the projection-level array at this CF's matrix position.
    def __get_norm_total(self):
        return self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]]

    def __set_norm_total(self,new_norm_total):
        self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]] = new_norm_total

    def __del_norm_total(self):
        # `del cf.norm_total` zeroes the entry rather than removing it.
        self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]] = 0.0

    norm_total = property(__get_norm_total,__set_norm_total,__del_norm_total)

    def __get_mask(self):
        x1,x2,y1,y2 = self.input_sheet_slice.tolist()
        # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
        # documented replacement.
        mask = np.zeros((x2-x1,y2-y1),dtype=bool)
        inds = np.ravel_multi_index(np.mgrid[x1:x2,y1:y2],self.projection.src.shape).flatten()
        nz_flat = self.projection.weights[inds,self.oned_idx].toarray()
        nz_inds = nz_flat.reshape(x2-x1,y2-y1).nonzero()
        mask[nz_inds] = True
        return mask

    mask = property(__get_mask,
        """
        The mask property returns an array of bools representing the
        zero weights in the CF weights array.
        It is useful when applying additive functions on the weights
        array, to ensure zero values are not accidentally overwritten.
        The mask cannot be changed via the property, only by changing
        the weights directly.
        """)

    def __get_weights(self):
        """
        get_weights accesses the sparse CF matrix and returns the CF
        in dense form.  Note: a fresh array is built on every access.
        """
        x1,x2,y1,y2 = self.src_slice
        inds = np.ravel_multi_index(np.mgrid[x1:x2,y1:y2],self.projection.src.shape).flatten()
        return self.projection.weights[inds,self.oned_idx].toarray().reshape(x2-x1,y2-y1)

    def __set_weights(self,arr):
        """
        Takes an input array, which has to match the CF shape, and
        creates an mgrid of the appropriate size, adds the proper
        offsets and passes the values and indices to the sparse matrix
        representation.
        """
        x1,x2,y1,y2 = self.src_slice
        (dim1,dim2) = arr.shape
        assert (dim1,dim2) == (x2-x1,y2-y1), "Array does not match CF shape."
        (x,y) = np.mgrid[0:dim1,0:dim2] # Create mgrid of CF size
        x_ind = np.array(x)+x1; y_ind = np.array(y) + y1; # Add slice offsets
        row_inds = np.ravel_multi_index((x_ind,y_ind),self.projection.src.shape).flatten().astype(np.int32)
        col_inds = np.array([self.oned_idx]*len(row_inds),dtype=np.int32)
        self.projection.weights.put(arr[x,y].flatten(),row_inds,col_inds)

    weights = property(__get_weights,__set_weights)

    def __init__(self,template,input_sheet,projection,label=None,**params):
        """
        Initializes the CF object and stores meta information about the CF's
        shape and position in the SparseCFProjection to allow for easier
        initialization.
        """
        super(SparseConnectionField,self).__init__(**params)

        self.input_sheet = input_sheet
        self.projection = projection
        self.label = label

        # Matrix and flattened index of this CF's unit on the dest sheet.
        self.matrix_idx = self.projection.dest.sheet2matrixidx(self.x,self.y)
        self.oned_idx = self.matrix_idx[0] * self.projection.dest.shape[1] + self.matrix_idx[1]

        template = copy(template)

        if not isinstance(template,Slice):
            template = Slice(template,self.input_sheet,force_odd=True,
                             min_matrix_radius=self.min_matrix_radius)

        self.weights_slice = self._create_input_sheet_slice(template)
        self.src_slice = tuple(self.input_sheet_slice.tolist())

    def _init_weights(self,mask_template):
        """Generate and return this CF's initial dense weight array."""
        # NOTE(review): when mask_template lacks a 'view' attribute, the
        # _create_mask result is assigned to `mask` but then unconditionally
        # overwritten by the submatrix of the *original* mask_template on the
        # next line -- the freshly created mask is discarded.  Preserved
        # as-is; confirm against the dense ConnectionField implementation.
        if not hasattr(mask_template,'view'):
            mask = _create_mask(mask_template,
                                self.weights_slice.compute_bounds(
                                    self.input_sheet),
                                self.input_sheet,True,0.5)

        mask = self.weights_slice.submatrix(mask_template)
        mask = np.array(mask,copy=True)

        pattern_params = dict(x=self.x,y=self.y,
                              bounds=self.get_bounds(self.input_sheet),
                              xdensity=self.input_sheet.xdensity,
                              ydensity=self.input_sheet.ydensity,
                              mask=mask)

        controlled_weights = (param.Dynamic.time_dependent
                              and isinstance(param.Dynamic.time_fn,
                                             param.Time)
                              and self.independent_weight_generation)

        if controlled_weights:
            with param.Dynamic.time_fn as t:
                t(0)  # Initialize at time zero.
                # Controls random streams
                label = '' if self.label is None else self.label
                name = "%s_CF (%.5f, %.5f)" % (label, self.x, self.y)
                w = self.weights_generator(**dict(pattern_params,
                                                  name=name))
        else:
            w = self.weights_generator(**pattern_params)

        w = w.astype(sparse_type)

        for of in self.output_fns:
            of(w)

        return w

    def _create_input_sheet_slice(self,template):
        """
        Create the input_sheet_slice, which provides the appropriate
        Slice for this CF on the input_sheet (as well as providing
        this CF's exact bounds).
        Also creates the weights_slice, which provides the Slice for
        this weights matrix (in case it must be cropped at an edge).
        """
        # copy required because the template gets modified here but
        # needs to be used again
        input_sheet_slice = copy(template)
        input_sheet_slice.positionedcrop(self.x,self.y,self.input_sheet)
        input_sheet_slice.crop_to_sheet(self.input_sheet)

        # weights matrix cannot have a zero-sized dimension (could
        # happen at this stage because of cropping)
        nrows,ncols = input_sheet_slice.shape_on_sheet()
        if nrows<1 or ncols<1:
            raise NullCFError(self.x,self.y,self.input_sheet,nrows,ncols)

        self.input_sheet_slice = input_sheet_slice

        # not copied because we don't use again
        template.positionlesscrop(self.x,self.y,self.input_sheet)
        return template

    def get_input_matrix(self, activity):
        """Return the patch of *activity* covered by this CF's slice."""
        return self.input_sheet_slice.submatrix(activity)
class SparseCFProjection(CFProjection):
    """
    A projection composed of SparseConnectionFields from a Sheet into
    a ProjectionSheet.
    SparseCFProjection computes its activity using a response_fn which
    can either be an optimized function implemented as part of the
    sparse matrix class or an unoptimized function, which requests the
    weights in dense format. The initial contents of the
    SparseConnectionFields mapping from the input Sheet into the
    target ProjectionSheet are controlled by the weights_generator,
    cf_shape, and weights_output_fn parameters, while the location of
    the ConnectionField is controlled by the coord_mapper parameter.
    Any subclass has to implement the interface activate(self) that
    computes the response from the input and stores it in the activity
    array.
    """

    cf_type = param.Parameter(default=SparseConnectionField,doc="""
        Type of ConnectionField to use when creating individual CFs.""")

    # Defaults use the optimized sparse implementations defined above.
    learning_fn = param.Callable(default=CFPLF_Hebbian_Sparse,doc="""
        Function for computing changes to the weights based on one activation step.""")

    response_fn = param.Callable(default=CFPRF_DotProduct_Sparse,doc="""
        Function for computing the Projection response to an input pattern.""")

    weights_output_fns = param.HookList(default=[CFPOF_DivisiveNormalizeL1_Sparse],doc="""
        Functions applied to each CF after learning.""")

    # Flag read by CFPLF_Hebbian_Sparse_opt; set False until CFs exist.
    initialized = param.Boolean(default=False)
def __init__(self,initialize_cfs=True,**params):
"""
Initialize the Projection with a set of cf_type objects
(typically SparseConnectionFields), each located at the
location in the source sheet corresponding to the unit in the
target sheet. The cf_type objects are stored in the 'cfs'
array.
The nominal_bounds_template specified may be altered: the
bounds must be fitted to the Sheet's matrix, and the weights
matrix must have odd dimensions. These altered bounds are
passed to the individual connection fields.
A mask for the weights matrix is constructed. The shape is
specified by cf_shape; the size defaults to the size
of the nominal_bounds_template.
"""
super(CFProjection,self).__init__(**params)
self.weights_generator.set_dynamic_time_fn(None,sublistattr='generators')
# get the actual bounds_template by adjusting a copy of the
# nominal_bounds_template to ensure an odd slice, and to be
# cropped to sheet if necessary
self._slice_template = Slice(copy(self.nominal_bounds_template),
self.src,force_odd=True,
min_matrix_radius=self.min_matrix_radius)
self.bounds_template = self._slice_template.compute_bounds(self.src)
self.mask_template = _create_mask(self.cf_shape,self.bounds_template,
self.src,self.autosize_mask,
self.mask_threshold)
self.n_units = self._calc_n_units()
self.activity = np.array(self.dest.activity)
self.norm_total = np.array(self.dest.activity,dtype=np.float64)
self.has_norm_total = False
if initialize_cfs:
self._create_cfs()
if self.apply_output_fns_init:
self.apply_learn_output_fns()
self.input_buffer = None
def __getstate__(self):
"""
Method to support pickling of sparse weights object.
"""
state_dict = self.__dict__.copy()
state_dict['triplets'] = state_dict['weights'].getTriplets()
state_dict['weight_shape'] = (self.src.activity.shape,self.dest.activity.shape)
del state_dict['weights']
return state_dict
def __setstate__(self,state_dict):
"""
Method to support unpickling of sparse weights object.
"""
self.__dict__.update(state_dict)
self.weights = sparse.csarray_float(self.weight_shape[0],self.weight_shape[1])
rowInds, colInds, values = self.triplets
self.weights.setTriplets(rowInds,colInds,values)
del self.triplets
del self.weight_shape
def _create_cfs(self):
"""
Creates the CF objects, initializing the weights one by one
and adding them to the sparse weights object in chunks.
"""
vectorized_create_cf = simple_vectorize(self._create_cf)
self.cfs = vectorized_create_cf(*self._generate_coords())
self.flatcfs = list(self.cfs.flat)
self.weights = sparse.csarray_float(self.src.activity.shape,self.dest.activity.shape)
cf_x,cf_y = self.dest.activity.shape
src_x,src_y = self.src.activity.shape
y_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
x_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
val_array = np.zeros((src_x*src_y*cf_y),dtype=np.float32)
# Iterate over the CFs
for x in range(cf_x):
temp_sparse = sparse.csarray_float(self.src.activity.shape,self.dest.activity.shape)
idx = 0
for y in range(cf_y):
x1,x2,y1,y2 = self.cfs[x][y].input_sheet_slice.tolist()
if self.same_cf_shape_for_all_cfs:
mask_template = self.mask_template
else:
mask_template = _create_mask(self.cf_shape,self.bounds_template,
self.src,self.autosize_mask,
self.mask_threshold)
weights = self.cfs[x][y]._init_weights(mask_template)
cn_x,cn_y = weights.shape
y_val = x * cf_y + y
for cnx in range(cn_x):
val_array[idx:idx+cn_y] = weights[cnx,:]
x_val = (x1+cnx) * src_y + y1
x_array[idx:idx+cn_y] = range(x_val,x_val+cn_y)
y_array[idx:idx+cn_y] = y_val
idx += cn_y
nnz_idx = val_array.nonzero()
temp_sparse.setTriplets(x_array[nnz_idx],y_array[nnz_idx],val_array[nnz_idx])
self.weights += temp_sparse
x_array *= 0; y_array *= 0; val_array *= 0.0
del temp_sparse
self.weights.compress()
self.debug("Sparse projection %r loaded" % self.name)
def _create_cf(self,x,y):
"""
Create a ConnectionField at x,y in the src sheet.
"""
label = self.hash_format.format(name=self.name,
src=self.src.name,
dest=self.dest.name)
try:
CF = self.cf_type(template=self._slice_template,
projection=self,input_sheet=self.src,x=x,y=y,
weights_generator=self.weights_generator,
min_matrix_radius=self.min_matrix_radius,
label=label)
except NullCFError:
if self.allow_null_cfs:
CF = None
else:
raise
return CF
def get_sheet_mask(self):
return np.ones(self.activity.shape, dtype=self.activity.dtype)
def get_active_units_mask(self):
return np.ones(self.activity.shape, dtype=self.activity.dtype)
def activate(self,input_activity):
"""Activate using the specified response_fn and output_fn."""
if self.input_fns:
input_activity = input_activity.copy()
for iaf in self.input_fns:
iaf(input_activity)
self.input_buffer = input_activity
self.activity *=0.0
self.response_fn(self)
for of in self.output_fns:
of(self.activity)
def learn(self):
"""
For a SparseCFProjection, learn consists of calling the learning_fn.
"""
# Learning is performed if the input_buffer has already been set,
# i.e. there is an input to the Projection.
if self.input_buffer != None:
self.learning_fn(self)
def apply_learn_output_fns(self,active_units_mask=True):
"""
Apply the weights_output_fns to each unit.
"""
for of in self.weights_output_fns: of(self)
def n_bytes(self):
"""
Estimates the size on the basis of the number non-zeros in the
sparse matrix, asssuming indices and values are stored using
32-bit integers and floats respectively.
"""
return self.n_conns() * (3 * 4)
def n_conns(self):
"""
Returns number of nonzero weights.
"""
return self.weights.getnnz()
if not use_sparse:
    # Fall back to the dense implementation when the sparse extension
    # could not be imported.  The single-argument parenthesized print
    # form behaves identically under Python 2 and is valid Python 3.
    print("WARNING: Sparse component could not be imported, replacing SparseCFProjection with regular CFProjection")

    def SparseCFProjection(*args, **kwargs): # pyflakes:ignore (optimized version provided)
        """Dense drop-in replacement used when sparse support is unavailable."""
        return CFProjection(*args,**kwargs)
# Public components of the sparse CF implementation.  Note that some of
# these names (e.g. SparseCFProjection) may have been rebound above to
# dense fallbacks when the sparse extension is unavailable.
sparse_components = [CFSPLF_Plugin,
                     CFSPOF_Plugin,
                     CFSPOF_Prune,
                     CFSPOF_SproutRetract,
                     CFSPRF_Plugin,
                     compute_sparse_joint_norm_totals,
                     CFPOF_DivisiveNormalizeL1_Sparse,
                     CFPLF_Hebbian_Sparse,
                     CFPLF_Hebbian_Sparse_opt,
                     CFPRF_DotProduct_Sparse,
                     CFPRF_DotProduct_Sparse_opt,
                     SparseConnectionField,
                     SparseCFProjection]

# __all__ must contain name strings, not the objects themselves;
# assigning the objects (as before) makes "from <module> import *"
# raise a TypeError.  sparse_components above still holds the objects
# for any code that iterates over them directly.
__all__ = ["CFSPLF_Plugin",
           "CFSPOF_Plugin",
           "CFSPOF_Prune",
           "CFSPOF_SproutRetract",
           "CFSPRF_Plugin",
           "compute_sparse_joint_norm_totals",
           "CFPOF_DivisiveNormalizeL1_Sparse",
           "CFPLF_Hebbian_Sparse",
           "CFPLF_Hebbian_Sparse_opt",
           "CFPRF_DotProduct_Sparse",
           "CFPRF_DotProduct_Sparse_opt",
           "SparseConnectionField",
           "SparseCFProjection"]
| 2.09375 | 2 |