| hexsha (stringlengths 40-40) | size (int64 4-1.02M) | ext (stringclasses, 8 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 4-209) | max_stars_repo_name (stringlengths 5-121) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, nullable) | max_issues_repo_path (stringlengths 4-209) | max_issues_repo_name (stringlengths 5-121) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, nullable) | max_forks_repo_path (stringlengths 4-209) | max_forks_repo_name (stringlengths 5-121) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, nullable) | content (stringlengths 4-1.02M) | avg_line_length (float64 1.07-66.1k) | max_line_length (int64 4-266k) | alphanum_fraction (float64 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| cb4f24c5328a1a2bade8a19d198131fa85784909 | 10,902 | py | Python | kodrive/cli.py | Jvlythical/KodeBox | 325fe5e5870b7d4eb121dcc7e93be64aa16e7988 | ["MIT"] | 1 | 2021-09-02T02:53:05.000Z | 2021-09-02T02:53:05.000Z | kodrive/cli.py | Jvlythical/KodeBox | 325fe5e5870b7d4eb121dcc7e93be64aa16e7988 | ["MIT"] | null | null | null | kodrive/cli.py | Jvlythical/KodeBox | 325fe5e5870b7d4eb121dcc7e93be64aa16e7988 | ["MIT"] | 1 | 2018-09-01T08:41:01.000Z | 2018-09-01T08:41:01.000Z |
import click
import os, time, math, pdb
from . import cli_syncthing_adapter
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.version_option()
@click.group(
epilog="Run 'kodrive COMMAND --help' for more information on a command.",
context_settings=CONTEXT_SETTINGS
)
@click.pass_context
def main(ctx):
''' A tool to synchronize remote/local directories. '''
pass
#
# Subcommands start
#
### Sys
@main.command()
#@click.option('-k', '--key', is_flag=True, help="Return device key.")
#@click.option('-a', '--about', is_flag=True, help="List KodeDrive information.")
@click.option('-i', '--init', is_flag=True, help="Init KodeDrive daemon.")
@click.option('-e', '--exit', is_flag=True, help="Exit KodeDrive daemon.")
@click.option('-c', '--client', is_flag=True, help="Set Kodedrive into client mode.")
@click.option('-s', '--server', is_flag=True, help="Set Kodedrive into server mode.")
@click.option('-r', '--restart', is_flag=True, help="Restart KodeDrive daemon.")
@click.option('-d', '--delay', type=int, help="Set remote device detection speed.", metavar=" <INTEGER>")
#@click.option('-t', '--test', help="Test random functions :)")
def sys(**kwargs):
''' Manage system configuration. '''
output, err = cli_syncthing_adapter.sys(**kwargs)
if output:
click.echo("%s" % output, err=err)
else:
if not kwargs['init']:
click.echo(click.get_current_context().get_help())
### Ls
@main.command()
def ls():
''' List all synchronized directories. '''
heading, body = cli_syncthing_adapter.ls()
if heading:
click.echo(heading)
if body:
click.echo(body.strip())
### Link
@main.command()
@click.argument('key', nargs=1)
@click.option(
'-i', '--interval', default=30,
nargs=1, metavar="<INTEGER>",
help="Specify sync interval in seconds."
)
@click.option(
'-t', '--tag', nargs=1,
metavar=" <TEXT>",
help="Associate this folder with a tag."
)
@click.option(
'-p', '--path',
type=click.Path(exists=True, writable=True, resolve_path=True),
default=".", nargs=1, metavar=" <PATH>",
help="Specify which folder to link."
)
@click.option(
'-y', '--yes', nargs=1, is_flag=True,
default=False,
help="Bypass confirmation step."
)
def link(**kwargs):
''' Synchronize remote/local directory. '''
if kwargs['yes']:
output, err = cli_syncthing_adapter.link(**kwargs)
click.echo("%s" % output, err=err)
else:
if click.confirm("Are you sure you want to link %s?" % kwargs['path']):
output, err = cli_syncthing_adapter.link(**kwargs)
click.echo("%s" % output, err=err)
'''
*** Make the catch more specific
output = None
if click.confirm("Are you sure you want to link to %s?" % path):
try:
output = cli_syncthing_adapter.init(key, tag, path)
except ValueError:
raise
except:
cli_syncthing_adapter.start()
sleep(1.5)
output = cli_syncthing_adapter.init(key, tag, path)
finally:
if output:
click.echo("%s" % output)
'''
### Auth
@main.command()
@click.argument('key', nargs=1)
@click.option(
'-R', '--remove',
is_flag=True,
help="Deauthorize a directory."
)
@click.option(
'-y', '--yes', nargs=1, is_flag=True,
default=False,
help="Bypass confirmation step."
)
@click.option(
'-p', '--path',
type=click.Path(exists=True, writable=True, resolve_path=True),
default=".", nargs=1, metavar=" <PATH>",
help="Specify which folder to link."
)
def auth(**kwargs):
''' Authorize device synchronization. '''
"""
kodrive auth <path> <device_id (client)>
1. make sure path has been added to config.xml, server
2. make sure path is not shared by someone
3. add device_id to folder in config.xml, server
4. add device to devices in config.xml, server
"""
option = 'add'
path = kwargs['path']
key = kwargs['key']
if kwargs['remove']:
option = 'remove'
if kwargs['yes']:
output, err = cli_syncthing_adapter.auth(option, key, path)
click.echo("%s" % output, err=err)
else:
verb = 'authorize' if not kwargs['remove'] else 'de-authorize'
if click.confirm("Are you sure you want to %s this device to access %s?" % (verb, path)):
output, err = cli_syncthing_adapter.auth(option, key, path)
if output:
click.echo("%s" % output, err=err)
###
#
# *** Dir commands
#
@click.group()
@click.pass_context
def dir(ctx):
''' Manage synchronized directory settings. '''
pass
### Mv
@dir.command()
@click.argument('source', nargs=-1, required=True)
@click.argument('target', nargs=1)
def mv(source, target):
''' Move synchronized directory. '''
if os.path.isfile(target) and len(source) == 1:
if click.confirm("Are you sure you want to overwrite %s?" % target):
err_msg = cli_syncthing_adapter.mv_edge_case(source, target)
# Edge case: to match Bash 'mv' behavior and overwrite file
if err_msg:
click.echo(err_msg)
return
if len(source) > 1 and not os.path.isdir(target):
click.echo(click.get_current_context().get_help())
return
else:
err_msg, err = cli_syncthing_adapter.mv(source, target)
if err_msg:
click.echo(err_msg, err=err)
### Push
@dir.command()
@click.option(
'-v', '--verbose', is_flag=True,
help='Show synchronize progress.'
)
@click.argument(
'path', nargs=1,
type=click.Path(exists=True, writable=True, resolve_path=True),
)
def push(**kwargs):
''' Force synchronization of directory. '''
output, err = cli_syncthing_adapter.refresh(**kwargs)
if output:
click.echo("%s" % output, err=err)
if kwargs['verbose'] and not err:
with click.progressbar(
iterable=None,
length=100,
label='Synchronizing') as bar:
device_num = 0
max_devices = 1
prev_percent = 0
while True:
kwargs['progress'] = True
kwargs['device_num'] = device_num
data, err = cli_syncthing_adapter.refresh(**kwargs)
device_num = data['device_num']
max_devices = data['max_devices']
cur_percent = math.floor(data['percent']) - prev_percent
if cur_percent > 0:
bar.update(cur_percent)
prev_percent = math.floor(data['percent'])
if device_num < max_devices:
time.sleep(0.5)
else:
break
### Tag
@dir.command()
@click.argument(
'path',
type=click.Path(exists=True, writable=True, resolve_path=True),
nargs=1, metavar="PATH",
)
@click.argument('name', nargs=1)
def tag(path, name):
''' Change tag associated with directory. '''
output, err = cli_syncthing_adapter.tag(path, name)
click.echo("%s" % output, err=err)
### Free
@dir.command()
@click.argument(
'path',
type=click.Path(exists=True, writable=True, resolve_path=True),
nargs=1, metavar="PATH",
)
def free(**kwargs):
''' Stop synchronization of directory. '''
output, err = cli_syncthing_adapter.free(kwargs['path'])
click.echo("%s" % output, err=err)
### Add
@dir.command()
@click.option(
'-t', '--tag', nargs=1, metavar=" <TEXT>",
default="my-sync",
help="Associate this folder with a tag."
)
@click.option(
'-i', '--interval', default=30,
nargs=1, metavar="<INTEGER>",
help="Specify sync interval in seconds."
)
@click.argument(
'path',
type=click.Path(exists=True, writable=True, resolve_path=True),
nargs=1, metavar="PATH",
)
def add(**kwargs):
''' Make a directory shareable. '''
output, err = cli_syncthing_adapter.add(**kwargs)
click.echo("%s" % output, err=err)
### Info
@dir.command()
@click.argument(
'path', nargs=1,
type=click.Path(exists=True, writable=True, resolve_path=True),
)
def info(path):
''' Display synchronization information. '''
output, err = cli_syncthing_adapter.info(folder=path)
if err:
click.echo(output, err=err)
else:
stat = output['status']
click.echo("State: %s" % stat['state'])
click.echo("\nTotal Files: %s" % stat['localFiles'])
click.echo("Files Needed: %s" % stat['needFiles'])
click.echo("\nTotal Bytes: %s" % stat['localBytes'])
click.echo("Bytes Needed: %s" % stat['needBytes'])
progress = output['files_needed']['progress']
queued = output['files_needed']['queued']
rest = output['files_needed']['rest']
if len(progress) or len(queued) or len(rest):
click.echo("\nFiles Needed:")
for f in progress:
click.echo(" " + f['name'])
for f in queued:
click.echo(" " + f['name'])
for f in rest:
click.echo(" " + f['name'])
click.echo("\nDevices Authorized:\n%s" % output['auth_ls'])
### Key
@dir.command()
@click.option(
'-c', '--client', is_flag=True, help="Return directory key in client mode."
)
@click.option(
'-s', '--server', is_flag=True, help="Return directory key in server mode."
)
@click.argument(
'path', nargs=1,
type=click.Path(exists=True, writable=True, resolve_path=True),
)
def key(**kwargs):
''' Display directory key. '''
kwargs['folder'] = kwargs['path']
output, err = cli_syncthing_adapter.key(**kwargs)
if not output:
click.echo(click.get_current_context().get_help())
else:
click.echo("%s" % output, err=err)
###
#
# *** Sys commands
#
@click.group()
@click.pass_context
def sys(ctx):
''' Manage system-wide configuration. '''
pass
@sys.command()
@click.option(
'-d', '--device',
is_flag=True,
help="Display device key."
)
@click.option(
'-f', '--folder',
type=click.Path(exists=True, writable=True, resolve_path=True),
nargs=1, metavar="<PATH>", help="Display folder key."
)
def key(**kwargs):
''' Display system key. '''
output, err = cli_syncthing_adapter.key(device=True)
click.echo("%s" % output, err=err)
### Info
@sys.command()
def info(**kwargs):
''' Display system information. '''
output, err = cli_syncthing_adapter.info(device=True)
click.echo(output, err=err)
### Start
@sys.command()
@click.option('-i', '--inotify', is_flag=True, help="Enable inotify upon start.")
@click.option('-c', '--client', is_flag=True, help="Set Kodedrive into client mode.")
@click.option('-s', '--server', is_flag=True, help="Set Kodedrive into server mode.")
@click.option('-l', '--lcast', is_flag=True, help="Enable local announce.")
@click.option(
'-H', '--home', nargs=1, metavar=" <PATH>",
type=click.Path(exists=True, writable=True, resolve_path=True),
help="Set where config files are stored."
)
def start(**kwargs):
''' Start KodeDrive daemon. '''
output, err = cli_syncthing_adapter.start(**kwargs)
click.echo("%s" % output, err=err)
### Start
@sys.command()
def stop():
''' Stop KodeDrive daemon. '''
output, err = cli_syncthing_adapter.sys(exit=True)
click.echo("%s" % output, err=err)
'''
@sys.command()
def test():
output, err = cli_syncthing_adapter.sys(test='a')
click.echo("%s" % output, err=err)
'''
# Attach subcommands to main
main.add_command(dir)
main.add_command(sys)
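# --- Usage sketch (illustrative, not from the original file) -----------------
# A minimal way to exercise the Click group defined above without a running
# Syncthing daemon is Click's built-in test runner; invoking `--help` never
# calls into cli_syncthing_adapter, so it works as a smoke test. This assumes
# only that Click is installed; the guard keeps it out of normal CLI use.
if __name__ == '__main__':
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(main, ['--help'])
    print(result.exit_code)   # 0 on success
    print(result.output)      # usage text, including the attached dir/sys groups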
| 25.177829 | 106 | 0.642726 |
| 8c6a67fef9579516b8ab93015b14053c54a78701 | 701 | py | Python | endpoints/warp.py | Lifeismana/imgen | 05f703a93eb0dd4625cb3316dc1a9f88a0259f08 | ["MIT"] | null | null | null | endpoints/warp.py | Lifeismana/imgen | 05f703a93eb0dd4625cb3316dc1a9f88a0259f08 | ["MIT"] | null | null | null | endpoints/warp.py | Lifeismana/imgen | 05f703a93eb0dd4625cb3316dc1a9f88a0259f08 | ["MIT"] | null | null | null |
from io import BytesIO
from random import choice, randint
from flask import send_file
from utils import gm
from utils.endpoint import Endpoint, setup
@setup
class Warp(Endpoint):
params = ["avatar0"]
def generate(self, avatars, text, usernames, kwargs):
implode = "-{}".format(str(randint(3, 15)))
roll = "+{}+{}".format(randint(0, 256), randint(0, 256))
swirl = "{}{}".format(choice(["+", "-"]), randint(120, 180))
concat = ["-implode", implode, "-roll", roll, "-swirl", swirl]
output = gm.convert(avatars[0], concat, "png")
b = BytesIO(output)
b.seek(0)
return send_file(b, mimetype="image/png")
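# --- Illustration (not from the original file) -------------------------------
# For one draw of the random parameters above, the argument list handed to
# gm.convert() looks like, for example:
#     ["-implode", "-7", "-roll", "+31+208", "-swirl", "-154"]
# i.e. implode the avatar by a random negative factor, roll it by a random
# x/y offset, then swirl it by 120-180 degrees in a random direction before
# re-encoding the result as PNG and streaming it back with Flask's send_file.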
| 28.04 | 71 | 0.586305 |
| efd5290220f1b3380b3dba6ae3ca99b83521f5a2 | 3,489 | py | Python | src/nutrients_parser/parsers/parse_combinedata.py | TurtleToast/food-nutrient-parser-python | d9a7b6eb9a497b6fbe634e812115102f44f1114d | ["MIT"] | null | null | null | src/nutrients_parser/parsers/parse_combinedata.py | TurtleToast/food-nutrient-parser-python | d9a7b6eb9a497b6fbe634e812115102f44f1114d | ["MIT"] | null | null | null | src/nutrients_parser/parsers/parse_combinedata.py | TurtleToast/food-nutrient-parser-python | d9a7b6eb9a497b6fbe634e812115102f44f1114d | ["MIT"] | null | null | null |
from pprint import pprint
"""
Getting
[
[
['energie'],
['kJ 577 / kcal 137', 'kJ 2596 / kcal 617']
],
[
['vetten'],
['4.4 g', '19.8 g']
]
]
Wanting
[
{'per':
{
'name': '100 Gram',
'orginalText': 'per 100 g:',
'unit': 'g',
'value': '100'
},
'prepared': False,
'rows': [
{
'amount': {
'orginalText': 'kJ 577',
'unit': 'Kilojoules',
'value': '577'
},
'code': None,
'name': 'energie'
},
{
'amount': {
'orginalText': 'kcal 137',
'unit': 'Calories',
'value': '137'
},
'code': None,
'name': 'energie'
},
{
'amount': {
'orginalText': '4.4',
'unit': 'Gram',
'value': '4.4'
},
'code': None,
'name': 'vetten'
}
]
},
{'per':
{
'name': '450 Gram',
'orginalText': 'Per 450 g:',
'unit': 'g',
'value': '450'
},
'prepared': False,
'rows': [
{
'amount': {
'orginalText': 'kJ 2596',
'unit': 'Kilojoules',
'value': '2596'
},
'code': None,
'name': 'energie'
},
{
'amount': {
'orginalText': 'kcal 617',
'unit': 'Calories',
'value': '617'
},
'code': None,
'name': 'energie'
},
{
'amount': {
'orginalText': '19.8',
'unit': 'Gram',
'value': '19.8'
},
'code': None,
'name': 'vetten gram'
}]
}
]
[
[
['energie'],
[
[
{'orginalText': 'kJ 577', 'unit': 'Kilojoules', 'value': '577'},
{'orginalText': 'kcal 137', 'unit': 'Calories', 'value': '137'}
],
[
{'orginalText': 'kJ 2596', 'unit': 'Kilojoules', 'value': '2596'},
{'orginalText': 'kcal 617', 'unit': 'Calories', 'value': '617'}
]
]
],
[
['vetten'],
[
[
{'orginalText': '4.4 g', 'unit': 'Gram', 'value': '4.4'}
],
[
{'orginalText': '19.8 g', 'unit': 'Gram', 'value': '19.8'}
]
]
],
[
['waarvan verzadigde vetzuren'],
[
[
{'orginalText': '0.8 g', 'unit': 'Gram', 'value': '0.8'}
],
[
{'orginalText': '3.6 g', 'unit': 'Gram', 'value': '3.6'}
]
]
]
]
"""
def parse_combinedata(dataRows, perData):
for row in dataRows:
nutrient = row[0][0]
for i, value in enumerate(row[1]):
if len(perData) >= i + 1:
for x in value:
perData[i]['rows'].append({"name": nutrient, "code": None, "amount":x})
return perData
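# --- Usage sketch (illustrative, not from the original file) -----------------
# parse_combinedata() fans the parsed amounts of each nutrient row out over
# the per-serving entries in perData, appending one row per amount. The
# shapes below follow the docstring at the top of this file; the concrete
# values and the helper name are only examples and are not executed on import.
def _example():
    dataRows = [
        [
            ['energie'],
            [
                [{'orginalText': 'kJ 577', 'unit': 'Kilojoules', 'value': '577'}],
                [{'orginalText': 'kJ 2596', 'unit': 'Kilojoules', 'value': '2596'}],
            ],
        ],
    ]
    perData = [
        {'per': {'name': '100 Gram', 'unit': 'g', 'value': '100'}, 'prepared': False, 'rows': []},
        {'per': {'name': '450 Gram', 'unit': 'g', 'value': '450'}, 'prepared': False, 'rows': []},
    ]
    result = parse_combinedata(dataRows, perData)
    # result[0]['rows'] == [{'name': 'energie', 'code': None,
    #                        'amount': {'orginalText': 'kJ 577', 'unit': 'Kilojoules', 'value': '577'}}]
    # result[1]['rows'] holds the corresponding 'kJ 2596' entry.
    return result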
| 24.744681 | 93 | 0.300946 |
| 45de8d168dfaa3cb3da3acff5a6e825729f71b83 | 5,132 | py | Python | sderl/examples/pytorch/pg_math/2_rtg_pg.py | XiaohanZhangCMU/sderl | 9f8b0044b068d144400520c3b36b75403d394c7d | ["MIT"] | 2 | 2020-10-29T12:29:04.000Z | 2021-05-01T23:23:45.000Z | sderl/examples/pytorch/pg_math/2_rtg_pg.py | XiaohanZhangCMU/sderl | 9f8b0044b068d144400520c3b36b75403d394c7d | ["MIT"] | null | null | null | sderl/examples/pytorch/pg_math/2_rtg_pg.py | XiaohanZhangCMU/sderl | 9f8b0044b068d144400520c3b36b75403d394c7d | ["MIT"] | null | null | null |
import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
from torch.optim import Adam
import numpy as np
import gym
from gym.spaces import Discrete, Box
def mlp(sizes, activation=nn.Tanh, output_activation=nn.Identity):
# Build a feedforward neural network.
layers = []
for j in range(len(sizes)-1):
act = activation if j < len(sizes)-2 else output_activation
layers += [nn.Linear(sizes[j], sizes[j+1]), act()]
return nn.Sequential(*layers)
def reward_to_go(rews):
n = len(rews)
rtgs = np.zeros_like(rews)
for i in reversed(range(n)):
rtgs[i] = rews[i] + (rtgs[i+1] if i+1 < n else 0)
return rtgs
def train(env_name='CartPole-v0', hidden_sizes=[32], lr=1e-2,
epochs=50, batch_size=5000, render=False):
# make environment, check spaces, get obs / act dims
env = gym.make(env_name)
assert isinstance(env.observation_space, Box), \
"This example only works for envs with continuous state spaces."
assert isinstance(env.action_space, Discrete), \
"This example only works for envs with discrete action spaces."
obs_dim = env.observation_space.shape[0]
n_acts = env.action_space.n
# make core of policy network
logits_net = mlp(sizes=[obs_dim]+hidden_sizes+[n_acts])
# make function to compute action distribution
def get_policy(obs):
logits = logits_net(obs)
return Categorical(logits=logits)
# make action selection function (outputs int actions, sampled from policy)
def get_action(obs):
return get_policy(obs).sample().item()
# make loss function whose gradient, for the right data, is policy gradient
def compute_loss(obs, act, weights):
logp = get_policy(obs).log_prob(act)
return -(logp * weights).mean()
# make optimizer
optimizer = Adam(logits_net.parameters(), lr=lr)
# for training policy
def train_one_epoch():
# make some empty lists for logging.
batch_obs = [] # for observations
batch_acts = [] # for actions
batch_weights = [] # for reward-to-go weighting in policy gradient
batch_rets = [] # for measuring episode returns
batch_lens = [] # for measuring episode lengths
# reset episode-specific variables
obs = env.reset() # first obs comes from starting distribution
done = False # signal from environment that episode is over
ep_rews = [] # list for rewards accrued throughout ep
# render first episode of each epoch
finished_rendering_this_epoch = False
# collect experience by acting in the environment with current policy
while True:
# rendering
if (not finished_rendering_this_epoch) and render:
env.render()
# save obs
batch_obs.append(obs.copy())
# act in the environment
act = get_action(torch.as_tensor(obs, dtype=torch.float32))
obs, rew, done, _ = env.step(act)
# save action, reward
batch_acts.append(act)
ep_rews.append(rew)
if done:
# if episode is over, record info about episode
ep_ret, ep_len = sum(ep_rews), len(ep_rews)
batch_rets.append(ep_ret)
batch_lens.append(ep_len)
# the weight for each logprob(a_t|s_t) is reward-to-go from t
batch_weights += list(reward_to_go(ep_rews))
# reset episode-specific variables
obs, done, ep_rews = env.reset(), False, []
# won't render again this epoch
finished_rendering_this_epoch = True
# end experience loop if we have enough of it
if len(batch_obs) > batch_size:
break
# take a single policy gradient update step
optimizer.zero_grad()
batch_loss = compute_loss(obs=torch.as_tensor(batch_obs, dtype=torch.float32),
act=torch.as_tensor(batch_acts, dtype=torch.int32),
weights=torch.as_tensor(batch_weights, dtype=torch.float32)
)
batch_loss.backward()
optimizer.step()
return batch_loss, batch_rets, batch_lens
# training loop
for i in range(epochs):
batch_loss, batch_rets, batch_lens = train_one_epoch()
print('epoch: %3d \t loss: %.3f \t return: %.3f \t ep_len: %.3f'%
(i, batch_loss, np.mean(batch_rets), np.mean(batch_lens)))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env_name', '--env', type=str, default='CartPole-v0')
parser.add_argument('--render', action='store_true')
parser.add_argument('--lr', type=float, default=1e-2)
args = parser.parse_args()
print('\nUsing reward-to-go formulation of policy gradient.\n')
train(env_name=args.env_name, render=args.render, lr=args.lr)
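# --- Worked example (illustrative, not from the original script) -------------
# reward_to_go() replaces each reward r_t with the return-to-go
# R_t = r_t + r_{t+1} + ... + r_T, which is exactly what the reversed loop
# above accumulates. A quick sanity check; the helper name is hypothetical
# and it is not executed automatically:
def _reward_to_go_check():
    rews = np.array([1.0, 2.0, 3.0])
    rtgs = reward_to_go(rews)            # -> array([6., 5., 3.])
    assert np.allclose(rtgs, [6.0, 5.0, 3.0])
    return rtgs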
| 37.735294 | 93 | 0.618472 |
| e8be386288851b570510f3a337659570475b2f1d | 43,627 | py | Python | project/target/node-modules/webjars/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py | chen2007999/Dunno | c158b6d7221cb42894ab01b0b0b115ab334c83b7 | ["Apache-2.0"] | 283 | 2015-01-27T22:43:26.000Z | 2022-02-14T11:50:18.000Z | project/target/node-modules/webjars/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py | chen2007999/Dunno | c158b6d7221cb42894ab01b0b0b115ab334c83b7 | ["Apache-2.0"] | 104 | 2015-02-02T22:59:58.000Z | 2019-02-06T20:09:42.000Z | project/target/node-modules/webjars/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py | chen2007999/Dunno | c158b6d7221cb42894ab01b0b0b115ab334c83b7 | ["Apache-2.0"] | 48 | 2015-02-26T18:03:06.000Z | 2022-02-25T21:42:27.000Z |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The functions ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
"""A settings that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x'
return msbuild_format % int(value)
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
msbuild_settings_name, setting_type):
"""Defines a setting that may have moved to a new section.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_settings_name: the MSVS name of the setting.
msbuild_tool_name: the name of the MSBuild tool to place the setting under.
msbuild_settings_name: the MSBuild name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_settings_name] = (
setting_type.ValidateMSVS)
validator = setting_type.ValidateMSBuild
_msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSVS.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(unused_value, unused_msbuild_settings):
# Since this is for MSVS only settings, no translation will happen.
pass
_msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
"""Defines a setting that's handled via a command line option in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting that if 'true' becomes a flag
flag: the flag to insert at the end of the AdditionalOptions
"""
def _Translate(value, msbuild_settings):
if value == 'true':
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if 'AdditionalOptions' in tool_settings:
new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
def _Translate(value, msbuild_settings):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if value == '0':
tool_settings['PreprocessToFile'] = 'false'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '1': # /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '2': # /EP /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'true'
else:
raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
# Create a bogus validator that looks for '0', '1', or '2'
msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
_msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
msbuild_validator = _boolean.ValidateMSBuild
msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
def FixVCMacroSlashes(s):
"""Replace macros which have excessive following slashes.
These macros are known to have a built-in trailing slash. Furthermore, many
scripts hiccup on processing paths with extra slashes in the middle.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
s = fix_vc_macro_slashes_regex.sub(r'\1', s)
return s
def ConvertVCMacrosToMSBuild(s):
"""Convert the the MSVS macros found in the string to the MSBuild equivalent.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
replace_map = {
'$(ConfigurationName)': '$(Configuration)',
'$(InputDir)': '%(RootDir)%(Directory)',
'$(InputExt)': '%(Extension)',
'$(InputFileName)': '%(Filename)%(Extension)',
'$(InputName)': '%(Filename)',
'$(InputPath)': '%(FullPath)',
'$(ParentName)': '$(ProjectFileName)',
'$(PlatformName)': '$(Platform)',
'$(SafeInputName)': '%(Filename)',
}
for old, new in replace_map.iteritems():
s = s.replace(old, new)
s = FixVCMacroSlashes(s)
return s
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
# We don't know this setting. Give a warning.
print >> stderr, ('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting))
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSBuild.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
print >> stderr, ('Warning: unrecognized setting %s/%s' %
(tool_name, setting))
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MSBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall'])) # /Gz
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2'])) # /arch:SSE2
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
new=['Send'])) # /errorReport:send
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true', # /clr
'Pure', # /clr:pure
'Safe', # /clr:safe
'OldSyntax'])) # /clr:oldSyntax
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE
'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# These settings generate correctly in the MSVS output files when using
# e.g. DelayLoadDLLs! or AdditionalDependencies! to exclude files from
# configuration entries, but result in spurious artifacts which can be
# safely ignored here. See crbug.com/246570
_MSVSOnly(_link, 'AdditionalLibraryDirectories_excluded', _folder_list)
_MSVSOnly(_link, 'DelayLoadDLLs_excluded', _file_list)
_MSVSOnly(_link, 'AdditionalDependencies_excluded', _file_list)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_link, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in VisualStudio 2008 to set the following properties.
# However they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
_Enumeration([], new=['Stub', # /server stub
'None'])) # /server none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
# TODO(jeanluc) I don't think these are genuine settings but byproducts of Gyp.
_MSVSOnly(_lib, 'AdditionalLibraryDirectories_excluded', _folder_list)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TargetMachine', _target_machine_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Mt settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
| 41.470532
| 80
| 0.682582
|
96ecc91cd9c34fe7a6360bb4d034ffcf45ca5573
| 2,898
|
py
|
Python
|
models/reconstruction.py
|
lianqing11/PyCDA
|
1a4c64a71f2908cb325a7c4cf65e5b169bcd5d5a
|
[
"MIT"
] | 69
|
2019-09-20T01:04:18.000Z
|
2022-02-06T11:52:03.000Z
|
models/reconstruction.py
|
lianqing11/pycda
|
1a4c64a71f2908cb325a7c4cf65e5b169bcd5d5a
|
[
"MIT"
] | 6
|
2019-09-21T01:43:58.000Z
|
2020-10-30T14:12:13.000Z
|
models/reconstruction.py
|
lianqing11/pycda
|
1a4c64a71f2908cb325a7c4cf65e5b169bcd5d5a
|
[
"MIT"
] | 6
|
2020-02-06T04:15:16.000Z
|
2022-01-30T23:04:48.000Z
|
import torch
import torch.nn as nn
from lib.nn import SynchronizedBatchNorm2d
import torch.nn.functional as F
def gaussian_weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1 and classname.find('Conv') == 0:
# print m.__class__.__name__
m.weight.data.normal_(0.0, 0.02)
class INSResBlock(nn.Module):
def conv3x3(self, inplances, out_plances, stride=1):
return nn.Conv2d(inplances, out_plances, kernel_size=3, stride=stride, padding=1)
def __init__(self, inplanes, planes, stride=1, dropout=0.0):
super(INSResBlock, self).__init__()
model = []
model += [self.conv3x3(inplanes, planes, stride)]
model += [nn.InstanceNorm2d(planes)]
model += [nn.LeakyReLU(inplace=True)]
model += [self.conv3x3(planes, planes)]
model += [nn.InstanceNorm2d(planes)]
if dropout > 0:
model += [nn.Dropout(p=dropout)]
self.model = nn.Sequential(*model)
self.model.apply(gaussian_weights_init)
def forward(self, x):
residual = x
out = self.model(x)
#print(x.size(), residual.size())
out += residual
return out
class LeakyReLUConv2d(nn.Module):
def __init__(self, n_in, n_out, kernel_size, stride, padding=0):
super(LeakyReLUConv2d, self).__init__()
model = []
model += [nn.Conv2d(n_in, n_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=True)]
model += [nn.LeakyReLU(inplace=True)]
self.model = nn.Sequential(*model)
self.model.apply(gaussian_weights_init)
def forward(self, x):
return self.model(x)
class LeakyReLUConvTranspose2d(nn.Module):
def __init__(self, n_in, n_out, kernel_size, stride, padding=0, output_padding=0):
super(LeakyReLUConvTranspose2d, self).__init__()
model = []
model += [nn.ConvTranspose2d(n_in, n_out, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, bias=True)]
model += [nn.LeakyReLU(inplace=True)]
self.model = nn.Sequential(*model)
self.model.apply(gaussian_weights_init)
def forward(self, x):
#print(type(x))
return self.model(x)
class UNIT(nn.Module):
def __init__(self, input_dim = 2048):
super(UNIT, self).__init__()
ch = 256
enc = []
tch = ch
enc += [LeakyReLUConv2d(input_dim, ch, kernel_size=7, stride=1, padding=3)]
dec = []
for i in range(1):
dec += [INSResBlock(tch, tch)]
for i in range(3):
dec += [LeakyReLUConvTranspose2d(tch, tch//2, kernel_size=3, stride=2, padding=1, output_padding=1)]
tch = tch // 2
dec += [LeakyReLUConv2d(tch, 3, kernel_size=1, stride=1, padding=0)]
dec += [nn.Tanh()]
self.encoder = nn.Sequential(*enc)
self.decoder = nn.Sequential(*dec)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
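# Illustrative sketch (not part of the original file): pushing a dummy
# 2048-channel feature map through UNIT. The three stride-2 transposed
# convolutions upsample the spatial size 8x and the final 1x1 conv + Tanh
# maps to a 3-channel image in [-1, 1]. The input size below is made up.
def _unit_shape_example():
    net = UNIT(input_dim=2048)
    feats = torch.randn(1, 2048, 8, 8)
    return net(feats).shape  # expected: torch.Size([1, 3, 64, 64])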
| 30.829787
| 145
| 0.649413
|
226351fbd175d1200a4729c8aa20ebc8fdf21c23
| 13,236
|
py
|
Python
|
src/v5.3/enrollment/swagger_client/models/student_student_education_organization_association.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | 2
|
2021-04-27T17:18:17.000Z
|
2021-04-27T19:14:39.000Z
|
src/v5.3/enrollment/swagger_client/models/student_student_education_organization_association.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | null | null | null |
src/v5.3/enrollment/swagger_client/models/student_student_education_organization_association.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | 1
|
2022-01-06T09:43:11.000Z
|
2022-01-06T09:43:11.000Z
|
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class StudentStudentEducationOrganizationAssociation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'login_id': 'str',
'sex_descriptor': 'str',
'hispanic_latino_ethnicity': 'bool',
'education_organization_type': 'str',
'education_organization_id': 'int',
'languages': 'list[StudentStudentEducationOrganizationAssociationStudentEducationOrganizationAssociationLanguage]',
'races': 'list[StudentStudentEducationOrganizationAssociationStudentEducationOrganizationAssociationRace]'
}
attribute_map = {
'login_id': 'loginId',
'sex_descriptor': 'sexDescriptor',
'hispanic_latino_ethnicity': 'hispanicLatinoEthnicity',
'education_organization_type': 'educationOrganizationType',
'education_organization_id': 'educationOrganizationId',
'languages': 'languages',
'races': 'races'
}
def __init__(self, login_id=None, sex_descriptor=None, hispanic_latino_ethnicity=None, education_organization_type=None, education_organization_id=None, languages=None, races=None, _configuration=None): # noqa: E501
"""StudentStudentEducationOrganizationAssociation - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._login_id = None
self._sex_descriptor = None
self._hispanic_latino_ethnicity = None
self._education_organization_type = None
self._education_organization_id = None
self._languages = None
self._races = None
self.discriminator = None
if login_id is not None:
self.login_id = login_id
self.sex_descriptor = sex_descriptor
if hispanic_latino_ethnicity is not None:
self.hispanic_latino_ethnicity = hispanic_latino_ethnicity
if education_organization_type is not None:
self.education_organization_type = education_organization_type
self.education_organization_id = education_organization_id
if languages is not None:
self.languages = languages
if races is not None:
self.races = races
@property
def login_id(self):
"""Gets the login_id of this StudentStudentEducationOrganizationAssociation. # noqa: E501
The login ID for the user; used for security access control interface. # noqa: E501
:return: The login_id of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:rtype: str
"""
return self._login_id
@login_id.setter
def login_id(self, login_id):
"""Sets the login_id of this StudentStudentEducationOrganizationAssociation.
The login ID for the user; used for security access control interface. # noqa: E501
:param login_id: The login_id of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:type: str
"""
if (self._configuration.client_side_validation and
login_id is not None and len(login_id) > 60):
raise ValueError("Invalid value for `login_id`, length must be less than or equal to `60`") # noqa: E501
self._login_id = login_id
@property
def sex_descriptor(self):
"""Gets the sex_descriptor of this StudentStudentEducationOrganizationAssociation. # noqa: E501
The student's gender as last reported to the education organization. # noqa: E501
:return: The sex_descriptor of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:rtype: str
"""
return self._sex_descriptor
@sex_descriptor.setter
def sex_descriptor(self, sex_descriptor):
"""Sets the sex_descriptor of this StudentStudentEducationOrganizationAssociation.
The student's gender as last reported to the education organization. # noqa: E501
:param sex_descriptor: The sex_descriptor of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and sex_descriptor is None:
raise ValueError("Invalid value for `sex_descriptor`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
sex_descriptor is not None and len(sex_descriptor) > 306):
raise ValueError("Invalid value for `sex_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._sex_descriptor = sex_descriptor
@property
def hispanic_latino_ethnicity(self):
"""Gets the hispanic_latino_ethnicity of this StudentStudentEducationOrganizationAssociation. # noqa: E501
An indication that the individual traces his or her origin or descent to Mexico, Puerto Rico, Cuba, Central, and South America, and other Spanish cultures, regardless of race, as last reported to the education organization. The term, \"Spanish origin,\" can be used in addition to \"Hispanic or Latino.\" # noqa: E501
:return: The hispanic_latino_ethnicity of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:rtype: bool
"""
return self._hispanic_latino_ethnicity
@hispanic_latino_ethnicity.setter
def hispanic_latino_ethnicity(self, hispanic_latino_ethnicity):
"""Sets the hispanic_latino_ethnicity of this StudentStudentEducationOrganizationAssociation.
An indication that the individual traces his or her origin or descent to Mexico, Puerto Rico, Cuba, Central, and South America, and other Spanish cultures, regardless of race, as last reported to the education organization. The term, \"Spanish origin,\" can be used in addition to \"Hispanic or Latino.\" # noqa: E501
:param hispanic_latino_ethnicity: The hispanic_latino_ethnicity of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:type: bool
"""
self._hispanic_latino_ethnicity = hispanic_latino_ethnicity
@property
def education_organization_type(self):
"""Gets the education_organization_type of this StudentStudentEducationOrganizationAssociation. # noqa: E501
# noqa: E501
:return: The education_organization_type of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:rtype: str
"""
return self._education_organization_type
@education_organization_type.setter
def education_organization_type(self, education_organization_type):
"""Sets the education_organization_type of this StudentStudentEducationOrganizationAssociation.
# noqa: E501
:param education_organization_type: The education_organization_type of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:type: str
"""
if (self._configuration.client_side_validation and
education_organization_type is not None and len(education_organization_type) > 128):
raise ValueError("Invalid value for `education_organization_type`, length must be less than or equal to `128`") # noqa: E501
self._education_organization_type = education_organization_type
@property
def education_organization_id(self):
"""Gets the education_organization_id of this StudentStudentEducationOrganizationAssociation. # noqa: E501
The identifier assigned to an education organization. # noqa: E501
:return: The education_organization_id of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:rtype: int
"""
return self._education_organization_id
@education_organization_id.setter
def education_organization_id(self, education_organization_id):
"""Sets the education_organization_id of this StudentStudentEducationOrganizationAssociation.
The identifier assigned to an education organization. # noqa: E501
:param education_organization_id: The education_organization_id of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:type: int
"""
if self._configuration.client_side_validation and education_organization_id is None:
raise ValueError("Invalid value for `education_organization_id`, must not be `None`") # noqa: E501
self._education_organization_id = education_organization_id
@property
def languages(self):
"""Gets the languages of this StudentStudentEducationOrganizationAssociation. # noqa: E501
An unordered collection of studentEducationOrganizationAssociationLanguages. # noqa: E501
:return: The languages of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:rtype: list[StudentStudentEducationOrganizationAssociationStudentEducationOrganizationAssociationLanguage]
"""
return self._languages
@languages.setter
def languages(self, languages):
"""Sets the languages of this StudentStudentEducationOrganizationAssociation.
An unordered collection of studentEducationOrganizationAssociationLanguages. # noqa: E501
:param languages: The languages of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:type: list[StudentStudentEducationOrganizationAssociationStudentEducationOrganizationAssociationLanguage]
"""
self._languages = languages
@property
def races(self):
"""Gets the races of this StudentStudentEducationOrganizationAssociation. # noqa: E501
An unordered collection of studentEducationOrganizationAssociationRaces. # noqa: E501
:return: The races of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:rtype: list[StudentStudentEducationOrganizationAssociationStudentEducationOrganizationAssociationRace]
"""
return self._races
@races.setter
def races(self, races):
"""Sets the races of this StudentStudentEducationOrganizationAssociation.
An unordered collection of studentEducationOrganizationAssociationRaces. # noqa: E501
:param races: The races of this StudentStudentEducationOrganizationAssociation. # noqa: E501
:type: list[StudentStudentEducationOrganizationAssociationStudentEducationOrganizationAssociationRace]
"""
self._races = races
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(StudentStudentEducationOrganizationAssociation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StudentStudentEducationOrganizationAssociation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, StudentStudentEducationOrganizationAssociation):
return True
return self.to_dict() != other.to_dict()
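# Illustrative sketch (not part of the original file): constructing a minimal
# association and serializing it with to_dict(). The descriptor URI and
# organization id below are made-up sample values, not taken from a real ODS.
def _association_example():
    assoc = StudentStudentEducationOrganizationAssociation(
        sex_descriptor='uri://ed-fi.org/SexDescriptor#Female',
        education_organization_id=255901,
        hispanic_latino_ethnicity=False,
    )
    return assoc.to_dict()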
| 43.396721
| 482
| 0.698247
|
c4af05987c899a33e3825749bbcd70ec0dad73b8
| 21,420
|
py
|
Python
|
symjax/rl.py
|
RandallBalestriero/TheanoXLA
|
d8778c2eb3254b478cef4f45d934bf921e695619
|
[
"Apache-2.0"
] | 67
|
2020-02-21T21:26:46.000Z
|
2020-06-14T14:25:42.000Z
|
symjax/rl.py
|
RandallBalestriero/TheanoXLA
|
d8778c2eb3254b478cef4f45d934bf921e695619
|
[
"Apache-2.0"
] | 8
|
2020-02-22T14:45:56.000Z
|
2020-06-07T16:56:47.000Z
|
symjax/rl.py
|
RandallBalestriero/TheanoXLA
|
d8778c2eb3254b478cef4f45d934bf921e695619
|
[
"Apache-2.0"
] | 4
|
2020-02-21T17:34:46.000Z
|
2020-05-30T08:30:14.000Z
|
# main class
import symjax
import numpy as np
from symjax import nn
import symjax.tensor as T
from symjax.probabilities import Categorical, MultivariateNormal
import math
import matplotlib.pyplot as plt
from collections import deque
import gym
import random
import scipy.signal
# https://gist.github.com/heerad/1983d50c6657a55298b67e69a2ceeb44
# ===========================
# Set rewards
# ===========================
class Reward(object):
"""reward management (discounts, n-step)
Args:
-----
factor: float
the amount of rescaling of the reward after discount has been
applied.
n: int or inf
the n-step reward discount calculation, for the infinite case
prefer giving ``np.inf``
"""
def __init__(self, factor, gamma=None, n=8):
# Reward parameters
self.factor = factor
self.n = n
self.gamma = gamma
# Set step rewards to total episode reward
def total(self, ep_batch, tot_reward):
for step in ep_batch:
step[2] = tot_reward * self.factor
return ep_batch
# Set step rewards to discounted reward
def discount(self, ep_batch):
if self.n == 1:
return ep_batch
x = ep_batch[:, 2]
if self.n == np.inf:
b = [1]
a = [1, -self.gamma]
else:
b = self.gamma ** np.arange(self.n)
a = [1]
discounted = scipy.signal.lfilter(b=b, a=a, x=x[::-1], axis=0)[::-1]
discounted *= self.factor
ep_batch[:, 2] = discounted
return ep_batch
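# Illustrative sketch (not part of the original file): how Reward.discount
# rewrites the reward column (index 2) of an episode batch. The episode data
# below is made up; with n=np.inf and factor=1 each entry becomes the usual
# infinite-horizon discounted return-to-go.
def _reward_discount_example():
    demo = np.zeros((5, 5), dtype=np.float32)
    demo[:, 2] = [1.0, 0.0, 0.0, 0.0, 1.0]
    discounted = Reward(factor=1.0, gamma=0.99, n=np.inf).discount(demo)
    return discounted[:, 2]  # last entry stays 1.0, earlier entries accumulate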
class BasicBuffer:
def __init__(self, size):
self.buffer = deque(maxlen=size)
self.priorities = deque(maxlen=size)
    def clear(self):
        size = self.buffer.maxlen
        del self.buffer
        del self.priorities
        self.buffer = deque(maxlen=size)
        self.priorities = deque(maxlen=size)
@property
def len(self):
return len(self.buffer)
def push(self, *args):
if not hasattr(self, "n_components"):
self.n_components = len(args)
else:
assert len(args) == self.n_components
self.buffer.append(args)
self.priorities.append(1)
def update_priorities(self, indices, priorities):
for indx, priority in zip(indices, priorities):
self.priorities[indx] = 1 + priority
def sample(self, length, asarray=True, return_indices=False):
assert len(self.buffer) > 0
items = [[] for i in range(self.n_components)]
indices = np.arange(len(self.buffer), dtype="int32")
batch = random.choices(indices, weights=self.priorities, k=length)
self.current_indices = batch
for experience in batch:
values = self.buffer[experience]
for item, value in zip(items, values):
item.append(value)
if asarray:
for i in range(len(items)):
items[i] = np.asarray(items[i], dtype=np.float32)
if return_indices:
return items + [batch]
else:
return items
### Create both the actor and critic networks at once ###
### Q(s, mu(s)) returns the maximum Q for a given state s ###
class ddpg:
def __init__(
self,
        env,
actor,
critic,
gamma=0.99,
tau=0.01,
lr=1e-3,
batch_size=32,
epsilon=0.1,
epsilon_decay=1 / 1000,
min_epsilon=0.01,
reward=None,
):
# comment out this line if you don't want to record a video of the agent
# if save_folder is not None:
# test_env = gym.wrappers.Monitor(test_env)
# get size of state space and action space
num_states = env.observation_space.shape[0]
continuous = type(env.action_space) == gym.spaces.box.Box
if continuous:
num_actions = env.action_space.shape[0]
action_max = env.action_space.high[0]
else:
num_actions = env.action_space.n
action_max = 1
self.batch_size = batch_size
self.num_states = num_states
self.num_actions = num_actions
self.state_dim = (batch_size, num_states)
self.action_dim = (batch_size, num_actions)
self.gamma = gamma
self.continuous = continuous
self.observ_min = np.clip(env.observation_space.low, -20, 20)
self.observ_max = np.clip(env.observation_space.high, -20, 20)
self.env = env
self.reward = reward
# state
state = T.Placeholder((batch_size, num_states), "float32")
gradients = T.Placeholder((batch_size, num_actions), "float32")
action = T.Placeholder((batch_size, num_actions), "float32")
target = T.Placeholder((batch_size, 1), "float32")
with symjax.Scope("actor_critic"):
scaled_out = action_max * actor(state)
Q = critic(state, action)
a_loss = -T.sum(gradients * scaled_out)
q_loss = T.mean((Q - target) ** 2)
nn.optimizers.Adam(a_loss + q_loss, lr)
self.update = symjax.function(
state,
action,
target,
gradients,
outputs=[a_loss, q_loss],
updates=symjax.get_updates(),
)
g = symjax.gradients(T.mean(Q), [action])[0]
self.get_gradients = symjax.function(state, action, outputs=g)
# also create the target variants
with symjax.Scope("actor_critic_target"):
scaled_out_target = action_max * actor(state)
Q_target = critic(state, action)
self.actor_predict = symjax.function(state, outputs=scaled_out)
self.actor_predict_target = symjax.function(state, outputs=scaled_out_target)
self.critic_predict = symjax.function(state, action, outputs=Q)
self.critic_predict_target = symjax.function(state, action, outputs=Q_target)
t_params = symjax.get_variables(scope="/actor_critic_target/*")
params = symjax.get_variables(scope="/actor_critic/*")
replacement = {t: tau * e + (1 - tau) * t for t, e in zip(t_params, params)}
self.update_target = symjax.function(updates=replacement)
single_state = T.Placeholder((1, num_states), "float32")
if not continuous:
            scaled_out = scaled_out.argmax(-1)
self.act = symjax.function(
single_state, outputs=scaled_out.clone({state: single_state})[0]
)
def train(self):
(
s_batch,
a_batch,
r_batch,
s2_batch,
t_batch,
) = self.env.buffer.sample(self.batch_size)
# Calculate targets
target_q = self.critic_predict_target(
s2_batch, self.actor_predict_target(s2_batch)
)
# One step TD targets y_i for (s,a) from experience replay
# = r_i + gamma*Q_slow(s',mu_slow(s')) if s' is not terminal
# = r_i if s' terminal
discount = self.gamma ** self.reward.n
y_i = r_batch[:, None] + discount * (1 - t_batch[:, None]) * target_q
if not self.continuous:
a_batch = (np.arange(self.num_actions) == a_batch[:, None]).astype(
"float32"
)
td_error = np.abs(y_i - self.critic_predict(s_batch, a_batch))
self.env.buffer.update_priorities(self.env.buffer.current_indices, td_error)
# Update the critic given the targets
gradients = self.get_gradients(s_batch, a_batch)
a_loss, q_loss = self.update(s_batch, a_batch, y_i, gradients)
# Update target networks
self.update_target()
return a_loss, q_loss
class OrnsteinUhlenbeckProcess:
"""dXt = theta*(mu-Xt)*dt + sigma*dWt"""
def __init__(
self,
dim,
theta=0.15,
mu=0.0,
sigma=0.2,
noise_decay=0.99,
initial_noise_scale=1,
):
self.theta = theta
self.mu = mu
self.sigma = sigma
self.dim = dim
self.noise_decay = noise_decay
self.initial_noise_scale = initial_noise_scale
self.reset()
def __call__(self, action, episode):
self.noise_process = self.theta * (
self.mu - self.process
        ) + self.sigma * np.random.randn(self.dim)
        # integrate the increment so successive noise samples are correlated
        self.process = self.process + self.noise_process
        self.noise_scale = self.initial_noise_scale * self.noise_decay ** episode
        return action + self.noise_scale * self.process
def reset(self):
self.process = np.zeros(self.dim)
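# Illustrative sketch (not part of the original file): drawing a few
# temporally correlated exploration-noise samples around a zero action.
# The dimension and scale below are made up.
def _ou_noise_example():
    ou = OrnsteinUhlenbeckProcess(dim=1, initial_noise_scale=0.5)
    return [float(ou(np.zeros(1), episode=0)[0]) for _ in range(3)]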
class PPO:
def __init__(
self,
state_dim,
action_dim,
lr,
gamma,
K_epochs,
eps_clip,
actor,
critic,
batch_size,
continuous=True,
):
self.lr = lr
self.gamma = gamma
self.eps_clip = eps_clip
self.K_epochs = K_epochs
self.batch_size = batch_size
state = T.Placeholder((batch_size,) + state_dim, "float32")
reward = T.Placeholder((batch_size,), "float32")
old_action_logprobs = T.Placeholder((batch_size,), "float32")
logits = actor(state)
if not continuous:
given_action = T.Placeholder((batch_size,), "int32")
dist = Categorical(logits=logits)
else:
mean = T.tanh(logits[:, : logits.shape[1] // 2])
std = T.exp(logits[:, logits.shape[1] // 2 :])
given_action = T.Placeholder((batch_size, action_dim), "float32")
dist = MultivariateNormal(mean=mean, diag_std=std)
sample = dist.sample()
sample_logprobs = dist.log_prob(sample)
self._act = symjax.function(state, outputs=[sample, sample_logprobs])
given_action_logprobs = dist.log_prob(given_action)
# Finding the ratio (pi_theta / pi_theta__old):
ratios = T.exp(sample_logprobs - old_action_logprobs)
ratios = T.clip(ratios, None, 1 + self.eps_clip)
state_value = critic(state)
advantages = reward - T.stop_gradient(state_value)
loss = (
-T.mean(ratios * advantages)
+ 0.5 * T.mean((state_value - reward) ** 2)
- 0.0 * dist.entropy().mean()
)
print(loss)
nn.optimizers.Adam(loss, self.lr)
self.learn = symjax.function(
state,
given_action,
reward,
old_action_logprobs,
outputs=T.mean(loss),
updates=symjax.get_updates(),
)
def act(self, state, memory):
action, action_logprobs = self._act(state[None, :].repeat(self.batch_size, 0))
memory.states.append(state)
memory.actions.append(action[0])
memory.logprobs.append(action_logprobs[0])
return action
def train(self):
(
s_batch,
a_batch,
r_batch,
s2_batch,
t_batch,
batch,
) = self.env.buffer.sample(self.batch_size)
# Calculate targets
target_q = self.critic_predict_target(
s2_batch, self.actor_predict_target(s2_batch)
)
y_i = r_batch + self.gamma * (1 - t_batch[:, None]) * target_q
if not self.continuous:
a_batch = (np.arange(self.num_actions) == a_batch[:, None]).astype(
"float32"
)
td_error = np.abs(y_i - self.critic_predict(s_batch, a_batch))
self.env.buffer.update_priorities(batch, td_error)
# Update the critic given the targets
q_loss, predicted_q_value = self.train_critic(s_batch, a_batch, y_i)
# Update the actor policy using the sampled gradient
a_outs = self.actor_predict(s_batch)
# grads = self.get_action_grads(s_batch, a_outs)
a_loss = self.train_actor(s_batch) # , grads)
# Update target networks
self.update_target()
return a_loss, q_loss
rewards = []
discounted_reward = 0
for reward, is_terminal in zip(
reversed(memory.rewards), reversed(memory.is_terminals)
):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (self.gamma * discounted_reward)
rewards.insert(0, discounted_reward)
rewards = np.array(rewards)
# Normalizing the rewards:
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
# convert list to tensor
old_states = memory.states
old_actions = memory.actions
old_logprobs = memory.logprobs
# Optimize policy for K epochs:
for _ in range(4):
loss = self.learn(old_states, old_actions, rewards, old_logprobs)
print("loss", loss)
# Copy new weights into old policy:
# Deep Q Network off-policy
class DeepQNetwork:
def __init__(
self,
n_actions,
n_states,
Q,
learning_rate=0.01,
reward_decay=0.8,
e_greedy=0.9,
replace_target_iter=30,
memory_size=500,
batch_size=32,
e_greedy_increment=0.001,
save_steps=-1,
output_graph=False,
record_history=True,
observation_interval=0.01,
):
self.n_actions = n_actions
self.n_states = n_states
self.lr = learning_rate
self.reward_decay = reward_decay
self.epsilon_max = e_greedy
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon_increment = e_greedy_increment
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
self.record_history = record_history
self.observation_interval = observation_interval
# total learning step
self.learn_step_counter = 0
self.action_step_counter = 0
# initialize zero memory [s, a, r, s_]
self.memory = np.zeros((self.memory_size, n_states * 2 + 2))
self.memory_counter = 0
# consist of [target_net, evaluate_net]
self.build_net(Q)
# save data
self.save_steps = save_steps
self.steps = 0
t_params = symjax.get_variables(scope="target_net")
e_params = symjax.get_variables(scope="eval_net")
replacement = {t: e for t, e in zip(t_params, e_params)}
self.replace = symjax.function(updates=replacement)
self.cost_history = []
self._tmp_cost_history = []
def build_net(self, Q):
# ------------------ all inputs ------------------------
state = T.Placeholder([self.batch_size, self.n_states], "float32", name="s")
next_state = T.Placeholder(
[self.batch_size, self.n_states], "float32", name="s_"
)
reward = T.Placeholder(
[
self.batch_size,
],
"float32",
name="r",
) # input reward
action = T.Placeholder(
[
self.batch_size,
],
"int32",
name="a",
) # input Action
with symjax.Scope("eval_net"):
q_eval = Q(state, self.n_actions)
with symjax.Scope("test_set"):
q_next = Q(next_state, self.n_actions)
q_target = reward + self.reward_decay * q_next.max(1)
q_target = T.stop_gradient(q_target)
a_indices = T.stack([T.range(self.batch_size), action], axis=1)
q_eval_wrt_a = T.take_along_axis(q_eval, action.reshape((-1, 1)), 1).squeeze(1)
loss = T.mean((q_target - q_eval_wrt_a) ** 2)
nn.optimizers.Adam(loss, self.lr)
self.train = symjax.function(
state, action, reward, next_state, updates=symjax.get_updates()
)
self.q_eval = symjax.function(state, outputs=q_eval)
def store_transition(self, state, action, reward, next_state):
transition = np.hstack((state, [action, reward], next_state))
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
self.memory_counter += 1
def act(self, state, memory):
# to have batch dimension when feed into tf placeholder
state = state[np.newaxis, :]
if np.random.uniform() < self.epsilon:
# forward feed the state and get q value for every actions
actions_logprobs = self.q_eval(
state.repeat(self.batch_size, 0).astype("float32")
)[0]
action = np.argmax(actions_logprobs)
        else:
            action = np.random.randint(0, self.n_actions)
            actions_logprobs = None
        memory.states.append(state)
        memory.actions.append(action)
        memory.logprobs.append(actions_logprobs)
self.action_step_counter += 1
return action
def learn(self):
# check to replace target parameters
if self.learn_step_counter % self.replace_target_iter == 0:
self.replace()
print("\ntarget_params_replaced\n")
# sample batch memory from all memory
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
cost = self.train(
batch_memory[:, : self.n_states],
batch_memory[:, self.n_states].astype("int32"),
batch_memory[:, self.n_states + 1],
batch_memory[:, -self.n_states :],
)
self.steps += 1
# increasing epsilon
self.epsilon = (
self.epsilon + self.epsilon_increment
if self.epsilon < self.epsilon_max
else self.epsilon_max
)
self.learn_step_counter += 1
def run(
env,
agent,
reward,
replay_size=int(1e6),
action_noise=0.1,
max_episode_length=500,
decay=0.99,
start_episode=100,
update_after=10000,
max_ep_steps=10000,
max_episodes=1000,
total_steps=40000,
skip_frames=4,
):
# Main loop: play episode and train
init = 0.1 * (env.action_space.high - env.action_space.low)
noise = OrnsteinUhlenbeckProcess(agent.num_actions, initial_noise_scale=init)
    global_step = 0
    losses = []
    returns = []
    for i in range(max_episodes):
s = env.reset()
# Clear episode buffer
episode_buffer = []
for j in range(max_ep_steps):
a = noise(agent.act(s[None, :]), i)
r = 0
for k in range(skip_frames):
s2, r_, terminal, info = env.step(a)
r += r_
if terminal:
break
r /= k + 1
episode_buffer.append([s, a, r, s2, terminal])
s = s2
# Perform the updates
if env.buffer.len >= update_after:
losses.append(agent.train())
if terminal or j == (max_ep_steps - 1):
episode_buffer = reward.discount(np.asarray(episode_buffer))
print(
i,
" return:",
episode_buffer[:, 2].sum(),
"episode_length:",
j,
"noise scale",
noise.noise_scale,
)
                returns.append(episode_buffer[:, 2].sum())
                for step in episode_buffer:
                    env.buffer.push(*step)
                if losses:
                    print(np.array(losses).mean(0))
                noise.reset()
                break
    q_losses = [l[1] for l in losses]
    mu_losses = [l[0] for l in losses]
    return (returns, q_losses, mu_losses)
def actor(state):
input = nn.elu(nn.layers.Dense(state, 8))
input = nn.elu(nn.layers.Dense(input, 8))
input = nn.layers.Dense(input, 1)
return T.tanh(input)
def critic(state, action):
inputs = nn.layers.Dense(nn.elu(nn.layers.Dense(state, 8)), 8)
inputa = nn.layers.Dense(nn.elu(nn.layers.Dense(action, 8)), 8)
input = nn.elu(nn.layers.Dense(inputs + inputa, 8))
input = nn.layers.Dense(input, 1)
return input
# RL = DeepQNetwork(
# env.n_actions,
# env.n_states,
# actor,
# learning_rate=0.0005,
# reward_decay=0.995,
# e_greedy=0.1,
# replace_target_iter=400,
# batch_size=128,
# memory_size=4000,
# e_greedy_increment=None,
# record_history=True,
# )
# ==========================
# Training Parameters
# ==========================
# Maximum episodes run
MAX_EPISODES = 1000
# Max episode length
MAX_EP_STEPS = 1000
# Reward parameters
REWARD_FACTOR = 1 # Total episode reward factor
# Base learning rate for the Actor network
ACTOR_LEARNING_RATE = 0.0001
# Base learning rate for the Critic Network
CRITIC_LEARNING_RATE = 0.001
# Discount factor
GAMMA = 0.99
# Soft target update param
TAU = 1e-2
# Size of replay buffer
reward = Reward(REWARD_FACTOR, GAMMA, n=1)
# env = pendulum.CartpoleEnv()
env = gym.make("Pendulum-v0") # MountainCarContinuous-v0"
# ) # "Pendulum-v0") # gym.make("CartPole-v0")
agent = ddpg(
env,
actor,
critic,
batch_size=128,
tau=TAU,
gamma=GAMMA,
reward=reward,
)
env.buffer = BasicBuffer(size=int(1e5))
a, b, c = run(
env,
agent,
reward,
update_after=100,
start_episode=100,
skip_frames=1,
max_episodes=10000,
max_ep_steps=1000,
)
plt.subplot(211)
plt.plot(a)
plt.subplot(212)
plt.plot(b)
plt.plot(c)
plt.show()
# run_pendulum(env, RL)
| 28.985115
| 87
| 0.58338
|
f3c8e2f84649be44240f2d446f9154287f09affa
| 224
|
py
|
Python
|
octopus/api/cfg.py
|
ZarvisD/octopus
|
3e238721fccfec69a69a1635b8a0dc485e525e69
|
[
"MIT"
] | 2
|
2019-01-19T07:12:02.000Z
|
2021-08-14T13:23:37.000Z
|
octopus/api/cfg.py
|
ZarvisD/octopus
|
3e238721fccfec69a69a1635b8a0dc485e525e69
|
[
"MIT"
] | null | null | null |
octopus/api/cfg.py
|
ZarvisD/octopus
|
3e238721fccfec69a69a1635b8a0dc485e525e69
|
[
"MIT"
] | 1
|
2019-01-19T07:12:05.000Z
|
2019-01-19T07:12:05.000Z
|
class CFG(object):
def __init__(self, bytecode=None, instructions=None, analysis=True):
""" TODO """
raise NotImplementedError
def show(self):
""" TODO """
raise NotImplementedError
| 22.4
| 72
| 0.607143
|
6085fd702ec39c5cd8094622f622eb3dbb3e7636
| 1,533
|
py
|
Python
|
tests/server/utils.py
|
saurav-c/droplet
|
37e041816496ac95bc51226c6b3d1cbf429d0ffe
|
[
"Apache-2.0"
] | null | null | null |
tests/server/utils.py
|
saurav-c/droplet
|
37e041816496ac95bc51226c6b3d1cbf429d0ffe
|
[
"Apache-2.0"
] | null | null | null |
tests/server/utils.py
|
saurav-c/droplet
|
37e041816496ac95bc51226c6b3d1cbf429d0ffe
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from anna.lattices import LWWPairLattice
from cloudburst.server.utils import get_func_kvs_name
from cloudburst.shared.proto.cloudburst_pb2 import Dag
from cloudburst.shared.serializer import Serializer
serializer = Serializer()
def create_function(function, kvs_client, fname='func', ltype=LWWPairLattice):
serialized = serializer.dump_lattice(function, ltype)
kvs_client.put(get_func_kvs_name(fname), serialized)
def create_linear_dag(functions, fnames, kvs_client, dname,
lattice_type=LWWPairLattice):
dag = Dag()
dag.name = dname
prev = None
for index, fname in enumerate(fnames):
function = functions[index]
create_function(function, kvs_client, fname, lattice_type)
dag.functions.append(fname)
if prev:
link = dag.connections.add()
link.source = prev
link.sink = fname
prev = fname
return dag
| 30.66
| 78
| 0.71559
|
35e93b1137e09c8732d1ff844b7cf688524401f0
| 10,087
|
py
|
Python
|
hyperspectral/terra_common.py
|
terraref/extractors-hyperspectral
|
782e3c62088257bb9c8f0f4f1d8943fa02881dc0
|
[
"BSD-3-Clause"
] | 5
|
2016-10-11T19:05:23.000Z
|
2020-10-08T03:44:33.000Z
|
hyperspectral/terra_common.py
|
terraref/extractors-hyperspectral
|
782e3c62088257bb9c8f0f4f1d8943fa02881dc0
|
[
"BSD-3-Clause"
] | 27
|
2017-01-17T21:30:17.000Z
|
2020-03-12T21:10:26.000Z
|
hyperspectral/terra_common.py
|
terraref/extractors-hyperspectral
|
782e3c62088257bb9c8f0f4f1d8943fa02881dc0
|
[
"BSD-3-Clause"
] | 6
|
2017-01-18T18:57:00.000Z
|
2020-11-10T15:12:11.000Z
|
'''
Created on Sep 6, 2016
@author: Zongyang Li
'''
import json, sys, utm, re
import numpy as np
import pandas as pd
from datetime import datetime
from math import cos, pi
from terrautils.betydb import get_site_boundaries
from terrautils import betydb
# Scanalyzer -> MAC formular @ https://terraref.gitbooks.io/terraref-documentation/content/user/geospatial-information.html
# Mx = ax + bx * Gx + cx * Gy
# My = ay + by * Gx + cy * Gy
# Gx = ( (My/cy - ay/cy) - (Mx/cx - ax/cx) ) / (by/cy - bx/cx)
# Gy = ( (My/by - ay/by) - (Mx/bx - ax/bx) ) / (cy/by - cx/bx)
SE_latlon = (33.07451869,-111.97477775)
ay, by, cy = 3659974.971, 1.0002, 0.0078
ax, bx, cx = 409012.2032, 0.009, - 0.9986
SE_utm = utm.from_latlon(SE_latlon[0], SE_latlon[1])
lng_shift = 0.000020308287
lat_shift = 0.000015258894
"(latitude, longitude) of SE corner (positions are + in NW direction)"
ZERO_ZERO = (33.0745,-111.97475)
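# Illustrative check (not part of the original file): the Gx/Gy expressions in
# the comment above invert the Mx/My mapping, so a round trip starting from
# made-up gantry coordinates should return (approximately) to where it began.
def _scanalyzer_mac_roundtrip_example(gx=100.0, gy=5.0):
    mx = ax + bx * gx + cx * gy
    my = ay + by * gx + cy * gy
    gx2 = ((my / cy - ay / cy) - (mx / cx - ax / cx)) / (by / cy - bx / cx)
    gy2 = ((my / by - ay / by) - (mx / bx - ax / bx)) / (cy / by - cx / bx)
    return gx2 - gx, gy2 - gy  # both differences should be ~0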
class CoordinateConverter(object):
"""
This class implements coordinate conversions
    Coordinate systems used in TERRA:
    LatLon, field_position, field_partition, plot_number, pixels
    LatLon: latitude/longitude, EPSG:4326, ZERO_ZERO = (33.0745,-111.97475) SE corner (positions are + in the NW direction)
    field_position: field position in meters as used by the gantry system, ZERO_ZERO = (3.8, 0.0)
    field_partition: the field is divided into 54 rows and 16 columns; partition (1, 1) is in the SW corner (+ in the NE direction)
    plot_number: a number derived from the field_partition; details are in the file "Sorghum TERRA plot plan 2016 4.21.16.xlsx"
    pixels: (x, y, z) coordinates in the sensor data, 2-D or 3-D; converting them requires the sensor's field of view or similar parameters.
"""
def __init__(self):
self.fov = 0
self.pixSize = 1
self.x_range = 0
self.y_column = 0
self.max_col = None
self.max_range = None
self.seasonNum = 0
self.np_bounds = None # np.zeros((54, 16, 4))
self.queryStatus = False
self.plots = ''
def fieldPosition_to_fieldPartition(self, x, y):
plot_row = 0
plot_col = 0
if not self.queryStatus:
return plot_row, plot_col
for i in range(self.max_range):
xmin = self.np_bounds[i][0][0]
xmax = self.np_bounds[i][0][1]
if (x > xmin) and (x <= xmax):
plot_row = i + 1
break
for j in range(self.max_col):
ymin = self.np_bounds[plot_row-1][j][2]
ymax = self.np_bounds[plot_row-1][j][3]
if (y > ymin) and (y <= ymax):
plot_col = j + 1
break
return int(plot_row), int(plot_col)
def plotNum_to_fieldPartition(self, plotNum):
"Converts plot number to field partition"
plot_row = 0
plot_col = 0
if not self.queryStatus:
return int(plot_row), int(plot_col)
cols = self.max_col
col = plotNum % cols
if col == 0:
plot_row = plotNum // cols
if (plot_row % 2 == 0):
plot_col = 1
else:
plot_col = self.max_col
return int(plot_row), int(plot_col)
plot_row = plotNum // cols +1
plot_col = col
if (plot_row % 2 == 0):
plot_col = cols - col + 1
return int(plot_row), int(plot_col)
def pixel_to_plotNum(self, x, y, position, fov, scan_d, x_width, y_width):
"Converts pixel to plot number, given pixel x, y, field position, sensor field of view, scan distance, image width and height"
plotNum = 0
if not self.queryStatus:
return plotNum
x_param = float(fov) / float(x_width)
y_param = float(scan_d) / float(y_width)
x_shift = x * x_param
y_shift = y * y_param
x_real = position[0] - float(fov)/2 + x_shift
y_real = position[1] + y_shift
plot_row, plot_col = self.fieldPosition_to_fieldPartition(x_real, y_real)
plotNum = self.fieldPartition_to_plotNum(plot_row, plot_col)
return int(plotNum)
def fieldPartition_to_plotNum(self, plot_row, plot_col):
"Converts field partition to plot number"
plotNum = 0
if not self.queryStatus:
return 0
if plot_row == 0:
return 0
if plot_row % 2 == 0:
plot_col = self.max_col + 1 - plot_col
plotNum = (plot_row-1)*self.max_col + plot_col
return int(plotNum)
def epsg_to_mac(self, latlng):
Utm_lng = latlng[0] - lng_shift
Utm_lat = latlng[1] + lat_shift
mac = utm.from_latlon(Utm_lat, Utm_lng)
return mac
def mac_to_Scanalyzer(self, mac):
# Gx = ( (My/cy - ay/cy) - (Mx/cx - ax/cx) ) / (by/cy - bx/cx)
# Gy = ( (My/by - ay/by) - (Mx/bx - ax/bx) ) / (cy/by - cx/bx)
Mx = mac[0]
My = mac[1]
Gx = ( (My/cy - ay/cy) - (Mx/cx - ax/cx) ) / (by/cy - bx/cx)
Gy = ( (My/by - ay/by) - (Mx/bx - ax/bx) ) / (cy/by - cx/bx)
return [Gx, Gy]
def latlng_to_Scanalyzer(self, latlng):
mac = self.epsg_to_mac(latlng)
gantry_coord = self.mac_to_Scanalyzer(mac)
return gantry_coord
def bety_query(self, str_date):
# After S10 the betydb url changed to OPEN betydb: http://128.196.65.186:8000/bety/
if datetime.strptime(str_date, "%Y-%m-%d") >= datetime(2019, 11, 25):
betydb.BETYDB_URL = 'http://128.196.65.186:8000/bety/'
else:
betydb.BETYDB_URL = "https://terraref.ncsa.illinois.edu/bety"
self.plots = get_site_boundaries(str_date, city="Maricopa")
if len(self.plots) == 0:
self.queryStatus = False
return False
plot_season_range_col = [[int(x) for x in re.findall(r'\d+', x)] for x in list(self.plots.keys())] # find numbers in plot name
_, max_range, max_col = np.max(plot_season_range_col, axis=0)
self.max_range = max_range
self.max_col = max_col
self.np_bounds = np.zeros((max_range, max_col, 4))
self.parse_bety_plots()
# TODO add warning here if records num != size
# records_num = np.count_nonzero(self.np_bounds)
# if records_num != max_range * max_col * 4:
# self.queryStatus = False
# return False
self.queryStatus = True
#self.add_gaps_in_np_bounds()
return True
def file_query(self, file_path):
boundary_df = pd.read_csv(file_path)
boundary_df['range'] = boundary_df['range'].astype(int)
boundary_df['column'] = boundary_df['column'].astype(int)
self.max_range = boundary_df['range'].max()
self.max_col = boundary_df['column'].max()
self.np_bounds = np.zeros([self.max_range, self.max_col, 4])
for idx, row in boundary_df.iterrows():
self.np_bounds[int(row['range'])-1 , int(row['column'])-1] = \
row[['x_start', 'x_end', 'y_start', 'y_end']].values
self.queryStatus = True
def parse_bety_plots(self):
for item in self.plots:
strlist = item.split()
if not strlist[0] == 'MAC':
continue
range_, col, xmin, xmax, ymin, ymax = self.parse_site_boundary(item, self.plots[item])
if item.endswith(" W") or item.endswith(" E"):
continue
if range_ == 0:
continue
self.insert_boundary_to_nparray(range_, col, xmin, xmax, ymin, ymax)
return
def parse_site_boundary(self, site, bound_record):
# MAC Field Scanner Season 4 Range 5 Column 6
plot_record = [int(s) for s in site.split() if s.isdigit()]
if len(plot_record) != 3:
return 0, 0, 0, 0, 0, 0
self.seasonNum = plot_record[0]
range_ = plot_record[1]
col = plot_record[2]
latlngs = self.bety_str_parsing(bound_record)
gantry_coords = []
for latlng in latlngs:
gantry_coords.append(self.latlng_to_Scanalyzer(latlng))
gantry_coords = np.array(gantry_coords)
xmin = gantry_coords[:, 0].min()
xmax = gantry_coords[:, 0].max()
ymin = gantry_coords[:, 1].min()
ymax = gantry_coords[:, 1].max()
# xmin = gantry_coords[2][0]
# xmax = gantry_coords[0][0]
# ymin = gantry_coords[1][1]
# ymax = gantry_coords[0][1]
return range_, col, xmin, xmax, ymin, ymax
def bety_str_parsing(self, bety_str):
j = json.loads(bety_str)
latlngs = []
for i in range(4):
latlngs.append(j['coordinates'][0][0][i])
#latlngs.append(j['coordinates'][0][i])
return latlngs
def insert_boundary_to_nparray(self, range_, col, xmin, xmax, ymin, ymax):
range_ -= 1
col -= 1
self.np_bounds[range_][col][0] = xmin
self.np_bounds[range_][col][1] = xmax
self.np_bounds[range_][col][2] = ymin
self.np_bounds[range_][col][3] = ymax
return
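# Illustrative sketch (not part of the original file): converting a made-up
# (longitude, latitude) pair to Scanalyzer/gantry coordinates and then to a
# field partition. Note that fieldPosition_to_fieldPartition only returns a
# non-zero (row, column) after a successful bety_query() or file_query().
def _coordinate_converter_example():
    cc = CoordinateConverter()
    gx, gy = cc.latlng_to_Scanalyzer((-111.9748, 33.0746))
    return cc.fieldPosition_to_fieldPartition(gx, gy)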
def load_json(meta_path):
try:
with open(meta_path, 'r') as fin:
return json.load(fin)
except Exception as ex:
fail('Corrupt metadata file, ' + str(ex))
def fail(reason):
print(reason)
def lower_keys(in_dict):
if type(in_dict) is dict:
out_dict = {}
for key, item in in_dict.items():
out_dict[key.lower()] = lower_keys(item)
return out_dict
elif type(in_dict) is list:
return [lower_keys(obj) for obj in in_dict]
else:
return in_dict
| 34.426621
| 135
| 0.562407
|
62ee9bbfb17e26ed722f105d6922f72126aab964
| 4,755
|
py
|
Python
|
policy_value_net_numpy.py
|
ngxson/hobby-AlphaZero-Gomoku-flask
|
941b1c35b7047e3c47617260de623c54a739bbae
|
[
"MIT"
] | null | null | null |
policy_value_net_numpy.py
|
ngxson/hobby-AlphaZero-Gomoku-flask
|
941b1c35b7047e3c47617260de623c54a739bbae
|
[
"MIT"
] | null | null | null |
policy_value_net_numpy.py
|
ngxson/hobby-AlphaZero-Gomoku-flask
|
941b1c35b7047e3c47617260de623c54a739bbae
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Implement the policy value network using numpy, so that we can play with the
trained AI model without installing any DL framework
@author: Junxiao Song
"""
from __future__ import print_function
import numpy as np
# some utility functions
def softmax(x):
probs = np.exp(x - np.max(x))
probs /= np.sum(probs)
return probs
def relu(X):
out = np.maximum(X, 0)
return out
def conv_forward(X, W, b, stride=1, padding=1):
n_filters, d_filter, h_filter, w_filter = W.shape
# theano conv2d flips the filters (rotate 180 degree) first
# while doing the calculation
# W = W[:, :, ::-1, ::-1]
n_x, d_x, h_x, w_x = X.shape
h_out = (h_x - h_filter + 2 * padding) / stride + 1
w_out = (w_x - w_filter + 2 * padding) / stride + 1
h_out, w_out = int(h_out), int(w_out)
X_col = im2col_indices(X, h_filter, w_filter,
padding=padding, stride=stride)
W_col = W.reshape(n_filters, -1)
out = (np.dot(W_col, X_col).T + b).T
out = out.reshape(n_filters, h_out, w_out, n_x)
out = out.transpose(3, 0, 1, 2)
return out
def fc_forward(X, W, b):
out = np.dot(X, W) + b
return out
def get_im2col_indices(x_shape, field_height,
field_width, padding=1, stride=1):
# First figure out what the size of the output should be
N, C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
assert (W + 2 * padding - field_width) % stride == 0
out_height = int((H + 2 * padding - field_height) / stride + 1)
out_width = int((W + 2 * padding - field_width) / stride + 1)
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k.astype(int), i.astype(int), j.astype(int))
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
""" An implementation of im2col based on some fancy indexing """
# Zero-pad the input
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
k, i, j = get_im2col_indices(x.shape, field_height,
field_width, padding, stride)
cols = x_padded[:, k, i, j]
C = x.shape[1]
cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
return cols
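# Illustrative check (not part of the original file): im2col of a single
# 1x1x3x3 input with a 2x2 field, stride 1 and no padding yields a (4, 4)
# matrix, one column per sliding-window position.
def _im2col_example():
    x = np.arange(9, dtype=np.float32).reshape(1, 1, 3, 3)
    cols = im2col_indices(x, 2, 2, padding=0, stride=1)
    return cols.shape  # expected: (4, 4)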
def swapPositions(list, pos1, pos2):
list[pos1], list[pos2] = list[pos2], list[pos1]
return list
class PolicyValueNetNumpy():
"""policy-value network in numpy """
def __init__(self, board_width, board_height, net_params):
self.board_width = board_width
self.board_height = board_height
"""
# convert model from Keras
net_params = swapPositions(net_params, 6, 8)
net_params = swapPositions(net_params, 7, 9)
net_params = swapPositions(net_params, 8, 10)
net_params = swapPositions(net_params, 9, 11)
net_params = swapPositions(net_params, 8, 12)
net_params = swapPositions(net_params, 9, 13)
print('params', len(net_params))
for i in range(len(net_params)):
if net_params[i].ndim == 4:
net_params[i] = net_params[i].transpose(3, 2, 0, 1)
print(net_params[i].shape)
"""
self.params = net_params
def policy_value_fn(self, board):
"""
input: board
output: a list of (action, probability) tuples for each available
action and the score of the board state
"""
legal_positions = board.availables
current_state = board.current_state()
X = current_state.reshape(-1, 4, self.board_width, self.board_height)
# first 3 conv layers with ReLu nonlinearity
for i in [0, 2, 4]:
X = relu(conv_forward(X, self.params[i], self.params[i+1]))
# policy head
X_p = relu(conv_forward(X, self.params[6], self.params[7], padding=0))
X_p = fc_forward(X_p.flatten(), self.params[8], self.params[9])
act_probs = softmax(X_p)
# value head
X_v = relu(conv_forward(X, self.params[10],
self.params[11], padding=0))
X_v = relu(fc_forward(X_v.flatten(), self.params[12], self.params[13]))
value = np.tanh(fc_forward(X_v, self.params[14], self.params[15]))[0]
act_probs = zip(legal_positions, act_probs.flatten()[legal_positions])
return act_probs, value
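# Hedged usage note (not part of the original file): `board` above is assumed
# to expose `availables` (legal move indices) and `current_state()` returning
# the (4, board_width, board_height) feature planes, as in the AlphaZero-Gomoku
# project this file belongs to; `net_params` is the list of trained weights.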
| 35.75188
| 79
| 0.612618
|
777692d90c2e739a888f4ad78529e68bbc968351
| 51,995
|
py
|
Python
|
dask/array/linalg.py
|
mchi/dask
|
6a8f17b9df8ac661a72539bfa1fa7b539f1b2280
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T02:44:21.000Z
|
2019-01-31T02:44:21.000Z
|
dask/array/linalg.py
|
tomwhite/dask
|
a555c76fff593f1c4437b91a28e7328ef5e50bc6
|
[
"BSD-3-Clause"
] | 37
|
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
dask/array/linalg.py
|
tomwhite/dask
|
a555c76fff593f1c4437b91a28e7328ef5e50bc6
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T02:44:12.000Z
|
2019-01-31T02:44:12.000Z
|
import operator
from numbers import Number
import numpy as np
import tlz as toolz
from ..base import tokenize, wait
from ..delayed import delayed
from ..blockwise import blockwise
from ..highlevelgraph import HighLevelGraph
from ..utils import derived_from, apply
from .core import dotmany, Array, concatenate, from_delayed
from .creation import eye
from .random import RandomState
from .utils import meta_from_array, svd_flip, ones_like_safe
def _cumsum_blocks(it):
total = 0
for x in it:
total_previous = total
total += x
yield (total_previous, total)
def _cumsum_part(last, new):
return (last[1], last[1] + new)
def _nanmin(m, n):
k_0 = min([m, n])
k_1 = m if np.isnan(n) else n
return k_1 if np.isnan(k_0) else k_0
def _wrapped_qr(a):
"""
A wrapper for np.linalg.qr that handles arrays with 0 rows
Notes: Created for tsqr so as to manage cases with uncertain
array dimensions. In particular, the case where arrays have
(uncertain) chunks with 0 rows.
"""
# workaround may be removed when numpy stops rejecting edge cases
if a.shape[0] == 0:
return np.zeros((0, 0)), np.zeros((0, a.shape[1]))
else:
return np.linalg.qr(a)
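# Illustrative check (not part of the original file): a block with zero rows
# is handled without calling np.linalg.qr, yielding empty Q/R factors.
def _wrapped_qr_example():
    q, r = _wrapped_qr(np.zeros((0, 3)))
    return q.shape, r.shape  # expected: ((0, 0), (0, 3))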
def tsqr(data, compute_svd=False, _max_vchunk_size=None):
"""Direct Tall-and-Skinny QR algorithm
As presented in:
A. Benson, D. Gleich, and J. Demmel.
Direct QR factorizations for tall-and-skinny matrices in
MapReduce architectures.
IEEE International Conference on Big Data, 2013.
https://arxiv.org/abs/1301.1071
This algorithm is used to compute both the QR decomposition and the
Singular Value Decomposition. It requires that the input array have a
single column of blocks, each of which fit in memory.
Parameters
----------
data: Array
compute_svd: bool
Whether to compute the SVD rather than the QR decomposition
_max_vchunk_size: Integer
Used internally in recursion to set the maximum row dimension
of chunks in subsequent recursive calls.
Notes
-----
With ``k`` blocks of size ``(m, n)``, this algorithm has memory use that
scales as ``k * n * n``.
The implementation here is the recursive variant due to the ultimate
need for one "single core" QR decomposition. In the non-recursive version
of the algorithm, given ``k`` blocks, after ``k`` ``m * n`` QR
decompositions, there will be a "single core" QR decomposition that will
have to work with a ``(k * n, n)`` matrix.
Here, recursion is applied as necessary to ensure that ``k * n`` is not
larger than ``m`` (if ``m / n >= 2``). In particular, this is done
to ensure that single core computations do not have to work on blocks
larger than ``(m, n)``.
Where blocks are irregular, the above logic is applied with the "height" of
the "tallest" block used in place of ``m``.
Consider use of the ``rechunk`` method to control this behavior.
Taller blocks will reduce overall memory use (assuming that many of them
still fit in memory at once).
See Also
--------
dask.array.linalg.qr
Powered by this algorithm
dask.array.linalg.svd
Powered by this algorithm
dask.array.linalg.sfqr
Variant for short-and-fat arrays
"""
nr, nc = len(data.chunks[0]), len(data.chunks[1])
cr_max, cc = max(data.chunks[0]), data.chunks[1][0]
if not (data.ndim == 2 and nc == 1): # Is a matrix # Only one column block
raise ValueError(
"Input must have the following properties:\n"
" 1. Have two dimensions\n"
" 2. Have only one column of blocks\n\n"
"Note: This function (tsqr) supports QR decomposition in the case of\n"
"tall-and-skinny matrices (single column chunk/block; see qr)\n"
"Current shape: {},\nCurrent chunksize: {}".format(
data.shape, data.chunksize
)
)
token = "-" + tokenize(data, compute_svd)
m, n = data.shape
numblocks = (nr, 1)
qq, rr = np.linalg.qr(np.ones(shape=(1, 1), dtype=data.dtype))
layers = data.__dask_graph__().layers.copy()
dependencies = data.__dask_graph__().dependencies.copy()
# Block qr
name_qr_st1 = "qr" + token
dsk_qr_st1 = blockwise(
_wrapped_qr,
name_qr_st1,
"ij",
data.name,
"ij",
numblocks={data.name: numblocks},
)
layers[name_qr_st1] = dsk_qr_st1
dependencies[name_qr_st1] = set(data.__dask_layers__())
# Block qr[0]
name_q_st1 = "getitem" + token + "-q1"
dsk_q_st1 = dict(
((name_q_st1, i, 0), (operator.getitem, (name_qr_st1, i, 0), 0))
for i in range(numblocks[0])
)
layers[name_q_st1] = dsk_q_st1
dependencies[name_q_st1] = {name_qr_st1}
# Block qr[1]
name_r_st1 = "getitem" + token + "-r1"
dsk_r_st1 = dict(
((name_r_st1, i, 0), (operator.getitem, (name_qr_st1, i, 0), 1))
for i in range(numblocks[0])
)
layers[name_r_st1] = dsk_r_st1
dependencies[name_r_st1] = {name_qr_st1}
# Next step is to obtain a QR decomposition for the stacked R factors, so either:
# - gather R factors into a single core and do a QR decomposition
# - recurse with tsqr (if single core computation too large and a-priori "meaningful
# reduction" possible, meaning that chunks have to be well defined)
single_core_compute_m = nr * cc
chunks_well_defined = not any(np.isnan(c) for cs in data.chunks for c in cs)
prospective_blocks = np.ceil(single_core_compute_m / cr_max)
meaningful_reduction_possible = (
cr_max if _max_vchunk_size is None else _max_vchunk_size
) >= 2 * cc
can_distribute = chunks_well_defined and int(prospective_blocks) > 1
if chunks_well_defined and meaningful_reduction_possible and can_distribute:
# stack chunks into blocks and recurse using tsqr
# Prepare to stack chunks into blocks (from block qr[1])
all_blocks = []
curr_block = []
curr_block_sz = 0
for idx, a_m in enumerate(data.chunks[0]):
m_q = a_m
n_q = min(m_q, cc)
m_r = n_q
# n_r = cc
if curr_block_sz + m_r > cr_max:
all_blocks.append(curr_block)
curr_block = []
curr_block_sz = 0
curr_block.append((idx, m_r))
curr_block_sz += m_r
if len(curr_block) > 0:
all_blocks.append(curr_block)
# R_stacked
name_r_stacked = "stack" + token + "-r1"
dsk_r_stacked = dict(
(
(name_r_stacked, i, 0),
(
np.vstack,
(tuple, [(name_r_st1, idx, 0) for idx, _ in sub_block_info]),
),
)
for i, sub_block_info in enumerate(all_blocks)
)
layers[name_r_stacked] = dsk_r_stacked
dependencies[name_r_stacked] = {name_r_st1}
# retrieve R_stacked for recursion with tsqr
vchunks_rstacked = tuple(
[sum(map(lambda x: x[1], sub_block_info)) for sub_block_info in all_blocks]
)
graph = HighLevelGraph(layers, dependencies)
# dsk.dependencies[name_r_stacked] = {data.name}
r_stacked_meta = meta_from_array(
data, len((sum(vchunks_rstacked), n)), dtype=rr.dtype
)
r_stacked = Array(
graph,
name_r_stacked,
shape=(sum(vchunks_rstacked), n),
chunks=(vchunks_rstacked, n),
meta=r_stacked_meta,
)
# recurse
q_inner, r_inner = tsqr(r_stacked, _max_vchunk_size=cr_max)
layers = toolz.merge(q_inner.dask.layers, r_inner.dask.layers)
dependencies = toolz.merge(q_inner.dask.dependencies, r_inner.dask.dependencies)
# Q_inner: "unstack"
name_q_st2 = "getitem" + token + "-q2"
dsk_q_st2 = dict(
(
(name_q_st2, j, 0),
(
operator.getitem,
(q_inner.name, i, 0),
((slice(e[0], e[1])), (slice(0, n))),
),
)
for i, sub_block_info in enumerate(all_blocks)
for j, e in zip(
[x[0] for x in sub_block_info],
_cumsum_blocks([x[1] for x in sub_block_info]),
)
)
layers[name_q_st2] = dsk_q_st2
dependencies[name_q_st2] = set(q_inner.__dask_layers__())
# R: R_inner
name_r_st2 = "r-inner" + token
dsk_r_st2 = {(name_r_st2, 0, 0): (r_inner.name, 0, 0)}
layers[name_r_st2] = dsk_r_st2
dependencies[name_r_st2] = set(r_inner.__dask_layers__())
# Q: Block qr[0] (*) Q_inner
name_q_st3 = "dot" + token + "-q3"
dsk_q_st3 = blockwise(
np.dot,
name_q_st3,
"ij",
name_q_st1,
"ij",
name_q_st2,
"ij",
numblocks={name_q_st1: numblocks, name_q_st2: numblocks},
)
layers[name_q_st3] = dsk_q_st3
dependencies[name_q_st3] = {name_q_st1, name_q_st2}
else:
# Do single core computation
# Stacking for in-core QR computation
to_stack = [(name_r_st1, i, 0) for i in range(numblocks[0])]
name_r_st1_stacked = "stack" + token + "-r1"
dsk_r_st1_stacked = {(name_r_st1_stacked, 0, 0): (np.vstack, (tuple, to_stack))}
layers[name_r_st1_stacked] = dsk_r_st1_stacked
dependencies[name_r_st1_stacked] = {name_r_st1}
# In-core QR computation
name_qr_st2 = "qr" + token + "-qr2"
dsk_qr_st2 = blockwise(
np.linalg.qr,
name_qr_st2,
"ij",
name_r_st1_stacked,
"ij",
numblocks={name_r_st1_stacked: (1, 1)},
)
layers[name_qr_st2] = dsk_qr_st2
dependencies[name_qr_st2] = {name_r_st1_stacked}
# In-core qr[0]
name_q_st2_aux = "getitem" + token + "-q2-aux"
dsk_q_st2_aux = {
(name_q_st2_aux, 0, 0): (operator.getitem, (name_qr_st2, 0, 0), 0)
}
layers[name_q_st2_aux] = dsk_q_st2_aux
dependencies[name_q_st2_aux] = {name_qr_st2}
        chunks_are_all_known = not any(np.isnan(c) for cs in data.chunks for c in cs)
        if chunks_are_all_known:
# when chunks are all known...
# obtain slices on q from in-core compute (e.g.: (slice(10, 20), slice(0, 5)))
q2_block_sizes = [min(e, n) for e in data.chunks[0]]
block_slices = [
(slice(e[0], e[1]), slice(0, n)) for e in _cumsum_blocks(q2_block_sizes)
]
dsk_q_blockslices = {}
deps = set()
else:
# when chunks are not already known...
# request shape information: vertical chunk sizes & column dimension (n)
name_q2bs = "shape" + token + "-q2"
dsk_q2_shapes = {
(name_q2bs, i): (min, (getattr, (data.name, i, 0), "shape"))
for i in range(numblocks[0])
}
name_n = "getitem" + token + "-n"
dsk_n = {
name_n: (operator.getitem, (getattr, (data.name, 0, 0), "shape"), 1)
}
# cumulative sums (start, end)
name_q2cs = "cumsum" + token + "-q2"
dsk_q2_cumsum = {(name_q2cs, 0): [0, (name_q2bs, 0)]}
for i in range(1, numblocks[0]):
dsk_q2_cumsum[(name_q2cs, i)] = (
_cumsum_part,
(name_q2cs, i - 1),
(name_q2bs, i),
)
# obtain slices on q from in-core compute (e.g.: (slice(10, 20), slice(0, 5)))
name_blockslice = "slice" + token + "-q"
dsk_block_slices = {
(name_blockslice, i): (
tuple,
[(apply, slice, (name_q2cs, i)), (slice, 0, name_n)],
)
for i in range(numblocks[0])
}
dsk_q_blockslices = toolz.merge(
dsk_n, dsk_q2_shapes, dsk_q2_cumsum, dsk_block_slices
)
deps = {data.name}
block_slices = [(name_blockslice, i) for i in range(numblocks[0])]
layers["q-blocksizes" + token] = dsk_q_blockslices
dependencies["q-blocksizes" + token] = deps
# In-core qr[0] unstacking
name_q_st2 = "getitem" + token + "-q2"
dsk_q_st2 = dict(
((name_q_st2, i, 0), (operator.getitem, (name_q_st2_aux, 0, 0), b))
for i, b in enumerate(block_slices)
)
layers[name_q_st2] = dsk_q_st2
        if chunks_are_all_known:
dependencies[name_q_st2] = {name_q_st2_aux}
else:
dependencies[name_q_st2] = {name_q_st2_aux, "q-blocksizes" + token}
# Q: Block qr[0] (*) In-core qr[0]
name_q_st3 = "dot" + token + "-q3"
dsk_q_st3 = blockwise(
np.dot,
name_q_st3,
"ij",
name_q_st1,
"ij",
name_q_st2,
"ij",
numblocks={name_q_st1: numblocks, name_q_st2: numblocks},
)
layers[name_q_st3] = dsk_q_st3
dependencies[name_q_st3] = {name_q_st1, name_q_st2}
# R: In-core qr[1]
name_r_st2 = "getitem" + token + "-r2"
dsk_r_st2 = {(name_r_st2, 0, 0): (operator.getitem, (name_qr_st2, 0, 0), 1)}
layers[name_r_st2] = dsk_r_st2
dependencies[name_r_st2] = {name_qr_st2}
if not compute_svd:
is_unknown_m = np.isnan(data.shape[0]) or any(
np.isnan(c) for c in data.chunks[0]
)
is_unknown_n = np.isnan(data.shape[1]) or any(
np.isnan(c) for c in data.chunks[1]
)
if is_unknown_m and is_unknown_n:
# assumption: m >= n
q_shape = data.shape
q_chunks = (data.chunks[0], (np.nan,))
r_shape = (np.nan, np.nan)
r_chunks = ((np.nan,), (np.nan,))
elif is_unknown_m and not is_unknown_n:
# assumption: m >= n
q_shape = data.shape
q_chunks = (data.chunks[0], (n,))
r_shape = (n, n)
r_chunks = (n, n)
elif not is_unknown_m and is_unknown_n:
# assumption: m >= n
q_shape = data.shape
q_chunks = (data.chunks[0], (np.nan,))
r_shape = (np.nan, np.nan)
r_chunks = ((np.nan,), (np.nan,))
else:
q_shape = (
data.shape
if data.shape[0] >= data.shape[1]
else (data.shape[0], data.shape[0])
)
q_chunks = (
data.chunks
if data.shape[0] >= data.shape[1]
else (data.chunks[0], data.chunks[0])
)
r_shape = (n, n) if data.shape[0] >= data.shape[1] else data.shape
r_chunks = r_shape
# dsk.dependencies[name_q_st3] = {data.name}
# dsk.dependencies[name_r_st2] = {data.name}
graph = HighLevelGraph(layers, dependencies)
q_meta = meta_from_array(data, len(q_shape), qq.dtype)
r_meta = meta_from_array(data, len(r_shape), rr.dtype)
q = Array(graph, name_q_st3, shape=q_shape, chunks=q_chunks, meta=q_meta)
r = Array(graph, name_r_st2, shape=r_shape, chunks=r_chunks, meta=r_meta)
return q, r
else:
# In-core SVD computation
name_svd_st2 = "svd" + token + "-2"
dsk_svd_st2 = blockwise(
np.linalg.svd,
name_svd_st2,
"ij",
name_r_st2,
"ij",
numblocks={name_r_st2: (1, 1)},
)
# svd[0]
name_u_st2 = "getitem" + token + "-u2"
dsk_u_st2 = {(name_u_st2, 0, 0): (operator.getitem, (name_svd_st2, 0, 0), 0)}
# svd[1]
name_s_st2 = "getitem" + token + "-s2"
dsk_s_st2 = {(name_s_st2, 0): (operator.getitem, (name_svd_st2, 0, 0), 1)}
# svd[2]
name_v_st2 = "getitem" + token + "-v2"
dsk_v_st2 = {(name_v_st2, 0, 0): (operator.getitem, (name_svd_st2, 0, 0), 2)}
# Q * U
name_u_st4 = "getitem" + token + "-u4"
dsk_u_st4 = blockwise(
dotmany,
name_u_st4,
"ij",
name_q_st3,
"ik",
name_u_st2,
"kj",
numblocks={name_q_st3: numblocks, name_u_st2: (1, 1)},
)
layers[name_svd_st2] = dsk_svd_st2
dependencies[name_svd_st2] = {name_r_st2}
layers[name_u_st2] = dsk_u_st2
dependencies[name_u_st2] = {name_svd_st2}
layers[name_u_st4] = dsk_u_st4
dependencies[name_u_st4] = {name_q_st3, name_u_st2}
layers[name_s_st2] = dsk_s_st2
dependencies[name_s_st2] = {name_svd_st2}
layers[name_v_st2] = dsk_v_st2
dependencies[name_v_st2] = {name_svd_st2}
uu, ss, vvh = np.linalg.svd(np.ones(shape=(1, 1), dtype=data.dtype))
k = _nanmin(m, n) # avoid RuntimeWarning with np.nanmin([m, n])
m_u = m
n_u = int(k) if not np.isnan(k) else k
n_s = n_u
m_vh = n_u
n_vh = n
d_vh = max(m_vh, n_vh) # full matrix returned: but basically n
graph = HighLevelGraph(layers, dependencies)
u_meta = meta_from_array(data, len((m_u, n_u)), uu.dtype)
s_meta = meta_from_array(data, len((n_s,)), ss.dtype)
vh_meta = meta_from_array(data, len((d_vh, d_vh)), vvh.dtype)
u = Array(
graph,
name_u_st4,
shape=(m_u, n_u),
chunks=(data.chunks[0], (n_u,)),
meta=u_meta,
)
s = Array(graph, name_s_st2, shape=(n_s,), chunks=((n_s,),), meta=s_meta)
vh = Array(
graph, name_v_st2, shape=(d_vh, d_vh), chunks=((n,), (n,)), meta=vh_meta
)
return u, s, vh
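# Illustrative usage sketch for tsqr (not part of the library; the helper name and
# shapes below are hypothetical). It shows the tall-and-skinny layout the docstring
# above assumes and how ``rechunk`` controls block height, trading recursion depth
# against per-block memory.
def _tsqr_usage_sketch():
    import dask.array as da
    x = da.random.random((100000, 100), chunks=(10000, 100))  # many row blocks, one column block
    q, r = tsqr(x)
    # Taller blocks mean fewer row chunks (less stacking/recursion) but larger
    # single-block QR decompositions.
    q2, r2 = tsqr(x.rechunk((25000, 100)))
    return (q, r), (q2, r2)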
def sfqr(data, name=None):
"""Direct Short-and-Fat QR
    Currently, this is a quick hack for non-tall-and-skinny matrices that
    are one chunk tall and (unless they are one chunk wide) have chunks
    that are wider than they are tall. Given
    Q [R_1 R_2 ...] = [A_1 A_2 ...]
    it computes the factorization Q R_1 = A_1, then computes the other
    R_k's in parallel.
Parameters
----------
data: Array
See Also
--------
dask.array.linalg.qr
Main user API that uses this function
dask.array.linalg.tsqr
Variant for tall-and-skinny case
"""
nr, nc = len(data.chunks[0]), len(data.chunks[1])
cr, cc = data.chunks[0][0], data.chunks[1][0]
    if not (
        (data.ndim == 2)  # Is a matrix
        and (nr == 1)  # Has exactly one block row
        and (
            (cr <= cc)  # Chunking dimension on rows is at least that on cols, or...
            or (nc == 1)  # ...only one block col
        )
    ):
raise ValueError(
"Input must have the following properties:\n"
" 1. Have two dimensions\n"
" 2. Have only one row of blocks\n"
" 3. Either one column of blocks or (first) chunk size on cols\n"
" is at most that on rows (e.g.: for a 5x20 matrix,\n"
" chunks=((5), (8,4,8)) is fine, but chunks=((5), (4,8,8)) is not;\n"
" still, prefer something simple like chunks=(5,10) or chunks=5)\n\n"
"Note: This function (sfqr) supports QR decomposition in the case\n"
"of short-and-fat matrices (single row chunk/block; see qr)"
)
prefix = name or "sfqr-" + tokenize(data)
prefix += "_"
m, n = data.shape
qq, rr = np.linalg.qr(np.ones(shape=(1, 1), dtype=data.dtype))
layers = data.__dask_graph__().layers.copy()
dependencies = data.__dask_graph__().dependencies.copy()
# data = A = [A_1 A_rest]
name_A_1 = prefix + "A_1"
name_A_rest = prefix + "A_rest"
layers[name_A_1] = {(name_A_1, 0, 0): (data.name, 0, 0)}
dependencies[name_A_1] = set(data.__dask_layers__())
layers[name_A_rest] = {
(name_A_rest, 0, idx): (data.name, 0, 1 + idx) for idx in range(nc - 1)
}
if len(layers[name_A_rest]) > 0:
dependencies[name_A_rest] = set(data.__dask_layers__())
else:
dependencies[name_A_rest] = set()
# Q R_1 = A_1
name_Q_R1 = prefix + "Q_R_1"
name_Q = prefix + "Q"
name_R_1 = prefix + "R_1"
layers[name_Q_R1] = {(name_Q_R1, 0, 0): (np.linalg.qr, (name_A_1, 0, 0))}
dependencies[name_Q_R1] = {name_A_1}
layers[name_Q] = {(name_Q, 0, 0): (operator.getitem, (name_Q_R1, 0, 0), 0)}
dependencies[name_Q] = {name_Q_R1}
layers[name_R_1] = {(name_R_1, 0, 0): (operator.getitem, (name_Q_R1, 0, 0), 1)}
dependencies[name_R_1] = {name_Q_R1}
graph = HighLevelGraph(layers, dependencies)
Q_meta = meta_from_array(data, len((m, min(m, n))), dtype=qq.dtype)
R_1_meta = meta_from_array(data, len((min(m, n), cc)), dtype=rr.dtype)
Q = Array(graph, name_Q, shape=(m, min(m, n)), chunks=(m, min(m, n)), meta=Q_meta)
R_1 = Array(graph, name_R_1, shape=(min(m, n), cc), chunks=(cr, cc), meta=R_1_meta)
# R = [R_1 Q'A_rest]
Rs = [R_1]
if nc > 1:
A_rest_meta = meta_from_array(data, len((min(m, n), n - cc)), dtype=rr.dtype)
A_rest = Array(
graph,
name_A_rest,
shape=(min(m, n), n - cc),
chunks=(cr, data.chunks[1][1:]),
meta=A_rest_meta,
)
Rs.append(Q.T.dot(A_rest))
R = concatenate(Rs, axis=1)
return Q, R
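# Illustrative usage sketch for sfqr (not part of the library; the helper name is
# hypothetical): a single row of blocks whose first chunk is at least as wide as it
# is tall, matching the constraints checked above.
def _sfqr_usage_sketch():
    import dask.array as da
    x = da.random.random((5, 200), chunks=(5, 50))  # one block row, blocks wider than tall
    q, r = sfqr(x)
    return q, r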
def compression_level(n, q, n_oversamples=10, min_subspace_size=20):
"""Compression level to use in svd_compressed
    Given the size ``n`` of a space, compress it to one of size
    ``q`` plus ``n_oversamples``.
    The oversampling allows for greater flexibility in finding an
    appropriate subspace; a low value is often enough (10 is already a
    very conservative choice and can usually be reduced further).
``q + oversampling`` should not be larger than ``n``. In this
specific implementation, ``q + n_oversamples`` is at least
``min_subspace_size``.
Parameters
----------
n: int
Column/row dimension of original matrix
q: int
Size of the desired subspace (the actual size will be bigger,
because of oversampling, see ``da.linalg.compression_level``)
n_oversamples: int, default=10
Number of oversamples used for generating the sampling matrix.
min_subspace_size : int, default=20
Minimum subspace size.
Examples
--------
>>> compression_level(100, 10)
20
"""
return min(max(min_subspace_size, q + n_oversamples), n)
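# Worked checks of the clamping above (illustrative helper, not library code; the
# helper name is hypothetical).
def _compression_level_checks():
    assert compression_level(15, 10) == 15   # capped at n
    assert compression_level(100, 5) == 20   # floored at min_subspace_size
    assert compression_level(100, 50) == 60  # q + n_oversamples
    return True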
def compression_matrix(
data, q, iterator="none", n_power_iter=0, n_oversamples=10, seed=None, compute=False
):
"""Randomly sample matrix to find most active subspace
This compression matrix returned by this algorithm can be used to
compute both the QR decomposition and the Singular Value
Decomposition.
Parameters
----------
data: Array
q: int
Size of the desired subspace (the actual size will be bigger,
because of oversampling, see ``da.linalg.compression_level``)
iterator: {'none', 'power', 'QR'}, default='none'
Define the technique used for iterations to cope with flat
singular spectra or when the input matrix is very large.
n_power_iter: int
number of power iterations, useful when the singular values of
the input matrix decay very slowly.
n_oversamples: int, default=10
Number of oversamples used for generating the sampling matrix.
        This value increases the size of the subspace computed, which is more
        accurate at the cost of efficiency. Results are rarely sensitive to this
        choice, though, and in practice a value of 10 is usually high enough.
compute : bool
Whether or not to compute data at each use.
Recomputing the input while performing several passes reduces memory
pressure, but means that we have to compute the input multiple times.
This is a good choice if the data is larger than memory and cheap to
recreate.
References
----------
N. Halko, P. G. Martinsson, and J. A. Tropp.
Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions.
SIAM Rev., Survey and Review section, Vol. 53, num. 2,
pp. 217-288, June 2011
https://arxiv.org/abs/0909.4061
"""
m, n = data.shape
comp_level = compression_level(min(m, n), q, n_oversamples=n_oversamples)
if isinstance(seed, RandomState):
state = seed
else:
state = RandomState(seed)
datatype = np.float64
if (data.dtype).type in {np.float32, np.complex64}:
datatype = np.float32
omega = state.standard_normal(
size=(n, comp_level), chunks=(data.chunks[1], (comp_level,))
).astype(datatype, copy=False)
mat_h = data.dot(omega)
if iterator == "power":
for i in range(n_power_iter):
if compute:
mat_h = mat_h.persist()
wait(mat_h)
tmp = data.T.dot(mat_h)
if compute:
tmp = tmp.persist()
wait(tmp)
mat_h = data.dot(tmp)
q, _ = tsqr(mat_h)
elif iterator == "QR":
q, _ = tsqr(mat_h)
for i in range(n_power_iter):
if compute:
q = q.persist()
wait(q)
q, _ = tsqr(data.T.dot(q))
if compute:
q = q.persist()
wait(q)
q, _ = tsqr(data.dot(q))
else:
q, _ = tsqr(mat_h)
return q.T
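# Illustrative usage sketch for compression_matrix (not part of the library; the
# helper name and sizes are hypothetical): build a compression matrix with a couple
# of power iterations for a slowly decaying spectrum, then project the data onto
# the sampled subspace before a small SVD.
def _compression_matrix_usage_sketch():
    import dask.array as da
    x = da.random.random((10000, 10000), chunks=(1000, 1000))
    comp = compression_matrix(x, 20, iterator="power", n_power_iter=2)
    return comp.dot(x)  # shape (comp_level, 10000): small enough for a cheap SVD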
def svd_compressed(
a,
k,
iterator="none",
n_power_iter=1,
n_oversamples=10,
seed=None,
compute=False,
coerce_signs=True,
):
"""Randomly compressed rank-k thin Singular Value Decomposition.
This computes the approximate singular value decomposition of a large
array. This algorithm is generally faster than the normal algorithm
but does not provide exact results. One can balance between
performance and accuracy with input parameters (see below).
Parameters
----------
a: Array
Input array
k: int
Rank of the desired thin SVD decomposition.
iterator: {'none', 'power', 'QR'}, default='none'
Define the technique used for iterations to cope with flat
singular spectra or when the input matrix is very large.
n_power_iter: int, default=1
Number of power iterations, useful when the singular values
decay slowly. Error decreases exponentially as n_power_iter
increases. In practice, set n_power_iter <= 4.
n_oversamples: int, default=10
Number of oversamples used for generating the sampling matrix.
compute : bool
Whether or not to compute data at each use.
Recomputing the input while performing several passes reduces memory
pressure, but means that we have to compute the input multiple times.
This is a good choice if the data is larger than memory and cheap to
recreate.
coerce_signs : bool
Whether or not to apply sign coercion to singular vectors in
order to maintain deterministic results, by default True.
Examples
--------
>>> u, s, vt = svd_compressed(x, 20) # doctest: +SKIP
Returns
-------
u: Array, unitary / orthogonal
s: Array, singular values in decreasing order (largest first)
v: Array, unitary / orthogonal
References
----------
N. Halko, P. G. Martinsson, and J. A. Tropp.
Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions.
SIAM Rev., Survey and Review section, Vol. 53, num. 2,
pp. 217-288, June 2011
https://arxiv.org/abs/0909.4061
"""
if iterator != "none" and n_power_iter == 0:
raise ValueError("Iterators require n_power_iter > 0.\n")
comp = compression_matrix(
a,
k,
iterator=iterator,
n_power_iter=n_power_iter,
n_oversamples=n_oversamples,
seed=seed,
compute=compute,
)
if compute:
comp = comp.persist()
wait(comp)
a_compressed = comp.dot(a)
v, s, u = tsqr(a_compressed.T, compute_svd=True)
u = comp.T.dot(u.T)
v = v.T
u = u[:, :k]
s = s[:k]
v = v[:k, :]
if coerce_signs:
u, v = svd_flip(u, v)
return u, s, v
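# Illustrative usage sketch for svd_compressed (not part of the library; the helper
# name and sizes are hypothetical): a rank-20 approximation of a fully chunked
# array, reconstructed as u @ diag(s) @ v.
def _svd_compressed_usage_sketch():
    import dask.array as da
    x = da.random.random((20000, 20000), chunks=(2000, 2000))
    u, s, v = svd_compressed(x, 20, iterator="power", n_power_iter=2)
    return u.dot(da.diag(s)).dot(v)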
def qr(a):
"""
Compute the qr factorization of a matrix.
Parameters
----------
a : Array
Returns
-------
q: Array, orthonormal
r: Array, upper-triangular
Examples
--------
>>> q, r = da.linalg.qr(x) # doctest: +SKIP
See Also
--------
numpy.linalg.qr: Equivalent NumPy Operation
dask.array.linalg.tsqr: Implementation for tall-and-skinny arrays
dask.array.linalg.sfqr: Implementation for short-and-fat arrays
"""
if len(a.chunks[1]) == 1 and len(a.chunks[0]) > 1:
return tsqr(a)
elif len(a.chunks[0]) == 1:
return sfqr(a)
else:
raise NotImplementedError(
"qr currently supports only tall-and-skinny (single column chunk/block; see tsqr)\n"
"and short-and-fat (single row chunk/block; see sfqr) matrices\n\n"
"Consider use of the rechunk method. For example,\n\n"
"x.rechunk({0: -1, 1: 'auto'}) or x.rechunk({0: 'auto', 1: -1})\n\n"
"which rechunk one shorter axis to a single chunk, while allowing\n"
"the other axis to automatically grow/shrink appropriately."
)
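# Illustrative sketch of the rechunking advice in the error message above (not part
# of the library; the helper name and shapes are hypothetical).
def _qr_rechunk_sketch():
    import dask.array as da
    x = da.random.random((4000, 400), chunks=(500, 100))  # chunked in both dimensions
    q, r = qr(x.rechunk({0: "auto", 1: -1}))  # single column of blocks -> tsqr path
    return q, r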
def svd(a, coerce_signs=True):
"""
Compute the singular value decomposition of a matrix.
Parameters
----------
a : (M, N) Array
coerce_signs : bool
Whether or not to apply sign coercion to singular vectors in
order to maintain deterministic results, by default True.
Examples
--------
>>> u, s, v = da.linalg.svd(x) # doctest: +SKIP
Returns
-------
u : (M, K) Array, unitary / orthogonal
Left-singular vectors of `a` (in columns) with shape (M, K)
where K = min(M, N).
s : (K,) Array, singular values in decreasing order (largest first)
Singular values of `a`.
v : (K, N) Array, unitary / orthogonal
Right-singular vectors of `a` (in rows) with shape (K, N)
where K = min(M, N).
Warnings
--------
SVD is only supported for arrays with chunking in one dimension.
This requires that all inputs either contain a single column
of chunks (tall-and-skinny) or a single row of chunks (short-and-fat).
For arrays with chunking in both dimensions, see da.linalg.svd_compressed.
See Also
--------
np.linalg.svd : Equivalent NumPy Operation
da.linalg.svd_compressed : Randomized SVD for fully chunked arrays
dask.array.linalg.tsqr : QR factorization for tall-and-skinny arrays
dask.array.utils.svd_flip : Sign normalization for singular vectors
"""
nb = a.numblocks
if a.ndim != 2:
raise ValueError(
"Array must be 2D.\n"
"Input shape: {}\n"
"Input ndim: {}\n".format(a.shape, a.ndim)
)
if nb[0] > 1 and nb[1] > 1:
raise NotImplementedError(
"Array must be chunked in one dimension only. "
"This function (svd) only supports tall-and-skinny or short-and-fat "
"matrices (see da.linalg.svd_compressed for SVD on fully chunked arrays).\n"
"Input shape: {}\n"
"Input numblocks: {}\n".format(a.shape, nb)
)
# Single-chunk case
if nb[0] == nb[1] == 1:
m, n = a.shape
k = min(a.shape)
mu, ms, mv = np.linalg.svd(
ones_like_safe(a._meta, shape=(1, 1), dtype=a._meta.dtype)
)
u, s, v = delayed(np.linalg.svd, nout=3)(a, full_matrices=False)
u = from_delayed(u, shape=(m, k), meta=mu)
s = from_delayed(s, shape=(k,), meta=ms)
v = from_delayed(v, shape=(k, n), meta=mv)
# Multi-chunk cases
else:
# Tall-and-skinny case
if nb[0] > nb[1]:
u, s, v = tsqr(a, compute_svd=True)
truncate = a.shape[0] < a.shape[1]
# Short-and-fat case
else:
vt, s, ut = tsqr(a.T, compute_svd=True)
u, s, v = ut.T, s, vt.T
truncate = a.shape[0] > a.shape[1]
# Only when necessary, remove extra singular vectors if array
# has shape that contradicts chunking, e.g. the array is a
# column of chunks but still has more columns than rows overall
if truncate:
k = min(a.shape)
u, v = u[:, :k], v[:k, :]
if coerce_signs:
u, v = svd_flip(u, v)
return u, s, v
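# Illustrative usage sketch for svd (not part of the library; the helper name and
# shapes are hypothetical): a thin SVD of a tall-and-skinny array. Arrays chunked
# in both dimensions should use svd_compressed instead, as the warning above notes.
def _svd_usage_sketch():
    import dask.array as da
    x = da.random.random((50000, 100), chunks=(5000, 100))
    u, s, v = svd(x)
    return u, s, v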
def _solve_triangular_lower(a, b):
import scipy.linalg
return scipy.linalg.solve_triangular(a, b, lower=True)
def lu(a):
"""
Compute the lu decomposition of a matrix.
Examples
--------
>>> p, l, u = da.linalg.lu(x) # doctest: +SKIP
Returns
-------
p: Array, permutation matrix
l: Array, lower triangular matrix with unit diagonal.
u: Array, upper triangular matrix
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError("Dimension must be 2 to perform lu decomposition")
xdim, ydim = a.shape
if xdim != ydim:
raise ValueError("Input must be a square matrix to perform lu decomposition")
if not len(set(a.chunks[0] + a.chunks[1])) == 1:
msg = (
"All chunks must be a square matrix to perform lu decomposition. "
"Use .rechunk method to change the size of chunks."
)
raise ValueError(msg)
vdim = len(a.chunks[0])
hdim = len(a.chunks[1])
token = tokenize(a)
name_lu = "lu-lu-" + token
name_p = "lu-p-" + token
name_l = "lu-l-" + token
name_u = "lu-u-" + token
# for internal calculation
name_p_inv = "lu-p-inv-" + token
name_l_permuted = "lu-l-permute-" + token
name_u_transposed = "lu-u-transpose-" + token
name_plu_dot = "lu-plu-dot-" + token
name_lu_dot = "lu-lu-dot-" + token
dsk = {}
for i in range(min(vdim, hdim)):
target = (a.name, i, i)
if i > 0:
prevs = []
for p in range(i):
prev = name_plu_dot, i, p, p, i
dsk[prev] = (np.dot, (name_l_permuted, i, p), (name_u, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
# diagonal block
dsk[name_lu, i, i] = (scipy.linalg.lu, target)
# sweep to horizontal
for j in range(i + 1, hdim):
target = (np.dot, (name_p_inv, i, i), (a.name, i, j))
if i > 0:
prevs = []
for p in range(i):
prev = name_lu_dot, i, p, p, j
dsk[prev] = (np.dot, (name_l, i, p), (name_u, p, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[name_lu, i, j] = (_solve_triangular_lower, (name_l, i, i), target)
# sweep to vertical
for k in range(i + 1, vdim):
target = (a.name, k, i)
if i > 0:
prevs = []
for p in range(i):
prev = name_plu_dot, k, p, p, i
dsk[prev] = (np.dot, (name_l_permuted, k, p), (name_u, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
# solving x.dot(u) = target is equal to u.T.dot(x.T) = target.T
dsk[name_lu, k, i] = (
np.transpose,
(
_solve_triangular_lower,
(name_u_transposed, i, i),
(np.transpose, target),
),
)
for i in range(min(vdim, hdim)):
for j in range(min(vdim, hdim)):
if i == j:
dsk[name_p, i, j] = (operator.getitem, (name_lu, i, j), 0)
dsk[name_l, i, j] = (operator.getitem, (name_lu, i, j), 1)
dsk[name_u, i, j] = (operator.getitem, (name_lu, i, j), 2)
# permuted l is required to be propagated to i > j blocks
dsk[name_l_permuted, i, j] = (np.dot, (name_p, i, j), (name_l, i, j))
dsk[name_u_transposed, i, j] = (np.transpose, (name_u, i, j))
# transposed permutation matrix is equal to its inverse
dsk[name_p_inv, i, j] = (np.transpose, (name_p, i, j))
elif i > j:
dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
# calculations are performed using permuted l,
# thus the result should be reverted by inverted (=transposed) p
# to have the same row order as diagonal blocks
dsk[name_l, i, j] = (np.dot, (name_p_inv, i, i), (name_lu, i, j))
dsk[name_u, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_l_permuted, i, j] = (name_lu, i, j)
else:
dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_l, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_u, i, j] = (name_lu, i, j)
# l_permuted is not referred in upper triangulars
pp, ll, uu = scipy.linalg.lu(np.ones(shape=(1, 1), dtype=a.dtype))
pp_meta = meta_from_array(a, dtype=pp.dtype)
ll_meta = meta_from_array(a, dtype=ll.dtype)
uu_meta = meta_from_array(a, dtype=uu.dtype)
graph = HighLevelGraph.from_collections(name_p, dsk, dependencies=[a])
p = Array(graph, name_p, shape=a.shape, chunks=a.chunks, meta=pp_meta)
graph = HighLevelGraph.from_collections(name_l, dsk, dependencies=[a])
l = Array(graph, name_l, shape=a.shape, chunks=a.chunks, meta=ll_meta)
graph = HighLevelGraph.from_collections(name_u, dsk, dependencies=[a])
u = Array(graph, name_u, shape=a.shape, chunks=a.chunks, meta=uu_meta)
return p, l, u
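# Illustrative usage sketch for lu (not part of the library; the helper name and
# sizes are hypothetical): blocked LU requires a square matrix tiled into square
# chunks of a single size, as validated above.
def _lu_usage_sketch():
    import dask.array as da
    x = da.random.random((4000, 4000), chunks=(1000, 1000))
    p, l, u = lu(x)
    return p, l, u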
def solve_triangular(a, b, lower=False):
"""
Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : (M, M) array_like
A triangular matrix
b : (M,) or (M, N) array_like
Right-hand side matrix in `a x = b`
lower : bool, optional
Use only data contained in the lower triangle of `a`.
Default is to use upper triangle.
Returns
-------
x : (M,) or (M, N) array
Solution to the system `a x = b`. Shape of return matches `b`.
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError("a must be 2 dimensional")
if b.ndim <= 2:
if a.shape[1] != b.shape[0]:
raise ValueError("a.shape[1] and b.shape[0] must be equal")
if a.chunks[1] != b.chunks[0]:
msg = (
"a.chunks[1] and b.chunks[0] must be equal. "
"Use .rechunk method to change the size of chunks."
)
raise ValueError(msg)
else:
raise ValueError("b must be 1 or 2 dimensional")
vchunks = len(a.chunks[1])
hchunks = 1 if b.ndim == 1 else len(b.chunks[1])
token = tokenize(a, b, lower)
name = "solve-triangular-" + token
# for internal calculation
# (name, i, j, k, l) corresponds to a_ij.dot(b_kl)
name_mdot = "solve-tri-dot-" + token
def _b_init(i, j):
if b.ndim == 1:
return b.name, i
else:
return b.name, i, j
def _key(i, j):
if b.ndim == 1:
return name, i
else:
return name, i, j
dsk = {}
if lower:
for i in range(vchunks):
for j in range(hchunks):
target = _b_init(i, j)
if i > 0:
prevs = []
for k in range(i):
prev = name_mdot, i, k, k, j
dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[_key(i, j)] = (_solve_triangular_lower, (a.name, i, i), target)
else:
for i in range(vchunks):
for j in range(hchunks):
target = _b_init(i, j)
if i < vchunks - 1:
prevs = []
for k in range(i + 1, vchunks):
prev = name_mdot, i, k, k, j
dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[_key(i, j)] = (
scipy.linalg.solve_triangular,
(a.name, i, i),
target,
)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[a, b])
res = _solve_triangular_lower(
np.array([[1, 0], [1, 2]], dtype=a.dtype), np.array([0, 1], dtype=b.dtype)
)
meta = meta_from_array(a, b.ndim, dtype=res.dtype)
return Array(graph, name, shape=b.shape, chunks=b.chunks, meta=meta)
def solve(a, b, sym_pos=False):
"""
Solve the equation ``a x = b`` for ``x``. By default, use LU
decomposition and forward / backward substitutions. When ``sym_pos`` is
``True``, use Cholesky decomposition.
Parameters
----------
a : (M, M) array_like
A square matrix.
b : (M,) or (M, N) array_like
Right-hand side matrix in ``a x = b``.
sym_pos : bool
Assume a is symmetric and positive definite. If ``True``, use Cholesky
decomposition.
Returns
-------
x : (M,) or (M, N) Array
Solution to the system ``a x = b``. Shape of the return matches the
shape of `b`.
"""
if sym_pos:
l, u = _cholesky(a)
else:
p, l, u = lu(a)
b = p.T.dot(b)
uy = solve_triangular(l, b, lower=True)
return solve_triangular(u, uy)
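# Illustrative usage sketch for solve (not part of the library; the helper name and
# sizes are hypothetical): the default LU path and the Cholesky path for a
# symmetric positive definite system.
def _solve_usage_sketch():
    import dask.array as da
    a = da.random.random((2000, 2000), chunks=(500, 500))
    b = da.random.random((2000,), chunks=(500,))
    x = solve(a, b)  # LU + forward/backward substitution
    spd = a.dot(a.T) + 2000 * da.eye(2000, chunks=500)  # SPD by construction
    x_spd = solve(spd, b, sym_pos=True)  # Cholesky path
    return x, x_spd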
def inv(a):
"""
Compute the inverse of a matrix with LU decomposition and
forward / backward substitutions.
Parameters
----------
a : array_like
Square matrix to be inverted.
Returns
-------
ainv : Array
Inverse of the matrix `a`.
"""
return solve(a, eye(a.shape[0], chunks=a.chunks[0][0]))
def _cholesky_lower(a):
import scipy.linalg
return scipy.linalg.cholesky(a, lower=True)
def cholesky(a, lower=False):
"""
Returns the Cholesky decomposition, :math:`A = L L^*` or
:math:`A = U^* U` of a Hermitian positive-definite matrix A.
Parameters
----------
a : (M, M) array_like
Matrix to be decomposed
lower : bool, optional
Whether to compute the upper or lower triangular Cholesky
factorization. Default is upper-triangular.
Returns
-------
c : (M, M) Array
Upper- or lower-triangular Cholesky factor of `a`.
"""
l, u = _cholesky(a)
if lower:
return l
else:
return u
def _cholesky(a):
"""
Private function to perform Cholesky decomposition, which returns both
lower and upper triangulars.
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError("Dimension must be 2 to perform cholesky decomposition")
xdim, ydim = a.shape
if xdim != ydim:
raise ValueError(
"Input must be a square matrix to perform cholesky decomposition"
)
if not len(set(a.chunks[0] + a.chunks[1])) == 1:
msg = (
"All chunks must be a square matrix to perform cholesky decomposition. "
"Use .rechunk method to change the size of chunks."
)
raise ValueError(msg)
vdim = len(a.chunks[0])
hdim = len(a.chunks[1])
token = tokenize(a)
name = "cholesky-" + token
# (name_lt_dot, i, j, k, l) corresponds to l_ij.dot(l_kl.T)
name_lt_dot = "cholesky-lt-dot-" + token
# because transposed results are needed for calculation,
# we can build graph for upper triangular simultaneously
name_upper = "cholesky-upper-" + token
# calculates lower triangulars because subscriptions get simpler
dsk = {}
for i in range(vdim):
for j in range(hdim):
if i < j:
dsk[name, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_upper, j, i] = (name, i, j)
elif i == j:
target = (a.name, i, j)
if i > 0:
prevs = []
for p in range(i):
prev = name_lt_dot, i, p, i, p
dsk[prev] = (np.dot, (name, i, p), (name_upper, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[name, i, i] = (_cholesky_lower, target)
dsk[name_upper, i, i] = (np.transpose, (name, i, i))
else:
# solving x.dot(L11.T) = (A21 - L20.dot(L10.T)) is equal to
# L11.dot(x.T) = A21.T - L10.dot(L20.T)
# L11.dot(x.T) = A12 - L10.dot(L02)
target = (a.name, j, i)
if j > 0:
prevs = []
for p in range(j):
prev = name_lt_dot, j, p, i, p
dsk[prev] = (np.dot, (name, j, p), (name_upper, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[name_upper, j, i] = (_solve_triangular_lower, (name, j, j), target)
dsk[name, i, j] = (np.transpose, (name_upper, j, i))
graph_upper = HighLevelGraph.from_collections(name_upper, dsk, dependencies=[a])
graph_lower = HighLevelGraph.from_collections(name, dsk, dependencies=[a])
cho = scipy.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))
meta = meta_from_array(a, dtype=cho.dtype)
lower = Array(graph_lower, name, shape=a.shape, chunks=a.chunks, meta=meta)
# do not use .T, because part of transposed blocks are already calculated
upper = Array(graph_upper, name_upper, shape=a.shape, chunks=a.chunks, meta=meta)
return lower, upper
def _sort_decreasing(x):
x[::-1].sort()
return x
def lstsq(a, b):
"""
Return the least-squares solution to a linear matrix equation using
QR decomposition.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
Returns
-------
x : {(N,), (N, K)} Array
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(1,), (K,)} Array
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : Array
Rank of matrix `a`.
s : (min(M, N),) Array
Singular values of `a`.
"""
q, r = qr(a)
x = solve_triangular(r, q.T.dot(b))
residuals = b - a.dot(x)
residuals = (residuals ** 2).sum(axis=0, keepdims=b.ndim == 1)
token = tokenize(a, b)
    # r must be a triangular matrix with a single block
# rank
rname = "lstsq-rank-" + token
rdsk = {(rname,): (np.linalg.matrix_rank, (r.name, 0, 0))}
graph = HighLevelGraph.from_collections(rname, rdsk, dependencies=[r])
# rank must be an integer
rank = Array(graph, rname, shape=(), chunks=(), dtype=int)
# singular
sname = "lstsq-singular-" + token
rt = r.T
sdsk = {
(sname, 0): (
_sort_decreasing,
(np.sqrt, (np.linalg.eigvals, (np.dot, (rt.name, 0, 0), (r.name, 0, 0)))),
)
}
graph = HighLevelGraph.from_collections(sname, sdsk, dependencies=[rt, r])
_, _, _, ss = np.linalg.lstsq(
np.array([[1, 0], [1, 2]], dtype=a.dtype),
np.array([0, 1], dtype=b.dtype),
rcond=-1,
)
meta = meta_from_array(r, 1, dtype=ss.dtype)
s = Array(graph, sname, shape=(r.shape[0],), chunks=r.shape[0], meta=meta)
return x, residuals, rank, s
@derived_from(np.linalg)
def norm(x, ord=None, axis=None, keepdims=False):
if axis is None:
axis = tuple(range(x.ndim))
elif isinstance(axis, Number):
axis = (int(axis),)
else:
axis = tuple(axis)
if len(axis) > 2:
raise ValueError("Improper number of dimensions to norm.")
if ord == "fro":
ord = None
if len(axis) == 1:
raise ValueError("Invalid norm order for vectors.")
# Coerce to double precision.
r = x.astype(np.promote_types(x.dtype, float))
if ord is None:
r = (abs(r) ** 2).sum(axis=axis, keepdims=keepdims) ** 0.5
elif ord == "nuc":
if len(axis) == 1:
raise ValueError("Invalid norm order for vectors.")
if x.ndim > 2:
raise NotImplementedError("SVD based norm not implemented for ndim > 2")
r = svd(x)[1][None].sum(keepdims=keepdims)
elif ord == np.inf:
r = abs(r)
if len(axis) == 1:
r = r.max(axis=axis, keepdims=keepdims)
else:
r = r.sum(axis=axis[1], keepdims=True).max(axis=axis[0], keepdims=True)
if keepdims is False:
r = r.squeeze(axis=axis)
elif ord == -np.inf:
r = abs(r)
if len(axis) == 1:
r = r.min(axis=axis, keepdims=keepdims)
else:
r = r.sum(axis=axis[1], keepdims=True).min(axis=axis[0], keepdims=True)
if keepdims is False:
r = r.squeeze(axis=axis)
elif ord == 0:
if len(axis) == 2:
raise ValueError("Invalid norm order for matrices.")
r = (r != 0).astype(r.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
r = abs(r)
if len(axis) == 1:
r = r.sum(axis=axis, keepdims=keepdims)
else:
r = r.sum(axis=axis[0], keepdims=True).max(axis=axis[1], keepdims=True)
if keepdims is False:
r = r.squeeze(axis=axis)
elif len(axis) == 2 and ord == -1:
r = abs(r).sum(axis=axis[0], keepdims=True).min(axis=axis[1], keepdims=True)
if keepdims is False:
r = r.squeeze(axis=axis)
elif len(axis) == 2 and ord == 2:
if x.ndim > 2:
raise NotImplementedError("SVD based norm not implemented for ndim > 2")
r = svd(x)[1][None].max(keepdims=keepdims)
elif len(axis) == 2 and ord == -2:
if x.ndim > 2:
raise NotImplementedError("SVD based norm not implemented for ndim > 2")
r = svd(x)[1][None].min(keepdims=keepdims)
else:
if len(axis) == 2:
raise ValueError("Invalid norm order for matrices.")
r = (abs(r) ** ord).sum(axis=axis, keepdims=keepdims) ** (1.0 / ord)
return r
| 34.456594 | 96 | 0.563208 |
d8a895e790ffa49438b10aa6e62579ebeafa05ec | 7,930 | py | Python | nova/api/openstack/placement/handlers/resource_provider.py | ZhanHan/nova | 4033e0166ca16ef380705cfdd928083be8bf4f68 | ["Apache-2.0"] | 1 | 2019-07-29T10:30:24.000Z | 2019-07-29T10:30:24.000Z | nova/api/openstack/placement/handlers/resource_provider.py | ZhanHan/nova | 4033e0166ca16ef380705cfdd928083be8bf4f68 | ["Apache-2.0"] | null | null | null | nova/api/openstack/placement/handlers/resource_provider.py | ZhanHan/nova | 4033e0166ca16ef380705cfdd928083be8bf4f68 | ["Apache-2.0"] | 1 | 2020-07-24T00:40:05.000Z | 2020-07-24T00:40:05.000Z |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API handlers for resource providers."""
import copy
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import webob
from nova.api.openstack.placement import util
from nova import exception
from nova.i18n import _
from nova import objects
POST_RESOURCE_PROVIDER_SCHEMA = {
"type": "object",
"properties": {
"name": {
"type": "string",
"maxLength": 200
},
"uuid": {
"type": "string",
"format": "uuid"
}
},
"required": [
"name"
],
"additionalProperties": False,
}
# Remove uuid to create the schema for PUTting a resource provider
PUT_RESOURCE_PROVIDER_SCHEMA = copy.deepcopy(POST_RESOURCE_PROVIDER_SCHEMA)
PUT_RESOURCE_PROVIDER_SCHEMA['properties'].pop('uuid')
def _serialize_links(environ, resource_provider):
url = util.resource_provider_url(environ, resource_provider)
links = [{'rel': 'self', 'href': url}]
for rel in ('aggregates', 'inventories', 'usages'):
links.append({'rel': rel, 'href': '%s/%s' % (url, rel)})
return links
def _serialize_provider(environ, resource_provider):
data = {
'uuid': resource_provider.uuid,
'name': resource_provider.name,
'generation': resource_provider.generation,
'links': _serialize_links(environ, resource_provider)
}
return data
def _serialize_providers(environ, resource_providers):
output = []
for provider in resource_providers:
provider_data = _serialize_provider(environ, provider)
output.append(provider_data)
return {"resource_providers": output}
@webob.dec.wsgify
@util.require_content('application/json')
def create_resource_provider(req):
"""POST to create a resource provider.
On success return a 201 response with an empty body and a location
header pointing to the newly created resource provider.
"""
context = req.environ['placement.context']
data = util.extract_json(req.body, POST_RESOURCE_PROVIDER_SCHEMA)
try:
uuid = data.get('uuid', uuidutils.generate_uuid())
resource_provider = objects.ResourceProvider(
context, name=data['name'], uuid=uuid)
resource_provider.create()
except db_exc.DBDuplicateEntry as exc:
raise webob.exc.HTTPConflict(
_('Conflicting resource provider already exists: %(error)s') %
{'error': exc},
json_formatter=util.json_error_formatter)
except exception.ObjectActionError as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to create resource provider %(rp_uuid)s: %(error)s') %
{'rp_uuid': uuid, 'error': exc},
json_formatter=util.json_error_formatter)
req.response.location = util.resource_provider_url(
req.environ, resource_provider)
req.response.status = 201
req.response.content_type = None
return req.response
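# Client-side sketch of the request this handler accepts (illustrative only; the
# endpoint URL and the use of the `requests` library are assumptions, not part of
# the placement API code).
def _create_resource_provider_request_sketch():
    import json
    import requests
    body = {"name": "compute-node-1"}  # "uuid" is optional; one is generated when omitted
    resp = requests.post(
        "http://placement.example/resource_providers",
        data=json.dumps(body),
        headers={"Content-Type": "application/json"},
    )
    # On success: 201 with an empty body and a Location header for the new provider.
    return resp.status_code, resp.headers.get("location")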
@webob.dec.wsgify
def delete_resource_provider(req):
"""DELETE to destroy a single resource provider.
On success return a 204 and an empty body.
"""
uuid = util.wsgi_path_item(req.environ, 'uuid')
context = req.environ['placement.context']
# The containing application will catch a not found here.
try:
resource_provider = objects.ResourceProvider.get_by_uuid(
context, uuid)
resource_provider.destroy()
except exception.ResourceProviderInUse as exc:
raise webob.exc.HTTPConflict(
_('Unable to delete resource provider %(rp_uuid)s: %(error)s') %
{'rp_uuid': uuid, 'error': exc},
json_formatter=util.json_error_formatter)
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_("No resource provider with uuid %s found for delete") % uuid)
req.response.status = 204
req.response.content_type = None
return req.response
@webob.dec.wsgify
@util.check_accept('application/json')
def get_resource_provider(req):
"""Get a single resource provider.
On success return a 200 with an application/json body representing
the resource provider.
"""
uuid = util.wsgi_path_item(req.environ, 'uuid')
# The containing application will catch a not found here.
context = req.environ['placement.context']
resource_provider = objects.ResourceProvider.get_by_uuid(
context, uuid)
req.response.body = jsonutils.dumps(
_serialize_provider(req.environ, resource_provider))
req.response.content_type = 'application/json'
return req.response
@webob.dec.wsgify
@util.check_accept('application/json')
def list_resource_providers(req):
"""GET a list of resource providers.
On success return a 200 and an application/json body representing
a collection of resource providers.
"""
context = req.environ['placement.context']
allowed_filters = set(objects.ResourceProviderList.allowed_filters)
passed_filters = set(req.GET.keys())
invalid_filters = passed_filters - allowed_filters
if invalid_filters:
raise webob.exc.HTTPBadRequest(
_('Invalid filters: %(filters)s') %
{'filters': ', '.join(invalid_filters)},
json_formatter=util.json_error_formatter)
if 'uuid' in req.GET and not uuidutils.is_uuid_like(req.GET['uuid']):
raise webob.exc.HTTPBadRequest(
_('Invalid uuid value: %(uuid)s') % {'uuid': req.GET['uuid']},
json_formatter=util.json_error_formatter)
filters = {}
for attr in objects.ResourceProviderList.allowed_filters:
if attr in req.GET:
filters[attr] = req.GET[attr]
resource_providers = objects.ResourceProviderList.get_all_by_filters(
context, filters)
response = req.response
response.body = jsonutils.dumps(_serialize_providers(
req.environ, resource_providers))
response.content_type = 'application/json'
return response
@webob.dec.wsgify
@util.require_content('application/json')
def update_resource_provider(req):
"""PUT to update a single resource provider.
On success return a 200 response with a representation of the updated
resource provider.
"""
uuid = util.wsgi_path_item(req.environ, 'uuid')
context = req.environ['placement.context']
# The containing application will catch a not found here.
resource_provider = objects.ResourceProvider.get_by_uuid(
context, uuid)
data = util.extract_json(req.body, PUT_RESOURCE_PROVIDER_SCHEMA)
resource_provider.name = data['name']
try:
resource_provider.save()
except db_exc.DBDuplicateEntry as exc:
raise webob.exc.HTTPConflict(
_('Conflicting resource provider already exists: %(error)s') %
{'error': exc},
json_formatter=util.json_error_formatter)
except exception.ObjectActionError as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to save resource provider %(rp_uuid)s: %(error)s') %
{'rp_uuid': uuid, 'error': exc},
json_formatter=util.json_error_formatter)
req.response.body = jsonutils.dumps(
_serialize_provider(req.environ, resource_provider))
req.response.status = 200
req.response.content_type = 'application/json'
return req.response
| 34.329004 | 78 | 0.686129 |
8548a160f487fd93874aa05003877074c4aa1882 | 505 | py | Python | bookings/templatetags/bookings_filters.py | Dheavyman/airtech-flight | e6eea7a121d0b372164492532358cd566112285a | ["MIT"] | null | null | null | bookings/templatetags/bookings_filters.py | Dheavyman/airtech-flight | e6eea7a121d0b372164492532358cd566112285a | ["MIT"] | 8 | 2020-02-11T23:48:18.000Z | 2022-03-11T23:43:47.000Z | bookings/templatetags/bookings_filters.py | Dheavyman/airtech-flight | e6eea7a121d0b372164492532358cd566112285a | ["MIT"] | null | null | null |
from datetime import datetime
from django.utils.dateparse import parse_datetime
from django.template import Library
from django.template.defaultfilters import stringfilter
register = Library()
@register.filter(name='ctime')
@stringfilter
def custom_time(value):
value = parse_datetime(value)
return datetime.strftime(value, '%-I:%M %p')
@register.filter(name='cdate')
@stringfilter
def custom_date(value):
value = parse_datetime(value)
return datetime.strftime(value, '%a, %d %b, %Y')
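# Illustrative check of the filters above (hypothetical helper, not part of the
# app). Note that '%-I' is a glibc/BSD strftime extension, so this assumes a
# Linux or macOS runtime.
def _filters_example():
    iso = "2020-02-11T23:48:18"
    return custom_time(iso), custom_date(iso)  # e.g. ('11:48 PM', 'Tue, 11 Feb, 2020')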
| 25.25 | 55 | 0.758416 |
4ba0fd1b1dcde6a9d460bd58b751baee7bf02140 | 2,462 | py | Python | fakedetection/src/main/docker/extraction/extract_faces_video.py | LafLaurine/imac2-projetTUT | 9a57c1bd1ea841fecda894575a8e3a65057addef | ["MIT"] | 3 | 2020-02-19T19:16:15.000Z | 2021-01-24T13:46:28.000Z | fakedetection/src/main/docker/extraction/extract_faces_video.py | LafLaurine/imac2-projetTUT | 9a57c1bd1ea841fecda894575a8e3a65057addef | ["MIT"] | 5 | 2022-02-14T17:46:40.000Z | 2022-02-27T20:21:36.000Z | fakedetection/src/main/docker/extraction/extract_faces_video.py | LafLaurine/imac2-projetTUT | 9a57c1bd1ea841fecda894575a8e3a65057addef | ["MIT"] | null | null | null |
import json
import distutils.util
import youtube_dl
import os
import redis
from flask import Flask
from extract.face_extraction import FaceExtractor
app = Flask(__name__)
cache = redis.Redis(host='redis', port=6379)
# base arguments
type_tracker = "CSRT"
are_saved = True
log_enabled = True
min_confidence = 0.85
start_frame_default = 0
end_frame_default = None
step_frame_default = 25
max_frame_default = 50
def download_video(url, output, name, quiet=True):
ydl_opts = {}
ydl_opts['outtmpl'] = output + name
ydl_opts['quiet'] = quiet
ydl_opts['merge_output_format'] = 'mkv'
ydl_opts['format'] = 'bestvideo+bestaudio'
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
result = ydl.extract_info(url, download=False)
outfile = ydl.prepare_filename(result) + '.' + result['ext']
return outfile
@app.route('/extract_faces_video')
def extract_faces_videos():
    if distutils.util.strtobool(os.getenv("video_download")):
        src = download_video(os.getenv("video_url"), './input/', os.getenv("name_video_downloaded"))
else:
src = os.getenv("input_path")
dir_out = os.getenv("output_path")
method_detection = os.getenv("method_detection")
start_frame = int(os.getenv("start_frame"))
end_frame = int(os.getenv("end_frame"))
step_frame = int(os.getenv("step_frame"))
max_frame = int(os.getenv("max_frame"))
    are_warped = distutils.util.strtobool(os.getenv("are_warped"))
    are_culled = distutils.util.strtobool(os.getenv("are_culled"))
are_saved_landmarks = distutils.util.strtobool(os.getenv("are_saved_landmarks"))
is_saved_rectangle = distutils.util.strtobool(os.getenv("is_saved_rectangle"))
FaceExtractor.extract_faces_from_video(
src=src,
method_detection=method_detection,
start_frame=start_frame,
end_frame=end_frame,
step_frame=step_frame,
max_frame=max_frame,
min_confidence=min_confidence,
type_tracker=type_tracker,
are_warped=are_warped,
are_culled=are_culled,
are_saved=are_saved,
are_saved_landmarks=are_saved_landmarks,
is_saved_rectangle=is_saved_rectangle,
dir_out=dir_out,
log_enabled=log_enabled
)
s = '{"message" : "Faces from video extracted"}'
return json.loads(s)
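# Example configuration for the route above (illustrative values only; the route
# reads these from the process environment, typically set via Docker). The
# "method_detection" value is an assumption and depends on what FaceExtractor accepts.
def _example_environment():
    return {
        "video_download": "true",
        "video_url": "https://example.com/some-video",
        "name_video_downloaded": "sample",
        "input_path": "./input/sample.mkv",
        "output_path": "./output/",
        "method_detection": "dnn",
        "start_frame": "0",
        "end_frame": "250",
        "step_frame": "25",
        "max_frame": "50",
        "are_warped": "true",
        "are_culled": "false",
        "are_saved_landmarks": "false",
        "is_saved_rectangle": "true",
    }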
| 33.726027 | 98 | 0.672218 |
efd87d10e3bb1910d6dcd902e310e125303c8fa1 | 1,275 | py | Python | SpringSemester2021/13_Clustering_Evaluation-AdvancedMethods/Ex13_02_Sol.py | KretschiGL/DataScienceLecture | e6bbb3efd531b08aa4757fb6e89d12e959678a44 | ["MIT"] | 1 | 2021-05-09T11:02:35.000Z | 2021-05-09T11:02:35.000Z | SpringSemester2021/13_Clustering_Evaluation-AdvancedMethods/Ex13_02_Sol.py | KretschiGL/DataScienceLecture | e6bbb3efd531b08aa4757fb6e89d12e959678a44 | ["MIT"] | null | null | null | SpringSemester2021/13_Clustering_Evaluation-AdvancedMethods/Ex13_02_Sol.py | KretschiGL/DataScienceLecture | e6bbb3efd531b08aa4757fb6e89d12e959678a44 | ["MIT"] | 1 | 2020-05-26T15:35:40.000Z | 2020-05-26T15:35:40.000Z |
# Init Solution
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
from IPython.display import display, Markdown
# Init Solution completed
display(Markdown("##### Loading Data"))
data = pd.read_csv("./Ex13_02_Data.csv")
display(data.head(5))
display(Markdown("##### Pairplot"))
sns.pairplot(data)
plt.show()
display(Markdown("##### Pairplot (color = Cylinders)"))
sns.pairplot(data, hue="cylinders")
plt.show()
display(Markdown("##### Horsepower vs MPG"))
fig, ax = plt.subplots(figsize=(10,10))
sns.kdeplot(data["horsepower"], data["mpg"], ax=ax)
plt.show()
display(Markdown("##### Acceleration vs Weight"))
fig, ax = plt.subplots(figsize=(10,10))
data.plot.scatter("acceleration", "weight", ax=ax, c="cylinders", cmap="rainbow", alpha=.5)
sns.kdeplot(data["acceleration"], data["weight"], ax=ax)
plt.show()
display(Markdown("##### Weight vs Acceleration"))
sns.jointplot(data=data, x="weight", y="acceleration")
plt.show()
display(Markdown("##### Weight vs Acceleration (KDE)"))
sns.jointplot(data=data, x="weight", y="acceleration", kind="kde")
plt.show()
display(Markdown("##### Displacement vs Horsepower"))
sns.jointplot(data=data, x="displacement", y="horsepower", kind="hex")
plt.show()
| 27.717391 | 91 | 0.705882 |
6324d02fceb8ac5a82479f779bf5dea1537ecd56 | 131 | py | Python | 1_Kithgard_Dungeon/022-Descending_Further/descending_further.py | katitek/Code-Combat | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | ["MIT"] | null | null | null | 1_Kithgard_Dungeon/022-Descending_Further/descending_further.py | katitek/Code-Combat | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | ["MIT"] | null | null | null | 1_Kithgard_Dungeon/022-Descending_Further/descending_further.py | katitek/Code-Combat | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | ["MIT"] | null | null | null |
# You need the Elemental codex 1+ to cast "Haste"
hero.cast("haste", hero)
while True:
hero.moveRight(2)
hero.moveDown()
| 16.375 | 49 | 0.671756 |
0b314e4a11cb3f82bdeb311d91234c19979c20b4 | 4,226 | py | Python | python/paddle/utils/code_gen/generate_op.py | RangeKing/Paddle | 2d87300809ae75d76f5b0b457d8112cb88dc3e27 | ["Apache-2.0"] | 8 | 2016-08-15T07:02:27.000Z | 2016-08-24T09:34:00.000Z | python/paddle/utils/code_gen/generate_op.py | RangeKing/Paddle | 2d87300809ae75d76f5b0b457d8112cb88dc3e27 | ["Apache-2.0"] | null | null | null | python/paddle/utils/code_gen/generate_op.py | RangeKing/Paddle | 2d87300809ae75d76f5b0b457d8112cb88dc3e27 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from itertools import chain
from pathlib import Path
import yaml
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from filters import to_op_attr_type, to_opmaker_name, to_opmaker_name_cstr, to_pascal_case
from tests import is_base_api, is_vec, is_scalar, is_initializer_list, supports_inplace, supports_no_need_buffer
from filters import to_input_name
from parse_utils import to_named_dict
file_loader = FileSystemLoader(Path(__file__).parent / "templates")
env = Environment(
loader=file_loader,
keep_trailing_newline=True,
trim_blocks=True,
lstrip_blocks=True,
undefined=StrictUndefined,
extensions=['jinja2.ext.do'])
env.filters["to_op_attr_type"] = to_op_attr_type
env.filters["to_opmaker_name"] = to_opmaker_name
env.filters["to_pascal_case"] = to_pascal_case
env.filters["to_input_name"] = to_input_name
env.filters["to_opmaker_name_cstr"] = to_opmaker_name_cstr
env.tests["base_api"] = is_base_api
env.tests["vec"] = is_vec
env.tests["scalar"] = is_scalar
env.tests["initializer_list"] = is_initializer_list
env.tests["supports_inplace"] = supports_inplace
env.tests["supports_no_need_buffer"] = supports_no_need_buffer
def main(api_yaml_path, backward_yaml_path, output_op_path,
output_arg_map_path):
with open(api_yaml_path, "rt") as f:
apis = yaml.safe_load(f)
forward_api_dict = to_named_dict(apis)
with open(backward_yaml_path, "rt") as f:
backward_apis = yaml.safe_load(f)
backward_api_dict = to_named_dict(backward_apis)
# fill backward field for an api if another api claims it as forward
for name, backward_api in backward_api_dict.items():
forward_name = backward_api["forward"]["name"]
        if forward_name in forward_api_dict:
            forward_api = forward_api_dict[forward_name]
            if forward_api["backward"] is None:
                forward_api["backward"] = name
        if forward_name in backward_api_dict:
            forward_api = backward_api_dict[forward_name]
            if forward_api["backward"] is None:
                forward_api["backward"] = name
api_dict = {}
api_dict.update(forward_api_dict)
api_dict.update(backward_api_dict)
if len(apis) == 0 and len(backward_apis) == 0:
if os.path.isfile(output_op_path):
os.remove(output_op_path)
if os.path.isfile(output_arg_map_path):
os.remove(output_arg_map_path)
return
op_template = env.get_template('op.c.j2')
with open(output_op_path, "wt") as f:
msg = op_template.render(
apis=apis, backward_apis=backward_apis, api_dict=api_dict)
f.write(msg)
ks_template = env.get_template('ks.c.j2')
with open(output_arg_map_path, 'wt') as f:
msg = ks_template.render(apis=apis, backward_apis=backward_apis)
f.write(msg)
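# Illustrative invocation sketch (not part of the generator; all paths below are
# hypothetical examples): drives main() directly instead of going through argparse.
def _example_invocation():
    main(
        api_yaml_path="parsed_apis/api.parsed.yaml",
        backward_yaml_path="parsed_apis/backward_api.parsed.yaml",
        output_op_path="generated/generated_op.cc",
        output_arg_map_path="generated/generated_sig.cc",
    )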
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate operator file from api yaml.")
parser.add_argument(
'--api_yaml_path', type=str, help="parsed api yaml file.")
parser.add_argument(
'--backward_api_yaml_path',
type=str,
help="parsed backward api yaml file.")
parser.add_argument(
"--output_op_path", type=str, help="path to save generated operators.")
parser.add_argument(
"--output_arg_map_path",
type=str,
help="path to save generated argument mapping functions.")
args = parser.parse_args()
main(args.api_yaml_path, args.backward_api_yaml_path, args.output_op_path,
args.output_arg_map_path)
| 37.070175 | 112 | 0.718883 |
432ed559a0ccdf973a8121809951d46ce9338c3a | 58,537 | py | Python | fasm2bels/models/verilog_modeling.py | litghost/symbiflow-xc-fasm2bels | 9ed2a4d9033fb9bf80060114b6b122a2b7c296d7 | ["0BSD"] | null | null | null | fasm2bels/models/verilog_modeling.py | litghost/symbiflow-xc-fasm2bels | 9ed2a4d9033fb9bf80060114b6b122a2b7c296d7 | ["0BSD"] | null | null | null | fasm2bels/models/verilog_modeling.py | litghost/symbiflow-xc-fasm2bels | 9ed2a4d9033fb9bf80060114b6b122a2b7c296d7 | ["0BSD"] | null | null | null |
""" Core classes for modelling a bitstream back into verilog and routes.
There are 3 modelling elements:
- Bel: A synthesizable element.
- Site: A collection of Bel's, routing sinks and routing sources.
- Module: The root container for all Sites
The modelling approach works as so:
BELs represent a particular tech library instance (e.g. LUT6 or FDRE). These
BELs are connected into the routing fabric or internal site sources via the
Site methods:
- Site.add_sink
- Site.add_source
- Site.add_output_from_internal
- Site.connect_internal
- Site.add_internal_source
BEL parameters should be on the BEL.
In cases where there are multiple instances of a BEL (e.g. LUTs),
Bel.set_bel must be called to ensure that Vivado places the BEL in the exact
location.
"""
import functools
import re
import fasm
from ..make_routes import make_routes, ONE_NET, ZERO_NET, prune_antennas
from ..database.connection_db_utils import get_wire_pkey
def pin_to_wire_and_idx(pin):
""" Break pin name into wire name and vector index.
Arguments
---------
pin : str
Pin name, with optional vector index.
Returns
-------
wire : str
Wire name
idx : int
Vector index
>>> pin_to_wire_and_idx('A')
('A', None)
>>> pin_to_wire_and_idx('A[0]')
('A', 0)
>>> pin_to_wire_and_idx('A[1]')
('A', 1)
"""
idx = pin.find('[')
if idx == -1:
return (pin, None)
else:
assert pin[-1] == ']'
return (pin[:idx], int(pin[idx + 1:-1]))
def make_bus(wires):
""" Combine bus wires into a consecutive bus.
Args:
wires ([str]): Takes list of wire names.
Returns list of (wire/bus name, max bus wire count).
If the wire is NOT a bus, then max bus wire count will be None.
If the wire was part of a bus, then max bus wire count will be the maximum
observed bus index. It is assumed that all buses will be sized as
[max:0].
>>> list(make_bus(['A', 'B']))
[('A', None), ('B', None)]
>>> list(make_bus(['A[0]', 'A[1]', 'B']))
[('A', 1), ('B', None)]
>>> list(make_bus(['A[0]', 'A[1]', 'B[0]']))
[('A', 1), ('B', 0)]
"""
output = {}
buses = {}
for w in wires:
widx = w.rfind('[')
if widx != -1 and w[-1] == ']':
bus = w[0:widx]
idx = int(w[widx + 1:-1])
if bus not in buses:
buses[bus] = []
buses[bus].append(idx)
else:
output[w] = None
for bus, values in buses.items():
output[bus] = max(values)
for name in sorted(output):
yield name, output[name]
def escape_verilog_name(name):
""" Transform net names into escaped id and bus selection (if any)
Args:
name (str): Net name
Returns:
Escape verilog name
>>> escape_verilog_name(
... '$abc$6513$auto$alumacc.cc:474:replace_alu$1259.B_buf[4]')
'\\\\$abc$6513$auto$alumacc.cc:474:replace_alu$1259.B_buf [4]'
>>> escape_verilog_name(
... '$abc$6513$auto$alumacc.cc:474:replace_alu$1259.B_buf[4:0]')
'\\\\$abc$6513$auto$alumacc.cc:474:replace_alu$1259.B_buf[4:0] '
>>> escape_verilog_name(
... '$abc$6513$auto$alumacc.cc:474:replace_alu$1259.B_buf[4:0][0]')
'\\\\$abc$6513$auto$alumacc.cc:474:replace_alu$1259.B_buf[4:0] [0]'
>>> escape_verilog_name(
... 'test')
'\\\\test '
"""
idx = name.rfind('[')
bus_idx = None
if idx != -1 and name[-1] == ']':
try:
bus_idx = int(name[idx + 1:-1])
except ValueError:
pass
if bus_idx is None:
# Escape whole name
return '\\' + name + ' '
return '\\' + name[:idx] + ' ' + name[idx:]
class ConnectionModel(object):
""" Constant, Wire, Bus and NoConnect objects represent a small interface
for Verilog module instance connection descriptions.
"""
def to_string(self, net_map=None):
""" Returns the string representing this models connection in verilog.
Arguments
---------
net_map : map of str to str
Optional wire renaming map. If present, leaf wires should be
renamed through the map.
Returns
-------
str representing valid Verilog to model the connection made by this object.
"""
pass
def iter_wires(self):
""" Iterates over wires present on this object.
Yields
------
Vector index : int
Is None for scalar connections, otherwise an integer that
represents the index into the vector.
Connection : str
Verilog representing this connection.
"""
pass
class Constant(ConnectionModel):
""" Represents a boolean constant, e.g. 1'b0 or 1'b1. """
def __init__(self, value):
assert value in [0, 1]
self.value = value
def to_string(self, net_map=None):
return "1'b{}".format(self.value)
def __repr__(self):
return 'Constant({})'.format(self.value)
def iter_wires(self):
return iter([])
class Wire(ConnectionModel):
""" Represents a single wire connection. """
def __init__(self, wire):
self.wire = wire
def to_string(self, net_map=None):
if net_map is None:
return self.wire
else:
if self.wire in net_map:
return net_map[self.wire]
else:
return self.wire
def __repr__(self):
return 'Wire({})'.format(repr(self.wire))
def iter_wires(self):
yield (None, self.wire)
class Bus(ConnectionModel):
""" Represents a vector wire connection.
Arguments
---------
wires : list of Constant or Wire objects.
"""
def __init__(self, wires):
self.wires = wires
def to_string(self, net_map=None):
return '{' + ', '.join(
wire.to_string(net_map=net_map) for wire in self.wires[::-1]) + '}'
def __repr__(self):
return 'Bus({})'.format(repr(self.wires))
def iter_wires(self):
for idx, wire in enumerate(self.wires):
for _, real_wire in wire.iter_wires():
yield (idx, real_wire)
class NoConnect(ConnectionModel):
""" Represents an unconnected port. """
def __init__(self):
pass
def to_string(self, net_map=None):
return ''
def __repr__(self):
return 'NoConnect()'
def iter_wires(self):
return iter([])
def flatten_wires(wire, wire_assigns, wire_name_net_map):
""" Given a wire, return the source net name (or constant string).
Arguments
---------
wire : str
Wire to translate to source
wire_assigns : dict of str to str
Map of wires to their parents. Equivalent to assign statements in
verilog. Example:
assign A = B;
would be represented as:
{
'A': 'B'
}
wire_name_net_map : dict of str to str
Some wires have net names that originate from the post-synth eblif.
This maps programmatic net names (e.g. CLBLL_L_X12Y110_SLICE_X16Y110_BO5)
to these post-synth eblif names.
"""
while True:
if wire not in wire_assigns:
break
wires = wire_assigns[wire]
assert len(wires) == 1, wires
wire = wires[0]
if wire in wire_name_net_map:
return wire_name_net_map[wire]
else:
if wire in [0, 1]:
return "1'b{}".format(wire)
else:
return wire
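# Illustrative example (hypothetical wire names, not part of the original
# file): with wire_assigns = {'A': ['B'], 'B': ['C']} and an empty
# wire_name_net_map, flatten_wires('A', wire_assigns, {}) follows the chain
# A -> B -> C and returns 'C'.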
class Bel(object):
""" Object to model a BEL. """
def __init__(self, module, name=None, keep=True, priority=0):
""" Construct Bel object.
module (str): Exact tech library name to instance during synthesis.
Example "LUT6_2" or "FDRE".
name (str): Optional name of this bel, used to disambiguate multiple
instances of the same module in a site. If there are multiple
instances of the same module in a site, name must be specified and
unique.
keep (bool): Controls if KEEP, DONT_TOUCH constraints are added to this
instance.
priority (int): Priority for assigning LOC attributes. Lower priority
means LOC is set first. LOC priority should be set to allow
assignment without blocking later elements.
"""
self.module = module
if name is None:
self.name = module
else:
self.name = name
self.connections = {}
self.unused_connections = set()
self.parameters = {}
self.outputs = set()
self.prefix = None
self.site = None
self.keep = keep
self.bel = None
self.nets = None
self.net_names = {}
self.priority = priority
def set_prefix(self, prefix):
""" Set the prefix used for wire and BEL naming.
This method is typically called automatically during
Site.integrate_site. """
self.prefix = prefix
def set_site(self, site):
""" Sets the site string used to set the LOC constraint.
This method is typically called automatically during
Site.integrate_site. """
self.site = site
def set_bel(self, bel):
""" Sets the BEL constraint.
This method should be called if the parent site has multiple instances
of the BEL (e.g. LUT6 in a SLICE).
"""
self.bel = bel
def _prefix_things(self, s):
""" Apply the prefix (if any) to the input string. """
if self.prefix is not None:
return '{}_{}'.format(self.prefix, s)
else:
return s
def get_prefixed_name(self):
return self._prefix_things(self.name)
def get_cell(self, top):
""" Get the cell name of this BEL.
Should only be called after set_prefix has been invoked (if set_prefix
will be called)."""
# The .cname property will be associated with some pin/net combinations
# Use this name if present.
eblif_cnames = set()
for ((pin, idx), net) in self.net_names.items():
cname = top.lookup_cname(pin, idx, net)
if cname is not None:
eblif_cnames.add(cname)
if len(eblif_cnames) > 0:
# Always post-fix with the programmatic name to allow for easier
# cell lookup via something like "*{name}"
return escape_verilog_name('_'.join(eblif_cnames) +
self._prefix_things(self.name))
else:
return self._prefix_things(self.name)
def create_connections(self, top):
""" Create connection model for this BEL.
Returns
-------
dead_wires : list of str
List of wires that represents unconnected input or output wires
in vectors on this BEL.
connections : map of str to ConnectionModel
"""
connections = {}
buses = {}
bus_is_output = {}
for wire, connection in self.connections.items():
if top.is_top_level(connection):
connection_wire = Wire(connection)
elif connection in [0, 1]:
connection_wire = Constant(connection)
else:
if connection is not None:
connection_wire = Wire(self._prefix_things(connection))
else:
connection_wire = None
if '[' in wire:
bus_name, address = wire.split('[')
assert address[-1] == ']', address
wire_is_output = wire in self.outputs
if bus_name not in buses:
buses[bus_name] = {}
bus_is_output[bus_name] = wire_is_output
else:
assert bus_is_output[bus_name] == wire_is_output, (
bus_name,
wire,
bus_is_output[bus_name],
wire_is_output,
)
if connection_wire is not None:
buses[bus_name][int(address[:-1])] = connection_wire
else:
buses[bus_name][int(address[:-1])] = None
else:
if connection_wire is None:
connection_wire = NoConnect()
connections[wire] = connection_wire
dead_wires = []
for bus_name, bus in buses.items():
prefix_bus_name = self._prefix_things(bus_name)
num_elements = max(bus.keys()) + 1
bus_wires = [None for _ in range(num_elements)]
for idx, wire in bus.items():
bus_wires[idx] = wire
for idx, wire in enumerate(bus_wires):
if wire is None:
dead_wire = '_{}_{}_'.format(prefix_bus_name, idx)
dead_wires.append(dead_wire)
bus_wires[idx] = Wire(dead_wire)
connections[bus_name] = Bus(bus_wires)
for unused_connection in self.unused_connections:
connections[unused_connection] = NoConnect()
return dead_wires, connections
def make_net_map(self, top, net_map):
""" Create a mapping of programatic net names to VPR net names.
By default nets are named:
{tile}_{site}_{pin}{pin idx}
For example:
CLBLL_L_X12Y110_SLICE_X16Y110_BO5
This scheme unambiguously names a connection in the design. Initially
all nets and BELs are defined using this scheme to provide a simple
unambiguous way to refer to wires in the design.
However, the parent design may have assigned net names to these wires,
e.g. '$auto$alumacc.cc:474:replace_alu$1273.CO_CHAIN [1]'. This
function builds the association between these two schemes using the
pin to net mapping created via Bel.add_net_name. Bel.add_net_name is
called during site integration to associate Bel pins with net names
via the wire primary key table in the connection database.
During verilog output, the net map can be used to translate the
programmatic names back to the net names from the eblif used during
place and route.
"""
_, connections = self.create_connections(top)
for pin, connection in connections.items():
for idx, wire in connection.iter_wires():
key = (pin, idx)
if key in self.net_names:
if wire in net_map:
assert self.net_names[key] == net_map[wire], (
key, self.net_names[key], net_map[wire])
else:
net_map[wire] = self.net_names[key]
def output_verilog(self, top, net_map, indent=' '):
""" Output the Verilog to represent this BEL. """
dead_wires, connections = self.create_connections(top)
for dead_wire in dead_wires:
yield '{indent}wire [0:0] {wire};'.format(
indent=indent, wire=dead_wire)
yield ''
if self.site is not None:
comment = []
if self.keep:
comment.append('KEEP')
comment.append('DONT_TOUCH')
if self.bel:
comment.append('BEL = "{bel}"'.format(bel=self.bel))
yield '{indent}(* {comment} *)'.format(
indent=indent, comment=', '.join(comment))
yield '{indent}{site} #('.format(indent=indent, site=self.module)
parameters = []
for param, value in sorted(
self.parameters.items(), key=lambda x: x[0]):
parameters.append('{indent}{indent}.{param}({value})'.format(
indent=indent, param=param, value=value))
if parameters:
yield ',\n'.join(parameters)
yield '{indent}) {name} ('.format(
indent=indent, name=self.get_cell(top))
if connections:
yield ',\n'.join(
'.{}({})'.format(port, connections[port].to_string(net_map))
for port in sorted(connections))
yield '{indent});'.format(indent=indent)
def add_net_name(self, pin, net_name):
""" Add name of net attached to this pin ."""
assert pin not in self.net_names
key = pin_to_wire_and_idx(pin)
self.net_names[key] = net_name
class Site(object):
""" Object to model a Site.
A site is a collection of BELs, and sources and sinks that connect the
site to the routing fabric. Sources and sinks exported by the Site will
be used during routing formation.
Wires that are not in the sources and sinks lists will be invisible to
the routing formation step. In particular, site connections that should
be sources and sinks but are not specified will be ignored during routing
formation, and likely end up as disconnected wires.
On the flip side, it is important that specified sinks are always connected
to at least one BEL. If this is not done, antenna nets may be emitted
during routing formation, which will result in a DRC violation.
Parameters
----------
merged_site : bool
Set to true if this site spans multiple sites (e.g. BRAM36 spans
BRAM18_Y0 and BRAM18_Y1), versus a SLICEL, which stays within its
SLICE_X0.
"""
def __init__(self, features, site, tile=None, merged_site=False):
self.bels = []
self.sinks = {}
self.sources = {}
self.outputs = {}
self.internal_sources = {}
self.set_features = set()
self.features = set()
self.post_route_cleanup = None
self.bel_map = {}
self.site_wire_to_wire_pkey = {}
if features:
aparts = features[0].feature.split('.')
for f in features:
if f.value == 0:
continue
if merged_site:
parts = f.feature.split('.')
assert parts[0] == aparts[0]
self.set_features.add(
fasm.SetFasmFeature(
feature='.'.join(parts[1:]),
start=f.start,
end=f.end,
value=f.value,
value_format=f.value_format,
))
else:
parts = f.feature.split('.')
assert parts[0] == aparts[0]
assert parts[1] == aparts[1]
self.set_features.add(
fasm.SetFasmFeature(
feature='.'.join(parts[2:]),
start=f.start,
end=f.end,
value=f.value,
value_format=f.value_format,
))
# Features as strings
self.features = set([f.feature for f in self.set_features])
if tile is None:
self.tile = aparts[0]
else:
self.tile = tile
self.site = site
def has_feature(self, feature):
""" Does this set have the specified feature set? """
return feature in self.features
def has_feature_with_part(self, part):
"""
Returns True when a given site has a feature which contains a
particular part.
"""
for feature in self.features:
parts = feature.split(".")
if part in parts:
return True
return False
def has_feature_containing(self, substr):
"""
Returns True when a given site has a feature which contains a given
substring.
"""
for feature in self.features:
if substr in feature:
return True
return False
def decode_multi_bit_feature(self, feature):
"""
Decodes a "multi-bit" fasm feature. If not present returns 0.
"""
value = 0
for f in self.set_features:
if f.feature.startswith(feature):
for canon_f in fasm.canonical_features(f):
if canon_f.start is None:
value |= 1
else:
value |= (1 << canon_f.start)
return value
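# Illustrative example (hypothetical feature name, not part of the original
# file): if the site's set features include FOO[0] and FOO[5], then
# decode_multi_bit_feature('FOO') returns (1 << 0) | (1 << 5) == 33.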
def add_sink(self, bel, bel_pin, sink):
""" Adds a sink.
Attaches sink to the specified bel.
bel (Bel): Bel object
bel_pin (str): The exact tech library name for the relevant pin. Can be
a bus (e.g. A[5]). The name must identically match the library
name or an error will occur during synthesis.
sink (str): The exact site pin name for this sink. The name must
identically match the site pin name, or an error will be generated
when Site.integrate_site is invoked.
"""
assert bel_pin not in bel.connections
if sink not in self.sinks:
self.sinks[sink] = []
bel.connections[bel_pin] = sink
self.sinks[sink].append((bel, bel_pin))
def mask_sink(self, bel, bel_pin):
""" Mark a BEL pin as not visible in the Verilog.
This bel_pin is effectively removed from the Verilog output, but
may still be routed to during FIXED_ROUTE emission.
"""
assert bel_pin in bel.connections
sink = bel.connections[bel_pin]
sink_idx = None
for idx, (a_bel, a_bel_pin) in enumerate(self.sinks[sink]):
if a_bel is bel and bel_pin == a_bel_pin:
assert sink_idx is None
sink_idx = idx
assert sink_idx is not None, (bel, bel_pin, sink)
self.sinks[sink][sink_idx] = None
del bel.connections[bel_pin]
def rename_sink(self, bel, old_bel_pin, new_bel_pin):
""" Rename a BEL sink from one pin name to another.
new_bel_pin may be a mask'd sink BEL pin.
"""
self.move_sink(bel, old_bel_pin, bel, new_bel_pin)
def move_sink(self, old_bel, old_bel_pin, new_bel, new_bel_pin):
""" Moves sink from one BEL in site to another BEL in site.
new_bel_pin may be a mask'd sink BEL pin.
"""
assert old_bel_pin in old_bel.connections
assert new_bel_pin not in new_bel.connections
new_bel.connections[new_bel_pin] = old_bel.connections[old_bel_pin]
sink = old_bel.connections[old_bel_pin]
del old_bel.connections[old_bel_pin]
sink_idx = None
for idx, (a_bel, a_bel_pin) in enumerate(self.sinks[sink]):
if a_bel is old_bel and a_bel_pin == old_bel_pin:
assert sink_idx is None
sink_idx = idx
assert sink_idx is not None, (old_bel, old_bel_pin, sink)
self.sinks[sink][sink_idx] = (new_bel, new_bel_pin)
def add_source(self, bel, bel_pin, source):
""" Adds a source.
Attaches source to bel.
bel (Bel): Bel object
bel_pin (str): The exact tech library name for the relevant pin. Can be
a bus (e.g. A[5]). The name must identically match the library
name or an error will occur during synthesis.
source (str): The exact site pin name for this source. The name must
identically match the site pin name, or an error will be generated
when Site.integrate_site is invoked.
"""
assert source not in self.sources
assert bel_pin not in bel.connections
bel.connections[bel_pin] = source
bel.outputs.add(bel_pin)
self.sources[source] = (bel, bel_pin)
def rename_source(self, bel, old_bel_pin, new_bel_pin):
""" Rename a BEL source from one pin name to another.
new_bel_pin may be a mask'd source BEL pin.
"""
self.move_source(bel, old_bel_pin, bel, new_bel_pin)
def move_source(self, old_bel, old_bel_pin, new_bel, new_bel_pin):
""" Moves source from one BEL in site to another BEL in site.
new_bel_pin may be a mask'd source BEL pin.
"""
assert old_bel_pin in old_bel.connections
assert new_bel_pin not in new_bel.connections
source = old_bel.connections[old_bel_pin]
a_bel, a_bel_pin = self.sources[source]
assert a_bel is old_bel
assert a_bel_pin == old_bel_pin
self.sources[source] = (new_bel, new_bel_pin)
def add_output_from_internal(self, source, internal_source):
""" Adds a source from a site internal source.
This is used to convert an internal_source wire to a site source.
source (str): The exact site pin name for this source. The name must
identically match the site pin name, or an error will be generated
when Site.integrate_site is invoked.
internal_source (str): The internal_source must match the internal
source name provided to Site.add_internal_source earlier.
"""
assert source not in self.sources, source
assert internal_source in self.internal_sources, internal_source
self.outputs[source] = internal_source
self.sources[source] = self.internal_sources[internal_source]
def add_output_from_output(self, source, other_source):
""" Adds an output wire from an existing source wire.
The new output wire is not a source, but will participate in routing
formation.
source (str): The exact site pin name for this source. The name must
identically match the site pin name, or an error will be generated
when Site.integrate_site is invoked.
other_source (str): The name of an existing source generated from add_source.
"""
assert source not in self.sources
assert other_source in self.sources
self.outputs[source] = other_source
def add_internal_source(self, bel, bel_pin, wire_name):
""" Adds a site internal source.
Adds an internal source to the site. This wire will not be used during
routing formation, but can be connected to other BELs within the site.
bel (Bel): Bel object
bel_pin (str): The exact tech library name for the relevant pin. Can be
a bus (e.g. A[5]). The name must identically match the library
name or an error will occur during synthesis.
wire_name (str): The name of the site wire. This wire_name must not
overlap with a source or sink site pin name.
"""
bel.connections[bel_pin] = wire_name
bel.outputs.add(bel_pin)
assert wire_name not in self.internal_sources, wire_name
self.internal_sources[wire_name] = (bel, bel_pin)
def connect_internal(self, bel, bel_pin, source):
""" Connect a BEL pin to an existing internal source.
bel (Bel): Bel object
bel_pin (str): The exact tech library name for the relevant pin. Can be
a bus (e.g. A[5]). The name must identically match the library
name or an error will occur during synthesis.
source (str): Existing internal source wire added via
add_internal_source.
"""
assert source in self.internal_sources, source
assert bel_pin not in bel.connections
bel.connections[bel_pin] = source
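# Illustrative usage (hypothetical BEL and wire names, not part of the
# original file): a LUT output feeding a flip-flop inside the same site could
# be modelled as:
#
#   site.add_internal_source(lut, 'O5', 'A5LUT_O5')
#   site.connect_internal(ff, 'D', 'A5LUT_O5')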
def add_bel(self, bel, name=None):
""" Adds a BEL to the site.
All BELs that use the add_sink, add_source, add_internal_source,
and connect_internal must call add_bel with the relevant BEL.
bel (Bel): Bel object
name (str): Optional name to assign to the bel to enable retrieval with
the maybe_get_bel method. This name is not used for any other
reason.
"""
self.bels.append(bel)
if name is not None:
assert name not in self.bel_map
self.bel_map[name] = bel
def set_post_route_cleanup_function(self, func):
""" Set callback to be called on this site during routing formation.
This callback is intended to enable sites that must perform decisions
based on routed connections.
func (function): Function that takes two arguments, the parent module
and the site object to cleanup.
"""
self.post_route_cleanup = func
def integrate_site(self, conn, module):
""" Integrates site so that it can be used with routing formation.
This method is called automatically by Module.add_site.
"""
self.check_site()
prefix = '{}_{}'.format(self.tile, self.site.name)
site_pin_map = make_site_pin_map(frozenset(self.site.site_pins))
# Sanity check BEL connections
for bel in self.bels:
bel.set_prefix(prefix)
bel.set_site(self.site.name)
for wire in bel.connections.values():
if wire == 0 or wire == 1:
continue
assert (wire in self.sinks) or (wire in self.sources) or (
wire in self.internal_sources
) or module.is_top_level(wire), wire
wires = set()
unrouted_sinks = set()
unrouted_sources = set()
wire_pkey_to_wire = {}
source_bels = {}
wire_assigns = {}
net_map = {}
for wire in self.internal_sources:
prefix_wire = prefix + '_' + wire
wires.add(prefix_wire)
for wire in self.sinks:
if module.is_top_level(wire):
continue
prefix_wire = prefix + '_' + wire
wires.add(prefix_wire)
wire_pkey = get_wire_pkey(conn, self.tile, site_pin_map[wire])
wire_pkey_to_wire[wire_pkey] = prefix_wire
self.site_wire_to_wire_pkey[wire] = wire_pkey
unrouted_sinks.add(wire_pkey)
for wire in self.sources:
if module.is_top_level(wire):
continue
prefix_wire = prefix + '_' + wire
wires.add(prefix_wire)
wire_pkey = get_wire_pkey(conn, self.tile, site_pin_map[wire])
net_name = module.check_for_net_name(wire_pkey)
if net_name:
wires.add(net_name)
net_map[prefix_wire] = net_name
wire_pkey_to_wire[wire_pkey] = prefix_wire
self.site_wire_to_wire_pkey[wire] = wire_pkey
unrouted_sources.add(wire_pkey)
source_bel = self.sources[wire]
if source_bel is not None:
source_bels[wire_pkey] = source_bel
if net_name:
bel, bel_pin = source_bel
bel.add_net_name(bel_pin, net_name)
shorted_nets = {}
for source_wire, sink_wire in self.outputs.items():
wire_source = prefix + '_' + sink_wire
wire = prefix + '_' + source_wire
wires.add(wire)
wire_assigns[wire] = [wire_source]
# If this is a passthrough wire, then indicate that the net is
# allowed to be merged.
if sink_wire not in site_pin_map:
continue
sink_wire_pkey = get_wire_pkey(conn, self.tile,
site_pin_map[sink_wire])
source_wire_pkey = get_wire_pkey(conn, self.tile,
site_pin_map[source_wire])
if sink_wire_pkey in unrouted_sinks:
shorted_nets[source_wire_pkey] = sink_wire_pkey
# Because this is being treated as a short, remove the
# source and sink.
unrouted_sources.remove(source_wire_pkey)
unrouted_sinks.remove(sink_wire_pkey)
return dict(
wires=wires,
unrouted_sinks=unrouted_sinks,
unrouted_sources=unrouted_sources,
wire_pkey_to_wire=wire_pkey_to_wire,
source_bels=source_bels,
wire_assigns=wire_assigns,
shorted_nets=shorted_nets,
net_map=net_map,
)
def check_site(self):
""" Sanity checks that the site is internally consistent. """
internal_sources = set(self.internal_sources.keys())
sinks = set(self.sinks.keys())
sources = set(self.sources.keys())
assert len(internal_sources & sinks) == 0, (internal_sources & sinks)
assert len(internal_sources & sources) == 0, (
internal_sources & sources)
bel_ids = set()
for bel in self.bels:
bel_ids.add(id(bel))
for bel_pair in self.sources.values():
if bel_pair is not None:
bel, _ = bel_pair
assert id(bel) in bel_ids
for sinks in self.sinks.values():
for bel, _ in sinks:
assert id(bel) in bel_ids
for bel_pair in self.internal_sources.values():
if bel_pair is not None:
bel, _ = bel_pair
assert id(bel) in bel_ids
def maybe_get_bel(self, name):
""" Returns named BEL from site.
name (str): Name given during Site.add_bel.
Returns None if name is not found, otherwise Bel object.
"""
if name in self.bel_map:
return self.bel_map[name]
else:
return None
def remove_bel(self, bel_to_remove):
""" Attempts to remove BEL from site.
It is an error to remove a BEL if any of its outputs are currently
in use by the Site. This method does NOT verify that the sources
of the BEL are not currently in use.
"""
bel_idx = None
for idx, bel in enumerate(self.bels):
if id(bel) == id(bel_to_remove):
bel_idx = idx
break
assert bel_idx is not None
# Make sure none of the BEL sources are in use
for bel in self.bels:
if id(bel) == id(bel_to_remove):
continue
for site_wire in bel.connections.values():
assert site_wire not in bel_to_remove.outputs, site_wire
# BEL is not used internally, proceed with removal.
del self.bels[bel_idx]
removed_sinks = []
removed_sources = []
for sink_wire, bels_using_sink in self.sinks.items():
bel_idx = None
for idx, (bel, _) in enumerate(bels_using_sink):
if id(bel) == id(bel_to_remove):
bel_idx = idx
break
if bel_idx is not None:
del bels_using_sink[bel_idx]
if len(bels_using_sink) == 0:
removed_sinks.append(self.site_wire_to_wire_pkey[sink_wire])
sources_to_remove = []
for source_wire, (bel, _) in self.sources.items():
if id(bel) == id(bel_to_remove):
removed_sources.append(
self.site_wire_to_wire_pkey[source_wire])
sources_to_remove.append(source_wire)
for wire in sources_to_remove:
del self.sources[wire]
return removed_sinks, removed_sources
def find_internal_source(self, bel, internal_source):
source_wire = bel.connections[internal_source]
assert source_wire in self.internal_sources, (internal_source,
source_wire)
for source, (bel_source, bel_wire) in self.sources.items():
if id(bel_source) != id(bel):
continue
if bel_wire == internal_source:
continue
return source
return None
def find_internal_sink(self, bel, internal_sink):
sink_wire = bel.connections[internal_sink]
assert sink_wire not in bel.outputs, (internal_sink, sink_wire)
if sink_wire not in self.internal_sources:
assert sink_wire in self.sinks
return sink_wire
def remove_internal_sink(self, bel, internal_sink):
sink_wire = self.find_internal_sink(bel, internal_sink)
bel.connections[internal_sink] = None
if sink_wire is not None:
idx_to_remove = []
for idx, (other_bel,
other_internal_sink) in enumerate(self.sinks[sink_wire]):
if id(bel) == id(other_bel):
assert other_internal_sink == internal_sink
idx_to_remove.append(idx)
for idx in sorted(idx_to_remove)[::-1]:
del self.sinks[sink_wire][idx]
if len(self.sinks[sink_wire]) == 0:
del self.sinks[sink_wire]
return self.site_wire_to_wire_pkey[sink_wire]
@functools.lru_cache(maxsize=None)
def make_site_pin_map(site_pins):
""" Create map of site pin names to tile wire names. """
site_pin_map = {}
for site_pin in site_pins:
site_pin_map[site_pin.name] = site_pin.wire
return site_pin_map
def merge_exclusive_sets(set_a, set_b):
""" set_b into set_a after verifying that set_a and set_b are disjoint. """
assert len(set_a & set_b) == 0, (set_a & set_b)
set_a |= set_b
def merge_exclusive_dicts(dict_a, dict_b):
""" dict_b into dict_a after verifying that dict_a and dict_b have disjoint keys. """
assert len(set(dict_a.keys()) & set(dict_b.keys())) == 0
dict_a.update(dict_b)
class Module(object):
""" Object to model a design. """
def __init__(self, db, grid, conn, name="top"):
self.name = name
self.db = db
self.grid = grid
self.conn = conn
self.sites = []
self.source_bels = {}
self.disabled_drcs = set()
self.default_iostandard = None
self.default_drive = None
self.net_to_iosettings = {}
# Map of source to sink.
self.shorted_nets = {}
# Map of wire_pkey to Verilog wire.
self.wire_pkey_to_wire = {}
# wire_pkey of sinks that are not connected to their routing.
self.unrouted_sinks = set()
# wire_pkey of sources that are not connected to their routing.
self.unrouted_sources = set()
# Known active pips, tuples of sink and source wire_pkey's.
# The sink wire_pkey is a net with the source wire_pkey.
self.active_pips = set()
self.root_in = set()
self.root_out = set()
self.root_inout = set()
self.wires = set()
self.wire_assigns = {}
# Optional map of site to signal names.
# This was originally intended for IPAD and OPAD signal naming.
self.site_to_signal = {}
self.top_level_signal_nets = set()
# Optional map of wire_pkey for site pin sources to net name.
self.wire_pkey_net_map = {}
self.wire_name_net_map = {}
# Map of (subckt pin, vector index (None for scalar), and net) to
# .cname value.
self.cname_map = {}
# Extra TCL lines (e.g. VREF)
self.extra_tcl = []
# IO bank lookup (if part was provided).
self.iobank_lookup = {}
def set_default_iostandard(self, iostandard, drive):
self.default_iostandard = iostandard
self.default_drive = drive
def make_iosettings_map(self, parsed_eblif):
"""
Fills in the net_to_iosettings dict with IO settings information read
from the eblif file.
"""
# Tuple of EBLIF cell parameters.
IOBUF_PARAMS = (
"IOSTANDARD",
"DRIVE",
)
# Regex for matching ports belonging to a single inout port
INOUT_RE = re.compile(
r"(.*)(_\$inp$|_\$inp(\[[0-9]+\])$|_\$out$|_\$out(\[[0-9]+\])$)(.*)"
)
# Eblif parameter decoding
BIN_RE = re.compile(r"^([01]+)$")
STR_RE = re.compile(r"^\"(.*)\"$")
# No subcircuits
if "subckt" not in parsed_eblif:
return
# Look for IO cells
for subckt in parsed_eblif["subckt"]:
# No parameters
if "param" not in subckt:
continue
# Gather nets that the cell is connected to.
# Collapse input and output nets that correspond to an inout port
# to a single net name
#
# "net_$inp" -> "net"
# "net_$out" -> "net"
# "net_$inp[0]" -> "net[0]"
# "net_$out[0]" -> "net[0]"
nets = set()
for conn_str in subckt["args"][1:]:
port, net = conn_str.split("=")
match = INOUT_RE.match(net)
if match:
groups = match.groups()
net = groups[0] + "".join(
[g for g in groups[2:] if g is not None])
nets.add(net)
# Check if the cell is connected to a top-level port. If not then
# skip this cell.
nets &= self.top_level_signal_nets
if len(nets) == 0:
continue
# Get interesting params
params = {}
for param, _value in subckt["param"].items():
if param not in IOBUF_PARAMS:
continue
# Parse the value
value = _value
match = BIN_RE.match(_value)
if match:
value = int(match.group(1), 2)
match = STR_RE.match(_value)
if match:
value = str(match.group(1))
# Store the parameter
params[param] = value
# No interesting params
if len(params) == 0:
continue
# Assign cell parameters to all top-level nets it is connected to.
for net in nets:
self.net_to_iosettings[net] = params
def get_site_iosettings(self, site):
"""
Returns a dict with IO settings for the given site name. The
information is taken from EBLIF cell parameters; the connection between
top-level ports and EBLIF cells is read from the PCF file.
"""
# Site not in site to signal list
if site not in self.site_to_signal:
return None
signal = self.site_to_signal[site]
# Signal not in IO settings map
if signal not in self.net_to_iosettings:
return None
return self.net_to_iosettings[signal]
def add_extra_tcl_line(self, tcl_line):
self.extra_tcl.append(tcl_line)
def disable_drc(self, drc):
self.disabled_drcs.add(drc)
def set_net_map(self, net_map):
self.wire_pkey_net_map = net_map
def check_for_net_name(self, wire_pkey):
if wire_pkey in self.wire_pkey_net_map:
# Top-level port names suppress net names.
name = self.wire_pkey_net_map[wire_pkey]
if name in self.top_level_signal_nets:
return None
return escape_verilog_name(name)
else:
return None
def set_site_to_signal(self, site_to_signal):
""" Assing site to signal map for top level sites.
Args:
site_to_signal (dict): Site to signal name map
"""
self.site_to_signal = site_to_signal
self.top_level_signal_nets = set(self.site_to_signal.values())
def _check_top_name(self, tile, site, name):
""" Returns top level port name for given tile and site
Args:
tile (str): Tile containing site
site (str): Site containing top level pad.
name (str): User-defined pad name (e.g. IPAD or OPAD, etc).
"""
if site not in self.site_to_signal:
return '{}_{}_{}'.format(tile, site, name)
else:
return self.site_to_signal[site]
def add_top_in_port(self, tile, site, name):
""" Add a top level input port.
tile (str): Tile name that will sink the input port.
site (str): Site name that will sink the input port.
name (str): Name of port.
Returns str of root level port name.
"""
port = self._check_top_name(tile, site, name)
assert port not in self.root_in
self.root_in.add(port)
return port
def add_top_out_port(self, tile, site, name):
""" Add a top level output port.
tile (str): Tile name that will sink the output port.
site (str): Site name that will sink the output port.
name (str): Name of port.
Returns str of root level port name.
"""
port = self._check_top_name(tile, site, name)
assert port not in self.root_out
self.root_out.add(port)
return port
def add_top_inout_port(self, tile, site, name):
""" Add a top level inout port.
tile (str): Tile name that will sink the inout port.
site (str): Site name that will sink the inout port.
name (str): Name of port.
Returns str of root level port name.
"""
port = self._check_top_name(tile, site, name)
assert port not in self.root_inout
self.root_inout.add(port)
return port
def is_top_level(self, wire):
""" Returns true if specified wire is a top level wire. """
return wire in self.root_in or wire in self.root_out or wire in self.root_inout
def add_site(self, site):
""" Adds a site to the module. """
integrated_site = site.integrate_site(self.conn, self)
merge_exclusive_sets(self.wires, integrated_site['wires'])
merge_exclusive_sets(self.unrouted_sinks,
integrated_site['unrouted_sinks'])
merge_exclusive_sets(self.unrouted_sources,
integrated_site['unrouted_sources'])
merge_exclusive_dicts(self.wire_pkey_to_wire,
integrated_site['wire_pkey_to_wire'])
merge_exclusive_dicts(self.source_bels, integrated_site['source_bels'])
merge_exclusive_dicts(self.wire_assigns,
integrated_site['wire_assigns'])
merge_exclusive_dicts(self.shorted_nets,
integrated_site['shorted_nets'])
merge_exclusive_dicts(self.wire_name_net_map,
integrated_site['net_map'])
self.sites.append(site)
def make_routes(self, allow_orphan_sinks):
""" Create nets from top level wires, activie PIPS, sources and sinks.
Invoke make_routes after all sites and pips have been added.
allow_orphan_sinks (bool): Controls whether it is an error if a sink
has no source.
"""
self.nets = {}
self.net_map = {}
for sink_wire, src_wire in make_routes(
db=self.db,
conn=self.conn,
wire_pkey_to_wire=self.wire_pkey_to_wire,
unrouted_sinks=self.unrouted_sinks,
unrouted_sources=self.unrouted_sources,
active_pips=self.active_pips,
allow_orphan_sinks=allow_orphan_sinks,
shorted_nets=self.shorted_nets,
nets=self.nets,
net_map=self.net_map,
):
if sink_wire not in self.wire_assigns:
self.wire_assigns[sink_wire] = []
self.wire_assigns[sink_wire].append(src_wire)
self.handle_post_route_cleanup()
def output_verilog(self):
""" Yields lines of verilog that represent the design in Verilog.
Invoke output_verilog after invoking make_routes to ensure that
inter-site connections are made.
"""
root_module_args = []
for in_wire, width in make_bus(self.root_in):
if width is None:
root_module_args.append(' input ' + in_wire)
else:
root_module_args.append(' input [{}:0] {}'.format(
width, in_wire))
for out_wire, width in make_bus(self.root_out):
if width is None:
root_module_args.append(' output ' + out_wire)
else:
root_module_args.append(' output [{}:0] {}'.format(
width, out_wire))
for inout_wire, width in make_bus(self.root_inout):
if width is None:
root_module_args.append(' inout ' + inout_wire)
else:
root_module_args.append(' inout [{}:0] {}'.format(
width, inout_wire))
yield 'module {}('.format(self.name)
yield ',\n'.join(root_module_args)
yield ' );'
for wire, width in make_bus(self.wires):
if width is None:
yield ' wire [0:0] {};'.format(wire)
else:
yield ' wire [{}:0] {};'.format(width, wire)
for site in self.sites:
for bel in site.bels:
bel.make_net_map(top=self, net_map=self.wire_name_net_map)
for lhs, rhs in self.wire_assigns.items():
assert len(rhs) == 1
self.wire_name_net_map[lhs] = flatten_wires(
rhs[0], self.wire_assigns, self.wire_name_net_map)
for site in self.sites:
for bel in sorted(site.bels, key=lambda bel: bel.priority):
yield ''
for line in bel.output_verilog(
top=self, net_map=self.wire_name_net_map, indent=' '):
yield line
for lhs, rhs in self.wire_name_net_map.items():
yield ' assign {} = {};'.format(lhs, rhs)
yield 'endmodule'
def output_bel_locations(self):
""" Yields lines of tcl that will assign set the location of BELs. """
for bel in sorted(self.get_bels(), key=lambda bel: bel.priority):
get_cell = "[get_cells *{cell}]".format(
cell=bel.get_prefixed_name())
if bel.bel is not None:
yield """\
set_property BEL {bel} {get_cell}""".format(
bel=bel.bel,
get_cell=get_cell,
)
yield """\
set_property LOC {site} {get_cell}""".format(
site=bel.site, get_cell=get_cell)
def output_nets(self):
""" Yields lines of tcl that will assign the exact routing path for nets.
Invoke output_nets after invoking make_routes.
"""
assert len(self.nets) > 0
for net_wire_pkey, net in self.nets.items():
if net_wire_pkey == ZERO_NET:
yield 'set net [get_nets {<const0>}]'
elif net_wire_pkey == ONE_NET:
yield 'set net [get_nets {<const1>}]'
else:
if net_wire_pkey not in self.source_bels:
continue
if not net.is_net_alive():
continue
bel, pin = self.source_bels[net_wire_pkey]
yield """
set pin [get_pins *{cell}/{pin}]
set net [get_nets -of_object $pin]""".format(
cell=bel.get_prefixed_name(),
pin=pin,
)
# If the ZERO_NET or ONE_NET is not used, do not emit it.
fixed_route = list(
net.make_fixed_route(self.conn, self.wire_pkey_to_wire))
if ' '.join(fixed_route).replace(' ', '').replace('{}',
'') == '[list]':
assert net_wire_pkey in [ZERO_NET, ONE_NET]
continue
yield """set route {fixed_route}""".format(
fixed_route=' '.join(fixed_route))
# Remove extra {} elements required to construct 1-length lists.
yield """set_property FIXED_ROUTE $route $net"""
def output_disabled_drcs(self):
for drc in self.disabled_drcs:
yield "set_property SEVERITY {{Warning}} [get_drc_checks {}]".format(
drc)
def get_bels(self):
""" Yield a list of Bel objects in the module. """
for site in self.sites:
for bel in site.bels:
yield bel
def handle_post_route_cleanup(self):
""" Handle post route clean-up. """
for site in self.sites:
if site.post_route_cleanup is not None:
site.post_route_cleanup(self, site)
prune_antennas(self.conn, self.nets, self.unrouted_sinks)
def find_sinks_from_source(self, site, site_wire):
""" Yields sink wire names from a site wire source. """
wire_pkey = site.site_wire_to_wire_pkey[site_wire]
assert wire_pkey in self.nets
source_wire = self.wire_pkey_to_wire[wire_pkey]
for sink_wire, other_source_wires in self.wire_assigns.items():
for other_source_wire in other_source_wires:
if source_wire == other_source_wire:
yield sink_wire
def find_sources_from_sink(self, site, site_wire):
""" Return all source wire names from a site wire sink. """
wire_pkey = site.site_wire_to_wire_pkey[site_wire]
sink_wire = self.wire_pkey_to_wire[wire_pkey]
if sink_wire not in self.wire_assigns:
return []
return self.wire_assigns[sink_wire]
def find_source_from_sink(self, site, site_wire):
""" Return source wire name from a site wire sink.
Raises
------
AssertionError : If multiple sources are currently defined for
this sink. """
sources = self.find_sources_from_sink(site, site_wire)
assert len(sources) == 1, sources
return sources[0]
def remove_site(self, site):
site_idx = None
for idx, a_site in enumerate(self.sites):
if site is a_site:
assert site_idx is None
site_idx = idx
assert site_idx is not None
for bel in site.bels:
self.remove_bel(site, bel)
def remove_bel(self, site, bel):
""" Remove a BEL from the module.
If this is the last use of a site sink, then that wire and wire
connection is removed.
"""
removed_sinks, removed_sources = site.remove_bel(bel)
# Make sure none of the sources are the only source for a net.
for wire_pkey in removed_sources:
source_wire = self.wire_pkey_to_wire[wire_pkey]
for _, other_source_wires in self.wire_assigns.items():
if source_wire in other_source_wires:
if len(other_source_wires) == 1:
assert source_wire != other_source_wires[0], source_wire
else:
other_source_wires.remove(source_wire)
# Remove the sources and sinks from the wires, wire assigns, and net
for wire_pkey in removed_sources:
self.remove_source(wire_pkey)
for wire_pkey in removed_sinks:
self.remove_sink(wire_pkey)
def remove_source(self, wire_pkey):
self.unrouted_sources.remove(wire_pkey)
del self.source_bels[wire_pkey]
self.wires.remove(self.wire_pkey_to_wire[wire_pkey])
def remove_sink(self, wire_pkey):
self.unrouted_sinks.remove(wire_pkey)
self.wires.remove(self.wire_pkey_to_wire[wire_pkey])
sink_wire = self.wire_pkey_to_wire[wire_pkey]
if sink_wire in self.wire_assigns:
del self.wire_assigns[sink_wire]
def prune_unconnected_ports(self):
"""
Identifies and removes unconnected top level ports
"""
# Checks whether a top level port is connected to any bel
def is_connected_to_bel(port):
for site in self.sites:
for bel in site.bels:
for bel_pin, conn in bel.connections.items():
if conn == port:
return True
return False
# Check whether a top level port is used in assign
def is_used(port):
if port in self.wire_assigns:
return True
for other_wires in self.wire_assigns.values():
for other_wire in other_wires:
if other_wire == port:
return True
return False
# Remove
for ports in (self.root_in, self.root_out, self.root_inout):
to_remove = set()
for port in ports:
if not is_connected_to_bel(port) and not is_used(port):
to_remove.add(port)
for port in to_remove:
ports.remove(port)
def add_to_cname_map(self, parsed_eblif):
""" Create a map from subckt (pin, index, net) to cnames.
Arguments
---------
parsed_eblif
Output from eblif.parse_blif
"""
""" Example subckt from eblif.parse_blif:
# > parse_eblif['subckt'][3]
{'args': ['MUXF6',
'I0=$abc$6342$auto$blifparse.cc:492:parse_blif$6343.T0',
'I1=$abc$6342$auto$blifparse.cc:492:parse_blif$6343.T1',
'O=$abc$6342$auto$dff2dffe.cc:175:make_patterns_logic$1556',
'S=$abc$6342$new_n472_'],
'cname': ['$abc$6342$auto$blifparse.cc:492:parse_blif$6343.fpga_mux_0'],
'data': [],
'type': 'subckt'}
"""
for subckt in parsed_eblif['subckt']:
if 'cname' not in subckt:
continue
assert len(subckt['cname']) == 1
for arg in subckt['args'][1:]:
port, net = arg.split('=')
pin, index = pin_to_wire_and_idx(port)
self.cname_map[(pin, index,
escape_verilog_name(net))] = subckt['cname'][0]
def lookup_cname(self, pin, idx, net):
return self.cname_map.get((pin, idx, net))
def output_extra_tcl(self):
return self.extra_tcl
def set_io_banks(self, iobanks):
self.iobank_lookup = dict((v, int(k)) for k, v in iobanks.items())
def find_iobank(self, hclk_ioi3_tile):
return self.iobank_lookup[hclk_ioi3_tile]
| 32.665737
| 89
| 0.575158
|
07af0f6ad1b5f5b814370764ddd8020b4ae72c83
| 9,282
|
py
|
Python
|
tensorflow_model_analysis/extractors/predict_extractor_v2.py
|
Bobgy/model-analysis
|
a964d2e8430b447c898d271fb6e6d8f5b99adf4b
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_model_analysis/extractors/predict_extractor_v2.py
|
Bobgy/model-analysis
|
a964d2e8430b447c898d271fb6e6d8f5b99adf4b
|
[
"Apache-2.0"
] | 1
|
2020-03-03T03:34:37.000Z
|
2020-03-03T03:34:37.000Z
|
tensorflow_model_analysis/extractors/predict_extractor_v2.py
|
Bobgy/model-analysis
|
a964d2e8430b447c898d271fb6e6d8f5b99adf4b
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Predict extractor."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import copy
from typing import Dict, List, Optional, Sequence, Text, Union
import apache_beam as beam
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis.extractors import extractor
PREDICT_EXTRACTOR_STAGE_NAME = 'ExtractPredictions'
PREDICT_SIGNATURE_DEF_KEY = 'predict'
def PredictExtractor(
eval_config: config.EvalConfig,
eval_shared_model: Union[types.EvalSharedModel,
Dict[Text, types.EvalSharedModel]],
desired_batch_size: Optional[int] = None) -> extractor.Extractor:
"""Creates an extractor for performing predictions.
The extractor's PTransform loads and runs the serving saved_model(s) against
every extract yielding a copy of the incoming extracts with an additional
extract added for the predictions keyed by tfma.PREDICTIONS_KEY. The model
inputs are searched for under tfma.FEATURES_KEY (keras only) or tfma.INPUT_KEY
(if tfma.FEATURES_KEY is not set or the model is non-keras). If multiple
models are used the predictions will be stored in a dict keyed by model name.
Args:
eval_config: Eval config.
eval_shared_model: Shared model (single-model evaluation) or dict of shared
models keyed by model name (multi-model evaluation).
desired_batch_size: Optional batch size.
Returns:
Extractor for extracting predictions.
"""
eval_shared_models = eval_shared_model
if not isinstance(eval_shared_model, dict):
eval_shared_models = {'': eval_shared_model}
# To maintain consistency between settings where single models are used,
# always use '' as the model name regardless of whether a name is passed.
if len(eval_shared_models) == 1:
eval_shared_models = {'': list(eval_shared_models.values())[0]}
# pylint: disable=no-value-for-parameter
return extractor.Extractor(
stage_name=PREDICT_EXTRACTOR_STAGE_NAME,
ptransform=_ExtractPredictions(
eval_config=eval_config,
eval_shared_models=eval_shared_models,
desired_batch_size=desired_batch_size))
@beam.typehints.with_input_types(beam.typehints.List[types.Extracts])
@beam.typehints.with_output_types(types.Extracts)
class _PredictionDoFn(model_util.BatchReducibleDoFnWithModels):
"""A DoFn that loads the models and predicts."""
def __init__(self, eval_config: config.EvalConfig,
eval_shared_models: Dict[Text, types.EvalSharedModel]) -> None:
super(_PredictionDoFn, self).__init__(
{k: v.model_loader for k, v in eval_shared_models.items()})
self._eval_config = eval_config
def _batch_reducible_process(
self,
batch_of_extracts: List[types.Extracts]) -> Sequence[types.Extracts]:
result = copy.deepcopy(batch_of_extracts)
for spec in self._eval_config.model_specs:
# To maintain consistency between settings where single models are used,
# always use '' as the model name regardless of whether a name is passed.
model_name = spec.name if len(self._eval_config.model_specs) > 1 else ''
if model_name not in self._loaded_models:
raise ValueError(
'loaded model for "{}" not found: eval_config={}'.format(
spec.name, self._eval_config))
loaded_model = self._loaded_models[model_name]
signatures = None
if loaded_model.keras_model:
signatures = loaded_model.keras_model.signatures
elif loaded_model.saved_model:
signatures = loaded_model.saved_model.signatures
if not signatures:
raise ValueError(
'PredictExtractor V2 requires a keras model or a serving model. '
'If using EvalSavedModel then you must use PredictExtractor V1.')
signature_key = spec.signature_name
# TODO(mdreves): Add support for multiple signatures per output.
if not signature_key:
# First try 'predict' then try 'serving_default'. The estimator output
# for the 'serving_default' key does not include all the heads in a
# multi-head model. However, keras only uses the 'serving_default' for
# its outputs. Note that the 'predict' key only exists for estimators
# for multi-head models, for single-head models only 'serving_default'
# is used.
signature_key = tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
if PREDICT_SIGNATURE_DEF_KEY in signatures:
signature_key = PREDICT_SIGNATURE_DEF_KEY
if signature_key not in signatures:
raise ValueError('{} not found in model signatures: {}'.format(
signature_key, signatures))
signature = signatures[signature_key]
# If input names exist then filter the inputs by these names (unlike
# estimators, keras does not accept unknown inputs).
input_names = None
input_specs = None
# First arg of structured_input_signature tuple is shape, second is dtype
# (we currently only support named params passed as a dict)
if (signature.structured_input_signature and
len(signature.structured_input_signature) == 2 and
isinstance(signature.structured_input_signature[1], dict)):
input_names = [name for name in signature.structured_input_signature[1]]
input_specs = signature.structured_input_signature[1]
elif loaded_model.keras_model is not None:
# Calling keras_model.input_names does not work properly in TF 1.15.0.
# As a work around, make sure the signature.structured_input_signature
# check is before this check (see b/142807137).
input_names = loaded_model.keras_model.input_names
inputs = None
if input_names is not None:
inputs = model_util.rebatch_by_input_names(batch_of_extracts,
input_names, input_specs)
if not inputs and (input_names is None or len(input_names) <= 1):
# Assume serialized examples
inputs = [extract[constants.INPUT_KEY] for extract in batch_of_extracts]
if isinstance(inputs, dict):
outputs = signature(**{k: tf.constant(v) for k, v in inputs.items()})
else:
outputs = signature(tf.constant(inputs, dtype=tf.string))
for i in range(len(result)):
output = {k: v[i].numpy() for k, v in outputs.items()}
# Keras and regression serving models return a dict of predictions even
# for single-outputs. Convert these to a single tensor for compatibility
# with the labels (and model.predict API).
if len(output) == 1:
output = list(output.values())[0]
# If only one model, the predictions are stored without using a dict
if len(self._eval_config.model_specs) == 1:
result[i][constants.PREDICTIONS_KEY] = output
else:
if constants.PREDICTIONS_KEY not in result[i]:
result[i][constants.PREDICTIONS_KEY] = {}
result[i][constants.PREDICTIONS_KEY][spec.name] = output
return result
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
def _ExtractPredictions( # pylint: disable=invalid-name
extracts: beam.pvalue.PCollection, eval_config: config.EvalConfig,
eval_shared_models: Dict[Text, types.EvalSharedModel],
desired_batch_size: Optional[int]) -> beam.pvalue.PCollection:
"""A PTransform that adds predictions and possibly other tensors to extracts.
Args:
extracts: PCollection of extracts containing model inputs keyed by
tfma.FEATURES_KEY (if model inputs are named) or tfma.INPUTS_KEY (if model
takes raw tf.Examples as input).
eval_config: Eval config.
eval_shared_models: Shared model parameters keyed by model name.
desired_batch_size: Optional batch size.
Returns:
PCollection of Extracts updated with the predictions.
"""
batch_args = {}
# TODO(b/143484017): Consider removing this option if autotuning is better
# able to handle batch size selection.
if desired_batch_size is not None:
batch_args = dict(
min_batch_size=desired_batch_size, max_batch_size=desired_batch_size)
return (
extracts
| 'Batch' >> beam.BatchElements(**batch_args)
| 'Predict' >> beam.ParDo(
_PredictionDoFn(
eval_config=eval_config, eval_shared_models=eval_shared_models)))
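# A minimal usage sketch (illustrative, not part of the original file).
# `eval_config` and `eval_shared_model` are assumed to be constructed elsewhere
# with the usual TFMA helpers. The returned Extractor exposes `stage_name` and
# `ptransform`, which a Beam pipeline can apply to a PCollection of extracts:
#
#   predict_extractor = PredictExtractor(
#       eval_config=eval_config, eval_shared_model=eval_shared_model)
#   predictions = (
#       extracts
#       | predict_extractor.stage_name >> predict_extractor.ptransform)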
| 44.411483
| 80
| 0.721935
|
e0b401fdbdc4badab53f36d8a3ec8c715c080548
| 9,783
|
py
|
Python
|
lehrer_funktionen.py
|
astroPythoner/Lehrer_vs_Zombies
|
4f9f933f502da803db5936a32c15df26f67a198a
|
[
"MIT"
] | 1
|
2020-02-02T21:03:49.000Z
|
2020-02-02T21:03:49.000Z
|
lehrer_funktionen.py
|
astroPythoner/Lehrer_vs_Zombies
|
4f9f933f502da803db5936a32c15df26f67a198a
|
[
"MIT"
] | null | null | null |
lehrer_funktionen.py
|
astroPythoner/Lehrer_vs_Zombies
|
4f9f933f502da803db5936a32c15df26f67a198a
|
[
"MIT"
] | null | null | null |
# These functions are called when the corresponding event happens in the game.
# If the functions are not added to this file when a new teacher is added, they are created automatically in constants.py.
from constants import *
from sprites import *
def is_zombie_close_to_player(zombie, player_pos, area_radius=250):
if zombie.pos.x < player_pos.x + area_radius and zombie.pos.x > player_pos.x - area_radius and zombie.pos.y < player_pos.y + area_radius and zombie.pos.y > player_pos.y - area_radius:
return True
def power_up_nomagic(game, player, test=False):
if not test:
for zombie in game.zombies:
if is_zombie_close_to_player(zombie, player.pos):
zombie.change_img_for_given_time(LEHRER["nomagic"]["other_files"]["frozen_zombie"], 3000, stand_still_during_time=True)
def object_collect_nomagic(game, player, test=False):
if not test:
if player.weapon_upgrade_unlocked:
player.change_img_for_given_time(LEHRER["nomagic"]["other_files"]["big_eyes_schwamm"], 2200)
else:
player.change_img_for_given_time(LEHRER["nomagic"]["other_files"]["big_eyes"], 2000)
def obstacle_nomagic(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["nomagic"]["other_files"]["aua"], 1500, 30, 0)
def health_pack_nomagic(game, player, test=False):
if not test:
pass
def power_up_ecoltes(game, player, test=False):
if not test:
for zombie in game.zombies:
if is_zombie_close_to_player(zombie, player.pos):
zombie.change_img_for_given_time(LEHRER["ecoltes"]["other_files"]["zombie_ohren_zu"], 3000, damge_during_time=MOB_HEALTH * 3 / 4)
zombie.place_img_on_zombie_for_given_time(LEHRER["ecoltes"]["other_files"]["aua"], 3000, 20, -5)
player.place_img_on_player_for_given_time(LEHRER["ecoltes"]["other_files"]["bonjour"], 3000, 20, -10)
def object_collect_ecoltes(game, player, test=False):
if not test:
if player.weapon_upgrade_unlocked:
player.change_img_for_given_time(LEHRER["ecoltes"]["other_files"]["player_pfandflasche_marmelade"], 2000)
else:
player.change_img_for_given_time(LEHRER["ecoltes"]["other_files"]["player_pfandflasche"], 2000)
def obstacle_ecoltes(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["ecoltes"]["other_files"]["zut"], 1500, 30, 0)
def health_pack_ecoltes(game, player, test=False):
if not test:
pass
def power_up_gnatrium(game, player, test=False):
if not test:
for zombie in game.zombies:
if is_zombie_close_to_player(zombie, player.pos):
zombie.change_img_for_given_time(LEHRER["gnatrium"]["other_files"]["blumen_zombie"], 3000, damge_during_time=MOB_HEALTH * 2 / 3)
Gas_Wolke(game, LEHRER["gnatrium"]["other_files"]["gaswolke"], (50, 50), (500, 500), player.pos, 1000, 2000)
def object_collect_gnatrium(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(PEROSNEN_OBJECT_IMGES["gnatrium"]["img"], 1500, 30, -20)
def obstacle_gnatrium(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["gnatrium"]["other_files"]["aua"], 1500, 30, 0)
def health_pack_gnatrium(game, player, test=False):
if not test:
pass
def power_up_windmauer(game, player, test=False):
if not test:
for zombie in game.zombies:
if is_zombie_close_to_player(zombie, player.pos):
zombie.change_img_for_given_time(None, 3000, stand_still_during_time=True)
zombie.place_img_on_zombie_for_given_time(LEHRER["windmauer"]["other_files"]["zzz"], 3000, 30, -30)
player.place_img_on_player_for_given_time(LEHRER["windmauer"]["other_files"]["bla bla bla"], 2000, 30, -10)
def object_collect_windmauer(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["windmauer"]["other_files"]["abikorrektur"], 1500, 30, -10)
def obstacle_windmauer(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["windmauer"]["other_files"]["kommatest"], 1500, 30, -10)
def health_pack_windmauer(game, player, test=False):
if not test:
pass
def power_up_honyoung(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["honyoung"]["other_files"]["be quiet"], 2500, 30, 0)
for zombie in game.zombies:
if is_zombie_close_to_player(zombie, player.pos):
zombie.change_img_for_given_time(None, 4000, stand_still_during_time=True)
def object_collect_honyoung(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["honyoung"]["other_files"]["I love that sentence"], 1500, 30, 0)
def obstacle_honyoung(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["honyoung"]["other_files"]["fisch"], 1500, 30, 0)
def health_pack_honyoung(game, player, test=False):
if not test:
pass
def power_up_wolkenstaedtle(game, player, test=False):
if not test:
player.change_img_for_given_time(image=LEHRER["wolkenstaedtle"]["other_files"]["Pferd"], time_in_millis=3500)
nahe_zombies = []
for zombie in game.zombies:
if is_zombie_close_to_player(zombie, player.pos):
nahe_zombies.append(zombie)
for number, zombie in enumerate(nahe_zombies):
zombie.place_img_on_zombie_for_given_time([LEHRER["wolkenstaedtle"]["other_files"]["Name?"], LEHRER["wolkenstaedtle"]["other_files"]["Lenard?"]][number % 2], 3250, 30, 5)
zombie.change_img_for_given_time(None, 3500, stand_still_during_time=True)
def object_collect_wolkenstaedtle(game, player, test=False):
if not test:
player.change_img_for_given_time(image=LEHRER["wolkenstaedtle"]["other_files"]["sitzen"], time_in_millis=2500)
def obstacle_wolkenstaedtle(game, player, test=False):
if not test:
pass
def health_pack_wolkenstaedtle(game, player, test=False):
if not test:
pass
def power_up_tomathely(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["tomathely"]["other_files"]["noch nicht abschreiben"], 3250, 30, -20)
for zombie in game.zombies:
if is_zombie_close_to_player(zombie, player.pos):
zombie.change_img_for_given_time(None, 3500, stand_still_during_time=True)
def object_collect_tomathely(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["tomathely"]["other_files"]["Michel"], 1500, 30, 0)
def obstacle_tomathely(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["tomathely"]["other_files"]["umstellen"], 3250, 30, -5)
def health_pack_tomathely(game, player, test=False):
if not test:
pass
def power_up_gruss(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["gruss"]["other_files"]["Das haengt von der Definition ab"], 4250, 30, -50)
first = True
for zombie in game.zombies:
if is_zombie_close_to_player(zombie, player.pos):
zombie.change_img_for_given_time(LEHRER["gruss"]["other_files"]["anderer_zombie"], 4000, damge_during_time=MOB_HEALTH * 1 / 2)
if first:
zombie.place_img_on_zombie_for_given_time(LEHRER["gruss"]["other_files"]["sind wir zombies"], 4000, 30, -30)
first = False
def object_collect_gruss(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["gruss"]["other_files"]["bewiesen"], 1500, 35, -25)
def obstacle_gruss(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["gruss"]["other_files"]["warum laedt des nicht"], 1500, 40, -35)
def health_pack_gruss(game, player, test=False):
if not test:
pass
def power_up_hozler(game, player, test=False):
if not test:
for zombie in game.zombies:
if is_zombie_close_to_player(zombie, player.pos):
zombie.change_img_for_given_time(None, 3000, stand_still_during_time=True,
damge_during_time=MOB_HEALTH / 2)
Shaking_object(game, PERSONEN_POWER_UP_ICONS["hozler"], player.pos, 3000)
def object_collect_hozler(game, player, test=False):
if not test:
player.health += 15
if player.health > LEHRER["hozler"]["player_health"]:
player.health = LEHRER["hozler"]["player_health"]
def obstacle_hozler(game, player, test=False):
if not test:
pass
def health_pack_hozler(game, player, test=False):
if not test:
pass
def power_up_schueler(game, player, test=False):
if not test:
for zombie in game.zombies:
if is_zombie_close_to_player(zombie, player.pos):
zombie.change_img_for_given_time(None, 3000, stand_still_during_time=True, damge_during_time=MOB_HEALTH * 2 / 3)
Shaking_object(game, PERSONEN_POWER_UP_ICONS["schueler"], player.pos, 3000)
def object_collect_schueler(game, player, test=False):
if not test:
Spielhack(game, player)
def obstacle_schueler(game, player, test=False):
if not test:
player.place_img_on_player_for_given_time(LEHRER["schueler"]["other_files"]["unnoetig"], 1500, 30, 0)
def health_pack_schueler(game, player, test=False):
if not test:
pass
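# Hedged sketch, not part of the original module: the callbacks above follow a
# fixed naming scheme (power_up_<name>, object_collect_<name>, obstacle_<name>,
# health_pack_<name>), so a dispatch table like the one below could select them
# by character key. The dict and function names here are illustrative
# assumptions, not definitions used elsewhere in the game.
EVENT_HANDLERS_SKETCH = {
    "schueler": {
        "power_up": power_up_schueler,
        "object_collect": object_collect_schueler,
        "obstacle": obstacle_schueler,
        "health_pack": health_pack_schueler,
    },
    "hozler": {
        "power_up": power_up_hozler,
        "object_collect": object_collect_hozler,
        "obstacle": obstacle_hozler,
        "health_pack": health_pack_hozler,
    },
}
def dispatch_event_sketch(character, event, game, player):
    # Look up the matching callback and call it; unknown keys simply do nothing.
    handler = EVENT_HANDLERS_SKETCH.get(character, {}).get(event)
    if handler is not None:
        handler(game, player)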
| 37.918605
| 187
| 0.696003
|
c9287284285ad2e8f6d354b41fdd4159cda2c11e
| 18,390
|
py
|
Python
|
Scripts/SongRequest/Library/song_request_parent_wrapper.py
|
Vasar007/Streamlabs-Chatbot-Scripts
|
5f3dacf249100ef5054a17f0eb15bb0c39e60a4d
|
[
"Apache-2.0"
] | 1
|
2021-04-15T14:48:09.000Z
|
2021-04-15T14:48:09.000Z
|
Scripts/SongRequest/Library/song_request_parent_wrapper.py
|
Vasar007/Streamlabs-Chatbot-Scripts
|
5f3dacf249100ef5054a17f0eb15bb0c39e60a4d
|
[
"Apache-2.0"
] | null | null | null |
Scripts/SongRequest/Library/song_request_parent_wrapper.py
|
Vasar007/Streamlabs-Chatbot-Scripts
|
5f3dacf249100ef5054a17f0eb15bb0c39e60a4d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
class SongRequestParentWrapper(object):
r"""
"Parent" object wrapper.
"""
def __init__(self, Parent):
self._Parent = Parent
# Messages And Events.
def send_stream_message(self, message):
r"""
Sends message to the stream chat.
void SendStreamMessage(string message)
"""
self._Parent.SendStreamMessage(message)
def send_stream_whisper(self, target_id, message):
r"""
Sends whisper message to the stream chat (only applicable on Twitch).
void SendStreamWhisper(string target, string message)
"""
self._Parent.SendStreamWhisper(target_id, message)
def send_twitch_message(self, message):
r"""
Sends message to the Twitch chat (only if the user has set up
the bot for Twitch).
void SendTwitchMessage(string message)
"""
self._Parent.SendTwitchMessage(message)
def send_twitch_whisper(self, target_id, message):
r"""
Sends whisper message to the Twitch chat (only if the user has set up
the bot for Twitch).
void SendTwitchWhisper(string target, string message)
"""
self._Parent.SendTwitchWhisper(target_id, message)
def send_discord_message(self, message):
r"""
Sends message to Discord (only if the user has set up the bot for
Discord).
void SendDiscordMessage(string message)
"""
self._Parent.SendDiscordMessage(message)
def send_discord_dm(self, target_id, message):
r"""
Sends DMs to users on Discord.
void SendDiscordDM(string target, string message)
"""
self._Parent.SendDiscordDM(target_id, message)
def broadcast_ws_event(self, event_name, json_data):
r"""
Sends an event to connected overlays.
void BroadcastWsEvent(string eventName, string jsonData)
"""
self._Parent.BroadcastWsEvent(event_name, json_data)
# Currency Manipulation.
def add_points(self, user_id, amount):
r"""
Adds currency to a single user.
bool AddPoints(string userid, long amount)
"""
return self._Parent.AddPoints(user_id, amount)
def remove_points(self, user_id, amount):
r"""
Removes currency from a single user.
bool RemovePoints(string userid, long amount)
"""
return self._Parent.RemovePoints(user_id, amount)
def add_points_with_name(self, user_id, user_name, amount):
r"""
Adds currency to a single user.
bool AddPoints(string userid, string username, long amount)
"""
return self._Parent.AddPoints(user_id, user_name, amount)
def remove_points_with_name(self, user_id, user_name, amount):
r"""
Removes currency from a single user.
bool RemovePoints(string userid, string username, long amount)
"""
return self._Parent.RemovePoints(user_id, user_name, amount)
def add_points_all(self, data):
r"""
        Synchronously adds currency to several users.
Returns a list of user IDs that could not receive currency because they
were not in chat.
List\<string userid> AddPointsAll(PythonDictionary<string userid, long amount> data)
"""
return self._Parent.AddPointsAll(data)
def add_points_all_async(self, data, callback):
r"""
        Asynchronously adds currency to several users.
        Callback will receive a list of user IDs that could not receive currency
        because they were not in chat. The same value will also be returned from
        the function.
List\<string userid> AddPointsAllAsync(PythonDictionary<string userid, long amount> data, Action<List\<string userid>> callback)
"""
return self._Parent.AddPointsAllAsync(data, callback)
def remove_points_all(self, data):
r"""
        Synchronously removes currency from several users.
Returns a list of user IDs that could not lose currency because they were
not in chat.
List\<string userid> RemovePointsAll(PythonDictionary<string userid, long amount> data)
"""
return self._Parent.RemovePointsAll(data)
def remove_points_all_async(self, data, callback):
r"""
        Asynchronously removes currency from several users.
        Callback will receive a list of user IDs that could not lose currency
        because they were not in chat. The same value will also be returned from
        the function.
List\<string userid> RemovePointsAllAsync(PythonDictionary<string userid, long amount> data, Action<List\<string userid>> callback)
"""
return self._Parent.RemovePointsAllAsync(data, callback)
def get_points(self, user_id):
r"""
Retrieves single user's currency.
long GetPoints(string userid)
"""
return self._Parent.GetPoints(user_id)
def get_hours(self, user_id):
r"""
Retrieves single user's hours watched.
long GetHours(string userid)
"""
return self._Parent.GetHours(user_id)
def get_rank(self, user_id):
r"""
Retrieves single user's rank.
string GetRank(string userid)
"""
return self._Parent.GetRank(user_id)
def get_top_currency(self, top):
r"""
Retrieves Top-X users based on currency.
PythonDictionary<string userid, long amount> GetTopCurrency(int top)
"""
return self._Parent.GetTopCurrency(top)
def get_top_hours(self, top):
r"""
        Retrieves Top-X users based on hours watched.
PythonDictionary<string userid, long amount> GetTopHours(int top)
"""
return self._Parent.GetTopHours(top)
def get_points_all(self, user_ids):
r"""
        Retrieves several users' points.
Note: "user_ids" should be .NET "System.Collections.Generic.List"
collection.
PythonDictionary<string userid, long amount> GetPointsAll(List\<string> userids)
"""
return self._Parent.GetPointsAll(user_ids)
def get_ranks_all(self, user_ids):
r"""
        Retrieves several users' ranks.
Note: "user_ids" should be .NET "System.Collections.Generic.List"
collection.
PythonDictionary<string userid, long amount> GetRanksAll(List\<string> userids)
"""
return self._Parent.GetRanksAll(user_ids)
def get_hours_all(self, user_ids):
r"""
        Retrieves several users' hours.
Note: "user_ids" should be .NET "System.Collections.Generic.List"
collection.
PythonDictionary<string userid, long amount> GetHoursAll(List\<string> userids)
"""
return self._Parent.GetHoursAll(user_ids)
def get_currency_users(self, user_ids):
r"""
        Retrieves several users' currency information.
Note: "user_ids" should be .NET "System.Collections.Generic.List"
collection.
List\<Currency> GetCurrencyUsers(List\<string> userids)
Currency Object:
| Variable | Usage |
| ----------------------------- | --------------- |
| string UserId | obj.UserId |
| string UserName | obj.UserName |
| long Points | obj.Points |
| long TimeWatched (In Minutes) | obj.TimeWatched |
| string Rank | obj.Rank |
"""
return self._Parent.GetCurrencyUsers(user_ids)
# Permissions.
def has_permission(self, user_id, permission, info):
r"""
Checks permissions.
bool HasPermission(string userid, string permission, string info)
"""
return self._Parent.HasPermission(user_id, permission, info)
# Viewers.
def get_viewer_list(self):
r"""
Retrieves the viewerlist.
List\<string userid> GetViewerList()
"""
return self._Parent.GetViewerList()
def get_active_users(self):
r"""
Retrieves all active users.
List\<string userid> GetActiveUsers()
"""
return self._Parent.GetActiveUsers()
def get_random_active_user(self):
r"""
Retrieves a single random active user.
string GetRandomActiveUser()
"""
return self._Parent.GetRandomActiveUser()
def get_display_name(self, user_id):
r"""
Retrieves a single user display name.
string GetDisplayName(string userId)
"""
return self._Parent.GetDisplayName(user_id)
def get_display_names(self, user_ids):
r"""
        Retrieves the display names of several users.
Note: "user_ids" should be .NET "System.Collections.Generic.List"
collection.
PythonDictionary<string userid, string username> GetDisplayNames(List\<string> userIds)
"""
return self._Parent.GetDisplayNames(user_ids)
# Cooldown Management.
def add_cooldown(self, script_name, command, seconds):
r"""
Adds a command to the cooldown manager.
void AddCooldown(string scriptName, string command, int seconds)
"""
self._Parent.AddCooldown(script_name, command, seconds)
def is_on_cooldown(self, script_name, command):
r"""
Checks if the command is on cooldown.
bool IsOnCooldown(string scriptName, string command)
"""
return self._Parent.IsOnCooldown(script_name, command)
def get_cooldown_duration(self, script_name, command):
r"""
Retrieves the remaining cooldown duration.
int GetCooldownDuration(string scriptName, string command)
"""
return self._Parent.GetCooldownDuration(script_name, command)
def add_user_cooldown(self, script_name, command, user_id, seconds):
r"""
Adds a user cooldown to a command.
void AddUserCooldown(string scriptName, string command, string userid, int seconds)
"""
self._Parent.AddUserCooldown(script_name, command, user_id, seconds)
def is_on_user_cooldown(self, script_name, command, user_id):
r"""
Checks if a command is on user cooldown.
bool IsOnUserCooldown(string scriptName, string command, string userid)
"""
return self._Parent.IsOnUserCooldown(script_name, command, user_id)
def get_user_cooldown_duration(self, script_name, command, user_id):
r"""
Retrieves the remaining user cooldown duration.
bool GetUserCooldownDuration(string scriptName, string command, string userid)
"""
return self._Parent.GetUserCooldownDuration(script_name, command, user_id)
# OBS Management.
def set_obs_current_scene(self, scene_name, callback=None):
r"""
Changes scene on OBS.
Callback will receive the JSON string that OBS returns.
void SetOBSCurrentScene(string sceneName, Action\<string> callback = null)
"""
self._Parent.SetOBSCurrentScene(scene_name, callback)
def set_obs_source_render(self, source, render, scene_name=None, callback=None):
r"""
Shows/Hides a source in OBS.
Callback will receive the JSON string that OBS returns.
void SetOBSSourceRender(string source, bool render, string sceneName = null, Action\<string> callback = null)
"""
self._Parent.SetOBSSourceRender(source, render, scene_name, callback)
def stop_obs_streaming(self, callback=None):
r"""
Stops the stream.
Callback will receive the JSON string that OBS returns.
void StopOBSStreaming(Action\<string> callback = null)
"""
self._Parent.StopOBSStreaming(callback)
def get_obs_special_sources(self, callback=None):
r"""
Retrieves all audio sources.
Callback will receive the JSON string that OBS returns.
void GetOBSSpecialSources(Action\<string> callback)
"""
self._Parent.GetOBSSpecialSources(callback)
def get_obs_volume(self, source, callback=None):
r"""
Retrieves the volume of an OBS source.
Callback will receive the JSON string that OBS returns.
void GetOBSVolume(string source, Action\<string> callback = null)
"""
self._Parent.GetOBSVolume(source, callback)
def set_obs_volume(self, source, volume, callback=None):
r"""
Controls the volume of an OBS source.
Callback will receive the JSON string that OBS returns.
void SetOBSVolume(string source, double volume, Action\<string> callback = null)
"""
self._Parent.SetOBSVolume(source, volume, callback)
def get_obs_mute(self, source, callback=None):
r"""
        Retrieves the mute status of a specific OBS source.
Callback will receive the JSON string that OBS returns.
void GetOBSMute(string source, Action\<string> callback)
"""
self._Parent.GetOBSMute(source, callback)
def set_obs_mute(self, source, mute, callback=None):
r"""
        Sets the mute state of a specific OBS source.
Callback will receive the JSON string that OBS returns.
void SetOBSMute(string source, bool mute, Action\<string> callback = null)
"""
self._Parent.SetOBSMute(source, mute, callback)
def toggle_obs_mute(self, source, callback=None):
r"""
Toggles mute of a specific OBS source.
Callback will receive the JSON string that OBS returns.
void ToggleOBSMute(string source, Action\<string> callback = null)
"""
self._Parent.ToggleOBSMute(source, callback)
# API Requests.
def get_request(self, url, headers):
r"""
Sends HTTP GET request.
string GetRequest(string url, PythonDictionary headers)
"""
return self._Parent.GetRequest(url, headers)
def post_request(self, url, headers, content, isJsonContent=True):
r"""
Sends HTTP POST request.
string PostRequest(string url, PythonDictionary headers, PythonDictionary content, bool isJsonContent = true)
"""
return self._Parent.PostRequest(url, headers, content, isJsonContent)
def delete_request(self, url, headers):
r"""
Sends HTTP DELETE request.
string DeleteRequest(string url, PythonDictionary headers)
"""
return self._Parent.DeleteRequest(url, headers)
def put_request(self, url, headers, content, isJsonContent=True):
r"""
Sends HTTP PUT request.
string PutRequest(string url, PythonDictionary headers, PythonDictionary content, bool isJsonContent = true)
"""
return self._Parent.PutRequest(url, headers, content, isJsonContent)
# Stream Information.
def is_live(self):
r"""
Checks if the stream is live.
bool IsLive()
"""
return self._Parent.IsLive()
# GameWisp Information.
def get_gw_tier_level(self, user_id):
r"""
Retrieves a user's GameWisp Sub Tier.
int GetGwTierLevel(string user)
"""
return self._Parent.GetGwTierLevel(user_id)
# Miscellaneous.
def get_random(self, min_, max_):
r"""
Gets a random number.
int GetRandom(int min, int max)
"""
return self._Parent.GetRandom(min_, max_)
def get_streaming_service(self):
r"""
Retrieves the streaming platform that the Chatbot is being used on.
string GetStreamingService()
"""
return self._Parent.GetStreamingService()
def get_channel_name(self):
r"""
Gets the stream's channel name (only applicable to Twitch).
string GetChannelName()
"""
return self._Parent.GetChannelName()
def get_currency_name(self):
r"""
Retrieves the stream's currency name.
string GetCurrencyName()
"""
return self._Parent.GetCurrencyName()
def log(self, script_name, message):
r"""
Logs information to the Bot's Log Window.
void Log(string scriptName, string message)
"""
self._Parent.Log(script_name, message)
def play_sound(self, file_path, volume):
r"""
Attempts to play a sound if possible.
bool PlaySound(string filePath, float volume)
"""
return self._Parent.PlaySound(file_path, volume)
def get_queue(self, max_):
r"""
        Retrieves up to X users currently in the queue.
PythonDictionary<int position, string userid> GetQueue(int max)
"""
return self._Parent.GetQueue(max_)
# Song Queue Playlist Information.
def get_song_queue(self, max_):
r"""
        Retrieves the next X songs in the queue.
List\<Song> GetSongQueue(int max)
Song Object:
| Variable | Usage |
| ---------------------- |-------------------- |
| string Title | obj.Title |
| string RequestedBy | obj.RequestedBy |
| string RequestedByName | obj.RequestedByName |
| string ID | obj.ID |
| string URL | obj.URL |
"""
return self._Parent.GetSongQueue(max_)
def get_song_playlist(self, max_):
r"""
        Retrieves the next X songs in the playlist.
List\<Song> GetSongPlaylist(int max)
Song Object:
| Variable | Usage |
| ---------------------- |-------------------- |
| string Title | obj.Title |
| string RequestedBy | obj.RequestedBy |
| string RequestedByName | obj.RequestedByName |
| string ID | obj.ID |
| string URL | obj.URL |
"""
return self._Parent.GetSongPlaylist(max_)
def get_now_playing(self):
r"""
Gets the current song that's playing.
KeyValuePair<string title, string requestedBy> GetNowPlaying()
"""
return self._Parent.GetNowPlaying()
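# Hedged usage sketch, not part of the original library: shows how a script
# could wrap the "Parent" object injected by the Streamlabs Chatbot and call a
# few of the methods above. The function name, script name and command string
# are illustrative assumptions, not part of the real script.
def _example_wrapper_usage(Parent):
    wrapper = SongRequestParentWrapper(Parent)
    # Respect a per-command cooldown before answering in chat.
    if not wrapper.is_on_cooldown("SongRequest", "!sr"):
        wrapper.send_stream_message("Song request received!")
        wrapper.add_cooldown("SongRequest", "!sr", 30)
    # Return the currently playing song as (title, requestedBy).
    return wrapper.get_now_playing()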
| 30.599002
| 139
| 0.621697
|
61fb23d382d3a04d0d0084e8049e86b46a75782f
| 792
|
py
|
Python
|
workers/pull_request_worker/runtime.py
|
theamankumarsingh/augur
|
3ae0c4d6e4fb9b6dfddff02a92a170763e9bf8bd
|
[
"MIT"
] | 443
|
2018-09-19T00:30:36.000Z
|
2022-03-31T11:39:13.000Z
|
workers/pull_request_worker/runtime.py
|
theamankumarsingh/augur
|
3ae0c4d6e4fb9b6dfddff02a92a170763e9bf8bd
|
[
"MIT"
] | 613
|
2018-09-19T18:31:13.000Z
|
2022-03-31T05:41:16.000Z
|
workers/pull_request_worker/runtime.py
|
theamankumarsingh/augur
|
3ae0c4d6e4fb9b6dfddff02a92a170763e9bf8bd
|
[
"MIT"
] | 764
|
2018-10-17T01:08:10.000Z
|
2022-03-31T05:25:01.000Z
|
#SPDX-License-Identifier: MIT
from flask import Flask, jsonify, request, Response
import click, os, json, requests, logging
from workers.pull_request_worker.pull_request_worker import GitHubPullRequestWorker
from workers.util import create_server, WorkerGunicornApplication
def main():
""" Declares singular worker and creates the server and flask app that it will be running on
"""
app = Flask(__name__)
app.worker = GitHubPullRequestWorker()
create_server(app)
WorkerGunicornApplication(app).run()
    # Cleanup once the Gunicorn application returns: stop the worker's child
    # process (if any) and try to deregister this worker from the broker.
    if app.worker._child is not None:
        app.worker._child.terminate()
    try:
        # broker_host, broker_port and config are expected to come from the
        # surrounding configuration; any failure here is deliberately ignored.
        requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']})
    except:
        pass
    os.kill(os.getpid(), 9)
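# Hedged sketch, not shown in this excerpt: the worker module is typically run
# directly, so exposing main() behind a __main__ guard is assumed here.
if __name__ == "__main__":
    main()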
| 33
| 125
| 0.719697
|
57e642eb953450edcd41673ee1af07397ce6ad1a
| 580
|
py
|
Python
|
src/openprocurement/tender/simpledefense/views/tender_document.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 10
|
2020-02-18T01:56:21.000Z
|
2022-03-28T00:32:57.000Z
|
src/openprocurement/tender/simpledefense/views/tender_document.py
|
quintagroup/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 26
|
2018-07-16T09:30:44.000Z
|
2021-02-02T17:51:30.000Z
|
src/openprocurement/tender/simpledefense/views/tender_document.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 15
|
2019-08-08T10:50:47.000Z
|
2022-02-05T14:13:36.000Z
|
# -*- coding: utf-8 -*-
from openprocurement.tender.core.utils import optendersresource
from openprocurement.tender.openuadefense.views.tender_document import TenderUaDocumentResource as TenderDocumentResource
@optendersresource(
name="simple.defense:Tender Documents",
collection_path="/tenders/{tender_id}/documents",
path="/tenders/{tender_id}/documents/{document_id}",
procurementMethodType="simple.defense",
description="Tender simple.defense related binary files (PDFs, etc.)",
)
class TenderSimpleDefDocumentResource(TenderDocumentResource):
pass
| 38.666667
| 121
| 0.791379
|
89c2cb7c19793c0ac66a07b900c79de6f9ad02b6
| 40,425
|
py
|
Python
|
sdks/python/apache_beam/runners/common.py
|
azurezyq/beam
|
6518abfb3ea47a4802d76ca3c405c3f66e48eaa2
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
sdks/python/apache_beam/runners/common.py
|
azurezyq/beam
|
6518abfb3ea47a4802d76ca3c405c3f66e48eaa2
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
sdks/python/apache_beam/runners/common.py
|
azurezyq/beam
|
6518abfb3ea47a4802d76ca3c405c3f66e48eaa2
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2019-10-26T12:26:16.000Z
|
2019-10-26T12:26:16.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=True
"""Worker operations executor.
For internal use only; no backwards-compatibility guarantees.
"""
from __future__ import absolute_import
import traceback
from builtins import next
from builtins import object
from builtins import zip
from future.utils import raise_with_traceback
from past.builtins import unicode
from apache_beam.internal import util
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.pvalue import TaggedOutput
from apache_beam.transforms import DoFn
from apache_beam.transforms import core
from apache_beam.transforms import userstate
from apache_beam.transforms.core import RestrictionProvider
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import TimestampedValue
from apache_beam.transforms.window import WindowFn
from apache_beam.utils.counters import Counter
from apache_beam.utils.counters import CounterName
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
class NameContext(object):
"""Holds the name information for a step."""
def __init__(self, step_name):
"""Creates a new step NameContext.
Args:
step_name: The name of the step.
"""
self.step_name = step_name
def __eq__(self, other):
return self.step_name == other.step_name
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __repr__(self):
return 'NameContext(%s)' % self.__dict__
def __hash__(self):
return hash(self.step_name)
def metrics_name(self):
"""Returns the step name used for metrics reporting."""
return self.step_name
def logging_name(self):
"""Returns the step name used for logging."""
return self.step_name
# TODO(BEAM-4028): Move DataflowNameContext to Dataflow internal code.
class DataflowNameContext(NameContext):
"""Holds the name information for a step in Dataflow.
This includes a step_name (e.g. s2), a user_name (e.g. Foo/Bar/ParDo(Fab)),
and a system_name (e.g. s2-shuffle-read34)."""
def __init__(self, step_name, user_name, system_name):
"""Creates a new step NameContext.
Args:
step_name: The internal name of the step (e.g. s2).
user_name: The full user-given name of the step (e.g. Foo/Bar/ParDo(Far)).
system_name: The step name in the optimized graph (e.g. s2-1).
"""
super(DataflowNameContext, self).__init__(step_name)
self.user_name = user_name
self.system_name = system_name
def __eq__(self, other):
return (self.step_name == other.step_name and
self.user_name == other.user_name and
self.system_name == other.system_name)
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash((self.step_name, self.user_name, self.system_name))
def __repr__(self):
return 'DataflowNameContext(%s)' % self.__dict__
def logging_name(self):
"""Stackdriver logging relies on user-given step names (e.g. Foo/Bar)."""
return self.user_name
class Receiver(object):
"""For internal use only; no backwards-compatibility guarantees.
An object that consumes a WindowedValue.
This class can be efficiently used to pass values between the
sdk and worker harnesses.
"""
def receive(self, windowed_value):
raise NotImplementedError
class MethodWrapper(object):
"""For internal use only; no backwards-compatibility guarantees.
Represents a method that can be invoked by `DoFnInvoker`."""
def __init__(self, obj_to_invoke, method_name):
"""
Initiates a ``MethodWrapper``.
Args:
obj_to_invoke: the object that contains the method. Has to either be a
`DoFn` object or a `RestrictionProvider` object.
method_name: name of the method as a string.
"""
if not isinstance(obj_to_invoke, (DoFn, RestrictionProvider)):
raise ValueError('\'obj_to_invoke\' has to be either a \'DoFn\' or '
'a \'RestrictionProvider\'. Received %r instead.'
% obj_to_invoke)
fullargspec = core.get_function_arguments(
obj_to_invoke, method_name)
# TODO(BEAM-5878) support kwonlyargs on Python 3.
args = fullargspec[0]
defaults = fullargspec[3]
defaults = defaults if defaults else []
method_value = getattr(obj_to_invoke, method_name)
self.method_value = method_value
self.args = args
self.defaults = defaults
self.has_userstate_arguments = False
self.state_args_to_replace = {}
self.timer_args_to_replace = {}
self.timestamp_arg_name = None
self.window_arg_name = None
self.key_arg_name = None
for kw, v in zip(args[-len(defaults):], defaults):
if isinstance(v, core.DoFn.StateParam):
self.state_args_to_replace[kw] = v.state_spec
self.has_userstate_arguments = True
elif isinstance(v, core.DoFn.TimerParam):
self.timer_args_to_replace[kw] = v.timer_spec
self.has_userstate_arguments = True
elif v == core.DoFn.TimestampParam:
self.timestamp_arg_name = kw
elif v == core.DoFn.WindowParam:
self.window_arg_name = kw
elif v == core.DoFn.KeyParam:
self.key_arg_name = kw
def invoke_timer_callback(self,
user_state_context,
key,
window,
timestamp):
# TODO(ccy): support side inputs.
kwargs = {}
if self.has_userstate_arguments:
for kw, state_spec in self.state_args_to_replace.items():
kwargs[kw] = user_state_context.get_state(state_spec, key, window)
for kw, timer_spec in self.timer_args_to_replace.items():
kwargs[kw] = user_state_context.get_timer(timer_spec, key, window)
if self.timestamp_arg_name:
kwargs[self.timestamp_arg_name] = Timestamp(seconds=timestamp)
if self.window_arg_name:
kwargs[self.window_arg_name] = window
if self.key_arg_name:
kwargs[self.key_arg_name] = key
if kwargs:
return self.method_value(**kwargs)
else:
return self.method_value()
class DoFnSignature(object):
"""Represents the signature of a given ``DoFn`` object.
Signature of a ``DoFn`` provides a view of the properties of a given ``DoFn``.
  Among other things, this will give an extensible way for (1) accessing the
  structure of the ``DoFn`` including methods and method parameters,
  (2) identifying features that a given ``DoFn`` supports, for example, whether
a given ``DoFn`` is a Splittable ``DoFn`` (
https://s.apache.org/splittable-do-fn) (3) validating a ``DoFn`` based on the
feature set offered by it.
"""
def __init__(self, do_fn):
# We add a property here for all methods defined by Beam DoFn features.
assert isinstance(do_fn, core.DoFn)
self.do_fn = do_fn
self.process_method = MethodWrapper(do_fn, 'process')
self.start_bundle_method = MethodWrapper(do_fn, 'start_bundle')
self.finish_bundle_method = MethodWrapper(do_fn, 'finish_bundle')
self.setup_lifecycle_method = MethodWrapper(do_fn, 'setup')
self.teardown_lifecycle_method = MethodWrapper(do_fn, 'teardown')
restriction_provider = self.get_restriction_provider()
self.initial_restriction_method = (
MethodWrapper(restriction_provider, 'initial_restriction')
if restriction_provider else None)
self.restriction_coder_method = (
MethodWrapper(restriction_provider, 'restriction_coder')
if restriction_provider else None)
self.create_tracker_method = (
MethodWrapper(restriction_provider, 'create_tracker')
if restriction_provider else None)
self.split_method = (
MethodWrapper(restriction_provider, 'split')
if restriction_provider else None)
self._validate()
# Handle stateful DoFns.
self._is_stateful_dofn = userstate.is_stateful_dofn(do_fn)
self.timer_methods = {}
if self._is_stateful_dofn:
# Populate timer firing methods, keyed by TimerSpec.
_, all_timer_specs = userstate.get_dofn_specs(do_fn)
for timer_spec in all_timer_specs:
method = timer_spec._attached_callback
self.timer_methods[timer_spec] = MethodWrapper(do_fn, method.__name__)
def get_restriction_provider(self):
result = _find_param_with_default(self.process_method,
default_as_type=DoFn.RestrictionParam)
return result[1].restriction_provider if result else None
def _validate(self):
self._validate_process()
self._validate_bundle_method(self.start_bundle_method)
self._validate_bundle_method(self.finish_bundle_method)
self._validate_stateful_dofn()
def _validate_process(self):
"""Validate that none of the DoFnParameters are repeated in the function
"""
param_ids = [d.param_id for d in self.process_method.defaults
if isinstance(d, core._DoFnParam)]
if len(param_ids) != len(set(param_ids)):
raise ValueError(
'DoFn %r has duplicate process method parameters: %s.' % (
self.do_fn, param_ids))
def _validate_bundle_method(self, method_wrapper):
"""Validate that none of the DoFnParameters are used in the function
"""
for param in core.DoFn.DoFnProcessParams:
if param in method_wrapper.defaults:
raise ValueError(
'DoFn.process() method-only parameter %s cannot be used in %s.' %
(param, method_wrapper))
def _validate_stateful_dofn(self):
userstate.validate_stateful_dofn(self.do_fn)
def is_splittable_dofn(self):
return any([isinstance(default, DoFn.RestrictionParam) for default in
self.process_method.defaults])
def is_stateful_dofn(self):
return self._is_stateful_dofn
def has_timers(self):
_, all_timer_specs = userstate.get_dofn_specs(self.do_fn)
return bool(all_timer_specs)
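# Hedged illustration, not part of the original module: a minimal DoFn whose
# process() uses the parameter-default placeholders that MethodWrapper and
# DoFnSignature introspect above. The class name is an assumption used only
# for this sketch; it is not referenced anywhere else.
class _ExampleWindowedDoFn(core.DoFn):
  """Hypothetical DoFn used only to illustrate signature introspection."""
  def process(self, element,
              timestamp=core.DoFn.TimestampParam,
              window=core.DoFn.WindowParam):
    # DoFnSignature(_ExampleWindowedDoFn()) would report is_splittable_dofn()
    # and is_stateful_dofn() as False, while PerWindowInvoker (below) fills
    # the timestamp/window placeholders for every processed element.
    yield element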
class DoFnInvoker(object):
"""An abstraction that can be used to execute DoFn methods.
A DoFnInvoker describes a particular way for invoking methods of a DoFn
represented by a given DoFnSignature."""
def __init__(self, output_processor, signature):
"""
Initializes `DoFnInvoker`
:param output_processor: an OutputProcessor for receiving elements produced
by invoking functions of the DoFn.
:param signature: a DoFnSignature for the DoFn being invoked
"""
self.output_processor = output_processor
self.signature = signature
self.user_state_context = None
self.bundle_finalizer_param = None
@staticmethod
def create_invoker(
signature,
output_processor=None,
context=None, side_inputs=None, input_args=None, input_kwargs=None,
process_invocation=True,
user_state_context=None,
bundle_finalizer_param=None):
""" Creates a new DoFnInvoker based on given arguments.
Args:
output_processor: an OutputProcessor for receiving elements produced by
invoking functions of the DoFn.
signature: a DoFnSignature for the DoFn being invoked.
context: Context to be used when invoking the DoFn (deprecated).
        side_inputs: side inputs to be used when invoking the process method.
input_args: arguments to be used when invoking the process method. Some
of the arguments given here might be placeholders (for
example for side inputs) that get filled before invoking the
process method.
input_kwargs: keyword arguments to be used when invoking the process
method. Some of the keyword arguments given here might be
placeholders (for example for side inputs) that get filled
before invoking the process method.
process_invocation: If True, this function may return an invoker that
performs extra optimizations for invoking process()
method efficiently.
user_state_context: The UserStateContext instance for the current
Stateful DoFn.
bundle_finalizer_param: The param that passed to a process method, which
allows a callback to be registered.
"""
side_inputs = side_inputs or []
default_arg_values = signature.process_method.defaults
use_simple_invoker = not process_invocation or (
not side_inputs and not input_args and not input_kwargs and
not default_arg_values and not signature.is_stateful_dofn())
if use_simple_invoker:
return SimpleInvoker(output_processor, signature)
else:
return PerWindowInvoker(
output_processor,
signature, context, side_inputs, input_args, input_kwargs,
user_state_context, bundle_finalizer_param)
def invoke_process(self, windowed_value, restriction_tracker=None,
output_processor=None,
additional_args=None, additional_kwargs=None):
"""Invokes the DoFn.process() function.
Args:
windowed_value: a WindowedValue object that gives the element for which
process() method should be invoked along with the window
the element belongs to.
      output_processor: if provided, the given OutputProcessor will be used.
additional_args: additional arguments to be passed to the current
`DoFn.process()` invocation, usually as side inputs.
additional_kwargs: additional keyword arguments to be passed to the
current `DoFn.process()` invocation.
"""
raise NotImplementedError
def invoke_setup(self):
"""Invokes the DoFn.setup() method
"""
self.signature.setup_lifecycle_method.method_value()
def invoke_start_bundle(self):
"""Invokes the DoFn.start_bundle() method.
"""
self.output_processor.start_bundle_outputs(
self.signature.start_bundle_method.method_value())
def invoke_finish_bundle(self):
"""Invokes the DoFn.finish_bundle() method.
"""
self.output_processor.finish_bundle_outputs(
self.signature.finish_bundle_method.method_value())
def invoke_teardown(self):
"""Invokes the DoFn.teardown() method
"""
self.signature.teardown_lifecycle_method.method_value()
def invoke_user_timer(self, timer_spec, key, window, timestamp):
self.output_processor.process_outputs(
WindowedValue(None, timestamp, (window,)),
self.signature.timer_methods[timer_spec].invoke_timer_callback(
self.user_state_context, key, window, timestamp))
def invoke_split(self, element, restriction):
return self.signature.split_method.method_value(element, restriction)
def invoke_initial_restriction(self, element):
return self.signature.initial_restriction_method.method_value(element)
def invoke_restriction_coder(self):
return self.signature.restriction_coder_method.method_value()
def invoke_create_tracker(self, restriction):
return self.signature.create_tracker_method.method_value(restriction)
def _find_param_with_default(
method, default_as_value=None, default_as_type=None):
if ((default_as_value and default_as_type) or
not (default_as_value or default_as_type)):
raise ValueError(
'Exactly one of \'default_as_value\' and \'default_as_type\' should be '
'provided. Received %r and %r.' % (default_as_value, default_as_type))
defaults = method.defaults
ret = None
for i, value in enumerate(defaults):
if default_as_value and value == default_as_value:
ret = (method.args[len(method.args) - len(defaults) + i], value)
elif default_as_type and isinstance(value, default_as_type):
index = len(method.args) - len(defaults) + i
ret = (method.args[index], value)
return ret
class SimpleInvoker(DoFnInvoker):
"""An invoker that processes elements ignoring windowing information."""
def __init__(self, output_processor, signature):
super(SimpleInvoker, self).__init__(output_processor, signature)
self.process_method = signature.process_method.method_value
def invoke_process(self, windowed_value, restriction_tracker=None,
output_processor=None,
additional_args=None, additional_kwargs=None):
if not output_processor:
output_processor = self.output_processor
output_processor.process_outputs(
windowed_value, self.process_method(windowed_value.value))
class PerWindowInvoker(DoFnInvoker):
"""An invoker that processes elements considering windowing information."""
def __init__(self, output_processor, signature, context,
side_inputs, input_args, input_kwargs, user_state_context,
bundle_finalizer_param):
super(PerWindowInvoker, self).__init__(output_processor, signature)
self.side_inputs = side_inputs
self.context = context
self.process_method = signature.process_method.method_value
default_arg_values = signature.process_method.defaults
self.has_windowed_inputs = (
not all(si.is_globally_windowed() for si in side_inputs) or
(core.DoFn.WindowParam in default_arg_values) or
signature.is_stateful_dofn())
self.user_state_context = user_state_context
self.is_splittable = signature.is_splittable_dofn()
self.restriction_tracker = None
self.current_windowed_value = None
self.bundle_finalizer_param = bundle_finalizer_param
self.is_key_param_required = False
    # Try to prepare all the arguments that can just be filled in
    # without any additional work in the process function.
# Also cache all the placeholders needed in the process function.
# Flag to cache additional arguments on the first element if all
# inputs are within the global window.
self.cache_globally_windowed_args = not self.has_windowed_inputs
input_args = input_args if input_args else []
input_kwargs = input_kwargs if input_kwargs else {}
arguments = signature.process_method.args
defaults = signature.process_method.defaults
# Create placeholder for element parameter of DoFn.process() method.
self_in_args = int(signature.do_fn.is_process_bounded())
class ArgPlaceholder(object):
def __init__(self, placeholder):
self.placeholder = placeholder
if core.DoFn.ElementParam not in default_arg_values:
args_to_pick = len(arguments) - len(default_arg_values) - 1 - self_in_args
args_with_placeholders = (
[ArgPlaceholder(core.DoFn.ElementParam)] + input_args[:args_to_pick])
else:
args_to_pick = len(arguments) - len(defaults) - self_in_args
args_with_placeholders = input_args[:args_to_pick]
# Fill the OtherPlaceholders for context, key, window or timestamp
remaining_args_iter = iter(input_args[args_to_pick:])
for a, d in zip(arguments[-len(defaults):], defaults):
if d == core.DoFn.ElementParam:
args_with_placeholders.append(ArgPlaceholder(d))
elif d == core.DoFn.KeyParam:
self.is_key_param_required = True
args_with_placeholders.append(ArgPlaceholder(d))
elif d == core.DoFn.WindowParam:
args_with_placeholders.append(ArgPlaceholder(d))
elif d == core.DoFn.TimestampParam:
args_with_placeholders.append(ArgPlaceholder(d))
elif d == core.DoFn.SideInputParam:
# If no more args are present then the value must be passed via kwarg
try:
args_with_placeholders.append(next(remaining_args_iter))
except StopIteration:
if a not in input_kwargs:
raise ValueError("Value for sideinput %s not provided" % a)
elif isinstance(d, core.DoFn.StateParam):
args_with_placeholders.append(ArgPlaceholder(d))
elif isinstance(d, core.DoFn.TimerParam):
args_with_placeholders.append(ArgPlaceholder(d))
elif d == core.DoFn.BundleFinalizerParam:
args_with_placeholders.append(ArgPlaceholder(d))
else:
# If no more args are present then the value must be passed via kwarg
try:
args_with_placeholders.append(next(remaining_args_iter))
except StopIteration:
pass
args_with_placeholders.extend(list(remaining_args_iter))
# Stash the list of placeholder positions for performance
self.placeholders = [(i, x.placeholder) for (i, x) in enumerate(
args_with_placeholders)
if isinstance(x, ArgPlaceholder)]
self.args_for_process = args_with_placeholders
self.kwargs_for_process = input_kwargs
def invoke_process(self, windowed_value, restriction_tracker=None,
output_processor=None,
additional_args=None, additional_kwargs=None):
if not additional_args:
additional_args = []
if not additional_kwargs:
additional_kwargs = {}
if not output_processor:
output_processor = self.output_processor
self.context.set_element(windowed_value)
    # Call the process function once per window if the DoFn has windowed side
    # inputs or accesses the window parameter; otherwise a single call is
    # enough, since none of the arguments change between windows.
if self.is_splittable and not restriction_tracker:
restriction = self.invoke_initial_restriction(windowed_value.value)
restriction_tracker = self.invoke_create_tracker(restriction)
if restriction_tracker:
if len(windowed_value.windows) > 1 and self.has_windowed_inputs:
# Should never get here due to window explosion in
# the upstream pair-with-restriction.
raise NotImplementedError(
'SDFs in multiply-windowed values with windowed arguments.')
restriction_tracker_param = _find_param_with_default(
self.signature.process_method,
default_as_type=DoFn.RestrictionParam)[0]
if not restriction_tracker_param:
raise ValueError(
'A RestrictionTracker %r was provided but DoFn does not have a '
'RestrictionTrackerParam defined' % restriction_tracker)
additional_kwargs[restriction_tracker_param] = restriction_tracker
try:
self.current_windowed_value = windowed_value
self.restriction_tracker = restriction_tracker
return self._invoke_process_per_window(
windowed_value, additional_args, additional_kwargs,
output_processor)
finally:
self.restriction_tracker = None
self.current_windowed_value = windowed_value
elif self.has_windowed_inputs and len(windowed_value.windows) != 1:
for w in windowed_value.windows:
self._invoke_process_per_window(
WindowedValue(windowed_value.value, windowed_value.timestamp, (w,)),
additional_args, additional_kwargs, output_processor)
else:
self._invoke_process_per_window(
windowed_value, additional_args, additional_kwargs, output_processor)
def _invoke_process_per_window(
self, windowed_value, additional_args,
additional_kwargs, output_processor):
if self.has_windowed_inputs:
window, = windowed_value.windows
side_inputs = [si[window] for si in self.side_inputs]
side_inputs.extend(additional_args)
args_for_process, kwargs_for_process = util.insert_values_in_args(
self.args_for_process, self.kwargs_for_process,
side_inputs)
elif self.cache_globally_windowed_args:
# Attempt to cache additional args if all inputs are globally
# windowed inputs when processing the first element.
self.cache_globally_windowed_args = False
# Fill in sideInputs if they are globally windowed
global_window = GlobalWindow()
self.args_for_process, self.kwargs_for_process = (
util.insert_values_in_args(
self.args_for_process, self.kwargs_for_process,
[si[global_window] for si in self.side_inputs]))
args_for_process, kwargs_for_process = (
self.args_for_process, self.kwargs_for_process)
else:
args_for_process, kwargs_for_process = (
self.args_for_process, self.kwargs_for_process)
# Extract key in the case of a stateful DoFn. Note that in the case of a
# stateful DoFn, we set during __init__ self.has_windowed_inputs to be
# True. Therefore, windows will be exploded coming into this method, and
# we can rely on the window variable being set above.
if self.user_state_context or self.is_key_param_required:
try:
key, unused_value = windowed_value.value
except (TypeError, ValueError):
raise ValueError(
('Input value to a stateful DoFn or KeyParam must be a KV tuple; '
'instead, got \'%s\'.') % (windowed_value.value,))
for i, p in self.placeholders:
if p == core.DoFn.ElementParam:
args_for_process[i] = windowed_value.value
elif p == core.DoFn.KeyParam:
args_for_process[i] = key
elif p == core.DoFn.WindowParam:
args_for_process[i] = window
elif p == core.DoFn.TimestampParam:
args_for_process[i] = windowed_value.timestamp
elif isinstance(p, core.DoFn.StateParam):
args_for_process[i] = (
self.user_state_context.get_state(p.state_spec, key, window))
elif isinstance(p, core.DoFn.TimerParam):
args_for_process[i] = (
self.user_state_context.get_timer(p.timer_spec, key, window))
elif p == core.DoFn.BundleFinalizerParam:
args_for_process[i] = self.bundle_finalizer_param
if additional_kwargs:
if kwargs_for_process is None:
kwargs_for_process = additional_kwargs
else:
for key in additional_kwargs:
kwargs_for_process[key] = additional_kwargs[key]
if kwargs_for_process:
output_processor.process_outputs(
windowed_value,
self.process_method(*args_for_process, **kwargs_for_process))
else:
output_processor.process_outputs(
windowed_value, self.process_method(*args_for_process))
if self.is_splittable:
deferred_status = self.restriction_tracker.deferred_status()
if deferred_status:
deferred_restriction, deferred_watermark = deferred_status
element = windowed_value.value
size = self.signature.get_restriction_provider().restriction_size(
element, deferred_restriction)
return (
windowed_value.with_value(((element, deferred_restriction), size)),
deferred_watermark)
def try_split(self, fraction):
restriction_tracker = self.restriction_tracker
current_windowed_value = self.current_windowed_value
if restriction_tracker and current_windowed_value:
split = restriction_tracker.try_split(fraction)
if split:
primary, residual = split
element = self.current_windowed_value.value
restriction_provider = self.signature.get_restriction_provider()
primary_size = restriction_provider.restriction_size(element, primary)
residual_size = restriction_provider.restriction_size(element, residual)
return (
(self.current_windowed_value.with_value(
((element, primary), primary_size)),
None),
(self.current_windowed_value.with_value(
((element, residual), residual_size)),
restriction_tracker.current_watermark()))
def current_element_progress(self):
restriction_tracker = self.restriction_tracker
if restriction_tracker:
return restriction_tracker.current_progress()
class DoFnRunner(Receiver):
"""For internal use only; no backwards-compatibility guarantees.
A helper class for executing ParDo operations.
"""
def __init__(self,
fn,
args,
kwargs,
side_inputs,
windowing,
tagged_receivers=None,
step_name=None,
logging_context=None,
state=None,
scoped_metrics_container=None,
operation_name=None,
user_state_context=None):
"""Initializes a DoFnRunner.
Args:
fn: user DoFn to invoke
args: positional side input arguments (static and placeholder), if any
kwargs: keyword side input arguments (static and placeholder), if any
side_inputs: list of sideinput.SideInputMaps for deferred side inputs
windowing: windowing properties of the output PCollection(s)
tagged_receivers: a dict of tag name to Receiver objects
step_name: the name of this step
logging_context: DEPRECATED [BEAM-4728]
state: handle for accessing DoFn state
scoped_metrics_container: DEPRECATED
operation_name: The system name assigned by the runner for this operation.
user_state_context: The UserStateContext instance for the current
Stateful DoFn.
"""
# Need to support multiple iterations.
side_inputs = list(side_inputs)
self.step_name = step_name
self.context = DoFnContext(step_name, state=state)
self.bundle_finalizer_param = DoFn.BundleFinalizerParam()
do_fn_signature = DoFnSignature(fn)
# Optimize for the common case.
main_receivers = tagged_receivers[None]
# TODO(BEAM-3937): Remove if block after output counter released.
if 'outputs_per_element_counter' in RuntimeValueProvider.experiments:
# TODO(BEAM-3955): Make step_name and operation_name less confused.
output_counter_name = (CounterName('per-element-output-count',
step_name=operation_name))
per_element_output_counter = state._counter_factory.get_counter(
output_counter_name, Counter.DATAFLOW_DISTRIBUTION).accumulator
else:
per_element_output_counter = None
output_processor = _OutputProcessor(
windowing.windowfn, main_receivers, tagged_receivers,
per_element_output_counter)
if do_fn_signature.is_stateful_dofn() and not user_state_context:
raise Exception(
'Requested execution of a stateful DoFn, but no user state context '
'is available. This likely means that the current runner does not '
'support the execution of stateful DoFns.')
self.do_fn_invoker = DoFnInvoker.create_invoker(
do_fn_signature, output_processor, self.context, side_inputs, args,
kwargs, user_state_context=user_state_context,
bundle_finalizer_param=self.bundle_finalizer_param)
def receive(self, windowed_value):
self.process(windowed_value)
def process(self, windowed_value):
try:
return self.do_fn_invoker.invoke_process(windowed_value)
except BaseException as exn:
self._reraise_augmented(exn)
def process_with_sized_restriction(self, windowed_value):
(element, restriction), _ = windowed_value.value
return self.do_fn_invoker.invoke_process(
windowed_value.with_value(element),
restriction_tracker=self.do_fn_invoker.invoke_create_tracker(
restriction))
def try_split(self, fraction):
return self.do_fn_invoker.try_split(fraction)
def current_element_progress(self):
return self.do_fn_invoker.current_element_progress()
def process_user_timer(self, timer_spec, key, window, timestamp):
try:
self.do_fn_invoker.invoke_user_timer(timer_spec, key, window, timestamp)
except BaseException as exn:
self._reraise_augmented(exn)
def _invoke_bundle_method(self, bundle_method):
try:
self.context.set_element(None)
bundle_method()
except BaseException as exn:
self._reraise_augmented(exn)
def _invoke_lifecycle_method(self, lifecycle_method):
try:
self.context.set_element(None)
lifecycle_method()
except BaseException as exn:
self._reraise_augmented(exn)
def setup(self):
self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
def start(self):
self._invoke_bundle_method(self.do_fn_invoker.invoke_start_bundle)
def finish(self):
self._invoke_bundle_method(self.do_fn_invoker.invoke_finish_bundle)
def teardown(self):
self._invoke_lifecycle_method(self.do_fn_invoker.invoke_teardown)
def finalize(self):
self.bundle_finalizer_param.finalize_bundle()
def _reraise_augmented(self, exn):
if getattr(exn, '_tagged_with_step', False) or not self.step_name:
raise
step_annotation = " [while running '%s']" % self.step_name
# To emulate exception chaining (not available in Python 2).
try:
# Attempt to construct the same kind of exception
# with an augmented message.
new_exn = type(exn)(exn.args[0] + step_annotation, *exn.args[1:])
new_exn._tagged_with_step = True # Could raise attribute error.
except: # pylint: disable=bare-except
# If anything goes wrong, construct a RuntimeError whose message
# records the original exception's type and message.
new_exn = RuntimeError(
traceback.format_exception_only(type(exn), exn)[-1].strip()
+ step_annotation)
new_exn._tagged_with_step = True
raise_with_traceback(new_exn)
class OutputProcessor(object):
def process_outputs(self, windowed_input_element, results):
raise NotImplementedError
class _OutputProcessor(OutputProcessor):
"""Processes output produced by DoFn method invocations."""
def __init__(self,
window_fn,
main_receivers,
tagged_receivers,
per_element_output_counter):
"""Initializes ``_OutputProcessor``.
Args:
window_fn: a windowing function (WindowFn).
      main_receivers: a Receiver object for the main (untagged) output.
      tagged_receivers: a dict of tag name to Receiver objects.
      per_element_output_counter: per-element output counter of one work_item;
                                  may be None if the experimental flag is off.
"""
self.window_fn = window_fn
self.main_receivers = main_receivers
self.tagged_receivers = tagged_receivers
self.per_element_output_counter = per_element_output_counter
def process_outputs(self, windowed_input_element, results):
"""Dispatch the result of process computation to the appropriate receivers.
A value wrapped in a TaggedOutput object will be unwrapped and
then dispatched to the appropriate indexed output.
"""
if results is None:
# TODO(BEAM-3937): Remove if block after output counter released.
# Only enable per_element_output_counter when counter cythonized.
if (self.per_element_output_counter is not None and
self.per_element_output_counter.is_cythonized):
self.per_element_output_counter.add_input(0)
return
output_element_count = 0
for result in results:
      # results may be a generator, so we cannot call len() on it.
output_element_count += 1
tag = None
if isinstance(result, TaggedOutput):
tag = result.tag
if not isinstance(tag, (str, unicode)):
raise TypeError('In %s, tag %s is not a string' % (self, tag))
result = result.value
if isinstance(result, WindowedValue):
windowed_value = result
if (windowed_input_element is not None
and len(windowed_input_element.windows) != 1):
windowed_value.windows *= len(windowed_input_element.windows)
elif isinstance(result, TimestampedValue):
assign_context = WindowFn.AssignContext(result.timestamp, result.value)
windowed_value = WindowedValue(
result.value, result.timestamp,
self.window_fn.assign(assign_context))
if len(windowed_input_element.windows) != 1:
windowed_value.windows *= len(windowed_input_element.windows)
else:
windowed_value = windowed_input_element.with_value(result)
if tag is None:
self.main_receivers.receive(windowed_value)
else:
self.tagged_receivers[tag].receive(windowed_value)
# TODO(BEAM-3937): Remove if block after output counter released.
# Only enable per_element_output_counter when counter cythonized
if (self.per_element_output_counter is not None and
self.per_element_output_counter.is_cythonized):
self.per_element_output_counter.add_input(output_element_count)
def start_bundle_outputs(self, results):
"""Validate that start_bundle does not output any elements"""
if results is None:
return
raise RuntimeError(
'Start Bundle should not output any elements but got %s' % results)
def finish_bundle_outputs(self, results):
"""Dispatch the result of finish_bundle to the appropriate receivers.
A value wrapped in a TaggedOutput object will be unwrapped and
then dispatched to the appropriate indexed output.
"""
if results is None:
return
for result in results:
tag = None
if isinstance(result, TaggedOutput):
tag = result.tag
if not isinstance(tag, (str, unicode)):
raise TypeError('In %s, tag %s is not a string' % (self, tag))
result = result.value
if isinstance(result, WindowedValue):
windowed_value = result
else:
raise RuntimeError('Finish Bundle should only output WindowedValue ' +\
'type but got %s' % type(result))
if tag is None:
self.main_receivers.receive(windowed_value)
else:
self.tagged_receivers[tag].receive(windowed_value)
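# Hedged illustration, not part of the original module: a DoFn whose process()
# yields both plain values (routed to the main receivers) and TaggedOutput
# values (routed to the matching tagged receiver), which is exactly the shape
# _OutputProcessor.process_outputs dispatches above. The class name and the
# 'odd' tag are illustrative assumptions.
class _ExampleMultiOutputDoFn(core.DoFn):
  def process(self, element):
    if element % 2:
      yield TaggedOutput('odd', element)
    else:
      yield element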
class _NoContext(WindowFn.AssignContext):
"""An uninspectable WindowFn.AssignContext."""
NO_VALUE = object()
def __init__(self, value, timestamp=NO_VALUE):
self.value = value
self._timestamp = timestamp
@property
def timestamp(self):
if self._timestamp is self.NO_VALUE:
raise ValueError('No timestamp in this context.')
else:
return self._timestamp
@property
def existing_windows(self):
raise ValueError('No existing_windows in this context.')
class DoFnState(object):
"""For internal use only; no backwards-compatibility guarantees.
Keeps track of state that DoFns want, currently, user counters.
"""
def __init__(self, counter_factory):
self.step_name = ''
self._counter_factory = counter_factory
def counter_for(self, aggregator):
"""Looks up the counter for this aggregator, creating one if necessary."""
return self._counter_factory.get_aggregator_counter(
self.step_name, aggregator)
# TODO(robertwb): Replace core.DoFnContext with this.
class DoFnContext(object):
"""For internal use only; no backwards-compatibility guarantees."""
def __init__(self, label, element=None, state=None):
self.label = label
self.state = state
if element is not None:
self.set_element(element)
def set_element(self, windowed_value):
self.windowed_value = windowed_value
@property
def element(self):
if self.windowed_value is None:
raise AttributeError('element not accessible in this context')
else:
return self.windowed_value.value
@property
def timestamp(self):
if self.windowed_value is None:
raise AttributeError('timestamp not accessible in this context')
else:
return self.windowed_value.timestamp
@property
def windows(self):
if self.windowed_value is None:
raise AttributeError('windows not accessible in this context')
else:
return self.windowed_value.windows
| 38.426806
| 80
| 0.706568
|
efae19e100c6f4590c8de08748d90a1723da7d21
| 3,194
|
py
|
Python
|
bbp/comps/lf_seismograms.py
|
ZhangHCFJEA/bbp
|
33bd999cf8d719c49f9a904872c62f02eb5850d1
|
[
"BSD-3-Clause"
] | 28
|
2017-10-31T09:16:30.000Z
|
2022-02-28T23:44:29.000Z
|
bbp/comps/lf_seismograms.py
|
ZhangHCFJEA/bbp
|
33bd999cf8d719c49f9a904872c62f02eb5850d1
|
[
"BSD-3-Clause"
] | 37
|
2017-05-23T15:15:35.000Z
|
2022-02-05T09:13:18.000Z
|
bbp/comps/lf_seismograms.py
|
ZhangHCFJEA/bbp
|
33bd999cf8d719c49f9a904872c62f02eb5850d1
|
[
"BSD-3-Clause"
] | 26
|
2017-09-21T17:43:33.000Z
|
2021-11-29T06:34:30.000Z
|
#!/usr/bin/env python
"""
Copyright 2010-2017 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
# Import Python modules
import os
# Import Broadband modules
from install_cfg import InstallCfg
from station_list import StationList
class LFSeismograms(object):
"""
This module copies pre-computed low-frequency seismograms to the
tmpdir directory
"""
def __init__(self, i_seis_dir, i_r_stations, sim_id=0):
"""
Initialize class variables
"""
self.seis_dir = i_seis_dir
self.r_stations = i_r_stations
self.sim_id = sim_id
def run(self):
"""
Goes through the station list and copy each low-frequency
seismogram from the seis_dir to the simulation's tmpdir
"""
install = InstallCfg.getInstance()
sim_id = self.sim_id
a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
a_stations = os.path.join(a_indir, self.r_stations)
print(self.seis_dir)
slo = StationList(a_stations)
stat_list = slo.getStationList()
for stat in stat_list:
# Look for bbp seismogram, copy in
print("%s/%s-lf.bbp" % (self.seis_dir, stat.scode))
if os.path.exists("%s/%s-lf.bbp" % (self.seis_dir, stat.scode)):
print("Copying for site %s" % (stat.scode))
# Need to eliminate negative times
fp_in = open("%s/%s-lf.bbp" % (self.seis_dir, stat.scode), 'r')
fp_out = open("%s/%d.%s-lf.bbp" %
(a_tmpdir, sim_id, stat.scode), 'w')
for line in fp_in:
pieces = line.split()
try:
if pieces[0] == '#' or pieces[0] == '%':
fp_out.write(line)
elif float(pieces[0]) < -0.0001:
continue
elif float(pieces[0]) < 0.0001:
fp_out.write("0.0\t%s\t%s\t%s\n" % (pieces[1],
pieces[2],
pieces[3]))
else:
fp_out.write(line)
except ValueError:
fp_out.write(line)
fp_in.close()
fp_out.flush()
fp_out.close()
else:
print("Could not find LF seismogram for station %s!" %
(stat.scode))
| 37.139535
| 79
| 0.550094
|
170889e931ecb837b8df989651cccbb37d626ca4
| 516
|
py
|
Python
|
src/bot.py
|
Tomodachi94/DiscordBotTemplate.py
|
9f4c79a7ec46049bfe4331ca63e044936e004f05
|
[
"MIT"
] | null | null | null |
src/bot.py
|
Tomodachi94/DiscordBotTemplate.py
|
9f4c79a7ec46049bfe4331ca63e044936e004f05
|
[
"MIT"
] | null | null | null |
src/bot.py
|
Tomodachi94/DiscordBotTemplate.py
|
9f4c79a7ec46049bfe4331ca63e044936e004f05
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
# This imports the Discord package for use in the bot.
import os
from dotenv import load_dotenv
# The python-dotenv package (imported as `dotenv`) loads your token from .env.
# It is an encouraged practice to keep tokens etc. out of source.
load_dotenv()  # read DISCORD_TOKEN from the local .env file
bot = commands.Bot(command_prefix="!")
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
else:
print(f'Unable to load {filename[:-3]}')
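# A minimal cog the loader loop above could pick up. This is a hypothetical
# sketch (assumed file ./cogs/ping.py), not part of this template:
#
#   from discord.ext import commands
#
#   class Ping(commands.Cog):
#       @commands.command()
#       async def ping(self, ctx):
#           await ctx.send("pong")
#
#   def setup(bot):
#       bot.add_cog(Ping())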
bot.run(os.getenv("DISCORD_TOKEN"))
| 28.666667
| 64
| 0.71124
|
1a188fded8aecf9c12638e060a1e147df4d185bd
| 500
|
py
|
Python
|
plyer/facades/keystore.py
|
EdwardCoventry/plyer
|
4002b21fe1a664e80b422547b8ae04d2a2d3037d
|
[
"MIT"
] | 1,184
|
2015-01-02T23:24:46.000Z
|
2022-03-27T16:28:16.000Z
|
plyer/facades/keystore.py
|
EdwardCoventry/plyer
|
4002b21fe1a664e80b422547b8ae04d2a2d3037d
|
[
"MIT"
] | 469
|
2015-01-02T09:23:15.000Z
|
2022-03-17T10:35:58.000Z
|
plyer/facades/keystore.py
|
EdwardCoventry/plyer
|
4002b21fe1a664e80b422547b8ae04d2a2d3037d
|
[
"MIT"
] | 431
|
2015-01-05T23:00:43.000Z
|
2022-03-15T04:20:03.000Z
|
class Keystore:
'''
Keyring facade
.. versionadded:: x.x.x
'''
def set_key(self, servicename, key, value, **kwargs):
self._set_key(servicename, key, value, **kwargs)
def _set_key(self, servicename, key, value, **kwargs):
raise NotImplementedError()
def get_key(self, servicename, key, **kwargs):
        return self._get_key(servicename, key, **kwargs)
def _get_key(self, servicename, key, **kwargs):
raise NotImplementedError()
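# Illustrative usage through the plyer proxy (assumes the facade is exported as
# plyer.keystore and that a platform implementation is available):
#
#   from plyer import keystore
#   keystore.set_key('myapp', 'api_token', 's3cr3t')
#   token = keystore.get_key('myapp', 'api_token')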
| 25
| 59
| 0.612
|
4049136315cd5585c903354251f145117ceaf780
| 2,525
|
py
|
Python
|
imu.py
|
robotika/husky
|
bbe57fc80a9d2e99c027a57390af07df064e826e
|
[
"MIT"
] | 3
|
2016-02-16T02:02:25.000Z
|
2021-08-06T20:39:11.000Z
|
imu.py
|
robotika/husky
|
bbe57fc80a9d2e99c027a57390af07df064e826e
|
[
"MIT"
] | null | null | null |
imu.py
|
robotika/husky
|
bbe57fc80a9d2e99c027a57390af07df064e826e
|
[
"MIT"
] | 1
|
2018-03-15T03:37:06.000Z
|
2018-03-15T03:37:06.000Z
|
#!/usr/bin/python
"""
Light weight wrapper for Chrobotics UM6 IMU
usage:
./imu.py <task> [<replay log file> [F|FF]]
"""
# https://www.chrobotics.com/docs/UM6_datasheet.pdf
# http://wiki.ros.org/um6
import sys
import serial
import struct
from logit import *
# page 42
UM6_GYRO_RAW_XY = 0x56
UM6_GYRO_PROC_XY = 0x5e
UM6_MAG_PROC_XY = 0x60
UM6_EULER_PHI_THETA = 0x62
UM6_TEMPERATURE = 0x76
class IMU:
def __init__( self, com ):
self.com = com
self.temperature = None
self.config()
def sendPacket( self, messageType, data = "" ):
pass
def readPacket( self ):
data = self.com.read(7) # minimal size
while not data.startswith( "snp" ):
print ord(data[0])
data = data[1:] + self.com.read(1)
pt = ord(data[3])
hasData = pt & 0x80
isBatch = pt & 0x40
batchLen = (pt >> 2) & 0x0F
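    # Worked example (illustrative): PT = 0xC8 = 0b11001000 sets the
    # "has data" and "is batch" bits, and batchLen = (0xC8 >> 2) & 0x0F = 2,
    # i.e. two 4-byte register payloads follow the 'snp'/PT/address header.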
if hasData:
if isBatch:
if batchLen > 0:
data += self.com.read( 4 * batchLen ) # number of 4-bytes registers
else:
data += self.com.read( 4 ) # single register
chsum = sum([ord(x) for x in data[:-2]])
assert struct.unpack(">H",data[-2:])[0] == chsum, (data[-2:], hex(chsum))
addr = ord(data[4])
assert addr in [0x76, 0x56, 0x5c, 0x5e, 0x60, 0x62], hex(addr) # visible in current dump
if addr == UM6_TEMPERATURE:
self.temperature = struct.unpack_from(">f", data, 5)[0]
print self.temperature
return data
def config( self ):
pass
def update( self ):
packet = self.readPacket()
print "".join(["%02X" % ord(x) for x in packet])
def testUsage( com ):
"log available data"
imu = IMU( com )
for i in xrange(100):
imu.update()
if __name__ == "__main__":
if len(sys.argv) < 2:
print __doc__
sys.exit(1)
filename = None
com = None
if len(sys.argv) > 2:
replayAssert = True
filename = sys.argv[2]
if len(sys.argv) > 3:
assert sys.argv[3] in ['F','FF']
      if sys.argv[3] == 'F':
replayAssert = False
else:
com = ReplyLogInputsOnly( filename )
if filename:
if com == None:
com = ReplayLog( filename, assertWrite=replayAssert )
else:
com = LogIt( serial.Serial( '/dev/ttyUSB0', 115200 ), prefix='logs/imu' )
testUsage( com )
# vim: expandtab sw=4 ts=4
| 26.578947
| 96
| 0.547327
|
e7c5af77cef865b29569ca4159590da6cc270520
| 5,281
|
py
|
Python
|
gui.py
|
mayurrkukreja/image-captioning
|
a2b185dac18f807d7efa8677c8899a8744b580d8
|
[
"MIT"
] | 3
|
2021-11-05T20:23:18.000Z
|
2021-11-15T11:10:07.000Z
|
gui.py
|
mayurrkukreja/image-captioning
|
a2b185dac18f807d7efa8677c8899a8744b580d8
|
[
"MIT"
] | null | null | null |
gui.py
|
mayurrkukreja/image-captioning
|
a2b185dac18f807d7efa8677c8899a8744b580d8
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from PIL import ImageTk, Image
import numpy as np
import cv2
# load the trained caption-generation model and the InceptionV3 feature extractor
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from pickle import dump, load
from keras.preprocessing.image import load_img, img_to_array
base_model = InceptionV3(weights = 'inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
vgg_model = Model(base_model.input, base_model.layers[-2].output)
def preprocess_img(img_path):
    # InceptionV3 expects images resized to 299x299
img = load_img(img_path, target_size = (299, 299))
x = img_to_array(img)
# Add one more dimension
x = np.expand_dims(x, axis = 0)
x = preprocess_input(x)
return x
def encode(image):
image = preprocess_img(image)
vec = vgg_model.predict(image)
vec = np.reshape(vec, (vec.shape[1]))
return vec
pickle_in = open("wordtoix.pkl", "rb")
wordtoix = load(pickle_in)
pickle_in = open("ixtoword.pkl", "rb")
ixtoword = load(pickle_in)
max_length = 74
def greedy_search(pic):
start = 'startseq'
for i in range(max_length):
seq = [wordtoix[word] for word in start.split() if word in wordtoix]
seq = pad_sequences([seq], maxlen = max_length)
yhat = model.predict([pic, seq])
yhat = np.argmax(yhat)
word = ixtoword[yhat]
start += ' ' + word
if word == 'endseq':
break
final = start.split()
final = final[1:-1]
final = ' '.join(final)
return final
def beam_search(image, beam_index = 3):
start = [wordtoix["startseq"]]
# start_word[0][0] = index of the starting word
# start_word[0][1] = probability of the word predicted
start_word = [[start, 0.0]]
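    # Each entry of start_word is [token_id_sequence, cumulative_score]: every
    # iteration expands each kept sequence with its top `beam_index` next-word
    # candidates, then keeps only the `beam_index` best-scoring expansions.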
while len(start_word[0][0]) < max_length:
temp = []
for s in start_word:
par_caps = pad_sequences([s[0]], maxlen=max_length)
e = image
preds = model.predict([e, np.array(par_caps)])
# Getting the top <beam_index>(n) predictions
word_preds = np.argsort(preds[0])[-beam_index:]
# creating a new list so as to put them via the model again
for w in word_preds:
next_cap, prob = s[0][:], s[1]
next_cap.append(w)
prob += preds[0][w]
temp.append([next_cap, prob])
start_word = temp
# Sorting according to the probabilities
start_word = sorted(start_word, reverse=False, key=lambda l: l[1])
# Getting the top words
start_word = start_word[-beam_index:]
start_word = start_word[-1][0]
intermediate_caption = [ixtoword[i] for i in start_word]
final_caption = []
for i in intermediate_caption:
if i != 'endseq':
final_caption.append(i)
else:
break
final_caption = ' '.join(final_caption[1:])
return final_caption
model = load_model('new-model-1.h5')
#initialise GUI
top=tk.Tk()
top.geometry('800x600')
top.title('Caption Generator')
top.configure(background='#CDCDCD')
label2=Label(top,background='#CDCDCD', font=('arial',15))
label1=Label(top,background='#CDCDCD', font=('arial',15))
label=Label(top,background='#CDCDCD', font=('arial',15))
sign_image = Label(top)
def classify(file_path):
global label_packed
enc = encode(file_path)
image = enc.reshape(1, 2048)
pred = greedy_search(image)
print(pred)
label.configure(foreground='#000', text= 'Greedy: ' + pred)
label.pack(side=BOTTOM,expand=True)
beam_3 = beam_search(image)
print(beam_3)
label1.configure(foreground='#011638', text = 'Beam_3: ' + beam_3)
label1.pack(side = BOTTOM, expand = True)
beam_5 = beam_search(image, 5)
print(beam_5)
label2.configure(foreground='#228B22', text = 'Beam_5: ' + beam_5)
label2.pack(side = BOTTOM, expand = True)
def show_classify_button(file_path):
classify_b=Button(top,text="Generate",command=lambda: classify(file_path),padx=10,pady=5)
classify_b.configure(background='#364156', foreground='white',font=('arial',10,'bold'))
classify_b.place(relx=0.79,rely=0.46)
def upload_image():
try:
file_path=filedialog.askopenfilename()
uploaded=Image.open(file_path)
uploaded.thumbnail(((top.winfo_width()/2.25),(top.winfo_height()/2.25)))
im=ImageTk.PhotoImage(uploaded)
sign_image.configure(image=im)
sign_image.image=im
label.configure(text='')
label1.configure(text='')
label2.configure(text='')
show_classify_button(file_path)
except:
pass
upload=Button(top,text="Upload an image",command=upload_image,padx=10,pady=5)
upload.configure(background='#364156', foreground='white',font=('arial',10,'bold'))
upload.pack(side=BOTTOM,pady=50)
sign_image.pack(side=BOTTOM,expand=True)
#label2.pack(side = BOTTOM, expand = True)
heading = Label(top, text="Caption Generator (Flickr30k)",pady=20, font=('arial',22,'bold'))
heading.configure(background='#CDCDCD',foreground='#FF6347')
heading.pack()
top.mainloop()
| 33.213836
| 93
| 0.655747
|
9b6ffb197fcf72baa34dfbd2d05d560b70d0e21c
| 10,926
|
py
|
Python
|
y2018/control_loops/python/intake_simple.py
|
Ewpratten/frc_971_mirror
|
3a8a0c4359f284d29547962c2b4c43d290d8065c
|
[
"BSD-2-Clause"
] | 39
|
2021-06-18T03:22:30.000Z
|
2022-03-21T15:23:43.000Z
|
y2018/control_loops/python/intake_simple.py
|
Ewpratten/frc_971_mirror
|
3a8a0c4359f284d29547962c2b4c43d290d8065c
|
[
"BSD-2-Clause"
] | 10
|
2021-06-18T03:22:19.000Z
|
2022-03-18T22:14:15.000Z
|
y2018/control_loops/python/intake_simple.py
|
Ewpratten/frc_971_mirror
|
3a8a0c4359f284d29547962c2b4c43d290d8065c
|
[
"BSD-2-Clause"
] | 4
|
2021-08-19T19:20:04.000Z
|
2022-03-08T07:33:18.000Z
|
#!/usr/bin/python3
# This code was used to select the gear ratio for the intake.
# Run it from the command line and it displays the time required
# to rotate the intake 180 degrees.
#
# Michael Schuh
# January 20, 2018
import math
import numpy
import scipy.integrate
pi = math.pi
pi2 = 2.0 * pi
rad_to_deg = 180.0 / pi
inches_to_meters = 0.0254
lbs_to_kg = 1.0 / 2.2
newton_to_lbf = 0.224809
newton_meters_to_ft_lbs = 0.73756
run_count = 0
theta_travel = 0.0
def to_deg(angle):
return angle * rad_to_deg
def to_rad(angle):
return angle / rad_to_deg
def to_rotations(angle):
return angle / pi2
def time_derivative(x, t, voltage, c1, c2, c3):
global run_count
theta, omega = x
dxdt = [omega, -c1 * omega + c3 * math.sin(theta) + c2 * voltage]
run_count = run_count + 1
return dxdt
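# time_derivative() above encodes the second-order model
#   theta'' = -c1 * theta' + c2 * voltage + c3 * sin(theta)
# as the first-order system [d(theta)/dt, d(omega)/dt] that
# scipy.integrate.odeint integrates.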
def get_distal_angle(theta_proximal):
# For the proximal angle = -50 degrees, the distal angle is -180 degrees
# For the proximal angle = 10 degrees, the distal angle is -90 degrees
distal_angle = to_rad(-180.0 - (-50.0 - to_deg(theta_proximal)) * \
(180.0 - 90.0) / (50.0 + 10.0))
return distal_angle
def get_180_degree_time(c1, c2, c3, voltage, gear_ratio, motor_free_speed):
global run_count
global theta_travel
if ( True ):
# Gravity is assisting the motion.
theta_start = 0.0
theta_target = pi
elif ( False ):
# Gravity is assisting the motion.
theta_start = 0.0
theta_target = -pi
elif ( False ):
# Gravity is slowing the motion.
theta_start = pi
theta_target = 0.0
elif ( False ):
# Gravity is slowing the motion.
theta_start = -pi
theta_target = 0.0
elif ( False ):
# This is for the proximal arm motion.
theta_start = to_rad(-50.0)
theta_target = to_rad(10.0)
theta_half = 0.5 * (theta_start + theta_target)
if theta_start > theta_target:
voltage = -voltage
theta = theta_start
theta_travel = theta_start - theta_target
if run_count == 0:
print("# Theta Start = %.2f radians End = %.2f Theta travel %.2f "
"Theta half = %.2f Voltage = %.2f" % (
theta_start, theta_target, theta_travel, theta_half, voltage))
print("# Theta Start = %.2f degrees End = %.2f Theta travel %.2f "
"Theta half = %.2f Voltage = %.2f" % (
to_deg(theta_start), to_deg(theta_target), to_deg(theta_travel),
to_deg(theta_half), voltage))
omega = 0.0
time = 0.0
delta_time = 0.01 # time step in seconds
for step in range(1, 5000):
t = numpy.array([time, time + delta_time])
time = time + delta_time
x = [theta, omega]
angular_acceleration = -c1 * omega + c2 * voltage
x_n_plus_1 = scipy.integrate.odeint(time_derivative, x, t,
args=(voltage, c1, c2, c3))
theta, omega = x_n_plus_1[1]
if ( False ):
print("%4d %8.4f %8.2f %8.4f %8.4f %8.3f "
"%8.3f %8.3f %8.3f" % (
step, time, theta, omega, angular_acceleration,
to_rotations(theta), to_rotations(omega),
omega * gear_ratio * 60.0 / pi2,
omega * gear_ratio / motor_free_speed))
if theta_start < theta_target:
# Angle is increasing through the motion.
if theta > theta_half:
break
else:
# Angle is decreasing through the motion.
if (theta < theta_half):
break
return 2.0 * time
def main():
# m/sec^2 Gravity Constant
gravity = 9.8
# m/sec^2 Gravity Constant - Use 0.0 for the intake. It is horizontal.
gravity = 0.0
# Volts
voltage_nominal = 12
# Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120
motor_name = "Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120"
current_stall = 134 # amps stall current
current_no_load = 0.7 # amps no load current
torque_stall = 710/1000.0 # N-m Stall Torque
speed_no_load_rpm = 18730 # RPM no load speed
if ( True ):
# Bag motor from https://www.vexrobotics.com/217-3351.html
motor_name = "Bag motor from https://www.vexrobotics.com/217-3351.html"
current_stall = 53.0 # amps stall current
current_no_load = 1.8 # amps no load current
torque_stall = 0.4 # N-m Stall Torque
speed_no_load_rpm = 13180.0 # RPM no load speed
if ( False ):
# Mini CIM motor from https://www.vexrobotics.com/217-3371.html
motor_name = "Mini CIM motor from https://www.vexrobotics.com/217-3371.html"
current_stall = 89.0 # amps stall current
current_no_load = 3.0 # amps no load current
torque_stall = 1.4 # N-m Stall Torque
speed_no_load_rpm = 5840.0 # RPM no load speed
# How many motors are we using?
num_motors = 1
# Motor values
print("# Motor: %s" % (motor_name))
print("# Number of motors: %d" % (num_motors))
print("# Stall torque: %.1f n-m" % (torque_stall))
print("# Stall current: %.1f amps" % (current_stall))
print("# No load current: %.1f amps" % (current_no_load))
print("# No load speed: %.0f rpm" % (speed_no_load_rpm))
# Constants from motor values
resistance_motor = voltage_nominal / current_stall
speed_no_load_rps = speed_no_load_rpm / 60.0 # Revolutions per second no load speed
speed_no_load = speed_no_load_rps * 2.0 * pi
Kt = num_motors * torque_stall / current_stall # N-m/A torque constant
Kv_rpm = speed_no_load_rpm / (voltage_nominal -
resistance_motor * current_no_load) # rpm/V
Kv = Kv_rpm * 2.0 * pi / 60.0 # rpm/V
# Robot Geometry and physics
# m Length of arm connected to the robot base
length_proximal_arm = inches_to_meters * 47.34
# m Length of arm that holds the cube
length_distal_arm = inches_to_meters * 44.0
# m Length of intake arm from the pivot point to where the big roller contacts a cube.
length_intake_arm = inches_to_meters * 9.0
mass_cube = 6.0 * lbs_to_kg # Weight of the cube in Kgrams
mass_proximal_arm = 5.5 * lbs_to_kg # Weight of proximal arm
mass_distal_arm = 3.5 * lbs_to_kg # Weight of distal arm
mass_distal = mass_cube + mass_distal_arm
mass_proximal = mass_proximal_arm + mass_distal
# m Length from arm pivot point to arm CG
radius_to_proximal_arm_cg = 22.0 * inches_to_meters
# m Length from arm pivot point to arm CG
radius_to_distal_arm_cg = 10.0 * inches_to_meters
radius_to_distal_cg = (length_distal_arm * mass_cube +
radius_to_distal_arm_cg * mass_distal_arm) / \
mass_distal
radius_to_proximal_cg = (length_proximal_arm * mass_distal +
radius_to_proximal_arm_cg * mass_proximal_arm) / \
mass_proximal
J_cube = length_distal_arm * length_distal_arm*mass_cube
# Kg m^2 Moment of inertia of the proximal arm
J_proximal_arm = radius_to_proximal_arm_cg * radius_to_proximal_arm_cg * \
mass_distal_arm
# Kg m^2 Moment of inertia distal arm and cube at end of proximal arm.
J_distal_arm_and_cube_at_end_of_proximal_arm = length_proximal_arm * \
length_proximal_arm * mass_distal
# Kg m^2 Moment of inertia of the distal arm
J_distal_arm = radius_to_distal_arm_cg * radius_to_distal_arm_cg * mass_distal_arm
# Moment of inertia of the arm with the cube on the end
J = J_distal_arm_and_cube_at_end_of_proximal_arm + J_proximal_arm
# Intake claw
J_intake = 0.295 # Kg m^2 Moment of inertia of intake
J = J_intake
gear_ratio = 140.0 # Guess at the gear ratio
gear_ratio = 100.0 # Guess at the gear ratio
gear_ratio = 90.0 # Guess at the gear ratio
error_margine = 1.0
voltage = 10.0 # voltage for the motor. Assuming a loaded robot so not using 12 V.
  # It might make sense to use a lower motor free speed when the voltage is not a full 12 Volts.
# motor_free_speed = Kv * voltage
motor_free_speed = speed_no_load
print("# Kt = %f N-m/A\n# Kv_rpm = %f rpm/V\n# Kv = %f radians/V" % (Kt, Kv_rpm, Kv))
print("# %.2f Ohms Resistance of the motor " % (resistance_motor))
print("# %.2f kg Cube weight" % (mass_cube))
print("# %.2f kg Proximal Arm mass" % (mass_proximal_arm))
print("# %.2f kg Distal Arm mass" % (mass_distal_arm))
print("# %.2f kg Distal Arm and Cube weight" % (mass_distal))
print("# %.2f m Length from distal arm pivot point to arm CG" % (
radius_to_distal_arm_cg))
print("# %.2f m Length from distal arm pivot point to arm and cube cg" % (
radius_to_distal_cg))
print("# %.2f kg-m^2 Moment of inertia of the cube about the arm pivot point" % (J_cube))
print("# %.2f m Length from proximal arm pivot point to arm CG" % (radius_to_proximal_arm_cg))
print("# %.2f m Length from proximal arm pivot point to arm and cube cg" % (
radius_to_proximal_cg))
print("# %.2f m Proximal arm length" % (length_proximal_arm))
print("# %.2f m Distal arm length" % (length_distal_arm))
print("# %.2f kg-m^2 Moment of inertia of the intake about the intake pivot point" % (
J_intake))
print("# %.2f kg-m^2 Moment of inertia of the distal arm about the arm pivot point" % (
J_distal_arm))
print("# %.2f kg-m^2 Moment of inertia of the proximal arm about the arm pivot point" % (
J_proximal_arm))
print("# %.2f kg-m^2 Moment of inertia of the distal arm and cube mass about "
"the proximal arm pivot point" % (
J_distal_arm_and_cube_at_end_of_proximal_arm))
print("# %.2f kg-m^2 Moment of inertia of the intake the intake pivot point "
"(J value used in simulation)" % (J))
print("# %d Number of motors" % (num_motors))
print("# %.2f V Motor voltage" % (voltage))
for gear_ratio in range(60, 241, 10):
c1 = Kt * gear_ratio * gear_ratio / (Kv * resistance_motor * J)
c2 = gear_ratio * Kt / (J * resistance_motor)
c3 = radius_to_proximal_cg * mass_proximal * gravity / J
if ( False ):
print("# %.8f 1/sec C1 constant" % (c1))
print("# %.2f 1/sec C2 constant" % (c2))
print("# %.2f 1/(V sec^2) C3 constant" % (c3))
print("# %.2f RPM Free speed at motor voltage" % (voltage * Kv_rpm))
torque_90_degrees = radius_to_distal_cg * mass_distal * gravity
voltage_90_degrees = resistance_motor * torque_90_degrees / (gear_ratio * Kt)
torque_peak = gear_ratio * num_motors * torque_stall
torque_peak_ft_lbs = torque_peak * newton_meters_to_ft_lbs
normal_force = torque_peak / length_intake_arm
normal_force_lbf = newton_to_lbf * normal_force
time_required = get_180_degree_time(c1, c2, c3, voltage,
gear_ratio, motor_free_speed)
print("Time for %.1f degrees for gear ratio %3.0f is %.2f seconds. "
"Peak (stall) torque %3.0f nm %3.0f ft-lb Normal force at intake "
"end %3.0f N %2.0f lbf" % \
(to_deg(theta_travel), gear_ratio, time_required,
torque_peak, torque_peak_ft_lbs, normal_force, normal_force_lbf))
if __name__ == '__main__':
main()
| 39.875912
| 96
| 0.663372
|
da6d1735a3913e4b7dc03fd05c7f34bea012ae7d
| 3,215
|
py
|
Python
|
h/util/redirects.py
|
tgiardina/rpp-h
|
fece590f901b052a59c19a24acfeba52cee33c84
|
[
"BSD-2-Clause"
] | null | null | null |
h/util/redirects.py
|
tgiardina/rpp-h
|
fece590f901b052a59c19a24acfeba52cee33c84
|
[
"BSD-2-Clause"
] | null | null | null |
h/util/redirects.py
|
tgiardina/rpp-h
|
fece590f901b052a59c19a24acfeba52cee33c84
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Utilities for processing a set of redirect specifications from a text file.
Redirects can be specified in a simple text-based format, in which each line
consists of three whitespace-delimited fields:
<source path> <redirect type> <destination>
The redirect type can be one of the following:
exact - requests with paths that exactly match the specified
source path will be redirected to the destination URL.
prefix - requests with paths that start with the specified source
path will be redirected to URLs relative to the
destination URL.
internal-exact - same as `exact`, but the destination will be treated as
a route name rather than a URL.
internal-prefix - same as `prefix`, but the destination will be treated as
a route name rather than a URL.
Lines that contain only whitespace, or which start with a '#' character, will
be ignored.
"""
from collections import namedtuple
class Redirect(
namedtuple(
"Redirect",
[
"src", # matching prefix (if prefix redirect) or path (if exact)
"dst", # route name (if internal redirect) or URL (if external)
"prefix", # prefix redirect if true, exact otherwise
"internal", # internal redirect if true, external otherwise
],
)
):
pass
class ParseError(Exception):
pass
def lookup(redirects, request):
"""
Check if a request matches any of a list of redirects.
Returns None if the request does not match, and the URL to redirect to
otherwise.
"""
# Compute and cache `request.path` once, rather than recomputing for each
# redirect rule that the path is matched against.
path = request.path
for r in redirects:
if r.prefix and path.startswith(r.src):
suffix = path.replace(r.src, "", 1)
return _dst_root(request, r) + suffix
elif not r.prefix and path == r.src:
return _dst_root(request, r)
return None
def parse(specs):
"""Parse a list of redirects from a sequence of redirect specifiers."""
result = []
for line in specs:
# Ignore comments and blank lines
if line.startswith("#") or not line.strip():
continue
try:
src, typ, dst = line.split(None, 3)
except ValueError:
raise ParseError("invalid redirect specification: {!r}".format(line))
if typ == "internal-exact":
r = Redirect(prefix=False, internal=True, src=src, dst=dst)
elif typ == "internal-prefix":
r = Redirect(prefix=True, internal=True, src=src, dst=dst)
elif typ == "exact":
r = Redirect(prefix=False, internal=False, src=src, dst=dst)
elif typ == "prefix":
r = Redirect(prefix=True, internal=False, src=src, dst=dst)
else:
raise ParseError("unknown redirect type: {!r}".format(typ))
result.append(r)
return result
def _dst_root(request, redirect):
if redirect.internal:
return request.route_url(redirect.dst)
else:
return redirect.dst
| 33.14433
| 81
| 0.621773
|
fe8af25575e0b7e3d2eba74246f100488e972c93
| 1,252
|
py
|
Python
|
sorts/bead_sort.py
|
sourcery-ai-bot/Python
|
f1444aca73068e0c55f60ec63b12c53b402e54dd
|
[
"MIT"
] | 1
|
2020-08-28T18:25:45.000Z
|
2020-08-28T18:25:45.000Z
|
sorts/bead_sort.py
|
sourcery-ai-bot/Python
|
f1444aca73068e0c55f60ec63b12c53b402e54dd
|
[
"MIT"
] | 1
|
2020-08-28T18:24:31.000Z
|
2020-08-28T19:35:47.000Z
|
sorts/bead_sort.py
|
MKiperszmid/Python
|
6b368e6ab2fa1a839b029fd45e127521bbe76005
|
[
"MIT"
] | null | null | null |
"""
Bead sort only works for sequences of nonnegative integers.
https://en.wikipedia.org/wiki/Bead_sort
"""
def bead_sort(sequence: list) -> list:
"""
>>> bead_sort([6, 11, 12, 4, 1, 5])
[1, 4, 5, 6, 11, 12]
>>> bead_sort([9, 8, 7, 6, 5, 4 ,3, 2, 1])
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> bead_sort([5, 0, 4, 3])
[0, 3, 4, 5]
>>> bead_sort([8, 2, 1])
[1, 2, 8]
>>> bead_sort([1, .9, 0.0, 0, -1, -.9])
Traceback (most recent call last):
...
TypeError: Sequence must be list of nonnegative integers
>>> bead_sort("Hello world")
Traceback (most recent call last):
...
TypeError: Sequence must be list of nonnegative integers
"""
if any(not isinstance(x, int) or x < 0 for x in sequence):
raise TypeError("Sequence must be list of nonnegative integers")
for item in sequence:
for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 28.454545
| 80
| 0.557508
|
cca6169f382994f4fe7a3e6f0a3a84d6c2f802b4
| 6,017
|
py
|
Python
|
software_keyboard/low_level_serial_to_keyboard.py
|
TolgaReis/simulator-console
|
35837d886c1e34208d9b89343591db2cdedf5d65
|
[
"Apache-2.0"
] | null | null | null |
software_keyboard/low_level_serial_to_keyboard.py
|
TolgaReis/simulator-console
|
35837d886c1e34208d9b89343591db2cdedf5d65
|
[
"Apache-2.0"
] | null | null | null |
software_keyboard/low_level_serial_to_keyboard.py
|
TolgaReis/simulator-console
|
35837d886c1e34208d9b89343591db2cdedf5d65
|
[
"Apache-2.0"
] | null | null | null |
import ctypes
from ctypes import wintypes
import time
import serial
import serial.tools.list_ports
from flag import Flag
from config import Config
user32 = ctypes.WinDLL('user32', use_last_error=True)
INPUT_KEYBOARD = 1
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_UNICODE = 0x0004
MAPVK_VK_TO_VSC = 0
# msdn.microsoft.com/en-us/library/dd375731
wintypes.ULONG_PTR = wintypes.WPARAM
class MOUSEINPUT(ctypes.Structure):
_fields_ = (("dx", wintypes.LONG),
("dy", wintypes.LONG),
("mouseData", wintypes.DWORD),
("dwFlags", wintypes.DWORD),
("time", wintypes.DWORD),
("dwExtraInfo", wintypes.ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
_fields_ = (("wVk", wintypes.WORD),
("wScan", wintypes.WORD),
("dwFlags", wintypes.DWORD),
("time", wintypes.DWORD),
("dwExtraInfo", wintypes.ULONG_PTR))
def __init__(self, *args, **kwds):
super(KEYBDINPUT, self).__init__(*args, **kwds)
if not self.dwFlags & KEYEVENTF_UNICODE:
self.wScan = user32.MapVirtualKeyExW(self.wVk,
MAPVK_VK_TO_VSC, 0)
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (("uMsg", wintypes.DWORD),
("wParamL", wintypes.WORD),
("wParamH", wintypes.WORD))
class INPUT(ctypes.Structure):
class _INPUT(ctypes.Union):
_fields_ = (("ki", KEYBDINPUT),
("mi", MOUSEINPUT),
("hi", HARDWAREINPUT))
_anonymous_ = ("_input",)
_fields_ = (("type", wintypes.DWORD),
("_input", _INPUT))
LPINPUT = ctypes.POINTER(INPUT)
def PressKey(hexKeyCode):
x = INPUT(type=INPUT_KEYBOARD, ki=KEYBDINPUT(wVk=hexKeyCode))
user32.SendInput(1, ctypes.byref(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
x = INPUT(type=INPUT_KEYBOARD,
ki=KEYBDINPUT(wVk=hexKeyCode,
dwFlags=KEYEVENTF_KEYUP))
user32.SendInput(1, ctypes.byref(x), ctypes.sizeof(x))
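# Illustrative example: tap the 'A' key (virtual-key code 0x41) with the
# helpers above by pressing and then releasing it.
#
#   PressKey(0x41)
#   ReleaseKey(0x41)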
# Wait until at least one serial port is available.
ports = [comport.device for comport in serial.tools.list_ports.comports()]
while len(ports) < 1:
    ports = [comport.device for comport in serial.tools.list_ports.comports()]
while True:
try:
ser = serial.Serial(port = ports[0],
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS)
break
except:
pass
key_flag = Flag
while True:
try:
data = ser.readline().decode()
if(len(data) == 8):
if(data[0] == '1'):
PressKey(Config.T_KEY)
key_flag.T = True
key_flag.U = False
elif(data[0] == '2'):
PressKey(Config.U_KEY)
key_flag.T = False
key_flag.U = True
if(key_flag.T == True and key_flag.U == False):
ReleaseKey(Config.U_KEY)
elif(key_flag.U == True and key_flag.T == False):
ReleaseKey(Config.T_KEY)
if(data[0] == '0'):
if(key_flag.T == True):
ReleaseKey(Config.T_KEY)
elif(key_flag.U == True):
ReleaseKey(Config.U_KEY)
if(data[1] == '1'):
PressKey(Config.I_KEY)
key_flag.I = True
key_flag.K = False
elif(data[1] == '2'):
PressKey(Config.K_KEY)
key_flag.I = False
key_flag.K = True
if(key_flag.I == True and key_flag.K == False):
ReleaseKey(Config.K_KEY)
elif(key_flag.K == True and key_flag.I == False):
ReleaseKey(Config.I_KEY)
if(data[1] == '0'):
if(key_flag.I == True):
ReleaseKey(Config.I_KEY)
elif(key_flag.K == True):
ReleaseKey(Config.K_KEY)
if(data[2] == '1'):
PressKey(Config.O_KEY)
key_flag.O = True
key_flag.L = False
elif(data[2] == '2'):
PressKey(Config.L_KEY)
key_flag.O = False
key_flag.L = True
if(key_flag.O == True and key_flag.L == False):
ReleaseKey(Config.L_KEY)
elif(key_flag.L == True and key_flag.O == False):
ReleaseKey(Config.O_KEY)
if(data[2] == '0'):
if(key_flag.O == True):
ReleaseKey(Config.O_KEY)
elif(key_flag.L == True):
ReleaseKey(Config.L_KEY)
if(data[3] == '1'):
PressKey(Config.V_KEY)
key_flag.V = True
if(data[3] == '2' and key_flag.V == True):
ReleaseKey(Config.V_KEY)
key_flag.V = False
if(data[4] == '1'):
PressKey(Config.M_KEY)
key_flag.M = True
if(data[4] == '2' and key_flag.M == True):
ReleaseKey(Config.M_KEY)
key_flag.M = False
if(data[5] == '1'):
PressKey(Config.B_KEY)
key_flag.B = True
if(data[5] == '2' and key_flag.B == True):
ReleaseKey(Config.B_KEY)
key_flag.B = False
except:
try:
ser = serial.Serial(port = ports[0],
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS)
except:
pass
| 34.382857
| 78
| 0.491441
|
4bee0e302a3a2f11e6f48d80f0ee40de4a4232aa
| 1,134
|
py
|
Python
|
queue_and_stack/get_closing_paren.py
|
Oyekunle-Mark/eat-that-cake
|
85b46ae5e7aad38d0827ac68ae7492a95ced74a3
|
[
"MIT"
] | 1
|
2021-10-31T14:39:33.000Z
|
2021-10-31T14:39:33.000Z
|
queue_and_stack/get_closing_paren.py
|
Oyekunle-Mark/eat-that-cake
|
85b46ae5e7aad38d0827ac68ae7492a95ced74a3
|
[
"MIT"
] | null | null | null |
queue_and_stack/get_closing_paren.py
|
Oyekunle-Mark/eat-that-cake
|
85b46ae5e7aad38d0827ac68ae7492a95ced74a3
|
[
"MIT"
] | 1
|
2021-10-31T14:39:34.000Z
|
2021-10-31T14:39:34.000Z
|
def get_closing_paren(sentence, opening_paren_index):
# check if the starting parenthesis is a closing one
if sentence[opening_paren_index] != '(':
# raise an exception
raise IndexError("Starting position must be an opening parenthesis")
# instantiate paren_count to zero
paren_count = 0
# initialize sen_length to the length of the sentence
sen_length = len(sentence)
# loop from opening_paren_index to the end index of sentence
for index in range(opening_paren_index, sen_length):
# if character at current index is an opening parenthesis
if sentence[index] == '(':
# increment paren_count
paren_count += 1
# otherwise, if it is a closing parenthesis
elif sentence[index] == ')':
# decrement paren_count
paren_count -= 1
# if paren_count equals zero
if paren_count == 0:
# return index
return index
# if the loop terminates without finding matching position
# raise an exception
raise IndexError("Starting position must be an opening parenthesis")
| 36.580645
| 76
| 0.659612
|
e41a69996d940a164ebe0a3444d20fdeda21b770
| 324
|
py
|
Python
|
provdbconnector/tests/__init__.py
|
Ama-Gi/prov-neo4j-covid19-track
|
67a79694ad3b48c34dd263f1508c0bdfbc6702fb
|
[
"Apache-2.0"
] | 15
|
2016-09-21T22:27:45.000Z
|
2022-01-17T15:44:42.000Z
|
provdbconnector/tests/__init__.py
|
Ama-Gi/prov-neo4j-covid19-track
|
67a79694ad3b48c34dd263f1508c0bdfbc6702fb
|
[
"Apache-2.0"
] | 87
|
2016-09-19T13:26:05.000Z
|
2022-03-16T04:16:47.000Z
|
provdbconnector/tests/__init__.py
|
Ama-Gi/prov-neo4j-covid19-track
|
67a79694ad3b48c34dd263f1508c0bdfbc6702fb
|
[
"Apache-2.0"
] | 3
|
2016-10-17T19:25:10.000Z
|
2020-06-26T12:38:34.000Z
|
from provdbconnector.tests.db_adapters.test_baseadapter import AdapterTestTemplate
from provdbconnector.tests.test_prov_db import ProvDbTestTemplate
import unittest
def additional_tests():
from examples.tests.test_examples import ExamplesTest
return unittest.defaultTestLoader.loadTestsFromTestCase(ExamplesTest)
| 32.4
| 82
| 0.867284
|
66d25f1e356e572a5ecc20ea7a53e523f8a19b54
| 1,271
|
py
|
Python
|
custom_components/fpl/sensor_AverageDailySensor.py
|
Dominic7/hass-fpl
|
fa5ba8a1b6f2e0b0c812edfe5568074c7c42a764
|
[
"MIT"
] | 12
|
2020-10-16T15:13:03.000Z
|
2022-03-23T15:16:00.000Z
|
custom_components/fpl/sensor_AverageDailySensor.py
|
Dominic7/hass-fpl
|
fa5ba8a1b6f2e0b0c812edfe5568074c7c42a764
|
[
"MIT"
] | 27
|
2020-01-18T19:30:32.000Z
|
2022-03-28T22:27:33.000Z
|
custom_components/fpl/sensor_AverageDailySensor.py
|
Dominic7/hass-fpl
|
fa5ba8a1b6f2e0b0c812edfe5568074c7c42a764
|
[
"MIT"
] | 16
|
2020-06-16T16:45:37.000Z
|
2022-03-24T03:26:03.000Z
|
from .fplEntity import FplEntity
class FplAverageDailySensor(FplEntity):
def __init__(self, coordinator, config, account):
super().__init__(coordinator, config, account, "Daily Average")
@property
def state(self):
budget = self.getData("budget_bill")
budget_billing_projected_bill = self.getData("budget_billing_daily_avg")
if budget == True and budget_billing_projected_bill is not None:
return self.getData("budget_billing_daily_avg")
return self.getData("daily_avg")
@property
def icon(self):
return "mdi:currency-usd"
class BudgetDailyAverageSensor(FplEntity):
def __init__(self, coordinator, config, account):
super().__init__(coordinator, config, account, "Budget Daily Average")
@property
def state(self):
return self.getData("budget_billing_daily_avg")
@property
def icon(self):
return "mdi:currency-usd"
class ActualDailyAverageSensor(FplEntity):
def __init__(self, coordinator, config, account):
super().__init__(coordinator, config, account, "Actual Daily Average")
@property
def state(self):
return self.getData("daily_avg")
@property
def icon(self):
return "mdi:currency-usd"
| 27.042553
| 80
| 0.685287
|
12e522a10c2acffbcb6d2bbcfafd0d7d0e46274d
| 4,439
|
py
|
Python
|
st2reactor/tests/unit/test_hash_partitioner.py
|
momokuri-3/st2
|
0a7038723d701b433d7079b843cc76d4bf1ae8c9
|
[
"Apache-2.0"
] | 4,920
|
2015-01-01T15:12:17.000Z
|
2022-03-31T19:31:15.000Z
|
st2reactor/tests/unit/test_hash_partitioner.py
|
momokuri-3/st2
|
0a7038723d701b433d7079b843cc76d4bf1ae8c9
|
[
"Apache-2.0"
] | 3,563
|
2015-01-05T19:02:19.000Z
|
2022-03-31T19:23:09.000Z
|
st2reactor/tests/unit/test_hash_partitioner.py
|
momokuri-3/st2
|
0a7038723d701b433d7079b843cc76d4bf1ae8c9
|
[
"Apache-2.0"
] | 774
|
2015-01-01T20:41:24.000Z
|
2022-03-31T13:25:29.000Z
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import math
from random_words import RandomWords
from st2reactor.container.hash_partitioner import HashPartitioner, Range
from st2tests import config
from st2tests import DbTestCase
from st2tests.fixturesloader import FixturesLoader
PACK = "generic"
FIXTURES_1 = {"sensors": ["sensor1.yaml", "sensor2.yaml", "sensor3.yaml"]}
class HashPartitionerTest(DbTestCase):
models = None
@classmethod
def setUpClass(cls):
super(HashPartitionerTest, cls).setUpClass()
# Create TriggerTypes before creation of Rule to avoid failure. Rule requires the
# Trigger and therefore TriggerType to be created prior to rule creation.
cls.models = FixturesLoader().save_fixtures_to_db(
fixtures_pack=PACK, fixtures_dict=FIXTURES_1
)
config.parse_args()
def test_full_range_hash_partitioner(self):
partitioner = HashPartitioner("node1", "MIN..MAX")
sensors = partitioner.get_sensors()
self.assertEqual(len(sensors), 3, "Expected all sensors")
def test_multi_range_hash_partitioner(self):
range_third = int(Range.RANGE_MAX_VALUE / 3)
range_two_third = range_third * 2
hash_ranges = "MIN..{range_third}|{range_third}..{range_two_third}|{range_two_third}..MAX".format(
range_third=range_third, range_two_third=range_two_third
)
partitioner = HashPartitioner("node1", hash_ranges)
sensors = partitioner.get_sensors()
self.assertEqual(len(sensors), 3, "Expected all sensors")
def test_split_range_hash_partitioner(self):
range_mid = int(Range.RANGE_MAX_VALUE / 2)
partitioner = HashPartitioner("node1", "MIN..%s" % range_mid)
sensors1 = partitioner.get_sensors()
partitioner = HashPartitioner("node2", "%s..MAX" % range_mid)
sensors2 = partitioner.get_sensors()
self.assertEqual(len(sensors1) + len(sensors2), 3, "Expected all sensors")
def test_hash_effectiveness(self):
range_third = int(Range.RANGE_MAX_VALUE / 3)
partitioner1 = HashPartitioner("node1", "MIN..%s" % range_third)
partitioner2 = HashPartitioner(
"node2", "%s..%s" % (range_third, range_third + range_third)
)
partitioner3 = HashPartitioner("node2", "%s..MAX" % (range_third + range_third))
refs_count = 1000
refs = self._generate_refs(count=refs_count)
p1_count = 0
p2_count = 0
p3_count = 0
for ref in refs:
if partitioner1._is_in_hash_range(ref):
p1_count += 1
# note if and not else-if.
if partitioner2._is_in_hash_range(ref):
p2_count += 1
if partitioner3._is_in_hash_range(ref):
p3_count += 1
self.assertEqual(
p1_count + p2_count + p3_count, refs_count, "Sum should equal all sensors."
)
# Test effectiveness by checking if the sd is within 20% of mean
mean = refs_count / 3
        variance = (
            float(
                (p1_count - mean) ** 2 + (p2_count - mean) ** 2 + (p3_count - mean) ** 2
            )
            / 3
        )
sd = math.sqrt(variance)
self.assertTrue(sd / mean <= 0.2, "Some values deviate too much from the mean.")
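        # With refs_count = 1000 the mean per partition is ~333.3, so this
        # assertion tolerates a standard deviation of at most ~66.7 refs.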
def _generate_refs(self, count=10):
random_word_count = int(math.sqrt(count)) + 1
words = RandomWords().random_words(count=random_word_count)
x_index = 0
y_index = 0
while count > 0:
yield "%s.%s" % (words[x_index], words[y_index])
if y_index < len(words) - 1:
y_index += 1
else:
x_index += 1
y_index = 0
count -= 1
return
| 36.089431
| 106
| 0.642712
|
42a0df208dc7e36c3faeec0f02fb36a96a1f1dcf
| 73
|
py
|
Python
|
pdfplumber/_version.py
|
OisinMoran/pdfplumber
|
eb8aef5eb1591561a9b3e80a507f9bf66abea4fd
|
[
"MIT"
] | 1
|
2019-06-14T12:24:43.000Z
|
2019-06-14T12:24:43.000Z
|
pdfplumber/_version.py
|
yooongchun/pdfplumber
|
eb8aef5eb1591561a9b3e80a507f9bf66abea4fd
|
[
"MIT"
] | null | null | null |
pdfplumber/_version.py
|
yooongchun/pdfplumber
|
eb8aef5eb1591561a9b3e80a507f9bf66abea4fd
|
[
"MIT"
] | null | null | null |
version_info = (0, 5, 10)
__version__ = '.'.join(map(str, version_info))
| 24.333333
| 46
| 0.671233
|
19970dae577f3811fd081fbc166f2a66e2a0beca
| 1,710
|
py
|
Python
|
epuck-nav/controllers/robot_controller/robot_controller.py
|
lucas-emery/webots-rl-options
|
67cf4442ab74cc4734e00d4854014f4fc7b144d8
|
[
"MIT"
] | 2
|
2020-07-11T16:32:17.000Z
|
2021-02-01T13:00:11.000Z
|
epuck-nav/controllers/robot_controller/robot_controller.py
|
lucas-emery/webots-rl-options
|
67cf4442ab74cc4734e00d4854014f4fc7b144d8
|
[
"MIT"
] | null | null | null |
epuck-nav/controllers/robot_controller/robot_controller.py
|
lucas-emery/webots-rl-options
|
67cf4442ab74cc4734e00d4854014f4fc7b144d8
|
[
"MIT"
] | null | null | null |
from deepbots.robots.controllers.robot_emitter_receiver_csv import RobotEmitterReceiverCSV
class EpuckRobot(RobotEmitterReceiverCSV):
def __init__(self):
super().__init__(emitter_name='EPUCK_EMMITER',
receiver_name='EPUCK_RECEIVER',
timestep=32)
self.wheel_left = self.robot.getMotor('left wheel motor')
self.wheel_left.setPosition(float('inf'))
self.wheel_left.setVelocity(0.0)
self.wheel_right = self.robot.getMotor('right wheel motor')
self.wheel_right.setPosition(float('inf'))
self.wheel_right.setVelocity(0.0)
self.distance_sensors = []
for i in range(8):
d_sensor = self.robot.getDistanceSensor('ps{}'.format(i))
d_sensor.enable(self.get_timestep())
self.distance_sensors.append(d_sensor)
def create_message(self):
message = []
for i in range(8):
message.append(str(self.distance_sensors[i].getValue()))
return message
def use_message_data(self, message):
action = int(message[0])
vel = self.wheel_left.getMaxVelocity() / 3
if action == 0: # forward
self.wheel_left.setVelocity(vel)
self.wheel_right.setVelocity(vel)
elif action == 1: # left
self.wheel_left.setVelocity(-vel)
self.wheel_right.setVelocity(vel)
elif action == 2: # right
self.wheel_left.setVelocity(vel)
self.wheel_right.setVelocity(-vel)
else:
raise Exception("Fran se la come")
# Create the robot controller object and run it
robot_controller = EpuckRobot()
robot_controller.run()
| 34.897959
| 90
| 0.627485
|
ab94603d03f67adccd7238aa9e21f58c8750152d
| 2,671
|
py
|
Python
|
dbt/clients/git.py
|
managedbyq/q-dbt
|
01f1918fe5cbf3036b7197b8e3211960403718f3
|
[
"Apache-2.0"
] | 1
|
2018-06-20T17:51:20.000Z
|
2018-06-20T17:51:20.000Z
|
dbt/clients/git.py
|
managedbyq/q-dbt
|
01f1918fe5cbf3036b7197b8e3211960403718f3
|
[
"Apache-2.0"
] | null | null | null |
dbt/clients/git.py
|
managedbyq/q-dbt
|
01f1918fe5cbf3036b7197b8e3211960403718f3
|
[
"Apache-2.0"
] | null | null | null |
import re
import os.path
from dbt.clients.system import run_cmd, rmdir
from dbt.logger import GLOBAL_LOGGER as logger
import dbt.exceptions
def clone(repo, cwd, dirname=None, remove_git_dir=False):
clone_cmd = ['git', 'clone', '--depth', '1', repo]
if dirname is not None:
clone_cmd.append(dirname)
result = run_cmd(cwd, clone_cmd)
if remove_git_dir:
rmdir(os.path.join(dirname, '.git'))
return result
def list_tags(cwd):
out, err = run_cmd(cwd, ['git', 'tag', '--list'])
tags = set(out.decode('utf-8').strip().split("\n"))
return tags
def checkout(cwd, repo, branch=None):
if branch is None:
branch = 'master'
logger.debug(' Checking out branch {}.'.format(branch))
run_cmd(cwd, ['git', 'remote', 'set-branches', 'origin', branch])
run_cmd(cwd, ['git', 'fetch', '--tags', '--depth', '1', 'origin', branch])
tags = list_tags(cwd)
# Prefer tags to branches if one exists
if branch in tags:
spec = 'tags/{}'.format(branch)
else:
spec = 'origin/{}'.format(branch)
out, err = run_cmd(cwd, ['git', 'reset', '--hard', spec])
stderr = err.decode('utf-8').strip()
if stderr.startswith('fatal:'):
dbt.exceptions.bad_package_spec(repo, branch, stderr)
else:
return out, err
def get_current_sha(cwd):
out, err = run_cmd(cwd, ['git', 'rev-parse', 'HEAD'])
return out.decode('utf-8')
def remove_remote(cwd):
return run_cmd(cwd, ['git', 'remote', 'rm', 'origin'])
def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
branch=None):
_, err = clone(repo, cwd, dirname=dirname, remove_git_dir=remove_git_dir)
exists = re.match("fatal: destination path '(.+)' already exists",
err.decode('utf-8'))
directory = None
start_sha = None
if exists:
directory = exists.group(1)
logger.debug('Updating existing dependency %s.', directory)
else:
matches = re.match("Cloning into '(.+)'", err.decode('utf-8'))
directory = matches.group(1)
logger.debug('Pulling new dependency %s.', directory)
full_path = os.path.join(cwd, directory)
start_sha = get_current_sha(full_path)
checkout(full_path, repo, branch)
end_sha = get_current_sha(full_path)
if exists:
if start_sha == end_sha:
logger.debug(' Already at %s, nothing to do.', start_sha[:7])
else:
logger.debug(' Updated checkout from %s to %s.',
start_sha[:7], end_sha[:7])
else:
logger.debug(' Checked out at %s.', end_sha[:7])
return directory
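# Illustrative usage (repository URL and paths below are hypothetical):
#
#   clone_and_checkout('https://example.com/org/some-dbt-package.git',
#                      cwd='/tmp/dbt_modules', branch='master')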
| 29.032609
| 78
| 0.608012
|
ba83cb473275e491ac7ba59dccb3d1c99750212e
| 530
|
py
|
Python
|
cap11/ex1/consulta3.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
cap11/ex1/consulta3.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
cap11/ex1/consulta3.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
import sqlite3
from contextlib import closing
nome = input('Product to search for: ')
with sqlite3.connect('precos.db') as con:
with closing(con.cursor()) as cursor:
cursor.execute('select * from precos where nome = ?', (nome,))
x = 0
while True:
res = cursor.fetchone()
if res is None:
if x == 0:
                    print('Nothing found.')
break
else:
                print(f'Name: {res[0]} Price: {res[1]}')
x += 1
| 31.176471
| 70
| 0.496226
|
027ac8a1452dee7af67efb71ccb13ac752bb0767
| 4,062
|
py
|
Python
|
tools/make_ctocpp_header.py
|
chromium-googlesource-mirror/chromiumembedded
|
f659ddbeb58e957a3c57a309cdcfc52c999fe719
|
[
"BSD-3-Clause"
] | 97
|
2015-05-03T20:16:08.000Z
|
2021-11-16T13:16:25.000Z
|
tools/make_ctocpp_header.py
|
chromium-googlesource-mirror/chromiumembedded
|
f659ddbeb58e957a3c57a309cdcfc52c999fe719
|
[
"BSD-3-Clause"
] | 3
|
2020-10-16T03:15:20.000Z
|
2020-10-26T15:31:01.000Z
|
tools/make_ctocpp_header.py
|
chromium-googlesource-mirror/chromiumembedded
|
f659ddbeb58e957a3c57a309cdcfc52c999fe719
|
[
"BSD-3-Clause"
] | 29
|
2015-03-28T02:20:33.000Z
|
2021-10-29T20:58:28.000Z
|
# Copyright (c) 2011 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from cef_parser import *
def make_ctocpp_header(header, clsname):
cls = header.get_class(clsname)
if cls is None:
raise Exception('Class does not exist: '+clsname)
clientside = cls.is_client_side()
defname = string.upper(get_capi_name(clsname[3:], False))
capiname = cls.get_capi_name()
result = get_copyright()
result += '#ifndef CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n'+ \
'#define CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n' + \
'#pragma once\n'
if clientside:
result += """
#ifndef BUILDING_CEF_SHARED
#pragma message("Warning: "__FILE__" may be accessed DLL-side only")
#else // BUILDING_CEF_SHARED
"""
else:
result += """
#ifndef USING_CEF_SHARED
#pragma message("Warning: "__FILE__" may be accessed wrapper-side only")
#else // USING_CEF_SHARED
"""
# build the function body
func_body = ''
funcs = cls.get_virtual_funcs()
for func in funcs:
func_body += ' virtual '+func.get_cpp_proto()+' OVERRIDE;\n'
# include standard headers
if func_body.find('std::map') > 0 or func_body.find('std::multimap') > 0:
result += '\n#include <map>'
if func_body.find('std::vector') > 0:
result += '\n#include <vector>'
# include the headers for this class
result += '\n#include "include/'+cls.get_file_name()+'"'+ \
'\n#include "include/capi/'+cls.get_capi_file_name()+'"\n'
# include headers for any forward declared classes that are not in the same file
declares = cls.get_forward_declares()
for declare in declares:
dcls = header.get_class(declare)
if dcls.get_file_name() != cls.get_file_name():
result += '#include "include/'+dcls.get_file_name()+'"\n' \
'#include "include/capi/'+dcls.get_capi_file_name()+'"\n'
result += """#include "libcef_dll/ctocpp/ctocpp.h"
// Wrap a C structure with a C++ class.
"""
if clientside:
result += '// This class may be instantiated and accessed DLL-side only.\n'
else:
result += '// This class may be instantiated and accessed wrapper-side only.\n'
result += 'class '+clsname+'CToCpp\n'+ \
' : public CefCToCpp<'+clsname+'CToCpp, '+clsname+', '+capiname+'> {\n'+ \
' public:\n'+ \
' explicit '+clsname+'CToCpp('+capiname+'* str)\n'+ \
' : CefCToCpp<'+clsname+'CToCpp, '+clsname+', '+capiname+'>(str) {}\n'+ \
' virtual ~'+clsname+'CToCpp() {}\n\n'+ \
' // '+clsname+' methods\n';
result += func_body
result += '};\n\n'
if clientside:
result += '#endif // BUILDING_CEF_SHARED\n'
else:
result += '#endif // USING_CEF_SHARED\n'
result += '#endif // CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n'
return wrap_code(result)
def write_ctocpp_header(header, clsname, dir, backup):
file = dir+os.sep+get_capi_name(clsname[3:], False)+'_ctocpp.h'
if path_exists(file):
oldcontents = read_file(file)
else:
oldcontents = ''
newcontents = make_ctocpp_header(header, clsname)
if newcontents != oldcontents:
if backup and oldcontents != '':
backup_file(file)
write_file(file, newcontents)
return True
return False
# test the module
if __name__ == "__main__":
import sys
# verify that the correct number of command-line arguments are provided
if len(sys.argv) < 3:
sys.stderr.write('Usage: '+sys.argv[0]+' <infile> <classname>')
sys.exit()
# create the header object
header = obj_header()
header.add_file(sys.argv[1])
# dump the result to stdout
sys.stdout.write(make_ctocpp_header(header, sys.argv[2]))
| 33.02439
| 94
| 0.604874
|
d80be2db3bc02b155576c5853c3c286177fedb1d
| 9,665
|
py
|
Python
|
contrib/bitrpc/bitrpc.py
|
RocketFund/RocketFundCoin
|
8acb4bdbc113c2aa5df46da6af576822bc48857e
|
[
"MIT"
] | 4
|
2019-11-27T22:24:55.000Z
|
2020-11-18T18:24:54.000Z
|
contrib/bitrpc/bitrpc.py
|
RocketFund/RocketFundCoin
|
8acb4bdbc113c2aa5df46da6af576822bc48857e
|
[
"MIT"
] | null | null | null |
contrib/bitrpc/bitrpc.py
|
RocketFund/RocketFundCoin
|
8acb4bdbc113c2aa5df46da6af576822bc48857e
|
[
"MIT"
] | 1
|
2020-03-28T11:39:27.000Z
|
2020-03-28T11:39:27.000Z
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:36144")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:36144")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
            to = raw_input("To: ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 28.594675
| 101
| 0.573513
|
771afc4aa6bbda45e23efa16fea5cfc800f7ea5d
| 6,190
|
py
|
Python
|
stethoscope/plugins/sources/jamf/base.py
|
pjmorr/stethoscope
|
2d054b90d06dd54d2241c7d646b276d112e223df
|
[
"Apache-2.0"
] | null | null | null |
stethoscope/plugins/sources/jamf/base.py
|
pjmorr/stethoscope
|
2d054b90d06dd54d2241c7d646b276d112e223df
|
[
"Apache-2.0"
] | 2
|
2021-04-30T21:38:37.000Z
|
2022-02-11T03:48:36.000Z
|
stethoscope/plugins/sources/jamf/base.py
|
pjmorr/stethoscope
|
2d054b90d06dd54d2241c7d646b276d112e223df
|
[
"Apache-2.0"
] | null | null | null |
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import, print_function, unicode_literals
import arrow
import logbook
import stethoscope.plugins.sources.jamf.utils as jutils
import stethoscope.validation
import stethoscope.configurator
import stethoscope.utils
logger = logbook.Logger(__name__)
def inject_last_updated(data, last_updated):
data['last_updated'] = last_updated
return data
class JAMFDataSourceBase(stethoscope.configurator.Configurator):
config_keys = (
'JAMF_API_USERNAME',
'JAMF_API_PASSWORD',
'JAMF_API_HOSTADDR',
)
def _check_uptodate(self, raw):
updates = raw['computer']['software']['available_software_updates']
data = {'value': (len(updates) == 0)}
if len(updates) > 0:
data['details'] = "Missing Updates:\n"
for update in updates:
data['details'] += " {!s}\n".format(update)
return data
def _check_autoupdate(self, attributes):
attrs = [
('1 Auto Check For Updates Enabled', 'True', 'Automatically check for updates'),
('2 Get New Updates in Background Enabled', 'True',
'Download newly available updates in background'),
('3 Install App Updates Enabled', 'False', 'Install app updates'),
('4 Install OS X Updates Enabled', 'False', 'Install OS X updates'),
('5 Install Security Updates Enabled', 'True', 'Install security updates'),
('6 Install System Data Files Enabled', 'True', 'Install system data files'),
]
values = list()
for (attr, default, _) in attrs:
value = attributes.get(attr)
values.append(value if value in ['True', 'False'] else default)
data = {'value': all(value == "True" for value in values)}
if not data['value']:
data['details'] = "Disabled settings:\n" + "\n".join(" {!s}".format(label)
for value, (_, _, label) in zip(values, attrs) if value != 'True')
return data
def _check_encryption(self, raw):
data = {}
try:
storage = raw['computer']['hardware']['storage']
except KeyError:
pass
else:
details = list()
encrypted = list()
for drive in storage:
if drive['drive_capacity_mb'] > 0 and 'partition' in drive:
# hack to work around bug in JAMF
if drive['partition']['name'] == 'Recovery HD':
continue
encrypted.append(drive['partition']['filevault2_status'] == "Encrypted")
# hack to work around bug in JAMF
status = drive['partition']['filevault2_status']
if status == "Not Supported":
status = "Not Encrypted"
details.append("{name!s}: {status:s} ({filevault2_percent:d}%)"
"".format(status=status, **drive['partition']))
data['value'] = all(encrypted)
data['details'] = '\n'.join(details)
return data
def _normalize_software_entry(self, entry):
"""Convert software information returned from JAMF to common software list format."""
return entry
def _process_device(self, raw):
if raw is None:
return None
data = {'_raw': raw} if self._debug else {}
computer = raw['computer']
# logger.debug("computer:\n{:s}".format(pprint.pformat(computer)))
attributes = jutils._parse_parameter_list(computer['extension_attributes'])
# logger.debug("extension attributes:\n{:s}".format(pprint.pformat(attributes)))
# INFORMATION
data['model'] = computer['hardware']['model']
data.update(stethoscope.utils.copy_partial_dict(computer['general'], {
'platform': 'platform',
'serial': 'serial_number',
'name': 'name',
}))
data.update(stethoscope.utils.copy_partial_dict(computer['hardware'], {
'os': 'os_name',
'os_version': 'os_version',
}))
try:
last_updated = arrow.get(computer['general']['report_date_utc'])
except arrow.parser.ParserError:
last_updated = None
data['last_sync'] = last_updated
# PRACTICES
data['practices'] = dict()
data['practices']['encryption'] = inject_last_updated(self._check_encryption(raw), last_updated)
data['practices']['uptodate'] = inject_last_updated(self._check_uptodate(raw), last_updated)
data['practices']['autoupdate'] = inject_last_updated(self._check_autoupdate(attributes),
last_updated)
data['software'] = {'last_scan_date': last_updated}
data['software']['installed'] = dict((entry['name'], self._normalize_software_entry(entry))
for entry in raw['computer']['software']['applications'])
data['software']['services'] = dict((service, {'name': service}) for service in
raw['computer']['software']['running_services'])
try:
practice = {'value': int(attributes['Firewall Status']) > 0}
except (KeyError, ValueError):
practice = {}
data['practices']['firewall'] = inject_last_updated(practice, last_updated)
for key, attr, ok_value in [
('screenlock', 'Screen Saver Lock Enabled', 'Enabled'),
('remotelogin', 'Remote Login', 'Off'),
]:
practice = {} if attr not in attributes else {'value': attributes[attr] == ok_value}
data['practices'][key] = inject_last_updated(practice, last_updated)
# IDENTIFIERS
possible_macs = [
computer['general'].get('mac_address', ''),
computer['general'].get('alt_mac_address', ''),
attributes.get('Wireless Mac Address', ''),
]
mac_addresses = set(stethoscope.validation.canonicalize_macaddr(addr)
for addr in possible_macs if addr != '')
data['identifiers'] = {
'serial': computer['general']['serial_number'],
'mac_addresses': list(mac_addresses),
'udid': computer['general']['udid'],
}
data['source'] = 'jamf'
# logger.debug("returned info:\n{:s}".format(pprint.pformat(data)))
return data
@staticmethod
def _extract_device_ids_from_userinfo(userinfo_response):
return JAMFDataSourceBase._extract_device_ids_from_response(
userinfo_response.get('user', {}).get('links', {})
)
@staticmethod
def _extract_device_ids_from_response(search_response):
return [computer['id'] for computer in search_response.get('computers', [])]
| 33.825137
| 100
| 0.649758
|
09485c75a6ac4c1f3f56fa564f2eeb302da206d0
| 1,191
|
py
|
Python
|
food_premises_registration/factory.py
|
openregister/food-premises-registration
|
352176ecddfa91e2469f3e7f30323bcc5a8caf39
|
[
"MIT"
] | null | null | null |
food_premises_registration/factory.py
|
openregister/food-premises-registration
|
352176ecddfa91e2469f3e7f30323bcc5a8caf39
|
[
"MIT"
] | null | null | null |
food_premises_registration/factory.py
|
openregister/food-premises-registration
|
352176ecddfa91e2469f3e7f30323bcc5a8caf39
|
[
"MIT"
] | 1
|
2021-04-11T08:30:54.000Z
|
2021-04-11T08:30:54.000Z
|
# -*- coding: utf-8 -*-
'''The app module, containing the app factory function.'''
from flask import Flask, render_template
# from fsa_approved_premises.extensions import (
# #add as needed
# )
def asset_path_context_processor():
return {'asset_path': '/static/'}
def create_app(config_filename):
''' An application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/
'''
app = Flask(__name__)
app.config.from_object(config_filename)
register_errorhandlers(app)
register_blueprints(app)
register_extensions(app)
app.context_processor(asset_path_context_processor)
return app
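# --- Hedged usage sketch (not part of the original module) -------------------
# The factory above is typically driven from a small run script or WSGI entry
# point; the config module name below is an assumption for illustration only:
#
#   from food_premises_registration.factory import create_app
#   app = create_app('config.DevelopmentConfig')
#   app.run(debug=True)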
def register_errorhandlers(app):
def render_error(error):
        # If an HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template("{0}.html".format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_blueprints(app):
from food_premises_registration.frontend.views import frontend
app.register_blueprint(frontend)
def register_extensions(app):
pass
| 29.775
| 73
| 0.718724
|
3272ef05af39b19d111ef81e4abdd1f0c740cb61
| 171
|
py
|
Python
|
Python_Challenge_115/2/2.py
|
LIkelion-at-KOREATECH/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | 28
|
2019-10-15T13:15:26.000Z
|
2021-11-08T08:23:45.000Z
|
Python_Challenge_115/2/2.py
|
jhleed/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | null | null | null |
Python_Challenge_115/2/2.py
|
jhleed/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | 17
|
2019-09-09T00:15:36.000Z
|
2021-01-28T13:08:51.000Z
|
'''
Statement
Given a two-digit integer, swap its digits and print the result.
Example input
79
Example output
97
'''
a = int(input())
print(str(a % 10) + str(a // 10))
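# Worked example: for a = 79, a % 10 == 9 (the last digit) and a // 10 == 7
# (the first digit), so the program prints "97".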
| 13.153846
| 64
| 0.672515
|
167e2afa0accaf9cfc98f6a1fda5758d225a8776
| 276
|
py
|
Python
|
python/myAutoSsh/myAutoSsh.py
|
william-shang/myShowcase
|
5c7adf2f6ddbab7282591b0df5cca7dc6a1b479e
|
[
"MIT"
] | 1
|
2019-07-14T07:40:04.000Z
|
2019-07-14T07:40:04.000Z
|
python/myAutoSsh/myAutoSsh.py
|
william-shang/myShowcase
|
5c7adf2f6ddbab7282591b0df5cca7dc6a1b479e
|
[
"MIT"
] | 4
|
2020-02-26T20:22:13.000Z
|
2021-09-23T23:24:12.000Z
|
python/myAutoSsh/myAutoSsh.py
|
william-shang/myShowcase
|
5c7adf2f6ddbab7282591b0df5cca7dc6a1b479e
|
[
"MIT"
] | null | null | null |
import paramiko
from myVarConf import mySshVars
if __name__ == "__main__":
s = paramiko.SSHClient()
s.load_system_host_keys()
    # hostname/port/username/password are assumed to be attributes of the
    # imported mySshVars config object (the attribute names are an assumption).
    s.connect(mySshVars.hostname, mySshVars.port, mySshVars.username, mySshVars.password)
stdin, stdout, stderr = s.exec_command('uptime')
print stdout.read()
s.close()
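# --- Hedged sketch (not part of the original script) -------------------------
# myVarConf.mySshVars is assumed to be a simple config holder; something like
# the following (hypothetical) definition would satisfy the attribute names
# used above:
#
#   # myVarConf.py
#   class mySshVars(object):
#       hostname = 'example.com'
#       port = 22
#       username = 'user'
#       password = 'secret'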
| 25.090909
| 52
| 0.699275
|
d03b65bdc21d9d33d270d4e393acbf7cbf8839cc
| 19,909
|
py
|
Python
|
psyneulink/library/models/Cohen_Huston1994_horse_race.py
|
SamKG/PsyNeuLink
|
70558bcd870868e1688cb7a7c424d29ca336f2df
|
[
"Apache-2.0"
] | null | null | null |
psyneulink/library/models/Cohen_Huston1994_horse_race.py
|
SamKG/PsyNeuLink
|
70558bcd870868e1688cb7a7c424d29ca336f2df
|
[
"Apache-2.0"
] | 77
|
2020-10-01T06:27:19.000Z
|
2022-03-31T02:03:33.000Z
|
psyneulink/library/models/Cohen_Huston1994_horse_race.py
|
SamKG/PsyNeuLink
|
70558bcd870868e1688cb7a7c424d29ca336f2df
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import numpy as np
import psyneulink as pnl
parser = argparse.ArgumentParser()
parser.add_argument('--no-plot', action='store_false', help='Disable plotting', dest='enable_plot')
parser.add_argument('--threshold', type=float, help='Termination threshold for response output (default: %(default)f)', default=0.55)
parser.add_argument('--word-runs', type=int, help='Number of runs after word is presented (default: %(default)d)', default=5)
parser.add_argument('--color-runs', type=int, help='Number of runs after color is presented (default: %(default)d)', default=4)
parser.add_argument('--settle-trials', type=int, help='Number of trials for composition to initialize and settle (default: %(default)d)', default=50)
parser.add_argument('--pre-stimulus-trials', type=int, help='Number of trials before stimulus is added', default=100)
args = parser.parse_args()
# This implements the horse race Figure shown in Cohen & Huston (1994).
# Note that noise is turned off and each stimulus is only shown once for each stimulus onset asynchrony.
# Define Variables ----------------------------------------------------------------------------------------------------
rate = 0.1 # The integration rate was changed from 0.01 to 0.1
inhibition = -2.0 # Mutual inhibition across each layer
bias = 4.0 # bias for hidden layer units
threshold = args.threshold # Threshold until a response is made, changed from 0.6 to 0.55
settle_trials = args.settle_trials # Time for system to initialize and settle
prior120 = args.pre_stimulus_trials # Cycles needed to be added for stimulus to start
# Different time steps at which the System should end run and start new terminate_processing run
# This is needed for conditions in which the irrelevant condition is like a neutral trial and could already lead to
# a correct response. This is basically the reason why with long positive stimulus onset asynchrony the three
# conditions (congruent, incongruent, neutral) lead to the same reaction time.
terminate2 = 180
terminate3 = 200
terminate4 = 220
terminate5 = 240
# Create mechanisms ---------------------------------------------------------------------------------------------------
# Linear input units, colors: ('red', 'green'), words: ('RED','GREEN')
colors_input_layer = pnl.TransferMechanism(size=3,
function=pnl.Linear,
name='COLORS_INPUT')
words_input_layer = pnl.TransferMechanism(size=3,
function=pnl.Linear,
name='WORDS_INPUT')
task_input_layer = pnl.TransferMechanism(size=2,
function=pnl.Linear,
name='TASK_INPUT')
# Task layer, tasks: ('name the color', 'read the word')
task_layer = pnl.RecurrentTransferMechanism(size=2,
function=pnl.Logistic(),
hetero=-2,
integrator_mode=True,
integration_rate=0.1,
name='TASK')
# Hidden layer units, colors: ('red','green') words: ('RED','GREEN')
colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3,
function=pnl
.Logistic(x_0=4.0),
integrator_mode=True,
hetero=-2.0,
# noise=pnl.NormalDist(mean=0.0, standard_deviation=.0),
integration_rate=0.1, # cohen-huston text says 0.01
name='COLORS HIDDEN')
words_hidden_layer = pnl.RecurrentTransferMechanism(size=3,
function=pnl.Logistic(x_0=4.0),
hetero=-2,
integrator_mode=True,
# noise=pnl.NormalDist(mean=0.0, standard_deviation=.05),
integration_rate=0.1,
name='WORDS HIDDEN')
# Response layer, responses: ('red', 'green'): RecurrentTransferMechanism for self inhibition matrix
response_layer = pnl.RecurrentTransferMechanism(size=2,
function=pnl.Logistic(),
hetero=-2.0,
integrator_mode=True,
integration_rate=0.1,
name='RESPONSE')
# Log mechanisms ------------------------------------------------------------------------------------------------------
#task_layer.set_log_conditions('gain')
task_layer.set_log_conditions('value')
colors_hidden_layer.set_log_conditions('value')
words_hidden_layer.set_log_conditions('value')
response_layer.set_log_conditions('value')
# Connect mechanisms --------------------------------------------------------------------------------------------------
# (note that response layer projections are set to all zero first for initialization)
color_input_weights = pnl.MappingProjection(matrix=np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0]]))
word_input_weights = pnl.MappingProjection(matrix=np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0]]))
task_input_weights = pnl.MappingProjection(matrix=np.array([[1.0, 0.0],
[0.0, 1.0]]))
color_task_weights = pnl.MappingProjection(matrix=np.array([[4.0, 0.0],
[4.0, 0.0],
[4.0, 0.0]]))
task_color_weights = pnl.MappingProjection(matrix=np.array([[4.0, 4.0, 4.0],
[0.0, 0.0, 0.0]]))
word_task_weights = pnl.MappingProjection(matrix=np.array([[0.0, 4.0],
[0.0, 4.0],
[0.0, 4.0]]))
task_word_weights = pnl.MappingProjection(matrix=np.array([[0.0, 0.0, 0.0],
[4.0, 4.0, 4.0]]))
response_color_weights = pnl.MappingProjection(matrix=np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]))
response_word_weights = pnl.MappingProjection(matrix=np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]))
color_response_weights = pnl.MappingProjection(matrix=np.array([[1.5, 0.0],
[0.0, 1.5],
[0.0, 0.0]]))
word_response_weights = pnl.MappingProjection(matrix=np.array([[2.5, 0.0],
[0.0, 2.5],
[0.0, 0.0]]))
Bidirectional_Stroop = pnl.Composition(name='FEEDFORWARD_STROOP_SYSTEM')
# Create pathways -----------------------------------------------------------------------------------------------------
Bidirectional_Stroop.add_linear_processing_pathway(
pathway=[
colors_input_layer,
color_input_weights,
colors_hidden_layer,
color_response_weights,
response_layer,
response_color_weights,
colors_hidden_layer
],
name='COLORS_RESPONSE_PROCESS'
)
Bidirectional_Stroop.add_linear_processing_pathway(
pathway=[
words_input_layer,
word_input_weights,
words_hidden_layer,
word_response_weights,
response_layer,
response_word_weights,
words_hidden_layer
],
name='WORDS_RESPONSE_PROCESS'
)
Bidirectional_Stroop.add_linear_processing_pathway(
pathway=[
task_input_layer,
task_input_weights,
task_layer,
task_color_weights,
colors_hidden_layer,
color_task_weights,
task_layer
]
)
Bidirectional_Stroop.add_linear_processing_pathway(
pathway=[
task_input_layer,
task_layer,
task_word_weights,
words_hidden_layer,
word_task_weights,
task_layer
]
)
# LOGGING:
colors_hidden_layer.set_log_conditions('value')
words_hidden_layer.set_log_conditions('value')
# Bidirectional_Stroop.show_graph(show_dimensions=pnl.ALL)#,show_mechanism_structure=pnl.VALUES) # Uncomment to show graph of the system
# Create threshold function -------------------------------------------------------------------------------------------
# context is automatically passed into Conditions, and references the execution context in which they are being run,
# which in this case is simply the Bidirectional_Stroop system
def pass_threshold(response_layer, thresh, context):
results1 = response_layer.get_output_values(context)[0][0] #red response
results2 = response_layer.get_output_values(context)[0][1] #green response
if results1 >= thresh or results2 >= thresh:
return True
return False
# 2nd threshold function
def pass_threshold2(response_layer, thresh, terminate, context):
results1 = response_layer.get_output_values(context)[0][0] #red response
results2 = response_layer.get_output_values(context)[0][1] #green response
length = response_layer.log.nparray_dictionary()[context.execution_id]['value'].shape[0]
if results1 >= thresh or results2 >= thresh:
return True
if length ==terminate:
return True
return False
# Create different terminate trial conditions --------------------------------------------------------------------------
terminate_trial = {
pnl.TimeScale.TRIAL: pnl.While(pass_threshold, response_layer, threshold)
}
terminate_trial2 = {
pnl.TimeScale.TRIAL: pnl.While(pass_threshold2, response_layer, threshold, terminate2)
}
terminate_trial3 = {
pnl.TimeScale.TRIAL: pnl.While(pass_threshold2, response_layer, threshold, terminate3)
}
terminate_trial4 = {
pnl.TimeScale.TRIAL: pnl.While(pass_threshold2, response_layer, threshold, terminate4)
}
terminate_trial5 = {
pnl.TimeScale.TRIAL: pnl.While(pass_threshold2, response_layer, threshold, terminate5)
}
terminate_list = [terminate_trial2,
terminate_trial3,
terminate_trial4,
terminate_trial5]
# Create test trials function -----------------------------------------------------------------------------------------
# a RED word input is [1,0] to words_input_layer and a GREEN word is [0,1]
# a red color input is [1,0] to colors_input_layer and a green color is [0,1]
# a color-naming trial is [1,0] to task_layer and a word-reading trial is [0,1]
def trial_dict(red_color, green_color, neutral_color, red_word, green_word, neutral_word, CN, WR):
trialdict = {
colors_input_layer: [red_color, green_color, neutral_color],
words_input_layer: [red_word, green_word, neutral_word],
task_input_layer: [CN, WR]
}
return trialdict
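# Worked example (using the encoding described above): a color-naming trial
# showing a red color together with the word GREEN is
# trial_dict(1, 0, 0, 0, 1, 0, 1, 0), i.e. colors_input_layer gets [1, 0, 0],
# words_input_layer gets [0, 1, 0] and task_input_layer gets [1, 0]; this is
# exactly CN_incongruent_trial_input defined below.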
# Define initialization trials separately
# WR_initialize_input = trial_dict(0, 0, 0, 0, 0, 0, 0, 1)
CN_initialize_input = trial_dict(0, 0, 0, 0, 0, 0, 1, 0)
CN_incongruent_trial_input = trial_dict(1, 0, 0, 0, 1, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR
CN_congruent_trial_input = trial_dict(1, 0, 0, 1, 0, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR
CN_control_trial_input = trial_dict(1, 0, 0, 0, 0, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR
CN_control_word_trial_input = trial_dict(0, 0, 0, 1, 0, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR
CN_congruent_word_first_input = trial_dict(0, 0, 0, 1, 0, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR
CN_incongruent_word_first_input = trial_dict(0, 0, 0, 0, 1, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR
# WR_congruent_trial_input = trial_dict(1, 0, 0, 1, 0, 0, 0, 1) #red_color, green color, red_word, green word, CN, WR
# WR_incongruent_trial_input = trial_dict(1, 0, 0, 0, 1, 0, 0, 1) #red_color, green color, red_word, green word, CN, WR
# WR_control_trial_input = trial_dict(1, 0, 0, 0, 0, 0, 0, 1) #red_color, green color, red_word, green word, CN, WR
conditions = 3
runs = args.word_runs
runs2 = args.color_runs
response_all = []
response_all2 = []
Stimulus = [[CN_initialize_input, CN_congruent_word_first_input, CN_congruent_trial_input, CN_control_trial_input],
[CN_initialize_input, CN_incongruent_word_first_input, CN_incongruent_trial_input, CN_control_trial_input],
[CN_initialize_input, CN_control_word_trial_input, CN_control_trial_input, CN_control_trial_input]]
post_settlement_multiplier = int(prior120 / 5)
# First "for loop" over conditions
# Second "for loop" over runs
for cond in range(conditions):
# ---------------------------------------------------------------------------------------------------------------------
# Run congruent trial with word presented 1200 trials prior ------------------------------------------------------------
for run in range(runs):
response_color_weights = pnl.MappingProjection(matrix=np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]))
response_word_weights = pnl.MappingProjection(matrix=np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]))
        Bidirectional_Stroop.run(inputs=Stimulus[cond][0], num_trials=settle_trials)  # let the composition settle with the initialization input
        Bidirectional_Stroop.run(inputs=Stimulus[cond][0], num_trials=post_settlement_multiplier * (run))  # extra settling trials that shift the word onset earlier for this run
response_color_weights = pnl.MappingProjection(matrix=np.array([[1.5, 0.0, 0.0],
[0.0, 1.5, 0.0]]))
response_word_weights = pnl.MappingProjection(matrix=np.array([[2.5, 0.0, 0.0],
[0.0, 2.5, 0.0]]))
        Bidirectional_Stroop.run(inputs=Stimulus[cond][1], num_trials=prior120 - (run * post_settlement_multiplier))  # present the word alone for the remaining pre-stimulus trials
        Bidirectional_Stroop.run(inputs=Stimulus[cond][2], termination_processing=terminate_trial)  # run with the full stimulus until
        # threshold in one of the response layer units is reached
# Store values from run -----------------------------------------------------------------------------------------------
r = response_layer.log.nparray_dictionary('value') # Log response output from special logistic function
rr = r[Bidirectional_Stroop.name]['value']
n_r = rr.shape[0]
rrr = rr.reshape(n_r,2)
response_all.append(rrr.shape[0])
# Clear log & reset ----------------------------------------------------------------------------------------
response_layer.log.clear_entries()
colors_hidden_layer.log.clear_entries()
words_hidden_layer.log.clear_entries()
task_layer.log.clear_entries()
colors_hidden_layer.reset([[0, 0, 0]], context=Bidirectional_Stroop)
words_hidden_layer.reset([[0, 0, 0]], context=Bidirectional_Stroop)
response_layer.reset([[0, 0]], context=Bidirectional_Stroop)
task_layer.reset([[0, 0]], context=Bidirectional_Stroop)
print('response_all: ', response_all)
# Run trials after congruent color was presented ----------------------------------------------------------------------
for run2 in range(runs2):
response_color_weights = pnl.MappingProjection(matrix=np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]))
response_word_weights = pnl.MappingProjection(matrix=np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]))
        Bidirectional_Stroop.run(inputs=Stimulus[cond][0], num_trials=settle_trials)  # let the composition settle with the initialization input
        Bidirectional_Stroop.run(inputs=Stimulus[cond][0], num_trials=prior120)  # keep presenting the initialization input for the pre-stimulus period
response_color_weights = pnl.MappingProjection(matrix=np.array([[1.5, 0.0, 0.0],
[0.0, 1.5, 0.0]]))
response_word_weights = pnl.MappingProjection(matrix=np.array([[2.5, 0.0, 0.0],
[0.0, 2.5, 0.0]]))
        Bidirectional_Stroop.run(inputs=Stimulus[cond][3], termination_processing=terminate_list[run2])  # present the color alone until threshold or this run's time-out
        Bidirectional_Stroop.run(inputs=Stimulus[cond][2], termination_processing=terminate_trial)  # run with the full stimulus until
        # threshold in one of the response layer units is reached
# Store values from run -----------------------------------------------------------------------------------------------
r = response_layer.log.nparray_dictionary('value') # Log response output from special logistic function
rr = r[Bidirectional_Stroop.name]['value']
n_r = rr.shape[0]
rrr = rr.reshape(n_r,2)
response_all.append(rrr.shape[0])
# Clear log & reset ------------------------------------------------------------------------------------
response_layer.log.clear_entries()
colors_hidden_layer.log.clear_entries()
words_hidden_layer.log.clear_entries()
task_layer.log.clear_entries()
colors_hidden_layer.reset([[0, 0, 0]], context=Bidirectional_Stroop)
words_hidden_layer.reset([[0, 0, 0]], context=Bidirectional_Stroop)
response_layer.reset([[0, 0]], context=Bidirectional_Stroop)
task_layer.reset([[0, 0]], context=Bidirectional_Stroop)
# Plotting ------------------------------------------------------------------------------------------------------------
if args.enable_plot:
import matplotlib.pyplot as plt
# compute regression for model
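    # np.dot with a scalar multiplies element-wise, so the line below maps each
    # cycle count in response_all to a reaction time as RT = 2 * cycles + 123;
    # the slope and intercept appear to be the model's cycle-to-millisecond
    # regression constants (an interpretation, not stated in this file).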
reg = np.dot(response_all, 2) + 123
plt.figure()
# plt.plot(response_all[0:9])
# plt.plot(response_all[9:18])
# plt.plot(response_all[18:27])
response_len = runs + runs2
stimulus_onset_asynchrony = np.linspace(-400, 400, response_len)
plt.plot(stimulus_onset_asynchrony, reg[0:response_len], '-^')
plt.plot(stimulus_onset_asynchrony, reg[response_len:2 * response_len], '-s')
plt.plot(stimulus_onset_asynchrony, reg[2 * response_len:3 * response_len], '-o')
plt.title('stimulus onset asynchrony - horse race model ')
plt.legend(['congruent', 'incongruent', 'neutral'])
plt.ylabel('reaction time in ms')
plt.show(block=not pnl._called_from_pytest)
| 53.808108
| 217
| 0.556683
|
534595e76603b68cf90098d0a128cc87ff0a2f23
| 1,086
|
py
|
Python
|
backend/app/models/event.py
|
gustavoCorreiaGonzalez/hackathon_ccr
|
42e1fac0499bcec44b0730ca54e4effcc1014923
|
[
"MIT"
] | null | null | null |
backend/app/models/event.py
|
gustavoCorreiaGonzalez/hackathon_ccr
|
42e1fac0499bcec44b0730ca54e4effcc1014923
|
[
"MIT"
] | 1
|
2020-06-15T01:49:14.000Z
|
2020-06-15T01:49:14.000Z
|
backend/app/models/event.py
|
gustavoCorreiaGonzalez/hackathon_ccr
|
42e1fac0499bcec44b0730ca54e4effcc1014923
|
[
"MIT"
] | null | null | null |
from app import db, ma
class Event(db.Model):
__tablename__ = 'events'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), unique=True, nullable=False)
descripton = db.Column(db.String(255), nullable=False)
date = db.Column(db.Date(), nullable=False)
# "bem estar" "saúde" "informativo"
type_event = db.Column(db.String(255), nullable=False)
latitude = db.Column(db.String(255), nullable=False)
longitude = db.Column(db.String(255), nullable=False)
def __init__(self, name, descripton, date, type_event, latitude, longitude):
self.name = name
self.descripton = descripton
self.date = date
self.type_event = type_event
self.latitude = latitude
self.longitude = longitude
    def __repr__(self):
return f'<Event : {self.name} >'
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'descripton', 'date', 'type_event', 'latitude', 'longitude')
event_share_schema = EventSchema()
events_share_schema = EventSchema(many=True)
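# --- Hedged usage sketch (not part of the original module) -------------------
# Inside an application context the model and schemas above would typically be
# used along these lines (the values are assumptions for illustration only):
#
#   import datetime
#   event = Event('Health day', 'Free checkups', datetime.date(2020, 6, 20),
#                 'saúde', '-23.55', '-46.63')
#   db.session.add(event)
#   db.session.commit()
#   payload = events_share_schema.dump(Event.query.all())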
| 32.909091
| 92
| 0.657459
|
c03ea544cd2cd3429c3c028698afe0cb76d32f41
| 10,663
|
py
|
Python
|
tests/ut/python/dataset/test_datasets_voc.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | 4
|
2021-01-26T09:14:01.000Z
|
2021-01-26T09:17:24.000Z
|
tests/ut/python/dataset/test_datasets_voc.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/python/dataset/test_datasets_voc.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
DATA_DIR = "../data/dataset/testVOC2012"
IMAGE_SHAPE = [2268, 2268, 2268, 2268, 642, 607, 561, 596, 612, 2268]
TARGET_SHAPE = [680, 680, 680, 680, 642, 607, 561, 596, 612, 680]
def test_voc_segmentation():
data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", shuffle=False, decode=True)
num = 0
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
assert item["image"].shape[0] == IMAGE_SHAPE[num]
assert item["target"].shape[0] == TARGET_SHAPE[num]
num += 1
assert num == 10
def test_voc_detection():
data1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
num = 0
count = [0, 0, 0, 0, 0, 0]
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
assert item["image"].shape[0] == IMAGE_SHAPE[num]
for label in item["label"]:
count[label[0]] += 1
num += 1
assert num == 9
assert count == [3, 2, 1, 2, 4, 3]
def test_voc_class_index():
class_index = {'car': 0, 'cat': 1, 'train': 5}
data1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", class_indexing=class_index, decode=True)
class_index1 = data1.get_class_indexing()
assert (class_index1 == {'car': 0, 'cat': 1, 'train': 5})
data1 = data1.shuffle(4)
class_index2 = data1.get_class_indexing()
assert (class_index2 == {'car': 0, 'cat': 1, 'train': 5})
num = 0
count = [0, 0, 0, 0, 0, 0]
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
for label in item["label"]:
count[label[0]] += 1
assert label[0] in (0, 1, 5)
num += 1
assert num == 6
assert count == [3, 2, 0, 0, 0, 3]
def test_voc_get_class_indexing():
data1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", decode=True)
class_index1 = data1.get_class_indexing()
assert (class_index1 == {'car': 0, 'cat': 1, 'chair': 2, 'dog': 3, 'person': 4, 'train': 5})
data1 = data1.shuffle(4)
class_index2 = data1.get_class_indexing()
assert (class_index2 == {'car': 0, 'cat': 1, 'chair': 2, 'dog': 3, 'person': 4, 'train': 5})
num = 0
count = [0, 0, 0, 0, 0, 0]
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
for label in item["label"]:
count[label[0]] += 1
assert label[0] in (0, 1, 2, 3, 4, 5)
num += 1
assert num == 9
assert count == [3, 2, 1, 2, 4, 3]
def test_case_0():
data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", decode=True)
resize_op = vision.Resize((224, 224))
data1 = data1.map(operations=resize_op, input_columns=["image"])
data1 = data1.map(operations=resize_op, input_columns=["target"])
repeat_num = 4
data1 = data1.repeat(repeat_num)
batch_size = 2
data1 = data1.batch(batch_size, drop_remainder=True)
num = 0
for _ in data1.create_dict_iterator(num_epochs=1):
num += 1
assert num == 20
def test_case_1():
data1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", decode=True)
resize_op = vision.Resize((224, 224))
data1 = data1.map(operations=resize_op, input_columns=["image"])
repeat_num = 4
data1 = data1.repeat(repeat_num)
batch_size = 2
data1 = data1.batch(batch_size, drop_remainder=True, pad_info={})
num = 0
for _ in data1.create_dict_iterator(num_epochs=1):
num += 1
assert num == 18
def test_case_2():
data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", decode=True)
sizes = [0.5, 0.5]
randomize = False
dataset1, dataset2 = data1.split(sizes=sizes, randomize=randomize)
num_iter = 0
for _ in dataset1.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 5
num_iter = 0
for _ in dataset2.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 5
def test_voc_exception():
try:
data1 = ds.VOCDataset(DATA_DIR, task="InvalidTask", usage="train", decode=True)
for _ in data1.create_dict_iterator(num_epochs=1):
pass
assert False
except ValueError:
pass
try:
data2 = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", class_indexing={"cat": 0}, decode=True)
for _ in data2.create_dict_iterator(num_epochs=1):
pass
assert False
except ValueError:
pass
try:
data3 = ds.VOCDataset(DATA_DIR, task="Detection", usage="notexist", decode=True)
for _ in data3.create_dict_iterator(num_epochs=1):
pass
assert False
except ValueError:
pass
try:
data4 = ds.VOCDataset(DATA_DIR, task="Detection", usage="xmlnotexist", decode=True)
for _ in data4.create_dict_iterator(num_epochs=1):
pass
assert False
except RuntimeError:
pass
try:
data5 = ds.VOCDataset(DATA_DIR, task="Detection", usage="invalidxml", decode=True)
for _ in data5.create_dict_iterator(num_epochs=1):
pass
assert False
except RuntimeError:
pass
try:
data6 = ds.VOCDataset(DATA_DIR, task="Detection", usage="xmlnoobject", decode=True)
for _ in data6.create_dict_iterator(num_epochs=1):
pass
assert False
except RuntimeError:
pass
def exception_func(item):
        raise Exception("Error occurred!")
try:
data = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False)
data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
try:
data = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False)
data = data.map(operations=vision.Decode(), input_columns=["image"], num_parallel_workers=1)
data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
try:
data = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False)
data = data.map(operations=exception_func, input_columns=["bbox"], num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
try:
data = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False)
data = data.map(operations=exception_func, input_columns=["difficult"], num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
try:
data = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False)
data = data.map(operations=exception_func, input_columns=["truncate"], num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
try:
data = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", shuffle=False)
data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
try:
data = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", shuffle=False)
data = data.map(operations=vision.Decode(), input_columns=["image"], num_parallel_workers=1)
data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
try:
data = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", shuffle=False)
data = data.map(operations=exception_func, input_columns=["target"], num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
try:
data = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", shuffle=False)
data = data.map(operations=vision.Decode(), input_columns=["target"], num_parallel_workers=1)
data = data.map(operations=exception_func, input_columns=["target"], num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
def test_voc_num_classes():
data1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
assert data1.num_classes() is None
class_index = {'car': 0, 'cat': 1, 'train': 5}
data2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", class_indexing=class_index, decode=True)
assert data2.num_classes() is None
if __name__ == '__main__':
test_voc_segmentation()
test_voc_detection()
test_voc_class_index()
test_voc_get_class_indexing()
test_case_0()
test_case_1()
test_case_2()
test_voc_exception()
test_voc_num_classes()
| 36.642612
| 115
| 0.644284
|
4af9af5c40fa40446f105bfaa6d68591776e8020
| 5,004
|
py
|
Python
|
DigitRecognizer/cnn_solution.py
|
zhong110020/Kaggle
|
981dfd7a8f3fbb2b6545cafec6877b39aed3d16d
|
[
"MIT"
] | 12
|
2017-01-02T03:29:35.000Z
|
2019-07-08T11:20:32.000Z
|
DigitRecognizer/cnn_solution.py
|
zhong110020/Kaggle
|
981dfd7a8f3fbb2b6545cafec6877b39aed3d16d
|
[
"MIT"
] | null | null | null |
DigitRecognizer/cnn_solution.py
|
zhong110020/Kaggle
|
981dfd7a8f3fbb2b6545cafec6877b39aed3d16d
|
[
"MIT"
] | 14
|
2017-11-25T16:09:09.000Z
|
2019-04-07T04:54:15.000Z
|
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import pandas as pd
'''Load the training data'''
mnist = pd.read_csv(r'data/train.csv')
train_labels = mnist['label']
train_images = mnist.iloc[:,1:]
train_images = train_images.astype(np.float)
train_images = np.multiply(train_images, 1.0/255.0)
train_images = train_images.as_matrix()
train_labels = train_labels.as_matrix()
def compute_accuracy(xs,ys,X,y,keep_prob,sess,prediction):
y_pre = sess.run(prediction,feed_dict={xs:X,keep_prob:1.0})
correct_prediction = tf.equal(tf.argmax(y_pre,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
result = sess.run(accuracy,feed_dict={xs:X,ys:y,keep_prob:1.0})
return result
def weight_variable(shape):
inital = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(inital)
def bias_variable(shape):
inital = tf.constant(0.1,shape=shape)
return tf.Variable(inital)
def conv2d(x,W):  # x is the input image tensor, W is the weight of this convolutional layer
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')  # strides[0] and strides[3] are fixed at 1; the middle two entries mean the kernel moves one step in x and one step in y
def max_pool_2x2(x):
return tf.nn.max_pool(x,ksize=[1,2,2,1],
strides=[1,2,2,1],
                          padding='SAME')  # the pooling kernel is 2x2 (ksize=[1,2,2,1]) and the stride is 2 (strides=[1,2,2,1])
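# Shape bookkeeping for the network below: a 28x28 input stays 28x28 through
# the SAME-padded convolutions, becomes 14x14 after the first 2x2 max-pool and
# 7x7 after the second, which is why the fully connected layer reshapes to
# 7*7*64 features.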
epochs_compeleted = 0
index_in_epoch = 0
def cnn():
#mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
xs = tf.placeholder(tf.float32,[None,784])
ys = tf.placeholder(tf.float32,[None,10])
keep_prob = tf.placeholder(tf.float32)
    x_image = tf.reshape(xs,[-1,28,28,1])  # -1 leaves the batch dimension unspecified; the trailing 1 is the number of channels (grayscale input), which would be 3 for RGB images
# conv layer1
W_conv1 = weight_variable([5,5,1,32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# conv layer2
W_conv2 = weight_variable([5,5,32,64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2)+b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
h_pool2_flat = tf.reshape(h_pool2, [-1,7*7*64])
W_fc1 = weight_variable([7*7*64,1024])
b_fc1 = bias_variable([1024])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)
W_fc2 = weight_variable([1024,10])
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2)
predict = tf.argmax(prediction, 1)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction),reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
for i in range(2000):
batch_xs,batch_ys = next_batch(mnist, batch_size=100)
sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
mnist_test = pd.read_csv(r'data/test.csv')
    mnist_test = mnist_test.astype(np.float)
mnist_test = np.multiply(mnist_test,1.0/255.0)
X = mnist_test.as_matrix()
BATCH_SIZE = 100
predictions = np.zeros(mnist_test.shape[0])
    for i in range(mnist_test.shape[0]//BATCH_SIZE):  # predict batch by batch, otherwise memory (about 4 GB here) may not be enough
predictions[i*BATCH_SIZE : (i+1)*BATCH_SIZE] = sess.run(predict,feed_dict={xs:X[i*BATCH_SIZE : (i+1)*BATCH_SIZE],keep_prob:1.0})
result = pd.DataFrame(data={'ImageId':range(1,X.shape[0]+1),'Label':predictions.astype(np.int32)})
result.to_csv(r'my_prediction.csv',index=False)
#np.savetxt('submission_softmax.csv',
#np.c_[range(1,len(test_images)+1),predicted_lables],
#delimiter=',',
#header = 'ImageId,Label',
#comments = '',
#fmt='%d')
'''Map dense labels to one-hot vectors, e.g. 1 --> [0,1,0,0,0,0,0,0,0,0]'''
def dense_to_one_hot(label_dense,num_classes):
num_labels = label_dense.shape[0]
index_offset = np.arange(num_labels)*num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + label_dense.ravel()] = 1  # .flat is a flattened view of the array
return labels_one_hot
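# Worked example: dense_to_one_hot(np.array([1, 3]), 10) returns a (2, 10)
# array whose rows are [0,1,0,0,0,0,0,0,0,0] and [0,0,0,1,0,0,0,0,0,0].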
'''Mini-batch stochastic gradient descent: return the next batch of the training set'''
def next_batch(mnist,batch_size):
num_examples = mnist.shape[0]
global train_images
global train_labels
global index_in_epoch
global epochs_compeleted
start = index_in_epoch
index_in_epoch += batch_size
if index_in_epoch > num_examples:
epochs_compeleted += 1
perm = np.arange(num_examples)
np.random.shuffle(perm)
train_images = train_images[perm]
train_labels = train_labels[perm]
start = 0
index_in_epoch = batch_size
assert batch_size <= num_examples
end = index_in_epoch
train_labels_one_hot = dense_to_one_hot(train_labels[start:end], num_classes=10)
return train_images[start:end], train_labels_one_hot
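# Note: when index_in_epoch passes the end of the training set, the images and
# labels are reshuffled in unison and the counter restarts, so every batch is
# drawn from a single (freshly permuted) epoch.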
if __name__ == '__main__':
cnn()
| 35.239437
| 136
| 0.676859
|
26abe8af77cb73bdf9bbee348e473c10463db026
| 36,046
|
py
|
Python
|
desktop/libs/notebook/src/notebook/api.py
|
sungpeo/hue
|
3be162ce57af272cb117fadacd7f6a790667fb9f
|
[
"Apache-2.0"
] | null | null | null |
desktop/libs/notebook/src/notebook/api.py
|
sungpeo/hue
|
3be162ce57af272cb117fadacd7f6a790667fb9f
|
[
"Apache-2.0"
] | null | null | null |
desktop/libs/notebook/src/notebook/api.py
|
sungpeo/hue
|
3be162ce57af272cb117fadacd7f6a790667fb9f
|
[
"Apache-2.0"
] | 1
|
2021-07-30T09:50:35.000Z
|
2021-07-30T09:50:35.000Z
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
import json
import logging
import sqlparse
import sys
from django.urls import reverse
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET, require_POST
import opentracing.tracer
from azure.abfs.__init__ import abfspath
from desktop.conf import TASK_SERVER, ENABLE_CONNECTORS
from desktop.lib.i18n import smart_str
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document2, Document, __paginate, _get_gist_document, FilesystemException
from indexer.file_format import HiveFormat
from indexer.fields import Field
from metadata.conf import OPTIMIZER
from notebook.connectors.base import Notebook, QueryExpired, SessionExpired, QueryError, _get_snippet_name, patch_snippet_for_connector
from notebook.connectors.hiveserver2 import HS2Api
from notebook.decorators import api_error_handler, check_document_access_permission, check_document_modify_permission
from notebook.models import escape_rows, make_notebook, upgrade_session_properties, get_api, MockRequest
if sys.version_info[0] > 2:
from urllib.parse import unquote as urllib_unquote
else:
from urllib import unquote as urllib_unquote
LOG = logging.getLogger(__name__)
DEFAULT_HISTORY_NAME = ''
@require_POST
@api_error_handler
def create_notebook(request):
response = {'status': -1}
editor_type = request.POST.get('type', 'notebook')
gist_id = request.POST.get('gist')
directory_uuid = request.POST.get('directory_uuid')
if gist_id:
gist_doc = _get_gist_document(uuid=gist_id)
statement = json.loads(gist_doc.data)['statement']
editor = make_notebook(
name='',
description='',
editor_type=editor_type,
statement=statement,
is_presentation_mode=True
)
else:
editor = Notebook()
data = editor.get_data()
if editor_type != 'notebook':
data['name'] = ''
data['type'] = 'query-%s' % editor_type # TODO: Add handling for non-SQL types
data['directoryUuid'] = directory_uuid
editor.data = json.dumps(data)
response['notebook'] = editor.get_data()
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def create_session(request):
response = {'status': -1}
session = json.loads(request.POST.get('session', '{}'))
properties = session.get('properties', [])
response['session'] = get_api(request, session).create_session(lang=session['type'], properties=properties)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def close_session(request):
response = {'status': -1}
session = json.loads(request.POST.get('session', '{}'))
response['session'] = get_api(request, {'type': session['type']}).close_session(session=session)
response['status'] = 0
return JsonResponse(response)
def _execute_notebook(request, notebook, snippet):
response = {'status': -1}
result = None
history = None
historify = (notebook['type'] != 'notebook' or snippet.get('wasBatchExecuted')) and not notebook.get('skipHistorify')
try:
try:
sessions = notebook.get('sessions') and notebook['sessions'] # Session reference for snippet execution without persisting it
active_executable = json.loads(request.POST.get('executable', '{}')) # Editor v2
# TODO: Use statement, database etc. from active_executable
if historify:
history = _historify(notebook, request.user)
notebook = Notebook(document=history).get_data()
interpreter = get_api(request, snippet)
if snippet.get('interface') == 'sqlalchemy':
interpreter.options['session'] = sessions[0]
with opentracing.tracer.start_span('interpreter') as span:
# interpreter.execute needs the sessions, but we don't want to persist them
pre_execute_sessions = notebook['sessions']
notebook['sessions'] = sessions
response['handle'] = interpreter.execute(notebook, snippet)
notebook['sessions'] = pre_execute_sessions
# Retrieve and remove the result from the handle
if response['handle'].get('sync'):
result = response['handle'].pop('result')
finally:
if historify:
_snippet = [s for s in notebook['snippets'] if s['id'] == snippet['id']][0]
if 'id' in active_executable: # Editor v2
# notebook_executable is the 1-to-1 match of active_executable in the notebook structure
notebook_executable = [e for e in _snippet['executor']['executables'] if e['id'] == active_executable['id']][0]
if 'handle' in response:
notebook_executable['handle'] = response['handle']
if history:
notebook_executable['history'] = {
'id': history.id,
'uuid': history.uuid
}
notebook_executable['operationId'] = history.uuid
if 'handle' in response: # No failure
if 'result' not in _snippet: # Editor v2
_snippet['result'] = {}
_snippet['result']['handle'] = response['handle']
_snippet['result']['statements_count'] = response['handle'].get('statements_count', 1)
_snippet['result']['statement_id'] = response['handle'].get('statement_id', 0)
_snippet['result']['handle']['statement'] = response['handle'].get('statement', snippet['statement']).strip() # For non HS2, as non multi query yet
else:
_snippet['status'] = 'failed'
if history: # If _historify failed, history will be None. If we get Atomic block exception, something underneath interpreter.execute() crashed and is not handled.
history.update_data(notebook)
history.save()
response['history_id'] = history.id
response['history_uuid'] = history.uuid
if notebook['isSaved']: # Keep track of history of saved queries
response['history_parent_uuid'] = history.dependencies.filter(type__startswith='query-').latest('last_modified').uuid
except QueryError as ex: # We inject the history information from _historify() to the failed queries
if response.get('history_id'):
ex.extra['history_id'] = response['history_id']
if response.get('history_uuid'):
ex.extra['history_uuid'] = response['history_uuid']
if response.get('history_parent_uuid'):
ex.extra['history_parent_uuid'] = response['history_parent_uuid']
raise ex
# Inject and HTML escape results
if result is not None:
response['result'] = result
response['result']['data'] = escape_rows(result['data'])
response['status'] = 0
return response
@require_POST
@check_document_access_permission
@api_error_handler
def execute(request, dialect=None):
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
if dialect:
notebook['dialect'] = dialect
with opentracing.tracer.start_span('notebook-execute') as span:
span.set_tag('user-id', request.user.username)
response = _execute_notebook(request, notebook, snippet)
span.set_tag('query-id', response.get('handle', {}).get('guid'))
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def check_status(request):
response = {'status': -1}
operation_id = request.POST.get('operationId')
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
with opentracing.tracer.start_span('notebook-check_status') as span:
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet.get('result', {}).get('handle', {}).get('guid')
)
response = _check_status(request, notebook=notebook, snippet=snippet, operation_id=operation_id)
return JsonResponse(response)
def _check_status(request, notebook=None, snippet=None, operation_id=None):
response = {'status': -1}
if operation_id or not snippet: # To unify with _get_snippet
nb_doc = Document2.objects.get_by_uuid(user=request.user, uuid=operation_id or notebook['uuid'])
notebook = Notebook(document=nb_doc).get_data() # Used below
snippet = notebook['snippets'][0]
try:
response['query_status'] = get_api(request, snippet).check_status(notebook, snippet)
response['status'] = 0
except SessionExpired:
response['status'] = 'expired'
raise
except QueryExpired:
response['status'] = 'expired'
raise
finally:
if response['status'] == 0 and snippet['status'] != response['query_status']:
status = response['query_status']['status']
elif response['status'] == 'expired':
status = 'expired'
else:
status = 'failed'
if response.get('query_status'):
has_result_set = response['query_status'].get('has_result_set')
else:
has_result_set = None
if notebook.get('dialect') or notebook['type'].startswith('query') or notebook.get('isManaged'):
nb_doc = Document2.objects.get_by_uuid(user=request.user, uuid=operation_id or notebook['uuid'])
if nb_doc.can_write(request.user):
nb = Notebook(document=nb_doc).get_data()
if status != nb['snippets'][0]['status'] or has_result_set != nb['snippets'][0].get('has_result_set'):
nb['snippets'][0]['status'] = status
if has_result_set is not None:
nb['snippets'][0]['has_result_set'] = has_result_set
nb_doc.update_data(nb)
nb_doc.save()
return response
@require_POST
@check_document_access_permission
@api_error_handler
def fetch_result_data(request):
operation_id = request.POST.get('operationId')
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
rows = json.loads(request.POST.get('rows', '100'))
start_over = json.loads(request.POST.get('startOver', 'false'))
with opentracing.tracer.start_span('notebook-fetch_result_data') as span:
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
response = _fetch_result_data(request.user, notebook, snippet, operation_id, rows=rows, start_over=start_over)
response['status'] = 0
return JsonResponse(response)
def _fetch_result_data(user, notebook=None, snippet=None, operation_id=None, rows=100, start_over=False, nulls_only=False):
snippet = _get_snippet(user, notebook, snippet, operation_id)
request = MockRequest(user)
response = {
'result': get_api(request, snippet).fetch_result(notebook, snippet, rows, start_over)
}
# Materialize and HTML escape results
if response['result'].get('data') and response['result'].get('type') == 'table' and not response['result'].get('isEscaped'):
response['result']['data'] = escape_rows(response['result']['data'], nulls_only=nulls_only)
response['result']['isEscaped'] = True
return response
@require_POST
@check_document_access_permission
@api_error_handler
def fetch_result_metadata(request):
response = {'status': -1}
operation_id = request.POST.get('operationId')
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
with opentracing.tracer.start_span('notebook-fetch_result_metadata') as span:
response['result'] = get_api(request, snippet).fetch_result_metadata(notebook, snippet)
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def fetch_result_size(request):
response = {'status': -1}
operation_id = request.POST.get('operationId')
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
with opentracing.tracer.start_span('notebook-fetch_result_size') as span:
response['result'] = get_api(request, snippet).fetch_result_size(notebook, snippet)
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def cancel_statement(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = None
operation_id = request.POST.get('operationId') or notebook['uuid']
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
with opentracing.tracer.start_span('notebook-cancel_statement') as span:
response['result'] = get_api(request, snippet).cancel(notebook, snippet)
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def get_logs(request):
response = {'status': -1}
operation_id = request.POST.get('operationId')
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
if operation_id:
notebook['uuid'] = operation_id
startFrom = request.POST.get('from')
startFrom = int(startFrom) if startFrom else None
size = request.POST.get('size')
size = int(size) if size else None
full_log = smart_str(request.POST.get('full_log', ''))
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
db = get_api(request, snippet)
with opentracing.tracer.start_span('notebook-get_logs') as span:
logs = smart_str(db.get_log(notebook, snippet, startFrom=startFrom, size=size))
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
full_log += logs
jobs = db.get_jobs(notebook, snippet, full_log)
response['logs'] = logs.strip()
response['progress'] = min(
db.progress(notebook, snippet, logs=full_log),
99
) if snippet['status'] != 'available' and snippet['status'] != 'success' else 100
response['jobs'] = jobs
response['isFullLogs'] = db.get_log_is_full_log(notebook, snippet)
response['status'] = 0
return JsonResponse(response)
def _save_notebook(notebook, user):
if notebook['snippets'][0].get('connector') and notebook['snippets'][0]['connector'].get('dialect'): # TODO Connector unification
notebook_type = 'query-%(dialect)s' % notebook['snippets'][0]['connector']
if notebook['snippets'][0] and notebook['snippets'][0].get('executor'):
notebook['snippets'][0]['executor']['executables'] = []
else:
notebook_type = notebook.get('type', 'notebook')
save_as = False
if notebook.get('parentSavedQueryUuid'): # We save into the original saved query, not into the query history
notebook_doc = Document2.objects.get_by_uuid(user=user, uuid=notebook['parentSavedQueryUuid'])
elif notebook.get('id'):
notebook_doc = Document2.objects.get(id=notebook['id'])
else:
notebook_doc = Document2.objects.create(name=notebook['name'], uuid=notebook['uuid'], type=notebook_type, owner=user)
Document.objects.link(notebook_doc, owner=notebook_doc.owner, name=notebook_doc.name, description=notebook_doc.description, extra=notebook_type)
save_as = True
if notebook.get('directoryUuid'):
notebook_doc.parent_directory = Document2.objects.get_by_uuid(user=user, uuid=notebook.get('directoryUuid'), perm_type='write')
else:
notebook_doc.parent_directory = Document2.objects.get_home_directory(user)
notebook['isSaved'] = True
notebook['isHistory'] = False
notebook['id'] = notebook_doc.id
_clear_sessions(notebook)
notebook_doc1 = notebook_doc._get_doc1(doc2_type=notebook_type)
if ENABLE_CONNECTORS.get():
notebook_doc.connector_id = int(notebook['snippets'][0]['connector']['type'])
notebook_doc.update_data(notebook)
notebook_doc.search = _get_statement(notebook)
notebook_doc.name = notebook_doc1.name = notebook['name']
notebook_doc.description = notebook_doc1.description = notebook['description']
notebook_doc.save()
notebook_doc1.save()
return notebook_doc, save_as
@api_error_handler
@require_POST
@check_document_modify_permission()
def save_notebook(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
notebook_doc, save_as = _save_notebook(notebook, request.user)
response['status'] = 0
response['save_as'] = save_as
response.update(notebook_doc.to_dict())
response['message'] = request.POST.get('editorMode') == 'true' and _('Query saved successfully') or _('Notebook saved successfully')
return JsonResponse(response)
def _clear_sessions(notebook):
notebook['sessions'] = [_s for _s in notebook['sessions'] if _s['type'] in ('scala', 'spark', 'pyspark', 'sparkr', 'r')]
def _historify(notebook, user):
query_type = 'query-%(dialect)s' % notebook if ENABLE_CONNECTORS.get() else notebook['type']
name = notebook['name'] if (notebook['name'] and notebook['name'].strip() != '') else DEFAULT_HISTORY_NAME
is_managed = notebook.get('isManaged') == True # Prevents None
if is_managed and Document2.objects.filter(uuid=notebook['uuid']).exists():
history_doc = Document2.objects.get(uuid=notebook['uuid'])
else:
history_doc = Document2.objects.create(
name=name,
type=query_type,
owner=user,
is_history=True,
is_managed=is_managed,
)
# Link history of saved query
if notebook['isSaved']:
parent_doc = Document2.objects.get(uuid=notebook.get('parentSavedQueryUuid') or notebook['uuid']) # From previous history query or initial saved query
notebook['parentSavedQueryUuid'] = parent_doc.uuid
history_doc.dependencies.add(parent_doc)
if not is_managed:
Document.objects.link(
history_doc,
name=history_doc.name,
owner=history_doc.owner,
description=history_doc.description,
extra=query_type
)
notebook['uuid'] = history_doc.uuid
_clear_sessions(notebook)
if ENABLE_CONNECTORS.get():
history_doc.connector_id = int(notebook['type'])
history_doc.update_data(notebook)
history_doc.search = _get_statement(notebook)
history_doc.save()
return history_doc
def _get_statement(notebook):
if notebook['snippets'] and len(notebook['snippets']) > 0:
return Notebook.statement_with_variables(notebook['snippets'][0])
return ''
@require_GET
@api_error_handler
@check_document_access_permission
def get_history(request):
response = {'status': -1}
doc_type = request.GET.get('doc_type')
doc_text = request.GET.get('doc_text')
connector_id = request.GET.get('doc_connector')
page = min(int(request.GET.get('page', 1)), 100)
limit = min(int(request.GET.get('limit', 50)), 100)
is_notification_manager = request.GET.get('is_notification_manager', 'false') == 'true'
if is_notification_manager:
docs = Document2.objects.get_tasks_history(user=request.user)
else:
docs = Document2.objects.get_history(doc_type='query-%s' % doc_type, connector_id=connector_id, user=request.user)
if doc_text:
docs = docs.filter(Q(name__icontains=doc_text) | Q(description__icontains=doc_text) | Q(search__icontains=doc_text))
# Paginate
docs = docs.order_by('-last_modified')
response['count'] = docs.count()
docs = __paginate(page, limit, queryset=docs)['documents']
history = []
for doc in docs:
notebook = Notebook(document=doc).get_data()
if 'snippets' in notebook:
statement = notebook['description'] if is_notification_manager else _get_statement(notebook)
history.append({
'name': doc.name,
'id': doc.id,
'uuid': doc.uuid,
'type': doc.type,
'data': {
'statement': statement[:1001] if statement else '',
'lastExecuted': notebook['snippets'][0].get('lastExecuted', -1),
'status': notebook['snippets'][0]['status'],
'parentSavedQueryUuid': notebook.get('parentSavedQueryUuid', '')
} if notebook['snippets'] else {},
'absoluteUrl': doc.get_absolute_url(),
})
else:
LOG.error('Incomplete History Notebook: %s' % notebook)
response['history'] = sorted(history, key=lambda row: row['data']['lastExecuted'], reverse=True)
response['message'] = _('History fetched')
response['status'] = 0
return JsonResponse(response)
@require_POST
@api_error_handler
@check_document_modify_permission()
def clear_history(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
doc_type = request.POST.get('doc_type')
is_notification_manager = request.POST.get('is_notification_manager', 'false') == 'true'
if is_notification_manager:
history = Document2.objects.get_tasks_history(user=request.user)
else:
history = Document2.objects.get_history(doc_type='query-%s' % doc_type, user=request.user)
response['updated'] = history.delete()
response['message'] = _('History cleared!')
response['status'] = 0
return JsonResponse(response)
@require_GET
@check_document_access_permission
def open_notebook(request):
response = {'status': -1}
notebook_id = request.GET.get('notebook')
notebook = Notebook(document=Document2.objects.get(id=notebook_id))
notebook = upgrade_session_properties(request, notebook)
response['status'] = 0
response['notebook'] = notebook.get_json()
response['message'] = _('Notebook loaded successfully')
return JsonResponse(response)
@require_POST
@check_document_access_permission
def close_notebook(request):
response = {'status': -1, 'result': []}
notebook = json.loads(request.POST.get('notebook', '{}'))
for session in [_s for _s in notebook['sessions']]:
try:
api = get_api(request, session)
if hasattr(api, 'close_session_idle'):
response['result'].append(api.close_session_idle(notebook, session))
else:
response['result'].append(api.close_session(session))
except QueryExpired:
pass
except Exception as e:
LOG.exception('Error closing session %s' % str(e))
return JsonResponse(response)
@require_POST
@check_document_access_permission
def close_statement(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = None
operation_id = request.POST.get('operationId')
if operation_id and not notebook.get('uuid'):
notebook['uuid'] = operation_id
try:
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
with opentracing.tracer.start_span('notebook-close_statement') as span:
response['result'] = get_api(request, snippet).close_statement(notebook, snippet)
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
except QueryExpired:
response['message'] = _('Query already expired.')
except FilesystemException:
response['message'] = _('Query id could not be found.')
else:
response['message'] = _('Query closed.')
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def autocomplete(request, server=None, database=None, table=None, column=None, nested=None):
response = {'status': -1}
# Passed by check_document_access_permission but unused by APIs
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
action = request.POST.get('operation', 'schema')
try:
autocomplete_data = get_api(request, snippet).autocomplete(snippet, database, table, column, nested, action)
response.update(autocomplete_data)
except QueryExpired as e:
LOG.warn('Expired query seen: %s' % e)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def get_sample_data(request, server=None, database=None, table=None, column=None):
response = {'status': -1}
# Passed by check_document_access_permission but unused by APIs
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
is_async = json.loads(request.POST.get('async', 'false'))
operation = json.loads(request.POST.get('operation', '"default"'))
sample_data = get_api(request, snippet).get_sample_data(snippet, database, table, column, is_async=is_async, operation=operation)
response.update(sample_data)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def explain(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
response = get_api(request, snippet).explain(notebook, snippet)
return JsonResponse(response)
@require_POST
@api_error_handler
def format(request):
response = {'status': 0}
statements = request.POST.get('statements', '')
response['formatted_statements'] = sqlparse.format(statements, reindent=True, keyword_case='upper') # SQL only currently
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def export_result(request):
response = {'status': -1, 'message': _('Success')}
# Passed by check_document_access_permission but unused by APIs
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
data_format = json.loads(request.POST.get('format', '"hdfs-file"'))
destination = urllib_unquote(json.loads(request.POST.get('destination', '""')))
overwrite = json.loads(request.POST.get('overwrite', 'false'))
is_embedded = json.loads(request.POST.get('is_embedded', 'false'))
start_time = json.loads(request.POST.get('start_time', '-1'))
api = get_api(request, snippet)
if data_format == 'hdfs-file': # Blocking operation, like downloading
if request.fs.isdir(destination):
if notebook.get('name'):
destination += '/%(name)s.csv' % notebook
else:
destination += '/%(type)s-%(id)s.csv' % notebook
if overwrite and request.fs.exists(destination):
request.fs.do_as_user(request.user.username, request.fs.rmtree, destination)
response['watch_url'] = api.export_data_as_hdfs_file(snippet, destination, overwrite)
response['status'] = 0
request.audit = {
'operation': 'EXPORT',
'operationText': 'User %s exported to HDFS destination: %s' % (request.user.username, destination),
'allowed': True
}
elif data_format == 'hive-table':
if is_embedded:
sql, success_url = api.export_data_as_table(notebook, snippet, destination)
task = make_notebook(
name=_('Export %s query to table %s') % (snippet['type'], destination),
description=_('Query %s to %s') % (_get_snippet_name(notebook), success_url),
editor_type=snippet['type'],
statement=sql,
status='ready',
database=snippet['database'],
on_success_url=success_url,
last_executed=start_time,
is_task=True
)
response = task.execute(request)
else:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
response['watch_url'] = reverse('notebook:execute_and_watch') + '?action=save_as_table&notebook=' + str(notebook_id) + \
'&snippet=0&destination=' + destination
response['status'] = 0
request.audit = {
'operation': 'EXPORT',
'operationText': 'User %s exported to Hive table: %s' % (request.user.username, destination),
'allowed': True
}
elif data_format == 'hdfs-directory':
if destination.lower().startswith("abfs"):
destination = abfspath(destination)
if request.fs.exists(destination) and request.fs.listdir_stats(destination):
raise PopupException(_('The destination is not an empty directory!'))
if is_embedded:
sql, success_url = api.export_large_data_to_hdfs(notebook, snippet, destination)
task = make_notebook(
name=_('Export %s query to directory') % snippet['type'],
description=_('Query %s to %s') % (_get_snippet_name(notebook), success_url),
editor_type=snippet['type'],
statement=sql,
status='ready-execute',
database=snippet['database'],
on_success_url=success_url,
last_executed=start_time,
is_task=True
)
response = task.execute(request)
else:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
response['watch_url'] = reverse('notebook:execute_and_watch') + '?action=insert_as_query&notebook=' + str(notebook_id) + \
'&snippet=0&destination=' + destination
response['status'] = 0
request.audit = {
'operation': 'EXPORT',
'operationText': 'User %s exported to HDFS directory: %s' % (request.user.username, destination),
'allowed': True
}
elif data_format in ('search-index', 'dashboard'):
# Open the result in the Dashboard via a SQL sub-query or the Import wizard (quick vs scalable)
if is_embedded:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
if data_format == 'dashboard':
engine = notebook['type'].replace('query-', '')
response['watch_url'] = reverse(
'dashboard:browse',
kwargs={'name': notebook_id}
) + '?source=query&engine=%(engine)s' % {'engine': engine}
response['status'] = 0
else:
sample = get_api(request, snippet).fetch_result(notebook, snippet, rows=4, start_over=True)
for col in sample['meta']:
col['type'] = HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')
response['status'] = 0
response['id'] = notebook_id
response['name'] = _get_snippet_name(notebook)
response['source_type'] = 'query'
response['target_type'] = 'index'
response['target_path'] = destination
response['sample'] = list(sample['data'])
response['columns'] = [
Field(col['name'], col['type']).to_dict() for col in sample['meta']
]
else:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
response['watch_url'] = reverse('notebook:execute_and_watch') + '?action=index_query&notebook=' + str(notebook_id) + \
'&snippet=0&destination=' + destination
response['status'] = 0
if response.get('status') != 0:
response['message'] = _('Exporting result failed.')
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def statement_risk(request):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
interface = request.POST.get('interface', OPTIMIZER.INTERFACE.get())
api = get_api(request, snippet)
response['query_complexity'] = api.statement_risk(interface, notebook, snippet)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def statement_compatibility(request):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
interface = request.POST.get('interface', OPTIMIZER.INTERFACE.get())
source_platform = request.POST.get('sourcePlatform')
target_platform = request.POST.get('targetPlatform')
api = get_api(request, snippet)
response['query_compatibility'] = api.statement_compatibility(
interface,
notebook,
snippet,
source_platform=source_platform,
target_platform=target_platform
)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def statement_similarity(request):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
interface = request.POST.get('interface', OPTIMIZER.INTERFACE.get())
source_platform = request.POST.get('sourcePlatform')
api = get_api(request, snippet)
response['statement_similarity'] = api.statement_similarity(interface, notebook, snippet, source_platform=source_platform)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def get_external_statement(request):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
if snippet.get('statementType') == 'file':
response['statement'] = _get_statement_from_file(request.user, request.fs, snippet)
elif snippet.get('statementType') == 'document':
notebook = Notebook(Document2.objects.get_by_uuid(user=request.user, uuid=snippet['associatedDocumentUuid'], perm_type='read'))
response['statement'] = notebook.get_str()
response['status'] = 0
return JsonResponse(response)
def _get_statement_from_file(user, fs, snippet):
script_path = snippet['statementPath']
if script_path:
script_path = script_path.replace('hdfs://', '')
if fs.do_as_user(user, fs.isfile, script_path):
return fs.do_as_user(user, fs.read, script_path, 0, 16 * 1024 ** 2)
@require_POST
@api_error_handler
def describe(request, database, table=None, column=None):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
source_type = request.POST.get('source_type', '')
snippet = {'type': source_type}
patch_snippet_for_connector(snippet)
describe = get_api(request, snippet).describe(notebook, snippet, database, table, column=column)
response.update(describe)
return JsonResponse(response)
def _get_snippet(user, notebook, snippet, operation_id):
if operation_id or not snippet:
nb_doc = Document2.objects.get_by_uuid(user=user, uuid=operation_id or notebook.get('uuid'))
notebook = Notebook(document=nb_doc).get_data()
snippet = notebook['snippets'][0]
return snippet
| 35.166829
| 170
| 0.700466
|
a126c0b0ac01a98766c9969f91f94228a342b6c4
| 349
|
py
|
Python
|
IPython/kernel/__init__.py
|
dchichkov/ipython
|
8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4
|
[
"BSD-3-Clause-Clear"
] | 1
|
2018-09-24T13:45:40.000Z
|
2018-09-24T13:45:40.000Z
|
IPython/kernel/__init__.py
|
dchichkov/ipython
|
8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4
|
[
"BSD-3-Clause-Clear"
] | 3
|
2015-04-01T13:14:57.000Z
|
2015-05-26T16:01:37.000Z
|
IPython/kernel/__init__.py
|
dchichkov/ipython
|
8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4
|
[
"BSD-3-Clause-Clear"
] | 1
|
2015-05-17T14:14:26.000Z
|
2015-05-17T14:14:26.000Z
|
"""IPython.kernel has been replaced by IPython.parallel.
The previous version of IPython's parallel library was located at this
location (IPython.kernel). It has been moved to the IPython.parallel
subpackage and has been refactored to use zeromq/pyzmq instead of twisted.
Please see INSERT URL for further details.
"""
raise ImportError(__doc__)
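# Illustrative note (not part of the original file): any "import IPython.kernel"
# now raises the ImportError above. A minimal sketch of the replacement import,
# assuming a running ipcluster, would be:
#
#   from IPython.parallel import Client
#   rc = Client()     # connect to the default cluster profile
#   dview = rc[:]     # a DirectView over all engines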
| 31.727273
| 74
| 0.799427
|
119e4ae0aca4c8cfd392f2d5fc310aa1caeb25ef
| 5,567
|
py
|
Python
|
myneighborhood/settings.py
|
nimowairimu/My-neighborhood
|
23e66f221a0bc864dcc12309d02079dbfce6123c
|
[
"MIT"
] | null | null | null |
myneighborhood/settings.py
|
nimowairimu/My-neighborhood
|
23e66f221a0bc864dcc12309d02079dbfce6123c
|
[
"MIT"
] | null | null | null |
myneighborhood/settings.py
|
nimowairimu/My-neighborhood
|
23e66f221a0bc864dcc12309d02079dbfce6123c
|
[
"MIT"
] | null | null | null |
"""
Django settings for myneighborhood project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import cloudinary
import cloudinary.uploader
import cloudinary.api
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
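# Illustrative sketch (values are placeholders, not taken from the project): the
# config() calls above expect a .env file roughly like the following.
#
#   MODE=dev
#   SECRET_KEY=replace-me
#   DEBUG=True
#   DB_NAME=myhood
#   DB_USER=postgres
#   DB_PASSWORD=postgres
#   DB_HOST=127.0.0.1
#   DATABASE_URL=postgres://user:pass@host:5432/dbname
#   ALLOWED_HOSTS=127.0.0.1,localhost,.herokuapp.com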
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'django-insecure-g@m9(mw^nhf2nf*feehjcxy77p2r2#7==*uwbtr1qu#w=8dr04'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myhood',
'crispy_forms',
'bootstrap4',
'cloudinary',
'rest_framework',
# 'account',
# 'tinymce',
# 'pyuploadcare.dj',
'django.contrib.sites',
]
SITE_ID = 1
UPLOADCARE = {
'pub_key': '2b709bca64245dd9e55e',
'secret': '0a60851de5f3db2dc728',
}
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myneighborhood.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myneighborhood.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'myhood',
# 'USER': 'nimo',
# 'PASSWORD':'kadesho62',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
cloudinary.config(
cloud_name = "dbdaabcxr",
api_key = "388228873379869",
api_secret = "7jQEu12De26r0srjbzqrxUJMeMw"
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning'
}
LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'index'
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# configuring the location for media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Configure Django App for Heroku.
django_heroku.settings(locals())
| 25.420091
| 91
| 0.694629
|
b893ff291c15e86162b5a330575e4a2abddbc2f2
| 809
|
py
|
Python
|
Final-Term/2-A/fitbit_max5.py
|
ISKU/DataScience
|
a94aeb9316765db47a3ee2ae383f9073a73de2a2
|
[
"Apache-2.0"
] | 1
|
2017-06-27T12:14:14.000Z
|
2017-06-27T12:14:14.000Z
|
Final-Term/2-A/fitbit_max5.py
|
ISKU/DataScience
|
a94aeb9316765db47a3ee2ae383f9073a73de2a2
|
[
"Apache-2.0"
] | null | null | null |
Final-Term/2-A/fitbit_max5.py
|
ISKU/DataScience
|
a94aeb9316765db47a3ee2ae383f9073a73de2a2
|
[
"Apache-2.0"
] | null | null | null |
import csv
import json
import operator
from pathlib import Path
fitbit_file = open('fitbit.csv', 'r')
fitbit = csv.DictReader(fitbit_file)
fitbitmax = []
for info in fitbit :
fitbitmax.append(info)
for i, line in enumerate(fitbitmax):
fitbitmax[i]['step'] = int(fitbitmax[i]['step'])
print('step')
fitbitmax.sort(key = operator.itemgetter('step'), reverse = False)
for i in range(0, 5, 1):
print(fitbitmax[i]['name'] + ", " + fitbitmax[i]['step'] + "\n")
#print('heart')
#result.sort(key = operator.itemgetter('heart'), reverse = True)
#for i in range(0, 5, 1):
# print(result[i]['name'] + ", " + result[i]['heart'] + "\n")
#print('sleep')
#result.sort(key = operator.itemgetter('sleep'), reverse = True)
#for i in range(0, 5, 1):
# print(result[i]['name'] + ", " + result[i]['sleep'] + "\n")
| 24.515152
| 68
| 0.629172
|
fb70546f846db2b6b28b62d4c007d1cecf167025
| 5,016
|
py
|
Python
|
lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri.py
|
shift-left-test/meta-shift
|
effce9bea894f990703cc047157e3f30d53d9365
|
[
"MIT"
] | 2
|
2022-01-19T02:39:43.000Z
|
2022-02-07T01:58:17.000Z
|
lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri.py
|
shift-left-test/meta-shift
|
effce9bea894f990703cc047157e3f30d53d9365
|
[
"MIT"
] | null | null | null |
lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri.py
|
shift-left-test/meta-shift
|
effce9bea894f990703cc047157e3f30d53d9365
|
[
"MIT"
] | null | null | null |
from shift_oelint_adv.cls_rule import Rule
from shift_oelint_parser.cls_item import Variable
from shift_oelint_parser.helper_files import get_scr_components
from shift_oelint_parser.parser import INLINE_BLOCK
class VarSRCUriOptions(Rule):
def __init__(self):
super(VarSRCUriOptions, self).__init__(id='oelint.vars.srcurioptions',
severity='warning',
message='<FOO>')
self._general_options = [
'apply',
'destsuffix',
'name',
'patchdir',
'striplevel',
'subdir',
'unpack',
]
self._valid_options = {
'az': [],
'bzr': [
'protocol',
'scmdata',
],
'crcc': [
'module',
'proto',
'vob',
],
'cvs': [
'date',
'fullpath',
'localdir',
'method',
'module',
'norecurse',
'port',
'rsh',
'scmdata',
'tag',
],
'file': [
'downloadfilename',
],
'ftp': [
'downloadfilename',
],
'git': [
'branch',
'destsuffix',
'nobranch',
'nocheckout',
'protocol',
'rebaseable',
'rev',
'subdir',
'subpath',
'tag',
'usehead',
'user',
],
'gitsm': [
'branch',
'destsuffix',
'nobranch',
'nocheckout',
'protocol',
'rebaseable',
'rev',
'subpath',
'tag',
'usehead',
],
'gitannex': [],
'hg': [
'module',
'rev',
'scmdata',
'protocol',
],
'http': [
'downloadfilename',
],
'https': [
'downloadfilename',
],
'osc': [
'module',
'protocol',
'rev',
],
'p4': [
'revision',
],
'repo': [
'branch',
'manifest',
'protocol',
],
'ssh': [],
's3': [
'downloadfilename',
],
'sftp': [
'downloadfilename',
'protocol',
],
'npm': [
'name',
'noverify',
'version',
],
'npmsw': [
'dev',
],
'svn': [
'module',
'path_spec',
'protocol',
'rev',
'scmdata',
'ssh',
'transportuser',
],
}
def __analyse(self, item, _input, _index):
_url = get_scr_components(_input)
res = []
if 'scheme' not in _url:
return res # pragma: no cover
# URL parsing fails for certain types of file:// URLs,
# so ignore those
if _url['scheme'] not in self._valid_options.keys() and not _input.strip().startswith('file://') and _url['scheme']:
res += self.finding(item.Origin, item.InFileLine + _index,
'Fetcher \'{a}\' is not known'.format(a=_url['scheme']))
else:
for k, v in _url['options'].items():
if _url['scheme'] not in self._valid_options:
continue # pragma: no cover
if k == 'type' and v == 'kmeta':
continue # linux-yocto uses this option to indicate kernel metadata sources
if k not in self._valid_options[_url['scheme']] + self._general_options:
res += self.finding(item.Origin, item.InFileLine + _index,
'Option \'{a}\' is not known with this fetcher type'.format(a=k))
return res
def check(self, _file, stash):
res = []
items = stash.GetItemsFor(filename=_file, classifier=Variable.CLASSIFIER,
attribute=Variable.ATTR_VAR, attributeValue='SRC_URI')
for item in items:
if any([item.Flag.endswith(x) for x in ['md5sum', 'sha256sum']]):
# These are just the hashes
continue
lines = [y.strip('"') for y in item.get_items() if y]
for x in lines:
if x == INLINE_BLOCK:
continue
res += self.__analyse(item, x, lines.index(x))
return res
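# Illustrative sketch (recipe content is made up): the kind of SRC_URI entries this
# rule inspects. The first would pass; the second would be flagged because 'foo' is
# not a known option for the git fetcher.
#
#   SRC_URI = "git://git.example.com/project.git;protocol=https;branch=master"
#   SRC_URI = "git://git.example.com/project.git;protocol=https;foo=bar"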
| 30.585366
| 124
| 0.381579
|
5d17bf7a3ca27ded44e36e809fd44bde4ca81c6a
| 8,809
|
py
|
Python
|
evaluation/evaluation.py
|
karamsa/GreatSQL
|
7d9cd7eadbffbaef2dbf47a823a5818482bdf623
|
[
"BSD-3-Clause"
] | 4
|
2021-02-04T14:01:05.000Z
|
2022-03-23T12:47:20.000Z
|
evaluation/evaluation.py
|
karamsa/GreatSQL
|
7d9cd7eadbffbaef2dbf47a823a5818482bdf623
|
[
"BSD-3-Clause"
] | null | null | null |
evaluation/evaluation.py
|
karamsa/GreatSQL
|
7d9cd7eadbffbaef2dbf47a823a5818482bdf623
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import json
from argparse import ArgumentParser
from tqdm import tqdm
from collections import OrderedDict
def load_normal_file(path):
array = []
with open(path, 'r', encoding='utf-8') as fp:
for cnt, line in enumerate(fp):
array.append(line.strip())
return array
def load_json_file(path):
data = []
with open(path, 'r', encoding='utf-8') as row:
data = json.load(row)
return data
def eval_query_components(ground_truth, predicted_json):
# Make single components matching
select_cl = eval_single_component(ground_truth["select_columns"], predicted_json["select_columns"])
tables_cl = eval_single_component(ground_truth["from_tables"], predicted_json["from_tables"])
where_cl = eval_single_component(ground_truth["where"], predicted_json["where"])
group_by_cl = eval_single_component(ground_truth["group_by"], predicted_json["group_by"])
having_cl = eval_single_component(ground_truth["having"], predicted_json["having"])
order_by_cl = eval_single_component(ground_truth["order_by"], predicted_json["order_by"])
limit_cl = eval_single_component(ground_truth["limit"], predicted_json["limit"])
return select_cl, tables_cl, where_cl, group_by_cl, having_cl, order_by_cl, limit_cl
def eval_single_component(source_item, predicted_item):
source_item = json.dumps(source_item, sort_keys=True)
predicted_item = json.dumps(predicted_item, sort_keys=True)
source_item = json.loads(source_item.lower())
predicted_item = json.loads(predicted_item.lower())
return check_predictions(source_item, predicted_item)
def avg_accuracies(*accuracies):
return sum(accuracies)/len(accuracies)
def check_predictions(source_item, predicted_item):
try:
if len(predicted_item) != len(source_item):
return 0
seen_predicted_items = []
for i, row in enumerate(predicted_item):
if row in seen_predicted_items:
return 0
# print(str(row))
if "value\':" in str(row) and len(str(row["value"]))>1 and str(row["value"])[0] == "{" and "select_columns" in row["value"]:
select_cl, tables_cl, where_cl, group_by_cl, having_cl, order_by_cl, limit_cl = eval_query_components(source_item[i]["value"], row["value"])
avg = avg_accuracies(select_cl, tables_cl, where_cl, group_by_cl, having_cl, order_by_cl, limit_cl)
if avg != 1.0:
return 0
elif row not in source_item:
return 0
seen_predicted_items.append(row)
except Exception as e:
print(source_item)
print(predicted_item)
return 0
return 1
def check(json_object):
res = OrderedDict()
for k, v in sorted(json_object.items()):
if isinstance(v, dict):
res[k] = check(v)  # recurse into nested dicts
else:
res[k] = v
return res
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('dataset_file_path', help='source file for the prediction(train, dev or test set file)')
parser.add_argument('predicted_sqls_file_path', help='SQL predictions by the model. One SQL query per line')
parser.add_argument('predicted_components_file_path', help='component predictions by the model. Has the same structure of the ground truth of the dataset files')
args = parser.parse_args()
all_select_cl = []
all_tables_cl = []
all_where_cl = []
all_group_by_cl = []
all_having_cl = []
all_order_by_cl = []
all_limit_cl = []
all_exact_match = []
all_string_exact_match = []
print("=======================Files load start=======================")
source_file = load_json_file(args.dataset_file_path) # train, dev or test set file
predicted_sqls = load_normal_file(args.predicted_sqls_file_path) # the predicted SQLs file, contains one SQL query per line
predicted_components = load_json_file(args.predicted_components_file_path) # the predicted components file (json format)
print("=======================Files load end=======================")
print(" ")
print(" ")
print("=======================Evaluation start=======================")
total_samples = len(source_file)
total_sql_queries = len(predicted_sqls)
total_json_components_queries = len(predicted_components)
# Check the length of each file and make sure they all have the same length
if (total_samples == total_json_components_queries):
# Check if no SEM(String Exact Match) is used
if (total_samples != total_sql_queries):
total_sql_queries = total_json_components_queries
predicted_sqls = [""] * total_json_components_queries
print("SQL gold file doesn't have the same size as source file. The SEM metric will be ignored!")
print(" ")
# loop over files
# get each sample from the source file, the predicted String SQL query and the predicted json components
for sample, predicted_sql, predicted_json in tqdm(zip(source_file, predicted_sqls, predicted_components), total = total_samples):
#Get the SQL label from the source file
source_sql = sample["sql"].lower().strip()
predicted_sql = predicted_sql.lower().strip()
# Get the ground truth from the source file
ground_truth = sample["ground_truth"]
# Remove the schema_index if it exists; we don't need it in the evaluation
if "schema_index" in ground_truth:
del ground_truth["schema_index"]
if "schema_index" in predicted_json:
del predicted_json["schema_index"]
# Make components matching
select_cl, tables_cl, where_cl, group_by_cl, having_cl, order_by_cl, limit_cl = eval_query_components(ground_truth, predicted_json)
# components_match
all_select_cl.append(select_cl)
all_tables_cl.append(tables_cl)
all_where_cl.append(where_cl)
all_group_by_cl.append(group_by_cl)
all_having_cl.append(having_cl)
all_order_by_cl.append(order_by_cl)
all_limit_cl.append(limit_cl)
# ground_truth_d = json.loads(json.dumps(ground_truth, sort_keys=True))
# predicted_json_d = json.loads(json.dumps(predicted_json, sort_keys=True))
# exact_match = ground_truth_d == predicted_json_d
# Make exact matching
avg = avg_accuracies(select_cl, tables_cl, where_cl, group_by_cl, having_cl, order_by_cl, limit_cl)
exact_match = 1
if avg != 1.0:
exact_match = 0
# Make String Exact matching
string_exact_match = source_sql == predicted_sql
all_exact_match.append(exact_match)
all_string_exact_match.append(string_exact_match)
print("=======================Evaluation end=======================")
print(" ")
print(" ")
print("=======================Global Accuracy=======================")
em_accuracy = (sum(all_exact_match) / len(all_exact_match))*100
sem_accuracy = (sum(all_string_exact_match) / len(all_string_exact_match))*100
print(json.dumps({
'em_accuracy': em_accuracy,
'sem_accuracy': sem_accuracy,
}, indent=2))
print("=======================Partial Accuracy=======================")
select_accuracy= (sum(all_select_cl) / len(all_select_cl))*100
tables_accuracy= (sum(all_tables_cl) / len(all_tables_cl))*100
where_accuracy= (sum(all_where_cl) / len(all_where_cl))*100
group_by_accuracy= (sum(all_group_by_cl) / len(all_group_by_cl))*100
having_accuracy= (sum(all_having_cl) / len(all_having_cl))*100
order_by_accuracy= (sum(all_order_by_cl) / len(all_order_by_cl))*100
limit_accuracy= (sum(all_limit_cl) / len(all_limit_cl))*100
print(json.dumps({
'cm_accuracy': {
'select_accuracy': select_accuracy,
'tables_accuracy': tables_accuracy,
'where_accuracy': where_accuracy,
'group_by_accuracy': group_by_accuracy,
'having_accuracy': having_accuracy,
'order_by_accuracy': order_by_accuracy,
'limit_accuracy': limit_accuracy
}
}, indent=2))
else:
print("The files have not the same number of queries. Please check that predicted queries are included at least in the components files")
exit()
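# Illustrative usage (file names are placeholders):
#
#   python evaluation.py dev.json predicted_sqls.txt predicted_components.json
#
# where dev.json is the dataset file, predicted_sqls.txt holds one SQL query per
# line, and predicted_components.json mirrors the ground-truth component structure.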
| 36.251029
| 165
| 0.632875
|
af1b2ca02ee9d112a1853e19dc7df6fc8cea7d7b
| 779
|
py
|
Python
|
K-Nearest Neighbor/BreastCancer/breast_cancer.py
|
paramkpr/MessingWithML
|
aa5a811cb8171cc3798f3fe8b26ae16e8ea8a8b4
|
[
"MIT"
] | 2
|
2020-08-08T11:46:42.000Z
|
2020-08-08T11:53:49.000Z
|
K-Nearest Neighbor/BreastCancer/breast_cancer.py
|
psrth/MessingWithML
|
92ad9efd18decd020cfcffb56bc84de16f9aaf02
|
[
"MIT"
] | null | null | null |
K-Nearest Neighbor/BreastCancer/breast_cancer.py
|
psrth/MessingWithML
|
92ad9efd18decd020cfcffb56bc84de16f9aaf02
|
[
"MIT"
] | 2
|
2018-04-30T07:01:49.000Z
|
2020-06-26T11:03:08.000Z
|
import numpy as np
from sklearn import preprocessing, model_selection, neighbors
import pandas as pd
df = (pd.read_csv('breast-cancer-wisconsin.data.txt'))
df.replace('?', -99999, inplace=True)
df.drop(['id'], axis=1, inplace=True)
X = np.array(df.drop(['class'], axis=1))
y = np.array(df['class'])
X = preprocessing.scale(X)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print(accuracy, 'is the accuracy of this algorithm.')
# noinspection PyPep8
example_values = np.array([4,1,1,1,1,2,1,2,1])
example_values = example_values.reshape(1, -1)
prediction = clf.predict(example_values)
print(prediction, '(2 for benign, 4 for malignant)')
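# Illustrative sketch (not part of the original script): X is standardized with
# preprocessing.scale(), but example_values is passed to predict() unscaled. One
# way to keep both on the same scale is to fit a StandardScaler on the raw
# features and reuse it for new samples:
#
#   from sklearn.preprocessing import StandardScaler
#   raw = np.array(df.drop(['class'], axis=1), dtype=float)
#   scaler = StandardScaler().fit(raw)
#   print(clf.predict(scaler.transform(example_values)))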
| 27.821429
| 88
| 0.735558
|
cbbd94471220853e4d088b3cde67e8c73860723c
| 25,330
|
py
|
Python
|
git/test/test_docs.py
|
csantizo/GitPython
|
cf8dc259fcc9c1397ea67cec3a6a4cb5816e3e68
|
[
"BSD-3-Clause"
] | 1
|
2019-11-01T11:45:22.000Z
|
2019-11-01T11:45:22.000Z
|
git/test/test_docs.py
|
99/GitPython
|
32da7feb496ef31c48b5cbe4e37a4c68ed1b7dd5
|
[
"BSD-3-Clause"
] | 3
|
2020-02-11T23:03:45.000Z
|
2021-06-10T18:05:11.000Z
|
git/test/test_docs.py
|
99/GitPython
|
32da7feb496ef31c48b5cbe4e37a4c68ed1b7dd5
|
[
"BSD-3-Clause"
] | 1
|
2019-11-01T11:38:54.000Z
|
2019-11-01T11:38:54.000Z
|
# -*- coding: utf-8 -*-
# test_git.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import os
from git.test.lib import TestBase
from git.test.lib.helper import with_rw_directory
import os.path as osp
class Tutorials(TestBase):
def tearDown(self):
import gc
gc.collect()
# @skipIf(HIDE_WINDOWS_KNOWN_ERRORS, ## ACTUALLY skipped by `git.submodule.base#L869`.
# "FIXME: helper.wrapper fails with: PermissionError: [WinError 5] Access is denied: "
# "'C:\\Users\\appveyor\\AppData\\Local\\Temp\\1\\test_work_tree_unsupportedryfa60di\\master_repo\\.git\\objects\\pack\\pack-bc9e0787aef9f69e1591ef38ea0a6f566ec66fe3.idx") # noqa E501
@with_rw_directory
def test_init_repo_object(self, rw_dir):
# [1-test_init_repo_object]
from git import Repo
join = osp.join
# rorepo is a Repo instance pointing to the git-python repository.
# For all you know, the first argument to Repo is a path to the repository
# you want to work with
repo = Repo(self.rorepo.working_tree_dir)
assert not repo.bare
# ![1-test_init_repo_object]
# [2-test_init_repo_object]
bare_repo = Repo.init(join(rw_dir, 'bare-repo'), bare=True)
assert bare_repo.bare
# ![2-test_init_repo_object]
# [3-test_init_repo_object]
repo.config_reader() # get a config reader for read-only access
with repo.config_writer(): # get a config writer to change configuration
pass # call release() to be sure changes are written and locks are released
# ![3-test_init_repo_object]
# [4-test_init_repo_object]
assert not bare_repo.is_dirty() # check the dirty state
repo.untracked_files # retrieve a list of untracked files
# ['my_untracked_file']
# ![4-test_init_repo_object]
# [5-test_init_repo_object]
cloned_repo = repo.clone(join(rw_dir, 'to/this/path'))
assert cloned_repo.__class__ is Repo # clone an existing repository
assert Repo.init(join(rw_dir, 'path/for/new/repo')).__class__ is Repo
# ![5-test_init_repo_object]
# [6-test_init_repo_object]
with open(join(rw_dir, 'repo.tar'), 'wb') as fp:
repo.archive(fp)
# ![6-test_init_repo_object]
# repository paths
# [7-test_init_repo_object]
assert osp.isdir(cloned_repo.working_tree_dir) # directory with your work files
assert cloned_repo.git_dir.startswith(cloned_repo.working_tree_dir) # directory containing the git repository
assert bare_repo.working_tree_dir is None # bare repositories have no working tree
# ![7-test_init_repo_object]
# heads, tags and references
# heads are branches in git-speak
# [8-test_init_repo_object]
self.assertEqual(repo.head.ref, repo.heads.master, # head is a sym-ref pointing to master
"It's ok if TC not running from `master`.")
self.assertEqual(repo.tags['0.3.5'], repo.tag('refs/tags/0.3.5')) # you can access tags in various ways too
self.assertEqual(repo.refs.master, repo.heads['master']) # .refs provides all refs, ie heads ...
if 'TRAVIS' not in os.environ:
self.assertEqual(repo.refs['origin/master'], repo.remotes.origin.refs.master) # ... remotes ...
self.assertEqual(repo.refs['0.3.5'], repo.tags['0.3.5']) # ... and tags
# ![8-test_init_repo_object]
# create a new head/branch
# [9-test_init_repo_object]
new_branch = cloned_repo.create_head('feature') # create a new branch ...
assert cloned_repo.active_branch != new_branch # which wasn't checked out yet ...
self.assertEqual(new_branch.commit, cloned_repo.active_branch.commit) # pointing to the checked-out commit
# It's easy to let a branch point to the previous commit, without affecting anything else
# Each reference provides access to the git object it points to, usually commits
assert new_branch.set_commit('HEAD~1').commit == cloned_repo.active_branch.commit.parents[0]
# ![9-test_init_repo_object]
# create a new tag reference
# [10-test_init_repo_object]
past = cloned_repo.create_tag('past', ref=new_branch,
message="This is a tag-object pointing to %s" % new_branch.name)
self.assertEqual(past.commit, new_branch.commit) # the tag points to the specified commit
assert past.tag.message.startswith("This is") # and its object carries the message provided
now = cloned_repo.create_tag('now') # This is a tag-reference. It may not carry meta-data
assert now.tag is None
# ![10-test_init_repo_object]
# Object handling
# [11-test_init_repo_object]
assert now.commit.message != past.commit.message
# You can read objects directly through binary streams, no working tree required
assert (now.commit.tree / 'VERSION').data_stream.read().decode('ascii').startswith('2')
# You can traverse trees as well to handle all contained files of a particular commit
file_count = 0
tree_count = 0
tree = past.commit.tree
for item in tree.traverse():
file_count += item.type == 'blob'
tree_count += item.type == 'tree'
assert file_count and tree_count # we have accumulated all directories and files
self.assertEqual(len(tree.blobs) + len(tree.trees), len(tree)) # a tree is iterable on its children
# ![11-test_init_repo_object]
# remotes allow handling push, pull and fetch operations
# [12-test_init_repo_object]
from git import RemoteProgress
class MyProgressPrinter(RemoteProgress):
def update(self, op_code, cur_count, max_count=None, message=''):
print(op_code, cur_count, max_count, cur_count / (max_count or 100.0), message or "NO MESSAGE")
# end
self.assertEqual(len(cloned_repo.remotes), 1) # we have been cloned, so should be one remote
self.assertEqual(len(bare_repo.remotes), 0) # this one was just initialized
origin = bare_repo.create_remote('origin', url=cloned_repo.working_tree_dir)
assert origin.exists()
for fetch_info in origin.fetch(progress=MyProgressPrinter()):
print("Updated %s to %s" % (fetch_info.ref, fetch_info.commit))
# create a local branch at the latest fetched master. We specify the name statically, but you have all
# information to do it programmatically as well.
bare_master = bare_repo.create_head('master', origin.refs.master)
bare_repo.head.set_reference(bare_master)
assert not bare_repo.delete_remote(origin).exists()
# push and pull behave very similarly
# ![12-test_init_repo_object]
# index
# [13-test_init_repo_object]
self.assertEqual(new_branch.checkout(), cloned_repo.active_branch) # checking out branch adjusts the wtree
self.assertEqual(new_branch.commit, past.commit) # Now the past is checked out
new_file_path = osp.join(cloned_repo.working_tree_dir, 'my-new-file')
open(new_file_path, 'wb').close() # create new file in working tree
cloned_repo.index.add([new_file_path]) # add it to the index
# Commit the changes to deviate masters history
cloned_repo.index.commit("Added a new file in the past - for later merege")
# prepare a merge
master = cloned_repo.heads.master # right-hand side is ahead of us, in the future
merge_base = cloned_repo.merge_base(new_branch, master)  # allows for a three-way merge
cloned_repo.index.merge_tree(master, base=merge_base) # write the merge result into index
cloned_repo.index.commit("Merged past and now into future ;)",
parent_commits=(new_branch.commit, master.commit))
# now new_branch is ahead of master, which probably should be checked out and reset softly.
# note that all these operations didn't touch the working tree, as we managed it ourselves.
# This definitely requires you to know what you are doing :) !
assert osp.basename(new_file_path) in new_branch.commit.tree # new file is now in tree
master.commit = new_branch.commit # let master point to most recent commit
cloned_repo.head.reference = master # we adjusted just the reference, not the working tree or index
# ![13-test_init_repo_object]
# submodules
# [14-test_init_repo_object]
# create a new submodule and check it out on the spot, setup to track master branch of `bare_repo`
# As our GitPython repository has submodules already that point to github, make sure we don't
# interact with them
for sm in cloned_repo.submodules:
assert not sm.remove().exists() # after removal, the sm doesn't exist anymore
sm = cloned_repo.create_submodule('mysubrepo', 'path/to/subrepo', url=bare_repo.git_dir, branch='master')
# .gitmodules was written and added to the index, which is now being committed
cloned_repo.index.commit("Added submodule")
assert sm.exists() and sm.module_exists()  # this submodule is definitely available
sm.remove(module=True, configuration=False) # remove the working tree
assert sm.exists() and not sm.module_exists() # the submodule itself is still available
# update all submodules, non-recursively to save time, this method is very powerful, go have a look
cloned_repo.submodule_update(recursive=False)
assert sm.module_exists() # The submodules working tree was checked out by update
# ![14-test_init_repo_object]
@with_rw_directory
def test_references_and_objects(self, rw_dir):
# [1-test_references_and_objects]
import git
repo = git.Repo.clone_from(self._small_repo_url(), osp.join(rw_dir, 'repo'), branch='master')
heads = repo.heads
master = heads.master # lists can be accessed by name for convenience
master.commit # the commit pointed to by head called master
master.rename('new_name') # rename heads
master.rename('master')
# ![1-test_references_and_objects]
# [2-test_references_and_objects]
tags = repo.tags
tagref = tags[0]
tagref.tag # tags may have tag objects carrying additional information
tagref.commit # but they always point to commits
repo.delete_tag(tagref) # delete or
repo.create_tag("my_tag") # create tags using the repo for convenience
# ![2-test_references_and_objects]
# [3-test_references_and_objects]
head = repo.head # the head points to the active branch/ref
master = head.reference # retrieve the reference the head points to
master.commit # from here you use it as any other reference
# ![3-test_references_and_objects]
#
# [4-test_references_and_objects]
log = master.log()
log[0] # first (i.e. oldest) reflog entry
log[-1] # last (i.e. most recent) reflog entry
# ![4-test_references_and_objects]
# [5-test_references_and_objects]
new_branch = repo.create_head('new') # create a new one
new_branch.commit = 'HEAD~10' # set branch to another commit without changing index or working trees
repo.delete_head(new_branch) # delete an existing head - only works if it is not checked out
# ![5-test_references_and_objects]
# [6-test_references_and_objects]
new_tag = repo.create_tag('my_new_tag', message='my message')
# You cannot change the commit a tag points to. Tags need to be re-created
self.failUnlessRaises(AttributeError, setattr, new_tag, 'commit', repo.commit('HEAD~1'))
repo.delete_tag(new_tag)
# ![6-test_references_and_objects]
# [7-test_references_and_objects]
new_branch = repo.create_head('another-branch')
repo.head.reference = new_branch
# ![7-test_references_and_objects]
# [8-test_references_and_objects]
hc = repo.head.commit
hct = hc.tree
hc != hct # @NoEffect
hc != repo.tags[0] # @NoEffect
hc == repo.head.reference.commit # @NoEffect
# ![8-test_references_and_objects]
# [9-test_references_and_objects]
self.assertEqual(hct.type, 'tree') # preset string type, being a class attribute
assert hct.size > 0 # size in bytes
assert len(hct.hexsha) == 40
assert len(hct.binsha) == 20
# ![9-test_references_and_objects]
# [10-test_references_and_objects]
self.assertEqual(hct.path, '') # root tree has no path
assert hct.trees[0].path != '' # the first contained item has one though
self.assertEqual(hct.mode, 0o40000) # trees have the mode of a linux directory
self.assertEqual(hct.blobs[0].mode, 0o100644) # blobs have specific mode, comparable to a standard linux fs
# ![10-test_references_and_objects]
# [11-test_references_and_objects]
hct.blobs[0].data_stream.read() # stream object to read data from
hct.blobs[0].stream_data(open(osp.join(rw_dir, 'blob_data'), 'wb')) # write data to given stream
# ![11-test_references_and_objects]
# [12-test_references_and_objects]
repo.commit('master')
repo.commit('v0.8.1')
repo.commit('HEAD~10')
# ![12-test_references_and_objects]
# [13-test_references_and_objects]
fifty_first_commits = list(repo.iter_commits('master', max_count=50))
assert len(fifty_first_commits) == 50
# this will return commits 21-30 from the commit list as traversed backwards from master
ten_commits_past_twenty = list(repo.iter_commits('master', max_count=10, skip=20))
assert len(ten_commits_past_twenty) == 10
assert fifty_first_commits[20:30] == ten_commits_past_twenty
# ![13-test_references_and_objects]
# [14-test_references_and_objects]
headcommit = repo.head.commit
assert len(headcommit.hexsha) == 40
assert len(headcommit.parents) > 0
assert headcommit.tree.type == 'tree'
assert headcommit.author.name == 'Sebastian Thiel'
assert isinstance(headcommit.authored_date, int)
assert headcommit.committer.name == 'Sebastian Thiel'
assert isinstance(headcommit.committed_date, int)
assert headcommit.message != ''
# ![14-test_references_and_objects]
# [15-test_references_and_objects]
import time
time.asctime(time.gmtime(headcommit.committed_date))
time.strftime("%a, %d %b %Y %H:%M", time.gmtime(headcommit.committed_date))
# ![15-test_references_and_objects]
# [16-test_references_and_objects]
assert headcommit.parents[0].parents[0].parents[0] == repo.commit('master^^^')
# ![16-test_references_and_objects]
# [17-test_references_and_objects]
tree = repo.heads.master.commit.tree
assert len(tree.hexsha) == 40
# ![17-test_references_and_objects]
# [18-test_references_and_objects]
assert len(tree.trees) > 0 # trees are subdirectories
assert len(tree.blobs) > 0 # blobs are files
assert len(tree.blobs) + len(tree.trees) == len(tree)
# ![18-test_references_and_objects]
# [19-test_references_and_objects]
self.assertEqual(tree['smmap'], tree / 'smmap') # access by index and by sub-path
for entry in tree: # intuitive iteration of tree members
print(entry)
blob = tree.trees[0].blobs[0] # let's get a blob in a sub-tree
assert blob.name
assert len(blob.path) < len(blob.abspath)
self.assertEqual(tree.trees[0].name + '/' + blob.name, blob.path)  # this is how the relative blob path is generated
self.assertEqual(tree[blob.path], blob) # you can use paths like 'dir/file' in tree
# ![19-test_references_and_objects]
# [20-test_references_and_objects]
assert tree / 'smmap' == tree['smmap']
assert tree / blob.path == tree[blob.path]
# ![20-test_references_and_objects]
# [21-test_references_and_objects]
# This example shows the various types of allowed ref-specs
assert repo.tree() == repo.head.commit.tree
past = repo.commit('HEAD~5')
assert repo.tree(past) == repo.tree(past.hexsha)
self.assertEqual(repo.tree('v0.8.1').type, 'tree') # yes, you can provide any refspec - works everywhere
# ![21-test_references_and_objects]
# [22-test_references_and_objects]
assert len(tree) < len(list(tree.traverse()))
# ![22-test_references_and_objects]
# [23-test_references_and_objects]
index = repo.index
# The index contains all blobs in a flat list
assert len(list(index.iter_blobs())) == len([o for o in repo.head.commit.tree.traverse() if o.type == 'blob'])
# Access blob objects
for (path, stage), entry in index.entries.items(): # @UnusedVariable
pass
new_file_path = osp.join(repo.working_tree_dir, 'new-file-name')
open(new_file_path, 'w').close()
index.add([new_file_path]) # add a new file to the index
index.remove(['LICENSE']) # remove an existing one
assert osp.isfile(osp.join(repo.working_tree_dir, 'LICENSE')) # working tree is untouched
self.assertEqual(index.commit("my commit message").type, 'commit') # commit changed index
repo.active_branch.commit = repo.commit('HEAD~1') # forget last commit
from git import Actor
author = Actor("An author", "author@example.com")
committer = Actor("A committer", "committer@example.com")
# commit by commit message and author and committer
index.commit("my commit message", author=author, committer=committer)
# ![23-test_references_and_objects]
# [24-test_references_and_objects]
from git import IndexFile
# loads a tree into a temporary index, which exists just in memory
IndexFile.from_tree(repo, 'HEAD~1')
# merge two trees three-way into memory
merge_index = IndexFile.from_tree(repo, 'HEAD~10', 'HEAD', repo.merge_base('HEAD~10', 'HEAD'))
# and persist it
merge_index.write(osp.join(rw_dir, 'merged_index'))
# ![24-test_references_and_objects]
# [25-test_references_and_objects]
empty_repo = git.Repo.init(osp.join(rw_dir, 'empty'))
origin = empty_repo.create_remote('origin', repo.remotes.origin.url)
assert origin.exists()
assert origin == empty_repo.remotes.origin == empty_repo.remotes['origin']
origin.fetch() # assure we actually have data. fetch() returns useful information
# Setup a local tracking branch of a remote branch
empty_repo.create_head('master', origin.refs.master) # create local branch "master" from remote "master"
empty_repo.heads.master.set_tracking_branch(origin.refs.master) # set local "master" to track remote "master"
empty_repo.heads.master.checkout() # checkout local "master" to working tree
# Three above commands in one:
empty_repo.create_head('master', origin.refs.master).set_tracking_branch(origin.refs.master).checkout()
# rename remotes
origin.rename('new_origin')
# push and pull behave similarly to `git push|pull`
origin.pull()
origin.push()
# assert not empty_repo.delete_remote(origin).exists() # create and delete remotes
# ![25-test_references_and_objects]
# [26-test_references_and_objects]
assert origin.url == repo.remotes.origin.url
with origin.config_writer as cw:
cw.set("pushurl", "other_url")
# Please note that in Python 2, writing origin.config_writer.set(...) is totally safe.
# In Python 3, __del__ calls can be delayed, so the changes may not be written in time.
# ![26-test_references_and_objects]
# [27-test_references_and_objects]
hcommit = repo.head.commit
hcommit.diff() # diff tree against index
hcommit.diff('HEAD~1') # diff tree against previous tree
hcommit.diff(None) # diff tree against working tree
index = repo.index
index.diff() # diff index against itself yielding empty diff
index.diff(None) # diff index against working copy
index.diff('HEAD') # diff index against current HEAD tree
# ![27-test_references_and_objects]
# [28-test_references_and_objects]
# Traverse added Diff objects only
for diff_added in hcommit.diff('HEAD~1').iter_change_type('A'):
print(diff_added)
# ![28-test_references_and_objects]
# [29-test_references_and_objects]
# Reset our working tree 10 commits into the past
past_branch = repo.create_head('past_branch', 'HEAD~10')
repo.head.reference = past_branch
assert not repo.head.is_detached
# reset the index and working tree to match the pointed-to commit
repo.head.reset(index=True, working_tree=True)
# To detach your head, you have to point to a commit directly
repo.head.reference = repo.commit('HEAD~5')
assert repo.head.is_detached
# now our head points 15 commits into the past, whereas the working tree
# and index are 10 commits in the past
# ![29-test_references_and_objects]
# [30-test_references_and_objects]
# checkout the branch using git-checkout. It will fail as the working tree appears dirty
self.failUnlessRaises(git.GitCommandError, repo.heads.master.checkout)
repo.heads.past_branch.checkout()
# ![30-test_references_and_objects]
# [31-test_references_and_objects]
git = repo.git
git.checkout('HEAD', b="my_new_branch") # create a new branch
git.branch('another-new-one')
git.branch('-D', 'another-new-one') # pass strings for full control over argument order
git.for_each_ref() # '-' becomes '_' when calling it
# ![31-test_references_and_objects]
repo.git.clear_cache()
def test_submodules(self):
# [1-test_submodules]
repo = self.rorepo
sms = repo.submodules
assert len(sms) == 1
sm = sms[0]
self.assertEqual(sm.name, 'gitdb') # git-python has gitdb as single submodule ...
self.assertEqual(sm.children()[0].name, 'smmap') # ... which has smmap as single submodule
# The module is the repository referenced by the submodule
assert sm.module_exists() # the module is available, which doesn't have to be the case.
assert sm.module().working_tree_dir.endswith('gitdb')
# the submodule's absolute path is the module's path
assert sm.abspath == sm.module().working_tree_dir
self.assertEqual(len(sm.hexsha), 40) # Its sha defines the commit to checkout
assert sm.exists() # yes, this submodule is valid and exists
# read its configuration conveniently
assert sm.config_reader().get_value('path') == sm.path
self.assertEqual(len(sm.children()), 1) # query the submodule hierarchy
# ![1-test_submodules]
@with_rw_directory
def test_add_file_and_commit(self, rw_dir):
import git
repo_dir = osp.join(rw_dir, 'my-new-repo')
file_name = osp.join(repo_dir, 'new-file')
r = git.Repo.init(repo_dir)
# This function just creates an empty file ...
open(file_name, 'wb').close()
r.index.add([file_name])
r.index.commit("initial commit")
# ![test_add_file_and_commit]
| 51.171717 | 196 | 0.630043 |
f9b796dd0b8d7aefde42bfd75cf52ba80886f3d2 | 1,596 | py | Python | test.py | megvii-research/NBNet | 73112b185e022d0920f2f45c34c5bcf7c581d983 | ["Apache-2.0"] | 91 | 2021-04-17T12:17:47.000Z | 2022-03-31T12:51:22.000Z | test.py | megvii-research/NBNet | 73112b185e022d0920f2f45c34c5bcf7c581d983 | ["Apache-2.0"] | 10 | 2021-05-24T06:09:44.000Z | 2022-03-22T09:07:07.000Z | test.py | megvii-research/NBNet | 73112b185e022d0920f2f45c34c5bcf7c581d983 | ["Apache-2.0"] | 13 | 2021-04-19T02:48:06.000Z | 2022-03-31T12:51:21.000Z |
#!/usr/bin/env python3
from dataset import SIDDValData
from model import UNetD
import megengine.data as data
from utils import batch_PSNR
from tqdm import tqdm
import argparse
import pickle
import megengine
def test(args):
valid_dataset = SIDDValData(args.data)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=1, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
num_workers=8,
)
model = UNetD(3)
with open(args.checkpoint, "rb") as f:
state = pickle.load(f)
model.load_state_dict(state["state_dict"])
model.eval()
def valid_step(image, label):
pred = model(image)
pred = image - pred
psnr_it = batch_PSNR(pred, label)
return psnr_it
def valid(func, data_queue):
psnr_v = 0.
for step, (image, label) in tqdm(enumerate(data_queue)):
image = megengine.tensor(image)
label = megengine.tensor(label)
psnr_it = func(image, label)
psnr_v += psnr_it
psnr_v /= step + 1
return psnr_v
psnr_v = valid(valid_step, valid_dataloader)
print("PSNR: {:.3f}".format(psnr_v.item()) )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="MegEngine NBNet")
parser.add_argument("-d", "--data", default="/data/sidd", metavar="DIR", help="path to imagenet dataset")
parser.add_argument("-c", "--checkpoint", help="path to checkpoint")
args = parser.parse_args()
test(args)
# vim: ts=4 sw=4 sts=4 expandtab
| 28 | 109 | 0.648496 |
d9b612a09c9c66500daad47b91251690b701947d | 13,293 | py | Python | src/main.py | zzangjinsun/NLSPN_ECCV20 | ba33fa5d9ea62ca970026a145ab18fab76d79d4a | ["MIT"] | 181 | 2020-07-10T12:45:59.000Z | 2022-03-16T14:51:35.000Z | src/main.py | BinuxLiu/NLSPN_ECCV20 | 46dfa4c42fc380a1261b4f7175753b897ec9bfb0 | ["MIT"] | 40 | 2020-08-21T02:47:51.000Z | 2022-01-17T14:02:55.000Z | src/main.py | BinuxLiu/NLSPN_ECCV20 | 46dfa4c42fc380a1261b4f7175753b897ec9bfb0 | ["MIT"] | 42 | 2020-09-14T12:53:48.000Z | 2022-03-27T16:57:58.000Z |
"""
Non-Local Spatial Propagation Network for Depth Completion
Jinsun Park, Kyungdon Joo, Zhe Hu, Chi-Kuei Liu and In So Kweon
European Conference on Computer Vision (ECCV), Aug 2020
Project Page : https://github.com/zzangjinsun/NLSPN_ECCV20
Author : Jinsun Park (zzangjinsun@kaist.ac.kr)
======================================================================
main script for training and testing.
"""
from config import args as args_config
import time
import random
import os
os.environ["CUDA_VISIBLE_DEVICES"] = args_config.gpus
os.environ["MASTER_ADDR"] = 'localhost'
os.environ["MASTER_PORT"] = args_config.port
import json
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import utility
from model import get as get_model
from data import get as get_data
from loss import get as get_loss
from summary import get as get_summary
from metric import get as get_metric
# Multi-GPU and Mixed precision supports
# NOTE : Only 1 process per GPU is supported now
import torch.multiprocessing as mp
import torch.distributed as dist
import apex
from apex.parallel import DistributedDataParallel as DDP
from apex import amp
# Minimize randomness
torch.manual_seed(args_config.seed)
np.random.seed(args_config.seed)
random.seed(args_config.seed)
torch.cuda.manual_seed_all(args_config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def check_args(args):
if args.batch_size < args.num_gpus:
print("batch_size changed : {} -> {}".format(args.batch_size,
args.num_gpus))
args.batch_size = args.num_gpus
new_args = args
if args.pretrain is not None:
assert os.path.exists(args.pretrain), \
"file not found: {}".format(args.pretrain)
if args.resume:
checkpoint = torch.load(args.pretrain)
new_args = checkpoint['args']
new_args.test_only = args.test_only
new_args.pretrain = args.pretrain
new_args.dir_data = args.dir_data
new_args.resume = args.resume
return new_args
def train(gpu, args):
# Initialize workers
# NOTE : the worker with gpu=0 will do logging
dist.init_process_group(backend='nccl', init_method='env://',
world_size=args.num_gpus, rank=gpu)
torch.cuda.set_device(gpu)
# Prepare dataset
data = get_data(args)
data_train = data(args, 'train')
data_val = data(args, 'val')
sampler_train = DistributedSampler(
data_train, num_replicas=args.num_gpus, rank=gpu)
sampler_val = DistributedSampler(
data_val, num_replicas=args.num_gpus, rank=gpu)
batch_size = args.batch_size // args.num_gpus
loader_train = DataLoader(
dataset=data_train, batch_size=batch_size, shuffle=False,
num_workers=args.num_threads, pin_memory=True, sampler=sampler_train,
drop_last=True)
loader_val = DataLoader(
dataset=data_val, batch_size=1, shuffle=False,
num_workers=args.num_threads, pin_memory=True, sampler=sampler_val,
drop_last=False)
# Network
model = get_model(args)
net = model(args)
net.cuda(gpu)
if gpu == 0:
if args.pretrain is not None:
assert os.path.exists(args.pretrain), \
"file not found: {}".format(args.pretrain)
checkpoint = torch.load(args.pretrain)
net.load_state_dict(checkpoint['net'])
print('Load network parameters from : {}'.format(args.pretrain))
# Loss
loss = get_loss(args)
loss = loss(args)
loss.cuda(gpu)
# Optimizer
optimizer, scheduler = utility.make_optimizer_scheduler(args, net)
net = apex.parallel.convert_syncbn_model(net)
net, optimizer = amp.initialize(net, optimizer, opt_level=args.opt_level,
verbosity=0)
if gpu == 0:
if args.pretrain is not None:
if args.resume:
try:
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
amp.load_state_dict(checkpoint['amp'])
print('Resume optimizer, scheduler and amp '
'from : {}'.format(args.pretrain))
except KeyError:
print('State dicts for resume are not saved. '
'Use --save_full argument')
del checkpoint
net = DDP(net)
metric = get_metric(args)
metric = metric(args)
summary = get_summary(args)
if gpu == 0:
utility.backup_source_code(args.save_dir + '/code')
try:
os.makedirs(args.save_dir, exist_ok=True)
os.makedirs(args.save_dir + '/train', exist_ok=True)
os.makedirs(args.save_dir + '/val', exist_ok=True)
except OSError:
pass
if gpu == 0:
writer_train = summary(args.save_dir, 'train', args,
loss.loss_name, metric.metric_name)
writer_val = summary(args.save_dir, 'val', args,
loss.loss_name, metric.metric_name)
with open(args.save_dir + '/args.json', 'w') as args_json:
json.dump(args.__dict__, args_json, indent=4)
if args.warm_up:
warm_up_cnt = 0.0
warm_up_max_cnt = len(loader_train)+1.0
for epoch in range(1, args.epochs+1):
# Train
net.train()
sampler_train.set_epoch(epoch)
if gpu == 0:
current_time = time.strftime('%y%m%d@%H:%M:%S')
list_lr = []
for g in optimizer.param_groups:
list_lr.append(g['lr'])
print('=== Epoch {:5d} / {:5d} | Lr : {} | {} | {} ==='.format(
epoch, args.epochs, list_lr, current_time, args.save_dir
))
num_sample = len(loader_train) * loader_train.batch_size * args.num_gpus
if gpu == 0:
pbar = tqdm(total=num_sample)
log_cnt = 0.0
log_loss = 0.0
for batch, sample in enumerate(loader_train):
sample = {key: val.cuda(gpu) for key, val in sample.items()
if val is not None}
if epoch == 1 and args.warm_up:
warm_up_cnt += 1
for param_group in optimizer.param_groups:
lr_warm_up = param_group['initial_lr'] \
* warm_up_cnt / warm_up_max_cnt
param_group['lr'] = lr_warm_up
optimizer.zero_grad()
output = net(sample)
loss_sum, loss_val = loss(sample, output)
# Divide by batch size
loss_sum = loss_sum / loader_train.batch_size
loss_val = loss_val / loader_train.batch_size
with amp.scale_loss(loss_sum, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
if gpu == 0:
metric_val = metric.evaluate(sample, output, 'train')
writer_train.add(loss_val, metric_val)
log_cnt += 1
log_loss += loss_sum.item()
current_time = time.strftime('%y%m%d@%H:%M:%S')
error_str = '{:<10s}| {} | Loss = {:.4f}'.format(
'Train', current_time, log_loss / log_cnt)
if epoch == 1 and args.warm_up:
list_lr = []
for g in optimizer.param_groups:
list_lr.append(round(g['lr'], 6))
error_str = '{} | Lr Warm Up : {}'.format(error_str,
list_lr)
pbar.set_description(error_str)
pbar.update(loader_train.batch_size * args.num_gpus)
if gpu == 0:
pbar.close()
writer_train.update(epoch, sample, output)
if args.save_full or epoch == args.epochs:
state = {
'net': net.module.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'amp': amp.state_dict(),
'args': args
}
else:
state = {
'net': net.module.state_dict(),
'args': args
}
torch.save(state, '{}/model_{:05d}.pt'.format(args.save_dir, epoch))
# Val
torch.set_grad_enabled(False)
net.eval()
num_sample = len(loader_val) * loader_val.batch_size * args.num_gpus
if gpu == 0:
pbar = tqdm(total=num_sample)
log_cnt = 0.0
log_loss = 0.0
for batch, sample in enumerate(loader_val):
sample = {key: val.cuda(gpu) for key, val in sample.items()
if val is not None}
output = net(sample)
loss_sum, loss_val = loss(sample, output)
# Divide by batch size
loss_sum = loss_sum / loader_val.batch_size
loss_val = loss_val / loader_val.batch_size
if gpu == 0:
metric_val = metric.evaluate(sample, output, 'train')
writer_val.add(loss_val, metric_val)
log_cnt += 1
log_loss += loss_sum.item()
current_time = time.strftime('%y%m%d@%H:%M:%S')
error_str = '{:<10s}| {} | Loss = {:.4f}'.format(
'Val', current_time, log_loss / log_cnt)
pbar.set_description(error_str)
pbar.update(loader_val.batch_size * args.num_gpus)
if gpu == 0:
pbar.close()
writer_val.update(epoch, sample, output)
print('')
writer_val.save(epoch, batch, sample, output)
torch.set_grad_enabled(True)
scheduler.step()
def test(args):
# Prepare dataset
data = get_data(args)
data_test = data(args, 'test')
loader_test = DataLoader(dataset=data_test, batch_size=1,
shuffle=False, num_workers=args.num_threads)
# Network
model = get_model(args)
net = model(args)
net.cuda()
if args.pretrain is not None:
assert os.path.exists(args.pretrain), \
"file not found: {}".format(args.pretrain)
checkpoint = torch.load(args.pretrain)
key_m, key_u = net.load_state_dict(checkpoint['net'], strict=False)
if key_u:
print('Unexpected keys :')
print(key_u)
if key_m:
print('Missing keys :')
print(key_m)
raise KeyError
net = nn.DataParallel(net)
metric = get_metric(args)
metric = metric(args)
summary = get_summary(args)
try:
os.makedirs(args.save_dir, exist_ok=True)
os.makedirs(args.save_dir + '/test', exist_ok=True)
except OSError:
pass
writer_test = summary(args.save_dir, 'test', args, None, metric.metric_name)
net.eval()
num_sample = len(loader_test)*loader_test.batch_size
pbar = tqdm(total=num_sample)
t_total = 0
for batch, sample in enumerate(loader_test):
sample = {key: val.cuda() for key, val in sample.items()
if val is not None}
t0 = time.time()
output = net(sample)
t1 = time.time()
t_total += (t1 - t0)
metric_val = metric.evaluate(sample, output, 'train')
writer_test.add(None, metric_val)
# Save data for analysis
if args.save_image:
writer_test.save(args.epochs, batch, sample, output)
current_time = time.strftime('%y%m%d@%H:%M:%S')
error_str = '{} | Test'.format(current_time)
pbar.set_description(error_str)
pbar.update(loader_test.batch_size)
pbar.close()
writer_test.update(args.epochs, sample, output)
t_avg = t_total / num_sample
print('Elapsed time : {} sec, '
'Average processing time : {} sec'.format(t_total, t_avg))
def main(args):
if not args.test_only:
if args.no_multiprocessing:
train(0, args)
else:
assert args.num_gpus > 0
spawn_context = mp.spawn(train, nprocs=args.num_gpus, args=(args,),
join=False)
while not spawn_context.join():
pass
for process in spawn_context.processes:
if process.is_alive():
process.terminate()
process.join()
args.pretrain = '{}/model_{:05d}.pt'.format(args.save_dir,
args.epochs)
test(args)
if __name__ == '__main__':
args_main = check_args(args_config)
print('\n\n=== Arguments ===')
cnt = 0
for key in sorted(vars(args_main)):
print(key, ':', getattr(args_main, key), end=' | ')
cnt += 1
if (cnt + 1) % 5 == 0:
print('')
print('\n')
main(args_main)
| 30.006772 | 80 | 0.564959 |
067ad4885ca3826e320529e901808f0330119bf1 | 6,136 | py | Python | python/test/reader_test.py | christosbampis/vmaf | 33e8dc675ace44dd1412b318c31eb3378612744c | ["Apache-2.0"] | null | null | null | python/test/reader_test.py | christosbampis/vmaf | 33e8dc675ace44dd1412b318c31eb3378612744c | ["Apache-2.0"] | 1 | 2018-09-05T16:33:08.000Z | 2018-09-05T16:33:08.000Z | python/test/reader_test.py | christosbampis/vmaf | 33e8dc675ace44dd1412b318c31eb3378612744c | ["Apache-2.0"] | 2 | 2018-09-05T03:59:46.000Z | 2018-09-18T03:57:52.000Z |
__copyright__ = "Copyright 2016-2017, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import unittest
import numpy as np
from vmaf.config import VmafConfig
from vmaf.tools.reader import YuvReader
class YuvReaderTest(unittest.TestCase):
def test_yuv_reader(self):
yuv_reader = YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
width=576,
height=324,
yuv_type='yuv420p'
)
self.assertEquals(yuv_reader.num_bytes, 13436928)
self.assertEquals(yuv_reader.num_frms, 48)
self.assertEquals(yuv_reader._get_uv_width_height_multiplier(), (0.5, 0.5))
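# yuv420p subsamples the chroma planes by 2 in both width and height,
# hence the (0.5, 0.5) width/height multiplier checked above; a 4:2:2 format
# keeps full vertical resolution and yields (0.5, 1.0) instead (see the
# 10-bit yuv422p10le tests further below).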
def test_with(self):
with YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
width=576,
height=324,
yuv_type='yuv420p'
) as yuv_reader:
self.assertEquals(yuv_reader.file.__class__, file)
def test_next_y_u_v(self):
with YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
width=576,
height=324,
yuv_type='yuv420p'
) as yuv_reader:
y, u, v = yuv_reader.next_y_u_v()
self.assertEquals(y[0][0], 87)
self.assertEquals(y[0][1], 131)
self.assertEquals(y[1][0], 95)
self.assertEquals(u[0][0], 92)
self.assertEquals(u[0][1], 97)
self.assertEquals(u[1][0], 90)
self.assertEquals(v[0][0], 121)
self.assertEquals(v[0][1], 126)
self.assertEquals(v[1][0], 122)
self.assertAlmostEquals(y.mean(), 61.928749785665296, places=4)
self.assertAlmostEquals(u.mean(), 114.6326517489712, places=4)
self.assertAlmostEquals(v.mean(), 122.05084019204389, places=4)
y, u, v = yuv_reader.next_y_u_v()
self.assertEquals(y[0][0], 142)
self.assertEquals(y[0][1], 128)
self.assertEquals(y[1][0], 134)
self.assertEquals(u[0][0], 93)
self.assertEquals(u[0][1], 102)
self.assertEquals(u[1][0], 91)
self.assertEquals(v[0][0], 128)
self.assertEquals(v[0][1], 126)
self.assertEquals(v[1][0], 124)
self.assertAlmostEquals(y.mean(), 61.265260631001375, places=4)
self.assertAlmostEquals(u.mean(), 114.72515860768175, places=4)
self.assertAlmostEquals(v.mean(), 122.12022033607681, places=4)
def test_iteration(self):
y_1stmoments = []
y_2ndmoments = []
with YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv"),
width=576, height=324, yuv_type='yuv420p') as yuv_reader:
for y, u, v in yuv_reader:
y_1stmoments.append(y.mean())
y_2ndmoments.append(y.var() + y.mean() * y.mean())
self.assertEquals(len(y_1stmoments), 48)
self.assertEquals(len(y_2ndmoments), 48)
self.assertAlmostEquals(np.mean(y_1stmoments), 61.332006624999984, places=4)
self.assertAlmostEquals(np.mean(y_2ndmoments), 4798.659574041666, places=4)
class YuvReaderTest10le(unittest.TestCase):
def test_yuv_reader(self):
yuv_reader = YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv422p10le.yuv"),
width=576,
height=324,
yuv_type='yuv422p10le'
)
self.assertEquals(yuv_reader.num_bytes, 35831808)
self.assertEquals(yuv_reader.num_frms, 48)
self.assertEquals(yuv_reader._get_uv_width_height_multiplier(), (0.5, 1.0))
def test_with(self):
with YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv422p10le.yuv"),
width=576,
height=324,
yuv_type='yuv422p10le'
) as yuv_reader:
y, u, v = yuv_reader.next_y_u_v()
self.assertEquals(y[0][0], 87)
self.assertEquals(y[0][1], 131)
self.assertEquals(y[1][0], 95)
self.assertEquals(u[0][0], 92.25)
self.assertEquals(u[0][1], 97.5)
self.assertEquals(u[1][0], 91.75)
self.assertEquals(v[0][0], 121)
self.assertEquals(v[0][1], 126.25)
self.assertEquals(v[1][0], 121.25)
self.assertAlmostEquals(y.mean(), 61.928749785665296, places=4)
self.assertAlmostEquals(u.mean(), 114.63283661265432, places=4)
self.assertAlmostEquals(v.mean(), 122.05113490226337, places=4)
y, u, v = yuv_reader.next_y_u_v()
self.assertEquals(y[0][0], 142)
self.assertEquals(y[0][1], 128)
self.assertEquals(y[1][0], 134)
self.assertEquals(u[0][0], 93.25)
self.assertEquals(u[0][1], 102.75)
self.assertEquals(u[1][0], 92.75)
self.assertEquals(v[0][0], 128.25)
self.assertEquals(v[0][1], 126.5)
self.assertEquals(v[1][0], 127.25)
self.assertAlmostEquals(y.mean(), 61.265260631001375, places=4)
self.assertAlmostEquals(u.mean(), 114.72527917095336, places=4)
self.assertAlmostEquals(v.mean(), 122.12047217935527, places=4)
def test_iteration(self):
y_1stmoments = []
y_2ndmoments = []
with YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv422p10le.yuv"),
width=576, height=324, yuv_type='yuv422p10le') as yuv_reader:
for y, u, v in yuv_reader:
y_1stmoments.append(y.mean())
y_2ndmoments.append(y.var() + y.mean() * y.mean())
self.assertEquals(len(y_1stmoments), 48)
self.assertEquals(len(y_2ndmoments), 48)
self.assertAlmostEquals(np.mean(y_1stmoments), 61.332006624999984, places=4)
self.assertAlmostEquals(np.mean(y_2ndmoments), 4798.659574041666, places=4)
if __name__ == '__main__':
unittest.main()
| 35.674419 | 101 | 0.597132 |
592f231f643578055d1b79bc1f07512b3b58be04 | 4,637 | py | Python | kube-hunter.py | ccojocar/kube-hunter | d050f18cd43ff583a7f95275dc20caeba87f10d3 | ["Apache-2.0"] | null | null | null | kube-hunter.py | ccojocar/kube-hunter | d050f18cd43ff583a7f95275dc20caeba87f10d3 | ["Apache-2.0"] | null | null | null | kube-hunter.py | ccojocar/kube-hunter | d050f18cd43ff583a7f95275dc20caeba87f10d3 | ["Apache-2.0"] | null | null | null |
#!/bin/env python
from __future__ import print_function
import argparse
import logging
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
parser = argparse.ArgumentParser(description='Kube-Hunter - hunts for security weaknesses in Kubernetes clusters')
parser.add_argument('--list', action="store_true", help="displays all tests in kubehunter (add --active flag to see active tests)")
parser.add_argument('--internal', action="store_true", help="set hunting of all internal network interfaces")
parser.add_argument('--pod', action="store_true", help="set hunter as an insider pod")
parser.add_argument('--cidr', type=str, help="set an ip range to scan, example: 192.168.0.0/16")
parser.add_argument('--mapping', action="store_true", help="outputs only a mapping of the cluster's nodes")
parser.add_argument('--remote', nargs='+', metavar="HOST", default=list(), help="one or more remote ip/dns to hunt")
parser.add_argument('--active', action="store_true", help="enables active hunting")
parser.add_argument('--log', type=str, metavar="LOGLEVEL", default='INFO', help="set log level, options are: debug, info, warn, none")
parser.add_argument('--report', type=str, default='plain', help="set report type, options are: plain, yaml")
import plugins
config = parser.parse_args()
try:
loglevel = getattr(logging, config.log.upper())
except AttributeError:
loglevel = logging.INFO  # fall back to INFO when an unrecognized log level is given
if config.log.lower() != "none":
logging.basicConfig(level=loglevel, format='%(message)s', datefmt='%H:%M:%S')
from src.modules.report.plain import PlainReporter
from src.modules.report.yaml import YAMLReporter
if config.report.lower() == "yaml":
config.reporter = YAMLReporter()
else:
config.reporter = PlainReporter()
from src.core.events import handler
from src.core.events.types import HuntFinished, HuntStarted
from src.modules.discovery import HostDiscovery
from src.modules.discovery.hosts import HostScanEvent
import src
def interactive_set_config():
"""Sets config manually, returns True for success"""
options = [("Remote scanning", "scans one or more specific IPs or DNS names"),
("Subnet scanning","scans subnets on all local network interfaces"),
("IP range scanning","scans a given IP range")]
print("Choose one of the options below:")
for i, (option, explanation) in enumerate(options):
print("{}. {} ({})".format(i+1, option.ljust(20), explanation))
choice = raw_input("Your choice: ")
if choice == '1':
config.remote = raw_input("Remotes (separated by a ','): ").replace(' ', '').split(',')
elif choice == '2':
config.internal = True
elif choice == '3':
config.cidr = raw_input("CIDR (example - 192.168.1.0/24): ").replace(' ', '')
else:
return False
return True
def parse_docs(hunter, docs):
"""returns tuple of (name, docs)"""
if not docs:
return hunter.__name__, "<no documentation>"
docs = docs.strip().split('\n')
for i, line in enumerate(docs):
docs[i] = line.strip()
return docs[0], ' '.join(docs[1:]) if len(docs[1:]) else "<no documentation>"
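# Illustration (hypothetical docstring): for a hunter whose docstring reads
#   "Foo Hunter\n    checks whether foo is exposed"
# parse_docs returns ("Foo Hunter", "checks whether foo is exposed"); for an
# empty docstring it falls back to (hunter.__name__, "<no documentation>").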
def list_hunters():
print("\nPassive Hunters:\n----------------")
for i, (hunter, docs) in enumerate(handler.passive_hunters.items()):
name, docs = parse_docs(hunter, docs)
print("* {}\n {}\n".format( name, docs))
if config.active:
print("\n\nActive Hunters:\n---------------")
for i, (hunter, docs) in enumerate(handler.active_hunters.items()):
name, docs = parse_docs(hunter, docs)
print("* {}\n {}\n".format( name, docs))
hunt_started = False
def main():
global hunt_started
scan_options = [
config.pod,
config.cidr,
config.remote,
config.internal
]
try:
if config.list:
list_hunters()
return
if not any(scan_options):
if not interactive_set_config(): return
hunt_started = True
handler.publish_event(HuntStarted())
handler.publish_event(HostScanEvent())
# Blocking to see discovery output
handler.join()
except KeyboardInterrupt:
logging.debug("Kube-Hunter stopped by user")
# happens when running a container without interactive option
except EOFError:
logging.error("\033[0;31mPlease run again with -it\033[0m")
finally:
if hunt_started:
handler.publish_event(HuntFinished())
handler.join()
handler.free()
logging.debug("Cleaned Queue")
if __name__ == '__main__':
main()
| 36.226563 | 134 | 0.650421 |
215930627df591578a414fbeebe0f22a875c5297 | 9,307 | py | Python | qmeq_elph/approach/redfield.py | gedaskir/qmeq_elph | 8330d8df32b92f928b33c8cfaa3a309655b34f02 | ["BSD-2-Clause"] | 1 | 2020-01-10T17:46:35.000Z | 2020-01-10T17:46:35.000Z | qmeq_elph/approach/redfield.py | gedaskir/qmeq_elph | 8330d8df32b92f928b33c8cfaa3a309655b34f02 | ["BSD-2-Clause"] | null | null | null | qmeq_elph/approach/redfield.py | gedaskir/qmeq_elph | 8330d8df32b92f928b33c8cfaa3a309655b34f02 | ["BSD-2-Clause"] | null | null | null |
"""Module containing python functions, which generate first order Redfield kernel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import itertools
from .neumann1 import generate_w1fct_elph
from ..aprclass import Approach_elph
from qmeq.mytypes import complexnp
from qmeq.mytypes import doublenp
from qmeq.approach.redfield import generate_phi1fct
from qmeq.approach.redfield import generate_kern_redfield
from qmeq.approach.redfield import generate_current_redfield
from qmeq.approach.redfield import generate_vec_redfield
#---------------------------------------------------------------------------------------------------------
# Redfield approach
#---------------------------------------------------------------------------------------------------------
def generate_kern_redfield_elph(sys):
(E, Vbbp, w1fct, symq, norm_rowp) = (sys.qd.Ea, sys.baths.Vbbp, sys.w1fct, sys.funcp.symq, sys.funcp.norm_row)
(si, si_elph) = (sys.si, sys.si_elph)
norm_row = norm_rowp if symq else si.ndm0r
last_row = si.ndm0r-1 if symq else si.ndm0r
kern = sys.kern
if kern is None:
kern = np.zeros((last_row+1, si.ndm0r), dtype=doublenp)
# Here letter convention is not used
# For example, the label `a' has the same charge as the label `b'
for charge in range(si.ncharge):
for b, bp in itertools.combinations_with_replacement(si.statesdm[charge], 2):
bbp = si.get_ind_dm0(b, bp, charge)
bbp_bool = si.get_ind_dm0(b, bp, charge, 2)
if bbp != -1 and bbp_bool:
bbpi = si.ndm0 + bbp - si.npauli
bbpi_bool = True if bbpi >= si.ndm0 else False
#--------------------------------------------------
for a, ap in itertools.product(si.statesdm[charge], si.statesdm[charge]):
aap = si.get_ind_dm0(a, ap, charge)
if aap != -1:
bpap = si_elph.get_ind_dm0(bp, ap, charge)
ba = si_elph.get_ind_dm0(b, a, charge)
fct_aap = 0
for l in range(si.nbaths):
gamma_ba_bpap = 0.5*(Vbbp[l, b, a]*Vbbp[l, bp, ap].conjugate()
+Vbbp[l, a, b].conjugate()*Vbbp[l, ap, bp])
fct_aap += gamma_ba_bpap*(w1fct[l, bpap, 0].conjugate() - w1fct[l, ba, 0])
aapi = si.ndm0 + aap - si.npauli
aap_sgn = +1 if si.get_ind_dm0(a, ap, charge, maptype=3) else -1
kern[bbp, aap] += fct_aap.imag # kern[bbp, aap] += fct_aap.imag
if aapi >= si.ndm0:
kern[bbp, aapi] += fct_aap.real*aap_sgn # kern[bbp, aapi] += fct_aap.real*aap_sgn
if bbpi_bool:
kern[bbpi, aapi] += fct_aap.imag*aap_sgn # kern[bbpi, aapi] += fct_aap.imag*aap_sgn
if bbpi_bool:
kern[bbpi, aap] -= fct_aap.real # kern[bbpi, aap] -= fct_aap.real
#--------------------------------------------------
for bpp in si.statesdm[charge]:
bppbp = si.get_ind_dm0(bpp, bp, charge)
if bppbp != -1:
fct_bppbp = 0
for a in si.statesdm[charge]:
bppa = si_elph.get_ind_dm0(bpp, a, charge)
for l in range(si.nbaths):
gamma_ba_bppa = 0.5*(Vbbp[l, b, a]*Vbbp[l, bpp, a].conjugate()
+Vbbp[l, a, b].conjugate()*Vbbp[l, a, bpp])
fct_bppbp += +gamma_ba_bppa*w1fct[l, bppa, 1].conjugate()
for c in si.statesdm[charge]:
cbpp = cbp = si_elph.get_ind_dm0(c, bpp, charge)
for l in range(si.nbaths):
gamma_bc_bppc = 0.5*(Vbbp[l, b, c]*Vbbp[l, bpp, c].conjugate()
+Vbbp[l, c, b].conjugate()*Vbbp[l, c, bpp])
fct_bppbp += +gamma_bc_bppc*w1fct[l, cbpp, 0]
bppbpi = si.ndm0 + bppbp - si.npauli
bppbp_sgn = +1 if si.get_ind_dm0(bpp, bp, charge, maptype=3) else -1
kern[bbp, bppbp] += fct_bppbp.imag # kern[bbp, bppbp] += fct_bppbp.imag
if bppbpi >= si.ndm0:
kern[bbp, bppbpi] += fct_bppbp.real*bppbp_sgn # kern[bbp, bppbpi] += fct_bppbp.real*bppbp_sgn
if bbpi_bool:
kern[bbpi, bppbpi] += fct_bppbp.imag*bppbp_sgn # kern[bbpi, bppbpi] += fct_bppbp.imag*bppbp_sgn
if bbpi_bool:
kern[bbpi, bppbp] -= fct_bppbp.real # kern[bbpi, bppbp] -= fct_bppbp.real
#--------------------------------------------------
bbpp = si.get_ind_dm0(b, bpp, charge)
if bbpp != -1:
fct_bbpp = 0
for a in si.statesdm[charge]:
bppa = si_elph.get_ind_dm0(bpp, a, charge)
for l in range(si.nbaths):
gamma_abpp_abp = 0.5*(Vbbp[l, a, bpp].conjugate()*Vbbp[l, a, bp]
+Vbbp[l, bpp, a]*Vbbp[l, bp, a].conjugate())
fct_bbpp += -gamma_abpp_abp*w1fct[l, bppa, 1]
for c in si.statesdm[charge]:
cbpp = si_elph.get_ind_dm0(c, bpp, charge)
for l in range(si.nbaths):
gamma_cbpp_cbp = 0.5*(Vbbp[l, c, bpp].conjugate()*Vbbp[l, c, bp]
+Vbbp[l, bpp, c]*Vbbp[l, bp, c].conjugate())
fct_bbpp += -gamma_cbpp_cbp*w1fct[l, cbpp, 0].conjugate()
bbppi = si.ndm0 + bbpp - si.npauli
bbpp_sgn = +1 if si.get_ind_dm0(b, bpp, charge, maptype=3) else -1
kern[bbp, bbpp] += fct_bbpp.imag # kern[bbp, bbpp] += fct_bbpp.imag
if bbppi >= si.ndm0:
kern[bbp, bbppi] += fct_bbpp.real*bbpp_sgn # kern[bbp, bbppi] += fct_bbpp.real*bbpp_sgn
if bbpi_bool:
kern[bbpi, bbppi] += fct_bbpp.imag*bbpp_sgn # kern[bbpi, bbppi] += fct_bbpp.imag*bbpp_sgn
if bbpi_bool:
kern[bbpi, bbpp] -= fct_bbpp.real # kern[bbpi, bbpp] -= fct_bbpp.real
#--------------------------------------------------
for c, cp in itertools.product(si.statesdm[charge], si.statesdm[charge]):
ccp = si.get_ind_dm0(c, cp, charge)
if ccp != -1:
cpbp = si_elph.get_ind_dm0(cp, bp, charge)
cb = si_elph.get_ind_dm0(c, b, charge)
fct_ccp = 0
for l in range(si.nbaths):
gamma_bc_bpcp = 0.5*(Vbbp[l, b, c]*Vbbp[l, bp, cp].conjugate()
+Vbbp[l, c, b].conjugate()*Vbbp[l, cp, bp])
fct_ccp += gamma_bc_bpcp*(w1fct[l, cpbp, 1] - w1fct[l, cb, 1].conjugate())
ccpi = si.ndm0 + ccp - si.npauli
ccp_sgn = +1 if si.get_ind_dm0(c, cp, charge, maptype=3) else -1
kern[bbp, ccp] += fct_ccp.imag # kern[bbp, ccp] += fct_ccp.imag
if ccpi >= si.ndm0:
kern[bbp, ccpi] += fct_ccp.real*ccp_sgn # kern[bbp, ccpi] += fct_ccp.real*ccp_sgn
if bbpi_bool:
kern[bbpi, ccpi] += fct_ccp.imag*ccp_sgn # kern[bbpi, ccpi] += fct_ccp.imag*ccp_sgn
if bbpi_bool:
kern[bbpi, ccp] -= fct_ccp.real # kern[bbpi, ccp] -= fct_ccp.real
#--------------------------------------------------
# Normalisation condition
kern[norm_row] = np.zeros(si.ndm0r, dtype=doublenp)
for charge in range(si.ncharge):
for b in si.statesdm[charge]:
bb = si.get_ind_dm0(b, b, charge)
kern[norm_row, bb] += 1
sys.kern = kern
return 0
class Approach_pyRedfield(Approach_elph):
kerntype = 'pyRedfield'
generate_fct = staticmethod(generate_phi1fct)
generate_kern = staticmethod(generate_kern_redfield)
generate_current = staticmethod(generate_current_redfield)
generate_vec = staticmethod(generate_vec_redfield)
#
generate_fct_elph = staticmethod(generate_w1fct_elph)
generate_kern_elph = staticmethod(generate_kern_redfield_elph)
| 61.635762 | 128 | 0.46191 |
2328649003e3e8e6de63a5369ae34269d0346723 | 9,968 | py | Python | midca/modules/_plan/pyhop.py | COLAB2/midca | 18d6b13e3d6b0d980cd3453196e82fad7302e79b | ["MIT"] | 12 | 2018-01-23T01:31:33.000Z | 2022-02-03T04:47:10.000Z | midca/modules/_plan/pyhop.py | COLAB2/midca | 18d6b13e3d6b0d980cd3453196e82fad7302e79b | ["MIT"] | 32 | 2017-11-02T20:58:03.000Z | 2021-04-15T18:59:27.000Z | midca/modules/_plan/pyhop.py | COLAB2/midca | 18d6b13e3d6b0d980cd3453196e82fad7302e79b | ["MIT"] | 5 | 2017-12-01T17:28:01.000Z | 2020-03-18T14:43:32.000Z |
"""
Pyhop, version 1.2.1 -- a simple SHOP-like planner written in Python.
Author: Dana S. Nau, 15 February 2013
Copyright 2013 Dana S. Nau - http://www.cs.umd.edu/~nau
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Pyhop should work correctly in both Python 2.7 and Python 3.2.
For examples of how to use it, see the example files that come with Pyhop.
Pyhop provides the following classes and functions:
- foo = State('foo') tells Pyhop to create an empty state object named 'foo'.
To put variables and values into it, you should do assignments such as
foo.var1 = val1
- bar = Goal('bar') tells Pyhop to create an empty goal object named 'bar'.
To put variables and values into it, you should do assignments such as
bar.var1 = val1
- print_state(foo) will print the variables and values in the state foo.
- print_goal(foo) will print the variables and values in the goal foo.
- declare_operators(o1, o2, ..., ok) tells Pyhop that o1, o2, ..., ok
are all of the planning operators; this supersedes any previous call
to declare_operators.
- print_operators() will print out the list of available operators.
- declare_methods('foo', m1, m2, ..., mk) tells Pyhop that m1, m2, ..., mk
are all of the methods for tasks having 'foo' as their taskname; this
supersedes any previous call to declare_methods('foo', ...).
- print_methods() will print out a list of all declared methods.
- pyhop(state1,tasklist) tells Pyhop to find a plan for accomplishing tasklist
(a list of tasks), starting from an initial state state1, using whatever
methods and operators you declared previously.
- In the above call to pyhop, you can add an optional 3rd argument called
'verbose' that tells pyhop how much debugging printout it should provide:
- if verbose = 0, then pyhop prints nothing;
- if verbose = 1, it prints the initial parameters and the answer;
- if verbose = 2, it also prints a message on each recursive call;
- if verbose = 3, it also prints info about what it's computing.
"""
# Like the SHOP and JSHOP planners (see http://www.cs.umd.edu/projects/shop),
# Pyhop uses HTN methods to decompose tasks into smaller and smaller
# subtasks, until it finds tasks that correspond directly to actions.
# But several of the details are different:
#
# (1) In SHOP and JSHOP, one uses a special-purpose language to write HTN
# methods and planning operators -- but in Pyhop, one writes the methods
# and operators as ordinary Python functions. This should make it easier
# to use Pyhop as part of other programs.
#
# (2) Pyhop represents states as collections of variables, not collections
# of logical assertions. For example, to say that box b is in room r1,
# instead of writing an assertion such as "in(b,r1)", you would write
# something like "loc[b] = r1".
#
# (3) The current state is a Python object. The state variables are part of
# that object, and you need to refer to this object explicitly in the
# operator and method definitions. Thus, what you'd *really* write in
# the above example is something like this:
# s = State()
# s.loc['b'] = 'r1'
#
# (4) You also can define a goal as a Python object. For example, to specify
# that your goal is to have box b in room r2, you might write this:
# g = Goal()
# g.loc['b'] = 'r2'
# Pyhop doesn't explicitly check to see if the goal is achieved. But you
# can pass the goal object as an argument to your operators and methods,
# so that their preconditions and effects can refer to it. If you want to
# accomplish a sequence of goals, one at a time (e.g., first achieve g1,
# then g2, then g3), you could define all three of them as goal objects,
# pass all three of them as arguments to your operators and methods.
#
# (5) Unlike SHOP and JSHOP, Pyhop doesn't include a Horn-clause inference
# engine for use in evaluating preconditions. So far, I haven't seen any
# need for it; I've found it easier to write precondition-evaluation
# functions directly in Python. But I'll consider adding Horn-clause
# inference to Pyhop if someone convinces me that it's really necessary.
#
# Accompanying this file is a file called examples.py that provides examples
# of how to use Pyhop. To run it, launch python and type 'import examples'.
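# A minimal usage sketch (hypothetical travel domain, not part of Pyhop itself;
# the operator/method names below are invented for illustration only):
#
#   import pyhop
#
#   def walk(state, agent, dest):            # operator: applies a state change
#       state.loc[agent] = dest
#       return state
#
#   def travel(state, agent, dest):          # method: decomposes the task into subtasks
#       return [('walk', agent, dest)]
#
#   pyhop.declare_operators(walk)
#   pyhop.declare_methods('travel', travel)
#
#   s = pyhop.State('s')
#   s.loc = {'me': 'home'}
#   pyhop.pyhop(s, [('travel', 'me', 'park')], verbose=1)
#   # expected result: [('walk', 'me', 'park')]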
import copy,sys, pprint
############################################################
# States and goals
class State():
"""A state is just a collection of variable bindings."""
def __init__(self,name):
self.__name__ = name
class Goal():
"""A goal is just a collection of variable bindings."""
def __init__(self,name):
self.__name__ = name
### print_state and print_goal are identical except for the name
def print_state(state,indent=4):
"""Print each variable in state, indented by indent spaces."""
if state != False:
for (name,val) in list(vars(state).items()):
if name != '__name__':
for x in range(indent): sys.stdout.write(' ')
sys.stdout.write(state.__name__ + '.' + name)
print(' =', val)
else: print('False')
def print_goal(goal,indent=4):
"""Print each variable in goal, indented by indent spaces."""
if goal != False:
for (name,val) in list(vars(goal).items()):
if name != '__name__':
for x in range(indent): sys.stdout.write(' ')
sys.stdout.write(goal.__name__ + '.' + name)
print(' =', val)
else: print('False')
############################################################
# Helper functions that may be useful in domain models
def forall(seq,cond):
"""True if cond(x) holds for all x in seq, otherwise False."""
for x in seq:
if not cond(x): return False
return True
def find_if(cond,seq):
"""
Return the first x in seq such that cond(x) holds, if there is one.
Otherwise return None.
"""
for x in seq:
if cond(x): return x
return None
############################################################
# Commands to tell Pyhop what the operators and methods are
operators = {}
methods = {}
def declare_operators(*op_list):
"""
Call this after defining the operators, to tell Pyhop what they are.
op_list must be a list of functions, not strings.
"""
operators.update({op.__name__:op for op in op_list})
return operators
def declare_methods(task_name,*method_list):
"""
Call this once for each task, to tell Pyhop what the methods are.
task_name must be a string.
method_list must be a list of functions, not strings.
"""
methods.update({task_name:list(method_list)})
return methods[task_name]
############################################################
# Commands to find out what the operators and methods are
def print_operators(olist=operators):
"""Print out the names of the operators"""
print('OPERATORS:', ', '.join(olist))
def print_methods(mlist=methods):
"""Print out a table of what the methods are for each task"""
print('{:<14}{}'.format('TASK:','METHODS:'))
for task in mlist:
print('{:<14}'.format(task) + ', '.join([f.__name__ for f in mlist[task]]))
############################################################
# The actual planner
def pyhop(state,tasks,verbose=0):
"""
Try to find a plan that accomplishes tasks in state.
If successful, return the plan. Otherwise return False.
"""
if verbose>0: print('** pyhop:\n state = {}\n tasks = {}'.format(state.__name__,tasks))
result = seek_plan(state,tasks,[],0,verbose)
if verbose>0: print('** result =',result,'\n')
return result
def copy_state(state):
try:
return state.copy()
except AttributeError:
return copy.deepcopy(state)
def seek_plan(state,tasks,plan,depth,verbose=0):
"""
Workhorse for pyhop. state and tasks are as in pyhop.
- plan is the current partial plan.
- depth is the recursion depth, for use in debugging
- verbose is whether to print debugging messages
"""
if verbose>1: print('depth {} tasks {}'.format(depth,tasks))
if tasks == []:
if verbose>2: print('depth {} returns plan {}'.format(depth,plan))
return plan
task1 = tasks[0]
if task1[0] in operators:
if verbose>2: print('depth {} action {}'.format(depth,task1))
operator = operators[task1[0]]
newstate = operator(copy_state(state),*task1[1:])
if verbose>2:
print('depth {} new state:'.format(depth))
print_state(newstate)
if newstate:
solution = seek_plan(newstate,tasks[1:],plan+[task1],depth+1,verbose)
if solution != False:
return solution
if task1[0] in methods:
if verbose>2: print('depth {} method instance {}'.format(depth,task1))
relevant = methods[task1[0]]
for method in relevant:
subtasks = method(state,*task1[1:])
# Can't just say "if subtasks:", because that's wrong if subtasks == []
if verbose>2:
print('depth {} new tasks: {}'.format(depth,subtasks))
if subtasks != False:
solution = seek_plan(state,subtasks+tasks[1:],plan,depth+1,verbose)
if solution != False:
return solution
if verbose>2: print('depth {} returns failure'.format(depth))
return False
| 39.555556 | 95 | 0.649177 |
20597fa839fe7595f351861bd1c4c5b3911e0a45 | 905 | py | Python | examples/undocumented/python_modular/kernel_io_modular.py | srgnuclear/shogun | 33c04f77a642416376521b0cd1eed29b3256ac13 | ["Ruby", "MIT"] | 1 | 2015-11-05T18:31:14.000Z | 2015-11-05T18:31:14.000Z | examples/undocumented/python_modular/kernel_io_modular.py | waderly/shogun | 9288b6fa38e001d63c32188f7f847dadea66e2ae | ["Ruby", "MIT"] | null | null | null | examples/undocumented/python_modular/kernel_io_modular.py | waderly/shogun | 9288b6fa38e001d63c32188f7f847dadea66e2ae | ["Ruby", "MIT"] | null | null | null |
#!/usr/bin/env python
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
parameter_list=[[traindat,testdat,1.9],[traindat,testdat,1.7]]
def kernel_io_modular (train_fname=traindat,test_fname=testdat,width=1.9):
from modshogun import RealFeatures, GaussianKernel, CSVFile
feats_train=RealFeatures(CSVFile(train_fname))
feats_test=RealFeatures(CSVFile(test_fname))
kernel=GaussianKernel(feats_train, feats_train, width)
km_train=kernel.get_kernel_matrix()
f=CSVFile("tmp/gaussian_train.csv","w")
kernel.save(f)
del f
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
f=CSVFile("tmp/gaussian_test.csv","w")
kernel.save(f)
del f
#clean up
import os
os.unlink("tmp/gaussian_test.csv")
os.unlink("tmp/gaussian_train.csv")
return km_train, km_test, kernel
if __name__=='__main__':
print('Gaussian')
kernel_io_modular(*parameter_list[0])
| 25.857143 | 74 | 0.771271 |
338169593570d20540774cf7c0085e37dd33ec7d | 2,784 | py | Python | storage/models.py | SashaPoraiko/academy-storage | 387f236971085fde605c2a12b53b1734a925759a | ["Unlicense", "MIT"] | null | null | null | storage/models.py | SashaPoraiko/academy-storage | 387f236971085fde605c2a12b53b1734a925759a | ["Unlicense", "MIT"] | 7 | 2020-06-05T23:54:27.000Z | 2022-02-10T10:36:29.000Z | storage/models.py | SashaPoraiko/academy-storage | 387f236971085fde605c2a12b53b1734a925759a | ["Unlicense", "MIT"] | null | null | null |
from django.db import models
from django.utils import timezone
from django.core.validators import MaxValueValidator, MinValueValidator
class PhoneModel(models.Model):
brand_choices = [
('APPLE', 'Apple'),
('SAMSUNG', 'Samsung'),
('XIAOMI', 'Xiaomi'),
]
name = models.CharField(max_length=200)
brand = models.CharField(
max_length=30,
choices=brand_choices
)
model_year = models.IntegerField(
validators=[MaxValueValidator(timezone.now().year),
MinValueValidator(timezone.now().year - 1000)])
def __str__(self):
return self.name
class Phone(models.Model):
phone_statuses = [
('ACTIVE', 'Active'),
('DELETED', 'Deleted')
]
phone_model = models.ForeignKey('storage.PhoneModel', on_delete=models.CASCADE)
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
comment = models.CharField(max_length=255)
condition = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)], default=1)
date_release = models.DateTimeField(default=timezone.now)
date_create = models.DateTimeField(default=timezone.now)
date_modify = models.DateTimeField(default=timezone.now)
status = models.CharField(default=phone_statuses[0], max_length=30, choices=phone_statuses)
def __str__(self):
return ' '.join(map(str, (self.phone_model, self.comment)))
class Part(models.Model):
name = models.CharField(max_length=80)
condition = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
phone_models = models.ManyToManyField('storage.PhoneModel')
def __str__(self):
return f'name: {self.name}, condition: {self.condition}'
class Device(models.Model):
part = models.OneToOneField('storage.Part', null=True, blank=True, on_delete=models.PROTECT)
phone = models.OneToOneField('storage.Phone', null=True, blank=True, on_delete=models.PROTECT)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
if self.part and self.phone:
raise Exception("Can't hold 2 items")
return super().save(force_insert, force_update, using, update_fields)
def __str__(self):
return f'Part:{self.part}' if self.part else f'Phone:{self.phone}'
class Storage(models.Model):
locker = models.CharField(max_length=80)
row = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
column = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
device = models.ForeignKey('storage.device', on_delete=models.CASCADE, null=True)
def __str__(self):
return ' '.join(map(str, (self.locker, self.row, self.column)))
| 36.631579 | 104 | 0.696839 |
936340413b0ae4c82a1cd1232b928aa9abdff579 | 802 | py | Python | mywebapp/mywebapp/urls.py | diegodiego9/py-django-webapp | e4bd267032a31b8e4116311f047905a2535a2f38 | ["MIT"] | null | null | null | mywebapp/mywebapp/urls.py | diegodiego9/py-django-webapp | e4bd267032a31b8e4116311f047905a2535a2f38 | ["MIT"] | null | null | null | mywebapp/mywebapp/urls.py | diegodiego9/py-django-webapp | e4bd267032a31b8e4116311f047905a2535a2f38 | ["MIT"] | null | null | null |
"""mywebapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('items/', include('items.urls')),
path('admin/', admin.site.urls),
]
| 34.869565 | 77 | 0.703242 |
6a10766782ceaa82c8b0a978836e8d296ab6c1c2 | 653 | py | Python | manage.py | recommend-games/recommend-games-server | 700e4039d8f72e77ac988b02476f84d701fd5225 | ["MIT"] | 1 | 2021-01-28T18:16:11.000Z | 2021-01-28T18:16:11.000Z | manage.py | MarkusShepherd/ludoj-server | 6cfb218029e6e1cf520eed1ccab2576c990cd287 | ["MIT"] | 6 | 2019-12-10T14:03:23.000Z | 2021-08-22T13:23:36.000Z | manage.py | MarkusShepherd/ludoj-server | 6cfb218029e6e1cf520eed1ccab2576c990cd287 | ["MIT"] | 2 | 2019-10-11T11:52:06.000Z | 2022-01-18T21:56:48.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" command line script """
import os
import sys
from dotenv import load_dotenv
if __name__ == "__main__":
load_dotenv(verbose=True)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rg.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 25.115385 | 73 | 0.675345 |
c302dcda1cdabdd30bccc5df1dd00c5325cf2c7a | 22,783 | py | Python | interferogram/sentinel/standard_product_packaging.py | earthobservatory/ariamh-pub | f33731e127f38ff33b02e02c07b16793c07651a6 | ["Apache-2.0"] | 4 | 2019-11-19T03:35:35.000Z | 2020-12-07T18:43:11.000Z | interferogram/sentinel/standard_product_packaging.py | earthobservatory/ariamh-pub | f33731e127f38ff33b02e02c07b16793c07651a6 | ["Apache-2.0"] | 3 | 2019-06-05T03:35:55.000Z | 2020-04-09T14:16:08.000Z | interferogram/sentinel/standard_product_packaging.py | earthobservatory/ariamh-pub | f33731e127f38ff33b02e02c07b16793c07651a6 | ["Apache-2.0"] | 6 | 2019-08-23T22:53:11.000Z | 2021-11-06T15:15:30.000Z |
#!/usr/bin/env python3
# By David Bekaert - Jet Propulsion Laboratory
from builtins import map
from builtins import str
from builtins import range
from builtins import object
import sys
import json
import logging
import traceback
from collections import OrderedDict
import os
from netCDF4 import Dataset
import numpy as np
import isce
import osgeo
from osgeo import gdal
from osgeo import osr
import collections
import pdb
log_format = "[%(asctime)s: %(levelname)s/%(funcName)s] %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger('standard_product_packaging')
BASE_PATH = os.path.dirname(__file__)
class content_properties(object):
names = ('type','src_file','nodata','chunks','band','description',
'dims','python_action','python_action_args','attribute',
'description','name','crs_name','crs_attribute','data_type','global_attribute')
def __init__(self,dataset):
for property_name in self.names:
setattr(self,property_name,extract_key(dataset,property_name))
def netdf4_dtype_check(dtype):
"""
Only allow the dtypes that netCDF4 supports: upper-case the name and map it through a dict.
NC_BYTE 8-bit signed integer
NC_UBYTE 8-bit unsigned integer
NC_CHAR 8-bit character byte
NC_SHORT 16-bit signed integer
NC_USHORT 16-bit unsigned integer *
NC_INT (or NC_LONG) 32-bit signed integer
NC_UINT 32-bit unsigned integer *
NC_INT64 64-bit signed integer *
NC_UINT64 64-bit unsigned integer *
NC_FLOAT 32-bit floating point
NC_DOUBLE 64-bit floating point
NC_STRING variable length character string +
NUMPY2NETCDF4_DATATYPE = {
1 : 'BYTE',
2 : 'uint16',
3 : 'SHORT',
4 : 'uint32',
5 : 'INT',
6 : 'FLOAT',
7 : 'DOUBLE',
10: 'CFLOAT',
11: 'complex128',
"""
logger.info("testing")
def create_group(fid,group,fid_parent=None):
'''
Create a group within the fid
'''
name = group["name"]
contents = group["content"]
# create a group with the provided name
grp_id = fid.createGroup(name)
# track the parent fid
if fid_parent is None:
fid_parent = fid
for content in contents:
dataset_flag = extract_key(content,"dataset")
group_flag = extract_key(content,"group")
if dataset_flag is not None:
for dataset in content["dataset"]:
create_dataset(grp_id,dataset,fid_parent)
if group_flag is not None:
for subgroup in content["group"]:
create_group(grp_id,subgroup,fid_parent)
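# Sketch of the group structure create_group expects (field values are hypothetical;
# the shape is inferred from the key accesses above):
#   {"name": "science",
#    "content": [{"dataset": [ {...dataset spec...} ]},
#                {"group": [ {"name": "grids", "content": [...]} ]}]}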
def python_execution(python_string,python_args=None):
'''
Executing a python function using a module.function string and provided arguments
'''
import importlib
# split the the python string in a python module and python function
python_string_list = python_string.split('.')
python_module = python_string_list[0]
python_function = python_string_list[-1]
# loading the python module
module = importlib.import_module(python_module)
# loading the function
function = module.__getattribute__(python_function)
# execute function with arguments
if python_args is not None:
output = function(python_args)
else:
output = function()
return output
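# Usage sketch (hypothetical argument, standard library only):
#   python_execution('os.getcwd')
# splits the string into module 'os' and function 'getcwd', imports the module and
# returns os.getcwd(). Only the first and last dot-separated parts are used, so a
# nested attribute such as 'os.path.exists' would not resolve through this helper.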
def write_dataset(fid,data,properties_data):
'''
Write out the data as netCDF arrays or strings depending on the type of the data, or as polygons depending on the data_type.
'''
# for now only support polygon for vector
if False:
print("nothing")
# this is either string or dataset option
else:
if isinstance(data,str):
dset = fid.createVariable(properties_data.name, str, ('matchup',), zlib=True)
dset[0]=data
elif isinstance(data,np.ndarray):
# make sure the _fillvalue is formatted the same as the data_type
if properties_data.type is None:
properties_data.type = data.dtype.name
if properties_data.nodata is not None:
nodata = np.array(properties_data.nodata,dtype=properties_data.type)
else:
nodata = None
            # handle dims==None first, otherwise len(None) would raise a TypeError
            if properties_data.dims is None:
                dset = fid.createVariable(properties_data.name, properties_data.type)
            elif len(properties_data.dims)==1:
                dset = fid.createVariable(properties_data.name, properties_data.type, (properties_data.dims[0]), fill_value=nodata, zlib=True)
            elif len(properties_data.dims)==2:
                dset = fid.createVariable(properties_data.name, properties_data.type, (properties_data.dims[0], properties_data.dims[1]), fill_value=nodata, zlib=True)
            elif len(properties_data.dims)==3:
                dset = fid.createVariable(properties_data.name, properties_data.type, (properties_data.dims[0],properties_data.dims[1], properties_data.dims[2]), fill_value=nodata, zlib=True)
dset[:] = data
elif isinstance(data, collections.Iterable):
if isinstance(data[0],str):
dset = fid.createVariable(properties_data.name, str, ('matchup',), zlib=True)
count = 0
for data_line in data:
dset[count]=data_line
logger.info(properties_data.name + " count = " + str(count) + ' ' + data_line)
                    count += 1
else:
logger.info('i am a collection, not yet programmed')
elif data is None:
logger.info("Action failed...")
dset = None
else:
data = np.array([data])
if properties_data.type is None:
properties_data.type='float32'
#dset = fid.createVariable(properties_data.name,properties_data.type,('matchup',),fill_value=-9999., zlib=True)
dset = fid.createVariable(properties_data.name,properties_data.type)
dset[:] = data
# adding attributes if inputted
if properties_data.attribute is not None and dset is not None:
add_attributes(dset,properties_data.attribute)
def expand_attrdict(attr_dict, attr_name, attr_value):
'''
expand an attribute dict with more keys and values
Update the attribute dictionary if original key is used again with a new value
'''
#pdb.set_trace()
if attr_dict is None:
attr_dict = {}
for count in range(len(attr_name)):
attr_temp = {}
attr_temp["name"]=attr_name[count]
attr_temp["value"]=attr_value[count]
# adding it to the original dictionary
if len(attr_dict)==0:
attr_dict = [attr_temp]
else:
# looping over all the attributes to see if the name already is in use
count_dict = 0
name_match = None
for attr_dict_item in attr_dict:
if attr_dict_item["name"] == attr_temp["name"]:
name_match = count_dict
count_dict = count_dict +1
            # if a match was found, update the attribute information
if name_match is not None:
attr_dict[name_match]=attr_temp
else:
attr_dict.append(attr_temp)
return attr_dict
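# Illustrative sketch of how expand_attrdict grows and updates the attribute
# list (the attribute names and values are made up for the example):
#   attrs = expand_attrdict(None, ['units'], ['meter'])
#   # -> [{'name': 'units', 'value': 'meter'}]
#   attrs = expand_attrdict(attrs, ['units', 'long_name'], ['m', 'height'])
#   # -> [{'name': 'units', 'value': 'm'}, {'name': 'long_name', 'value': 'height'}]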
def create_dataset(fid,dataset,fid_parent=None):
"""
Creating a dataset, either a gdal readable file, or a string, or an action
"""
import copy
name = dataset["name"]
logger.info("dataset name = " + name)
# extracting the data properties
properties_data = content_properties(dataset)
# Considering the different data parsing methods
# running a python function
if properties_data.python_action is not None:
data = python_execution(properties_data.python_action,properties_data.python_action_args)
# loading data from a src file
elif properties_data.src_file is not None:
# loading the data
data, data_transf, data_proj, data_nodata = data_loading(properties_data.src_file,properties_data.type,properties_data.band)
# setting the no-data value in case the user is not overwriting it
if data_nodata is not None and properties_data.nodata is None:
properties_data.nodata = data_nodata
# check if the user is not over-writing the no-data value with something different.
elif data_nodata is not None and properties_data.nodata is not None:
data[data==data_nodata]=properties_data.nodata
# data is a string
elif properties_data.type == "str":
if properties_data.description is not None:
data = properties_data.description
# special case to parse the connected component data
if properties_data.name.lower()=="connected_components" or properties_data.name.lower() =="connectedcomponents" or properties_data.name.lower() =="coherence":
# setting the no-data value in case the user is not overwriting it
if data["data_nodata"] is not None and properties_data.nodata is None:
properties_data.nodata = data["data_nodata"]
# check if the user is not over-writing the no-data value with something different.
elif data["data_nodata"] is not None and properties_data.nodata is not None:
temp = data["data"]
temp[temp==data["data_nodata"]]=properties_data.nodata
data["data"] = temp
# extract again the actual data to be written to file
data = data["data"]
    # check whether an additional type mapping needs to be done; for the python-function path this is not applied directly
    # change the datatype if provided
if properties_data.type is not None:
# changing the format if needed
data = data.astype(dtype=properties_data.type)
    # tracking whether it's a regular dataset, 2D geocoordinates, or 3D geocoordinates, and making these datasets CF compliant
if properties_data.name=="GEOCOOR2" or properties_data.name=="GEOCOOR3":
# setting the coordinate system
crs_name = properties_data.crs_name
crs_attribute = properties_data.crs_attribute
# ensuring the crs is CF compliant
crs_attribute = CF_attribute_compliance(crs_attribute,crs_name)
# try to see if the geo transformation and projection is passed as well
try:
if data["data_proj"] is not None:
attr_name = ['crs_wkt']
attr_value = [data["data_proj"]]
crs_attribute = expand_attrdict(crs_attribute, attr_name, attr_value)
except:
pass
        # store the CRS information locally at the group level of the datasets
dset = fid.createVariable(crs_name, 'i4')
add_attributes(dset,crs_attribute)
## START with 2D: LON LAT
# defining the scales of the data at the parent level of the file for 2D coordinates
lons = data['lons']
lats = data['lats']
lons_dim = data['lons_map']
lats_dim = data['lats_map']
rows_ds = len(lats)
cols_ds = len(lons)
fid.createDimension(lats_dim, rows_ds)
fid.createDimension(lons_dim, cols_ds)
# defining the lon lat datasets
# Longitude
properties_londata = copy.deepcopy(properties_data)
#properties_londata.name = 'longitude'
properties_londata.name = lons_dim
attr_name = ['_CoordinateAxisType','units','long_name','standard_name']
attr_value = ['Lon','degrees_east','longitude','longitude']
properties_londata.attribute = expand_attrdict(properties_londata.attribute, attr_name, attr_value)
properties_londata.dims = [lons_dim]
data_lon = np.array(lons)
write_dataset(fid,data_lon,properties_londata)
# latitude
properties_latdata = copy.deepcopy(properties_data)
#properties_latdata.name = 'latitude'
properties_latdata.name =lats_dim
attr_name = ['_CoordinateAxisType','units','long_name','standard_name']
attr_value = ['Lat','degrees_north','latitude','latitude']
#attr_name = ['_CoordinateAxisType','units','long_name','standard_name','bounds']
#attr_value = ['Lat','degrees_north','latitude','latitude',lats_dim+'_bnds']
properties_latdata.attribute = expand_attrdict(properties_latdata.attribute, attr_name, attr_value)
data_lat = np.array(lats)
properties_latdata.dims = [lats_dim]
write_dataset(fid,data_lat,properties_latdata)
## ADD 3D if needed: HGT
if properties_data.name=="GEOCOOR3":
# defining the scales of the data at the parent level of the file for 3D coordinate
hgts = data['hgts']
hgts_dim = data['hgts_map']
vert_ds = len(hgts)
fid.createDimension(hgts_dim, vert_ds)
# heights
properties_hgtdata = copy.deepcopy(properties_data)
#properties_hgtdata.name = 'heights'
properties_hgtdata.name = hgts_dim
attr_name = ['_CoordinateAxisType','units','long_name','standard_name','positive']
attr_value = ['Lev','meter','height','height','up']
properties_hgtdata.attribute = expand_attrdict(properties_hgtdata.attribute, attr_name, attr_value)
data_hgt = np.array(hgts)
properties_hgtdata.dims = [hgts_dim]
write_dataset(fid,data_hgt,properties_hgtdata)
## POLYGON NEEDS special manipulation compared to raster datasets
elif properties_data.data_type is not None and properties_data.data_type.lower()=="polygon":
# number of polygons corresponds to the length of the list
n_poly = len(data)
        # for now let's set the char dimension to the length of the first poly
n_char = len(list(data[0]))
# creating the dimensions for the netcdf
fid_parent.createDimension('wkt_length',n_char)
fid_parent.createDimension('wkt_count',n_poly)
dset = fid_parent.createVariable(name,'S1',('wkt_count','wkt_length'))
# formatting the string as an array of single char
        # fill data with a character at each position of the polygon string
for poly_i in range(n_poly):
polygon_i = list(data[poly_i])
data_temp = []
data_temp = np.empty((len(polygon_i),),'S1')
for n in range(len(polygon_i)):
data_temp[n] = polygon_i[n]
dset[poly_i] = data_temp
# setting the attribute
if properties_data.attribute is not None and dset is not None:
# for CF compliance make sure few attributes are provided
properties_data = CF_attribute_compliance(properties_data,name)
add_attributes(dset,properties_data.attribute)
# adding the crs information for the polygon
crs_name = properties_data.crs_name
dset2 = fid_parent.createVariable(crs_name, 'i4')
crs_attributes = properties_data.crs_attribute
if crs_attributes is not None:
# getting the EPSG code and update corresponding field if needed
projectionRef = None
for crs_attribute in crs_attributes:
crs_attribute_name = extract_key(crs_attribute,"name")
crs_attribute_value = extract_key(crs_attribute,"value")
if crs_attribute_name.lower() == "spatial_ref":
if isinstance(crs_attribute_value,int):
ref = osr.SpatialReference()
ref.ImportFromEPSG(crs_attribute_value)
projectionRef = ref.ExportToWkt()
if projectionRef is not None:
                # update the attribute information
attr_name = ['spatial_ref']
attr_value = [projectionRef]
crs_attributes = expand_attrdict(crs_attributes, attr_name, attr_value)
# ensuring the crs is CF compliant
crs_attributes = CF_attribute_compliance(crs_attributes,crs_name)
# setting the variable
add_attributes(dset2,crs_attributes)
# setting the global attributes
global_attribute = properties_data.global_attribute
add_attributes(fid_parent,global_attribute)
else:
# for CF compliance make sure few attributes are provided
properties_data = CF_attribute_compliance(properties_data,name)
# write the dataset
write_dataset(fid,data,properties_data)
def CF_attribute_compliance(properties_data,name):
"""
Ensuring that few CF attributes are added
"""
# try to see if the attribute list is given directly or if it is part of a class
class_flag = False
try:
data_attribute = properties_data.attribute
class_flag = True
except:
data_attribute = properties_data
# load all current attributes
CF_current_dict = {}
if data_attribute is not None:
for attribute in data_attribute:
CF_current_dict[extract_key(attribute,"name")] = extract_key(attribute,"value")
# ensure the required CF attributes are present
CF_missing_attr_name = []
CF_missing_attr_value = []
for CF_key in ["long_name","standard_name"]:
try:
CF_current_dict[CF_key]
except:
try:
CF_missing_attr_name.append(CF_key)
CF_missing_attr_value.append(name)
except:
pass
if len(CF_missing_attr_name)>0:
if class_flag:
properties_data.attribute = expand_attrdict(properties_data.attribute, CF_missing_attr_name, CF_missing_attr_value)
else:
properties_data = expand_attrdict(properties_data,CF_missing_attr_name, CF_missing_attr_value)
return properties_data
def add_attributes(fid,attributes):
"""
Adding attributes to a group/dataset
"""
# looping over the attributes
if attributes is not None:
for attribute in attributes:
attribute_name = extract_key(attribute,"name")
attribute_value = extract_key(attribute,"value")
# make sure the strings are correctly encoded
if isinstance(attribute_value, str):
attribute_value = attribute_value.encode('ascii')
setattr(fid, attribute_name, attribute_value)
def data_loading(filename,out_data_type=None,data_band=None):
"""
GDAL READER of the data
filename: the gdal readable file that needs to be loaded
out_data_type: the datatype of the output data, default is original
out_data_res: the resolution of the output data, default is original
data_band: the band that needs to be loaded, default is all
"""
# converting to the absolute path
filename = os.path.abspath(filename)
if not os.path.isfile(filename):
logger.info(filename + " does not exist")
out_data = None
return out_data
# open the GDAL file and get typical data information
try:
data = gdal.Open(filename, gdal.GA_ReadOnly)
except:
logger.info(filename + " is not a gdal supported file")
out_data = None
return out_data
# loading the requested band or by default all
if data_band is not None:
raster = data.GetRasterBand(data_band)
out_data = raster.ReadAsArray()
else:
# load the data
out_data = data.ReadAsArray()
# getting the gdal transform and projection
geoTrans = str(data.GetGeoTransform())
projectionRef = str(data.GetProjection())
# getting the no-data value
try:
NoData = data.GetNoDataValue()
logger.info(NoData)
except:
NoData = None
    # change the datatype if provided
if out_data_type is not None:
# changing the format if needed
out_data = out_data.astype(dtype=out_data_type)
return out_data, geoTrans,projectionRef, NoData
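# Illustrative usage of data_loading (the file name is hypothetical): load band 1
# of a GDAL-readable raster as float32 together with its transform, projection
# and no-data value:
#   arr, gt, proj, nodata = data_loading('example_raster.tif', out_data_type='float32', data_band=1)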
def extract_key(data_dict,key):
#logger.info(data_dict)
#logger.info(key)
if key in data_dict:
dict_value = data_dict[key]
# convert the chunks string to a tuple
if key=="chunks":
dict_value = tuple(map(int,dict_value.split(",")))
return dict_value
else:
return None
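# Illustrative example of the special "chunks" handling above (made-up dicts):
#   extract_key({'chunks': '128,128'}, 'chunks')      # -> (128, 128)
#   extract_key({'name': 'unwrappedPhase'}, 'band')   # -> None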
def createParser():
'''
Create command line parser.
'''
    import argparse
    parser = argparse.ArgumentParser(description='Package standard product content into a netCDF file')
parser.add_argument('-i', '--input', dest='filename', type=str, required=True, help='Input json file to be used for packing')
return parser
def cmdLineParse(iargs=None):
'''
Command line parser.
'''
parser = createParser()
return parser.parse_args(args = iargs)
if __name__ == '__main__':
'''
Main driver.
'''
# get config json file
cwd = os.getcwd()
filename = os.path.join(cwd, 'tops_groups.json')
# open the file
f = open(filename)
# read the json file with planned netcdf4 structure and put the content in a dictionary
structure = json.load(f, object_pairs_hook=OrderedDict)
# close the file
    f.close()
# set netcdf file
netcdf_outfile = structure["filename"]
# Check for existing netcdf file
if os.path.exists(netcdf_outfile):
logger.info('{0} file already exists'.format(netcdf_outfile))
os.remove(netcdf_outfile)
fid = Dataset(netcdf_outfile, 'w')
# create a variable scale for strings in case these are being generated
fid.createDimension('matchup', None)
# adding the global attributes to the file
try:
global_attribute = structure["global_attribute"]
add_attributes(fid, global_attribute)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
pass
# iterate over the different datasets
try:
for dataset in structure.get("dataset", []):
create_dataset(fid, dataset, fid_parent=fid)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
pass
# iterate over the different groups
try:
for group in structure.get("group", []):
create_group(fid, group, fid_parent=fid)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
pass
# close the file
fid.close()
logger.info('Done with packaging')
| 36.925446
| 191
| 0.646535
|
67639e6977583656f7d2ba729059d929c3c5c9d1
| 122
|
py
|
Python
|
QAQ/webansi/views.py
|
Brave01/ang
|
767d964505297d41edb464020781ed312ffbd862
|
[
"MIT"
] | null | null | null |
QAQ/webansi/views.py
|
Brave01/ang
|
767d964505297d41edb464020781ed312ffbd862
|
[
"MIT"
] | null | null | null |
QAQ/webansi/views.py
|
Brave01/ang
|
767d964505297d41edb464020781ed312ffbd862
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def home(request):
return render(request,'home.html')
# Create your views here.
| 17.428571
| 38
| 0.745902
|
c874fe0ad354c2cefbd73604b0b167cbd1ba9ef9
| 1,120
|
py
|
Python
|
head_first_design_patterns/factory/factory_method/main.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
head_first_design_patterns/factory/factory_method/main.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
head_first_design_patterns/factory/factory_method/main.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
"""
Notes:
- The method is defined in an abstract way, so it can be specialized with
what varies (object creation)
- Store classes inherit from abstract classes
- pass the flavour (str) to a factory and wait for a pizza
"""
from pizza import Pizza
from pizza_store import ChicagoPizzaStore, NYPizzaStore
ny_store: NYPizzaStore = NYPizzaStore()
chicago_store: ChicagoPizzaStore = ChicagoPizzaStore()
pizza: Pizza = ny_store.order_pizza("cheese")
print(f"Ethan ordered a {pizza.get_name()}")
pizza = chicago_store.order_pizza("cheese")
print(f"Joel ordered a {pizza.get_name()}")
pizza = ny_store.order_pizza("clam")
print(f"Ethan ordered a {pizza.get_name()}")
pizza = chicago_store.order_pizza("clam")
print(f"Joel ordered a {pizza.get_name()}")
pizza = ny_store.order_pizza("pepperoni")
print(f"Ethan ordered a {pizza.get_name()}")
pizza = chicago_store.order_pizza("pepperoni")
print(f"Joel ordered a {pizza.get_name()}")
pizza = ny_store.order_pizza("veggie")
print(f"Ethan ordered a {pizza.get_name()}")
pizza = chicago_store.order_pizza("veggie")
print(f"Joel ordered a {pizza.get_name()}")
| 30.27027
| 77
| 0.744643
|
bf4c7a8b33dc1fb82c643e4398195568ab2e1881
| 2,135
|
py
|
Python
|
services/web/manage.py
|
nicehorse06/flask-movie-list-on-docker
|
b7c549ff086cc67ef2eb05472cc5307a773050f5
|
[
"MIT"
] | 1
|
2022-03-12T17:22:42.000Z
|
2022-03-12T17:22:42.000Z
|
services/web/manage.py
|
nicehorse06/flask-movie-list-on-docker
|
b7c549ff086cc67ef2eb05472cc5307a773050f5
|
[
"MIT"
] | null | null | null |
services/web/manage.py
|
nicehorse06/flask-movie-list-on-docker
|
b7c549ff086cc67ef2eb05472cc5307a773050f5
|
[
"MIT"
] | null | null | null |
import click
from flask.cli import FlaskGroup
from project import app, db, User, Movie
cli = FlaskGroup(app)
@cli.command("forge")
def forge():
"""Generate fake data."""
db.create_all()
    # the two global variables were moved into this function
name = 'Jimmy Ma'
movies = [
{'title': 'My Neighbor Totoro', 'year': '1988'},
{'title': 'Dead Poets Society', 'year': '1989'},
{'title': 'A Perfect World', 'year': '1993'},
{'title': 'Leon', 'year': '1994'},
{'title': 'Mahjong', 'year': '1996'},
{'title': 'Swallowtail Butterfly', 'year': '1996'},
{'title': 'King of Comedy', 'year': '1999'},
{'title': 'Devils on the Doorstep', 'year': '1999'},
{'title': 'WALL-E', 'year': '2008'},
{'title': 'The Pork of Music', 'year': '2012'},
]
user = User(name=name)
db.session.add(user)
for m in movies:
movie = Movie(title=m['title'], year=m['year'])
db.session.add(movie)
db.session.commit()
click.echo('Done.')
# define a flask command that can re-initialize the db data; the commands are:
'''
$ flask initdb
or
$ flask initdb --drop
'''
@cli.command()  # register as a command
@click.option('--drop', is_flag=True, help='Create after drop.')  # define the option
def initdb(drop):
"""Initialize the database."""
    if drop:  # check whether the --drop option was given
db.drop_all()
db.create_all()
    click.echo('Initialized database.')  # print a status message
# define the command that creates the admin account; option() is used to pass in the username and password; the command is "flask admin"
@cli.command()
@click.option('--username', prompt=True, help='The username used to login.')
@click.option('--password', prompt=True, hide_input=True, confirmation_prompt=True, help='The password used to login.')
def admin(username, password):
"""Create user."""
db.create_all()
user = User.query.first()
if user is not None:
click.echo('Updating user...')
user.username = username
        user.set_password(password)  # set the password
else:
click.echo('Creating user...')
user = User(username=username, name='Admin')
        user.set_password(password)  # set the password
db.session.add(user)
    db.session.commit()  # commit the database session
click.echo('Done.')
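# Example invocations of the commands above (mirroring the initdb note above);
# "flask admin" prompts for the username and password because of prompt=True:
'''
$ flask forge
$ flask admin
'''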
if __name__ == "__main__":
cli()
| 25.722892
| 119
| 0.592974
|
327d93b421461b94a256e7f38a743b7c4c9c6a46
| 9,954
|
py
|
Python
|
django/views/i18n.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | 3
|
2016-07-08T23:49:32.000Z
|
2018-04-15T22:55:01.000Z
|
django/views/i18n.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | 27
|
2017-02-05T15:57:04.000Z
|
2018-04-15T22:57:26.000Z
|
django/views/i18n.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | null | null | null |
import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.utils import importlib
from django.utils.translation import check_for_language, activate, to_locale, get_language
from django.utils.text import javascript_quote
from django.utils.encoding import smart_unicode
from django.utils.formats import get_format_modules
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.REQUEST.get('next', None)
if not next:
next = request.META.get('HTTP_REFERER', None)
if not next:
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session['django_language'] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
return response
def get_formats():
"""
Returns all formats strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
try:
result[attr] = getattr(module, attr)
except AttributeError:
pass
src = []
for k, v in result.items():
if isinstance(v, (basestring, int)):
src.append("formats['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(smart_unicode(v))))
elif isinstance(v, (tuple, list)):
v = [javascript_quote(smart_unicode(value)) for value in v]
src.append("formats['%s'] = ['%s'];\n" % (javascript_quote(k), "', '".join(v)))
return ''.join(src)
NullSource = """
/* gettext identity library */
function gettext(msgid) { return msgid; }
function ngettext(singular, plural, count) { return (count == 1) ? singular : plural; }
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) { return msgid; }
function npgettext(context, singular, plural, count) { return (count == 1) ? singular : plural; }
"""
LibHead = """
/* gettext library */
var catalog = new Array();
"""
LibFoot = """
function gettext(msgid) {
var value = catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
}
function ngettext(singular, plural, count) {
value = catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[pluralidx(count)];
}
}
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) {
var value = gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
}
function npgettext(context, singular, plural, count) {
var value = ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = ngettext(singular, plural, count);
}
return value;
}
"""
LibFormatHead = """
/* formatting library */
var formats = new Array();
"""
LibFormatFoot = """
function get_format(format_type) {
var value = formats[format_type];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return value;
}
}
"""
SimplePlural = """
function pluralidx(count) { return (count == 1) ? 0 : 1; }
"""
InterPolate = r"""
function interpolate(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
}
"""
PluralIdx = r"""
function pluralidx(n) {
var v=%s;
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
}
"""
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
src = [NullSource, InterPolate, LibFormatHead, get_formats(), LibFormatFoot]
return http.HttpResponse(''.join(src), 'text/javascript')
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
if request.GET:
if 'language' in request.GET:
if check_for_language(request.GET['language']):
activate(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, basestring):
packages = packages.split('+')
packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
default_locale = to_locale(settings.LANGUAGE_CODE)
locale = to_locale(get_language())
t = {}
paths = []
en_catalog_missing = False
# first load all english languages files for defaults
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(p.__file__), 'locale')
paths.append(path)
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
# 'en' catalog was missing.
if locale.startswith('en'):
# If 'en' is the selected language this would cause issues
# later on if default_locale is something other than 'en'.
en_catalog_missing = True
# Otherwise it is harmless.
pass
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the flag en_catalog_missing has been set, the currently
# selected language is English but it doesn't have a translation
# catalog (presumably due to being the language translated from).
# If that is the case, a wrong language catalog might have been
# loaded in the previous step. It needs to be discarded.
if en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
src = [LibHead]
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':',1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=',1)[1]
src.append(PluralIdx % plural)
else:
src.append(SimplePlural)
csrc = []
pdict = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, basestring):
csrc.append("catalog['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(v)))
elif isinstance(k, tuple):
if k[0] not in pdict:
pdict[k[0]] = k[1]
else:
pdict[k[0]] = max(k[1], pdict[k[0]])
csrc.append("catalog['%s'][%d] = '%s';\n" % (javascript_quote(k[0]), k[1], javascript_quote(v)))
else:
raise TypeError(k)
csrc.sort()
for k, v in pdict.items():
src.append("catalog['%s'] = [%s];\n" % (javascript_quote(k), ','.join(["''"]*(v+1))))
src.extend(csrc)
src.append(LibFoot)
src.append(InterPolate)
src.append(LibFormatHead)
src.append(get_formats())
src.append(LibFormatFoot)
src = ''.join(src)
return http.HttpResponse(src, 'text/javascript')
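# Example hook-up of this view in a project's urls.py, using the old
# string-based urlconf style contemporary with this module (the package name
# is illustrative only):
#
#   js_info_dict = {'packages': ('your.app.package',)}
#   urlpatterns = patterns('',
#       (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),
#   )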
| 35.173145
| 125
| 0.599156
|
df491f0326ac5912e43e5da517ef7f8aca7a47cd
| 835
|
py
|
Python
|
stock/stockApi/utils/parsers/common.py
|
minplemon/stockApi
|
a9fd5310796b5efa15aaf66357eb778c97ab553a
|
[
"MIT"
] | null | null | null |
stock/stockApi/utils/parsers/common.py
|
minplemon/stockApi
|
a9fd5310796b5efa15aaf66357eb778c97ab553a
|
[
"MIT"
] | 2
|
2019-06-28T10:03:10.000Z
|
2019-06-28T10:03:12.000Z
|
stock/stockApi/utils/parsers/common.py
|
minplemon/stockApi
|
a9fd5310796b5efa15aaf66357eb778c97ab553a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-05-18 10:08
# @Author : minp
# @contact : king101125s@gmail.com
# @Site :
# @File : common.py
# @Software: PyCharm
from flask_restful import reqparse
#0-----通用请求参数----------#
request_common = reqparse.RequestParser()
request_common.add_argument(
'api_name', dest='api_name',
type=str, location='json',
required=False, help='This is api_name',
)
request_common.add_argument(
'token', dest='token',
type=str, location='json',
required=False, help='This is token',
)
request_common.add_argument(
'params', dest='params',
type=dict, location='json',
required=False, help='This is params',
)
request_common.add_argument(
'fields', dest='fields',
type=dict, location='json',
required=False, help='This is fields',
)
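# Illustrative use of request_common inside a flask_restful Resource (the class
# name is hypothetical; parse_args() fills each declared dest from the JSON body):
#
#   class CommonApi(Resource):
#       def post(self):
#           args = request_common.parse_args()
#           return {'api_name': args.api_name, 'params': args.params}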
| 21.973684
| 44
| 0.653892
|
16413b915335ee395041354c308266248261e913
| 427
|
py
|
Python
|
pretrain.py
|
openpipes/PolicyReader
|
168c09a638c2fe81b2daf0c36c3e4afb37975358
|
[
"Apache-2.0"
] | 2
|
2019-12-06T05:27:51.000Z
|
2019-12-16T09:28:41.000Z
|
pretrain.py
|
openpipes/policy-reader
|
168c09a638c2fe81b2daf0c36c3e4afb37975358
|
[
"Apache-2.0"
] | null | null | null |
pretrain.py
|
openpipes/policy-reader
|
168c09a638c2fe81b2daf0c36c3e4afb37975358
|
[
"Apache-2.0"
] | 2
|
2019-08-21T07:42:59.000Z
|
2019-08-21T07:55:32.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@theme: pretrained proceeding
@author: mario
"""
""" Pretrain
Run this script based on the materials from ./src/ , and generate pre-trained
models and indexings.
Structure
Create Document -> Update Document -> Index Document & Vocabulary
-> Entity Extraction -> Index Entity
"""
from .type import *
from .parser import Parser
| 19.409091
| 79
| 0.63466
|
defbbcd0e4ba45994c78a063342a326fe1177343
| 1,153
|
py
|
Python
|
gui/company_list.py
|
keremkoseoglu/Kifu
|
bed7a15f71e2345c654b1adab07a5edecdbae342
|
[
"MIT"
] | null | null | null |
gui/company_list.py
|
keremkoseoglu/Kifu
|
bed7a15f71e2345c654b1adab07a5edecdbae342
|
[
"MIT"
] | 82
|
2020-06-25T09:45:01.000Z
|
2022-03-31T09:35:31.000Z
|
gui/company_list.py
|
keremkoseoglu/Kifu
|
bed7a15f71e2345c654b1adab07a5edecdbae342
|
[
"MIT"
] | null | null | null |
""" Company list window """
import tkinter
import tkinter.ttk
from typing import List
from gui.company_listbox import CompanyListbox
from model.company import Company
class CompanyList(tkinter.Toplevel):
""" Company list window """
_WINDOW_WIDTH = 200
_WINDOW_HEIGHT = 250
def __init__(self, close_handler, companies: List[Company] = None):
# Initialization
self._close_handler = close_handler
tkinter.Toplevel.__init__(self)
self.wm_geometry(str(self._WINDOW_WIDTH) + "x" + str(self._WINDOW_HEIGHT))
# Listbox
self._listbox = CompanyListbox(self, 0, 0, companies=companies)
# Buttons
print_button = tkinter.Button(self, text="Select", command=self._company_selected)
print_button.place(x=0, y=200)
def _company_selected(self):
obj_array = []
selected_companies = self._listbox.selected_company_names
for selected_company in selected_companies:
company_obj = Company(selected_company)
obj_array.append(company_obj)
if len(obj_array) <= 0:
return
self._close_handler(obj_array)
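# Illustrative usage (the handler below is hypothetical): open the window and
# receive the selected Company objects through the close handler.
#
#   def on_companies_selected(companies):
#       print([c.name for c in companies])   # assumes Company exposes a .name attribute
#
#   CompanyList(on_companies_selected)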
| 30.342105
| 90
| 0.680833
|
5a40b93bbe0cdd8d53adb114ee718d57a43ee370
| 2,647
|
py
|
Python
|
logger.py
|
Chenxuey20/Char2Prosody
|
e91206337b60b9ca43cd61723bdcbe580682df33
|
[
"BSD-3-Clause"
] | 1
|
2021-12-28T12:12:05.000Z
|
2021-12-28T12:12:05.000Z
|
logger.py
|
Chenxuey20/Char2Prosody
|
e91206337b60b9ca43cd61723bdcbe580682df33
|
[
"BSD-3-Clause"
] | null | null | null |
logger.py
|
Chenxuey20/Char2Prosody
|
e91206337b60b9ca43cd61723bdcbe580682df33
|
[
"BSD-3-Clause"
] | 1
|
2022-02-16T02:29:25.000Z
|
2022-02-16T02:29:25.000Z
|
import random
import torch
from torch.utils.tensorboard import SummaryWriter
from plotting_utils import plot_alignment_to_numpy, plot_spectrogram_to_numpy
from plotting_utils import plot_gate_outputs_to_numpy
class Tacotron2Logger(SummaryWriter):
def __init__(self, logdir):
super(Tacotron2Logger, self).__init__(logdir)
def log_training(self, reduced_loss, reduced_mel_loss, reduced_gate_loss, reduced_select_loss, grad_norm, learning_rate, duration,
iteration):
self.add_scalar("training.loss", reduced_loss, iteration)
self.add_scalar("training.mel_loss", reduced_mel_loss, iteration)
self.add_scalar("training.gate_loss", reduced_gate_loss, iteration)
self.add_scalar("training.select_loss", reduced_select_loss, iteration)
self.add_scalar("grad.norm", grad_norm, iteration)
self.add_scalar("learning.rate", learning_rate, iteration)
self.add_scalar("duration", duration, iteration)
def log_validation(self, reduced_loss, reduced_mel_loss, reduced_gate_loss, reduced_select_loss, model, y, y_pred, iteration):
self.add_scalar("validation.loss", reduced_loss, iteration)
self.add_scalar("validation.mel_loss", reduced_mel_loss, iteration)
self.add_scalar("validation.gate_loss", reduced_gate_loss, iteration)
self.add_scalar("validation.select_loss", reduced_select_loss, iteration)
_, mel_outputs, gate_outputs, alignments, _ = y_pred
mel_targets, gate_targets, _ = y
# plot distribution of parameters
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
self.add_histogram(tag, value.data.cpu().numpy(), iteration)
# plot alignment, mel target and predicted, gate target and predicted
idx = random.randint(0, alignments.size(0) - 1)
self.add_image(
"alignment",
plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
iteration, dataformats='HWC')
self.add_image(
"mel_target",
plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.add_image(
"mel_predicted",
plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.add_image(
"gate",
plot_gate_outputs_to_numpy(
gate_targets[idx].data.cpu().numpy(),
torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
iteration, dataformats='HWC')
| 48.127273
| 134
| 0.673593
|
b4d32e48a756ba0f4c4deae6aa0a023766f338b7
| 2,441
|
py
|
Python
|
scripts/mysql_util.py
|
wilson-lauw/azkaban
|
3de6d2879638b678c5621e0fb640f9c0ba73de97
|
[
"Apache-2.0"
] | 3
|
2019-12-19T00:04:36.000Z
|
2020-05-07T02:54:56.000Z
|
scripts/mysql_util.py
|
wilson-lauw/azkaban
|
3de6d2879638b678c5621e0fb640f9c0ba73de97
|
[
"Apache-2.0"
] | null | null | null |
scripts/mysql_util.py
|
wilson-lauw/azkaban
|
3de6d2879638b678c5621e0fb640f9c0ba73de97
|
[
"Apache-2.0"
] | 3
|
2018-03-15T04:54:50.000Z
|
2019-07-15T06:33:58.000Z
|
#!/usr/bin/python3
import MySQLdb
import time
import traceback
max_retries = 3
retries_interval = 10
def get_db_cur(host, user, passwd, db, dictCursor=False):
db = MySQLdb.connect(
host=host,
user=user,
passwd=passwd,
db=db,
charset='utf8'
)
if dictCursor:
cur = db.cursor(MySQLdb.cursors.DictCursor)
else:
cur = db.cursor()
cur.connection.autocommit(True)
return db, cur
def mysql_fetch(sql, host, user, passwd, db, dictCursor=True):
retry_counter = 0
while retry_counter < max_retries:
try:
            conn, cur = get_db_cur(host, user, passwd, db, dictCursor)  # keep the db name intact for retries
            print('executing mysql statement ' + sql)
            start = time.time()
            cur.execute(sql)
            end = time.time()
            data = cur.fetchall()
            print('Elapsed: ' + str((end-start) * 1000) + ' ms')
            cur.close()
            conn.close()
return data
except Exception as ex:
retry_counter += 1
print('MySQL Fetch exception', ex)
print('Try number ' + str(retry_counter))
print(traceback.format_exc())
time.sleep(retries_interval)
raise Exception('MySQL fetch failed')
def mysql_execute(sql, host, user, passwd, db):
success = False
retry_counter = 0
while retry_counter < max_retries and not success:
retry_counter += 1
try:
            conn, cur = get_db_cur(host, user, passwd, db)  # keep the db name intact for retries
            if type(sql) == type([]):
                for sta in sql:
                    print('executing mysql statement ' + sta)
                    start = time.time()
                    cur.execute(sta)
                    end = time.time()
                    print('Elapsed: ' + str((end-start) * 1000) + ' ms')
            else:
                print('executing mysql statement ' + sql)
                start = time.time()
                cur.execute(sql)
                end = time.time()
                print('Elapsed: ' + str((end-start) * 1000) + ' ms')
            cur.close()
            conn.close()
success = True
except Exception as ex:
print('MySQL Execute exception', ex)
print('Try number ' + str(retry_counter))
print(traceback.format_exc())
time.sleep(retries_interval)
if not success:
        raise Exception('MySQL execute failed')
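# Illustrative usage (host, credentials and SQL below are placeholders):
#   rows = mysql_fetch('SELECT 1', host='localhost', user='azkaban',
#                      passwd='secret', db='azkaban')
#   mysql_execute(['SET @x = 1', 'SELECT @x'], 'localhost', 'azkaban', 'secret', 'azkaban')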
| 29.409639
| 72
| 0.528472
|
1c6cf252c9fd82c508b7e98eff5308b32a2af2c6
| 663
|
py
|
Python
|
setup.py
|
rjauquet/django-searchable
|
dc3708e9ee3e35ebbf35798eaabd61aef2069155
|
[
"MIT"
] | 18
|
2018-10-29T06:42:42.000Z
|
2021-08-31T10:41:31.000Z
|
setup.py
|
rjauquet/django-search
|
dc3708e9ee3e35ebbf35798eaabd61aef2069155
|
[
"MIT"
] | 5
|
2020-02-11T22:35:34.000Z
|
2020-07-17T18:54:05.000Z
|
setup.py
|
rjauquet/django-searchable
|
dc3708e9ee3e35ebbf35798eaabd61aef2069155
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.rst", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='django_searchable',
packages=setuptools.find_packages(),
install_requires=[
'Django>=2.x',
'psycopg2-binary>=2.7.4',
],
long_description=long_description,
version='0.1.2',
description='Easy FTS with Django and PostgreSQL',
author='Rob Ervin Jauquet',
author_email='rjauquet@gmail.com',
url='https://github.com/rjauquet/django-searchable',
download_url='https://github.com/rjauquet/django-searchable/archive/0.1.2.tar.gz',
keywords=['search', 'searchable', 'fts'],
classifiers=[],
)
| 28.826087
| 86
| 0.669683
|
46a70abb6d242ef75244c8dcb7b10e140cc383ab
| 1,154
|
py
|
Python
|
py/pypomcpp/example.py
|
tomatenbrei/pomcpp
|
55522748369bc167420f3ca5b0ecde314ca2fee3
|
[
"MIT"
] | null | null | null |
py/pypomcpp/example.py
|
tomatenbrei/pomcpp
|
55522748369bc167420f3ca5b0ecde314ca2fee3
|
[
"MIT"
] | 2
|
2020-06-30T12:01:51.000Z
|
2021-05-14T13:57:48.000Z
|
py/pypomcpp/example.py
|
tomatenbrei/pomcpp
|
55522748369bc167420f3ca5b0ecde314ca2fee3
|
[
"MIT"
] | 2
|
2020-06-30T10:23:43.000Z
|
2021-08-01T17:24:08.000Z
|
import pommerman
import pommerman.agents as agents
from pommerman.agents.simple_agent import SimpleAgent
from pypomcpp.cppagent import CppAgent
from util import ffa_evaluate
from shutil import copyfile
lib_path = "./Release/libpomcpp.so"
def create_lib_copy():
if hasattr(create_lib_copy, "calls"):
create_lib_copy.calls += 1
else:
create_lib_copy.calls = 0
local_lib_path = f"./local_lib_copy_{create_lib_copy.calls}.so"
copyfile(lib_path, local_lib_path)
return local_lib_path
agent_list = [
#agents.SimpleAgent(),
#agents.SimpleAgent(),
#agents.SimpleAgent(),
CppAgent(create_lib_copy(), "SimpleAgent", seed=14, print_json=False),
CppAgent(create_lib_copy(), "SimpleAgent", seed=15),
CppAgent(create_lib_copy(), "SimpleAgent", seed=16),
CppAgent(create_lib_copy(), "SimpleAgent", seed=17),
]
# Make the "Free-For-All" environment using the agent list
env = pommerman.make('PommeFFACompetition-v0', agent_list)
use_env_state = False
if use_env_state:
for a in agent_list:
if isinstance(a, CppAgent):
a.use_env_state(env)
ffa_evaluate(env, 10, True, False)
| 28.146341
| 74
| 0.72617
|
afada1f076aac26513f28cf7550ab63362e93394
| 392
|
py
|
Python
|
ros/build/twist_controller/catkin_generated/pkg.installspace.context.pc.py
|
ranamanish/Capstone
|
44fd6f1fb28d7c88829c52713684b87830a8f138
|
[
"MIT"
] | null | null | null |
ros/build/twist_controller/catkin_generated/pkg.installspace.context.pc.py
|
ranamanish/Capstone
|
44fd6f1fb28d7c88829c52713684b87830a8f138
|
[
"MIT"
] | null | null | null |
ros/build/twist_controller/catkin_generated/pkg.installspace.context.pc.py
|
ranamanish/Capstone
|
44fd6f1fb28d7c88829c52713684b87830a8f138
|
[
"MIT"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "twist_controller"
PROJECT_SPACE_DIR = "/home/rana/Work/CarND-Capstone/ros/install"
PROJECT_VERSION = "0.0.0"
| 43.555556
| 68
| 0.711735
|
93c0dcfc0c928361d3d9a9ebc0c5b2fc403ce00e
| 116
|
py
|
Python
|
rand_param_envs/gym/envs/board_game/__init__.py
|
erinaldi/MetaRL
|
6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871
|
[
"MIT"
] | 24
|
2021-03-24T07:14:52.000Z
|
2022-03-17T08:15:44.000Z
|
rand_param_envs/gym/envs/board_game/__init__.py
|
erinaldi/MetaRL
|
6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871
|
[
"MIT"
] | 12
|
2021-02-02T22:53:59.000Z
|
2022-03-12T00:41:30.000Z
|
rand_param_envs/gym/envs/board_game/__init__.py
|
erinaldi/MetaRL
|
6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871
|
[
"MIT"
] | 6
|
2021-04-12T18:49:47.000Z
|
2021-09-07T05:33:22.000Z
|
from rand_param_envs.gym.envs.board_game.go import GoEnv
from rand_param_envs.gym.envs.board_game.hex import HexEnv
| 38.666667
| 58
| 0.862069
|
8885a4601aa5cd3b0aa25be1e361e19bb31136f6
| 27,391
|
py
|
Python
|
sompy.py
|
Mind-The-Data/SOMPY
|
0f03eacd451266f6c776882fbef0034c7d0d34d6
|
[
"Apache-2.0"
] | null | null | null |
sompy.py
|
Mind-The-Data/SOMPY
|
0f03eacd451266f6c776882fbef0034c7d0d34d6
|
[
"Apache-2.0"
] | null | null | null |
sompy.py
|
Mind-The-Data/SOMPY
|
0f03eacd451266f6c776882fbef0034c7d0d34d6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Author: Vahid Moosavi (sevamoo@gmail.com)
# Chair For Computer Aided Architectural Design, ETH Zurich
# Future Cities Lab
# www.vahidmoosavi.com
# Contributor: Sebastian Packmann (sebastian.packmann@gmail.com)
import tempfile
import os
import itertools
import logging
import numpy as np
from time import time
from multiprocessing.dummy import Pool
from multiprocessing import cpu_count
from scipy.sparse import csr_matrix
from sklearn import neighbors
from sklearn.externals.joblib import Parallel, delayed, load, dump
import sys
from .decorators import timeit
from .codebook import Codebook
from .neighborhood import NeighborhoodFactory
from .normalization import NormalizerFactory
class ComponentNamesError(Exception):
pass
class LabelsError(Exception):
pass
class SOMFactory(object):
@staticmethod
def build(data,
mapsize=None,
mask=None,
mapshape='planar',
lattice='rect',
normalization='var',
initialization='pca',
neighborhood='gaussian',
training='batch',
name='sompy',
component_names=None):
"""
:param data: data to be clustered, represented as a matrix of n rows,
as inputs and m cols as input features
:param neighborhood: neighborhood object calculator. Options are:
- gaussian
- bubble
- manhattan (not implemented yet)
- cut_gaussian (not implemented yet)
- epanechicov (not implemented yet)
:param normalization: normalizer object calculator. Options are:
- var
:param mapsize: tuple/list defining the dimensions of the som.
            If a single number is provided, it is considered the number of nodes.
:param mask: mask
:param mapshape: shape of the som. Options are:
- planar
- toroid (not implemented yet)
- cylinder (not implemented yet)
:param lattice: type of lattice. Options are:
- rect
- hexa
:param initialization: method to be used for initialization of the som.
Options are:
- pca
- random
:param name: name used to identify the som
:param training: Training mode (seq, batch)
"""
if normalization:
normalizer = NormalizerFactory.build(normalization)
else:
normalizer = None
neighborhood_calculator = NeighborhoodFactory.build(neighborhood)
return SOM(data, neighborhood_calculator, normalizer, mapsize, mask,
mapshape, lattice, initialization, training, name, component_names)
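# Illustrative usage of the factory (data shape and option values are placeholders):
#   sm = SOMFactory.build(data, mapsize=(20, 20), normalization='var',
#                         initialization='pca', lattice='rect')
#   sm.train(n_job=1, verbose='info', train_rough_len=30, train_finetune_len=60)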
class SOM(object):
def __init__(self,
data,
neighborhood,
normalizer=None,
mapsize=None,
mask=None,
mapshape='planar',
lattice='rect',
initialization='pca',
training='batch',
name='sompy',
component_names=None):
"""
Self Organizing Map
:param data: data to be clustered, represented as a matrix of n rows,
as inputs and m cols as input features
:param neighborhood: neighborhood object calculator.
:param normalizer: normalizer object calculator.
:param mapsize: tuple/list defining the dimensions of the som. If
            a single number is provided, it is considered the number of nodes.
:param mask: mask
:param mapshape: shape of the som.
:param lattice: type of lattice.
:param initialization: method to be used for initialization of the som.
:param name: name used to identify the som
:param training: Training mode (seq, batch)
"""
self._data = normalizer.normalize(data) if normalizer else data
self._normalizer = normalizer
self._dim = data.shape[1]
self._dlen = data.shape[0]
self._dlabel = None
self._bmu = None
self.name = name
self.data_raw = data
self.neighborhood = neighborhood
self.mapshape = mapshape
self.initialization = initialization
self.mask = mask or np.ones([1, self._dim])
mapsize = self.calculate_map_size(lattice) if not mapsize else mapsize
self.codebook = Codebook(mapsize, lattice)
self.training = training
self._component_names = self.build_component_names() if component_names is None else [component_names]
self._distance_matrix = self.calculate_map_dist()
@property
def component_names(self):
return self._component_names
@component_names.setter
def component_names(self, compnames):
if self._dim == len(compnames):
self._component_names = np.asarray(compnames)[np.newaxis, :]
else:
raise ComponentNamesError('Component names should have the same '
'size as the data dimension/features')
def build_component_names(self):
cc = ['Variable-' + str(i+1) for i in range(0, self._dim)]
return np.asarray(cc)[np.newaxis, :]
@property
def data_labels(self):
return self._dlabel
@data_labels.setter
def data_labels(self, labels):
"""
Set labels of the training data, it should be in the format of a list
of strings
"""
if labels.shape == (1, self._dlen):
label = labels.T
elif labels.shape == (self._dlen, 1):
label = labels
elif labels.shape == (self._dlen,):
label = labels[:, np.newaxis]
else:
raise LabelsError('wrong label format')
self._dlabel = label
def build_data_labels(self):
cc = ['dlabel-' + str(i) for i in range(0, self._dlen)]
return np.asarray(cc)[:, np.newaxis]
def calculate_map_dist(self):
"""
Calculates the grid distance, which will be used during the training
steps. It supports only planar grids for the moment
"""
nnodes = self.codebook.nnodes
distance_matrix = np.zeros((nnodes, nnodes))
for i in range(nnodes):
distance_matrix[i] = self.codebook.grid_dist(i).reshape(1, nnodes)
return distance_matrix
@timeit()
def train(self,
n_job=1,
shared_memory=False,
verbose='info',
train_rough_len=None,
train_rough_radiusin=None,
train_rough_radiusfin=None,
train_finetune_len=None,
train_finetune_radiusin=None,
train_finetune_radiusfin=None,
train_len_factor=1,
maxtrainlen=np.Inf):
"""
Trains the som
:param n_job: number of jobs to use to parallelize the traning
:param shared_memory: flag to active shared memory
:param verbose: verbosity, could be 'debug', 'info' or None
        :param train_len_factor: Factor that multiplies the default training lengths (similar to the "training" parameter in the matlab version). (lbugnon)
"""
logging.root.setLevel(
getattr(logging, verbose.upper()) if verbose else logging.ERROR)
logging.info(" Training...")
logging.debug((
"--------------------------------------------------------------\n"
" details: \n"
" > data len is {data_len} and data dimension is {data_dim}\n"
" > map size is {mpsz0},{mpsz1}\n"
" > array size in log10 scale is {array_size}\n"
" > number of jobs in parallel: {n_job}\n"
" -------------------------------------------------------------\n")
.format(data_len=self._dlen,
data_dim=self._dim,
mpsz0=self.codebook.mapsize[0],
mpsz1=self.codebook.mapsize[1],
array_size=np.log10(
self._dlen * self.codebook.nnodes * self._dim),
n_job=n_job))
if self.initialization == 'random':
self.codebook.random_initialization(self._data)
elif self.initialization == 'pca':
self.codebook.pca_linear_initialization(self._data)
self.rough_train(njob=n_job, shared_memory=shared_memory, trainlen=train_rough_len,
radiusin=train_rough_radiusin, radiusfin=train_rough_radiusfin,trainlen_factor=train_len_factor,maxtrainlen=maxtrainlen)
self.finetune_train(njob=n_job, shared_memory=shared_memory, trainlen=train_finetune_len,
radiusin=train_finetune_radiusin, radiusfin=train_finetune_radiusfin,trainlen_factor=train_len_factor,maxtrainlen=maxtrainlen)
logging.debug(
" --------------------------------------------------------------")
logging.info(" Final quantization error: %f" % np.mean(self._bmu[1]))
def _calculate_ms_and_mpd(self):
mn = np.min(self.codebook.mapsize)
max_s = max(self.codebook.mapsize[0], self.codebook.mapsize[1])
if mn == 1:
mpd = float(self.codebook.nnodes*10)/float(self._dlen)
else:
mpd = float(self.codebook.nnodes)/float(self._dlen)
ms = max_s/2.0 if mn == 1 else max_s
return ms, mpd
def rough_train(self, njob=1, shared_memory=False, trainlen=None, radiusin=None, radiusfin=None,trainlen_factor=1,maxtrainlen=np.Inf):
logging.info(" Rough training...")
ms, mpd = self._calculate_ms_and_mpd()
#lbugnon: add maxtrainlen
trainlen = min(int(np.ceil(30*mpd)),maxtrainlen) if not trainlen else trainlen
#print("maxtrainlen %d",maxtrainlen)
#lbugnon: add trainlen_factor
trainlen=int(trainlen*trainlen_factor)
if self.initialization == 'random':
radiusin = max(1, np.ceil(ms/3.)) if not radiusin else radiusin
radiusfin = max(1, radiusin/6.) if not radiusfin else radiusfin
elif self.initialization == 'pca':
radiusin = max(1, np.ceil(ms/8.)) if not radiusin else radiusin
radiusfin = max(1, radiusin/4.) if not radiusfin else radiusfin
self._batchtrain(trainlen, radiusin, radiusfin, njob, shared_memory)
def finetune_train(self, njob=1, shared_memory=False, trainlen=None, radiusin=None, radiusfin=None,trainlen_factor=1,maxtrainlen=np.Inf):
logging.info(" Finetune training...")
ms, mpd = self._calculate_ms_and_mpd()
#lbugnon: add maxtrainlen
if self.initialization == 'random':
trainlen = min(int(np.ceil(50*mpd)),maxtrainlen) if not trainlen else trainlen
radiusin = max(1, ms/12.) if not radiusin else radiusin # from radius fin in rough training
radiusfin = max(1, radiusin/25.) if not radiusfin else radiusfin
elif self.initialization == 'pca':
trainlen = min(int(np.ceil(40*mpd)),maxtrainlen) if not trainlen else trainlen
radiusin = max(1, np.ceil(ms/8.)/4) if not radiusin else radiusin
radiusfin = 1 if not radiusfin else radiusfin # max(1, ms/128)
#print("maxtrainlen %d",maxtrainlen)
#lbugnon: add trainlen_factor
trainlen=int(trainlen_factor*trainlen)
self._batchtrain(trainlen, radiusin, radiusfin, njob, shared_memory)
def _batchtrain(self, trainlen, radiusin, radiusfin, njob=1,
shared_memory=False):
radius = np.linspace(radiusin, radiusfin, trainlen)
if shared_memory:
data = self._data
data_folder = tempfile.mkdtemp()
data_name = os.path.join(data_folder, 'data')
dump(data, data_name)
data = load(data_name, mmap_mode='r')
else:
data = self._data
bmu = None
# X2 is part of euclidean distance (x-y)^2 = x^2 +y^2 - 2xy that we use
# for each data row in bmu finding.
# Since it is a fixed value we can skip it during bmu finding for each
# data point, but later we need it calculate quantification error
fixed_euclidean_x2 = np.einsum('ij,ij->i', data, data)
logging.info(" radius_ini: %f , radius_final: %f, trainlen: %d\n" %
(radiusin, radiusfin, trainlen))
for i in range(trainlen):
t1 = time()
neighborhood = self.neighborhood.calculate(
self._distance_matrix, radius[i], self.codebook.nnodes)
bmu = self.find_bmu(data, njb=njob)
self.codebook.matrix = self.update_codebook_voronoi(data, bmu,
neighborhood)
            #lbugnon: careful! here bmu[1] sometimes turns out negative, and after that things break... something odd is going on here
qerror = (i + 1, round(time() - t1, 3),
                      np.mean(np.sqrt(bmu[1] + fixed_euclidean_x2))) #lbugnon: careful, this threw a warning here, review (sinc commit: 965666d3d4d93bcf48e8cef6ea2c41a018c1cb83 )
#lbugnon
#ipdb.set_trace()
#
logging.info(
" epoch: %d ---> elapsed time: %f, quantization error: %f\n" %
qerror)
if np.any(np.isnan(qerror)):
logging.info("nan quantization error, exit train\n")
#sys.exit("quantization error=nan, exit train")
bmu[1] = np.sqrt(bmu[1] + fixed_euclidean_x2)
self._bmu = bmu
@timeit(logging.DEBUG)
def find_bmu(self, input_matrix, njb=1, nth=1):
"""
Finds the best matching unit (bmu) for each input data from the input
matrix. It does all at once parallelizing the calculation instead of
going through each input and running it against the codebook.
:param input_matrix: numpy matrix representing inputs as rows and
features/dimension as cols
:param njb: number of jobs to parallelize the search
:returns: the best matching unit for each input
"""
dlen = input_matrix.shape[0]
y2 = np.einsum('ij,ij->i', self.codebook.matrix, self.codebook.matrix)
if njb == -1:
njb = cpu_count()
pool = Pool(njb)
chunk_bmu_finder = _chunk_based_bmu_find
def row_chunk(part):
return part * dlen // njb
def col_chunk(part):
return min((part+1)*dlen // njb, dlen)
chunks = [input_matrix[row_chunk(i):col_chunk(i)] for i in range(njb)]
b = pool.map(lambda chk: chunk_bmu_finder(chk, self.codebook.matrix, y2, nth=nth), chunks)
pool.close()
pool.join()
bmu = np.asarray(list(itertools.chain(*b))).T
del b
return bmu
@timeit(logging.DEBUG)
def update_codebook_voronoi(self, training_data, bmu, neighborhood):
"""
Updates the weights of each node in the codebook that belongs to the
bmu's neighborhood.
First finds the Voronoi set of each node. It needs to calculate a
smaller matrix.
        Super fast compared to the classic batch training algorithm; it is based
        on the algorithm implemented in the SOM toolbox for Matlab by Helsinki
        University.
:param training_data: input matrix with input vectors as rows and
vector features as cols
:param bmu: best matching unit for each input data. Has shape of
(2, dlen) where first row has bmu indexes
:param neighborhood: matrix representing the neighborhood of each bmu
:returns: An updated codebook that incorporates the learnings from the
input data
"""
row = bmu[0].astype(int)
col = np.arange(self._dlen)
val = np.tile(1, self._dlen)
P = csr_matrix((val, (row, col)), shape=(self.codebook.nnodes,
self._dlen))
S = P.dot(training_data)
# neighborhood has nnodes*nnodes and S has nnodes*dim
# ---> Nominator has nnodes*dim
nom = neighborhood.T.dot(S)
nV = P.sum(axis=1).reshape(1, self.codebook.nnodes)
denom = nV.dot(neighborhood.T).reshape(self.codebook.nnodes, 1)
new_codebook = np.divide(nom, denom)
return np.around(new_codebook, decimals=6)
def project_data(self, data):
"""
        Projects a data set to a trained SOM. It is based on the nearest
        neighbor search module of scikit-learn, but it is not that fast.
"""
clf = neighbors.KNeighborsClassifier(n_neighbors=1)
labels = np.arange(0, self.codebook.matrix.shape[0])
clf.fit(self.codebook.matrix, labels)
# The codebook values are all normalized
# we can normalize the input data based on mean and std of
# original data
data = self._normalizer.normalize_by(self.data_raw, data)
return clf.predict(data)
def predict_by(self, data, target, k=5, wt='distance'):
# here it is assumed that target is the last column in the codebook
# and data has dim-1 columns
dim = self.codebook.matrix.shape[1]
ind = np.arange(0, dim)
indX = ind[ind != target]
x = self.codebook.matrix[:, indX]
y = self.codebook.matrix[:, target]
n_neighbors = k
clf = neighbors.KNeighborsRegressor(n_neighbors, weights=wt)
clf.fit(x, y)
# The codebook values are all normalized
# we can normalize the input data based on mean and std of
# original data
dimdata = data.shape[1]
if dimdata == dim:
data[:, target] = 0
data = self._normalizer.normalize_by(self.data_raw, data)
data = data[:, indX]
elif dimdata == dim-1:
data = self._normalizer.normalize_by(self.data_raw[:, indX], data)
predicted_values = clf.predict(data)
predicted_values = self._normalizer.denormalize_by(
self.data_raw[:, target], predicted_values)
return predicted_values
def predict(self, x_test, k=5, wt='distance'):
"""
Similar to SKlearn we assume that we have X_tr, Y_tr and X_test. Here
it is assumed that target is the last column in the codebook and data
has dim-1 columns
:param x_test: input vector
:param k: number of neighbors to use
:param wt: method to use for the weights
(more detail in KNeighborsRegressor docs)
:returns: predicted values for the input data
"""
target = self.data_raw.shape[1]-1
x_train = self.codebook.matrix[:, :target]
y_train = self.codebook.matrix[:, target]
clf = neighbors.KNeighborsRegressor(k, weights=wt)
clf.fit(x_train, y_train)
# The codebook values are all normalized
# we can normalize the input data based on mean and std of
# original data
x_test = self._normalizer.normalize_by(
self.data_raw[:, :target], x_test)
predicted_values = clf.predict(x_test)
return self._normalizer.denormalize_by(
self.data_raw[:, target], predicted_values)
def find_k_nodes(self, data, k=5):
from sklearn.neighbors import NearestNeighbors
# we find the k most similar nodes to the input vector
neighbor = NearestNeighbors(n_neighbors=k)
neighbor.fit(self.codebook.matrix)
# The codebook values are all normalized
# we can normalize the input data based on mean and std of
# original data
return neighbor.kneighbors(
self._normalizer.normalize_by(self.data_raw, data))
def bmu_ind_to_xy(self, bmu_ind):
"""
Translates a best matching unit index to the corresponding
matrix x,y coordinates.
:param bmu_ind: node index of the best matching unit
(number of node from top left node)
:returns: corresponding (x,y) coordinate
"""
rows = self.codebook.mapsize[0]
cols = self.codebook.mapsize[1]
# bmu should be an integer between 0 to no_nodes
out = np.zeros((bmu_ind.shape[0], 3))
out[:, 2] = bmu_ind
# integer row/column of the node (the original assigned out[:, 0] twice;
# only the second assignment took effect)
out[:, 0] = bmu_ind // cols
out[:, 1] = bmu_ind % cols
return out.astype(int)
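# --- Editor's sketch (hedged illustration) ---
# The index -> (row, col) mapping above on a hypothetical 3x4 map: node 7 sits
# in row 7 // 4 = 1 and column 7 % 4 = 3.
import numpy as np
cols = 4
bmu_ind = np.array([0, 5, 7, 11])
print(bmu_ind // cols, bmu_ind % cols)   # [0 1 1 2] [0 1 3 3]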
def cluster(self, n_clusters=8, max_iter=300):
'''Cluster the codebook with k-means. max_iter was added as an argument and the codebook is normalized (rather than denormalized) before clustering. -Brian Stampe'''
import sklearn.cluster as clust
km = clust.KMeans(n_clusters=n_clusters, max_iter=max_iter)
cl_labels= km.fit_predict(self._normalizer.normalize_by(self.data_raw,
self.codebook.matrix))
self.cluster_labels = cl_labels
return cl_labels
def predict_probability(self, data, target, k=5):
"""
Predicts probability of the input data to be target
:param data: data to predict; it is assumed that 'target' is the last
column in the codebook, so data should have dim-1 columns
:param target: target to predict probability
:param k: k parameter on KNeighborsRegressor
:returns: probability of data been target
"""
dim = self.codebook.matrix.shape[1]
ind = np.arange(0, dim)
indx = ind[ind != target]
x = self.codebook.matrix[:, indx]
y = self.codebook.matrix[:, target]
clf = neighbors.KNeighborsRegressor(k, weights='distance')
clf.fit(x, y)
# The codebook values are all normalized
# we can normalize the input data based on mean and std of
# original data
dimdata = data.shape[1]
if dimdata == dim:
data[:, target] = 0
data = self._normalizer.normalize_by(self.data_raw, data)
data = data[:, indx]
elif dimdata == dim-1:
data = self._normalizer.normalize_by(self.data_raw[:, indx], data)
weights, ind = clf.kneighbors(data, n_neighbors=k,
return_distance=True)
weights = 1./weights
sum_ = np.sum(weights, axis=1)
weights = weights/sum_[:, np.newaxis]
labels = np.sign(self.codebook.matrix[ind, target])
labels[labels >= 0] = 1
# for positives
pos_prob = labels.copy()
pos_prob[pos_prob < 0] = 0
pos_prob *= weights
pos_prob = np.sum(pos_prob, axis=1)[:, np.newaxis]
# for negatives
neg_prob = labels.copy()
neg_prob[neg_prob > 0] = 0
neg_prob = neg_prob * weights * -1
neg_prob = np.sum(neg_prob, axis=1)[:, np.newaxis]
return np.concatenate((pos_prob, neg_prob), axis=1)
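# --- Editor's sketch (hedged illustration) ---
# The split above is plain inverse-distance weighting: each of the k nearest
# codebook vectors votes with weight 1/distance, the weights are normalized to
# sum to one, and the votes are pooled by the sign of the target column.
# The numbers below are invented for illustration.
import numpy as np
distances = np.array([[0.5, 1.0, 2.0]])       # distances to k=3 neighbors
signs = np.array([[1.0, -1.0, 1.0]])          # sign of the target column per neighbor
w = 1. / distances
w = w / np.sum(w, axis=1)[:, np.newaxis]      # [[0.5714, 0.2857, 0.1429]]
pos = np.sum(w * (signs > 0), axis=1)         # ~0.714
neg = np.sum(w * (signs < 0), axis=1)         # ~0.286
print(pos, neg)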
def node_activation(self, data, target=None, wt='distance'):
weights, ind = None, None
if not target:
clf = neighbors.KNeighborsClassifier(
n_neighbors=self.codebook.nnodes)
labels = np.arange(0, self.codebook.matrix.shape[0])
clf.fit(self.codebook.matrix, labels)
# The codebook values are all normalized
# we can normalize the input data based on mean and std of
# original data
data = self._normalizer.normalize_by(self.data_raw, data)
weights, ind = clf.kneighbors(data)
# Inverse-distance weighting of the neighbors (not a true softmax)
weights = 1./weights
return weights, ind
def calculate_topographic_error(self):
bmus1 = self.find_bmu(self.data_raw, njb=1, nth=1)
bmus2 = self.find_bmu(self.data_raw, njb=1, nth=2)
topographic_error = None
if self.codebook.lattice=="rect":
bmus_gap = np.abs((self.bmu_ind_to_xy(np.array(bmus1[0]))[:, 0:2] - self.bmu_ind_to_xy(np.array(bmus2[0]))[:, 0:2]).sum(axis=1))
topographic_error = np.mean(bmus_gap != 1)
elif self.codebook.lattice=="hexa":
dist_matrix_1 = self.codebook.lattice_distances[bmus1[0].astype(int)].reshape(len(bmus1[0]), -1)
topographic_error = (np.array(
[distances[bmu2] for bmu2, distances in zip(bmus2[0].astype(int), dist_matrix_1)]) > 2).mean()
return(topographic_error)
def calculate_quantization_error(self):
neuron_values = self.codebook.matrix[self.find_bmu(self._data)[0].astype(int)]
quantization_error = np.mean(np.abs(neuron_values - self._data))
return quantization_error
def calculate_map_size(self, lattice):
"""
Calculates the optimal map size for a dataset from the eigenvalues of its
covariance matrix (ported from the Matlab SOM Toolbox).
:param lattice: 'rect' or 'hexa'
:return: map sizes
"""
D = self.data_raw.copy()
dlen = D.shape[0]
dim = D.shape[1]
munits = np.ceil(5 * (dlen ** 0.5))
A = np.full((dim, dim), np.inf)
for i in range(dim):
D[:, i] = D[:, i] - np.mean(D[np.isfinite(D[:, i]), i])
for i in range(dim):
for j in range(dim):
c = D[:, i] * D[:, j]
c = c[np.isfinite(c)]
A[i, j] = sum(c) / len(c)
A[j, i] = A[i, j]
VS = np.linalg.eig(A)
eigval = sorted(VS[0])
if eigval[-1] == 0 or eigval[-2] * munits < eigval[-1]:
ratio = 1
else:
ratio = np.sqrt(eigval[-1] / eigval[-2])
if lattice == "rect":
size1 = min(munits, round(np.sqrt(munits / ratio)))
else:
size1 = min(munits, round(np.sqrt(munits / ratio*np.sqrt(0.75))))
size2 = round(munits / size1)
return [int(size1), int(size2)]
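# --- Editor's sketch (hedged illustration) ---
# The heuristic above in short: total units ~ 5 * sqrt(n_samples), and the
# side ratio is sqrt(lambda_1 / lambda_2) of the two leading eigenvalues of
# the data covariance. A compact restatement on random stand-in data:
import numpy as np
X = np.random.rand(400, 5)
munits = np.ceil(5 * np.sqrt(X.shape[0]))               # ~100 units
eigval = np.sort(np.linalg.eigvalsh(np.cov(X.T)))
ratio = np.sqrt(eigval[-1] / eigval[-2])
size1 = min(munits, round(np.sqrt(munits / ratio)))
size2 = round(munits / size1)
print(int(size1), int(size2))                           # e.g. 10 10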
# Since joblib.delayed uses Pickle, this method needs to be a top level
# method in order to be pickled
# Joblib is working on adding support for cloudpickle or dill which will allow
# class methods to be pickled
# when that support comes out we can move this to the SOM class
def _chunk_based_bmu_find(input_matrix, codebook, y2, nth=1):
"""
Finds the corresponding bmus to the input matrix.
:param input_matrix: a matrix of input data, representing input vectors as
rows and vector features/dimensions as cols.
When parallelizing the search, the input_matrix can be
a sub matrix from the bigger matrix
:param codebook: matrix of weights to be used for the bmu search
:param y2: row-wise squared norms of the codebook, precomputed once so that
each chunk only has to evaluate -2 * codebook . input
"""
dlen = input_matrix.shape[0]
nnodes = codebook.shape[0]
bmu = np.empty((dlen, 2))
# Small batches are noticeably faster for large dlen: big chunks increase
# the memory needed per worker when the search runs in parallel
blen = min(50, dlen)
i0 = 0
while i0+1 <= dlen:
low = i0
high = min(dlen, i0+blen)
i0 = i0+blen
ddata = input_matrix[low:high+1]
d = np.dot(codebook, ddata.T)
d *= -2
d += y2.reshape(nnodes, 1)
bmu[low:high+1, 0] = np.argpartition(d, nth, axis=0)[nth-1]
bmu[low:high+1, 1] = np.partition(d, nth, axis=0)[nth-1]
del ddata
return bmu
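# --- Editor's sketch (hedged illustration) ---
# Why "y2 - 2 * W.x" is enough above: for a fixed input x,
# argmin_w ||w - x||^2 == argmin_w (||w||^2 - 2 w.x), because ||x||^2 is the
# same for every codebook row. Quick check on random stand-in data:
import numpy as np
W = np.random.rand(16, 3)                     # codebook
x = np.random.rand(3)                         # one input vector
y2 = np.einsum('ij,ij->i', W, W)              # ||w||^2 per node, as in find_bmu
bmu_fast = np.argmin(y2 - 2 * W.dot(x))
bmu_brute = np.argmin(((W - x) ** 2).sum(axis=1))
assert bmu_fast == bmu_brute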
| 38.043056
| 171
| 0.598992
|
f4e8ecfcc562dc6cf9b5e71dd4aab11db10bc559
| 1,519
|
py
|
Python
|
fhirclient/r4models/range.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
fhirclient/r4models/range.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
fhirclient/r4models/range.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Range) on 2019-05-07.
# 2019, SMART Health IT.
from . import element
class Range(element.Element):
""" Set of values bounded by low and high.
A set of ordered Quantities defined by a low and high limit.
"""
resource_type = "Range"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.high = None
""" High limit.
Type `Quantity` (represented as `dict` in JSON). """
self.low = None
""" Low limit.
Type `Quantity` (represented as `dict` in JSON). """
super(Range, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Range, self).elementProperties()
js.extend([
("high", "high", quantity.Quantity, False, None, False),
("low", "low", quantity.Quantity, False, None, False),
])
return js
import sys
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
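# --- Editor's note (hedged usage sketch; the JSON values below are invented) ---
# A Range can be built from a plain JSON dictionary, with low/high parsed into
# Quantity elements by the generated base class:
#
#     rng = Range(jsondict={"low": {"value": 1.0, "unit": "mg"},
#                           "high": {"value": 5.0, "unit": "mg"}})
#     print(rng.low.value, rng.high.value)    # -> 1.0 5.0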
| 30.38
| 103
| 0.589203
|
d9cb0b04c815221e50ab523b6a76df4b994d1d19
| 1,491
|
py
|
Python
|
Web Scrapping/api_blogger.py
|
100rabmittal/python-Automations
|
f57bac14478a82fa4014fa795aeb622c0f59d30d
|
[
"MIT"
] | null | null | null |
Web Scrapping/api_blogger.py
|
100rabmittal/python-Automations
|
f57bac14478a82fa4014fa795aeb622c0f59d30d
|
[
"MIT"
] | null | null | null |
Web Scrapping/api_blogger.py
|
100rabmittal/python-Automations
|
f57bac14478a82fa4014fa795aeb622c0f59d30d
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
__author__ = 'sourabh.mittal50@google.com (Sourabh Mittal)'
import sys
from oauth2client import client
from googleapiclient import sample_tools
import requests
import json
class blog:
def svc():
# Authenticate and construct service.
service, flags = sample_tools.init(
sys.argv, 'blogger', 'v3', __doc__, __file__,
scope='https://www.googleapis.com/auth/blogger')
return service
def getUsers(svc):
try:
users = svc.users()
# Retrieve this user's profile information
thisuser = users.get(userId='self').execute()
print('This user\'s display name is: %s' % thisuser['displayName'])
blogs = svc.blogs()
# Retrieve the list of Blogs this user has write privileges on
thisusersblogs = blogs.listByUser(userId='self').execute()
data = []
for blog in thisusersblogs['items']:
data.append('The blog named \'%s\' is at: %s' % (blog['name'], blog['url']))
return data
except client.AccessTokenRefreshError:
return 'The credentials have been revoked or expired, please re-run the application to re-authorize'
def getPosts(svc, data, idno):
posts = svc.posts()
body = {
"kind": "blogger#post",
"id": "3223736704"+str(idno),
"title": "new try",
"content":data
}
posts.insert(blogId='322373670401', body=body, isDraft=True).execute()
return None
| 28.132075
| 106
| 0.637156
|
0861c2ca5912cf26aba21547f30db98ae27ceeef
| 15,285
|
py
|
Python
|
tests/test_check_stake.py
|
luk-kop/verus-stake-notification
|
b93f06f7f30b26bce48cdf87464419a9cbe3d10f
|
[
"MIT"
] | null | null | null |
tests/test_check_stake.py
|
luk-kop/verus-stake-notification
|
b93f06f7f30b26bce48cdf87464419a9cbe3d10f
|
[
"MIT"
] | null | null | null |
tests/test_check_stake.py
|
luk-kop/verus-stake-notification
|
b93f06f7f30b26bce48cdf87464419a9cbe3d10f
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from unittest import mock
from new_stake_script.check_new_stake import StakeTransaction, StakeTransactions, VerusStakeChecker, \
ApiGatewayCognito
def test_process_exist(dummy_process):
"""
GIVEN VerusProcess object
WHEN created VerusProcess object with 'name' attribute that represent existed process
THEN declared process exist
"""
assert dummy_process.status is True
def test_process_exist_directory(dummy_process):
"""
GIVEN VerusProcess object
WHEN created VerusProcess object with 'name' attribute that represent existed process
THEN declared process's base directory is cwd
"""
assert dummy_process.directory == os.getcwd()
def test_process_not_exist(nonexistent_process):
"""
GIVEN VerusProcess object
WHEN created VerusProcess object with 'name' attribute that represent non-existed process
THEN declared process not exist
"""
assert nonexistent_process.status is False
def test_process_not_exist_directory(nonexistent_process):
"""
GIVEN VerusProcess object
WHEN created VerusProcess object with 'name' attribute that represent non-existed process
THEN declared process's base directory is ''
"""
assert nonexistent_process.directory == ''
def test_verus_script_path(verus_stake_checker):
"""
GIVEN VerusStakeChecker object
WHEN created VerusStakeChecker with dummy VerusProcess
THEN script's base directory is cwd + script name
"""
script_path = verus_stake_checker.verus_script_path
assert script_path == Path(os.getcwd()).joinpath('verus')
def test_wallet_info_txcount_current(verus_stake_checker, dummy_wallet_no_stake):
"""
GIVEN VerusStakeChecker object
WHEN created VerusStakeChecker with dummy VerusProcess and dummy wallet info data
THEN current txtcount changed to value from dummy wallet info
"""
# Check txcount value without wallet_info data
assert verus_stake_checker.txcount_current == '0'
# Assign dummy wallet to wallet_info attribute
verus_stake_checker.wallet_info = dummy_wallet_no_stake
assert verus_stake_checker.txcount_current == '10'
def test_wallet_info_txcount_hist_on_start(verus_stake_checker):
"""
GIVEN VerusStakeChecker object
WHEN created VerusStakeChecker with dummy VerusProcess
THEN last (historical) txtcount should be equal 0 on start
"""
# Check txcount last value on start
assert verus_stake_checker.txcount_hist == '0'
def test_wallet_info_txcount_different(verus_stake_checker, dummy_wallet_no_stake):
"""
GIVEN VerusStakeChecker object
WHEN created VerusStakeChecker with dummy VerusProcess and dummy wallet info data
THEN different current and last (historical) txtcount values on VerusStakeChecker object creation (before run)
"""
# Assign dummy wallet to wallet_info attribute
verus_stake_checker.wallet_info = dummy_wallet_no_stake
assert verus_stake_checker.txcount_hist == '0'
assert verus_stake_checker.txcount_current == '10'
def test_verus_state_checker_run_different_txcounts(mocker, verus_stake_checker, dummy_wallet_no_stake, dummy_list_txs):
"""
GIVEN VerusStakeChecker object
WHEN created VerusStakeChecker with dummy VerusProcess and dummy wallet info data
THEN same current and last (historical) txtcount values after run VerusStakeChecker object
"""
# Assign dummy wallet to wallet_info attribute
verus_stake_checker.wallet_info = dummy_wallet_no_stake
# Before run txcounts have different values
assert verus_stake_checker.txcount_hist != verus_stake_checker.txcount_current
# Mock _process_call() method
mocker.patch.object(VerusStakeChecker, '_process_call', return_value=dummy_list_txs)
# After run txcounts should have the same values
verus_stake_checker.run()
assert verus_stake_checker.txcount_hist == verus_stake_checker.txcount_current
def test_verus_state_checker_run_equal_txcounts(mocker, verus_stake_checker, dummy_wallet_no_stake, dummy_list_txs):
"""
GIVEN VerusStakeChecker object
WHEN created VerusStakeChecker with dummy VerusProcess and dummy wallet info data (no stake in wallet)
THEN same txtcount values after run VerusStakeChecker object without new stake in wallet
"""
# Assign dummy wallet to wallet_info attribute
verus_stake_checker.wallet_info = dummy_wallet_no_stake
# Mock _process_call() method
mocker.patch.object(VerusStakeChecker, '_process_call', return_value=dummy_list_txs)
# First run - after first run txcounts should have the same values
verus_stake_checker.run()
# Store txcounts after first run
txcont_last_first_run = verus_stake_checker.txcount_hist
txcount_current_first_run = verus_stake_checker.txcount_current
# Second run without new stake
verus_stake_checker.run()
# After the second run the txcounts should be equal to txcounts from first run
assert verus_stake_checker.txcount_hist == verus_stake_checker.txcount_current
assert verus_stake_checker.txcount_hist == txcont_last_first_run
assert verus_stake_checker.txcount_current == txcount_current_first_run
def test_verus_state_checker_run_new_stake(mocker, verus_stake_checker, dummy_wallet_no_stake,
dummy_wallet_new_stake, dummy_list_txs):
"""
GIVEN VerusStakeChecker object
WHEN created VerusStakeChecker with dummy VerusProcess and dummy wallet info data (new stake in wallet)
THEN different txtcount values after run VerusStakeChecker object with new stake in wallet
"""
# Assign dummy wallet to wallet_info attribute
verus_stake_checker.wallet_info = dummy_wallet_no_stake
# Mock _process_call() method
mocker.patch.object(VerusStakeChecker, '_process_call', return_value=dummy_list_txs)
# First run - after first run txcounts should have the same values
verus_stake_checker.run()
txcont_last_first_run = verus_stake_checker.txcount_hist
txcount_current_first_run = verus_stake_checker.txcount_current
# Second run with new stake
verus_stake_checker.wallet_info = dummy_wallet_new_stake
verus_stake_checker.run()
# After the second run the txcounts should be different to txcounts from first run
assert verus_stake_checker.txcount_hist == verus_stake_checker.txcount_current
assert verus_stake_checker.txcount_hist != txcont_last_first_run
assert verus_stake_checker.txcount_current != txcount_current_first_run
def test_stake_transaction_correct():
"""
GIVEN dummy stake tx
WHEN StakeTransaction object is created
THEN StakeTransaction object's attributes are correct
"""
dummy_tx = {
'address': 'RXXX',
'category': 'mint',
'amount': 12.00000000,
'txid': 'tx01',
'time': 1632511111
}
tx = StakeTransaction(
txid=dummy_tx['txid'],
time=dummy_tx['time'],
amount=dummy_tx['amount'],
address=dummy_tx['address']
)
assert tx.txid == 'tx01'
assert tx.time == 1632511111
assert tx.amount == 12.00000000
assert tx.address == 'RXXX'
def test_stake_transactions_correct_order(dummy_stake_txs):
"""
GIVEN several dummy stake txs
WHEN StakeTransactions object is created and dummy txs are added to StakeTransactions collection
THEN Stake txs are returned in desired order
"""
stake_txs = StakeTransactions()
dummy_stake_txs_unordered, dummy_stake_txs_ordered = dummy_stake_txs
for tx in dummy_stake_txs_unordered:
stake_txs.add_stake_tx(tx)
# Not sorted txs
assert stake_txs.txs == dummy_stake_txs_unordered
# Sorted txs
assert stake_txs.txs_sorted == dummy_stake_txs_ordered
def test_stake_transactions_stakes_txids(dummy_stake_txs_collection):
"""
GIVEN StakeTransactions object with collection of several dummy stake txs
WHEN StakeTransactions stakes_txids attribute is called
THEN wallet's stake txids are returned in desired order
"""
stake_txs = dummy_stake_txs_collection
most_recent_stake_txid = stake_txs.stakes_txids
assert most_recent_stake_txid == ['tx01', 'tx02', 'tx03', 'tx04']
def test_stake_transactions_get_last_stake_txid(dummy_stake_txs_collection):
"""
GIVEN StakeTransactions object with collection of several dummy stake txs
WHEN method get_last_stake_txid() on StakeTransactions object is called
THEN most recent (last) txid in wallet is returned
"""
stake_txs = dummy_stake_txs_collection
most_recent_stake_txid = stake_txs.get_last_stake_txid()
assert most_recent_stake_txid == 'tx04'
def test_stake_transactions_get_tx_exist(dummy_stake_txs_collection):
"""
GIVEN StakeTransactions object with collection of several dummy stake txs
WHEN method get_stake_tx() with an existing stake tx is called
THEN the appropriate stake tx is returned
"""
stake_txs = dummy_stake_txs_collection
tx_01 = stake_txs.get_stake_tx(txid='tx01')
tx_04 = stake_txs.get_stake_tx(txid='tx04')
assert tx_01.txid == 'tx01'
assert tx_04.txid == 'tx04'
def test_stake_transactions_get_tx_not_exist(dummy_stake_txs_collection):
"""
GIVEN StakeTransactions object with collection of several dummy stake txs
WHEN method get_stake_tx() with not existing stake tx is called
THEN None is returned
"""
stake_txs = dummy_stake_txs_collection
tx_01 = stake_txs.get_stake_tx(txid='1111')
tx_04 = stake_txs.get_stake_tx(txid='2222')
assert tx_01 is None
assert tx_04 is None
def test_stake_transactions_get_new_stakes(dummy_stake_txs_collection):
"""
GIVEN StakeTransactions object with collection of several dummy stake txs
WHEN method get_new_stakes_txs() with specified stake txid is called
THEN the list of newer txs than the specified txid is returned
"""
stake_txs = dummy_stake_txs_collection
new_stake_txs = stake_txs.get_new_stakes_txs(txid_last='tx02')
new_stake_txids = [tx.txid for tx in new_stake_txs]
assert new_stake_txids == ['tx03', 'tx04']
def test_stake_transactions_get_new_stakes_txid_recent(dummy_stake_txs_collection):
"""
GIVEN StakeTransactions object with collection of several dummy stake txs
WHEN method get_new_stakes_txs() with last (most recent) stake txid is called
THEN the empty list is returned
"""
stake_txs = dummy_stake_txs_collection
new_stake_txs = stake_txs.get_new_stakes_txs(txid_last='tx04')
new_stake_txids = [tx.txid for tx in new_stake_txs]
assert new_stake_txids == []
def test_stake_transactions_get_new_stakes_txid_not_exist(dummy_stake_txs_collection):
"""
GIVEN StakeTransactions object with collection of several dummy stake txs
WHEN method get_new_stakes_txs() with non-existent stake txid is called
THEN the empty list is returned
"""
stake_txs = dummy_stake_txs_collection
new_stake_txs = stake_txs.get_new_stakes_txs(txid_last='xxx')
new_stake_txids = [tx.txid for tx in new_stake_txs]
assert new_stake_txids == []
def test_api_gateway_cognito(dummy_api_env_file_content, api_cognito):
"""
GIVEN dummy env_data
WHEN created ApiGatewayCognito object with specified env_data
THEN the object's attributes match the values specified in env_data
"""
assert api_cognito.cognito_token_url == dummy_api_env_file_content['COGNITO_TOKEN_URL']
assert api_cognito.cognito_client_id == dummy_api_env_file_content['COGNITO_CLIENT_ID']
assert api_cognito.cognito_client_secret == dummy_api_env_file_content['COGNITO_CLIENT_SECRET']
assert api_cognito.scopes == dummy_api_env_file_content['COGNITO_CUSTOM_SCOPES']
assert api_cognito.api_gateway_url == dummy_api_env_file_content['NOTIFICATION_API_URL']
def test_api_gateway_cognito_check_response_status_200(api_cognito):
"""
GIVEN ApiGatewayCognito object with dummy env_data
WHEN invoked _check_response_status() method with response status_code = 200
THEN None is returned
"""
mocked_response_obj = mock.Mock()
mocked_response_obj.status_code = 200
assert api_cognito._check_response_status(mocked_response_obj) is None
def test_api_gateway_cognito_check_response_status_not_200(mocker, api_cognito):
"""
GIVEN ApiGatewayCognito object with dummy env_data
WHEN invoked _check_response_status() method with response status_code != 200
THEN sys.exit is called
"""
# Mock logger attr
mocker.patch.object(api_cognito, 'logger')
# Mock HTTP response
mocked_response_obj = mock.Mock()
mocked_response_obj.status_code = 404
mocked_response_obj.text = 'Sth is wrong'
mocked_exit = mocker.patch('sys.exit')
api_cognito._check_response_status(response=mocked_response_obj)
# Assertions
mocked_exit.assert_called_once()
mocked_exit.assert_called()
def test_api_gateway_cognito_check_response_status_not_200_logger(mocker, api_cognito):
"""
GIVEN ApiGatewayCognito object with dummy env_data
WHEN invoked _check_response_status() method with response status_code != 200
THEN desired log entry is created
"""
mocked_response_obj = mock.Mock()
mocked_response_obj.status_code = 404
mocked_response_obj.text = 'Sth is wrong'
mocker.patch('sys.exit')
# mocked_logger = mocker.patch('new_stake_script.check_new_stake.logger')
mocked_logger = mocker.patch.object(api_cognito, 'logger')
desired_log_entry = f'API response: {mocked_response_obj.status_code} {mocked_response_obj.text}'
api_cognito._check_response_status(response=mocked_response_obj)
# Assertions
mocked_logger.error.assert_called_with(desired_log_entry)
def test_api_gateway_cognito_get_access_token(mocker, api_cognito):
"""
GIVEN ApiGatewayCognito object with dummy env_data
WHEN invoked _get_access_token() method
THEN valid access_token is returned
"""
# Mock requests.post method
mocked_post = mocker.patch('requests.post', autospec=True)
mocked_response_obj = mock.Mock()
mocked_response_obj.status_code = 200
mocked_response_obj.json = lambda: {'access_token': 'valid-token'}
mocked_post.return_value = mocked_response_obj
# Mock requests.post
assert api_cognito._get_access_token() == 'valid-token'
def test_api_gateway_cognito_check_http_method_not_allowed(mocker, api_cognito):
"""
GIVEN ApiGatewayCognito object with dummy env_data
WHEN invoked _check_method_is_allowed() method with not-allowed HTTP method
THEN sys.exit is called
"""
# Mock logger attr
mocker.patch.object(api_cognito, 'logger')
# Mock sys.exit method
mocked_exit = mocker.patch('sys.exit')
api_cognito.check_http_method(method='PUT')
# Assertions
mocked_exit.assert_called_once()
mocked_exit.assert_called()
def test_api_gateway_cognito_check_http_method_allowed(mocker, api_cognito):
"""
GIVEN ApiGatewayCognito object with dummy env_data
WHEN invoked _check_method_is_allowed() method with allowed HTTP methods
THEN None is returned
"""
for method in ['POST', 'GET', 'get', 'post']:
assert api_cognito.check_http_method(method=method) is None
| 40.223684
| 120
| 0.764868
|
9a96c602ff4be77b7b3bcf4d3b6461a17ba245d4
| 2,598
|
py
|
Python
|
pymcuprog/deviceinfo/devices/attiny1614.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 28
|
2021-05-08T19:28:33.000Z
|
2022-03-23T06:23:13.000Z
|
pymcuprog/deviceinfo/devices/attiny1614.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 20
|
2021-05-24T19:20:39.000Z
|
2022-03-12T20:10:30.000Z
|
pymcuprog/deviceinfo/devices/attiny1614.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 11
|
2021-06-24T20:59:16.000Z
|
2022-03-23T23:59:38.000Z
|
"""
Required device info for the attiny1614 devices
The following data was collected from device pack Microchip.ATtiny_DFP 2.7.128
"""
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
DEVICE_INFO = {
'interface': 'UPDI',
'name': 'attiny1614',
'architecture': 'avr8x',
# eeprom
'eeprom_address_byte': 0x00001400,
'eeprom_size_bytes': 0x0100,
'eeprom_page_size_bytes': 0x20,
'eeprom_read_size_bytes': 1,
'eeprom_write_size_bytes': 1,
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
'eeprom_isolated_erase': True,
# fuses
'fuses_address_byte': 0x00001280,
'fuses_size_bytes': 0xA,
'fuses_page_size_bytes': 1,
'fuses_read_size_bytes': 1,
'fuses_write_size_bytes': 1,
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
'fuses_isolated_erase': False,
# internal_sram
'internal_sram_address_byte': 0x3800,
'internal_sram_size_bytes': 0x0800,
'internal_sram_page_size_bytes': 1,
'internal_sram_read_size_bytes': 1,
'internal_sram_write_size_bytes': 1,
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
'internal_sram_isolated_erase': False,
# lockbits
'lockbits_address_byte': 0x0000128A,
'lockbits_size_bytes': 0x1,
'lockbits_page_size_bytes': 1,
'lockbits_read_size_bytes': 1,
'lockbits_write_size_bytes': 1,
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'lockbits_isolated_erase': False,
# signatures
'signatures_address_byte': 0x00001100,
'signatures_size_bytes': 0x3,
'signatures_page_size_bytes': 0x40,
'signatures_read_size_bytes': 1,
'signatures_write_size_bytes': 0,
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
'signatures_isolated_erase': False,
# user_row
'user_row_address_byte': 0x00001300,
'user_row_size_bytes': 0x20,
'user_row_page_size_bytes': 0x20,
'user_row_read_size_bytes': 1,
'user_row_write_size_bytes': 1,
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
'user_row_isolated_erase': True,
# flash
'flash_address_byte': 0x00008000,
'flash_size_bytes': 0x4000,
'flash_page_size_bytes': 0x40,
'flash_read_size_bytes': 2,
'flash_write_size_bytes': 0x40,
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'flash_isolated_erase': True,
# Some extra AVR specific fields
'address_size': '16-bit',
'device_id': 0x1E9422,
'nvmctrl_base': 0x00001000,
'syscfg_base': 0x00000F00,
'ocd_base': 0x00000F80,
'prog_clock_khz': 900,
}
| 30.209302
| 78
| 0.727098
|
4f7e3f458d1922b5200284393803e812252b1bf0
| 1,853
|
py
|
Python
|
torchclas/utils/io_func.py
|
hua1024/OpenClas
|
446b3f6f8cf5cc390c86d6e2674e525aeaa3a552
|
[
"Apache-2.0"
] | null | null | null |
torchclas/utils/io_func.py
|
hua1024/OpenClas
|
446b3f6f8cf5cc390c86d6e2674e525aeaa3a552
|
[
"Apache-2.0"
] | 1
|
2021-05-23T13:47:51.000Z
|
2021-05-24T11:39:32.000Z
|
torchclas/utils/io_func.py
|
hua1024/OpenClas
|
446b3f6f8cf5cc390c86d6e2674e525aeaa3a552
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# @Time : 2020/10/28 9:34
# @Author : zzf-jeff
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import shutil
import torch
import yaml
import time
from pathlib import Path
from tensorboardX import SummaryWriter
## Prefer pathlib.Path over os.path (Path is an object-oriented filesystem path)
def config_load(cfg_path):
yaml_file = open(cfg_path, 'r', encoding='utf-8')
config = yaml.load(yaml_file, Loader=yaml.FullLoader)
return config
def create_log_folder(cfg):
root_output_dir = Path(cfg['BASE']['checkpoints'])
# set up logger
if not root_output_dir.exists():
print('=> creating {}'.format(root_output_dir))
root_output_dir.mkdir()
dataset = cfg['BASE']['dataset_name']
model = cfg['BASE']['algorithm']
backones = cfg['BACKBONES']['type']
time_str = time.strftime('%Y%m%d')
checkpoints_output_dir = root_output_dir / dataset / model / backones / 'weights'
print('=> creating {}'.format(checkpoints_output_dir))
checkpoints_output_dir.mkdir(parents=True, exist_ok=True)
tensorboard_log_dir = root_output_dir / dataset / model / backones / time_str / 'tb_log'
print('=> creating {}'.format(tensorboard_log_dir))
tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
txt_log_dir = root_output_dir / dataset / model / backones / 'txt_log'
print('=> creating {}'.format(txt_log_dir))
txt_log_dir.mkdir(parents=True, exist_ok=True)
return {'chs_dir': str(checkpoints_output_dir),
'tb_dir': str(tensorboard_log_dir),
'txt_dir': str(txt_log_dir)}
def create_dir(path):
if not (os.path.exists(path)):
os.mkdir(path)
def create_tb(path):
writer = SummaryWriter(log_dir=path)
return writer
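# --- Editor's sketch (hedged usage example; the config values are invented) ---
# create_log_folder expects a dict with BASE and BACKBONES sections and returns
# the checkpoint/tensorboard/txt-log directories it created:
if __name__ == '__main__':
    demo_cfg = {
        'BASE': {'checkpoints': './output', 'dataset_name': 'cifar10',
                 'algorithm': 'resnet'},
        'BACKBONES': {'type': 'resnet18'},
    }
    print(create_log_folder(demo_cfg))   # {'chs_dir': ..., 'tb_dir': ..., 'txt_dir': ...}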
| 30.883333
| 93
| 0.678359
|
85302807d4afec7bd899b98ff8c7d2ebe5d2cf71
| 12,629
|
py
|
Python
|
tools/test.py
|
Ultimaker/mbed-os
|
2d0fe8429d9c2d483946ee41b59b869b8e493b2c
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2020-12-07T03:48:15.000Z
|
2021-12-22T10:53:11.000Z
|
tools/test.py
|
Ultimaker/mbed-os
|
2d0fe8429d9c2d483946ee41b59b869b8e493b2c
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tools/test.py
|
Ultimaker/mbed-os
|
2d0fe8429d9c2d483946ee41b59b869b8e493b2c
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2018-05-17T06:16:33.000Z
|
2018-05-17T06:16:33.000Z
|
#! /usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
TEST BUILD & RUN
"""
from __future__ import print_function, division, absolute_import
import sys
import os
import json
import fnmatch
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.config import ConfigException, Config
from tools.test_api import test_path_to_name, find_tests, get_test_config, print_tests, build_tests, test_spec_from_test_builds
from tools.test_configs import get_default_config
from tools.options import get_default_options_parser, extract_profile, extract_mcus
from tools.build_api import build_project, build_library
from tools.build_api import print_build_memory_usage
from tools.build_api import merge_build_data
from tools.targets import TARGET_MAP
from tools.utils import mkdir, ToolException, NotSupportedException, args_error
from tools.test_exporters import ReportExporter, ResultExporterType
from tools.utils import argparse_filestring_type, argparse_lowercase_type, argparse_many
from tools.utils import argparse_dir_not_parent
from tools.toolchains import mbedToolchain, TOOLCHAIN_PATHS, TOOLCHAIN_CLASSES
from tools.settings import CLI_COLOR_MAP
if __name__ == '__main__':
try:
# Parse Options
parser = get_default_options_parser(add_app_config=True)
parser.add_argument("-D",
action="append",
dest="macros",
help="Add a macro definition")
parser.add_argument("-j", "--jobs",
type=int,
dest="jobs",
default=0,
help="Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)")
parser.add_argument("--source", dest="source_dir",
type=argparse_filestring_type,
default=None, help="The source (input) directory (for sources other than tests). Defaults to current directory.", action="append")
parser.add_argument("--build", dest="build_dir", type=argparse_dir_not_parent(ROOT),
default=None, help="The build (output) directory")
parser.add_argument("-l", "--list", action="store_true", dest="list",
default=False, help="List (recursively) available tests in order and exit")
parser.add_argument("-p", "--paths", dest="paths",
type=argparse_many(argparse_filestring_type),
default=None, help="Limit the tests to those within the specified comma separated list of paths")
format_choices = ["list", "json"]
format_default_choice = "list"
format_help = "Change the format in which tests are listed. Choices include: %s. Default: %s" % (", ".join(format_choices), format_default_choice)
parser.add_argument("-f", "--format", dest="format",
type=argparse_lowercase_type(format_choices, "format"),
default=format_default_choice, help=format_help)
parser.add_argument("--continue-on-build-fail", action="store_true", dest="continue_on_build_fail",
default=None, help="Continue trying to build all tests if a build failure occurs")
#TODO validate the names instead of just passing through str
parser.add_argument("-n", "--names", dest="names", type=argparse_many(str),
default=None, help="Limit the tests to a comma separated list of names")
parser.add_argument("--test-config", dest="test_config", type=str,
default=None, help="Test config for a module")
parser.add_argument("--test-spec", dest="test_spec",
default=None, help="Destination path for a test spec file that can be used by the Greentea automated test tool")
parser.add_argument("--build-report-junit", dest="build_report_junit",
default=None, help="Destination path for a build report in the JUnit xml format")
parser.add_argument("--build-data",
dest="build_data",
default=None,
help="Dump build_data to this file")
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose diagnostic output")
parser.add_argument("--stats-depth",
type=int,
dest="stats_depth",
default=2,
help="Depth level for static memory report")
options = parser.parse_args()
# Filter tests by path if specified
if options.paths:
all_paths = options.paths
else:
all_paths = ["."]
all_tests = {}
tests = {}
# Target
if options.mcu is None :
args_error(parser, "argument -m/--mcu is required")
mcu = extract_mcus(parser, options)[0]
# Toolchain
if options.tool is None:
args_error(parser, "argument -t/--tool is required")
toolchain = options.tool[0]
if not TOOLCHAIN_CLASSES[toolchain].check_executable():
search_path = TOOLCHAIN_PATHS[toolchain] or "No path set"
args_error(parser, "Could not find executable for %s.\n"
"Currently set search path: %s"
% (toolchain, search_path))
# Assign config file. Precedence: test_config>app_config
# TODO: merge configs if both given
if options.test_config:
config = get_test_config(options.test_config, mcu)
if not config:
args_error(parser, "argument --test-config contains invalid path or identifier")
elif options.app_config:
config = options.app_config
else:
config = Config.find_app_config(options.source_dir)
if not config:
config = get_default_config(options.source_dir or ['.'], mcu)
# Find all tests in the relevant paths
for path in all_paths:
all_tests.update(find_tests(path, mcu, toolchain,
app_config=config))
# Filter tests by name if specified
if options.names:
all_names = options.names
all_names = [x.lower() for x in all_names]
for name in all_names:
if any(fnmatch.fnmatch(testname, name) for testname in all_tests):
for testname, test in all_tests.items():
if fnmatch.fnmatch(testname, name):
tests[testname] = test
else:
print("[Warning] Test with name '%s' was not found in the "
"available tests" % (name))
else:
tests = all_tests
if options.color:
# This import happens late to prevent initializing colorization when we don't need it
import colorize
if options.verbose:
notify = mbedToolchain.print_notify_verbose
else:
notify = mbedToolchain.print_notify
notify = colorize.print_in_color_notifier(CLI_COLOR_MAP, notify)
else:
notify = None
if options.list:
# Print available tests in order and exit
print_tests(tests, options.format)
sys.exit(0)
else:
# Build all tests
if not options.build_dir:
args_error(parser, "argument --build is required")
base_source_paths = options.source_dir
# Default base source path is the current directory
if not base_source_paths:
base_source_paths = ['.']
build_report = {}
build_properties = {}
library_build_success = False
profile = extract_profile(parser, options, toolchain)
try:
# Build sources
build_library(base_source_paths, options.build_dir, mcu,
toolchain, jobs=options.jobs,
clean=options.clean, report=build_report,
properties=build_properties, name="mbed-build",
macros=options.macros, verbose=options.verbose,
notify=notify, archive=False,
app_config=config,
build_profile=profile)
library_build_success = True
except ToolException as e:
# ToolException output is handled by the build log
pass
except NotSupportedException as e:
# NotSupportedException is handled by the build log
pass
except Exception as e:
# Some other exception occurred, print the error message
print(e)
if not library_build_success:
print("Failed to build library")
else:
# Build all the tests
test_build_success, test_build = build_tests(tests, [options.build_dir], options.build_dir, mcu, toolchain,
clean=options.clean,
report=build_report,
properties=build_properties,
macros=options.macros,
verbose=options.verbose,
notify=notify,
jobs=options.jobs,
continue_on_build_fail=options.continue_on_build_fail,
app_config=config,
build_profile=profile,
stats_depth=options.stats_depth)
# If a path to a test spec is provided, write it to a file
if options.test_spec:
test_spec_data = test_spec_from_test_builds(test_build)
# Create the target dir for the test spec if necessary
# mkdir will not create the dir if it already exists
test_spec_dir = os.path.dirname(options.test_spec)
if test_spec_dir:
mkdir(test_spec_dir)
try:
with open(options.test_spec, 'w') as f:
f.write(json.dumps(test_spec_data, indent=2))
except IOError as e:
print("[ERROR] Error writing test spec to file")
print(e)
# If a path to a JUnit build report spec is provided, write it to a file
if options.build_report_junit:
report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
report_exporter.report_to_file(build_report, options.build_report_junit, test_suite_properties=build_properties)
# Print memory map summary on screen
if build_report:
print()
print(print_build_memory_usage(build_report))
print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
status = print_report_exporter.report(build_report)
if options.build_data:
merge_build_data(options.build_data, build_report, "test")
if status:
sys.exit(0)
else:
sys.exit(1)
except KeyboardInterrupt as e:
print("\n[CTRL+c] exit")
except ConfigException as e:
# Catching ConfigException here to prevent a traceback
print("[ERROR] %s" % str(e))
except Exception as e:
import traceback
traceback.print_exc(file=sys.stdout)
print("[ERROR] %s" % str(e))
sys.exit(1)
| 42.955782
| 158
| 0.58334
|
65b757b7a24e0b8392709b0277a146d06828936e
| 744
|
py
|
Python
|
examples/sample-ui-dialog_activity-button.py
|
tqchagas/AndroidViewClient
|
f1e11c587717b061c4d4434a21c2eb464b464ae0
|
[
"Apache-2.0"
] | 1,155
|
2015-01-07T06:41:35.000Z
|
2022-03-31T07:06:05.000Z
|
examples/sample-ui-dialog_activity-button.py
|
tqchagas/AndroidViewClient
|
f1e11c587717b061c4d4434a21c2eb464b464ae0
|
[
"Apache-2.0"
] | 154
|
2015-01-11T03:33:19.000Z
|
2022-03-03T04:35:06.000Z
|
examples/sample-ui-dialog_activity-button.py
|
tqchagas/AndroidViewClient
|
f1e11c587717b061c4d4434a21c2eb464b464ae0
|
[
"Apache-2.0"
] | 307
|
2015-01-14T15:52:59.000Z
|
2022-01-30T02:33:56.000Z
|
#! /usr/bin/env python
'''
Copyright (C) 2012 Diego Torres Milano
Created on Aug 31, 2012
@author: diego
'''
import re
import sys
import os
# This must be imported before MonkeyRunner and MonkeyDevice,
# otherwise the import fails.
# PyDev sets PYTHONPATH, use it
try:
for p in os.environ['PYTHONPATH'].split(':'):
if not p in sys.path:
sys.path.append(p)
except:
pass
try:
sys.path.append(os.path.join(os.environ['ANDROID_VIEW_CLIENT_HOME'], 'src'))
except:
pass
from com.dtmilano.android.viewclient import ViewClient, View
vc = ViewClient(*ViewClient.connectToDeviceOrExit())
button = vc.findViewWithTextOrRaise('Show Dialog')
print "button: ", button.getClass(), button.getId(), button.getCoords()
| 20.666667
| 80
| 0.711022
|
d5064b5d98ac9b966ba126f5600151006c7a57bd
| 13,138
|
py
|
Python
|
src/prediction_ann_13.py
|
acmlia/ann_training
|
8cb39123203445cf79c4bd65350fa4063705a518
|
[
"MIT"
] | null | null | null |
src/prediction_ann_13.py
|
acmlia/ann_training
|
8cb39123203445cf79c4bd65350fa4063705a518
|
[
"MIT"
] | null | null | null |
src/prediction_ann_13.py
|
acmlia/ann_training
|
8cb39123203445cf79c4bd65350fa4063705a518
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 23:04:36 2019
@author: rainfall
"""
from __future__ import absolute_import, division, print_function
import os
import time
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from collections import Counter
from src.meteoro_skills import CategoricalScores
from src.meteoro_skills import ContinuousScores
import tensorflow as tf
from tensorflow import keras
from keras import backend as K
from tensorflow.keras import layers
from keras.layers import GaussianNoise
from keras.layers import GaussianDropout
from keras.models import Sequential
from keras.layers import Dense
#from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import model_from_yaml
print('TF version '+tf.__version__)
# ------------------------------------------------------------------------------
def tic():
global _start_time
_start_time = time.time()
def tac():
t_sec = round(time.time() - _start_time)
(t_min, t_sec) = divmod(t_sec, 60)
(t_hour, t_min) = divmod(t_min, 60)
print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))
def mean_squared_error(y_true, y_pred):
return K.mean(K.square(y_pred - y_true), axis=-1)
# ------------------------------------------------------------------------------
class Prediction:
"""
This module is intended to automate the TensorFlow Neural Network training.
"""
PCA = PCA()
seed = 0
run_prefix = ''
tver = ''
vernick = ''
file = ''
path = ''
fig_title = ''
path_fig = ''
mod_out_pth = ''
mod_out_name = ''
ymlv = ''
ymlp = ''
ymlf = ''
def __init__(self, random_seed=0,
run_prefix='',
version='',
version_nickname='',
file_csv='',
path_csv='',
fig_title='',
figure_path='',
model_out_path='',
model_out_name='',
yaml_version='',
yaml_path='',
yaml_file=''):
self.seed=random_seed
self.run_prefix=run_prefix
self.tver=version
self.vernick=version_nickname
self.file=file_csv
self.path=path_csv
self.path_fig=figure_path
self.fig_title=run_prefix+version+version_nickname
self.mod_out_pth=model_out_path
self.mod_out_name=model_out_name
self.ymlv=yaml_version
self.ymlp=yaml_path
self.ymlf=yaml_file
# -------------------------------------------------------------------------
# DROP DATA OUTSIDE INTERVAL
# -------------------------------------------------------------------------
@staticmethod
def keep_interval(keepfrom: 0.0, keepto: 1.0, dataframe, target_col: str):
keepinterval = np.where((dataframe[target_col] >= keepfrom) &
(dataframe[target_col] <= keepto))
result = dataframe.iloc[keepinterval]
return result
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
def PredictScreening(self):
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
## load YAML and create model
yaml_file = open(self.ymlp+'screening_'+self.ymlv+'.yaml', 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model.load_weights(self.ymlp+'screening_'+self.ymlv+'.h5')
print("Loaded models yaml and h5 from disk!")
# loaded_model = keras.models.load_model(self.ymlp+self.ymlf)
# loaded_model.summary()
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Fix random seed for reproducibility:
np.random.seed(self.seed)
# Load dataset:
df = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')
x = df.loc[:,['SI', '89V', '89VH','166V','166VH', '190V', '183VH', 'PCT89']]
x_arr = np.asanyarray(x)
# Scaling the input paramaters:
# scaler_min_max = MinMaxScaler()
norm_sc = Normalizer()
x_normalized= norm_sc.fit_transform(x_arr)
# Split the dataset in test and train samples:
# x_train, x_test, y_train, y_test = train_test_split(x_normalized,
# y_arr, test_size=0.10,
# random_state=101)
# Doing prediction from the test dataset:
y_pred = loaded_model.predict_classes(x_normalized)
y_pred = np.ravel(y_pred)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Applying meteorological skills to verify the performance of the model, in this case, categorical scores:
skills = CategoricalScores()
print('>>>> DEBUG >>>>', y_true,'\n',y_pred)
val_accuracy, val_bias, val_pod, val_pofd, val_far, val_csi, val_ph, val_ets, val_hss, val_hkd, val_num_pixels = skills.metrics(y_true, y_pred)
#converting to text file
print("converting arrays to text files")
my_scores = {'val_accuracy': val_accuracy,
'val_bias': val_bias,
'val_pod': val_pod,
'val_pofd': val_pofd,
'val_far': val_far,
'val_csi': val_csi,
'val_ph': val_ph,
'val_ets': val_ets,
'val_hss': val_hss,
'val_hkd': val_hkd,
'val_num_pixels': val_num_pixels}
with open('categorical_scores_R1.txt', 'w') as myfile:
myfile.write(str(my_scores))
print("Text file saved!")
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
df['SCR'] = ""
df['SCR'] = y_pred
filename=self.file[22:58]
filename = 'validation_SCR_'+filename+'.csv'
df.to_csv(os.path.join(self.path, filename), index=False, sep=",", decimal='.')
return df
def PredictRetrieval(self):
#------------------------------------------------------------------------------
#load YAML and create model
yaml_file = open(self.ymlp+'final_'+self.ymlv+'.yaml', 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model.load_weights(self.ymlp+'final_'+self.ymlv+'.h5')
print("Loaded models yaml and h5 from disk!")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Fix random seed for reproducibility:
np.random.seed(self.seed)
# ------------------------------------------------------------------------------
df_orig = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')
df_input = df_orig.loc[:, ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',
'166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']]
colunas = ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',
'166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']
scaler = StandardScaler()
normed_input = scaler.fit_transform(df_input)
df_normed_input = pd.DataFrame(normed_input[:],
columns=colunas)
ancillary = df_normed_input.loc[:, ['183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']]
# regions=df_orig.loc[:,['R1','R2','R3','R4','R5']]
# ------------------------------------------------------------------------------
# Choosing the number of components:
TB1 = df_normed_input.loc[:, ['10V', '10H', '18V', '18H']]
TB2 = df_normed_input.loc[:, ['36V', '36H', '89V', '89H', '166V', '166H']]
# ------------------------------------------------------------------------------
# Verifying the number of components that most contribute:
pca = PCA()
pca1 = pca.fit(TB1)
plt.plot(np.cumsum(pca1.explained_variance_ratio_))
plt.xlabel('Number of components for TB1')
plt.ylabel('Cumulative explained variance');
plt.savefig(self.path_fig + self.tver + '_PCA_TB1.png')
# ---
pca_trans1 = PCA(n_components=2)
pca1 = pca_trans1.fit(TB1)
TB1_transformed = pca_trans1.transform(TB1)
print("original shape: ", TB1.shape)
print("transformed shape:", TB1_transformed.shape)
# ------------------------------------------------------------------------------
pca = PCA()
pca2 = pca.fit(TB2)
plt.plot(np.cumsum(pca2.explained_variance_ratio_))
plt.xlabel('Number of components for TB2')
plt.ylabel('Cumulative explained variance');
plt.savefig(self.path_fig + self.tver + 'PCA_TB2.png')
# ---
pca_trans2 = PCA(n_components=2)
pca2 = pca_trans2.fit(TB2)
TB2_transformed = pca_trans2.transform(TB2)
print("original shape: ", TB2.shape)
print("transformed shape:", TB2_transformed.shape)
# ------------------------------------------------------------------------------
# JOIN THE TREATED VARIABLES IN ONE SINGLE DATASET AGAIN:
PCA1 = pd.DataFrame(TB1_transformed[:],
columns=['pca1_1', 'pca_2'])
PCA2 = pd.DataFrame(TB2_transformed[:],
columns=['pca2_1', 'pca2_2'])
dataset = PCA1.join(PCA2, how='right')
dataset = dataset.join(ancillary, how='right')
dataset = dataset.join(df_orig.loc[:, ['sfcprcp']], how='right')
dataset = dataset.join(df_orig.loc[:, ['SCR']], how='right')
# ------------------------------------------------------------------------------
#dataset = self.keep_interval(0.1, 75.0, dataset, 'sfcprcp')
NaN_pixels = np.where((dataset['sfcprcp'] != -9999.0))
dataset = dataset.iloc[NaN_pixels]
SCR_pixels = np.where((dataset['SCR'] == 1))
dataset = dataset.iloc[SCR_pixels]
dataset_index=dataset.index.values
SCR = dataset.pop('SCR')
y_true = dataset.pop('sfcprcp')
x_normed = dataset.values
y_pred = loaded_model.predict(x_normed).flatten()
# ------------------------------------------------------------------------------
# Applying meteorological skills to verify the performance of the model, in this case, continuous scores:
skills = ContinuousScores()
val_y_pred_mean, val_y_true_mean, val_mae, val_rmse, val_std, val_fseperc, val_fse, val_corr, val_num_pixels = skills.metrics(y_true, y_pred)
#converting to text file
print("converting arrays to text files")
my_scores = {'val_y_pred_mean': val_y_pred_mean,
'val_y_true_mean': val_y_true_mean,
'val_mae': val_mae,
'val_rmse': val_rmse,
'val_std': val_std,
'val_fseperc': val_fseperc,
'val_fse': val_fse,
'val_corr': val_corr,
'val_num_pixels': val_num_pixels}
with open(self.ymlp+'continuous_scores_'+self.ymlv+'.txt', 'w') as myfile:
myfile.write(str(my_scores))
print("Text file saved!")
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
df_final = df_orig.iloc[dataset_index]
df_final['y_true'] = y_true.values
df_final['y_pred'] = y_pred
filename=self.file[21:58]
filename = 'retrieval_ann_'+self.ymlv+'_'+filename+'.csv'
df_final.to_csv(os.path.join(self.ymlp, filename), index=False, sep=",", decimal='.')
return df_final
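# --- Editor's sketch (hedged illustration, stand-in data) ---
# The component-selection step above keeps 2 principal components per channel
# group after inspecting the cumulative explained variance. The same check in
# isolation, with random data in place of the brightness temperatures:
if __name__ == '__main__':
    import numpy as np
    from sklearn.decomposition import PCA
    TB_demo = np.random.rand(200, 6)
    pca_demo = PCA().fit(TB_demo)
    # pick the number of components where this curve flattens out
    print(np.cumsum(pca_demo.explained_variance_ratio_))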
| 40.928349
| 151
| 0.485919
|
d088d694ffddcae1a8f475b0b6c1062b7744d7cf
| 946
|
py
|
Python
|
demo/vgg16_lstm_train.py
|
chen0040/keras-image-captioning
|
cfe4ba019dfa5dfd1d0c0e7b5812dbb2d4aa5bd4
|
[
"MIT"
] | 2
|
2018-03-01T10:03:08.000Z
|
2018-10-27T01:46:06.000Z
|
demo/vgg16_lstm_train.py
|
chen0040/keras-image-captioning
|
cfe4ba019dfa5dfd1d0c0e7b5812dbb2d4aa5bd4
|
[
"MIT"
] | null | null | null |
demo/vgg16_lstm_train.py
|
chen0040/keras-image-captioning
|
cfe4ba019dfa5dfd1d0c0e7b5812dbb2d4aa5bd4
|
[
"MIT"
] | 3
|
2018-03-01T10:03:09.000Z
|
2021-04-10T21:28:14.000Z
|
from keras_image_captioning.library.img_cap_loader import load_img_cap
from keras_image_captioning.library.text_fit import fit_text
from keras_image_captioning.library.vgg16_lstm import Vgg16LstmImgCap
import numpy as np
from sklearn.model_selection import train_test_split
def main():
seed = 42
max_vocab_size = 5000
np.random.seed(seed)
img_dir_path = './data/pokemon/img'
txt_dir_path = './data/pokemon/txt'
model_dir_path = './models/pokemon'
data = load_img_cap(img_dir_path, txt_dir_path)
train_data, test_data = train_test_split(data, test_size=0.2, random_state=seed)
text_data = [txt for _, txt in data]
text_data_model = fit_text(text_data, max_vocab_size=max_vocab_size, max_allowed_seq_length=20)
img_cap = Vgg16LstmImgCap()
epochs = 100
img_cap.fit(text_data_model, train_data, test_data, model_dir_path=model_dir_path, epochs=epochs)
if __name__ == '__main__':
main()
| 31.533333
| 101
| 0.766385
|
6471a4b583b6e4148c44fe1c646001f699d2876d
| 471
|
py
|
Python
|
assets/Poser/Reality/Runtime/Libraries/light/Reality/SingleIBLs/Circle/Large/Circle_Large_50.py
|
Red54/reality
|
510d4f5fde2f4c5535482f1ea199f914102b8a2a
|
[
"BSD-3-Clause"
] | null | null | null |
assets/Poser/Reality/Runtime/Libraries/light/Reality/SingleIBLs/Circle/Large/Circle_Large_50.py
|
Red54/reality
|
510d4f5fde2f4c5535482f1ea199f914102b8a2a
|
[
"BSD-3-Clause"
] | null | null | null |
assets/Poser/Reality/Runtime/Libraries/light/Reality/SingleIBLs/Circle/Large/Circle_Large_50.py
|
Red54/reality
|
510d4f5fde2f4c5535482f1ea199f914102b8a2a
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) Pret-a-3D/Paolo Ciccone. All rights reserved.
# Modified by Fuzzy70/Lee Furssedonn with kind permission from Paolo Ciccone
#
from Reality_services import *
from Reality import *
# To customize this script all you need to do is to
# change the following variable
Re_sIBL_Map = ":Runtime:Textures:Reality:SingleIBLs:Circle:Circle_Large_50.ibl"
# Set the IBL Map
Reality.Scene().setIBLImage(ReResolvePoserPath(Re_sIBL_Map).encode("utf8"))
| 31.4
| 80
| 0.762208
|
0490f90a6f8df2d75ef6757ad1aef5b078e6c396
| 3,067
|
py
|
Python
|
lang/py/avro/errors.py
|
sunjstack/avro
|
5bd7cfe0bf742d0482bf6f54b4541b4d22cc87d9
|
[
"Apache-2.0"
] | 1
|
2021-05-12T01:49:16.000Z
|
2021-05-12T01:49:16.000Z
|
lang/py/avro/errors.py
|
sunjstack/avro
|
5bd7cfe0bf742d0482bf6f54b4541b4d22cc87d9
|
[
"Apache-2.0"
] | 159
|
2020-12-02T07:01:07.000Z
|
2022-03-27T05:01:56.000Z
|
lang/py/avro/errors.py
|
sunjstack/avro
|
5bd7cfe0bf742d0482bf6f54b4541b4d22cc87d9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
class AvroException(Exception):
"""The base class for exceptions in avro."""
class SchemaParseException(AvroException):
"""Raised when a schema failed to parse."""
class InvalidName(SchemaParseException):
"""User attempted to parse a schema with an invalid name."""
class AvroWarning(UserWarning):
"""Base class for warnings."""
class IgnoredLogicalType(AvroWarning):
"""Warnings for unknown or invalid logical types."""
class AvroTypeException(AvroException):
"""Raised when datum is not an example of schema."""
def __init__(self, expected_schema, datum):
pretty_expected = json.dumps(json.loads(str(expected_schema)), indent=2)
fail_msg = "The datum {} is not an example of the schema {}".format(datum, pretty_expected)
super(AvroTypeException, self).__init__(fail_msg)
class SchemaResolutionException(AvroException):
def __init__(self, fail_msg, writers_schema=None, readers_schema=None):
        # Pretty-print only the schemas that were actually supplied;
        # str(None) is not valid JSON and would make json.loads fail.
        if writers_schema:
            pretty_writers = json.dumps(json.loads(str(writers_schema)), indent=2)
            fail_msg += "\nWriter's Schema: {}".format(pretty_writers)
        if readers_schema:
            pretty_readers = json.dumps(json.loads(str(readers_schema)), indent=2)
            fail_msg += "\nReader's Schema: {}".format(pretty_readers)
        super(SchemaResolutionException, self).__init__(fail_msg)
class DataFileException(AvroException):
"""Raised when there's a problem reading or writing file object containers."""
class AvroRemoteException(AvroException):
"""Raised when an error message is sent by an Avro requestor or responder."""
class ConnectionClosedException(AvroException):
"""Raised when attempting IPC on a closed connection."""
class ProtocolParseException(AvroException):
"""Raised when a protocol failed to parse."""
class UnsupportedCodec(NotImplementedError, AvroException):
"""Raised when the compression named cannot be used."""
class UsageError(RuntimeError, AvroException):
"""An exception raised when incorrect arguments were passed."""
class AvroRuntimeException(RuntimeError, AvroException):
"""Raised when compatibility parsing encounters an unknown type"""
| 34.077778
| 99
| 0.735246
|
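Because every concrete error in errors.py derives from AvroException, callers can install one broad handler for all of them. The minimal sketch below uses a toy stand-in object (not a real Avro schema) whose str() yields JSON, which is all that AvroTypeException's constructor requires.

# Minimal sketch: exercising the exception hierarchy defined above with a
# fake schema object; only names defined in avro/errors.py are imported.
from avro.errors import AvroException, AvroTypeException

class _FakeSchema:
    def __str__(self):
        return '{"type": "int"}'

try:
    raise AvroTypeException(_FakeSchema(), "not-an-int")
except AvroException as err:          # the base class catches every avro error
    print(err)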
a887b6d048599564d64582727c35a911b6b92e36
| 5,678
|
py
|
Python
|
bfillings/fasttree_v1.py
|
gregcaporaso/burrito-fillings
|
a7b3b4db0d20b4baa064d447033782969f491622
|
[
"BSD-3-Clause"
] | null | null | null |
bfillings/fasttree_v1.py
|
gregcaporaso/burrito-fillings
|
a7b3b4db0d20b4baa064d447033782969f491622
|
[
"BSD-3-Clause"
] | null | null | null |
bfillings/fasttree_v1.py
|
gregcaporaso/burrito-fillings
|
a7b3b4db0d20b4baa064d447033782969f491622
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""Application controller for FastTree v1.0"""
from burrito.parameters import ValuedParameter, FlagParameter
from burrito.util import (CommandLineApplication, FilePath, system,
CommandLineAppResult, ResultPath, remove,
ApplicationError)
from cogent.core.tree import PhyloNode
from cogent.parse.tree import DndParser
from cogent.core.moltype import DNA, RNA, PROTEIN
class FastTree(CommandLineApplication):
"""FastTree application Controller"""
_command = 'FastTree'
_input_handler = '_input_as_multiline_string'
_parameters = {
'-quiet':FlagParameter('-',Name='quiet'),
'-boot':ValuedParameter('-',Delimiter=' ',Name='boot'),
'-seed':ValuedParameter('-',Delimiter=' ',Name='seed'),
'-nni':ValuedParameter('-',Delimiter=' ',Name='nni'),
'-slow':FlagParameter('-',Name='slow'),
'-fastest':FlagParameter('-',Name='fastest'),
'-top':FlagParameter('-',Name='top'),
'-notop':FlagParameter('-',Name='notop'),
'-topm':ValuedParameter('-',Delimiter=' ',Name='topm'),
'-close':ValuedParameter('-',Delimiter=' ',Name='close'),
'-refresh':ValuedParameter('-',Delimiter=' ',Name='refresh'),
'-matrix':ValuedParameter('-',Delimiter=' ',Name='matrix'),
'-nomatrix':FlagParameter('-',Name='nomatrix'),
'-nj':FlagParameter('-',Name='nj'),
'-bionj':FlagParameter('-',Name='bionj'),
'-nt':FlagParameter('-',Name='nt'),
'-n':ValuedParameter('-',Delimiter=' ',Name='n')}
#FastTree [-quiet] [-boot 1000] [-seed 1253] [-nni 10] [-slow | -fastest]
# [-top | -notop] [-topm 1.0 [-close 0.75] [-refresh 0.8]]
# [-matrix Matrix | -nomatrix] [-nj | -bionj]
# [-nt] [-n 100] [alignment] > newick_tree
def __call__(self,data=None, remove_tmp=True):
"""Run the application with the specified kwargs on data
data: anything that can be cast into a string or written out to
a file. Usually either a list of things or a single string or
number. input_handler will be called on this data before it
is passed as part of the command-line argument, so by creating
your own input handlers you can customize what kind of data
you want your application to accept
remove_tmp: if True, removes tmp files
NOTE: Override of the base class to handle redirected output
"""
input_handler = self.InputHandler
suppress_stderr = self.SuppressStderr
outfile = self.getTmpFilename(self.TmpDir)
self._outfile = outfile
if suppress_stderr:
errfile = FilePath('/dev/null')
else:
errfile = FilePath(self.getTmpFilename(self.TmpDir))
if data is None:
input_arg = ''
else:
input_arg = getattr(self,input_handler)(data)
# Build up the command, consisting of a BaseCommand followed by
# input and output (file) specifications
command = self._command_delimiter.join(filter(None,\
[self.BaseCommand,str(input_arg),'>',str(outfile),'2>',\
str(errfile)]))
if self.HaltExec:
raise AssertionError, "Halted exec with command:\n" + command
# The return value of system is a 16-bit number containing the signal
# number that killed the process, and then the exit status.
# We only want to keep the exit status so do a right bitwise shift to
# get rid of the signal number byte
exit_status = system(command) >> 8
        # Determine if an error should be raised due to the exit status
        # of the application
if not self._accept_exit_status(exit_status):
raise ApplicationError, \
'Unacceptable application exit status: %s, command: %s'\
% (str(exit_status),command)
out = open(outfile,"r")
err = None
if not suppress_stderr:
err = open(errfile,"r")
result = CommandLineAppResult(out,err,exit_status,\
result_paths=self._get_result_paths(data))
# Clean up the input file if one was created
if remove_tmp:
if self._input_filename:
remove(self._input_filename)
self._input_filename = None
return result
def _get_result_paths(self, data):
result = {}
result['Tree'] = ResultPath(Path=self._outfile)
return result
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params=None):
"""Returns a tree from alignment
Will check MolType of aln object
"""
if params is None:
params = {}
if moltype == DNA or moltype == RNA:
params['-nt'] = True
elif moltype == PROTEIN:
params['-nt'] = False
else:
raise ValueError, \
"FastTree does not support moltype: %s" % moltype.label
app = FastTree(params=params)
if best_tree:
raise NotImplementedError, "best_tree not implemented yet"
result = app(aln.toFasta())
tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
return tree
| 38.890411
| 78
| 0.590173
|
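The comment in FastTree.__call__ explains that system() returns a combined wait status whose high byte holds the exit code, hence the right shift by 8. The standalone snippet below reproduces that trick with os.system directly; it assumes a POSIX shell (Windows returns the exit code without the shift).

# POSIX-only sketch of the ">> 8" trick used above: os.system() returns a
# 16-bit wait status whose high byte holds the child's exit code.
import os

status = os.system("exit 42")   # shell builtin returning exit code 42
print(status >> 8)              # prints 42 on POSIX systems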
e94010fb212141a6830254adbc47c260b98c6b87
| 815
|
py
|
Python
|
args_predict.py
|
meirof/aipnd-project
|
2922e0b2644e11e8dc363e9321ab2bf9ff805ad8
|
[
"MIT"
] | null | null | null |
args_predict.py
|
meirof/aipnd-project
|
2922e0b2644e11e8dc363e9321ab2bf9ff805ad8
|
[
"MIT"
] | null | null | null |
args_predict.py
|
meirof/aipnd-project
|
2922e0b2644e11e8dc363e9321ab2bf9ff805ad8
|
[
"MIT"
] | null | null | null |
import argparse
def set_args():
"""
    Validate the predict.py arguments passed from the command line
"""
parser = argparse.ArgumentParser()
    parser.add_argument('--img_file', required=True, type=str, help='<Full path to the image file>')
    parser.add_argument('--cpt', required=True, type=str, help='Checkpoint full path')
    parser.add_argument('--top_k', default=5, type=int, help='Top number of class probabilities to display')
    # argparse's type=bool converts any non-empty string to True, so expose GPU
    # selection as a flag instead.
    parser.add_argument('--gpu', action='store_true', default=False,
                        help='Pass this flag to use the GPU; omit it otherwise.')
    parser.add_argument('--class_dict', type=str, default="cat_to_name.json",
                        help='JSON class-names file full path')
return parser.parse_args()
| 40.75
| 109
| 0.620859
|
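For a quick smoke test of set_args() without typing a real command line, sys.argv can be substituted before calling it. The paths below are hypothetical placeholders, and the import assumes args_predict.py is importable from the working directory.

# Hedged usage sketch; file paths are placeholders, not real data.
import sys
from args_predict import set_args

sys.argv = ["predict.py",
            "--img_file", "flowers/test/1/image_06743.jpg",
            "--cpt", "checkpoint.pth",
            "--top_k", "3"]
args = set_args()
print(args.img_file, args.top_k, args.gpu)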
0d902c69812b64755651027f631b21a5ec654c99
| 4,264
|
py
|
Python
|
starthinker/task/email/run.py
|
viohman/starthinker
|
20bd2d7fd1e541eb8a2c9b7159941f667e22e38e
|
[
"Apache-2.0"
] | null | null | null |
starthinker/task/email/run.py
|
viohman/starthinker
|
20bd2d7fd1e541eb8a2c9b7159941f667e22e38e
|
[
"Apache-2.0"
] | 6
|
2021-03-19T12:00:18.000Z
|
2022-02-10T09:43:42.000Z
|
starthinker/task/email/run.py
|
viohman/starthinker
|
20bd2d7fd1e541eb8a2c9b7159941f667e22e38e
|
[
"Apache-2.0"
] | null | null | null |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import gzip
from starthinker.util.project import project
from starthinker.util.data import put_rows, get_rows
from starthinker.util.csv import excel_to_rows, csv_to_rows, rows_trim, rows_header_sanitize, column_header_sanitize
from starthinker.util.email import get_email_messages, get_email_links, get_email_attachments, get_subject, send_email
from starthinker.util.dbm import report_to_rows as dv360_report_to_rows, report_clean as dv360_report_clean
from starthinker.util.dcm import report_to_rows as cm_report_to_rows, report_clean as cm_report_clean, report_schema as cm_report_schema
def email_read():
# process only most recent message
try:
message = next(get_email_messages(
project.task['auth'],
project.task['read']['from'],
project.task['read']['to'],
project.task['read'].get('subject', None)
))
except StopIteration as e:
if project.verbose: print('NO EMAILS FOUND')
exit()
if project.verbose: print('EMAIL:', get_subject(message))
files = []
if project.task['read'].get('attachment'):
files.extend(get_email_attachments(project.task['auth'], message, project.task['read']['attachment']))
if project.task['read'].get('link'):
files.extend(get_email_links(project.task['auth'], message, project.task['read']['link'], download=True))
for filename, data in files:
if project.verbose: print('EMAIL FILENAME:', filename)
# decompress if necessary
if filename.endswith('.gz'):
data = gzip.GzipFile(fileobj=data, mode='rb')
filename = filename[:-3]
# if excel file, save each sheet individually
if filename.endswith('.xlsx'):
for sheet, rows in excel_to_rows(data):
rows = rows_trim(rows)
rows = rows_header_sanitize(rows)
put_rows(project.task['auth'], project.task['read']['out'], rows, sheet)
# if CM report
elif project.task['read']['from'] == 'noreply-cm@google.com':
rows = cm_report_to_rows(data.read().decode())
rows = cm_report_clean(rows)
# if bigquery, remove header and determine schema
schema = None
if 'bigquery' in project.task['read']['out']:
schema = cm_report_schema(next(rows))
project.task['read']['out']['bigquery']['schema'] = schema
project.task['read']['out']['bigquery']['skip_rows'] = 1
put_rows(project.task['auth'], project.task['read']['out'], rows)
# if dv360 report
elif project.task['read']['from'] == 'noreply-dv360@google.com':
rows = dv360_report_to_rows(data.getvalue().decode())
rows = dv360_report_clean(rows)
put_rows(project.task['auth'], project.task['read']['out'], rows)
# if csv
elif filename.endswith('.csv'):
rows = csv_to_rows(data.read().decode())
rows = rows_header_sanitize(rows)
put_rows(project.task['auth'], project.task['read']['out'], rows)
else:
if project.verbose: print('UNSUPPORTED FILE:', filename)
def email_send():
if project.verbose: print('EMAIL SEND')
send_email(
'user',
project.task['send']['from'],
project.task['send']['to'],
project.task['send'].get('cc', ''),
project.task['send']['subject'],
project.task['send']['text'],
project.task['send']['html'],
project.task['send']['attachment']['filename'],
get_rows('user', project.task['send']['attachment']),
)
@project.from_parameters
def email():
if 'read' in project.task: email_read()
elif 'send' in project.task: email_send()
if __name__ == "__main__":
email()
| 34.387097
| 136
| 0.659709
|
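email_read() above wraps gzipped attachments in gzip.GzipFile(fileobj=...) before parsing them as CSV. The standalone sketch below reproduces just that decompression step, with an in-memory buffer standing in for the downloaded attachment stream.

# Standalone sketch of the decompression step in email_read(); io.BytesIO
# stands in for the downloaded attachment's file-like object.
import gzip
import io

raw = gzip.compress(b"col_a,col_b\n1,2\n")
stream = io.BytesIO(raw)
text = gzip.GzipFile(fileobj=stream, mode='rb').read().decode()
print(text)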