hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace8f132eff29f06f3d50a6f9577e77036ba5fdd | 757 | py | Python | tests/torch2trt/convert.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | 1 | 2021-11-15T19:07:13.000Z | 2021-11-15T19:07:13.000Z | tests/torch2trt/convert.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | null | null | null | tests/torch2trt/convert.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | null | null | null | from torch2trt import torch2trt
import torch
import sys
from transformers import T5ForConditionalGeneration
model = T5ForConditionalGeneration.from_pretrained(
sys.argv[1]
)
model.eval()
model = model.to("cuda")
input_ids = torch.ones((1,128)).int().cuda()
attention_mask = torch.ones((1, 128)).int().cuda()
decoder_input_ids = torch.ones((1,1)).int().cuda()
decoder_attention_mask = torch.ones((1,1)).int().cuda()
model_trt = torch2trt(
model,
(input_ids,attention_mask,decoder_input_ids,decoder_attention_mask),
input_names=["input_ids", "attention_mask","decoder_input_ids","decoder_attention_mask"],
use_onnx=False, max_workspace_size=1<<30)
torch.save(model_trt.state_dict(), 'tiny.pth', _use_new_zipfile_serialization=False) | 34.409091 | 94 | 0.759577 |
ace8f172d4e54e108e261894b6c5f8dd19ceeb4e | 4,295 | py | Python | accounts/migrations/0001_initial.py | IEEE-Software-Hackathon/tell-e-gram | 266cd145f1f7e2491ece5d944ca009ff77f19ca6 | [
"MIT"
] | null | null | null | accounts/migrations/0001_initial.py | IEEE-Software-Hackathon/tell-e-gram | 266cd145f1f7e2491ece5d944ca009ff77f19ca6 | [
"MIT"
] | 2 | 2021-02-08T20:28:01.000Z | 2021-06-01T22:52:48.000Z | accounts/migrations/0001_initial.py | IEEE-Software-Hackathon/tell-e-gram | 266cd145f1f7e2491ece5d944ca009ff77f19ca6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-23 18:01
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Initial schema for the accounts app.

    Creates Course, Faculty and Student, the many-to-many 'through' tables
    (FacultyCourse, StudentCourse) linking them with a per-enrollment
    semester, and one-to-one links from Faculty/Student to the project's
    swappable auth user model.

    Auto-generated by Django 1.11 — do not hand-edit once applied.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('CourseId', models.CharField(max_length=8)),
                ('CourseName', models.CharField(max_length=250)),
                # Credits are restricted to the 1-4 range.
                ('Credit', models.IntegerField(validators=[django.core.validators.MaxValueValidator(4), django.core.validators.MinValueValidator(1)])),
            ],
        ),
        migrations.CreateModel(
            name='Faculty',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('FacultyID', models.CharField(max_length=8, unique=True)),
                ('Name', models.CharField(max_length=50)),
                ('Department', models.IntegerField()),
                ('Designation', models.IntegerField()),
                ('ContactNumber', phonenumber_field.modelfields.PhoneNumberField(max_length=128)),
                ('Email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            # Through table: which faculty member teaches which course, and
            # in which semester (1-8).
            name='FacultyCourse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Semester', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(8)])),
                ('Course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Course')),
                ('User', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Faculty')),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('StudentID', models.CharField(max_length=8, unique=True)),
                ('Name', models.CharField(max_length=50)),
                ('Branch', models.IntegerField()),
                ('YearOfStudy', models.IntegerField()),
                ('ContactNumber', phonenumber_field.modelfields.PhoneNumberField(max_length=128)),
                ('Email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            # Through table: which student is enrolled in which course, and
            # in which semester (1-8).
            name='StudentCourse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Semester', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(8)])),
                ('Course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Course')),
                ('User', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Student')),
            ],
        ),
        # M2M and user links are added after all models exist, since they
        # reference the through tables created above.
        migrations.AddField(
            model_name='student',
            name='Courses',
            field=models.ManyToManyField(through='accounts.StudentCourse', to='accounts.Course'),
        ),
        migrations.AddField(
            model_name='student',
            name='User',
            field=models.OneToOneField(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='faculty',
            name='Courses',
            field=models.ManyToManyField(through='accounts.FacultyCourse', to='accounts.Course'),
        ),
        migrations.AddField(
            model_name='faculty',
            name='User',
            field=models.OneToOneField(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 46.182796 | 153 | 0.610477 |
ace8f2e618c0df16e0bf057b9bf924c304a649e9 | 9,716 | py | Python | bin/dm_hcp_freesurfer.py | gabiherman/datman | dcbca4981ff7bb1be536d6c62c3b27786cabdef9 | [
"Apache-2.0"
] | null | null | null | bin/dm_hcp_freesurfer.py | gabiherman/datman | dcbca4981ff7bb1be536d6c62c3b27786cabdef9 | [
"Apache-2.0"
] | null | null | null | bin/dm_hcp_freesurfer.py | gabiherman/datman | dcbca4981ff7bb1be536d6c62c3b27786cabdef9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Runs the full HCP pipelines FreeSurfer pipeline. This generates myelin maps as
part of the outputs.
Usage:
dm_hcp_freesurfer.py [options] <study>
dm_hcp_freesurfer.py [options] <study> <subject> <T1> <T2>
Arguments:
<study> The name of a datman project. Will find and submit all
subjects that do not already have a complete set of outputs
<subject> The ID of a specific subject to run (instead of running in
batch mode)
<T1> The full path to a subject's T1 nifti
<T2> The full path to a subject's T2 nifti
Options:
--t1-tag STR The tag used to identify T1 files in batch mode.
[default: T1]
--t2-tag STR The tag used to identify T2 files in batch mode.
[default: T2]
--blacklist FILE The path to a blacklist specific to this pipeline. The
blacklist should be a list of file names to ignore (not
full paths), one file per line. Only files that may
match the T1 or T2 tag need to be blacklisted. Note that
the study specific blacklist will be consulted first
even if this option is not set.
--walltime STR The maximum wall time when running in batch mode.
[default: 36:00:00]
--log-to-server If set, all log messages will also be set to the logging
server configured in the site configuration file
--debug
--dry-run
"""
import os
import sys
import glob
import time
import logging
import logging.handlers
from docopt import docopt
import datman.utils as utils
import datman.config
import datman.scan
import datman.scanid as scanid
import datman.fs_log_scraper as log_scraper
# Module-wide logging: default WARN; the --debug CLI flag raises the root
# logger to DEBUG in main().
logging.basicConfig(level=logging.WARN,
        format="[%(name)s] %(levelname)s: %(message)s")
logger = logging.getLogger(os.path.basename(__file__))

# Global dry-run flag, overwritten from the --dry-run CLI option in main().
DRYRUN = False
def main():
    """Entry point: parse CLI options, then run one subject or all subjects."""
    global DRYRUN
    opts = docopt(__doc__)
    DRYRUN = opts['--dry-run']

    cfg = datman.config.config(study=opts['<study>'])

    if opts['--log-to-server']:
        add_server_handler(cfg)
    if opts['--debug']:
        logging.getLogger().setLevel(logging.DEBUG)

    check_environment()

    single_subject = opts['<subject>']
    if single_subject:
        # Explicit subject given on the command line: run just that one.
        run_pipeline(cfg, single_subject, opts['<T1>'], opts['<T2>'])
    else:
        # Batch mode: submit a job for every subject missing outputs.
        run_all_subjects(cfg, opts)
def add_server_handler(config):
    """Attach a socket handler so log records also reach the logging server."""
    handler = logging.handlers.SocketHandler(
        config.get_key('LOGSERVER'),
        logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    logger.addHandler(handler)
def check_environment():
    """Verify every external tool dependency is configured; exit on failure.

    Checks FSL, FreeSurfer, Connectome Workbench and the HCP pipelines
    environment variables. Exits with status 1 if anything is missing.
    """
    try:
        utils.check_dependency_configured('FSL', shell_cmd='fsl',
                env_vars=['FSLDIR'])
        utils.check_dependency_configured('FreeSurfer', shell_cmd='recon-all',
                env_vars=['FREESURFER_HOME'])
        utils.check_dependency_configured('Connectome-Workbench',
                shell_cmd='wb_command')
        utils.check_dependency_configured('HCP pipelines', env_vars=['HCPPIPEDIR',
                'HCPPIPEDIR_Config', 'HCPPIPEDIR_FS', 'HCPPIPEDIR_Global',
                'HCPPIPEDIR_PostFS', 'HCPPIPEDIR_PreFS', 'HCPPIPEDIR_Templates'])
    except EnvironmentError as e:
        # Bug fix: the old code used e.message, which does not exist in
        # Python 3 and would raise AttributeError instead of logging.
        logger.error(str(e))
        sys.exit(1)
def run_pipeline(config, subject, t1, t2):
    """Run the HCP FreeSurfer pipeline for a single subject.

    Exits if either anatomical input is missing; logs an error if the
    pipeline script returns a non-zero status.
    """
    for anat in (t1, t2):
        if not input_exists(anat):
            sys.exit(1)

    base_dir = utils.define_folder(config.get_path('hcp_fs'))
    dest_dir = utils.define_folder(os.path.join(base_dir, subject))

    command = "hcp-freesurfer.sh {} {} {} {}".format(base_dir, subject, t1, t2)
    with utils.cd(dest_dir):
        return_code, output = utils.run(command, dryrun=DRYRUN)
    if return_code:
        logger.error("hcp-freesurfer.sh exited with non-zero status code. "
                "Output: {}".format(output))
def input_exists(anat_input):
    """Return True if the anatomical input file exists, logging if it doesn't."""
    if os.path.exists(anat_input):
        return True
    logger.error("Input file does not exist: {}".format(anat_input))
    return False
def run_all_subjects(config, arguments):
    """Submit a pipeline job for every subject that still needs outputs.

    Subjects with complete outputs, or with partial outputs (possibly still
    running), are skipped silently/with a debug message. Subjects whose
    T1/T2 files cannot be uniquely identified are skipped with an error.
    """
    t1_tag = arguments['--t1-tag']
    t2_tag = arguments['--t2-tag']
    blacklist_file = arguments['--blacklist']
    walltime = arguments['--walltime']

    subjects = utils.get_subject_metadata(config)
    if blacklist_file:
        subjects = add_pipeline_blacklist(subjects, blacklist_file)

    hcp_fs_path = config.get_path('hcp_fs')
    logs = make_log_dir(hcp_fs_path)

    update_aggregate_log(hcp_fs_path, subjects)

    # (Removed an unused 'commands = []' local left over from a refactor.)
    for subject in subjects:
        if is_completed(subject, hcp_fs_path):
            continue
        if is_started(subject, hcp_fs_path):
            logger.debug("{} has partial outputs and may still be running. "
                    "Skipping".format(subject))
            continue
        scan = datman.scan.Scan(subject, config)
        blacklisted_files = subjects[subject]
        try:
            t1 = get_anatomical_file(scan, t1_tag, blacklisted_files)
            t2 = get_anatomical_file(scan, t2_tag, blacklisted_files)
        except ValueError as e:
            # str(e), not e.message: the .message attribute was removed in
            # Python 3 and would raise AttributeError here.
            logger.error("Skipping subject. Reason: {}".format(str(e)))
            continue
        cmd = create_command(config.study_name, subject, t1, t2, arguments)
        submit_job(cmd, subject, logs, walltime=walltime)
def add_pipeline_blacklist(subjects, blacklist_file):
    """Merge a pipeline-specific blacklist into the per-subject file lists.

    Each blacklist line holds a file name (not a full path); the entry is
    stripped of its nifti extensions and appended to the owning subject's
    list. Exits if the blacklist file is missing or unreadable; unparseable
    entries and entries for unknown subjects are skipped with a debug log.
    """
    if not os.path.exists(blacklist_file):
        logger.error("The given pipeline specific blacklist does not exist: "
                "{}".format(blacklist_file))
        sys.exit(1)

    try:
        with open(blacklist_file, 'r') as blacklist_data:
            blacklist = blacklist_data.readlines()
    except IOError:
        logger.error("Cannot read blacklist {}".format(blacklist_file))
        sys.exit(1)

    for entry in blacklist:
        entry = os.path.basename(entry)
        # Strip nifti extensions so entries match datman file name stems.
        entry = entry.replace('.nii', '').replace('.gz', '').strip()
        try:
            ident, tag, _, _ = scanid.parse_filename(entry)
        except scanid.ParseException:
            logger.debug("Cannot parse blacklist entry: {}. "
                    "Skipping.".format(entry))
            continue
        subid = ident.get_full_subjectid_with_timepoint()
        try:
            subjects[subid].append(entry)
        except KeyError:
            # Bug fix: 'subjects' is a dict, so a missing subject raises
            # KeyError. The old code caught IndexError and would crash.
            logger.debug("Blacklisted item given for subject not in "
                    "checklist.csv. Ignoring entry {}".format(entry))
            continue
    return subjects
def make_log_dir(path):
    """Create (unless dry-run) and return the pipeline's log directory."""
    log_dir = os.path.join(path, 'logs')
    if not DRYRUN:
        try:
            os.mkdir(log_dir)
        except OSError:
            # Directory already exists (the common case) or is not
            # creatable; in the latter case job submission will fail
            # loudly later. The old bare 'except:' also swallowed
            # KeyboardInterrupt/SystemExit, which this fixes.
            pass
    return log_dir
def update_aggregate_log(pipeline_path, subjects):
    """Scrape FreeSurfer run logs for all subjects into aggregate_log.csv.

    Does nothing when no subject has FreeSurfer outputs yet, or when
    running in dry-run mode.
    """
    fs_output_folders = []
    for subject in subjects:
        output_dir = os.path.join(pipeline_path, subject)
        fs_dir = os.path.join(output_dir, 'T1w', subject)
        if os.path.exists(fs_dir):
            fs_output_folders.append(fs_dir)

    if not fs_output_folders:
        # No outputs yet, skip log scraping
        return

    scraped_data = log_scraper.scrape_logs(fs_output_folders, col_headers=True)
    agg_log = os.path.join(pipeline_path, 'aggregate_log.csv')

    if DRYRUN:
        return

    try:
        with open(agg_log, 'w') as log:
            log.writelines(scraped_data)
    except Exception as e:
        # str(e) instead of e.message: the .message attribute was removed
        # in Python 3 and would raise AttributeError here.
        logger.error("Could not update aggregate log. Reason: {}".format(str(e)))
def is_completed(subject, pipeline_dir):
    """Return True if the subject already has myelin map outputs on disk."""
    fs_scripts = os.path.join(pipeline_dir, subject, 'MNINonLinear',
            'fsaverage_LR32k')
    # Any file matching *MyelinMap* marks the pipeline as finished.
    return bool(glob.glob(os.path.join(fs_scripts, '*MyelinMap*')))
def is_started(subject, pipeline_dir):
    """Return True if any partial pipeline output folders exist for the subject."""
    base_dir = os.path.join(pipeline_dir, subject)
    stage_dirs = ('MNINonLinear', 'T1w', 'T2w')
    return any(os.path.exists(os.path.join(base_dir, stage))
               for stage in stage_dirs)
def get_anatomical_file(scan, tag, blacklist):
    """Return the path of the single non-blacklisted file matching 'tag'.

    Args:
        scan: a datman Scan object.
        tag: the series tag to look for (e.g. 'T1').
        blacklist: file name stems (extension stripped) to exclude.

    Raises:
        ValueError: if no file, or more than one file, matches the tag after
            blacklisted entries are removed.
    """
    # Materialize a list rather than keeping the old filter() object: in
    # Python 3 a filter object is always truthy (so the "no files" check
    # never fired) and has no len() (TypeError on the multiple-files check).
    anat_files = [item for item in scan.get_tagged_nii(tag)
                  if item.file_name.replace(item.ext, '') not in blacklist]

    if not anat_files:
        raise ValueError("No files with tag {} found for {}".format(tag,
                scan.full_id))
    if len(anat_files) > 1:
        raise ValueError("Multiple files with tag {} found for {}. Please blacklist "
                "all but one".format(tag, scan.full_id))

    return anat_files[0].path
def create_command(study, subject, t1, t2, args):
    """Build the single-subject command line to resubmit to this script."""
    pieces = [os.path.basename(__file__), study, subject, t1, t2]
    # Forward any flags that were set on this (batch-mode) invocation.
    pieces += [flag for flag in ('--debug', '--dry-run', '--log-to-server')
               if args[flag]]
    return " ".join(pieces)
def submit_job(cmd, subid, log_dir, walltime="36:00:00"):
    """Submit 'cmd' to the cluster via qbatch, exiting if submission fails."""
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    job_name = "dm_hcp_freesurfer_{}_{}".format(subid, timestamp)
    qbatch_cmd = ("echo {} | qbatch -N {} --walltime {} "
            "--logdir {} -".format(cmd, job_name, walltime, log_dir))
    return_code, output = utils.run(qbatch_cmd, specialquote=False,
            dryrun=DRYRUN)
    if not return_code:
        return
    logger.error("Job submission failed.")
    if output:
        logger.error("stdout: {}".format(output))
    sys.exit(1)
# Script entry point.
if __name__ == '__main__':
    main()
| 34.94964 | 85 | 0.629374 |
ace8f39be7b46a0b51e8530f5e9f7811da280b3c | 18,111 | py | Python | python/ray/rllib/agents/ppo/appo_policy_graph.py | endymecy/ray | e7651b11177bb4140983626c12f45640caa05bf8 | [
"Apache-2.0"
] | 1 | 2019-11-04T05:08:47.000Z | 2019-11-04T05:08:47.000Z | python/ray/rllib/agents/ppo/appo_policy_graph.py | collinswei/ray | 2e30f7ba386e716bf80f019dcd473b67d83abb95 | [
"Apache-2.0"
] | null | null | null | python/ray/rllib/agents/ppo/appo_policy_graph.py | collinswei/ray | 2e30f7ba386e716bf80f019dcd473b67d83abb95 | [
"Apache-2.0"
] | null | null | null | """Adapted from VTracePolicyGraph to use the PPO surrogate loss.
Keep in sync with changes to VTracePolicyGraph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import logging
import gym
import ray
from ray.rllib.agents.impala import vtrace
from ray.rllib.evaluation.tf_policy_graph import TFPolicyGraph, \
LearningRateSchedule
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.models.action_dist import Categorical
from ray.rllib.evaluation.postprocessing import compute_advantages
logger = logging.getLogger(__name__)
class PPOSurrogateLoss(object):
"""Loss used when V-trace is disabled.
Arguments:
prev_actions_logp: A float32 tensor of shape [T, B].
actions_logp: A float32 tensor of shape [T, B].
actions_kl: A float32 tensor of shape [T, B].
actions_entropy: A float32 tensor of shape [T, B].
values: A float32 tensor of shape [T, B].
valid_mask: A bool tensor of valid RNN input elements (#2992).
advantages: A float32 tensor of shape [T, B].
value_targets: A float32 tensor of shape [T, B].
"""
def __init__(self,
prev_actions_logp,
actions_logp,
action_kl,
actions_entropy,
values,
valid_mask,
advantages,
value_targets,
vf_loss_coeff=0.5,
entropy_coeff=-0.01,
clip_param=0.3):
logp_ratio = tf.exp(actions_logp - prev_actions_logp)
surrogate_loss = tf.minimum(
advantages * logp_ratio,
advantages * tf.clip_by_value(logp_ratio, 1 - clip_param,
1 + clip_param))
self.mean_kl = tf.reduce_mean(action_kl)
self.pi_loss = -tf.reduce_sum(surrogate_loss)
# The baseline loss
delta = tf.boolean_mask(values - value_targets, valid_mask)
self.value_targets = value_targets
self.vf_loss = 0.5 * tf.reduce_sum(tf.square(delta))
# The entropy loss
self.entropy = tf.reduce_sum(
tf.boolean_mask(actions_entropy, valid_mask))
# The summed weighted loss
self.total_loss = (self.pi_loss + self.vf_loss * vf_loss_coeff +
self.entropy * entropy_coeff)
class VTraceSurrogateLoss(object):
def __init__(self,
actions,
prev_actions_logp,
actions_logp,
action_kl,
actions_entropy,
dones,
behaviour_logits,
target_logits,
discount,
rewards,
values,
bootstrap_value,
valid_mask,
vf_loss_coeff=0.5,
entropy_coeff=-0.01,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
clip_param=0.3):
"""PPO surrogate loss with vtrace importance weighting.
VTraceLoss takes tensors of shape [T, B, ...], where `B` is the
batch_size. The reason we need to know `B` is for V-trace to properly
handle episode cut boundaries.
Arguments:
actions: An int32 tensor of shape [T, B, NUM_ACTIONS].
prev_actions_logp: A float32 tensor of shape [T, B].
actions_logp: A float32 tensor of shape [T, B].
actions_kl: A float32 tensor of shape [T, B].
actions_entropy: A float32 tensor of shape [T, B].
dones: A bool tensor of shape [T, B].
behaviour_logits: A float32 tensor of shape [T, B, NUM_ACTIONS].
target_logits: A float32 tensor of shape [T, B, NUM_ACTIONS].
discount: A float32 scalar.
rewards: A float32 tensor of shape [T, B].
values: A float32 tensor of shape [T, B].
bootstrap_value: A float32 tensor of shape [B].
valid_mask: A bool tensor of valid RNN input elements (#2992).
"""
# Compute vtrace on the CPU for better perf.
with tf.device("/cpu:0"):
self.vtrace_returns = vtrace.from_logits(
behaviour_policy_logits=behaviour_logits,
target_policy_logits=target_logits,
actions=tf.cast(actions, tf.int32),
discounts=tf.to_float(~dones) * discount,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=tf.cast(clip_rho_threshold, tf.float32),
clip_pg_rho_threshold=tf.cast(clip_pg_rho_threshold,
tf.float32))
logp_ratio = tf.exp(actions_logp - prev_actions_logp)
advantages = self.vtrace_returns.pg_advantages
surrogate_loss = tf.minimum(
advantages * logp_ratio,
advantages * tf.clip_by_value(logp_ratio, 1 - clip_param,
1 + clip_param))
self.mean_kl = tf.reduce_mean(action_kl)
self.pi_loss = -tf.reduce_sum(surrogate_loss)
# The baseline loss
delta = tf.boolean_mask(values - self.vtrace_returns.vs, valid_mask)
self.value_targets = self.vtrace_returns.vs
self.vf_loss = 0.5 * tf.reduce_sum(tf.square(delta))
# The entropy loss
self.entropy = tf.reduce_sum(
tf.boolean_mask(actions_entropy, valid_mask))
# The summed weighted loss
self.total_loss = (self.pi_loss + self.vf_loss * vf_loss_coeff +
self.entropy * entropy_coeff)
class AsyncPPOPolicyGraph(LearningRateSchedule, TFPolicyGraph):
def __init__(self,
observation_space,
action_space,
config,
existing_inputs=None):
config = dict(ray.rllib.agents.impala.impala.DEFAULT_CONFIG, **config)
assert config["batch_mode"] == "truncate_episodes", \
"Must use `truncate_episodes` batch mode with V-trace."
self.config = config
self.sess = tf.get_default_session()
# Policy network model
dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"])
# Create input placeholders
if existing_inputs:
if self.config["vtrace"]:
actions, dones, behaviour_logits, rewards, observations, \
prev_actions, prev_rewards = existing_inputs[:7]
existing_state_in = existing_inputs[7:-1]
existing_seq_lens = existing_inputs[-1]
else:
actions, dones, behaviour_logits, rewards, observations, \
prev_actions, prev_rewards, adv_ph, value_targets = \
existing_inputs[:9]
existing_state_in = existing_inputs[9:-1]
existing_seq_lens = existing_inputs[-1]
else:
actions = ModelCatalog.get_action_placeholder(action_space)
if (not isinstance(action_space, gym.spaces.Discrete)
and self.config["vtrace"]):
raise UnsupportedSpaceException(
"Action space {} is not supported with vtrace.".format(
action_space))
dones = tf.placeholder(tf.bool, [None], name="dones")
rewards = tf.placeholder(tf.float32, [None], name="rewards")
behaviour_logits = tf.placeholder(
tf.float32, [None, logit_dim], name="behaviour_logits")
observations = tf.placeholder(
tf.float32, [None] + list(observation_space.shape))
existing_state_in = None
existing_seq_lens = None
if not self.config["vtrace"]:
adv_ph = tf.placeholder(
tf.float32, name="advantages", shape=(None, ))
value_targets = tf.placeholder(
tf.float32, name="value_targets", shape=(None, ))
self.observations = observations
# Setup the policy
prev_actions = ModelCatalog.get_action_placeholder(action_space)
prev_rewards = tf.placeholder(tf.float32, [None], name="prev_reward")
self.model = ModelCatalog.get_model(
{
"obs": observations,
"prev_actions": prev_actions,
"prev_rewards": prev_rewards,
},
observation_space,
logit_dim,
self.config["model"],
state_in=existing_state_in,
seq_lens=existing_seq_lens)
action_dist = dist_class(self.model.outputs)
prev_action_dist = dist_class(behaviour_logits)
values = self.model.value_function()
self.value_function = values
self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
tf.get_variable_scope().name)
def to_batches(tensor):
if self.config["model"]["use_lstm"]:
B = tf.shape(self.model.seq_lens)[0]
T = tf.shape(tensor)[0] // B
else:
# Important: chop the tensor into batches at known episode cut
# boundaries. TODO(ekl) this is kind of a hack
T = self.config["sample_batch_size"]
B = tf.shape(tensor)[0] // T
rs = tf.reshape(tensor,
tf.concat([[B, T], tf.shape(tensor)[1:]], axis=0))
# swap B and T axes
return tf.transpose(
rs,
[1, 0] + list(range(2, 1 + int(tf.shape(tensor).shape[0]))))
if self.model.state_in:
max_seq_len = tf.reduce_max(self.model.seq_lens) - 1
mask = tf.sequence_mask(self.model.seq_lens, max_seq_len)
mask = tf.reshape(mask, [-1])
else:
mask = tf.ones_like(rewards)
# Inputs are reshaped from [B * T] => [T - 1, B] for V-trace calc.
if self.config["vtrace"]:
logger.info("Using V-Trace surrogate loss (vtrace=True)")
self.loss = VTraceSurrogateLoss(
actions=to_batches(actions)[:-1],
prev_actions_logp=to_batches(
prev_action_dist.logp(actions))[:-1],
actions_logp=to_batches(action_dist.logp(actions))[:-1],
action_kl=prev_action_dist.kl(action_dist),
actions_entropy=to_batches(action_dist.entropy())[:-1],
dones=to_batches(dones)[:-1],
behaviour_logits=to_batches(behaviour_logits)[:-1],
target_logits=to_batches(self.model.outputs)[:-1],
discount=config["gamma"],
rewards=to_batches(rewards)[:-1],
values=to_batches(values)[:-1],
bootstrap_value=to_batches(values)[-1],
valid_mask=to_batches(mask)[:-1],
vf_loss_coeff=self.config["vf_loss_coeff"],
entropy_coeff=self.config["entropy_coeff"],
clip_rho_threshold=self.config["vtrace_clip_rho_threshold"],
clip_pg_rho_threshold=self.config[
"vtrace_clip_pg_rho_threshold"],
clip_param=self.config["clip_param"])
else:
logger.info("Using PPO surrogate loss (vtrace=False)")
self.loss = PPOSurrogateLoss(
prev_actions_logp=to_batches(prev_action_dist.logp(actions)),
actions_logp=to_batches(action_dist.logp(actions)),
action_kl=prev_action_dist.kl(action_dist),
actions_entropy=to_batches(action_dist.entropy()),
values=to_batches(values),
valid_mask=to_batches(mask),
advantages=to_batches(adv_ph),
value_targets=to_batches(value_targets),
vf_loss_coeff=self.config["vf_loss_coeff"],
entropy_coeff=self.config["entropy_coeff"],
clip_param=self.config["clip_param"])
# KL divergence between worker and learner logits for debugging
model_dist = Categorical(self.model.outputs)
behaviour_dist = Categorical(behaviour_logits)
self.KLs = model_dist.kl(behaviour_dist)
self.mean_KL = tf.reduce_mean(self.KLs)
self.max_KL = tf.reduce_max(self.KLs)
self.median_KL = tf.contrib.distributions.percentile(self.KLs, 50.0)
# Initialize TFPolicyGraph
loss_in = [
("actions", actions),
("dones", dones),
("behaviour_logits", behaviour_logits),
("rewards", rewards),
("obs", observations),
("prev_actions", prev_actions),
("prev_rewards", prev_rewards),
]
if not self.config["vtrace"]:
loss_in.append(("advantages", adv_ph))
loss_in.append(("value_targets", value_targets))
LearningRateSchedule.__init__(self, self.config["lr"],
self.config["lr_schedule"])
TFPolicyGraph.__init__(
self,
observation_space,
action_space,
self.sess,
obs_input=observations,
action_sampler=action_dist.sample(),
action_prob=action_dist.sampled_action_prob(),
loss=self.model.loss() + self.loss.total_loss,
loss_inputs=loss_in,
state_inputs=self.model.state_in,
state_outputs=self.model.state_out,
prev_action_input=prev_actions,
prev_reward_input=prev_rewards,
seq_lens=self.model.seq_lens,
max_seq_len=self.config["model"]["max_seq_len"],
batch_divisibility_req=self.config["sample_batch_size"])
self.sess.run(tf.global_variables_initializer())
if self.config["vtrace"]:
values_batched = to_batches(values)[:-1]
else:
values_batched = to_batches(values)
self.stats_fetches = {
"stats": {
"model_loss": self.model.loss(),
"cur_lr": tf.cast(self.cur_lr, tf.float64),
"policy_loss": self.loss.pi_loss,
"entropy": self.loss.entropy,
"grad_gnorm": tf.global_norm(self._grads),
"var_gnorm": tf.global_norm(self.var_list),
"vf_loss": self.loss.vf_loss,
"vf_explained_var": explained_variance(
tf.reshape(self.loss.value_targets, [-1]),
tf.reshape(values_batched, [-1])),
"mean_KL": self.mean_KL,
"max_KL": self.max_KL,
"median_KL": self.median_KL,
},
}
self.stats_fetches["kl"] = self.loss.mean_kl
def optimizer(self):
if self.config["opt_type"] == "adam":
return tf.train.AdamOptimizer(self.cur_lr)
else:
return tf.train.RMSPropOptimizer(self.cur_lr, self.config["decay"],
self.config["momentum"],
self.config["epsilon"])
def gradients(self, optimizer):
grads = tf.gradients(self.loss.total_loss, self.var_list)
self.grads, _ = tf.clip_by_global_norm(grads, self.config["grad_clip"])
clipped_grads = list(zip(self.grads, self.var_list))
return clipped_grads
def extra_compute_action_fetches(self):
out = {"behaviour_logits": self.model.outputs}
if not self.config["vtrace"]:
out["vf_preds"] = self.value_function
return dict(TFPolicyGraph.extra_compute_action_fetches(self), **out)
def extra_compute_grad_fetches(self):
return self.stats_fetches
def value(self, ob, *args):
feed_dict = {self.observations: [ob], self.model.seq_lens: [1]}
assert len(args) == len(self.model.state_in), \
(args, self.model.state_in)
for k, v in zip(self.model.state_in, args):
feed_dict[k] = v
vf = self.sess.run(self.value_function, feed_dict)
return vf[0]
def postprocess_trajectory(self,
sample_batch,
other_agent_batches=None,
episode=None):
if not self.config["vtrace"]:
completed = sample_batch["dones"][-1]
if completed:
last_r = 0.0
else:
next_state = []
for i in range(len(self.model.state_in)):
next_state.append(
[sample_batch["state_out_{}".format(i)][-1]])
last_r = self.value(sample_batch["new_obs"][-1], *next_state)
batch = compute_advantages(
sample_batch,
last_r,
self.config["gamma"],
self.config["lambda"],
use_gae=self.config["use_gae"])
else:
batch = sample_batch
del batch.data["new_obs"] # not used, so save some bandwidth
return batch
def get_initial_state(self):
return self.model.state_init
def copy(self, existing_inputs):
return AsyncPPOPolicyGraph(
self.observation_space,
self.action_space,
self.config,
existing_inputs=existing_inputs)
| 42.614118 | 80 | 0.560985 |
ace8f4392c110dd6f9551ae4b1c448e802b50e53 | 1,344 | py | Python | src/main/python/countBAMregion.py | zym1905/bioinformatics | 31e1b8e06ad9545b2168afd25a6b336bc4c93d3f | [
"Apache-2.0"
] | null | null | null | src/main/python/countBAMregion.py | zym1905/bioinformatics | 31e1b8e06ad9545b2168afd25a6b336bc4c93d3f | [
"Apache-2.0"
] | null | null | null | src/main/python/countBAMregion.py | zym1905/bioinformatics | 31e1b8e06ad9545b2168afd25a6b336bc4c93d3f | [
"Apache-2.0"
] | 1 | 2018-03-14T03:00:32.000Z | 2018-03-14T03:00:32.000Z | import sys
import re
# Maps a base letter to its index in a [A, T, C, G] count list.
BaseIndex = {'A': 0, 'T': 1, 'C': 2, 'G': 3}
def main(pileupfile):
    """Print per-position A/T/C/G depth counts for a samtools mpileup file.

    Each input line is echoed, then followed by a tab-separated summary of
    chromosome, position, reference base, reported depth and the four base
    counts. Returns 0 on success.
    """
    if len(sys.argv) != 2:
        print("please input the mpipeup file from samtools")
        sys.exit(1)
    # Use a context manager so the handle is always closed (the original
    # opened the file and never closed it), and stream lines instead of
    # loading the whole file with readlines().
    with open(pileupfile, 'r') as handle:
        for raw_line in handle:
            line = raw_line.rstrip("\n")
            print(line)
            splits = line.split("\t")
            basecount = countpileup(splits)
            # NOTE(review): the stray ']' in this format string looks like a
            # typo, but it is kept byte-for-byte in case downstream parsers
            # expect it — confirm before changing.
            print("%s\t%s\t%s\t%s\t%d\t%d\t%d\t%d]\t" % (
                splits[0], splits[1], splits[2], splits[3], basecount[0], basecount[1], basecount[2],
                basecount[3]))
    return 0
def countpileup(splits):
    """Count A/T/C/G occurrences in one mpileup record.

    Args:
        splits: the tab-split fields of an mpileup line; splits[2] is the
            reference base and splits[4] the read-bases string.

    Returns:
        A list of four counts in [A, T, C, G] order.
    """
    # Local copy of the module-level BaseIndex mapping.
    base_index = {'A': 0, 'T': 1, 'C': 2, 'G': 3}
    depthrecords = [0, 0, 0, 0]
    ref = splits[2].upper()
    bases = splits[4]
    i = 0
    # A while loop is required: the original used 'for i in range(...)' and
    # reassigned i to skip indel bases, which has no effect on a Python for
    # loop, so inserted/deleted sequences were wrongly counted as bases.
    while i < len(bases):
        char = bases[i]
        if char in ('+', '-'):
            # Indel: '+<len><seq>' or '-<len><seq>'. Parse the full
            # (possibly multi-digit) length, then skip that many bases.
            # This also fixes the old single-digit-only limitation the
            # author's TODO comment acknowledged.
            i += 1
            digits_start = i
            while i < len(bases) and bases[i].isdigit():
                i += 1
            i += int(bases[digits_start:i])
            continue
        upper_base = char.upper()
        # '.' and ',' stand for a match to the reference base (only
        # resolvable when the reference is not 'N').
        if splits[2] != 'N' and char in ('.', ','):
            upper_base = ref
        if upper_base in base_index:
            depthrecords[base_index[upper_base]] += 1
        i += 1
    return depthrecords
# Script entry point: expects the mpileup file path as the only argument.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1]))
| 25.846154 | 97 | 0.536458 |
ace8f7b65e8c80c51959a4739fe28405af5ed210 | 982 | py | Python | swpt_debtors/extensions.py | epandurski/swpt_debtors | 666dbf5d2730a19de02bb3295daeecafd301b3f5 | [
"MIT"
] | null | null | null | swpt_debtors/extensions.py | epandurski/swpt_debtors | 666dbf5d2730a19de02bb3295daeecafd301b3f5 | [
"MIT"
] | null | null | null | swpt_debtors/extensions.py | epandurski/swpt_debtors | 666dbf5d2730a19de02bb3295daeecafd301b3f5 | [
"MIT"
] | null | null | null | import os
import warnings
from sqlalchemy.exc import SAWarning
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_signalbus import SignalBusMixin, AtomicProceduresMixin
from flask_melodramatiq import RabbitmqBroker
from dramatiq import Middleware
from flask_smorest import Api
# Default dramatiq exchange name.
MAIN_EXCHANGE_NAME = 'dramatiq'
# Queue name for this service; overridable via the APP_QUEUE_NAME env var.
APP_QUEUE_NAME = os.environ.get('APP_QUEUE_NAME', 'swpt_debtors')
# Silence a specific SQLAlchemy warning by message text.
# NOTE(review): the message pattern below is literal placeholder text from
# the warnings-module docs — it almost certainly matches no real SAWarning.
# Confirm the intended warning message and fix the regex.
warnings.filterwarnings(
    'ignore',
    r"this is a regular expression for the text of the warning",
    SAWarning,
)
class CustomAlchemy(AtomicProceduresMixin, SignalBusMixin, SQLAlchemy):
    """Flask-SQLAlchemy extension augmented with signal-bus and
    atomic-procedure support; all behavior comes from the mixins."""
    pass
class EventSubscriptionMiddleware(Middleware):
    """Dramatiq middleware that registers the 'event_subscription' actor
    option so actors may declare it without dramatiq rejecting it."""

    @property
    def actor_options(self):
        # Extra option names dramatiq should accept on actor declarations.
        return {'event_subscription'}
# Shared extension instances; each is bound to the Flask app elsewhere
# (via init_app) to avoid circular imports.
db = CustomAlchemy()
# Flushing is driven explicitly by the signal-bus machinery.
db.signalbus.autoflush = False
migrate = Migrate()
# Publisher confirms (confirm_delivery) guard against silent message loss.
protocol_broker = RabbitmqBroker(config_prefix='PROTOCOL_BROKER', confirm_delivery=True)
protocol_broker.add_middleware(EventSubscriptionMiddleware())
api = Api()
| 26.540541 | 88 | 0.806517 |
ace8f8e6966fe71a762fc96d4132f09c376f5dc0 | 5,047 | py | Python | egs/sre18/v1.8k/steps_kaldi_diar/make_rttm.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | 14 | 2021-12-19T04:24:15.000Z | 2022-03-18T03:24:04.000Z | egs/sre18/v1.8k/steps_kaldi_diar/make_rttm.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | null | null | null | egs/sre18/v1.8k/steps_kaldi_diar/make_rttm.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | 5 | 2021-12-14T20:41:27.000Z | 2022-02-24T14:18:11.000Z | #!/usr/bin/env python3
# Copyright 2016 David Snyder
# 2017 Matthew Maciejewski
# Apache 2.0.
"""This script converts a segments and labels file to a NIST RTTM
file. It creates flat segmentation (i.e. no overlapping regions)
from overlapping segments, e.g. the output of a sliding-window
diarization system. The speaker boundary between two overlapping
segments by different speakers is placed at the midpoint between
the end of the first segment and the start of the second segment.
The segments file format is:
<segment-id> <recording-id> <start-time> <end-time>
The labels file format is:
<segment-id> <speaker-id>
The output RTTM format is:
<type> <file> <chnl> <tbeg> \
<tdur> <ortho> <stype> <name> <conf> <slat>
where:
<type> = "SPEAKER"
<file> = <recording-id>
<chnl> = "0"
<tbeg> = start time of segment
<tdur> = duration of segment
<ortho> = "<NA>"
<stype> = "<NA>"
<name> = <speaker-id>
<conf> = "<NA>"
<slat> = "<NA>"
"""
import argparse
import sys
# Make the shared helper library under steps/libs importable.
sys.path.append("steps/libs")
import common as common_lib
def get_args():
    """Parse and return the command-line arguments for this script."""
    arg_parser = argparse.ArgumentParser(
        description="""This script converts a segments and labels file
        to a NIST RTTM file. It handles overlapping segments (e.g. the
        output of a sliding-window diarization system)."""
    )
    # The three positional arguments share the same shape; add them in order.
    for arg_name, arg_help in (
        ("segments", "Input segments file"),
        ("labels", "Input labels file"),
        ("rttm_file", "Output RTTM file"),
    ):
        arg_parser.add_argument(arg_name, type=str, help=arg_help)
    arg_parser.add_argument(
        "--rttm-channel",
        type=int,
        default=0,
        help="The value passed into the RTTM channel field. \
                         Only affects the format of the RTTM file.",
    )
    return arg_parser.parse_args()
def main():
    """Convert a segments file plus a labels file into a NIST RTTM file."""
    args = get_args()
    # Map each segment-id to its speaker label.
    seg2label = {}
    with common_lib.smart_open(args.labels) as labels_file:
        for line in labels_file:
            seg, label = line.strip().split()
            seg2label[seg] = label
    # Collect, per recording, one space-separated string of
    # "start,end,label" entries, prefixed with the recording-id.
    reco2segs = {}
    with common_lib.smart_open(args.segments) as segments_file:
        for line in segments_file:
            seg, reco, start, end = line.strip().split()
            try:
                if reco in reco2segs:
                    reco2segs[reco] = (
                        reco2segs[reco] + " " + start + "," + end + "," + seg2label[seg]
                    )
                else:
                    reco2segs[reco] = (
                        reco + " " + start + "," + end + "," + seg2label[seg]
                    )
            except KeyError:
                raise RuntimeError("Missing label for segment {0}".format(seg))
    # Cut up overlapping segments so they are contiguous: when a segment
    # overlaps the next one, the boundary is moved to the midpoint between
    # the first segment's end and the second segment's start.
    contiguous_segs = []
    for reco in sorted(reco2segs):
        segs = reco2segs[reco].strip().split()
        new_segs = ""
        for i in range(1, len(segs) - 1):
            start, end, label = segs[i].split(",")
            next_start, next_end, next_label = segs[i + 1].split(",")
            if float(end) > float(next_start):
                done = False  # NOTE(review): 'done' is never read; leftover.
                avg = str((float(next_start) + float(end)) / 2.0)
                segs[i + 1] = ",".join([avg, next_end, next_label])
                new_segs += " " + start + "," + avg + "," + label
            else:
                new_segs += " " + start + "," + end + "," + label
        # The final segment is appended unchanged.
        start, end, label = segs[-1].split(",")
        new_segs += " " + start + "," + end + "," + label
        contiguous_segs.append(reco + new_segs)
    # Merge runs of contiguous segments that share the same speaker label.
    merged_segs = []
    for reco_line in contiguous_segs:
        segs = reco_line.strip().split()
        reco = segs[0]
        new_segs = ""
        for i in range(1, len(segs) - 1):
            start, end, label = segs[i].split(",")
            next_start, next_end, next_label = segs[i + 1].split(",")
            if float(end) == float(next_start) and label == next_label:
                segs[i + 1] = ",".join([start, next_end, next_label])
            else:
                new_segs += " " + start + "," + end + "," + label
        start, end, label = segs[-1].split(",")
        new_segs += " " + start + "," + end + "," + label
        merged_segs.append(reco + new_segs)
    # Emit one SPEAKER line per merged segment in NIST RTTM format.
    with common_lib.smart_open(args.rttm_file, "w") as rttm_writer:
        for reco_line in merged_segs:
            segs = reco_line.strip().split()
            reco = segs[0]
            for i in range(1, len(segs)):
                start, end, label = segs[i].strip().split(",")
                print(
                    "SPEAKER {0} {1} {2:7.3f} {3:7.3f} <NA> <NA> {4} <NA> <NA>".format(
                        reco,
                        args.rttm_channel,
                        float(start),
                        float(end) - float(start),
                        label,
                    ),
                    file=rttm_writer,
                )
# Standard script entry point.
if __name__ == "__main__":
    main()
| 34.333333 | 88 | 0.545076 |
ace8fad859e3a60c112071b3ba1976265d638f94 | 1,701 | py | Python | templates/document-a4/makeversion.py | mayersre/sphinx-gitlab-quickstart | f19175f87ff7c1aef97cfe2b687f095bd23356f3 | [
"MIT"
] | null | null | null | templates/document-a4/makeversion.py | mayersre/sphinx-gitlab-quickstart | f19175f87ff7c1aef97cfe2b687f095bd23356f3 | [
"MIT"
] | 1 | 2020-04-01T13:15:52.000Z | 2020-04-01T13:15:52.000Z | templates/document-a4/makeversion.py | mayersre/sphinx-gitlab-quickstart | f19175f87ff7c1aef97cfe2b687f095bd23356f3 | [
"MIT"
] | null | null | null | '''
Created on 16.04.2019
@author: mayers
'''
import os, sys
import git
import datetime
# Timestamp baked into the generated version file.
BUILD_TIME=datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S")
# Make the project root importable so `etc.settings` resolves.
lib_path = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
sys.path.append(lib_path)
from etc.settings import SOURCE_DIR, GIT_REVISION, GIT_VERSION, GIT_MESSAGE
# German variant of the generated reStructuredText content.
outfile='''
.. Versions-Informationsdatei, wird beim erstellen angelegt
Bitte nicht bearbeiten
Skript Version und Abbildungen
------------------------------
Dieses Skript wird von einem Versionskontrollsystem verwaltet.
Alle Änderungen werden mit Historie gespeichert und sind mit
den folgenden Informationen reproduzierbar:
GIT Version : {}
GIT Version Message : {}
GIT Revision (hexsha) : {}
Erstellungsdatum : {}
'''.format(GIT_VERSION,GIT_MESSAGE,GIT_REVISION,BUILD_TIME)
# English variant of the generated reStructuredText content.
outfile_en='''
.. Version Information file, created when running a script build
Do not manually edit, changes will be lost
Script Version-Information
---------------------------
This script is managed with a sourcecode version control system (git).
All changes are saved with a history and can be reproduced with the
following versioning information :
GIT Version : {}
GIT Version Message : {}
GIT Revision (hexsha) : {}
Build date and time : {}
'''.format(GIT_VERSION,GIT_MESSAGE,GIT_REVISION,BUILD_TIME)
# Recreate <SOURCE_DIR>/version.rst from scratch on every build.
dst_file=os.path.join(SOURCE_DIR, 'version.rst')
if os.path.exists(dst_file):
    os.remove(dst_file)
# NOTE(review): the {% ... %} lines are Jinja2 markers -- this file is a
# template that is rendered before it becomes runnable Python.
{% if values.language == 'en' %}
with open(dst_file,'w') as file__:
    file__.writelines(outfile_en)
{% else %}
with open(dst_file,'w') as file__:
    file__.writelines(outfile)
{% endif %}
#
#print('\n\n\n',outfile,'\n\n\n')
| 20.011765 | 75 | 0.695473 |
ace8fb2e069fb1ebaae41c69a50dff82e7a3307c | 1,689 | py | Python | src/config/setting.py | jack139/trouble | 00f3f8a84229d71aaa507bd6b8eb2ccbd4e32ac0 | [
"BSD-3-Clause"
] | null | null | null | src/config/setting.py | jack139/trouble | 00f3f8a84229d71aaa507bd6b8eb2ccbd4e32ac0 | [
"BSD-3-Clause"
] | null | null | null | src/config/setting.py | jack139/trouble | 00f3f8a84229d71aaa507bd6b8eb2ccbd4e32ac0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
from pymongo import MongoClient
#####
debug_mode = True # False - production, True - staging
#####
# Outbound proxy settings.
enable_proxy = True
http_proxy = 'http://192.168.2.108:8888'
https_proxy = 'https://192.168.2.108:8888'
proxy_list = ['192.168.2.103']
enable_local_test = True
#####
# MongoDB connection: single host by default; replica-set URI kept below.
db_serv_list='127.0.0.1'
# db_serv_list='mongodb://10.168.11.151:27017,10.252.95.145:27017,10.252.171.8:27017/?replicaSet=rs0'
cli = {
    'web' : MongoClient(db_serv_list),
}
# MongoClient('10.168.11.151', replicaset='rs0', readPreference='secondaryPreferred') # prefer reads from secondaries
# MongoClient('mongodb://10.168.11.151:27017,10.252.95.145:27017,10.252.171.8:27017/?replicaSet=rs0')
db_web = cli['web']['trouble_db']
# NOTE(review): database credentials are hard-coded; consider loading them
# from the environment.
db_web.authenticate('ipcam','ipcam')
db_primary = db_web
thread_num = 1
auth_user = ['test','gt']
cs_admin = ['cs0']
# Filesystem paths served by nginx.
tmp_path = '/usr/local/nginx/html/xah/static/tmp'
logs_path = '/usr/local/nginx/logs'
image_store_path = '/usr/share/nginx/html/xah/static/upload'
# Public host names.
app_host='xah-wx.xmu.edu.cn'
wx_host='xah-wx.xmu.edu.cn'
image_host='xah-wx.xmu.edu.cn/static'
notify_host='xah-wx.xmu.edu.cn'
app_pool=['xah-wx.xmu.edu.cn']
# Per-region WeChat app / merchant credentials.
WX_store = {
    '000' : { # test region
        'wx_appid' : '',
        'wx_appsecret' : '9e9ee82b0f6083311ec1c51e726dedf0',
        'mch_id' : '1408035102',
    },
}
# WeChat settings for the active region.
region_id = '000'
wx_setting = WX_store[region_id]
order_fuffix=''  # NOTE(review): likely a typo for "order_suffix"; renaming would break importers.
http_port=8000
https_port=443
mail_server='127.0.0.1'
sender='"jack139"<jack139@gmail.com>'
worker=['jack139@gmail.com']
web.config.debug = debug_mode
# web.py application-level configuration object.
config = web.storage(
    email = 'jack139@gmail.com',
    site_name = 'ipcam',
    site_des = '',
    static = '/static'
)
| 22.223684 | 101 | 0.677324 |
ace8fc382f2ed5d3a640f0162c0a7b3b966061df | 8,780 | py | Python | ppci/arch/riscv/rvfx_instructions.py | kl4w3i/ppci | f3be0d4ebc1ac57907fd9c21e886ab70b2805c21 | [
"BSD-2-Clause"
] | 161 | 2020-05-31T03:29:42.000Z | 2022-03-07T08:36:19.000Z | ppci/arch/riscv/rvfx_instructions.py | pfalcon/ppci | ea72967c8984757b4665a2fb0db095f32687eabf | [
"BSD-2-Clause"
] | 74 | 2020-05-26T18:05:48.000Z | 2021-02-13T21:55:39.000Z | ppci/arch/riscv/rvfx_instructions.py | pfalcon/ppci | ea72967c8984757b4665a2fb0db095f32687eabf | [
"BSD-2-Clause"
] | 19 | 2020-05-27T19:22:11.000Z | 2022-02-17T18:53:52.000Z | """ Definitions of Riscv instructions. """
import struct
from ..isa import Isa
from ..encoding import Instruction, Syntax, Operand
from .registers import RiscvRegister, R0
from .tokens import RiscvToken, RiscvIToken, RiscvSToken
from .instructions import Li, B, Bne, Sw, Lw
class RegisterSet(set):
    """A set of registers whose repr lists the members sorted by name."""

    def __repr__(self):
        return ", ".join(sorted(map(str, self)))
# Instruction-set container that the @rvfxisa.pattern decorators below
# register their lowering patterns into.
rvfxisa = Isa()
class RiscvInstruction(Instruction):
    """Base class for the float instructions defined in this module."""
    tokens = [RiscvToken]
    isa = rvfxisa
def make_fregfregfreg(mnemonic, rounding, func):
    """Factory for three-register single-precision float instructions.

    Builds an instruction class for ``<mnemonic>.s rd, rn, rm`` with the
    given funct3 value (the rounding-mode field in F-extension encodings)
    and funct7 operation selector.
    """
    rd = Operand("rd", RiscvRegister, write=True)
    rn = Operand("rn", RiscvRegister, read=True)
    rm = Operand("rm", RiscvRegister, read=True)
    syntax = Syntax([mnemonic, ".", "s", " ", rd, ",", " ", rn, ",", " ", rm])
    tokens = [RiscvToken]
    # OP-FP major opcode with the operand fields wired to the Operands above.
    patterns = {
        "opcode": 0b1010011,
        "rd": rd,
        "funct3": rounding,
        "rs1": rn,
        "rs2": rm,
        "funct7": func,
    }
    # Class attributes consumed by the Instruction machinery.
    members = {
        "syntax": syntax,
        "rd": rd,
        "rn": rn,
        "rm": rm,
        "patterns": patterns,
        "tokens": tokens,
        "rounding": rounding,
        "func": func,
    }
    return type(mnemonic + "_ins", (RiscvInstruction,), members)
# Three-register float operations; funct7 selects the operation and
# funct3 0b111 selects the dynamic rounding mode.
FAdd = make_fregfregfreg("fadd", 0b111, 0b0000000)
FSub = make_fregfregfreg("fsub", 0b111, 0b0000100)
FMul = make_fregfregfreg("fmul", 0b111, 0b0001000)
FDiv = make_fregfregfreg("fdiv", 0b111, 0b0001100)
FSgnjn = make_fregfregfreg("fsgnjn", 0b001, 0b0010000)
def negf(dst, src):
    """ Negate src into dst register (fneg.s, encoded as fsgnjn dst, src, src) """
    return FSgnjn(dst, src, src)
class Fcvtsw(RiscvInstruction):
    """fcvt.s.w: convert a signed 32-bit integer to single-precision float."""
    rd = Operand("rd", RiscvRegister, write=True)
    rm = Operand("rm", RiscvRegister, read=True)
    syntax = Syntax(["fcvt", ".", "s", ".", "w", " ", rd, ",", " ", rm])
    patterns = {
        "opcode": 0b1010011,
        "rd": rd,
        "funct3": 0b111,
        "rs1": rm,
        "rs2": 0,
        "funct7": 0b1101000,
    }
class Fcvtswu(RiscvInstruction):
    """fcvt.s.wu: convert an unsigned 32-bit integer to single-precision float."""
    rd = Operand("rd", RiscvRegister, write=True)
    rm = Operand("rm", RiscvRegister, read=True)
    syntax = Syntax(["fcvt", ".", "s", ".", "wu", " ", rd, ",", " ", rm])
    patterns = {
        "opcode": 0b1010011,
        "rd": rd,
        "funct3": 0b111,
        "rs1": rm,
        "rs2": 0b00001,
        "funct7": 0b1101000,
    }
class Fcvtws(RiscvInstruction):
    """fcvt.w.s: convert a single-precision float to a signed 32-bit integer."""
    rd = Operand("rd", RiscvRegister, write=True)
    rm = Operand("rm", RiscvRegister, read=True)
    syntax = Syntax(["fcvt", ".", "w", ".", "s", " ", rd, ",", " ", rm])
    patterns = {
        "opcode": 0b1010011,
        "rd": rd,
        "funct3": 0b111,
        "rs1": rm,
        "rs2": 0,
        "funct7": 0b1100000,
    }
class Fcvtwus(RiscvInstruction):
    """fcvt.wu.s: convert a single-precision float to an unsigned 32-bit integer."""
    rd = Operand("rd", RiscvRegister, write=True)
    rm = Operand("rm", RiscvRegister, read=True)
    syntax = Syntax(["fcvt", ".", "wu", ".", "s", " ", rd, ",", " ", rm])
    patterns = {
        "opcode": 0b1010011,
        "rd": rd,
        "funct3": 0b111,
        "rs1": rm,
        "rs2": 0b00001,
        "funct7": 0b1100000,
    }
def make_fcmp(mnemonic, func3, invert):
    """Factory for single-precision float comparison instructions.

    Builds a ``f.<mnemonic>.s rd, rn, rm`` class.  With ``invert`` set the
    source operands are swapped in the encoding, which turns flt/fle into
    the "greater" variants fgt/fge.
    """
    rd = Operand("rd", RiscvRegister, write=True)
    rn = Operand("rn", RiscvRegister, read=True)
    rm = Operand("rm", RiscvRegister, read=True)
    tokens = [RiscvToken]
    syntax = Syntax(
        ["f", ".", mnemonic, ".", "s", " ", rd, ",", " ", rn, ",", " ", rm]
    )
    if not invert:
        patterns = {
            "opcode": 0b1010011,
            "rd": rd,
            "funct3": func3,
            "rs1": rn,
            "rs2": rm,
            "funct7": 0b1010000,
        }
    else:
        # Same encoding but with rs1/rs2 swapped.
        # NOTE(review): 'fne' is also built with invert=True, yet swapping
        # the operands of an equality test does not negate it -- confirm
        # the intended semantics of Fne.
        patterns = {
            "opcode": 0b1010011,
            "rd": rd,
            "funct3": func3,
            "rs1": rm,
            "rs2": rn,
            "funct7": 0b1010000,
        }
    members = {
        "syntax": syntax,
        "rd": rd,
        "rn": rn,
        "rm": rm,
        "patterns": patterns,
        "tokens": tokens,
        "func3": func3,
    }
    return type(mnemonic + "_ins", (RiscvInstruction,), members)
# Float comparison instructions; the inverted variants reuse the base
# encodings with swapped operands (invert=True).
Feq = make_fcmp("feq", 0b010, False)
Fle = make_fcmp("fle", 0b000, False)
Flt = make_fcmp("flt", 0b001, False)
Fne = make_fcmp("fne", 0b010, True)
Fgt = make_fcmp("fgt", 0b000, True)
Fge = make_fcmp("fge", 0b001, True)
@rvfxisa.pattern("reg", "CONSTF32", size=2)
@rvfxisa.pattern("reg", "CONSTF64", size=2)
def pattern_const_f32(context, tree):
    """Materialize a float constant by loading its IEEE-754 bit pattern."""
    bits = struct.unpack("i", struct.pack("f", tree.value))[0]
    target = context.new_reg(RiscvRegister)
    context.emit(Li(target, bits))
    return target
@rvfxisa.pattern("reg", "ADDF64(reg, reg)", size=5)
@rvfxisa.pattern("reg", "ADDF32(reg, reg)", size=5)
def pattern_add_f32(context, tree, c0, c1):
    """Emit a float add (fadd.s) into a fresh register."""
    result = context.new_reg(RiscvRegister)
    context.emit(FAdd(result, c0, c1))
    return result
@rvfxisa.pattern("reg", "SUBF64(reg, reg)", size=5)
@rvfxisa.pattern("reg", "SUBF32(reg, reg)", size=5)
def pattern_sub_f32(context, tree, c0, c1):
    """Emit a float subtract (fsub.s) into a fresh register."""
    result = context.new_reg(RiscvRegister)
    context.emit(FSub(result, c0, c1))
    return result
@rvfxisa.pattern("reg", "MULF64(reg, reg)", size=5)
@rvfxisa.pattern("reg", "MULF32(reg, reg)", size=5)
def pattern_mul_f32(context, tree, c0, c1):
    """Emit a float multiply (fmul.s) into a fresh register."""
    result = context.new_reg(RiscvRegister)
    context.emit(FMul(result, c0, c1))
    return result
@rvfxisa.pattern("reg", "DIVF64(reg, reg)", size=5)
@rvfxisa.pattern("reg", "DIVF32(reg, reg)", size=5)
def pattern_div_f32(context, tree, c0, c1):
    """Emit a float divide (fdiv.s) into a fresh register."""
    result = context.new_reg(RiscvRegister)
    context.emit(FDiv(result, c0, c1))
    return result
@rvfxisa.pattern("reg", "NEGF64(reg)", size=5)
@rvfxisa.pattern("reg", "NEGF32(reg)", size=5)
def pattern_neg_f32(context, tree, c0):
    """Emit a float negate (fneg via fsgnjn) into a fresh register."""
    result = context.new_reg(RiscvRegister)
    context.emit(negf(result, c0))
    return result
@rvfxisa.pattern("stm", "MOVF32(reg)", size=5)
@rvfxisa.pattern("stm", "MOVF64(reg)", size=5)
def pattern_mov32(context, tree, c0):
    """Copy register c0 into the destination register named by the tree."""
    context.move(tree.value, c0)
    return tree.value
@rvfxisa.pattern("reg", "REGF32", size=2)
@rvfxisa.pattern("reg", "REGF64", size=2)
def pattern_reg(context, tree):
    """A float value already held in a register needs no code."""
    return tree.value
@rvfxisa.pattern("reg", "F32TOF64(reg)", size=2)
@rvfxisa.pattern("reg", "F64TOF32(reg)", size=2)
def pattern_i32_to_i32(context, tree, c0):
    """F32<->F64 conversions emit no code and reuse the source register.

    NOTE(review): presumably f64 values are mapped onto the same 32-bit
    registers here; the function name looks copied from the int patterns.
    """
    return c0
@rvfxisa.pattern("reg", "F32TOI32(reg)", size=2)
@rvfxisa.pattern("reg", "F64TOI32(reg)", size=2)
def pattern_ftoi_f32(context, tree, c0):
    """Convert a float register to a signed 32-bit integer (fcvt.w.s)."""
    result = context.new_reg(RiscvRegister)
    context.emit(Fcvtws(result, c0))
    return result
@rvfxisa.pattern("reg", "F32TOU32(reg)", size=2)
@rvfxisa.pattern("reg", "F64TOU32(reg)", size=2)
def pattern_ftou_f32(context, tree, c0):
    """Convert a float register to an unsigned 32-bit integer.

    Fix: emit the unsigned conversion fcvt.wu.s (Fcvtwus) instead of the
    signed fcvt.w.s, which the signed pattern above already covers (and
    Fcvtwus was otherwise defined but never used).
    """
    d = context.new_reg(RiscvRegister)
    context.emit(Fcvtwus(d, c0))
    return d
@rvfxisa.pattern("reg", "I32TOF32(reg)", size=2)
@rvfxisa.pattern("reg", "I32TOF64(reg)", size=2)
def pattern_itof_f32(context, tree, c0):
    """Convert a signed 32-bit integer register to float (fcvt.s.w)."""
    result = context.new_reg(RiscvRegister)
    context.emit(Fcvtsw(result, c0))
    return result
@rvfxisa.pattern("reg", "U32TOF32(reg)", size=2)
@rvfxisa.pattern("reg", "U32TOF64(reg)", size=2)
def pattern_utof_f32(context, tree, c0):
    """Convert an unsigned 32-bit integer register to float (fcvt.s.wu)."""
    result = context.new_reg(RiscvRegister)
    context.emit(Fcvtswu(result, c0))
    return result
@rvfxisa.pattern("reg", "LDRF32(mem)", size=2)
@rvfxisa.pattern("reg", "LDRF64(mem)", size=2)
def pattern_ldr32_fprel(context, tree, c0):
    """Load a float from a (base-register, offset) memory operand."""
    d = context.new_reg(RiscvRegister)
    base_reg, offset = c0
    Code = Lw(d, offset, base_reg)
    # Mark the load as frame-pointer relative.  NOTE(review): presumably so
    # the offset can be fixed up later -- confirm the flag's consumer.
    Code.fprel = True
    context.emit(Code)
    return d
@rvfxisa.pattern("reg", "LDRF32(reg)", size=2)
@rvfxisa.pattern("reg", "LDRF64(reg)", size=2)
def pattern_ldr32_reg(context, tree, c0):
    """Load a float from the address held in a register (offset 0).

    Fix: renamed from a duplicate ``pattern_ldr32_fprel`` definition that
    shadowed the frame-pointer-relative variant above at module level; the
    pattern is only reached through the decorator registration.
    """
    d = context.new_reg(RiscvRegister)
    base_reg, offset = c0, 0
    context.emit(Lw(d, offset, base_reg))
    return d
@rvfxisa.pattern("stm", "STRF32(mem, reg)", size=2)
@rvfxisa.pattern("stm", "STRF64(mem, reg)", size=2)
def pattern_sw32(context, tree, c0, c1):
    """Store float register c1 to a (base-register, offset) memory operand."""
    base_reg, offset = c0
    Code = Sw(c1, offset, base_reg)
    # Mark the store as frame-pointer relative (see the load pattern above).
    Code.fprel = True
    context.emit(Code)
@rvfxisa.pattern("stm", "STRF32(reg, reg)", size=2)
@rvfxisa.pattern("stm", "STRF64(reg, reg)", size=2)
def pattern_sw32_reg(context, tree, c0, c1):
    """Store float register c1 to the address held in register c0 (offset 0).

    Fix: renamed from a duplicate ``pattern_sw32`` definition that shadowed
    the frame-pointer-relative variant above at module level; the pattern
    is only reached through the decorator registration.
    """
    base_reg, offset = c0, 0
    context.emit(Sw(c1, offset, base_reg))
@rvfxisa.pattern("stm", "CJMPF32(reg, reg)", size=2)
@rvfxisa.pattern("stm", "CJMPF64(reg, reg)", size=2)
def pattern_cjmp(context, tree, c0, c1):
    """Lower a float conditional jump: compare, branch-if-set, else jump."""
    op, yes_label, no_label = tree.value
    # Map the comparison operator to its float compare instruction class.
    opnames = {"<": Flt, ">": Fgt, "==": Feq, "!=": Fne, ">=": Fge, "<=": Fle}
    Bop = opnames[op]
    jmp_ins = B(no_label.name, jumps=[no_label])
    d = context.new_reg(RiscvRegister)
    # The compare writes its result into d; branch to "yes" when d != 0,
    # otherwise fall through to the unconditional jump to "no".
    context.emit(Bop(d, c0, c1))
    context.emit(Bne(d, R0, yes_label.name, jumps=[yes_label, jmp_ins]))
    context.emit(jmp_ins)
| 27.697161 | 78 | 0.598178 |
ace8fc9d63f0a928e459b37d3f512d8595f12144 | 43,816 | py | Python | sktime/distances/_distance.py | AngelPone/sktime | 3aef06bcdc83bbfc5299a8e20a1fbefb34651e40 | [
"BSD-3-Clause"
] | null | null | null | sktime/distances/_distance.py | AngelPone/sktime | 3aef06bcdc83bbfc5299a8e20a1fbefb34651e40 | [
"BSD-3-Clause"
] | null | null | null | sktime/distances/_distance.py | AngelPone/sktime | 3aef06bcdc83bbfc5299a8e20a1fbefb34651e40 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Compute the distance between two time series."""
from typing import Any, Callable, Union
import numpy as np
from sktime.distances._ddtw import DerivativeCallable, _average_of_slope, _DdtwDistance
from sktime.distances._dtw import _DtwDistance
from sktime.distances._edr import _EdrDistance
from sktime.distances._erp import _ErpDistance
from sktime.distances._euclidean import _EuclideanDistance
from sktime.distances._lcss import _LcssDistance
from sktime.distances._numba_utils import (
_compute_pairwise_distance,
to_numba_pairwise_timeseries,
to_numba_timeseries,
)
from sktime.distances._resolve_metric import _resolve_metric
from sktime.distances._squared import _SquaredDistance
from sktime.distances._wddtw import _WddtwDistance
from sktime.distances._wdtw import _WdtwDistance
from sktime.distances.base import DistanceCallable, MetricInfo, NumbaDistance
from sktime.distances.lower_bounding import LowerBounding
def erp_distance(
    x: np.ndarray,
    y: np.ndarray,
    lower_bounding: Union[LowerBounding, int] = LowerBounding.NO_BOUNDING,
    window: int = 2,
    itakura_max_slope: float = 2.0,
    bounding_matrix: np.ndarray = None,
    g: float = 0.0,
    **kwargs: Any,
) -> float:
    """Compute the edit distance with real penalty (ERP) between two series.

    ERP [1]_ aligns two series like dtw, but models unmatched points as
    "gaps" that are penalised by their distance from a reference value ``g``.

    Parameters
    ----------
    x: np.ndarray (1d or 2d array)
        First time series.
    y: np.ndarray (1d or 2d array)
        Second time series.
    lower_bounding: LowerBounding or int, defaults = LowerBounding.NO_BOUNDING
        Lower bounding technique: no bounding (1), Sakoe-Chiba (2) or
        Itakura parallelogram (3), given as enum member or int.
    window: int, defaults = 2
        Radius of the Sakoe-Chiba window (Sakoe-Chiba bounding only).
    itakura_max_slope: float, defaults = 2.
        Slope gradient of the Itakura parallelogram (Itakura bounding only).
    bounding_matrix: np.ndarray (2d of size mxn where m is len(x) and n is
    len(y)), defaults = None
        Custom bounding matrix; overrides the other bounding parameters.
        In-bound cells must be 0. and out-of-bound cells infinity.
    g: float, defaults = 0.
        The reference value used to penalise gaps.
    kwargs: Any
        Extra kwargs.

    Returns
    -------
    float
        ERP distance between x and y.

    Raises
    ------
    ValueError
        If x or y is not a numpy array or has more than 3 dimensions, if a
        bounding parameter has the wrong type, if a provided metric
        string/object cannot be resolved, or if g is not a float.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4])  # 1d array
    >>> y_1d = np.array([5, 6, 7, 8])  # 1d array
    >>> erp_distance(x_1d, y_1d)
    16.0

    References
    ----------
    .. [1] Lei Chen and Raymond Ng. 2004. On the marriage of Lp-norms and
    edit distance. In Proceedings of VLDB '04. VLDB Endowment, 792-803.
    """
    # Values supplied through **kwargs deliberately override the named
    # parameters, matching the original dict-merge behaviour.
    erp_kwargs = dict(
        lower_bounding=lower_bounding,
        window=window,
        itakura_max_slope=itakura_max_slope,
        bounding_matrix=bounding_matrix,
        g=g,
    )
    erp_kwargs.update(kwargs)
    return distance(x, y, metric="erp", **erp_kwargs)
def edr_distance(
    x: np.ndarray,
    y: np.ndarray,
    lower_bounding: Union[LowerBounding, int] = LowerBounding.NO_BOUNDING,
    window: int = 2,
    itakura_max_slope: float = 2.0,
    bounding_matrix: np.ndarray = None,
    epsilon: float = None,
    **kwargs: Any,
) -> float:
    """Compute the edit distance for real sequences (EDR) between two series.

    EDR [1]_ computes the minimum fraction of elements that must be removed
    from x and y so that the remaining elements all match within the
    tolerance ``epsilon``.  The result per series lies between 0.0 (exact
    match) and 1.0 (no matching subsequences).

    Parameters
    ----------
    x: np.ndarray (1d or 2d array)
        First time series.
    y: np.ndarray (1d or 2d array)
        Second time series.
    lower_bounding: LowerBounding or int, defaults = LowerBounding.NO_BOUNDING
        Lower bounding technique: no bounding (1), Sakoe-Chiba (2) or
        Itakura parallelogram (3), given as enum member or int.
    window: int, defaults = 2
        Radius of the Sakoe-Chiba window (Sakoe-Chiba bounding only).
    itakura_max_slope: float, defaults = 2.
        Slope gradient of the Itakura parallelogram (Itakura bounding only).
    bounding_matrix: np.ndarray (2d array), defaults = None
        Custom bounding matrix; overrides the other bounding parameters.
        In-bound cells must be 0. and out-of-bound cells infinity.
    epsilon : float, defaults = None
        Matching threshold below which two subsequences are considered
        'common'.  If None, a quarter of the maximum standard deviation is
        used, as in the original paper.
    kwargs: Any
        Extra kwargs.

    Returns
    -------
    float
        EDR distance between x and y, in [0.0, 1.0].

    Raises
    ------
    ValueError
        If x or y is not a numpy array or has more than 3 dimensions, if a
        bounding parameter has the wrong type, or if a provided metric
        string/object cannot be resolved.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4])  # 1d array
    >>> y_1d = np.array([5, 6, 7, 8])  # 1d array
    >>> edr_distance(x_1d, y_1d)
    1.0

    References
    ----------
    .. [1] Lei Chen, M. Tamer Özsu, and Vincent Oria. 2005. Robust and fast
    similarity search for moving object trajectories. In SIGMOD '05. ACM,
    491-502. DOI:https://doi.org/10.1145/1066157.1066213
    """
    # Values supplied through **kwargs deliberately override the named
    # parameters, matching the original dict-merge behaviour.
    edr_kwargs = dict(
        lower_bounding=lower_bounding,
        window=window,
        itakura_max_slope=itakura_max_slope,
        bounding_matrix=bounding_matrix,
        epsilon=epsilon,
    )
    edr_kwargs.update(kwargs)
    return distance(x, y, metric="edr", **edr_kwargs)
def lcss_distance(
    x: np.ndarray,
    y: np.ndarray,
    lower_bounding: Union[LowerBounding, int] = LowerBounding.NO_BOUNDING,
    window: int = 2,
    itakura_max_slope: float = 2.0,
    bounding_matrix: np.ndarray = None,
    epsilon: float = 1.0,
    **kwargs: Any,
) -> float:
    """Compute the longest common subsequence (LCSS) score between two series.

    LCSS [1]_ finds the longest common sequence between two time series,
    matching indexes whose values are within the threshold ``epsilon``, and
    returns the fraction it does NOT cover: 0.0 means the series are
    exactly the same and 1.0 means they are complete opposites.

    Parameters
    ----------
    x: np.ndarray (1d or 2d array)
        First time series.
    y: np.ndarray (1d or 2d array)
        Second time series.
    lower_bounding: LowerBounding or int, defaults = LowerBounding.NO_BOUNDING
        Lower bounding technique: no bounding (1), Sakoe-Chiba (2) or
        Itakura parallelogram (3), given as enum member or int.
    window: int, defaults = 2
        Radius of the Sakoe-Chiba window (Sakoe-Chiba bounding only).
    itakura_max_slope: float, defaults = 2.
        Slope gradient of the Itakura parallelogram (Itakura bounding only).
    bounding_matrix: np.ndarray (2d of size mxn where m is len(x) and n is
    len(y)), defaults = None
        Custom bounding matrix; overrides the other bounding parameters.
        In-bound cells must be 0. and out-of-bound cells infinity.
    epsilon : float, defaults = 1.
        Matching threshold below which two subsequences are considered
        'common'.
    kwargs: Any
        Extra kwargs.

    Returns
    -------
    float
        LCSS distance between x and y, in [0.0, 1.0].

    Raises
    ------
    ValueError
        If x or y is not a numpy array or has more than 2 dimensions, if a
        bounding parameter has the wrong type, or if a provided metric
        string/object cannot be resolved.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4])  # 1d array
    >>> y_1d = np.array([5, 6, 7, 8])  # 1d array
    >>> lcss_distance(x_1d, y_1d)
    1.0

    References
    ----------
    .. [1] M. Vlachos, D. Gunopoulos, and G. Kollios. 2002. "Discovering
    Similar Multidimensional Trajectories", In Proceedings of ICDE '02.
    IEEE Computer Society, USA, 673.
    """
    # Values supplied through **kwargs deliberately override the named
    # parameters, matching the original dict-merge behaviour.
    lcss_kwargs = dict(
        lower_bounding=lower_bounding,
        window=window,
        itakura_max_slope=itakura_max_slope,
        bounding_matrix=bounding_matrix,
        epsilon=epsilon,
    )
    lcss_kwargs.update(kwargs)
    return distance(x, y, metric="lcss", **lcss_kwargs)
def wddtw_distance(
    x: np.ndarray,
    y: np.ndarray,
    lower_bounding: Union[LowerBounding, int] = LowerBounding.NO_BOUNDING,
    window: int = 2,
    itakura_max_slope: float = 2.0,
    bounding_matrix: np.ndarray = None,
    compute_derivative: DerivativeCallable = _average_of_slope,
    g: float = 0.0,
    **kwargs: Any,
) -> float:
    r"""Compute the weighted derivative dynamic time warping (WDDTW) distance.

    WDDTW [1]_ extends DDTW by weighting the derivative difference with the
    phase difference of the aligned indexes, so the alignment considers
    both the shape and the phase of the series:

    .. math::
        d_{w}(x_{i}, y_{j}) = ||w_{|i-j|}(D_{x_{i}} - D_{y_{j}})||

    where D is the derivative computed by ``compute_derivative``.

    Parameters
    ----------
    x: np.ndarray (1d or 2d array)
        First time series.
    y: np.ndarray (1d or 2d array)
        Second time series.
    lower_bounding: LowerBounding or int, defaults = LowerBounding.NO_BOUNDING
        Lower bounding technique: no bounding (1), Sakoe-Chiba (2) or
        Itakura parallelogram (3), given as enum member or int.
    window: int, defaults = 2
        Radius of the Sakoe-Chiba window (Sakoe-Chiba bounding only).
    itakura_max_slope: float, defaults = 2.
        Slope gradient of the Itakura parallelogram (Itakura bounding only).
    bounding_matrix: np.ndarray (2d of size mxn where m is len(x) and n is
    len(y)), defaults = None
        Custom bounding matrix; overrides the other bounding parameters.
        In-bound cells must be 0. and out-of-bound cells infinity.
    compute_derivative: Callable[[np.ndarray], np.ndarray],
    defaults = average slope difference
        Callable computing the derivative; the average slope between
        neighbouring points is used when none is given.
    g: float, defaults = 0.
        Constant controlling the curvature (slope) of the weighting
        function, i.e. the penalty level for points with larger phase
        difference.
    kwargs: Any
        Extra kwargs.

    Returns
    -------
    float
        WDDTW distance between x and y.

    Raises
    ------
    ValueError
        If x or y is not a numpy array or has more than 2 dimensions, if a
        bounding parameter has the wrong type, if a provided metric
        string/object cannot be resolved, if the compute derivative
        callable is not no_python compiled, or if g is not a float.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4])  # 1d array
    >>> y_1d = np.array([5, 6, 7, 8])  # 1d array
    >>> wddtw_distance(x_1d, y_1d)
    0.0

    References
    ----------
    .. [1] Young-Seon Jeong, Myong K. Jeong, Olufemi A. Omitaomu, Weighted
    dynamic time warping for time series classification, Pattern
    Recognition, Volume 44, Issue 9, 2011, Pages 2231-2240, ISSN 0031-3203.
    """
    # Values supplied through **kwargs deliberately override the named
    # parameters, matching the original dict-merge behaviour.
    wddtw_kwargs = dict(
        lower_bounding=lower_bounding,
        window=window,
        itakura_max_slope=itakura_max_slope,
        bounding_matrix=bounding_matrix,
        compute_derivative=compute_derivative,
        g=g,
    )
    wddtw_kwargs.update(kwargs)
    return distance(x, y, metric="wddtw", **wddtw_kwargs)
def wdtw_distance(
    x: np.ndarray,
    y: np.ndarray,
    lower_bounding: Union[LowerBounding, int] = LowerBounding.NO_BOUNDING,
    window: int = 2,
    itakura_max_slope: float = 2.0,
    bounding_matrix: np.ndarray = None,
    g: float = 0.0,
    **kwargs: Any,
) -> float:
    """Compute the weighted dynamic time warping (WDTW) distance between time series.

    First proposed in [1]_, WDTW adds a multiplicative weight penalty based on
    the warping distance. This means that time series with lower phase difference
    have a smaller weight imposed (i.e less penalty imposed) and time series with
    larger phase difference have a larger weight imposed (i.e. larger penalty
    imposed).

    Formally this can be described as:

    .. math::
        d_{w}(x_{i}, y_{j}) = ||w_{|i-j|}(x_{i} - y_{j})||

    Where d_w is the distance with the weight applied to it for points i, j,
    where w(|i-j|) is a positive weight between the two points x_i and y_j.

    Parameters
    ----------
    x: np.ndarray (1d or 2d array)
        First time series.
    y: np.ndarray (1d or 2d array)
        Second time series.
    lower_bounding: LowerBounding or int, defaults = LowerBounding.NO_BOUNDING
        Lower bounding technique to use.
        If LowerBounding enum provided, the following are valid:
            LowerBounding.NO_BOUNDING - No bounding
            LowerBounding.SAKOE_CHIBA - Sakoe chiba
            LowerBounding.ITAKURA_PARALLELOGRAM - Itakura parallelogram
        If int value provided, the following are valid:
            1 - No bounding
            2 - Sakoe chiba
            3 - Itakura parallelogram
    window: int, defaults = 2
        Integer that is the radius of the sakoe chiba window (if using Sakoe-Chiba
        lower bounding).
    itakura_max_slope: float, defaults = 2.
        Gradient of the slope for itakura parallelogram (if using Itakura
        Parallelogram lower bounding).
    bounding_matrix: np.ndarray (2d of size mxn where m is len(x) and n is len(y)),
    defaults = None
        Custom bounding matrix to use. If defined then other lower_bounding params
        are ignored. The matrix should be structured so that indexes considered in
        bound are of value 0. and indexes outside the bounding matrix are
        infinity.
    g: float, defaults = 0.
        Constant that controls the curvature (slope) of the function; that is, g
        controls the level of penalisation for the points with larger phase
        difference.
    kwargs: Any
        Extra kwargs.

    Returns
    -------
    float
        Wdtw distance between the x and y.

    Raises
    ------
    ValueError
        If the sakoe_chiba_window_radius is not an integer.
        If the itakura_max_slope is not a float or int.
        If the value of x or y provided is not a numpy array.
        If the value of x or y has more than 2 dimensions.
        If a metric string provided, and is not a defined valid string.
        If a metric object (instance of class) is provided and doesn't inherit from
        NumbaDistance.
        If the metric type cannot be determined.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4]) # 1d array
    >>> y_1d = np.array([5, 6, 7, 8]) # 1d array
    >>> wdtw_distance(x_1d, y_1d)
    29.0

    >>> x_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) # 2d array
    >>> y_2d = np.array([[9, 10, 11, 12], [13, 14, 15, 16]]) # 2d array
    >>> wdtw_distance(x_2d, y_2d)
    256.0

    References
    ----------
    .. [1] Young-Seon Jeong, Myong K. Jeong, Olufemi A. Omitaomu, Weighted dynamic time
    warping for time series classification, Pattern Recognition, Volume 44, Issue 9,
    2011, Pages 2231-2240, ISSN 0031-3203, https://doi.org/10.1016/j.patcog.2010.09.022.
    """
    # Collect the explicit parameters, letting caller-supplied kwargs override
    # them, and delegate to the generic metric dispatcher.
    format_kwargs = {
        "lower_bounding": lower_bounding,
        "window": window,
        "itakura_max_slope": itakura_max_slope,
        "bounding_matrix": bounding_matrix,
        "g": g,
    }
    format_kwargs = {**format_kwargs, **kwargs}
    return distance(x, y, metric="wdtw", **format_kwargs)
def ddtw_distance(
    x: np.ndarray,
    y: np.ndarray,
    lower_bounding: Union[LowerBounding, int] = LowerBounding.NO_BOUNDING,
    window: int = 2,
    itakura_max_slope: float = 2.0,
    bounding_matrix: np.ndarray = None,
    compute_derivative: DerivativeCallable = _average_of_slope,
    **kwargs: Any,
) -> float:
    r"""Compute the derivative dynamic time warping (DDTW) distance between time series.

    DDTW is an adaptation of DTW originally proposed in [1]_. DDTW attempts to
    improve on dtw by better accounting for the 'shape' of the time series.
    This is done by considering y axis data points as higher level features of
    'shape'. To do this the first derivative of the sequence is taken, and then
    using this derived sequence a dtw computation is done.

    The default derivative used is the estimate from [1]_:

    .. math::
        D_{x}[q] = \frac{(q_{i} - q_{i-1}) + ((q_{i+1} - q_{i-1}) / 2)}{2}

    Where q is the original time series and D_{x}[q] is the derived time series.

    Parameters
    ----------
    x: np.ndarray (1d or 2d array)
        First time series.
    y: np.ndarray (1d or 2d array)
        Second time series.
    lower_bounding: LowerBounding or int, defaults = LowerBounding.NO_BOUNDING
        Lower bounding technique to use.
        If LowerBounding enum provided, the following are valid:
            LowerBounding.NO_BOUNDING - No bounding
            LowerBounding.SAKOE_CHIBA - Sakoe chiba
            LowerBounding.ITAKURA_PARALLELOGRAM - Itakura parallelogram
        If int value provided, the following are valid:
            1 - No bounding
            2 - Sakoe chiba
            3 - Itakura parallelogram
    window: int, defaults = 2
        Integer that is the radius of the sakoe chiba window (if using Sakoe-Chiba
        lower bounding).
    itakura_max_slope: float, defaults = 2.
        Gradient of the slope for itakura parallelogram (if using Itakura
        Parallelogram lower bounding).
    bounding_matrix: np.ndarray (2d of size mxn where m is len(x) and n is len(y)),
    defaults = None
        Custom bounding matrix to use. If defined then other lower_bounding params
        are ignored. The matrix should be structured so that indexes considered in
        bound are of value 0. and indexes outside the bounding matrix are
        infinity.
    compute_derivative: Callable[[np.ndarray], np.ndarray],
    defaults = average slope difference
        Callable that computes the derivative. If none is provided the average of
        the slope between two points is used.
    kwargs: Any
        Extra kwargs.

    Returns
    -------
    float
        Ddtw distance between the x and y.

    Raises
    ------
    ValueError
        If the sakoe_chiba_window_radius is not an integer.
        If the itakura_max_slope is not a float or int.
        If the value of x or y provided is not a numpy array.
        If the value of x or y has more than 2 dimensions.
        If a metric string provided, and is not a defined valid string.
        If a metric object (instance of class) is provided and doesn't inherit from
        NumbaDistance.
        If a resolved metric or compute derivative callable is not no_python compiled.
        If the metric type cannot be determined.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4]) # 1d array
    >>> y_1d = np.array([5, 6, 7, 8]) # 1d array
    >>> ddtw_distance(x_1d, y_1d)
    0.0

    >>> x_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) # 2d array
    >>> y_2d = np.array([[9, 10, 11, 12], [13, 14, 15, 16]]) # 2d array
    >>> ddtw_distance(x_2d, y_2d)
    0.0

    References
    ----------
    .. [1] Keogh, Eamonn & Pazzani, Michael. (2002). Derivative Dynamic Time Warping.
    First SIAM International Conference on Data Mining.
    1. 10.1137/1.9781611972719.1.
    """
    # Collect the explicit parameters, letting caller-supplied kwargs override
    # them, and delegate to the generic metric dispatcher.
    format_kwargs = {
        "lower_bounding": lower_bounding,
        "window": window,
        "itakura_max_slope": itakura_max_slope,
        "bounding_matrix": bounding_matrix,
        "compute_derivative": compute_derivative,
    }
    format_kwargs = {**format_kwargs, **kwargs}
    return distance(x, y, metric="ddtw", **format_kwargs)
def dtw_distance(
    x: np.ndarray,
    y: np.ndarray,
    lower_bounding: Union[LowerBounding, int] = LowerBounding.NO_BOUNDING,
    window: int = 2,
    itakura_max_slope: float = 2.0,
    bounding_matrix: np.ndarray = None,
    **kwargs: Any,
) -> float:
    r"""Compute the dynamic time warping (DTW) distance between two time series.

    Originally proposed in [1]_ DTW computes the distance between two time series
    by considering their alignments during the calculation. This is done by
    measuring the pointwise distance (normally using Euclidean) between all
    elements of the two time series and then using dynamic programming to find
    the warping path that minimises the total pointwise distance between
    realigned series.

    Mathematically dtw can be defined as:

    .. math::
        dtw(x, y) = \sqrt{\sum_{(i, j) \in \pi} \|x_{i} - y_{j}\|^2}

    Parameters
    ----------
    x: np.ndarray (1d or 2d array)
        First time series.
    y: np.ndarray (1d or 2d array)
        Second time series.
    lower_bounding: LowerBounding or int, defaults = LowerBounding.NO_BOUNDING
        Lower bounding technique to use.
        If LowerBounding enum provided, the following are valid:
            LowerBounding.NO_BOUNDING - No bounding
            LowerBounding.SAKOE_CHIBA - Sakoe chiba
            LowerBounding.ITAKURA_PARALLELOGRAM - Itakura parallelogram
        If int value provided, the following are valid:
            1 - No bounding
            2 - Sakoe chiba
            3 - Itakura parallelogram
    window: int, defaults = 2
        Integer that is the radius of the sakoe chiba window (if using Sakoe-Chiba
        lower bounding).
    itakura_max_slope: float, defaults = 2.
        Gradient of the slope for itakura parallelogram (if using Itakura
        Parallelogram lower bounding).
    bounding_matrix: np.ndarray (2d of size mxn where m is len(x) and n is len(y)),
    defaults = None
        Custom bounding matrix to use. If defined then other lower_bounding params
        are ignored. The matrix should be structured so that indexes considered in
        bound are of value 0. and indexes outside the bounding matrix are
        infinity.
    kwargs: Any
        Extra kwargs.

    Returns
    -------
    float
        Dtw distance between x and y.

    Raises
    ------
    ValueError
        If the sakoe_chiba_window_radius is not an integer.
        If the itakura_max_slope is not a float or int.
        If the value of x or y provided is not a numpy array.
        If the value of x or y has more than 2 dimensions.
        If a metric string provided, and is not a defined valid string.
        If a metric object (instance of class) is provided and doesn't inherit from
        NumbaDistance.
        If a resolved metric is not no_python compiled.
        If the metric type cannot be determined.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4]) # 1d array
    >>> y_1d = np.array([5, 6, 7, 8]) # 1d array
    >>> dtw_distance(x_1d, y_1d)
    58.0

    >>> x_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) # 2d array
    >>> y_2d = np.array([[9, 10, 11, 12], [13, 14, 15, 16]]) # 2d array
    >>> dtw_distance(x_2d, y_2d)
    512.0

    References
    ----------
    .. [1] H. Sakoe, S. Chiba, "Dynamic programming algorithm optimization for
    spoken word recognition," IEEE Transactions on Acoustics, Speech and
    Signal Processing, vol. 26(1), pp. 43--49, 1978.
    """
    # Collect the explicit parameters, letting caller-supplied kwargs override
    # them, and delegate to the generic metric dispatcher.
    format_kwargs = {
        "lower_bounding": lower_bounding,
        "window": window,
        "itakura_max_slope": itakura_max_slope,
        "bounding_matrix": bounding_matrix,
    }
    format_kwargs = {**format_kwargs, **kwargs}
    return distance(x, y, metric="dtw", **format_kwargs)
def squared_distance(x: np.ndarray, y: np.ndarray, **kwargs: Any) -> float:
    r"""Compute the squared distance between two time series.

    The squared distance is the sum of pointwise squared differences:

    .. math::
        sd(x, y) = \sum_{i=1}^{n} (x_i - y_i)^2

    Parameters
    ----------
    x: np.ndarray (1d or 2d array)
        First time series.
    y: np.ndarray (1d or 2d array)
        Second time series.
    kwargs: Any
        Extra kwargs. The squared metric takes no extra arguments; the
        parameter is accepted only to keep a consistent interface with the
        other distance functions.

    Returns
    -------
    float
        Squared distance between x and y.

    Raises
    ------
    ValueError
        If the value of x or y provided is not a numpy array.
        If the value of x or y has more than 2 dimensions.
        If a metric string provided, and is not a defined valid string.
        If a metric object (instance of class) is provided and doesn't inherit from
        NumbaDistance.
        If a resolved metric is not no_python compiled.
        If the metric type cannot be determined.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4]) # 1d array
    >>> y_1d = np.array([5, 6, 7, 8]) # 1d array
    >>> squared_distance(x_1d, y_1d)
    64.0
    >>> x_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) # 2d array
    >>> y_2d = np.array([[9, 10, 11, 12], [13, 14, 15, 16]]) # 2d array
    >>> squared_distance(x_2d, y_2d)
    512.0
    """
    # Delegate straight to the generic dispatcher with the metric pinned.
    return distance(x, y, metric="squared", **kwargs)
def euclidean_distance(x: np.ndarray, y: np.ndarray, **kwargs: Any) -> float:
    r"""Compute the Euclidean distance between two time series.

    Euclidean distance is supported for 1d and 2d arrays (inputs with more than
    2 dimensions raise a ValueError, see Raises below).

    The Euclidean distance between two time series is the square root of the
    squared distance and is defined as:

    .. math::
        ed(x, y) = \sqrt{\sum_{i=1}^{n} (x_i - y_i)^2}

    Parameters
    ----------
    x: np.ndarray (1d or 2d array)
        First time series.
    y: np.ndarray (1d or 2d array)
        Second time series.
    kwargs: Any
        Extra kwargs. For euclidean there are none however, this is kept for
        consistency.

    Returns
    -------
    float
        Euclidean distance between x and y.

    Raises
    ------
    ValueError
        If the value of x or y provided is not a numpy array.
        If the value of x or y has more than 2 dimensions.
        If a metric string provided, and is not a defined valid string.
        If a metric object (instance of class) is provided and doesn't inherit from
        NumbaDistance.
        If a resolved metric is not no_python compiled.
        If the metric type cannot be determined.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4]) # 1d array
    >>> y_1d = np.array([5, 6, 7, 8]) # 1d array
    >>> euclidean_distance(x_1d, y_1d)
    8.0
    >>> x_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) # 2d array
    >>> y_2d = np.array([[9, 10, 11, 12], [13, 14, 15, 16]]) # 2d array
    >>> euclidean_distance(x_2d, y_2d)
    22.627416997969522
    """
    # Delegate to the generic dispatcher with the metric fixed to "euclidean".
    return distance(x, y, metric="euclidean", **kwargs)
def distance(
    x: np.ndarray,
    y: np.ndarray,
    metric: Union[
        str,
        Callable[
            [np.ndarray, np.ndarray, dict], Callable[[np.ndarray, np.ndarray], float]
        ],
        Callable[[np.ndarray, np.ndarray], float],
        NumbaDistance,
    ],
    **kwargs: Any,
) -> float:
    """Compute the distance between two time series.

    The metric argument is first 'resolved' into a no_python compiled callable;
    that callable is then invoked on the (converted) inputs and its value
    returned.

    Parameters
    ----------
    x: np.ndarray (1d or 2d array)
        First time series.
    y: np.ndarray (1d or 2d array)
        Second time series.
    metric: str or Callable
        The distance metric to use.
        If a string is given, the value must be one of the following strings:
        'euclidean', 'squared', 'dtw', 'ddtw', 'wdtw', 'wddtw', 'lcss', 'edr', 'erp'
        If callable then it has to be a distance factory or numba distance callable.
        If you want to pass custom kwargs to the distance at runtime, use a distance
        factory as it constructs the distance using the kwargs before distance
        computation.
        A distance callable takes the form (must be no_python compiled):
        Callable[[np.ndarray, np.ndarray], float]
        A distance factory takes the form (must return a no_python callable):
        Callable[[np.ndarray, np.ndarray, bool, dict], Callable[[np.ndarray,
        np.ndarray], float]].
    kwargs: Any
        Arguments for metric. Refer to each metrics documentation for a list of
        possible arguments.

    Returns
    -------
    float
        Distance between the x and y.

    Raises
    ------
    ValueError
        If the value of x or y provided is not a numpy array.
        If the value of x or y has more than 2 dimensions.
        If a metric string provided, and is not a defined valid string.
        If a metric object (instance of class) is provided and doesn't inherit from
        NumbaDistance.
        If a resolved metric is not no_python compiled.
        If the metric type cannot be determined.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4]) # 1d array
    >>> y_1d = np.array([5, 6, 7, 8]) # 1d array
    >>> distance(x_1d, y_1d, metric='dtw')
    58.0
    >>> x_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) # 2d array
    >>> y_2d = np.array([[9, 10, 11, 12], [13, 14, 15, 16]]) # 2d array
    >>> distance(x_2d, y_2d, metric='dtw')
    512.0
    >>> x_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) # 2d array
    >>> y_2d = np.array([[9, 10, 11, 12], [13, 14, 15, 16]]) # 2d array
    >>> distance(x_2d, y_2d, metric='dtw', lower_bounding=2, window=3)
    512.0
    """
    # Convert both inputs to the internal numba-friendly representation.
    series_x = to_numba_timeseries(x)
    series_y = to_numba_timeseries(y)
    # Resolve the metric specifier (string / factory / callable / NumbaDistance)
    # into a concrete compiled callable, then evaluate it.
    metric_callable = _resolve_metric(
        metric, series_x, series_y, _METRIC_INFOS, **kwargs
    )
    return metric_callable(series_x, series_y)
def distance_factory(
    x: np.ndarray = None,
    y: np.ndarray = None,
    metric: Union[
        str,
        Callable[
            [np.ndarray, np.ndarray, dict], Callable[[np.ndarray, np.ndarray], float]
        ],
        Callable[[np.ndarray, np.ndarray], float],
        NumbaDistance,
    ] = "euclidean",
    **kwargs: Any,
) -> DistanceCallable:
    """Create a no_python distance callable.

    When x or y is omitted, a 10x10 zero array is used as a stand-in so the
    metric can still be resolved and compiled.

    Parameters
    ----------
    x: np.ndarray (1d or 2d array), defaults = None
        First time series.
    y: np.ndarray (1d or 2d array), defaults = None
        Second time series.
    metric: str or Callable, defaults = 'euclidean'
        The distance metric to use.
        If a string is given, the value must be one of the following strings:
        'euclidean', 'squared', 'dtw', 'ddtw', 'wdtw', 'wddtw', 'lcss', 'edr', 'erp'
        If callable then it has to be a distance factory or numba distance callable.
        If you want to pass custom kwargs to the distance at runtime, use a distance
        factory as it constructs the distance using the kwargs before distance
        computation.
        A distance callable takes the form (must be no_python compiled):
        Callable[[np.ndarray, np.ndarray], float]
        A distance factory takes the form (must return a no_python callable):
        Callable[[np.ndarray, np.ndarray, bool, dict], Callable[[np.ndarray,
        np.ndarray], float]].
    kwargs: Any
        Arguments for metric. Refer to each metrics documentation for a list of
        possible arguments.

    Returns
    -------
    Callable[[np.ndarray, np.ndarray], float]]
        No_python compiled distance callable.

    Raises
    ------
    ValueError
        If the value of x or y provided is not a numpy array.
        If the value of x or y has more than 2 dimensions.
        If a metric string provided, and is not a defined valid string.
        If a metric object (instance of class) is provided and doesn't inherit from
        NumbaDistance.
        If a resolved metric is not no_python compiled.
        If the metric type cannot be determined.
    """
    # Substitute placeholder series for any input that was not supplied.
    placeholder_shape = (10, 10)
    if x is None:
        x = np.zeros(placeholder_shape)
    if y is None:
        y = np.zeros(placeholder_shape)
    series_x = to_numba_timeseries(x)
    series_y = to_numba_timeseries(y)
    return _resolve_metric(metric, series_x, series_y, _METRIC_INFOS, **kwargs)
def pairwise_distance(
    x: np.ndarray,
    y: np.ndarray,
    metric: Union[
        str,
        Callable[
            [np.ndarray, np.ndarray, dict], Callable[[np.ndarray, np.ndarray], float]
        ],
        Callable[[np.ndarray, np.ndarray], float],
        NumbaDistance,
    ],
    **kwargs: Any,
) -> np.ndarray:
    """Compute the pairwise distance matrix between two time series.

    The metric is resolved into a compiled callable, which is then evaluated
    for every combination of series in x and y, producing a 2d pairwise matrix.

    Parameters
    ----------
    x: np.ndarray (1d, 2d or 3d array)
        First time series.
    y: np.ndarray (1d, 2d or 3d array)
        Second time series.
    metric: str or Callable
        The distance metric to use.
        If a string is given, the value must be one of the following strings:
        'euclidean', 'squared', 'dtw', 'ddtw', 'wdtw', 'wddtw', 'lcss', 'edr', 'erp'
        If callable then it has to be a distance factory or numba distance callable.
        If you want to pass custom kwargs to the distance at runtime, use a distance
        factory as it constructs the distance using the kwargs before distance
        computation.
        A distance callable takes the form (must be no_python compiled):
        Callable[[np.ndarray, np.ndarray], float]
        A distance factory takes the form (must return a no_python callable):
        Callable[[np.ndarray, np.ndarray, bool, dict], Callable[[np.ndarray,
        np.ndarray], float]].
    kwargs: Any
        Extra arguments for metric. Refer to each metric documentation for a list of
        possible arguments.

    Returns
    -------
    np.ndarray (2d of size mxn where m is len(x) and n is len(y)).
        Pairwise distance matrix between the two time series.

    Raises
    ------
    ValueError
        If the value of x or y provided is not a numpy array.
        If the value of x or y has more than 3 dimensions.
        If a metric string provided, and is not a defined valid string.
        If a metric object (instance of class) is provided and doesn't inherit from
        NumbaDistance.
        If a resolved metric is not no_python compiled.
        If the metric type cannot be determined.

    Examples
    --------
    >>> x_1d = np.array([1, 2, 3, 4]) # 1d array
    >>> y_1d = np.array([5, 6, 7, 8]) # 1d array
    >>> pairwise_distance(x_1d, y_1d, metric='dtw')
    array([[64.]])
    >>> x_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) # 2d array
    >>> y_2d = np.array([[9, 10, 11, 12], [13, 14, 15, 16]]) # 2d array
    >>> pairwise_distance(x_2d, y_2d, metric='dtw')
    array([[256., 576.],
           [ 58., 256.]])
    >>> x_3d = np.array([[[1], [2], [3], [4]], [[5], [6], [7], [8]]]) # 3d array
    >>> y_3d = np.array([[[9], [10], [11], [12]], [[13], [14], [15], [16]]]) # 3d array
    >>> pairwise_distance(x_3d, y_3d, metric='dtw')
    array([[256., 576.],
           [ 58., 256.]])
    >>> x_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) # 2d array
    >>> y_2d = np.array([[9, 10, 11, 12], [13, 14, 15, 16]]) # 2d array
    >>> pairwise_distance(x_2d, y_2d, metric='dtw', lower_bounding=2, window=3)
    array([[256., 576.],
           [ 58., 256.]])
    """
    # Convert both inputs to the internal pairwise (collection) representation.
    pairwise_x = to_numba_pairwise_timeseries(x)
    pairwise_y = to_numba_pairwise_timeseries(y)
    # A symmetric matrix lets the pairwise computation skip redundant work.
    is_symmetric = np.array_equal(pairwise_x, pairwise_y)
    # Resolve the metric against a single pair of series, then fill the matrix.
    metric_callable = _resolve_metric(
        metric, pairwise_x[0], pairwise_y[0], _METRIC_INFOS, **kwargs
    )
    return _compute_pairwise_distance(
        pairwise_x, pairwise_y, is_symmetric, metric_callable
    )
# Registry of every distance metric exposed by this module. Each entry pairs a
# canonical name (and its accepted aliases) with the public distance function
# and the NumbaDistance instance implementing it.
_METRIC_INFOS = [
    MetricInfo(
        canonical_name="euclidean",
        aka={"euclidean", "ed", "euclid", "pythagorean"},
        dist_func=euclidean_distance,
        dist_instance=_EuclideanDistance(),
    ),
    MetricInfo(
        canonical_name="erp",
        aka={"erp", "edit distance with real penalty"},
        dist_func=erp_distance,
        dist_instance=_ErpDistance(),
    ),
    MetricInfo(
        canonical_name="edr",
        aka={"edr", "edit distance for real sequences"},
        dist_func=edr_distance,
        dist_instance=_EdrDistance(),
    ),
    MetricInfo(
        canonical_name="lcss",
        aka={"lcss", "longest common subsequence"},
        dist_func=lcss_distance,
        dist_instance=_LcssDistance(),
    ),
    MetricInfo(
        canonical_name="squared",
        aka={"squared"},
        dist_func=squared_distance,
        dist_instance=_SquaredDistance(),
    ),
    MetricInfo(
        canonical_name="dtw",
        aka={"dtw", "dynamic time warping"},
        dist_func=dtw_distance,
        dist_instance=_DtwDistance(),
    ),
    MetricInfo(
        canonical_name="ddtw",
        aka={"ddtw", "derivative dynamic time warping"},
        dist_func=ddtw_distance,
        dist_instance=_DdtwDistance(),
    ),
    MetricInfo(
        canonical_name="wdtw",
        aka={"wdtw", "weighted dynamic time warping"},
        dist_func=wdtw_distance,
        dist_instance=_WdtwDistance(),
    ),
    MetricInfo(
        canonical_name="wddtw",
        aka={"wddtw", "weighted derivative dynamic time warping"},
        dist_func=wddtw_distance,
        dist_instance=_WddtwDistance(),
    ),
]
# Lookup tables derived from the registry: canonical name -> info, any
# alias -> info, and canonical name -> public distance function.
_METRICS = {info.canonical_name: info for info in _METRIC_INFOS}
_METRIC_ALIAS = {alias: info for info in _METRIC_INFOS for alias in info.aka}
_METRIC_CALLABLES = {info.canonical_name: info.dist_func for info in _METRIC_INFOS}
_METRICS_NAMES = list(_METRICS)
ace8fdf4539a65332f7dd5544c2f0c8dfe42bc97 | 3,937 | py | Python | convrnn/train_blur.py | esizikova/anytime-prediction | 5c2672d6454a91873ca2b40796a29c6f5db5ec99 | [
"MIT"
] | 3 | 2021-06-08T10:43:42.000Z | 2022-02-17T02:20:47.000Z | convrnn/train_blur.py | esizikova/anytime-prediction | 5c2672d6454a91873ca2b40796a29c6f5db5ec99 | [
"MIT"
] | null | null | null | convrnn/train_blur.py | esizikova/anytime-prediction | 5c2672d6454a91873ca2b40796a29c6f5db5ec99 | [
"MIT"
] | 2 | 2021-06-16T17:15:42.000Z | 2021-08-28T06:04:41.000Z | import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from PIL import Image, ImageEnhance
import cv2
import urllib
import numpy as np
from tensorflow.keras.utils import to_categorical
import glob
from random import shuffle
import h5py
import torch
from torchvision import transforms
import math
import time
import os
import argparse
# tf.enable_v2_behavior()
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
from rcnn_sat import preprocess_image, bl_net
from load_data import load_dataset, load_dataset_h5, prep_pixels, prep_pixels_h5
from custom_transforms import all_random_blur
if tf.test.gpu_device_name():
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print("Please install GPU version of TF")
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
tf.test.is_gpu_available(cuda_only=False, min_cuda_compute_capability=None)
parser = argparse.ArgumentParser()
parser.add_argument('--tag', default='blur-gray', type=str)
parser.add_argument('--color', default='gray', type=str)
parser.add_argument('--download-data', default=False, type=bool)
parser.add_argument('--pretrained', default=True, type=bool)
args = parser.parse_args()
print(args)
data_root = '../data/{}'.format(args.color)
if args.download_data == True:
trainX, trainy, testX, testy = load_dataset()
os.makedirs(data_root, exist_ok = True)
prep_pixels_h5(trainX, trainy, testX, testy, data_root, args.color)
args.download_data = False
if args.download_data == False:
trainX,trainy,testX,testy = load_dataset_h5(data_root)
input_layer = tf.keras.layers.Input((128, 128, 3))
model = bl_net(input_layer, classes=10, cumulative_readout=False)
if args.pretrained:
model.load_weights('bl_imagenet.h5',skip_mismatch=True,by_name=True)
## Lets try fine tuning it
# tf.keras.utils.plot_model(model,to_file='check.png')
skip_layers = ['ReadoutDense','Sotfmax_Time_0','Sotfmax_Time_1',
'Sotfmax_Time_2','Sotfmax_Time_3','Sotfmax_Time_4',
'Sotfmax_Time_5','Sotfmax_Time_6','Sotfmax_Time_7']
for layer in model.layers:
if layer.name in skip_layers:
layer.trainable = True
else:
layer.trainable = False
# compile model with optimizer and loss
"""
B, BL and parameter-matched controls (B-K, B-F and B-D) were trained for a total of 90 epochs
with a batch size of 100. B-U was trained using the same procedure but with a batch size of 64
due to its substantially larger number of parameters.
The cross-entropy between the softmax of the network category readout and the labels
was used as the training loss. For networks with multiple readouts (BL and B-U),
we calculate the cross-entropy at each readout and average this across readouts.
Adam [64] was used for optimisation with a learning rate of 0.005 and epsilon parameter 0.1.
L2-regularisation was applied throughout training with a coefficient of 10−6.
"""
cce = tf.keras.losses.CategoricalCrossentropy()
opt = tf.keras.optimizers.Adam(learning_rate=0.005)
model.compile(optimizer=opt,loss='categorical_crossentropy',metrics=['accuracy'])
from tensorflow.keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint("pretrained_mp_{}.hdf5".format(args.tag), monitor='loss', verbose=1,
save_best_only=True, mode='auto', period=1)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(preprocessing_function=all_random_blur)
# trainy = np.transpose(trainy, (1,2,0))
# testy = np.transpose(testy, (1,2,0))
print(trainX.shape)
print(trainy.shape)
history = model.fit(x=datagen.flow(trainX, trainy[0],batch_size=32),
validation_data=(testX,testy[0]),
steps_per_epoch=len(trainX)//32,
epochs=100,callbacks=[checkpoint])
model.save('./model/{}_{}'.format(
args.tag,
time.strftime('%Y.%m.%d_%H.%M.%S'),
))
| 35.151786 | 97 | 0.753366 |
ace8fe8fa67a03cc9b1795dd1c7b403440f03b30 | 2,051 | py | Python | scripts/build_readme.py | linw1995/data_extractor | ba20fbb8c6dc87f0fb141bd5d64e5d14f71465eb | [
"MIT"
] | 28 | 2019-04-19T11:03:04.000Z | 2022-01-02T10:31:15.000Z | scripts/build_readme.py | linw1995/data_extractor | ba20fbb8c6dc87f0fb141bd5d64e5d14f71465eb | [
"MIT"
] | 82 | 2019-04-19T14:16:50.000Z | 2021-12-24T14:16:17.000Z | scripts/build_readme.py | linw1995/data_extractor | ba20fbb8c6dc87f0fb141bd5d64e5d14f71465eb | [
"MIT"
] | 4 | 2019-07-05T04:15:17.000Z | 2022-02-08T05:18:00.000Z | """
.. _issues-172: https://github.com/github/markup/issues/172
Because Github markup do not render :include: directive. (issues-172_)
"""
# Standard Library
from pathlib import Path
from unittest import mock
# Third Party Library
import click
import docutils.nodes
import docutils.parsers.rst
import docutils.parsers.rst.directives.misc
import docutils.statemachine
import docutils.utils
@click.command()
@click.argument("source_file")
@click.argument("target_file")
def build_readme(source_file, target_file):
    """Render SOURCE_FILE to TARGET_FILE with reST ``include`` directives expanded.

    GitHub's markup renderer does not process ``.. include::`` (see module
    docstring), so this reparses the source with docutils while capturing the
    text stream and splicing each include's expanded content in place of the
    directive. The result is written only when it differs from the existing
    target file.
    """
    # Keep handles to the real docutils callables so the patched versions can
    # delegate to them.
    old_string2lines = docutils.statemachine.string2lines
    old_run = docutils.parsers.rst.directives.misc.Include.run
    # Accumulated document text; `target_text` holds the raw block text of the
    # most recent Include directive awaiting replacement by its expansion.
    text = ""
    target_text = None

    def string2lines(*args, **kwargs):
        # Patched docutils.statemachine.string2lines: record every chunk that
        # passes through. If an Include directive just ran, the incoming chunk
        # is its expanded content, so substitute it for the directive text.
        nonlocal text, target_text
        if target_text is not None:
            text = text.replace(target_text, args[0])
            target_text = None
        else:
            text += args[0]
        rv = old_string2lines(*args, **kwargs)
        return rv

    def run(self):
        # Patched Include.run: remember the directive's literal block text so
        # the next string2lines call can replace it with the included content.
        nonlocal target_text
        target_text = self.block_text
        rv = old_run(self)
        return rv

    # Parse the source with both patches active; parsing drives the capture.
    with mock.patch.object(
        docutils.statemachine, "string2lines", string2lines
    ), mock.patch.object(docutils.parsers.rst.directives.misc.Include, "run", run):
        source_file_path: Path = Path.cwd() / source_file
        target_file_path: Path = Path.cwd() / target_file
        parser = docutils.parsers.rst.Parser()
        default_settings = docutils.frontend.OptionParser(
            components=(docutils.parsers.rst.Parser,)
        ).get_default_values()
        document = docutils.utils.new_document(source_file_path.name, default_settings)
        parser.parse(source_file_path.read_text(encoding="utf-8"), document)
    # Normalise trailing whitespace to a single final newline.
    text = text.rstrip() + "\n"
    # Skip the write when the target is already up to date (avoids touching
    # the file's mtime / creating spurious diffs).
    if (
        target_file_path.exists()
        and target_file_path.read_text(encoding="utf-8") == text
    ):
        return
    target_file_path.write_text(text, encoding="utf-8")
if __name__ == "__main__":
build_readme()
| 29.724638 | 87 | 0.67138 |
ace8fec5abf4b0140d37c8bd2da7ac3ff241a378 | 8,209 | py | Python | arctia/stage.py | unternehmen/arctia | 5c0a9b1933199c09dc7312730ed32c3894bc33ac | [
"Unlicense"
] | 1 | 2018-01-12T15:11:03.000Z | 2018-01-12T15:11:03.000Z | arctia/stage.py | unternehmen/arctia | 5c0a9b1933199c09dc7312730ed32c3894bc33ac | [
"Unlicense"
] | 4 | 2018-02-17T00:20:09.000Z | 2018-06-01T19:49:08.000Z | arctia/stage.py | unternehmen/arctia | 5c0a9b1933199c09dc7312730ed32c3894bc33ac | [
"Unlicense"
] | null | null | null | """
The stage module provides a class representing the game world.
"""
import math
import random
import pytmx
from .entity import Entity
from .config import SCREEN_LOGICAL_WIDTH, SCREEN_LOGICAL_HEIGHT
from .common import make_2d_constant_array
from .resources import get_resource_filename
class Stage(object):
"""
A Stage represents the game world, including tiles, objects, etc.
Arguments:
path: a path to a .tmx file containing the stage data
(see examples in "maps/")
"""
def __init__(self, path):
tiled_map = pytmx.TiledMap(get_resource_filename(path))
assert tiled_map is not None
self.mobs = []
self.width = tiled_map.width
self.height = tiled_map.height
self.data = make_2d_constant_array(self.width, self.height, 0)
self._entity_matrix = \
make_2d_constant_array(self.width, self.height, None)
# The list of on-stage entities and their coordinates.
# Contains tuples of the following format: (entity, x, y)
self._entity_list = []
player_start_obj = \
tiled_map.get_object_by_name('Player Start')
player_start_x = player_start_obj.x
player_start_y = player_start_obj.y
self.player_start_loc = player_start_x, player_start_y
self._tile_change_listeners = []
for layer_ref in tiled_map.visible_tile_layers:
layer = tiled_map.layers[layer_ref]
for x, y, img in layer.tiles():
target_x = math.floor(img[1][0] / 16)
target_y = math.floor(img[1][1] / 16)
tid = target_y * 16 + target_x
# Some tiles add an entity instead of a tile.
if tid == 4:
self.create_entity('fish', (x, y))
tid = 1
elif tid == 6:
self.create_entity('rock', (x, y))
tid = 1
self.data[y][x] = tid
def register_tile_change_listener(self, listener):
"""
Register an object to be signalled whenever a tile changes.
The listening object must have a method called tile_changed
accepting the previous tile ID, the new tile ID, and the
position as a pair of (x, y) coordinates. For example:
def tile_changed(self, prev_tid, cur_tid, position)
Whenever a tile changes on this Stage, the tile_changed
method will be called on every listener.
Argument:
listener: the object to signal when a tile changes
"""
self._tile_change_listeners.append(listener)
def _draw_tile_at(self, screen, tileset, camera, loc):
x, y = loc
tid = self.data[y][x]
target_x = tid % 16
target_y = math.floor(tid / 16)
screen.blit(tileset,
camera.transform_game_to_screen(
(x, y), scalar=16),
(target_x * 16, target_y * 16, 16, 16))
def _draw_entity_at(self, screen, tileset, camera, loc):
x, y = loc
if self._entity_matrix[y][x]:
kind = self._entity_matrix[y][x].kind
if kind == 'rock':
target_x = 6
elif kind == 'bug':
target_x = 7
elif kind == 'fish':
target_x = 4
screen.blit(tileset,
camera.transform_game_to_screen(
(x, y), scalar=16),
(target_x * 16, 0, 16, 16))
def draw(self, screen, tileset, camera):
"""
Draw the visible map area onto a screen.
Arguments:
screen: the screen to draw on
tileset: the tileset to use for tiles and objects
camera: the Camera to draw with
"""
clip_left = math.floor(camera.x / 16)
clip_top = math.floor(camera.y / 16)
clip_width = math.floor(SCREEN_LOGICAL_WIDTH / 16)
clip_height = math.floor(SCREEN_LOGICAL_HEIGHT / 16 + 1)
for y in range(clip_top, clip_top + clip_height):
for x in range(clip_left, clip_left + clip_width):
if x < 0 or x >= self.width \
or y < 0 or y >= self.height:
continue
args = screen, tileset, camera, (x, y)
self._draw_tile_at(*args)
self._draw_entity_at(*args)
def get_player_start_pos(self):
"""
Return the default starting location of the player.
Returns: the default starting location of the player
"""
return self.player_start_loc
def get_tile_at(self, x, y):
"""
Return the ID of the tile at (x, y), or None if off-map.
Arguments:
x: the x coordinate of the tile
y: the y coordinate of the tile
Returns: the tile ID of the tile at (x, y),
or None if the coordinates are off-map
"""
if x < 0 or x >= self.width or y < 0 or y >= self.height:
return None
return self.data[y][x]
def set_tile_at(self, x, y, tid):
"""
Set the tile at (x, y) to the tile ID tid.
The coordinates must actually be within the Stage.
Arguments:
x: the x coordinate of the tile to change
y: the y coordinate of the tile to change
tid: the tile ID the tile should be changed to
"""
assert x >= 0
assert x < self.width
assert y >= 0
assert y < self.height
prev_tid = self.data[y][x]
cur_tid = tid
self.data[y][x] = tid
for listener in self._tile_change_listeners:
listener.tile_changed(prev_tid, cur_tid, (x, y))
def add_entity(self, entity, location):
"""
Add an entity to the Stage.
Arguments:
entity: the entity
location: the location at which to place the entity
"""
x, y = location
assert 0 <= x < self.width
assert 0 <= y < self.height
assert not self._entity_matrix[y][x], \
'location is not empty: x=%d, y=%d' % (x, y)
entity.location = location
self._entity_matrix[y][x] = entity
self._entity_list.append((entity, x, y))
    def create_entity(self, kind, location):
        """
        Create an entity of the given kind at a location in this Stage.

        Arguments:
            kind: the kind of entity (bug | rock | fish) -- these are
                the kinds the drawing code knows how to render; the
                old docs said "stone" but the code uses 'rock'
            location: a tuple (x, y) specifying a location
        """
        entity = Entity(kind=kind, location=location)
        self.add_entity(entity, location)
def delete_entity(self, entity):
"""
Delete an entity from this Stage.
Arguments:
entity: the entity to delete
"""
for y in range(self.height):
for x in range(self.width):
if self._entity_matrix[y][x] == entity:
self._entity_matrix[y][x] = None
self._entity_list.remove((entity, x, y))
entity.location = None
def find_entity(self, condition):
"""
Find an entity on the stage satisfying a condition.
Arguments:
condition: a lambda taking an entity, the entity's
x coordinate, and the entity's y coordinate,
and returning True if the entity is accepted
or False if the entity is not accepted
Returns:
a tuple (entity, (x, y)) if an entity was accepted,
or None if no entity was accepted
"""
random.shuffle(self._entity_list)
for ent, x, y in self._entity_list:
if condition(ent, x, y):
return ent, (x, y)
return None
def entity_at(self, location):
"""
Return the entity at a location if there is one, otherwise None.
Arguments:
location: the pair of coordinates (x, y)
Returns: the entity at the location, or None if there is none
"""
x, y = location
return self._entity_matrix[y][x]
| 32.44664 | 72 | 0.55963 |
ace8ff32c1650426c38b661cdc57e57a0deb4b07 | 1,537 | py | Python | _unittests/ut_dnotebooks/test_1A_notebook_soft_sql.py | mohamedelkansouli/Ensae_py | 8bc867bd2081c259c793fadfa8be5dcc7bd1400b | [
"MIT"
] | null | null | null | _unittests/ut_dnotebooks/test_1A_notebook_soft_sql.py | mohamedelkansouli/Ensae_py | 8bc867bd2081c259c793fadfa8be5dcc7bd1400b | [
"MIT"
] | null | null | null | _unittests/ut_dnotebooks/test_1A_notebook_soft_sql.py | mohamedelkansouli/Ensae_py | 8bc867bd2081c259c793fadfa8be5dcc7bd1400b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@brief test log(time=92s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, add_missing_development_version
try:
    import src
except ImportError:
    # Running from a source checkout: put the repository root (two
    # directory levels above this test file) on sys.path so the local
    # ``src`` package becomes importable, then retry the import.
    path = os.path.normpath(
        os.path.abspath(
            os.path.join(
                os.path.split(__file__)[0],
                "..",
                "..")))
    if path not in sys.path:
        sys.path.append(path)
    import src
class TestNotebookRunner1a_soft_sql(unittest.TestCase):
    """Executes the 'td1a_soft' notebooks and fails if any of them raises."""

    def setUp(self):
        # Make sure the sibling development packages are importable
        # before the notebooks are executed.
        add_missing_development_version(["pymyinstall", "pyensae", "pymmails", "jyquickhelper", "mlstatpy"],
                                        __file__, hide=True)

    def test_notebook_runner_soft_sql(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        temp = get_temp_folder(__file__, "temp_notebook1a_soft")
        from src.ensae_teaching_cs.automation.notebook_test_helper import ls_notebooks, execute_notebooks, clean_function_1a
        keepnote = ls_notebooks("td1a_soft")
        for n in keepnote:
            fLOG(n)
        # Skip C# notebooks and the correction-editing notebooks.
        execute_notebooks(temp, keepnote,
                          lambda i, n: "csharp" not in n and "edit_correction" not in n,
                          fLOG=fLOG,
                          clean_function=clean_function_1a,
                          dump=src.ensae_teaching_cs)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 29.557692 | 124 | 0.59987 |
ace8ff47e8ce00f3a9a02849cdbb2d68a7bba802 | 1,067 | py | Python | starthinker/task/dcm_api/schema/platformType.py | dvandra/starthinker | 07a8c1f8bf3c7493b1833d54ca0acc9305a04bc9 | [
"Apache-2.0"
] | 1 | 2019-07-02T18:25:25.000Z | 2019-07-02T18:25:25.000Z | starthinker/task/dcm_api/schema/platformType.py | dvandra/starthinker | 07a8c1f8bf3c7493b1833d54ca0acc9305a04bc9 | [
"Apache-2.0"
] | null | null | null | starthinker/task/dcm_api/schema/platformType.py | dvandra/starthinker | 07a8c1f8bf3c7493b1833d54ca0acc9305a04bc9 | [
"Apache-2.0"
] | null | null | null | ###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
# Field list in BigQuery table-schema form (mode/type/description/name
# records) -- NOTE(review): presumably generated for the DCM API
# "platformType" resource, inferred from the module path; confirm
# against the schema generator.
platformType_Schema = [
    {
        "mode": "NULLABLE",
        "type": "STRING",
        "description": "",
        "name": "kind"
    },
    {
        "mode": "NULLABLE",
        "type": "INT64",
        "description": "",
        "name": "id"
    },
    {
        "mode": "NULLABLE",
        "type": "STRING",
        "description": "",
        "name": "name"
    }
]
| 27.358974 | 75 | 0.541706 |
ace8ffcbb4bbae9e945f9c085dc39b73e363eb0a | 3,745 | py | Python | lefi/objects/user.py | Shom770/Lefi | 7d9d45a8356605d82e2b7247715db4992d21c377 | [
"MIT"
] | null | null | null | lefi/objects/user.py | Shom770/Lefi | 7d9d45a8356605d82e2b7247715db4992d21c377 | [
"MIT"
] | null | null | null | lefi/objects/user.py | Shom770/Lefi | 7d9d45a8356605d82e2b7247715db4992d21c377 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Optional
from ..utils import Snowflake
from .channel import DMChannel
from .enums import PremiumType
from .flags import UserFlags
if TYPE_CHECKING:
from ..state import State
from .message import Message
__all__ = ("User",)
class User(Snowflake):
"""
Represents a user.
"""
def __init__(self, state: State, data: Dict) -> None:
self._state = state
self._data = data
self._channel: Optional[DMChannel] = None
def __repr__(self) -> str:
name = self.__class__.__name__
return f"<{name} username={self.username!r} discriminator={self.discriminator!r} id={self.id} bot={self.bot}>"
async def create_dm_channel(self) -> DMChannel:
"""
Creates a DMChannel for the user if one isn't open already.
"""
if self._channel is not None:
return self._channel
data = await self._state.http.create_dm_channel(self.id)
self._channel = DMChannel(self._state, data)
return self._channel
async def send(self, content: str) -> Message:
"""
Sends a message to the user.
Parameters:
content (str): The content of the message.
Returns:
The [lefi.Message][] instance of the message sent.
"""
if self._channel is None:
self._channel = await self.create_dm_channel()
return await self._channel.send(content)
@property
def username(self) -> str:
"""
The username of the user.
"""
return self._data["username"]
@property
def discriminator(self) -> str:
"""
The discriminator of the user.
"""
return self._data["discriminator"]
@property
def id(self) -> int: # type: ignore
"""
The ID of the user.
"""
return int(self._data["id"])
@property
def bot(self) -> bool:
"""
Whether or not the user is a bot.
"""
return self._data.get("bot", False)
@property
def system(self) -> bool:
"""
Whether or not the user is a discord system user..
"""
return self._data.get("system", False)
@property
def mfa_enabled(self) -> bool:
"""
Whether or not the user has 2fa enabled.
"""
return self._data.get("mfa_enabled", False)
@property
def accent_color(self) -> int:
"""
The accent color of the user.
"""
return self._data.get("accent_color", 0)
@property
def locale(self) -> Optional[str]:
"""
The locale of the user.
"""
return self._data.get("locale")
@property
def verified(self) -> bool:
"""
Whether the email on the users account is verified.
"""
return self._data.get("verified", False)
@property
def email(self) -> Optional[str]:
"""
The email of the user.
"""
return self._data.get("email")
@property
def flags(self) -> UserFlags:
"""
The flags of the user.
"""
return UserFlags(self._data.get("flags", 0))
@property
def premium_type(self) -> PremiumType:
"""
The premium type of the user.
"""
return PremiumType(self._data.get("premium_type", 0))
@property
def public_flags(self) -> UserFlags:
"""
The users public flags.
"""
return UserFlags(self._data.get("public_flags", 0))
@property
def channel(self) -> Optional[DMChannel]:
"""
The users DMChannel.
"""
return self._channel
| 23.853503 | 118 | 0.564753 |
ace90049bf394a4b460695643f97d0149ce04a72 | 2,995 | py | Python | setup.py | khourhin/rnaseq | 8d5ced3d7bcbef05c8dbc95b6c2a4f9182a1307a | [
"BSD-3-Clause"
] | 1 | 2020-04-20T13:33:00.000Z | 2020-04-20T13:33:00.000Z | setup.py | khourhin/rnaseq | 8d5ced3d7bcbef05c8dbc95b6c2a4f9182a1307a | [
"BSD-3-Clause"
] | 7 | 2019-12-02T20:34:12.000Z | 2020-12-06T19:40:38.000Z | setup.py | khourhin/rnaseq | 8d5ced3d7bcbef05c8dbc95b6c2a4f9182a1307a | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_namespace_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
import subprocess
_MAJOR = 0
_MINOR = 15
_MICRO = 1
version = "%d.%d.%d" % (_MAJOR, _MINOR, _MICRO)
release = "%d.%d" % (_MAJOR, _MINOR)
metainfo = {
"authors": {"main": ("thomas cokelaer", "thomas.cokelaer@pasteur.fr")},
"version": version,
"license": "new BSD",
"url": "https://github.com/sequana/",
"description": "A RNAseq pipeline from raw reads to feature counts",
"platforms": ["Linux", "Unix", "MacOsX", "Windows"],
"keywords": ["snakemake, sequana, RNAseq, RNADiff, differential analysis"],
"classifiers": [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Education",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Physics",
],
}
NAME = "rnaseq"
class Install(install):
def run(self):
cmd = "sequana_completion --name {} --force ".format(NAME)
try:
subprocess.run(cmd.split())
except:
pass
install.run(self)
class Develop(develop):
def run(self):
cmd = "sequana_completion --name {} --force ".format(NAME)
try:
subprocess.run(cmd.split())
except:
pass
develop.run(self)
setup(
name="sequana_{}".format(NAME),
version=version,
maintainer=metainfo["authors"]["main"][0],
maintainer_email=metainfo["authors"]["main"][1],
author=metainfo["authors"]["main"][0],
author_email=metainfo["authors"]["main"][1],
long_description=open("README.rst").read(),
keywords=metainfo["keywords"],
description=metainfo["description"],
license=metainfo["license"],
platforms=metainfo["platforms"],
url=metainfo["url"],
classifiers=metainfo["classifiers"],
# package installation
packages=["sequana_pipelines.rnaseq"],
install_requires=open("requirements.txt").read(),
# This is recursive include of data files
exclude_package_data={"": ["__pycache__"]},
package_data={
"": ["*.yaml", "*.rules", "*.json", "requirements.txt", "*png", "fastq_screen.conf"],
},
zip_safe=False,
entry_points={"console_scripts": ["sequana_rnaseq=sequana_pipelines.rnaseq.main:main"]},
# cmdclass={"install": Install, "develop": Develop}
)
| 34.034091 | 93 | 0.628381 |
ace90081c7273b46dd5717c1280fca85d7706b96 | 24,836 | py | Python | accounts/views.py | juned8236/copyCRM | e5028b99cddbc90ebb4103186f531dc9387ac0e8 | [
"MIT"
] | null | null | null | accounts/views.py | juned8236/copyCRM | e5028b99cddbc90ebb4103186f531dc9387ac0e8 | [
"MIT"
] | null | null | null | accounts/views.py | juned8236/copyCRM | e5028b99cddbc90ebb4103186f531dc9387ac0e8 | [
"MIT"
] | null | null | null | from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import (
CreateView, UpdateView, DetailView, TemplateView, View, DeleteView, FormView)
from accounts.forms import AccountForm, AccountCommentForm, \
AccountAttachmentForm, EmailForm
from accounts.models import Account, Tags, Email
from common.models import User, Comment, Attachments
from common.utils import INDCHOICES, COUNTRIES, \
CURRENCY_CODES, CASE_TYPE, PRIORITY_CHOICE, STATUS_CHOICE
from contacts.models import Contact
from opportunity.models import Opportunity, STAGES, SOURCES
from cases.models import Case
from django.urls import reverse_lazy, reverse
from leads.models import Lead
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from accounts.tasks import send_email, send_email_to_assigned_user
from common.tasks import send_email_user_mentions
from django.contrib.sites.shortcuts import get_current_site
from common.access_decorators_mixins import (
sales_access_required, marketing_access_required, SalesAccessRequiredMixin, MarketingAccessRequiredMixin)
from teams.models import Teams
class AccountsListView(SalesAccessRequiredMixin, LoginRequiredMixin, TemplateView):
    """List accounts visible to the current user, with search filters."""
    model = Account
    context_object_name = "accounts_list"
    template_name = "accounts.html"

    def get_queryset(self):
        # Apply role visibility plus optional GET/POST search filters.
        queryset = self.model.objects.all()
        # Non-admin users only see accounts they created or are assigned to.
        if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
            queryset = queryset.filter(
                Q(created_by=self.request.user) | Q(assigned_to=self.request.user)).distinct()

        if self.request.GET.get('tag', None):
            queryset = queryset.filter(tags__in = self.request.GET.getlist('tag'))

        request_post = self.request.POST
        if request_post:
            if request_post.get('name'):
                queryset = queryset.filter(
                    name__icontains=request_post.get('name'))
            if request_post.get('city'):
                queryset = queryset.filter(
                    billing_city__contains=request_post.get('city'))
            if request_post.get('industry'):
                queryset = queryset.filter(
                    industry__icontains=request_post.get('industry'))
            if request_post.get('tag'):
                queryset = queryset.filter(tags__in=request_post.getlist('tag'))

        return queryset.distinct()

    def get_context_data(self, **kwargs):
        context = super(AccountsListView, self).get_context_data(**kwargs)
        # NOTE(review): get_queryset() is evaluated three times here;
        # consider caching the queryset if this view gets slow.
        open_accounts = self.get_queryset().filter(status='open')
        close_accounts = self.get_queryset().filter(status='close')
        context["accounts_list"] = self.get_queryset()
        context["users"] = User.objects.filter(
            is_active=True).order_by('email')
        context['open_accounts'] = open_accounts
        context['close_accounts'] = close_accounts
        context["industries"] = INDCHOICES
        context["per_page"] = self.request.POST.get('per_page')
        tag_ids = list(set(Account.objects.values_list('tags', flat=True)))
        context["tags"] = Tags.objects.filter(id__in=tag_ids)
        if self.request.POST.get('tag', None):
            context["request_tags"] = self.request.POST.getlist('tag')
        elif self.request.GET.get('tag', None):
            context["request_tags"] = self.request.GET.getlist('tag')
        else:
            context["request_tags"] = None

        search = False
        if (
            self.request.POST.get('name') or self.request.POST.get('city') or
            self.request.POST.get('industry') or self.request.POST.get('tag')
        ):
            search = True

        context["search"] = search

        tab_status = 'Open'
        if self.request.POST.get('tab_status'):
            tab_status = self.request.POST.get('tab_status')
        context['tab_status'] = tab_status
        return context

    def get(self, request, *args, **kwargs):
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        # POST carries the search-filter form; rendered like GET.
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)
class CreateAccountView(SalesAccessRequiredMixin, LoginRequiredMixin, CreateView):
    """Create a new Account plus its tags/contacts/attachments/teams."""
    model = Account
    form_class = AccountForm
    template_name = "create_account.html"

    def dispatch(self, request, *args, **kwargs):
        # Pick the set of assignable users based on the requester's role.
        if self.request.user.role == 'ADMIN' or self.request.user.is_superuser:
            self.users = User.objects.filter(is_active=True).order_by('email')
        elif request.user.google.all():
            self.users = []
        else:
            self.users = User.objects.filter(role='ADMIN').order_by('email')
        return super(
            CreateAccountView, self).dispatch(request, *args, **kwargs)

    def get_form_kwargs(self):
        kwargs = super(CreateAccountView, self).get_form_kwargs()
        kwargs.update({"account": True})
        kwargs.update({"request_user": self.request.user})
        # if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
        #     kwargs.update({"request_user": self.request.user})
        return kwargs

    def post(self, request, *args, **kwargs):
        self.object = None
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        return self.form_invalid(form)

    def form_valid(self, form):
        # Save Account
        account_object = form.save(commit=False)
        account_object.created_by = self.request.user
        account_object.save()

        # Tags arrive as a single comma-separated string; reuse
        # existing tags (case-insensitive) or create missing ones.
        if self.request.POST.get('tags', ''):
            tags = self.request.POST.get("tags")
            splitted_tags = tags.split(",")
            for t in splitted_tags:
                tag = Tags.objects.filter(name=t.lower())
                if tag:
                    tag = tag[0]
                else:
                    tag = Tags.objects.create(name=t.lower())
                account_object.tags.add(tag)
        if self.request.POST.getlist('contacts', []):
            account_object.contacts.add(*self.request.POST.getlist('contacts'))
        if self.request.POST.getlist('assigned_to', []):
            account_object.assigned_to.add(*self.request.POST.getlist('assigned_to'))
        if self.request.FILES.get('account_attachment'):
            attachment = Attachments()
            attachment.created_by = self.request.user
            attachment.file_name = self.request.FILES.get(
                'account_attachment').name
            attachment.account = account_object
            attachment.attachment = self.request.FILES.get(
                'account_attachment')
            attachment.save()

        # Selecting teams assigns all of their members as well.
        if self.request.POST.getlist('teams', []):
            user_ids = Teams.objects.filter(id__in=self.request.POST.getlist('teams')).values_list('users', flat=True)
            assinged_to_users_ids = account_object.assigned_to.all().values_list('id', flat=True)
            for user_id in user_ids:
                if user_id not in assinged_to_users_ids:
                    account_object.assigned_to.add(user_id)

        # Notify every assigned user asynchronously.
        assigned_to_list = list(account_object.assigned_to.all().values_list('id', flat=True))
        current_site = get_current_site(self.request)
        recipients = assigned_to_list
        send_email_to_assigned_user.delay(recipients, account_object.id, domain=current_site.domain,
                                          protocol=self.request.scheme)
        if self.request.POST.get("savenewform"):
            return redirect("accounts:new_account")

        if self.request.is_ajax():
            data = {'success_url': reverse_lazy(
                'accounts:list'), 'error': False}
            return JsonResponse(data)
        return redirect("accounts:list")

    def form_invalid(self, form):
        if self.request.is_ajax():
            return JsonResponse({'error': True, 'errors': form.errors})
        return self.render_to_response(
            self.get_context_data(form=form)
        )

    def get_context_data(self, **kwargs):
        context = super(CreateAccountView, self).get_context_data(**kwargs)
        context["account_form"] = context["form"]
        context["users"] = self.users
        context["industries"] = INDCHOICES
        context["countries"] = COUNTRIES
        # context["contact_count"] = Contact.objects.count()
        # NOTE(review): a superuser whose role != 'ADMIN' takes the
        # else-branch, never sets context["contacts"], and the final
        # count lookup would raise KeyError -- verify whether that
        # combination can occur.
        if self.request.user.role == 'ADMIN':
            context["leads"] = Lead.objects.exclude(
                status__in=['converted', 'closed'])
            context["contacts"] = Contact.objects.all()
        else:
            context["leads"] = Lead.objects.filter(
                Q(assigned_to__in=[self.request.user]) | Q(created_by=self.request.user)).exclude(
                status__in=['converted', 'closed'])
        context["lead_count"] = context["leads"].count()
        if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
            context["lead_count"] = Lead.objects.filter(
                Q(assigned_to__in=[self.request.user]) | Q(created_by=self.request.user)).exclude(status='closed').count()
            context["contacts"] = Contact.objects.filter(
                Q(assigned_to__in=[self.request.user]) | Q(created_by=self.request.user))
        context["contact_count"] = context["contacts"].count()
        return context
class AccountDetailView(SalesAccessRequiredMixin, LoginRequiredMixin, DetailView):
    """Show one account with its comments, attachments and related records."""
    model = Account
    context_object_name = "account_record"
    template_name = "view_account.html"

    def get_context_data(self, **kwargs):
        context = super(AccountDetailView, self).get_context_data(**kwargs)
        account_record = context["account_record"]
        # Non-admins may only view accounts they created or are assigned to.
        if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
            if not ((self.request.user == account_record.created_by) or
                    (self.request.user in account_record.assigned_to.all())):
                raise PermissionDenied

        comment_permission = True if (
            self.request.user == account_record.created_by or
            self.request.user.is_superuser or self.request.user.role == 'ADMIN'
        ) else False

        # Usernames offered for @-mention autocompletion: admins can
        # mention everyone, other viewers only the account's creator.
        if self.request.user.is_superuser or self.request.user.role == 'ADMIN':
            users_mention = list(User.objects.filter(is_active=True).values('username'))
        elif self.request.user != account_record.created_by:
            if account_record.created_by:
                users_mention = [{'username': account_record.created_by.username}]
            else:
                users_mention = []
        else:
            users_mention = []

        context.update({
            "comments": account_record.accounts_comments.all(),
            "attachments": account_record.account_attachment.all(),
            "opportunity_list": Opportunity.objects.filter(
                account=account_record),
            "contacts": account_record.contacts.all(),
            "users": User.objects.filter(is_active=True).order_by('email'),
            "cases": Case.objects.filter(account=account_record),
            "stages": STAGES,
            "sources": SOURCES,
            "countries": COUNTRIES,
            "currencies": CURRENCY_CODES,
            "case_types": CASE_TYPE,
            "case_priority": PRIORITY_CHOICE,
            "case_status": STATUS_CHOICE,
            'comment_permission': comment_permission,
            'tasks':account_record.accounts_tasks.all(),
            'invoices':account_record.accounts_invoices.all(),
            'users_mention': users_mention,
        })
        return context
class AccountUpdateView(SalesAccessRequiredMixin, LoginRequiredMixin, UpdateView):
    """Edit an account; resets and re-applies tags/contacts/assignments."""
    model = Account
    form_class = AccountForm
    template_name = "create_account.html"

    def dispatch(self, request, *args, **kwargs):
        self.users = User.objects.filter(is_active=True).order_by('email')
        # if self.request.user.role == 'ADMIN' or self.request.user.is_superuser:
        # elif request.user.google.all():
        #     self.users = []
        # else:
        #     self.users = User.objects.filter(role='ADMIN').order_by('email')
        return super(AccountUpdateView, self).dispatch(
            request, *args, **kwargs)

    def get_form_kwargs(self):
        kwargs = super(AccountUpdateView, self).get_form_kwargs()
        kwargs.update({"account": True})
        kwargs.update({"request_user": self.request.user})
        # if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
        #     kwargs.update({"request_user": self.request.user})
        return kwargs

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        return self.form_invalid(form)

    def form_valid(self, form):
        # Save Account
        account_object = form.save(commit=False)
        account_object.save()

        # Tags are fully replaced by the submitted comma-separated list.
        account_object.tags.clear()
        if self.request.POST.get('tags', ''):
            tags = self.request.POST.get("tags")
            splitted_tags = tags.split(",")
            for t in splitted_tags:
                tag = Tags.objects.filter(name=t.lower())
                if tag:
                    tag = tag[0]
                else:
                    tag = Tags.objects.create(name=t.lower())
                account_object.tags.add(tag)
        if self.request.POST.getlist('contacts', []):
            account_object.contacts.clear()
            account_object.contacts.add(*self.request.POST.getlist('contacts'))
        # Assignments are replaced too; an empty submission clears them.
        if self.request.POST.getlist('assigned_to', []):
            account_object.assigned_to.clear()
            account_object.assigned_to.add(*self.request.POST.getlist('assigned_to'))
        else:
            account_object.assigned_to.clear()
        if self.request.FILES.get('account_attachment'):
            attachment = Attachments()
            attachment.created_by = self.request.user
            attachment.file_name = self.request.FILES.get(
                'account_attachment').name
            attachment.account = account_object
            attachment.attachment = self.request.FILES.get(
                'account_attachment')
            attachment.save()

        # NOTE(review): the notification is queued before team members
        # are added below, so team-derived assignees are not emailed --
        # confirm whether that is intended.
        assigned_to_list = list(account_object.assigned_to.all().values_list('id', flat=True))
        current_site = get_current_site(self.request)
        recipients = assigned_to_list
        send_email_to_assigned_user.delay(recipients, account_object.id, domain=current_site.domain,
                                          protocol=self.request.scheme)
        if self.request.POST.getlist('teams', []):
            user_ids = Teams.objects.filter(id__in=self.request.POST.getlist('teams')).values_list('users', flat=True)
            assinged_to_users_ids = account_object.assigned_to.all().values_list('id', flat=True)
            for user_id in user_ids:
                if user_id not in assinged_to_users_ids:
                    account_object.assigned_to.add(user_id)

        if self.request.is_ajax():
            data = {'success_url': reverse_lazy(
                'accounts:list'), 'error': False}
            return JsonResponse(data)
        return redirect("accounts:list")

    def form_invalid(self, form):
        if self.request.is_ajax():
            return JsonResponse({'error': True, 'errors': form.errors})
        return self.render_to_response(
            self.get_context_data(form=form)
        )

    def get_context_data(self, **kwargs):
        context = super(AccountUpdateView, self).get_context_data(**kwargs)
        context["account_obj"] = self.object
        if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
            if ((self.request.user != context['account_obj'].created_by ) and
                (self.request.user not in context['account_obj'].assigned_to.all())):
                raise PermissionDenied
        context["account_form"] = context["form"]
        if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
            self.users = self.users.filter(Q(role='ADMIN') | Q(id__in=[self.request.user.id,]))
        context["users"] = self.users
        context["industries"] = INDCHOICES
        context["countries"] = COUNTRIES
        context["contact_count"] = Contact.objects.count()
        if self.request.user.role == 'ADMIN':
            context["leads"] = Lead.objects.exclude(
                status__in=['converted', 'closed'])
        else:
            context["leads"] = Lead.objects.filter(
                Q(assigned_to__in=[self.request.user]) | Q(created_by=self.request.user)).exclude(
                status__in=['converted', 'closed'])
        context["lead_count"] = context["leads"].count()
        if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
            context["lead_count"] = Lead.objects.filter(
                Q(assigned_to__in=[self.request.user]) | Q(created_by=self.request.user)).exclude(status='closed').count()
        return context
class AccountDeleteView(SalesAccessRequiredMixin, LoginRequiredMixin, DeleteView):
    """Delete an account (creator or admin only)."""
    model = Account
    template_name = 'view_account.html'

    def get(self, request, *args, **kwargs):
        # NOTE(review): deletion happens on GET, which is destructive
        # for a safe method and bypasses CSRF protection -- consider
        # moving this to POST/DELETE.
        self.object = self.get_object()
        if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
            if self.request.user != self.object.created_by:
                raise PermissionDenied
        self.object.delete()
        return redirect("accounts:list")
class AddCommentView(LoginRequiredMixin, CreateView):
    """AJAX endpoint adding a comment to an account."""
    model = Comment
    form_class = AccountCommentForm
    http_method_names = ["post"]

    def post(self, request, *args, **kwargs):
        self.object = None
        self.account = get_object_or_404(
            Account, id=request.POST.get('accountid'))
        # NOTE(review): users merely assigned to the account cannot
        # comment; only the creator, superusers and admins can --
        # confirm this restriction is intended.
        if (
            request.user == self.account.created_by or request.user.is_superuser or
            request.user.role == 'ADMIN'
        ):
            form = self.get_form()
            if form.is_valid():
                return self.form_valid(form)
            return self.form_invalid(form)

        data = {
            'error': "You don't have permission to comment for this account."}
        return JsonResponse(data)

    def form_valid(self, form):
        comment = form.save(commit=False)
        comment.commented_by = self.request.user
        comment.account = self.account
        comment.save()
        comment_id = comment.id
        # Notify any @-mentioned users asynchronously.
        current_site = get_current_site(self.request)
        send_email_user_mentions.delay(comment_id, 'accounts', domain=current_site.domain,
                                       protocol=self.request.scheme)
        return JsonResponse({
            "comment_id": comment.id, "comment": comment.comment,
            "commented_on": comment.commented_on,
            "commented_on_arrow": comment.commented_on_arrow,
            "commented_by": comment.commented_by.email
        })

    def form_invalid(self, form):
        return JsonResponse({"error": form['comment'].errors})
class UpdateCommentView(LoginRequiredMixin, View):
    """AJAX endpoint editing an existing comment (author only)."""
    http_method_names = ["post"]

    def post(self, request, *args, **kwargs):
        self.comment_obj = get_object_or_404(
            Comment, id=request.POST.get("commentid"))
        if request.user == self.comment_obj.commented_by:
            form = AccountCommentForm(request.POST, instance=self.comment_obj)
            if form.is_valid():
                return self.form_valid(form)
            return self.form_invalid(form)

        data = {'error': "You don't have permission to edit this comment."}
        return JsonResponse(data)

    def form_valid(self, form):
        self.comment_obj.comment = form.cleaned_data.get("comment")
        self.comment_obj.save(update_fields=["comment"])
        comment_id = self.comment_obj.id
        # Re-scan the edited text for @-mentions and notify them.
        current_site = get_current_site(self.request)
        send_email_user_mentions.delay(comment_id, 'accounts', domain=current_site.domain,
                                       protocol=self.request.scheme)
        return JsonResponse({
            "comment_id": self.comment_obj.id,
            "comment": self.comment_obj.comment,
        })

    def form_invalid(self, form):
        return JsonResponse({"error": form['comment'].errors})
class DeleteCommentView(LoginRequiredMixin, View):
    """AJAX endpoint deleting a comment (author only)."""

    def post(self, request, *args, **kwargs):
        self.object = get_object_or_404(
            Comment, id=request.POST.get("comment_id"))
        if request.user == self.object.commented_by:
            self.object.delete()
            # "cid" echoes the deleted comment id back to the client.
            data = {"cid": request.POST.get("comment_id")}
            return JsonResponse(data)

        data = {'error': "You don't have permission to delete this comment."}
        return JsonResponse(data)
class AddAttachmentView(LoginRequiredMixin, CreateView):
    """AJAX endpoint attaching a file to an account."""
    model = Attachments
    form_class = AccountAttachmentForm
    http_method_names = ["post"]

    def post(self, request, *args, **kwargs):
        self.object = None
        self.account = get_object_or_404(
            Account, id=request.POST.get('accountid'))
        # Only the creator, superusers and admins may attach files.
        if (
            request.user == self.account.created_by or
            request.user.is_superuser or
            request.user.role == 'ADMIN'
        ):
            form = self.get_form()
            if form.is_valid():
                return self.form_valid(form)
            return self.form_invalid(form)

        data = {
            'error': "You don't have permission to add attachment \
            for this account."}
        return JsonResponse(data)

    def form_valid(self, form):
        attachment = form.save(commit=False)
        attachment.created_by = self.request.user
        attachment.file_name = attachment.attachment.name
        attachment.account = self.account
        attachment.save()
        return JsonResponse({
            "attachment_id": attachment.id,
            "attachment": attachment.file_name,
            "attachment_url": attachment.attachment.url,
            "download_url": reverse('common:download_attachment',
                                    kwargs={'pk': attachment.id}),
            "attachment_display": attachment.get_file_type_display(),
            "created_on": attachment.created_on,
            "created_on_arrow": attachment.created_on_arrow,
            "created_by": attachment.created_by.email,
            "file_type": attachment.file_type()
        })

    def form_invalid(self, form):
        return JsonResponse({"error": form['attachment'].errors})
class DeleteAttachmentsView(LoginRequiredMixin, View):
    """Remove an attachment; allowed for its creator, superusers and admins."""

    def post(self, request, *args, **kwargs):
        attachment_id = request.POST.get("attachment_id")
        self.object = get_object_or_404(Attachments, id=attachment_id)
        can_delete = (
            request.user == self.object.created_by or
            request.user.is_superuser or
            request.user.role == 'ADMIN'
        )
        if can_delete:
            self.object.delete()
            # Hand the id back so the client can remove the list entry.
            return JsonResponse({"acd": attachment_id})
        return JsonResponse(
            {'error': "You don't have permission to delete this attachment."})
@login_required
def create_mail(request, account_id):
    """Render the compose-mail page (GET) or queue an e-mail (POST).

    NOTE(review): the POST branch passes ``account_id`` as ``from_email`` --
    presumably ``send_email`` resolves the sender address from the account;
    verify against the task implementation.
    """
    if request.method == 'GET':
        account = get_object_or_404(Account, pk=account_id)
        context = {
            'account_id': account_id,
            'contacts_list': list(account.contacts.all().values('email')),
            'email_form': EmailForm(),
        }
        return render(request, 'create_mail_accounts.html', context)
    if request.method == 'POST':
        form = EmailForm(request.POST)
        if not form.is_valid():
            return JsonResponse({'error': True, 'errors': form.errors})
        recipients = form.data.get('recipients').split(',')
        # Sending happens asynchronously via Celery.
        send_email.delay(form.data.get('message_subject'),
                         form.data.get('message_body'),
                         from_email=account_id, recipients=recipients)
        return JsonResponse({'error': False})
# @login_required
# def get_account_details(request, account_id):
# from django.core import serializers
# import json
# fields = ['name', 'email', 'phone', 'industry', 'billing_address_line', 'billing_street', 'billing_city',
# 'billing_state', 'billing_postcode', 'billing_country', 'website', 'description',
# 'created_by__email', 'created_on', 'tags__name', 'status', 'contacts__email', 'assigned_to__email']
# data = serializers.serialize('json', Account.objects.filter(id=account_id), fields=fields)
# return JsonResponse({'data': json.loads(data)[0]}) | 42.67354 | 122 | 0.633153 |
ace900b8b7ebc350fde003660861ce8856a8b310 | 1,360 | py | Python | utils.py | TevenLeScao/awd-lstm-lm | 0cfd295310addf551bbff32d5de5e4db76c9be5f | [
"BSD-3-Clause"
] | null | null | null | utils.py | TevenLeScao/awd-lstm-lm | 0cfd295310addf551bbff32d5de5e4db76c9be5f | [
"BSD-3-Clause"
] | null | null | null | utils.py | TevenLeScao/awd-lstm-lm | 0cfd295310addf551bbff32d5de5e4db76c9be5f | [
"BSD-3-Clause"
] | null | null | null | import torch
def repackage_hidden(h):
    """Detach hidden states from their computation history.

    Works recursively: tensors are detached directly, while (nested)
    tuples of tensors are rebuilt with every leaf detached.
    """
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(item) for item in h)
def batchify(data, bsz, args):
    """Reshape a 1-D token stream into ``bsz`` parallel columns.

    Trailing tokens that do not fill a whole batch are dropped, then the
    stream is laid out so that column ``i`` holds the i-th contiguous
    chunk of the corpus.  The result is moved to the GPU when
    ``args.cuda`` is set.
    """
    # Number of complete batches the stream can provide.
    nbatch = data.size(0) // bsz
    trimmed = data.narrow(0, 0, nbatch * bsz)
    batched = trimmed.view(bsz, -1).t().contiguous()
    return batched.cuda() if args.cuda else batched
def get_batch(source, i, args, seq_len=None, evaluation=False):
    """Slice one (input, target) pair out of a batchified corpus.

    ``target`` is the input window shifted one step ahead and flattened,
    as used by a next-token language-modelling loss.  ``evaluation`` is
    accepted for API compatibility but unused.
    """
    window = seq_len if seq_len else args.bptt
    # Never slice past the end: a target must exist for every input row.
    length = min(window, len(source) - 1 - i)
    inputs = source[i:i + length]
    targets = source[i + 1:i + 1 + length].view(-1)
    return inputs, targets
def openai_compute(n_params, batch_size, training_steps):
    """Return the 6*N*B*S training-compute estimate, scaled per day.

    The division by 24*3600 expresses the result per-day units
    (per the original "given in PF/s" note).
    """
    seconds_per_day = 24 * 3600
    return 6 * n_params * batch_size * training_steps / seconds_per_day
def excluded_from_params(parameter: torch.nn.Parameter, vocab_size=-1):
    """Return True when the parameter looks like an embedding/softmax
    matrix, i.e. one of its dimensions equals the vocabulary size."""
    return vocab_size in parameter.shape


def non_emb_param_count(model: torch.nn.Module, vocab_size=-1):
    """Count model parameters, skipping vocabulary-sized (embedding) ones."""
    total = 0
    for param in model.parameters():
        if not excluded_from_params(param, vocab_size):
            total += param.numel()
    return total
| 31.627907 | 96 | 0.678676 |
ace901d994f00cc9b45911d08d196b99fbfcb1e4 | 4,064 | py | Python | alipay/aop/api/request/AlipayMsaasMediarecogMmtcaftscvPicvideoQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayMsaasMediarecogMmtcaftscvPicvideoQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayMsaasMediarecogMmtcaftscvPicvideoQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMsaasMediarecogMmtcaftscvPicvideoQueryModel import AlipayMsaasMediarecogMmtcaftscvPicvideoQueryModel
class AlipayMsaasMediarecogMmtcaftscvPicvideoQueryRequest(object):
    """Request object for the ``alipay.msaas.mediarecog.mmtcaftscv.picvideo.query`` gateway API.

    Carries the business payload (``biz_model`` or ``biz_content``) plus the
    common gateway parameters, and flattens everything into the POST
    parameter dict via :meth:`get_params`.
    """

    def __init__(self, biz_model=None):
        # Typed business model; preferred over raw biz_content in get_params().
        self._biz_model = biz_model
        # Payload as a model instance or raw dict (coerced in the setter).
        self._biz_content = None
        # Gateway protocol version.
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        # Optional callback/redirect URLs, passed through in get_params().
        self._notify_url = None
        self._return_url = None
        # Optional user-defined parameters merged into the request.
        self._udf_params = None
        # Encryption flag; not consumed within this class.
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept a ready model instance or convert a raw dict into one.
        if isinstance(value, AlipayMsaasMediarecogMmtcaftscvPicvideoQueryModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMsaasMediarecogMmtcaftscvPicvideoQueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # User-defined params must be a mapping; anything else is ignored.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach one extra key/value pair that get_params() merges in."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Build the flat parameter dict that is POSTed to the gateway.

        ``biz_model`` (when set) or ``biz_content`` is JSON-encoded into the
        ``biz_content`` field with sorted keys and compact separators.
        Optional gateway fields are included only when they have a value.
        """
        params = dict()
        params[P_METHOD] = 'alipay.msaas.mediarecog.mmtcaftscv.picvideo.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Return the multipart (file) parts; this API uploads none."""
        multipart_params = dict()
        return multipart_params
| 28.027586 | 148 | 0.652559 |
ace902623b321dbdaf42093eecb497ababc80c17 | 1,929 | py | Python | Project/FrontEnd/forms.py | conorshortt123/facial-recognition-project | 7440c9c9920c91b0db2c0c41628fec89ceaea464 | [
"MIT"
] | 5 | 2020-02-03T16:09:56.000Z | 2020-04-19T16:31:50.000Z | Project/FrontEnd/forms.py | conorshortt123/facial-recognition-project | 7440c9c9920c91b0db2c0c41628fec89ceaea464 | [
"MIT"
] | 8 | 2020-02-03T12:01:43.000Z | 2022-03-12T00:16:48.000Z | Project/FrontEnd/forms.py | conorshortt123/facial-recognition-project | 7440c9c9920c91b0db2c0c41628fec89ceaea464 | [
"MIT"
] | 2 | 2020-06-26T13:32:55.000Z | 2021-04-21T13:24:25.000Z | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo
from flask_wtf.file import FileField, FileRequired, FileAllowed
from flask_uploads import UploadSet, IMAGES
# Flask-Uploads set restricting uploads to image file extensions.
images = UploadSet('images', IMAGES)
"""Create the registration form.
Set requirements of certain fields.
"""
class RegistrationForm(FlaskForm):
    """Sign-up form collecting account credentials, contact details and a
    profile image.  Field declaration order determines rendering order."""
    username = StringField('Username',
                           validators=[DataRequired(), Length(min=2, max=20)])
    firstName = StringField('First name',
                            validators=[DataRequired()])
    secondName = StringField('Second name',
                             validators=[DataRequired()])
    address = StringField('Address',
                          validators=[DataRequired()])
    MobileNum = StringField('Mobile Number',
                            validators=[DataRequired()])
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    confirm_password = PasswordField('Confirm Password',
                                     validators=[DataRequired(), EqualTo('password')])
    # Upload is mandatory and must match the image extension set above.
    image = FileField('Image',validators=[FileRequired(),
                                          FileAllowed(images, 'Images only!')])
    submit = SubmitField('Sign up')
"""Create the form for searching the
database. Set requirements
"""
class searchForm(FlaskForm):
    """Single-field form used to look up a profile by username."""
    username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
"""Create the Login form.
Set the username and password
to be required.
"""
class LoginForm(FlaskForm):
    """Authentication form with an optional "remember me" checkbox."""
    username = StringField('Username',
                           validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember = BooleanField('Remember Me')
    submit = SubmitField('Login')
| 38.58 | 90 | 0.654743 |
ace902e7ec2b13c388f2ae92f89e88f4c18d091c | 14,736 | py | Python | aiida_quantumespresso_hp/parsers/hp.py | sphuber/aiida-uscf | 1fbea899424f63e71054815e9abe5d09b0a2b1cf | [
"MIT"
] | 1 | 2019-10-09T14:09:15.000Z | 2019-10-09T14:09:15.000Z | aiida_quantumespresso_hp/parsers/hp.py | sphuber/aiida-uscf | 1fbea899424f63e71054815e9abe5d09b0a2b1cf | [
"MIT"
] | 6 | 2018-01-25T09:17:31.000Z | 2020-09-22T12:15:42.000Z | aiida_quantumespresso_hp/parsers/hp.py | sphuber/aiida-quantumespresso-uscf | 1fbea899424f63e71054815e9abe5d09b0a2b1cf | [
"MIT"
] | 2 | 2020-03-24T17:59:13.000Z | 2021-01-12T23:54:36.000Z | # -*- coding: utf-8 -*-
import glob, os, re, numpy, enum
from aiida.common.exceptions import InvalidOperation
from aiida.common.datastructures import calc_states
from aiida.orm.data.array import ArrayData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.singlefile import SinglefileData
from aiida.parsers.parser import Parser
from aiida.parsers.exceptions import OutputParsingError
from aiida_quantumespresso.parsers import QEOutputParsingError
from aiida_quantumespresso_hp.calculations.hp import HpCalculation
class HpParser(Parser):
    """
    Parser implementation for Quantum ESPRESSO Hp calculations
    """

    _parser_version = '0.1'
    _parser_name = 'AiiDA Quantum ESPRESSO HP parser'

    # Exit codes produced by parse_stdout; zero means success.
    ERROR_NO_OUTPUT = 1
    ERROR_PREMATURE_TERMINATION = 2
    ERROR_CONVERGENCE_NOT_REACHED = 3
    ERROR_INCORRECT_ORDER_ATOMIC_POSITIONS = 4
    ERROR_MISSING_PERTURBATION_FILE = 5

    def __init__(self, calculation):
        """
        Initialize the instance of HpParser

        :param calculation: the completed HpCalculation node whose retrieved output is to be parsed
        :raises QEOutputParsingError: if calculation is not an HpCalculation
        """
        if not isinstance(calculation, HpCalculation):
            raise QEOutputParsingError('input calculation must be a HpCalculation')

        self.calculation = calculation

        super(HpParser, self).__init__(calculation)

    def get_linkname_outparams(self):
        """
        Returns the name of the link to the standard output ParameterData
        """
        return 'output_parameters'

    def get_linkname_hubbard(self):
        """
        Returns the name of the link to the Hubbard output ParameterData
        """
        return 'hubbard'

    def get_linkname_hubbard_file(self):
        """
        Returns the name of the link to the Hubbard output SinglefileData that can be used for a PwCalculation
        """
        return 'hubbard_file'

    def get_linkname_matrices(self):
        """
        Returns the name of the link to the output matrices ArrayData
        """
        return 'matrices'

    def get_linkname_chi(self):
        """
        Returns the name of the link to the output chi ArrayData
        """
        return 'chi'

    def parse_result_template(self):
        """
        Returns a dictionary with the skeleton of a parsing result: the
        parser info string and empty warning lists
        """
        return {
            'parser_info': '{} v{}'.format(self._parser_name, self._parser_version),
            'parser_warnings': [],
            'warnings': []
        }

    def parse_with_retrieved(self, retrieved):
        """
        Parse the results of retrieved nodes

        :param retrieved: dictionary of retrieved nodes
        :returns: tuple of a boolean success flag and a list of (linkname, node) output tuples
        """
        output_nodes = []

        try:
            output_folder = retrieved[self.calculation._get_linkname_retrieved()]
        except KeyError:
            self.logger.error('no retrieved folder found')
            return False, ()

        # Verify the standard output file is present, parse it and attach as output parameters
        try:
            filepath_stdout = output_folder.get_abs_path(self.calculation.output_file_name)
        except OSError as exception:
            # 'filepath_stdout' is unbound when get_abs_path raises, so report the expected filename
            self.logger.error("expected output file '{}' was not found".format(self.calculation.output_file_name))
            return False, ()

        # parse_stdout returns an integer exit status where zero indicates success
        exit_status, dict_stdout = self.parse_stdout(filepath_stdout)
        is_success = exit_status == 0
        output_nodes.append((self.get_linkname_outparams(), ParameterData(dict=dict_stdout)))

        # The final chi and hubbard files are only written by a serial or post-processing calculation
        complete_calculation = True

        # We cannot use get_abs_path of the output_folder, since that will check for file existence and will throw
        output_path = output_folder.get_abs_path('.')
        filepath_chi = os.path.join(output_path, self.calculation.output_file_name_chi)
        filepath_hubbard = os.path.join(output_path, self.calculation.output_file_name_hubbard)
        filepath_hubbard_file = os.path.join(output_path, self.calculation.output_file_name_hubbard_file)

        for filepath in [filepath_chi, filepath_hubbard]:
            if not os.path.isfile(filepath):
                complete_calculation = False
                self.logger.info("output file '{}' was not found, assuming partial calculation".format(filepath))

        if os.path.isfile(filepath_hubbard_file):
            output_hubbard_file = SinglefileData(file=filepath_hubbard_file)
            output_nodes.append((self.get_linkname_hubbard_file(), output_hubbard_file))

        if complete_calculation:
            dict_hubbard = self.parse_hubbard(filepath_hubbard)
            dict_chi = self.parse_chi(filepath_chi)

            output_matrices = ArrayData()
            output_matrices.set_array('chi0', dict_hubbard['chi0'])
            output_matrices.set_array('chi1', dict_hubbard['chi1'])
            output_matrices.set_array('chi0_inv', dict_hubbard['chi0_inv'])
            output_matrices.set_array('chi1_inv', dict_hubbard['chi1_inv'])
            output_matrices.set_array('hubbard', dict_hubbard['hubbard'])

            output_chi = ArrayData()
            output_chi.set_array('chi0', dict_chi['chi0'])
            output_chi.set_array('chi1', dict_chi['chi1'])

            output_hubbard = ParameterData(dict=dict_hubbard['hubbard_U'])

            output_nodes.append((self.get_linkname_matrices(), output_matrices))
            output_nodes.append((self.get_linkname_hubbard(), output_hubbard))
            output_nodes.append((self.get_linkname_chi(), output_chi))

        return is_success, output_nodes

    def parse_stdout(self, filepath):
        """
        Parse the output parameters from the output of a Hp calculation written to standard out

        :param filepath: path to file containing output written to stdout
        :returns: integer exit status of the parsing, zero meaning success
        :returns: dictionary with the parsed parameters
        """
        exit_status = 0
        is_terminated = True
        result = self.parse_result_template()

        try:
            with open(filepath, 'r') as handle:
                output = handle.readlines()
        except IOError:
            raise QEOutputParsingError('failed to read file: {}.'.format(filepath))

        # Empty output can be considered as a problem
        if not output:
            exit_status = self.ERROR_NO_OUTPUT
            return exit_status, result

        # Parse the output line by line by creating an iterator of the lines
        it = iter(output)
        for line in it:

            # If the output does not contain the line with 'JOB DONE' the program was prematurely terminated
            if 'JOB DONE' in line:
                is_terminated = False

            # If the atoms were not ordered correctly in the parent calculation
            if 'WARNING! All Hubbard atoms must be listed first in the ATOMIC_POSITIONS card of PWscf' in line:
                exit_status = self.ERROR_INCORRECT_ORDER_ATOMIC_POSITIONS
                return exit_status, result

            # If not all expected perturbation files were found for a chi_collect calculation
            if 'Error in routine hub_read_chi (1)' in line:
                exit_status = self.ERROR_MISSING_PERTURBATION_FILE
                return exit_status, result

            # If the run did not convergence we expect to find the following string
            match = re.search(r'.*Convergence has not been reached after\s+([0-9]+)\s+iterations!.*', line)
            if match:
                exit_status = self.ERROR_CONVERGENCE_NOT_REACHED
                return exit_status, result

            # Determine the atomic sites that will be perturbed, or that the calculation expects
            # to have been calculated when post-processing the final matrices
            match = re.search(r'.*List of\s+([0-9]+)\s+atoms which will be perturbed.*', line)
            if match:
                result['hubbard_sites'] = self._parse_hubbard_sites(it, int(match.group(1)))

            # A calculation that will only perturb a single atom will only print one line
            match = re.search(r'.*Atom which will be perturbed.*', line)
            if match:
                result['hubbard_sites'] = self._parse_hubbard_sites(it, 1)

        if is_terminated:
            exit_status = self.ERROR_PREMATURE_TERMINATION

        return exit_status, result

    def _parse_hubbard_sites(self, it, number_of_perturbed_atoms):
        """
        Consume the block of perturbed atomic sites from the stdout line iterator

        :param it: iterator over the stdout lines, positioned right after the header line
        :param number_of_perturbed_atoms: number of site entries to consume
        :returns: dictionary mapping site index (as a string) to its kind name
        """
        hubbard_sites = {}
        # The header is followed by a single blank line before the site list
        blank_line = next(it)
        for i in range(number_of_perturbed_atoms):
            values = next(it).split()
            index = values[0]
            kind = values[1]
            hubbard_sites[index] = kind
        return hubbard_sites

    def parse_chi(self, filepath):
        """
        Parse the contents of the file {prefix}.chi.dat as written by a HpCalculation

        :param filepath: absolute filepath to the chi.dat output file
        :returns: dictionary with parsed contents
        :raises OutputParsingError: if the file cannot be read or the matrix block
            boundaries cannot be determined
        """
        try:
            with open(filepath, 'r') as handle:
                data = handle.readlines()
        except IOError as exception:
            raise OutputParsingError("could not read the '{}' output file".format(os.path.basename(filepath)))

        result = {}
        # Each entry holds the [start, end) line range of one matrix block
        blocks = {
            'chi0': [None, None],
            'chi1': [None, None],
        }

        for line_number, line in enumerate(data):
            if 'chi0' in line:
                blocks['chi0'][0] = line_number + 1

            if 'chi1' in line:
                blocks['chi0'][1] = line_number
                blocks['chi1'][0] = line_number + 1
                blocks['chi1'][1] = len(data)
                break

        # All boundaries must be set (headers are never on line 0, so a falsy
        # value here always means the marker was missing)
        if not all(sum(blocks.values(), [])):
            raise OutputParsingError("could not determine beginning and end of all blocks in '{}'"
                .format(os.path.basename(filepath)))

        for matrix_name in ('chi0', 'chi1'):
            matrix_block = blocks[matrix_name]
            matrix_data = data[matrix_block[0]:matrix_block[1]]
            matrix = numpy.matrix(self.parse_hubbard_matrix(matrix_data))
            result[matrix_name] = matrix

        return result

    def parse_hubbard(self, filepath):
        """
        Parse the contents of the file {prefix}.Hubbard_U.dat as written by a HpCalculation

        :param filepath: absolute filepath to the Hubbard_U.dat output file
        :returns: dictionary with parsed contents
        :raises OutputParsingError: if the file cannot be read, the matrix block
            boundaries cannot be determined, or a parsed matrix is not square
        """
        try:
            with open(filepath, 'r') as handle:
                data = handle.readlines()
        except IOError as exception:
            raise OutputParsingError("could not read the '{}' output file".format(os.path.basename(filepath)))

        result = {
            'hubbard_U': {
                'sites': []
            }
        }
        # Each entry holds the [start, end) line range of one matrix block
        blocks = {
            'chi0': [None, None],
            'chi1': [None, None],
            'chi0_inv': [None, None],
            'chi1_inv': [None, None],
            'hubbard': [None, None],
        }

        for line_number, line in enumerate(data):

            # Parse the per-site Hubbard U values that follow a 'site n.' header
            if 'site n.' in line:
                parsed = False
                subline_number = line_number + 1
                while not parsed:
                    subline = data[subline_number].strip()
                    if subline:
                        subline_number += 1
                        subdata = subline.split()
                        result['hubbard_U']['sites'].append({
                            'index': subdata[0],
                            'type': subdata[1],
                            'kind': subdata[2],
                            'spin': subdata[3],
                            'new_type': subdata[4],
                            'new_kind': subdata[5],
                            'value': subdata[6],
                        })
                    else:
                        parsed = True

            # Each matrix header closes the previous block and opens the next
            if 'chi0 matrix' in line:
                blocks['chi0'][0] = line_number + 1

            if 'chi1 matrix' in line:
                blocks['chi0'][1] = line_number
                blocks['chi1'][0] = line_number + 1

            if 'chi0^{-1} matrix' in line:
                blocks['chi1'][1] = line_number
                blocks['chi0_inv'][0] = line_number + 1

            if 'chi1^{-1} matrix' in line:
                blocks['chi0_inv'][1] = line_number
                blocks['chi1_inv'][0] = line_number + 1

            if 'Hubbard matrix' in line:
                blocks['chi1_inv'][1] = line_number
                blocks['hubbard'][0] = line_number + 1
                blocks['hubbard'][1] = len(data)
                break

        if not all(sum(blocks.values(), [])):
            raise OutputParsingError("could not determine beginning and end of all matrix blocks in '{}'"
                .format(os.path.basename(filepath)))

        for matrix_name in ('chi0', 'chi1', 'chi0_inv', 'chi1_inv', 'hubbard'):
            matrix_block = blocks[matrix_name]
            matrix_data = data[matrix_block[0]:matrix_block[1]]
            matrix = self.parse_hubbard_matrix(matrix_data)

            if len(set(matrix.shape)) != 1:
                raise OutputParsingError("the matrix '{}' in '{}' is not square but has shape {}"
                    .format(matrix_name, os.path.basename(filepath), matrix.shape))

            result[matrix_name] = matrix

        return result

    def parse_hubbard_matrix(self, data):
        """
        Utility function to parse one of the matrices that are written to the {prefix}.Hubbard_U.dat
        file by a HpCalculation. Each matrix should be square of size N, which is given by the product
        of the number of q-points and the number of Hubbard species

        Each matrix row is printed with a maximum number of 8 elements per line and each line is followed
        by an empty line. In the parsing of the data, we will use the empty line to detect the end of
        the current matrix row

        :param data: a list of strings representing lines in the Hubbard_U.dat file of a certain matrix
        :returns: square numpy matrix of floats representing the parsed matrix
        """
        matrix = []
        row = []

        for line in data:
            if line.strip():
                for f in line.split():
                    row.append(float(f))
            else:
                if row:
                    matrix.append(row)
                row = []

        # Flush the final row: the data may not end with a blank separator line,
        # in which case the last row would otherwise be silently dropped
        if row:
            matrix.append(row)

        return numpy.matrix(matrix)
| 39.296 | 114 | 0.591612 |
ace902fdb656fdd2dc70e2dd662cf8285394a7fb | 154 | py | Python | djangoFiles/oldrepo/apps.py | silvrwolfboy/theJekyllProject | e36aa6605e762d8b14277e636322096d19455aa9 | [
"MIT"
] | 20 | 2017-09-29T20:00:33.000Z | 2021-11-08T15:01:40.000Z | djangoFiles/oldrepo/apps.py | silvrwolfboy/theJekyllProject | e36aa6605e762d8b14277e636322096d19455aa9 | [
"MIT"
] | 75 | 2017-10-03T12:32:05.000Z | 2022-01-13T00:44:15.000Z | djangoFiles/oldrepo/apps.py | singh1114/theJekyllProject | e36aa6605e762d8b14277e636322096d19455aa9 | [
"MIT"
] | 14 | 2017-12-29T12:32:08.000Z | 2020-03-06T12:42:19.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class OldRepoConfig(AppConfig):
name = 'oldrepo'
| 17.111111 | 39 | 0.733766 |
ace903023e362013b5bb72e43bee726b5132dbe8 | 975 | py | Python | insta/urls.py | Calebu6214/Instagram-clone | 4d13920e662e75280884bd8ef6d073fc57390d5b | [
"MIT"
] | null | null | null | insta/urls.py | Calebu6214/Instagram-clone | 4d13920e662e75280884bd8ef6d073fc57390d5b | [
"MIT"
] | null | null | null | insta/urls.py | Calebu6214/Instagram-clone | 4d13920e662e75280884bd8ef6d073fc57390d5b | [
"MIT"
] | null | null | null | from django.urls import path
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the Instagram-clone app. Names are referenced by templates
# and reverse() calls, so they must not change.
urlpatterns = [
    path('', views.indexPage, name='home'),
    path('profile/', views.profile, name='profile'),
    url('edit/', views.edit_profile, name='edit_profile'),
    url('user/', views.search_username, name='search_username'),
    url('image/', views.upload_image, name='upload_image'),
    # Raw strings avoid invalid-escape warnings for the "\d" regex groups.
    url(r'likes/(\d+)/', views.image_likes, name='likes'),
    url(r'new_comment/(\d+)/', views.add_comment, name='Comments'),
    path('post/', views.create_post, name='create_post'),
    path('delete/<int:id>', views.delete_image, name='deletepost'),
    path('registration/', views.registerPage, name="registration"),
    path('login/', views.loginPage, name="login"),
    path('logout/', views.logoutUser, name="logout"),
]

# Serve user-uploaded media through Django only while developing.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
ace90534c6a4c2676f7214b9c296350438c8cda2 | 2,381 | py | Python | tailf/__init__.py | shadowleaves/acr | 7731b37d3049a1cf1971861d6ab831c03c966d48 | [
"MIT"
] | null | null | null | tailf/__init__.py | shadowleaves/acr | 7731b37d3049a1cf1971861d6ab831c03c966d48 | [
"MIT"
] | null | null | null | tailf/__init__.py | shadowleaves/acr | 7731b37d3049a1cf1971861d6ab831c03c966d48 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import os
import fcntl
import subprocess
from optparse import OptionParser
import select
# Mwa-ha-ha, this is easiest way. Hardly portable to windowz, but who cares?
TAILF_COMMAND = ['/usr/bin/tail', '-F', '-n']
def tailf_init(filename, start_count=0):
    """Spawn ``tail -F -n <start_count> <filename>`` with non-blocking pipes.

    ``start_count`` now defaults to 0 so callers may omit it: the retry path
    in ``tailf`` calls ``tailf_init(filename)`` with a single argument, which
    previously raised a TypeError.

    :param filename: path of the file to follow
    :param start_count: number of trailing lines to emit immediately
    :returns: the running ``subprocess.Popen`` object
    """
    process = subprocess.Popen(
        TAILF_COMMAND + [str(start_count), filename],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Switch both pipes to non-blocking mode so the select()-driven reads in
    # tailf() never stall the loop.
    fl = fcntl.fcntl(process.stdout, fcntl.F_GETFL)
    fcntl.fcntl(process.stdout, fcntl.F_SETFL, fl | os.O_NONBLOCK)

    fl = fcntl.fcntl(process.stderr, fcntl.F_GETFL)
    fcntl.fcntl(process.stderr, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    return process
def tailf(filename, start_count=0, ignore_stderr=True):
    """Generator yielding complete lines appended to ``filename``.

    Wraps ``tail -F`` (via ``tailf_init``) and multiplexes its stdout and
    stderr with ``select``.  When ``ignore_stderr`` is true only stdout
    lines are yielded; otherwise ``(line, None)`` / ``(None, stderr_text)``
    tuples are produced.  On a pipe error the tail process is respawned.
    """
    process = tailf_init(filename, start_count)

    buf = ''
    while True:
        # The 0.1 s timeout keeps the loop responsive without busy-waiting.
        reads, writes, errors = select.select([process.stdout, process.stderr], [], [
                                              process.stdout, process.stderr], 0.1)
        if process.stdout in reads:
            buf += process.stdout.read()
            lines = buf.split('\n')
            if lines[-1] == '':
                # whole line received
                buf = ''
            else:
                # Keep the trailing partial line buffered until it completes.
                buf = lines[-1]
            lines = lines[:-1]
            if lines:
                for line in lines:
                    if ignore_stderr:
                        yield line
                    else:
                        yield (line, None)
        if process.stderr in reads:
            stderr_input = process.stderr.read()
            if not ignore_stderr:
                yield (None, stderr_input)
        if process.stderr in errors or process.stdout in errors:
            print "Error received. Errors: ", errors
            # NOTE(review): tailf_init is called here without start_count --
            # verify that tailf_init provides a default for that parameter.
            process = tailf_init(filename)
if __name__ == "__main__":
    # Command-line entry point: follow a single file and echo new lines.
    parser = OptionParser(
        usage=u"%prog <filename>\nWatch for file tail (with retry) and process all incoming data")
    # NOTE(review): optparse substitutes "%default" (lowercase); "%DEFAULT"
    # is printed literally in the help text -- confirm this is intended.
    parser.add_option("-n", "--lines", dest="start_count", type="int",
                      default=0, help="Output last N lines (default: %DEFAULT)")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("Please provide filename as argument")
    for line in tailf(args[0], options.start_count):
        print line
| 29.7625 | 98 | 0.585468 |
ace905bc03ddc570e8faf40878f1b309b3678b6b | 3,353 | py | Python | finicityapi/models/voie_payroll_report_constraints.py | monarchmoney/finicity-python | b2ab1ded435db75c78d42261f5e4acd2a3061487 | [
"MIT"
] | null | null | null | finicityapi/models/voie_payroll_report_constraints.py | monarchmoney/finicity-python | b2ab1ded435db75c78d42261f5e4acd2a3061487 | [
"MIT"
] | null | null | null | finicityapi/models/voie_payroll_report_constraints.py | monarchmoney/finicity-python | b2ab1ded435db75c78d42261f5e4acd2a3061487 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import finicityapi.models.payroll_provider_data
import finicityapi.models.report_custom_field
class VOIEPayrollReportConstraints(object):

    """Implementation of the 'VOIE - Payroll Report Constraints' model.

    Attributes:
        payroll_provider_data (list of PayrollProviderData): TODO: type
            description here.
        report_custom_fields (list of ReportCustomField): Designate up to 5
            custom fields that you would like associated with the report upon
            generation by providing a label for the field and a value for the
            field. Set the shown variable to true if you want the custom field
            to display in the PDF reports. Set the shown variable to false to
            limit seeing the variable to JSON, XML report but not in the PDF
            report. All custom fields will display in the Reseller Billing
            endpoint. This is optional.
        additional_properties (dict): any extra key/value pairs found in the
            source dictionary that are not part of the declared model.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "payroll_provider_data": 'payrollProviderData',
        "report_custom_fields": 'reportCustomFields'
    }

    def __init__(self,
                 payroll_provider_data=None,
                 report_custom_fields=None,
                 additional_properties=None):
        """Constructor for the VOIEPayrollReportConstraints class"""

        # Initialize members of the class
        self.payroll_provider_data = payroll_provider_data
        self.report_custom_fields = report_custom_fields

        # Add additional model properties to the instance. A fresh dict is
        # created when none is supplied: the previous mutable default ``={}``
        # was a single object shared across every instance.
        self.additional_properties = {} if additional_properties is None else additional_properties

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
                obtained from the deserialization of the server's response. The keys
                MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        payroll_provider_data = None
        if dictionary.get('payrollProviderData') is not None:
            payroll_provider_data = [
                finicityapi.models.payroll_provider_data.PayrollProviderData.from_dictionary(structure)
                for structure in dictionary.get('payrollProviderData')
            ]
        report_custom_fields = None
        if dictionary.get('reportCustomFields') is not None:
            report_custom_fields = [
                finicityapi.models.report_custom_field.ReportCustomField.from_dictionary(structure)
                for structure in dictionary.get('reportCustomFields')
            ]

        # Clean out expected properties from dictionary. This mutates the
        # caller's dict on purpose: whatever remains becomes the instance's
        # additional_properties.
        for key in cls._names.values():
            if key in dictionary:
                del dictionary[key]

        # Return an object of this model
        return cls(payroll_provider_data,
                   report_custom_fields,
                   dictionary)
| 38.988372 | 134 | 0.64748 |
ace906255d0222165342ddb56225c2e8ed32a327 | 4,147 | py | Python | pytorch_classification/MobileNet/model_v2.py | cilinyan/image-processing | 7375bdf4af0ead415e5a957e6ac57d517de37d58 | [
"MIT"
] | null | null | null | pytorch_classification/MobileNet/model_v2.py | cilinyan/image-processing | 7375bdf4af0ead415e5a957e6ac57d517de37d58 | [
"MIT"
] | null | null | null | pytorch_classification/MobileNet/model_v2.py | cilinyan/image-processing | 7375bdf4af0ead415e5a957e6ac57d517de37d58 | [
"MIT"
] | null | null | null | import torch
from torch import nn
def _make_divisible(ch, divisor=8, min_ch=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_ch is None:
min_ch = divisor
new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_ch < 0.9 * ch:
new_ch += divisor
return new_ch
class ConvBNReLU(nn.Sequential):
    """Bias-free Conv2d -> BatchNorm2d -> ReLU6 with 'same'-style padding."""

    def __init__(self, in_channel, out_channel, kernel_size=3, stride=1, groups=1):
        # Padding that preserves spatial size for odd kernels at stride 1.
        same_pad = (kernel_size - 1) // 2
        layers = (
            nn.Conv2d(in_channel, out_channel, kernel_size, stride, same_pad,
                      groups=groups, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.ReLU6(inplace=True),
        )
        super(ConvBNReLU, self).__init__(*layers)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project.

    A residual shortcut is used only when the block keeps both the spatial
    resolution (stride 1) and the channel count unchanged.
    """

    def __init__(self, in_channel, out_channel, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        expanded = in_channel * expand_ratio
        self.use_shortcut = stride == 1 and in_channel == out_channel

        stages = []
        # The initial 1x1 expansion is skipped when it would be a no-op.
        if expand_ratio != 1:
            stages.append(ConvBNReLU(in_channel, expanded, kernel_size=1))
        # Depthwise 3x3 convolution (groups == channel count).
        stages.append(ConvBNReLU(expanded, expanded, stride=stride, groups=expanded))
        # Linear 1x1 projection: no activation after the bottleneck.
        stages.append(nn.Conv2d(expanded, out_channel, kernel_size=1, bias=False))
        stages.append(nn.BatchNorm2d(out_channel))
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_shortcut else out
class MobileNetV2(nn.Module):
    """MobileNetV2 classifier (Sandler et al., https://arxiv.org/abs/1801.04381).

    Args:
        num_classes: size of the final linear layer.
        alpha: width multiplier scaling every channel count.
        round_nearest: channel counts are rounded to multiples of this value.
    """

    def __init__(self, num_classes=1000, alpha=1.0, round_nearest=8):
        super(MobileNetV2, self).__init__()
        input_channel = _make_divisible(32 * alpha, round_nearest)
        last_channel = _make_divisible(1280 * alpha, round_nearest)

        # Per stage: t = expansion factor, c = output channels,
        # n = number of repeated blocks, s = stride of the first block.
        inverted_residual_setting = [
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        # Stem conv, then the inverted-residual stages, then the head conv.
        layers = [ConvBNReLU(3, input_channel, stride=2)]
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * alpha, round_nearest)
            for idx in range(n):
                # Only the first block of a stage downsamples; the rest
                # keep stride 1.
                stage_stride = s if idx == 0 else 1
                layers.append(InvertedResidual(input_channel, output_channel,
                                               stage_stride, expand_ratio=t))
                input_channel = output_channel
        layers.append(ConvBNReLU(input_channel, last_channel, 1))
        self.features = nn.Sequential(*layers)

        # Classifier head: global pool -> dropout -> linear.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(last_channel, num_classes),
        )

        self._init_weights()

    def _init_weights(self):
        """Standard MobileNetV2 weight initialization."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        return self.classifier(x)
| 35.444444 | 104 | 0.590789 |
ace90634b7e4c85facb976fb195c5825115b2f6b | 4,011 | py | Python | api/tacticalrmm/clients/serializers.py | v2cloud/tacticalrmm | 12f599f9749985f66ff9b559c5e5abd36064b182 | [
"MIT"
] | null | null | null | api/tacticalrmm/clients/serializers.py | v2cloud/tacticalrmm | 12f599f9749985f66ff9b559c5e5abd36064b182 | [
"MIT"
] | null | null | null | api/tacticalrmm/clients/serializers.py | v2cloud/tacticalrmm | 12f599f9749985f66ff9b559c5e5abd36064b182 | [
"MIT"
] | null | null | null | from rest_framework.serializers import (
ModelSerializer,
ReadOnlyField,
SerializerMethodField,
ValidationError,
)
from .models import Client, ClientCustomField, Deployment, Site, SiteCustomField
class SiteCustomFieldSerializer(ModelSerializer):
    """Serializer for a single SiteCustomField value.

    The typed storage columns are write-only: clients submit one of
    string/bool/multiple, and read back the combined ``value``.
    """

    class Meta:
        model = SiteCustomField
        fields = (
            "id",
            "field",
            "site",
            "value",
            "string_value",
            "bool_value",
            "multiple_value",
        )
        # Raw per-type columns are accepted on write but never serialized out.
        extra_kwargs = {
            "string_value": {"write_only": True},
            "bool_value": {"write_only": True},
            "multiple_value": {"write_only": True},
        }
class SiteSerializer(ModelSerializer):
    """Full site representation.

    Adds read-only rollups (parent client name, custom fields, maintenance
    mode, agent count) on top of the model fields.
    """

    client_name = ReadOnlyField(source="client.name")
    custom_fields = SiteCustomFieldSerializer(many=True, read_only=True)
    maintenance_mode = ReadOnlyField()
    agent_count = ReadOnlyField()

    class Meta:
        model = Site
        fields = (
            "id",
            "name",
            "server_policy",
            "workstation_policy",
            "alert_template",
            "client_name",
            "client",
            "custom_fields",
            "agent_count",
            "block_policy_inheritance",
            "maintenance_mode",
            "failing_checks",
        )

    def validate(self, val):
        """Reject site names containing '|' (reserved as a delimiter).

        :raises ValidationError: if the submitted name contains ``|``.
        """
        # Membership test works directly on the dict; ``.keys()`` was redundant.
        if "name" in val and "|" in val["name"]:
            raise ValidationError("Site name cannot contain the | character")
        return val
class SiteMinimumSerializer(ModelSerializer):
    """Lightweight site serializer: every model field plus the client name."""

    client_name = ReadOnlyField(source="client.name")

    class Meta:
        model = Site
        fields = "__all__"
class ClientMinimumSerializer(ModelSerializer):
    """Lightweight client serializer exposing every model field unchanged."""

    class Meta:
        model = Client
        fields = "__all__"
class ClientCustomFieldSerializer(ModelSerializer):
    """Serializer for a single ClientCustomField value.

    Mirrors SiteCustomFieldSerializer: the typed storage columns are
    write-only and clients read back the combined ``value``.
    """

    class Meta:
        model = ClientCustomField
        fields = (
            "id",
            "field",
            "client",
            "value",
            "string_value",
            "bool_value",
            "multiple_value",
        )
        # Raw per-type columns are accepted on write but never serialized out.
        extra_kwargs = {
            "string_value": {"write_only": True},
            "bool_value": {"write_only": True},
            "multiple_value": {"write_only": True},
        }
class ClientSerializer(ModelSerializer):
    """Full client representation, including its (filtered) nested sites.

    Adds read-only rollups (custom fields, maintenance mode, agent count)
    and a method field that serializes ``obj.filtered_sites``.
    """

    sites = SerializerMethodField()
    custom_fields = ClientCustomFieldSerializer(many=True, read_only=True)
    maintenance_mode = ReadOnlyField()
    agent_count = ReadOnlyField()

    def get_sites(self, obj):
        """Serialize the pre-filtered site queryset attached to the client."""
        return SiteSerializer(
            obj.filtered_sites,
            many=True,
        ).data

    class Meta:
        model = Client
        fields = (
            "id",
            "name",
            "server_policy",
            "workstation_policy",
            "alert_template",
            "block_policy_inheritance",
            "sites",
            "custom_fields",
            "agent_count",
            "maintenance_mode",
            "failing_checks",
        )

    def validate(self, val):
        """Reject client names containing '|' (reserved as a delimiter).

        :raises ValidationError: if the submitted name contains ``|``.
        """
        # Membership test works directly on the dict; ``.keys()`` was redundant.
        if "name" in val and "|" in val["name"]:
            raise ValidationError("Client name cannot contain the | character")
        return val
class DeploymentSerializer(ModelSerializer):
    """Agent deployment record flattened with its client/site ids and names."""

    client_id = ReadOnlyField(source="client.id")
    site_id = ReadOnlyField(source="site.id")
    client_name = ReadOnlyField(source="client.name")
    site_name = ReadOnlyField(source="site.name")

    class Meta:
        model = Deployment
        fields = [
            "id",
            "uid",
            "client_id",
            "site_id",
            "client_name",
            "site_name",
            "mon_type",
            "arch",
            "expiry",
            "install_flags",
            "created",
        ]
class SiteAuditSerializer(ModelSerializer):
    """Serializer used for audit-log snapshots of a Site (all fields)."""

    class Meta:
        model = Site
        fields = "__all__"
class ClientAuditSerializer(ModelSerializer):
    """Serializer used for audit-log snapshots of a Client (all fields)."""

    class Meta:
        model = Client
        fields = "__all__"
| 24.913043 | 80 | 0.552979 |
ace90646f4fcf9a5722e8d55cf55e8a63de8d1e6 | 1,884 | py | Python | tests/test_compile.py | ApeWorX/ape-cairo | 8084ed3efb2a5d134643ed94c3e0820fa3de6ddc | [
"Apache-2.0"
] | null | null | null | tests/test_compile.py | ApeWorX/ape-cairo | 8084ed3efb2a5d134643ed94c3e0820fa3de6ddc | [
"Apache-2.0"
] | null | null | null | tests/test_compile.py | ApeWorX/ape-cairo | 8084ed3efb2a5d134643ed94c3e0820fa3de6ddc | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from tests.conftest import SOURCE_CODE_DIRECTORY, SOURCE_FILES
def test_compile_all_files(compiler, project):
    """Compiling every source at once registers each contract type, and
    compiling the same set a second time also succeeds."""
    paths = [SOURCE_CODE_DIRECTORY / name for name in SOURCE_FILES]
    compiler.compile(paths, base_path=SOURCE_CODE_DIRECTORY)

    for name in SOURCE_FILES:
        type_name = get_expected_contract_type_name(name)
        assert project.get_contract(type_name)
        assert getattr(project, type_name)

    # Recompiling the same set must not raise.
    compiler.compile(paths, base_path=SOURCE_CODE_DIRECTORY)

    # Namespaced contracts are reachable through attribute (dot) access.
    assert project.namespace0.library
    assert project.namespace1.library
def test_compile_individual_files(compiler, contract, project):
    """A single source file compiles on its own, registers its contract
    type, and compiles again without error."""
    compiler.compile([contract], base_path=SOURCE_CODE_DIRECTORY)

    type_name = get_expected_contract_type_name(contract)
    assert project.get_contract(type_name)
    assert getattr(project, type_name)

    # Recompiling the same file must not raise.
    compiler.compile([contract], base_path=SOURCE_CODE_DIRECTORY)
def test_event_abi_migration(compiler):
    """The proxy library's event is migrated into the contract ABI with a
    single non-indexed felt input."""
    source = SOURCE_CODE_DIRECTORY / "oz_proxy_lib.cairo"
    contract_type = compiler.compile([source], base_path=SOURCE_CODE_DIRECTORY)[0]

    events = [abi for abi in contract_type.abi if abi.type == "event"]
    event = events[0]
    assert len(event.inputs) == 1
    event_input = event.inputs[0]
    assert event_input.name == "implementation"
    assert event_input.type == "felt"
    assert not event_input.indexed
def get_expected_contract_type_name(contract_path: Path) -> str:
    """
    Converts paths like Path("path/to/base_dir/namespace/library.cairo") -> "namespace.library".
    """
    # Drop the source-root prefix, then the Cairo extension.
    relative = str(contract_path).replace(str(SOURCE_CODE_DIRECTORY), "")
    without_suffix = relative.replace(".cairo", "")
    # Remaining path segments become dot-separated namespace components.
    return without_suffix.strip("/").replace("/", ".")
| 35.54717 | 96 | 0.733015 |
ace9065bf95beff5cf7eff4b45f6f2cc55c40754 | 30,227 | py | Python | azure-mgmt-batch/azure/mgmt/batch/operations/pool_operations.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-batch/azure/mgmt/batch/operations/pool_operations.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-batch/azure/mgmt/batch/operations/pool_operations.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class PoolOperations(object):
    """PoolOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version to be used with the HTTP request. Constant value: "2017-09-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned per-client API version; sent as a query parameter on every request.
        self.api_version = "2017-09-01"

        self.config = config

    def list_by_batch_account(
            self, resource_group_name, account_name, maxresults=None, select=None, filter=None, custom_headers=None, raw=False, **operation_config):
        """Lists all of the pools in the specified account.

        :param resource_group_name: The name of the resource group that
         contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param maxresults: The maximum number of items to return in the
         response.
        :type maxresults: int
        :param select: Comma separated list of properties that should be
         returned. e.g. "properties/provisioningState". Only top level
         properties under properties/ are valid for selection.
        :type select: str
        :param filter: OData filter expression. Valid properties for filtering
         are:
         name
         properties/allocationState
         properties/allocationStateTransitionTime
         properties/creationTime
         properties/provisioningState
         properties/provisioningStateTransitionTime
         properties/lastModified
         properties/vmSize
         properties/interNodeCommunication
         properties/scaleSettings/autoScale
         properties/scaleSettings/fixedScale
        :type filter: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Pool
        :rtype:
         ~azure.mgmt.batch.models.PoolPaged[~azure.mgmt.batch.models.Pool]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Page fetcher driven lazily by PoolPaged: the first call builds the
        # full URL; later calls follow the service-provided next_link verbatim.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters (optional OData query options).
                query_parameters = {}
                if maxresults is not None:
                    query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                # next_link is an absolute URL that already carries its query string.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response: PoolPaged invokes internal_paging as the
        # caller iterates.
        deserialized = models.PoolPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.PoolPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def create(
            self, resource_group_name, account_name, pool_name, parameters, if_match=None, if_none_match=None, custom_headers=None, raw=False, **operation_config):
        """Creates a new pool inside the specified account.

        :param resource_group_name: The name of the resource group that
         contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param pool_name: The pool name. This must be unique within the
         account.
        :type pool_name: str
        :param parameters: Additional parameters for pool creation.
        :type parameters: ~azure.mgmt.batch.models.Pool
        :param if_match: The entity state (ETag) version of the pool to
         update. A value of "*" can be used to apply the operation only if the
         pool already exists. If omitted, this operation will always be
         applied.
        :type if_match: str
        :param if_none_match: Set to '*' to allow a new pool to be created,
         but to prevent updating an existing pool. Other values will be
         ignored.
        :type if_none_match: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns Pool or
         ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.batch.models.Pool]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers. If-Match / If-None-Match implement optimistic
        # concurrency against the pool's ETag.
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'Pool')

        # Construct and send request. Pool creation is a long-running
        # operation: the initial PUT and the status polling are wrapped in
        # closures consumed by AzureOperationPoller.
        def long_running_send():
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the Azure-provided status link until a terminal state.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None
            header_dict = {}

            if response.status_code == 200:
                deserialized = self._deserialize('Pool', response)
                # ETag is surfaced on raw responses for concurrency control.
                header_dict = {
                    'ETag': 'str',
                }

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                client_raw_response.add_headers(header_dict)
                return client_raw_response

            return deserialized

        if raw:
            # raw=True skips polling and returns the initial response as-is.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def update(
            self, resource_group_name, account_name, pool_name, parameters, if_match=None, custom_headers=None, raw=False, **operation_config):
        """Updates the properties of an existing pool.

        :param resource_group_name: The name of the resource group that
         contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param pool_name: The pool name. This must be unique within the
         account.
        :type pool_name: str
        :param parameters: Pool properties that should be updated. Properties
         that are supplied will be updated, any property not supplied will be
         unchanged.
        :type parameters: ~azure.mgmt.batch.models.Pool
        :param if_match: The entity state (ETag) version of the pool to
         update. This value can be omitted or set to "*" to apply the operation
         unconditionally.
        :type if_match: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Pool or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.batch.models.Pool or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'Pool')

        # Construct and send request. PATCH is synchronous (no poller).
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        header_dict = {}

        if response.status_code == 200:
            deserialized = self._deserialize('Pool', response)
            header_dict = {
                'ETag': 'str',
            }

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            client_raw_response.add_headers(header_dict)
            return client_raw_response

        return deserialized

    def delete(
            self, resource_group_name, account_name, pool_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified pool.

        :param resource_group_name: The name of the resource group that
         contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param pool_name: The pool name. This must be unique within the
         account.
        :type pool_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns None or
         ClientRawResponse if raw=true
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request. Deletion is a long-running operation
        # with no body in the final response.
        def long_running_send():
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):

            # 200/204: already done; 202: accepted, still in progress.
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            if raw:
                client_raw_response = ClientRawResponse(None, response)
                # Location/Retry-After describe where and when to poll.
                client_raw_response.add_headers({
                    'Location': 'str',
                    'Retry-After': 'int',
                })
                return client_raw_response

        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def get(
            self, resource_group_name, account_name, pool_name, custom_headers=None, raw=False, **operation_config):
        """Gets information about the specified pool.

        :param resource_group_name: The name of the resource group that
         contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param pool_name: The pool name. This must be unique within the
         account.
        :type pool_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Pool or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.batch.models.Pool or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        header_dict = {}

        if response.status_code == 200:
            deserialized = self._deserialize('Pool', response)
            header_dict = {
                'ETag': 'str',
            }

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            client_raw_response.add_headers(header_dict)
            return client_raw_response

        return deserialized

    def disable_auto_scale(
            self, resource_group_name, account_name, pool_name, custom_headers=None, raw=False, **operation_config):
        """Disables automatic scaling for a pool.

        :param resource_group_name: The name of the resource group that
         contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param pool_name: The pool name. This must be unique within the
         account.
        :type pool_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Pool or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.batch.models.Pool or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/disableAutoScale'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request (POST action endpoint, no body).
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        header_dict = {}

        if response.status_code == 200:
            deserialized = self._deserialize('Pool', response)
            header_dict = {
                'ETag': 'str',
            }

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            client_raw_response.add_headers(header_dict)
            return client_raw_response

        return deserialized

    def stop_resize(
            self, resource_group_name, account_name, pool_name, custom_headers=None, raw=False, **operation_config):
        """Stops an ongoing resize operation on the pool.

        This does not restore the pool to its previous state before the resize
        operation: it only stops any further changes being made, and the pool
        maintains its current state. After stopping, the pool stabilizes at the
        number of nodes it was at when the stop operation was done. During the
        stop operation, the pool allocation state changes first to stopping and
        then to steady. A resize operation need not be an explicit resize pool
        request; this API can also be used to halt the initial sizing of the
        pool when it is created.

        :param resource_group_name: The name of the resource group that
         contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param pool_name: The pool name. This must be unique within the
         account.
        :type pool_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Pool or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.batch.models.Pool or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/stopResize'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request (POST action endpoint, no body).
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        header_dict = {}

        if response.status_code == 200:
            deserialized = self._deserialize('Pool', response)
            header_dict = {
                'ETag': 'str',
            }

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            client_raw_response.add_headers(header_dict)
            return client_raw_response

        return deserialized
| 47.009331 | 170 | 0.653522 |
ace9068f2f3f9d350d2a0b9a7f2cc6a558a163fe | 2,210 | py | Python | src/z3_solver.py | oulkaid/Sudoku-SAT-Solver | a26f6fad10a7f36d40fe88697e132248286fc562 | [
"MIT"
] | null | null | null | src/z3_solver.py | oulkaid/Sudoku-SAT-Solver | a26f6fad10a7f36d40fe88697e132248286fc562 | [
"MIT"
] | null | null | null | src/z3_solver.py | oulkaid/Sudoku-SAT-Solver | a26f6fad10a7f36d40fe88697e132248286fc562 | [
"MIT"
] | null | null | null | from z3 import *
from math import sqrt
import sys
from utils import tools
# Read the puzzle file given on the command line.
with open(sys.argv[1], 'r') as f:
    lines = f.readlines()
n, grid = tools.input_parser(lines)
print(">> Problem")
tools.print_grid(grid, n)
# Size of one sub-box (e.g. 3 for a 9x9 sudoku).  Hoisted out of the
# constraint loops instead of recomputing int(sqrt(n)) on every access.
box_size = int(sqrt(n))
# Create a Solver Object
s = Solver()
# One integer variable per cell, in row-major order.
sol = [Int(f"s{tools.int_to_hex(i)}_{tools.int_to_hex(j)}") for i in range(n) for j in range(n)]
# Pin the pre-filled cells of the puzzle (0 means "empty cell").
grid_elements = [If(grid[i // n][i % n] == 0,
                    True,
                    sol[i] == grid[i // n][i % n])
                 for i in range(n * n)]
s.add(grid_elements)
# `sol` values must be contained in [1,n]
valid_range_val = [And(1 <= sol[i], sol[i] <= n) for i in range(n * n)]
s.add(valid_range_val)
# Distinct values in each line
dist_line_val = [If(i == ii,
                    True,
                    sol[i + n * line] != sol[ii + n * line])
                 for line in range(n) for i in range(n) for ii in range(n)]
s.add(dist_line_val)
# Distinct values in each column
dist_col_val = [If(j == jj,
                   True,
                   sol[n * j + col] != sol[n * jj + col])
                for col in range(n) for j in range(n) for jj in range(n)]
s.add(dist_col_val)
# Distinct values in each box.  box_index holds the linear index of the
# top-left cell of every box.
box_index = []
for box_i in range(box_size):
    for box_j in range(box_size):
        box_index.append(n * box_size * box_i + box_size * box_j)
for box in range(n):
    for item in range(n):
        for ii in range(item + 1, n):
            # Floor division replaces the int(x / y) idiom; identical for
            # the non-negative indices used here.
            s.add(
                sol[box_index[box] + ii % box_size + n * (ii // box_size)] !=
                sol[box_index[box] + item % box_size + n * (item // box_size)])
# Check satisfiability
satResult = s.check()
print(satResult)
# If satisfiable, print the solution (model values sorted by cell name).
if satResult == z3.sat:
    m = s.model()
    m_sorted_tmp = sorted([(d, m[d]) for d in m], key=lambda x: str(x[0]))
    m_sorted = [m_sorted_tmp[i][1] for i in range(n * n)]
    print("\n>> Solution")
    tools.print_solution(m_sorted, n)
| 30.694444 | 98 | 0.552941 |
ace90736a5fbf56b06c3faddb71c74cedc7a607e | 9,089 | py | Python | tests/functional/test_apple_events.py | memo-off/pyinstaller | de511a1c602d0eaf534e0a8e941108891003bac8 | [
"Apache-2.0"
] | 2 | 2017-02-08T22:22:09.000Z | 2020-10-08T12:28:36.000Z | tests/functional/test_apple_events.py | memo-off/pyinstaller | de511a1c602d0eaf534e0a8e941108891003bac8 | [
"Apache-2.0"
] | 3 | 2020-04-06T15:48:37.000Z | 2021-03-23T10:22:21.000Z | tests/functional/test_apple_events.py | memo-off/pyinstaller | de511a1c602d0eaf534e0a8e941108891003bac8 | [
"Apache-2.0"
] | 4 | 2018-06-04T20:40:37.000Z | 2020-10-13T22:38:40.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2005-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
OSX-specific test to check handling AppleEvents by bootloader
"""
# Library imports
# ---------------
import json
import os
import subprocess
import time
# Third-party imports
# -------------------
import pytest
# Local imports
# -------------
from PyInstaller.utils.tests import importorskip
@pytest.mark.darwin
def test_osx_custom_protocol_handler(tmpdir, pyi_builder_spec):
    """Verify that the bundled app, registered as a custom URL-scheme
    handler, receives the opened URL as its last command-line argument."""
    tmpdir = str(tmpdir) # Fix for Python 3.5
    dist_dir = os.path.join(tmpdir, 'dist')
    bundle_path = os.path.join(dist_dir, 'pyi_osx_custom_protocol_handler.app')
    log_path = os.path.join(dist_dir, 'args.log')
    # Use a unique URL scheme so repeated/parallel runs cannot collide.
    scheme = "pyi-test-%i" % time.time()
    os.environ["PYI_CUSTOM_URL_SCHEME"] = scheme
    pyi_builder_spec.test_spec('pyi_osx_custom_protocol_handler.spec')
    # Launching the bundle once via 'open' registers the protocol handler.
    subprocess.check_call(['open', bundle_path])
    # 'open' spawns the app in a separate process; give it time to finish.
    time.sleep(5)
    # Now trigger the handler through the custom URL scheme.
    target_url = scheme + "://url-args"
    subprocess.check_call(['open', target_url])
    # Wait for that second invocation to finish as well.
    time.sleep(5)
    assert os.path.exists(log_path), 'Missing args logfile'
    with open(log_path, 'r') as fh:
        logged = fh.readlines()
    assert logged and logged[-1] == target_url, 'Invalid arg appended'
@pytest.mark.darwin
@importorskip('PyQt5')
def test_osx_event_forwarding(tmpdir, pyi_builder_spec):
    """
    End-to-end check of AppleEvent handling by the bootloader: file-open
    events must be rewritten into plain argv paths at startup, and URL /
    re-open (activation) events must be forwarded to the already-running
    app.  Progress is observed by polling the events.log the app writes.
    """
    tmpdir = str(tmpdir) # Fix for Python 3.5
    app_path = os.path.join(tmpdir, 'dist',
                            'pyi_osx_event_forwarding.app')
    logfile_path = os.path.join(tmpdir, 'dist', 'events.log')
    # Generate unique URL scheme & file ext to avoid collisions
    unique_key = int(time.time())
    custom_url_scheme = "pyi-test-%i" % unique_key
    custom_file_ext = 'pyi_test_%i' % unique_key
    os.environ["PYI_CUSTOM_URL_SCHEME"] = custom_url_scheme
    os.environ["PYI_CUSTOM_FILE_EXT"] = custom_file_ext
    # test_script builds the app then implicitly runs the script, so we
    # pass arg "0" to tell the built script to exit right away here.
    pyi_builder_spec.test_spec('pyi_osx_event_forwarding.spec',
                               app_args=["0"])
    timeout = 60.0 # Give up after 60 seconds
    polltime = 0.25 # Poll events.log every 250ms
    # Closure over logfile_path/timeout: polls until the app logs its
    # "started" line, then returns the argv list it logged (None on timeout).
    def wait_for_started():
        t0 = time.time() # mark start time
        # Poll logfile for app to be started (it writes "started" to the first
        # log line)
        while True:
            elapsed = time.time() - t0
            if elapsed > timeout:
                return
            if os.path.exists(logfile_path):
                with open(logfile_path) as fh:
                    log_lines = fh.readlines()
                if log_lines:
                    first = log_lines[0]
                    assert first.startswith('started '), \
                        "Unexpected line in log file"
                    # Now, parse the logged args
                    # e.g. 'started {"argv": ["Arg1, ...]}'
                    dd = json.loads(first.split(" ", 1)[-1])
                    assert 'argv' in dd, "First line missing argv"
                    return dd['argv'] # it started ok, abort loop
            else:
                # Try again later
                time.sleep(polltime)
    # wait for the app started for us by test_spec to exit
    assert wait_for_started(), "App did not start"
    time.sleep(2) # presumably app has exited after 2 seconds
    # clean up the log file created by test_spec() running the app
    os.remove(logfile_path)
    # Run using 'open', passing a 0-timeout as an arg.
    # macOS will auto-register the custom protocol handler and extension
    # association. Then app will quit immediately due to the "0" arg.
    subprocess.check_call(['open', app_path, '--args', "0"])
    assert wait_for_started(), 'App start timed out'
    time.sleep(2) # wait for app to exit
    # App exited immediately, clean-up
    os.remove(logfile_path)
    # At this point both the protocol handler and the file ext are registered
    # 1. Try the file extension -- this tests the AppleEvent rewrite of
    # a "file://" event to a regular filesystem path.
    # Create 32 files that are associated with this app.
    # This tests the robustness of the argv-emu by spamming it with
    # lots of args and seeing what happens.
    n_files = 32
    assoc_files = []
    for ii in range(n_files):
        assoc_path = os.path.join(tmpdir, 'dist',
                                  'AFile{}.{}'.format(ii, custom_file_ext))
        with open(assoc_path, 'wt') as fh:
            fh.write("File contents #{}\n".format(ii))
        assoc_files.append(assoc_path)
    # Open app again by "open"ing the associated files.
    #
    # These are sent as Apple Events to the app immediately after it starts,
    # which the bootloader translates back into file paths at startup, passing
    # them as argv to the subordinate app.
    #
    # The generator below produces odd numbered files as "file://" URLs, and
    # even numbered are just file paths. They all should end up appended to
    # sys.argv in the app as simple file paths.
    subprocess.check_call(['open', *[('file://' if ii % 2 else '') + ff
                                     for ii, ff in enumerate(assoc_files)]])
    args = wait_for_started()
    assert args is not None, 'App start timed out'
    # Test that all the file paths were received in argv via pre-startup
    # translation of file:// AppleEvent -> argv filesystem path.
    assert assoc_files == args[1:], \
        "An expected file path was not received by the app"
    # At this point the app is running.
    # This is a trick to make our app lose focus so that Qt forwards
    # the "Activated" events properly to our event handler in
    # pyi_pyqt5_log_events.py
    subprocess.check_call(['osascript', "-e",
                           'tell application "System Events" to activate'])
    time.sleep(1.0) # delay for above applescript
    # The app is running now, in the background, and doesn't have focus
    # 2. Call open passing the app path again -- this should activate the
    #    already-running app and the activation_count should be 2 after it
    #    exits.
    subprocess.check_call(['open', app_path])
    time.sleep(1.0) # the activate event gets sent with a delay
    # 3. Call open again using the url associated with the app. This should
    #    forward the Apple URL event to the already-running app.
    url = custom_url_scheme + "://lowecase_required/hello_world/"
    # Test support for large URL data ~64KB.
    # Note: We would have gone larger but 'open' itself seems to not
    #       consistently like data over a certain size.
    url += 'x' * 64000 # Append 64 KB of data to URL to stress-test
    subprocess.check_call(['open', url])
    activation_count = None
    # Closure: polls events.log for the url + activate_count lines, checks the
    # logged URL, and stores the activation count via `nonlocal`.
    def wait_for_event_in_logfile():
        t0 = time.time() # mark start time
        # Wait for the program to finish -- poll for expected line to appear
        # in events.log
        while True:
            assert os.path.exists(logfile_path), 'Missing events logfile'
            with open(logfile_path, 'rt') as fh:
                log_lines = fh.readlines()
            if len(log_lines) >= 3:
                url_line = log_lines[1]
                activation_line = log_lines[2]
                assert url_line.startswith("url ")
                assert activation_line.startswith("activate_count ")
                url_part = url_line.split(" ", 1)[-1]
                assert url_part.strip().lower() == url.lower(), \
                    'Logged url does not match expected'
                activation_part = activation_line.split(" ", 1)[-1]
                nonlocal activation_count
                activation_count = int(activation_part.strip())
                return True
            else:
                # Try again later
                time.sleep(polltime)
                elapsed = time.time() - t0
                if elapsed > timeout:
                    return False
    assert wait_for_event_in_logfile(), \
        'URL event did not appear in log before timeout'
    assert activation_count == 2, \
        "App did not receive rapp (re-Open app) event properly"
    # Delete all the temp files to be polite
    for ff in assoc_files:
        try:
            os.remove(ff)
        except OSError:
            pass
| 39.517391 | 78 | 0.613489 |
ace9085c59fc2ac3ab676602bbf914dbd959783c | 13,906 | py | Python | spare/farmer/farmer_api.py | rongou/spare-blockchain | 9ed13f9b380445dacbe8f1665848aee843af357d | [
"Apache-2.0"
] | null | null | null | spare/farmer/farmer_api.py | rongou/spare-blockchain | 9ed13f9b380445dacbe8f1665848aee843af357d | [
"Apache-2.0"
] | null | null | null | spare/farmer/farmer_api.py | rongou/spare-blockchain | 9ed13f9b380445dacbe8f1665848aee843af357d | [
"Apache-2.0"
] | null | null | null | import time
from typing import Callable, Optional
from blspy import AugSchemeMPL, G2Element
import spare.server.ws_connection as ws
from spare.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from spare.farmer.farmer import Farmer
from spare.protocols import farmer_protocol, harvester_protocol
from spare.protocols.protocol_message_types import ProtocolMessageTypes
from spare.server.outbound_message import NodeType, make_msg
from spare.types.blockchain_format.pool_target import PoolTarget
from spare.types.blockchain_format.proof_of_space import ProofOfSpace
from spare.util.api_decorators import api_request, peer_required
from spare.util.ints import uint32, uint64
class FarmerAPI:
    """
    Message handlers for the farmer service.  Methods decorated with
    @api_request are dispatched by the networking layer when the matching
    protocol message arrives from a harvester or a full node.
    """
    # The farmer instance holding all mutable state (caches, keys, config).
    farmer: Farmer
    def __init__(self, farmer) -> None:
        self.farmer = farmer
    def _set_state_changed_callback(self, callback: Callable):
        # Hook used by the RPC layer to be notified of farmer state changes.
        self.farmer.state_changed_callback = callback
    @api_request
    @peer_required
    async def new_proof_of_space(
        self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSspareConnection
    ):
        """
        This is a response from the harvester, for a NewChallenge. Here we check if the proof
        of space is sufficiently good, and if so, we ask for the whole proof.
        """
        # First proof seen for this signage point: initialize its counter and
        # timestamp it so stale cache entries can be expired later.
        if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
            self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
            self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
        max_pos_per_sp = 5
        if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
            # This will likely never happen for any farmer with less than 10% of global space
            # It's meant to make testnets more stable
            self.farmer.log.info(
                f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
                f"{new_proof_of_space.sp_hash}"
            )
            return None
        if new_proof_of_space.sp_hash not in self.farmer.sps:
            self.farmer.log.warning(
                f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
            )
            return None
        sps = self.farmer.sps[new_proof_of_space.sp_hash]
        # NOTE(review): every path through the loop body below returns, so
        # only sps[0] is ever processed -- confirm this is intended.
        for sp in sps:
            computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
                self.farmer.constants,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
            )
            if computed_quality_string is None:
                self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
                return None
            self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
            required_iters: uint64 = calculate_iterations_quality(
                self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
                computed_quality_string,
                new_proof_of_space.proof.size,
                sp.difficulty,
                new_proof_of_space.sp_hash,
            )
            # Double check that the iters are good
            assert required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters)
            # Proceed at getting the signatures for this PoSpace
            request = harvester_protocol.RequestSignatures(
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                [sp.challenge_chain_sp, sp.reward_chain_sp],
            )
            # Remember (plot, proof) pairs per signage point so that the
            # harvester's signature response can be matched back later.
            if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
                self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = [
                    (
                        new_proof_of_space.plot_identifier,
                        new_proof_of_space.proof,
                    )
                ]
            else:
                self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
                    (
                        new_proof_of_space.plot_identifier,
                        new_proof_of_space.proof,
                    )
                )
            self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
            # Index by quality string so request_signed_values() can find the
            # originating plot and peer when the full node asks for signatures.
            self.farmer.quality_str_to_identifiers[computed_quality_string] = (
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                peer.peer_node_id,
            )
            self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))
            return make_msg(ProtocolMessageTypes.request_signatures, request)
    @api_request
    async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
        """
        There are two cases: receiving signatures for sps, or receiving signatures for the block.
        """
        if response.sp_hash not in self.farmer.sps:
            self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
            return None
        is_sp_signatures: bool = False
        sps = self.farmer.sps[response.sp_hash]
        signage_point_index = sps[0].signage_point_index
        found_sp_hash_debug = False
        # Decide which case this is: if the signed messages are the signage
        # point hashes themselves, these are sp signatures; otherwise they
        # are foliage (block) signatures.
        for sp_candidate in sps:
            if response.sp_hash == response.message_signatures[0][0]:
                found_sp_hash_debug = True
                if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
                    is_sp_signatures = True
        if found_sp_hash_debug:
            assert is_sp_signatures
        # Recover the proof of space this response refers to.
        pospace = None
        for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
            if plot_identifier == response.plot_identifier:
                pospace = candidate_pospace
        assert pospace is not None
        computed_quality_string = pospace.verify_and_get_quality_string(
            self.farmer.constants, response.challenge_hash, response.sp_hash
        )
        if computed_quality_string is None:
            self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
            return None
        if is_sp_signatures:
            (
                challenge_chain_sp,
                challenge_chain_sp_harv_sig,
            ) = response.message_signatures[0]
            reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
            # Find the farmer secret key matching the harvester's farmer_pk,
            # then aggregate the farmer's share with the harvester's share.
            for sk in self.farmer.get_private_keys():
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk)
                    assert agg_pk == pospace.plot_public_key
                    farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
                    agg_sig_cc_sp = AugSchemeMPL.aggregate([challenge_chain_sp_harv_sig, farmer_share_cc_sp])
                    assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)
                    # This means it passes the sp filter
                    farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
                    agg_sig_rc_sp = AugSchemeMPL.aggregate([reward_chain_sp_harv_sig, farmer_share_rc_sp])
                    assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)
                    # Pool key plots need a signed pool target; pool-contract
                    # plots (puzzle hash) carry no pool target at all.
                    if pospace.pool_public_key is not None:
                        assert pospace.pool_contract_puzzle_hash is None
                        pool_pk = bytes(pospace.pool_public_key)
                        if pool_pk not in self.farmer.pool_sks_map:
                            self.farmer.log.error(
                                f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                            )
                            return None
                        pool_target: Optional[PoolTarget] = PoolTarget(self.farmer.pool_target, uint32(0))
                        assert pool_target is not None
                        pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
                            self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
                        )
                    else:
                        assert pospace.pool_contract_puzzle_hash is not None
                        pool_target = None
                        pool_target_signature = None
                    request = farmer_protocol.DeclareProofOfSpace(
                        response.challenge_hash,
                        challenge_chain_sp,
                        signage_point_index,
                        reward_chain_sp,
                        pospace,
                        agg_sig_cc_sp,
                        agg_sig_rc_sp,
                        self.farmer.farmer_target,
                        pool_target,
                        pool_target_signature,
                    )
                    self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
                    msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
                    return None
        else:
            # This is a response with block signatures
            for sk in self.farmer.get_private_keys():
                (
                    foliage_block_data_hash,
                    foliage_sig_harvester,
                ) = response.message_signatures[0]
                (
                    foliage_transaction_block_hash,
                    foliage_transaction_block_sig_harvester,
                ) = response.message_signatures[1]
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk)
                    assert agg_pk == pospace.plot_public_key
                    # Aggregate farmer + harvester shares for both foliage
                    # hashes, verify, and forward to all full nodes.
                    foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
                    foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)
                    foliage_agg_sig = AugSchemeMPL.aggregate([foliage_sig_harvester, foliage_sig_farmer])
                    foliage_block_agg_sig = AugSchemeMPL.aggregate(
                        [foliage_transaction_block_sig_harvester, foliage_transaction_block_sig_farmer]
                    )
                    assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
                    assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)
                    request_to_nodes = farmer_protocol.SignedValues(
                        computed_quality_string,
                        foliage_agg_sig,
                        foliage_block_agg_sig,
                    )
                    msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
    """
    FARMER PROTOCOL (FARMER <-> FULL NODE)
    """
    @api_request
    async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
        """
        A full node announced a new signage point: relay it to all harvesters
        and cache it (deduplicated) for matching incoming proofs.
        """
        message = harvester_protocol.NewSignagePointHarvester(
            new_signage_point.challenge_hash,
            new_signage_point.difficulty,
            new_signage_point.sub_slot_iters,
            new_signage_point.signage_point_index,
            new_signage_point.challenge_chain_sp,
        )
        msg = make_msg(ProtocolMessageTypes.new_signage_point_harvester, message)
        await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
        if new_signage_point.challenge_chain_sp not in self.farmer.sps:
            self.farmer.sps[new_signage_point.challenge_chain_sp] = []
        if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
            self.farmer.log.debug(f"Duplicate signage point {new_signage_point.signage_point_index}")
            return
        self.farmer.sps[new_signage_point.challenge_chain_sp].append(new_signage_point)
        self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(int(time.time()))
        self.farmer.state_changed("new_signage_point", {"sp_hash": new_signage_point.challenge_chain_sp})
    @api_request
    async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
        """
        A full node wants block (foliage) signatures for a winning proof:
        look up which plot/peer produced the quality string and ask that
        harvester to sign the two foliage hashes.
        """
        if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
            self.farmer.log.error(f"Do not have quality string {full_node_request.quality_string}")
            return None
        (plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
            full_node_request.quality_string
        ]
        request = harvester_protocol.RequestSignatures(
            plot_identifier,
            challenge_hash,
            sp_hash,
            [full_node_request.foliage_block_data_hash, full_node_request.foliage_transaction_block_hash],
        )
        msg = make_msg(ProtocolMessageTypes.request_signatures, request)
        await self.farmer.server.send_to_specific([msg], node_id)
    @api_request
    async def farming_info(self, request: farmer_protocol.FarmingInfo):
        """
        Forward a harvester's per-challenge farming statistics to state-change
        subscribers (e.g. the UI/RPC layer).  No reply is sent.
        """
        self.farmer.state_changed(
            "new_farming_info",
            {
                "farming_info": {
                    "challenge_hash": request.challenge_hash,
                    "signage_point": request.sp_hash,
                    "passed_filter": request.passed,
                    "proofs": request.proofs,
                    "total_plots": request.total_plots,
                    "timestamp": request.timestamp,
                }
            },
        )
| 47.29932 | 120 | 0.624479 |
ace908840bda73b90738db8210e441a5cc79c273 | 668 | py | Python | manage.py | Nosso-Cafofo/website | 4ae7e7563f924eccd3dd0999f2cee3591718e428 | [
"MIT"
] | null | null | null | manage.py | Nosso-Cafofo/website | 4ae7e7563f924eccd3dd0999f2cee3591718e428 | [
"MIT"
] | 6 | 2020-10-21T03:25:08.000Z | 2021-01-04T19:17:41.000Z | manage.py | Nosso-Cafofo/website | 4ae7e7563f924eccd3dd0999f2cee3591718e428 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: configure settings, then hand off to Django's CLI."""
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nosso_cafofo.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 29.043478 | 76 | 0.681138 |
ace90ab193459db2ac78b4b9184db22f6b52bc48 | 4,752 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/videooptimization/videooptimizationparameter.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/videooptimization/videooptimizationparameter.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/videooptimization/videooptimizationparameter.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class videooptimizationparameter(base_resource) :
	""" Configuration for VideoOptimization parameter resource. """
	def __init__(self) :
		# Backing fields for the two configurable parameters; None means
		# "not set" and is skipped during serialization.
		self._randomsamplingpercentage = None
		self._quicpacingrate = None
	@property
	def randomsamplingpercentage(self) :
		r"""Random Sampling Percentage.<br/>Default value: 0<br/>Maximum length = 100.
		"""
		try :
			return self._randomsamplingpercentage
		except Exception as e:
			raise e
	@randomsamplingpercentage.setter
	def randomsamplingpercentage(self, randomsamplingpercentage) :
		r"""Random Sampling Percentage.<br/>Default value: 0<br/>Maximum length = 100
		"""
		try :
			self._randomsamplingpercentage = randomsamplingpercentage
		except Exception as e:
			raise e
	@property
	def quicpacingrate(self) :
		r"""QUIC Video Pacing Rate (Kbps).<br/>Maximum length = 2147483647.
		"""
		try :
			return self._quicpacingrate
		except Exception as e:
			raise e
	@quicpacingrate.setter
	def quicpacingrate(self, quicpacingrate) :
		r"""QUIC Video Pacing Rate (Kbps).<br/>Maximum length = 2147483647
		"""
		try :
			self._quicpacingrate = quicpacingrate
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		r""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(videooptimizationparameter_response, response, self.__class__.__name__)
			# Non-zero errorcode means the appliance reported a problem.
			if(result.errorcode != 0) :
				# 444 indicates the session is no longer valid.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.videooptimizationparameter
		except Exception as e :
			raise e
	def _get_object_name(self) :
		r""" Returns the value of object identifier argument
		"""
		try :
			# Global parameter resource: it has no per-object identifier.
			return 0
		except Exception as e :
			raise e
	@classmethod
	def filter_update_parameters(cls, resource) :
		r""" Use this function to create a resource with only update operation specific parameters.
		"""
		updateresource = videooptimizationparameter()
		updateresource.randomsamplingpercentage = resource.randomsamplingpercentage
		updateresource.quicpacingrate = resource.quicpacingrate
		return updateresource
	@classmethod
	def update(cls, client, resource) :
		r""" Use this API to update videooptimizationparameter.
		"""
		try :
			# Singleton parameter resource: only a single (non-list) resource
			# is supported; a list argument falls through and returns None.
			if type(resource) is not list :
				updateresource = cls.filter_update_parameters(resource)
				return updateresource.update_resource(client)
		except Exception as e :
			raise e
	@classmethod
	def unset(cls, client, resource, args) :
		r""" Use this API to unset the properties of videooptimizationparameter resource.
		Properties that need to be unset are specified in args array.
		"""
		try :
			if type(resource) is not list :
				unsetresource = videooptimizationparameter()
				return unsetresource.unset_resource(client, args)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		r""" Use this API to fetch all the videooptimizationparameter resources that are configured on netscaler.
		"""
		try :
			# NOTE(review): when a non-empty `name` is passed this method
			# silently returns None -- confirm named fetch is unsupported here.
			if not name :
				obj = videooptimizationparameter()
				response = obj.get_resources(client, option_)
				return response
		except Exception as e :
			raise e
class videooptimizationparameter_response(base_response) :
	""" Response envelope for videooptimizationparameter API calls. """
	def __init__(self, length=1) :
		# Default status fields of a nitro response.
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate one resource object per expected result entry.
		resources = []
		for _ in range(length) :
			resources.append(videooptimizationparameter())
		self.videooptimizationparameter = resources
| 32.326531 | 128 | 0.746843 |
ace90c430c9fd51e63be29f77a9cb00bae06dc1d | 14,745 | py | Python | game/gamesrc/objects/world/quests.py | GhostshipSoftware/avaloria | 3dbbc281c8746afdcc094d87a4fdd414fd240e77 | [
"BSD-3-Clause"
] | null | null | null | game/gamesrc/objects/world/quests.py | GhostshipSoftware/avaloria | 3dbbc281c8746afdcc094d87a4fdd414fd240e77 | [
"BSD-3-Clause"
] | null | null | null | game/gamesrc/objects/world/quests.py | GhostshipSoftware/avaloria | 3dbbc281c8746afdcc094d87a4fdd414fd240e77 | [
"BSD-3-Clause"
] | null | null | null | import random
from prettytable import PrettyTable
from src.utils import create, utils
from ev import Object
from game.gamesrc.objects import copyreader
class QuestManager(Object):
"""
This object is attached to the character and manages all quests received.
"""
def at_object_creation(self):
"""
Set some typical attributes from the management object.
"""
self.db.active_quests = {}
self.db.completed_quests = {}
self.db.to_remove_in_active = {}
self.db.character = None
self.db.is_equipped = False
def add_quest(self, quest_to_add):
active_quests = self.db.active_quests
active_quests['%s' % quest_to_add.name] = quest_to_add
self.db.active_quests = active_quests
def complete_quest(self, quest_to_remove):
character = self.db.character
character.msg("{yYou have completed the quest: %s!{n" % quest_to_remove.name)
if quest_to_remove.db.exp_reward is not None:
character.award_exp(quest_to_remove.db.exp_reward)
if quest_to_remove.db.gold_reward is not None:
character.award_gold(quest_to_remove.db.gold_reward)
if quest_to_remove.db.loot_reward is not None:
for item in quest_to_remove.db.loot_reward:
item.move_to(character, quiet=False)
if quest_to_remove.db.faction_reward is not None:
print "QuestManager->complete_quest: trying deity faction."
if not hasattr(quest_to_remove.db.faction, 'lower'):
print "QuestManager->complete_quest: trying faction_indexing"
if character.db.attributes['deity'] in "an'karith":
faction_index = quest_to_remove.db.faction.index("karith")
elif character.db.attributes['deity'] in "green warden":
faction_index = quest_to_remove.db.faction.index("warden")
else:
faction_index = quest_to_remove.db.faction.index(character.db.attributes['deity'])
faction = quest_to_remove.db.faction[faction_index]
else:
faction = quest_to_remove.db.faction
if "an'karith" in faction:
faction = 'karith'
elif "green warden" in faction:
faction = "warden"
factions = character.db.factions
factions[faction] += quest_to_remove.db.faction_reward
character.db.factions = factions
quest_to_remove.db.completed = True
completed_quests = self.db.completed_quests
completed_quests[quest_to_remove.name] = quest_to_remove
self.db.to_remove_in_active[quest_to_remove.name] = quest_to_remove
self.db.completed_quests = completed_quests
def complete_quest_objective(self, quest, objective):
character = self.db.character
character.msg("{yYou have completed a quest objective for %s!{n" % quest.name)
quest.complete_objective(objective)
def cleanup_completed_quests(self):
to_remove = self.db.to_remove_in_active
if len(self.db.to_remove_in_active) > 0:
for quest in self.db.to_remove_in_active:
print "attempting to remove the quest from active quests"
self.remove_quest(self.db.to_remove_in_active[quest])
to_remove = {}
self.db.to_remove_in_active = to_remove
def remove_quest(self, quest_to_remove):
active_quests = self.db.active_quests
del active_quests[quest_to_remove.name]
self.db.active_quests = active_quests
def check_quest_flags(self, mob=None, item=None):
character = self.db.character
print character.db.lair.db.structure_manager_id
structure_manager = self.search(character.db.lair.db.structure_manager_id, location=character.db.lair, global_search=False)
active_quests = self.db.active_quests
active_quests_temp = active_quests
print "QuestManager.check_quest_flags: Checking active quests"
for quest in active_quests_temp:
quest_obj = active_quests[quest]
quest_objectives = quest_obj.db.objectives
print "QuestManager.check_quest_flags: Checking objectives for %s" % quest_obj.name
for objective in quest_objectives:
print "QuestManager.check_quest_flags: Checking %s" % objective
if quest_objectives[objective]['completed']:
continue
if mob is not None:
if 'kill_%s' % mob.db.mob_type in quest_objectives[objective]['type']:
if 'kill_%s' % mob.db.mob_type in mob.aliases:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif '%s' % quest_objectives[objective]['type'] in mob.aliases:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'kill_%s' % mob.name.lower() in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'boss_mob' in mob.aliases and 'kill_boss' in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'kill_%s' % mob.db.deity in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'kill_%s' % mob.location.db.dungeon_type in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'kill' in quest_objectives[objective]['type']:
if 'kill' in mob.aliases and 'counter' in quest_objectives[objective].keys():
quest_obj.tick_counter_objective(objective, caller=self.db.character)
if item is not None:
if 'gather_%s' % item.db.type in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'gather_%s' % item.name.lower() in quest_objectives[objective]['type']:
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'loot_rare_item' in quest_objectives[objective]['type'] and item.db.lootset == 'rare':
quest_obj.tick_counter_objective(objective, caller=self.db.character)
elif 'build' in quest_objectives[objective]['type']:
if 'gold_mine' in quest_objectives[objective]['type']:
if "Gold Mine" in structure_manager.db.already_built:
quest_obj.tick_counter_objective(objective, caller=character)
elif 'training_ground' in quest_objectives[objective]['type']:
if 'Training Grounds' in structure_manager.db.already_built:
quest_obj.tick_counter_objective(objective, caller=character)
elif 'defenses' in quest_objectives[objective]['type']:
if 'Defenses' in structure_manager.db.already_built:
quest_obj.tick_counter_objective(objective, caller=character)
elif 'level_structure' in quest_objectives[objective]['type']:
for struct in structure_manager.db.structures:
if structure_manager.db.structures[struct].db.level > 1:
quest_obj.tick_counter_objective(objective, caller=character)
break
elif 'use' in quest_objectives[objective]['type']:
command = quest_objectives[objective]['type'].split('_')[1]
print command, character.last_cmd
try:
if character.last_cmd.strip() == command.strip():
quest_obj.tick_counter_objective(objective, caller=character)
except AttributeError:
return
self.cleanup_completed_quests()
# def check_prereqs(self):
def find_quest(self, quest, completed=False):
active_quests = self.db.active_quests
completed_quests = self.db.completed_quests
if completed:
if quest in completed_quests:
quest = completed_quests[quest]
return quest
else:
return None
if quest in active_quests:
quest = active_quests[quest]
return quest
else:
return None
def quest_log_short_display(self, caller):
active_quests = self.db.active_quests
if len(active_quests) < 1:
caller.msg("You have no active quests currently.")
return
table = PrettyTable()
table._set_field_names(["Name", "Description", "Level", "Objectives"])
for quest in active_quests:
obj = active_quests[quest]
objective_string = obj.format_objectives()
table.add_row(["%s" % obj.name, "%s" % obj.db.short_description, "%s" % obj.db.quest_level, "%s" % objective_string])
msg = table.get_string()
caller.msg(msg)
caller.msg("For more detailed information, try help <questname>")
def completed_quests_view(self, caller):
completed_quests = self.db.completed_quests
completed_number = len(completed_quests)
if len(completed_quests) < 1:
caller.msg("You have no completed quests.")
return
titles = '{{c{0:<25} {1:<30} {2}{{n'.format('Name', 'Description', 'Level')
caller.msg(titles)
caller.msg('{c--------------------------------------------------------------------{n')
m = ""
for quest in completed_quests:
quest_obj = completed_quests[quest]
m += '{{C{0:<25}{{n {1:<30} {2}\n{{n'.format(quest_obj.name, quest_obj.db.short_description, quest_obj.db.quest_level)
caller.msg(m)
caller.msg('{c--------------------------------------------------------------------{n')
caller.msg("{CCompleted Quests:{n %s" % completed_number)
def quest_objectives_display(self, caller, quest):
caller.msg("%s" % quest.title())
quest = self.find_quest(quest.title())
if quest is None:
caller.msg("You are not on any quest named: %s" % quest)
return
else:
titles = '{0:<25} {1:<10}'.format('Short Description', 'Progress')
caller.msg(titles)
caller.msg("{c------------------------------------------------------------------")
objectives_message = quest.format_objectives()
caller.msg(objectives_message)
caller.msg("{c------------------------------------------------------------------")
class Quest(Object):
    """
    Typical quest object.

    Holds reward data, an ``objectives`` dict keyed by objective name
    (each value a dict with at least 'objective_name', 'counter',
    'threshold' and 'completed' keys), and overall completion state.
    """
    def at_object_creation(self):
        """Install default quest attributes on first creation."""
        self.db.level_requirement = 1
        self.db.prereq = None
        self.db.repeatable = False
        self.db.gold_reward = 10
        self.db.exp_reward = 10
        self.db.loot_reward = []
        self.db.faction_reward = 10
        self.db.faction = None
        self.db.objectives = {}
        self.db.quest_level = 1
        self.db.quest_type = None
        self.db.long_description = ""
        self.db.short_description = "Something short, and sweet"
        self.db.exclusions = None
        self.db.completed = False

    def set_quest_aliases(self):
        """Alias the quest object by its broad quest type."""
        if 'kill' in self.db.quest_type:
            self.aliases = ['kill']
        elif 'gather' in self.db.quest_type:
            self.aliases = ['gather']
        elif 'fedex' in self.db.quest_type:
            self.aliases = ['fedex']
        elif 'explore' in self.db.quest_type:
            self.aliases = ['explore']

    def add_objective(self, objectives_dict):
        """Register one objective dict under its 'objective_name' key."""
        objectives = self.db.objectives
        objectives[objectives_dict['objective_name']] = objectives_dict
        self.db.objectives = objectives

    def complete_objective(self, objectives, objective, caller):
        """Mark *objective* done, notify *caller*, then re-check the quest.

        NOTE(review): QuestManager.complete_quest_objective appears to
        call this with a single argument -- verify call sites before
        relying on this signature.
        """
        objectives[objective]['completed'] = True
        caller.msg("{yYou have completed a quest objective!{n")
        self.check_objectives(objectives,caller)

    def tick_counter_objective(self, objective, caller):
        """Advance an objective's counter by one, clamped to its
        threshold, completing the objective when the threshold is hit."""
        objectives = self.db.objectives
        objectives[objective]['counter'] = objectives[objective]['counter'] + 1
        caller.msg("{yQuest objective advanced! %s: %s/%s{n" % (objectives[objective]['objective_name'], objectives[objective]['counter'], objectives[objective]['threshold']))
        if objectives[objective]['counter'] > objectives[objective]['threshold']:
            objectives[objective]['counter'] = objectives[objective]['threshold']
        if objectives[objective]['counter'] >= objectives[objective]['threshold']:
            self.complete_objective(objectives, objective, caller)
        self.db.objectives = objectives

    def check_objectives(self, objectives, caller):
        """Mark the whole quest complete once no objective is outstanding."""
        quest_log = caller.db.quest_log
        # Simplified: the original kept a dead 'is_false' flag that could
        # never be True after the loop (it returned on first incomplete).
        for objective in objectives:
            if objectives[objective]['completed'] is False:
                return
        self.db.completed = True
        quest_log.complete_quest(self)

    def set_description(self, copy_file):
        """Load the quest's long description from *copy_file*."""
        self.db.long_description = copyreader.read_file(copy_file)

    def add_help_entry(self):
        """Create a help entry for this quest, viewable only while on it."""
        # Return value intentionally discarded (the original bound it to
        # an unused local); creation is the side effect we want.
        create.create_help_entry(self.name, self.db.long_description, category="Quests", locks="view:onquest(%s)" % self.name)

    def format_objectives(self):
        """Return 'name  counter/threshold' lines, one per objective."""
        objectives = self.db.objectives
        lines = []
        for objective in objectives:
            entry = objectives[objective]
            # Equivalent to the original append-'\n'-then-rstrip dance,
            # including its single-objective special case.
            lines.append('{0:<30} {1}/{2}'.format(
                entry['objective_name'], entry['counter'], entry['threshold']))
        return '\n'.join(lines)
| 48.503289 | 175 | 0.592947 |
ace90c7fa212653b6caa8b15ab7bded2efdedf3a | 8,392 | py | Python | test/orm/inheritance/test_manytomany.py | Thhhza/sqlalchemy | f2b267043e17b2b769dc2a5b8139f6be2a3d4e84 | [
"MIT"
] | 1 | 2015-11-07T12:34:26.000Z | 2015-11-07T12:34:26.000Z | test/orm/inheritance/test_manytomany.py | Thhhza/sqlalchemy | f2b267043e17b2b769dc2a5b8139f6be2a3d4e84 | [
"MIT"
] | 1 | 2021-08-07T12:14:52.000Z | 2021-08-07T12:14:52.000Z | test/orm/inheritance/test_manytomany.py | Thhhza/sqlalchemy | f2b267043e17b2b769dc2a5b8139f6be2a3d4e84 | [
"MIT"
] | null | null | null | from sqlalchemy.testing import eq_
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy import testing
from sqlalchemy.testing import fixtures
class InheritTest(fixtures.MappedTest):
    """deals with inheritance and many-to-many relationships"""
    @classmethod
    def define_tables(cls, metadata):
        # Joined-table inheritance schema: 'principals' is the base
        # table, 'prin_users' and 'prin_groups' extend it, and
        # 'prin_user_group_map' is the user<->group association table.
        global principals
        global users
        global groups
        global user_group_map
        principals = Table('principals', metadata,
            Column('principal_id', Integer,
                Sequence('principal_id_seq', optional=False),
                primary_key=True),
            Column('name', String(50), nullable=False))
        users = Table('prin_users', metadata,
            Column('principal_id', Integer,
                ForeignKey('principals.principal_id'), primary_key=True),
            Column('password', String(50), nullable=False),
            Column('email', String(50), nullable=False),
            Column('login_id', String(50), nullable=False))
        groups = Table('prin_groups', metadata,
            Column('principal_id', Integer,
                ForeignKey('principals.principal_id'), primary_key=True))
        user_group_map = Table('prin_user_group_map', metadata,
            Column('user_id', Integer, ForeignKey( "prin_users.principal_id"),
                primary_key=True ),
            Column('group_id', Integer, ForeignKey( "prin_groups.principal_id"),
                primary_key=True ),
            )
    def testbasic(self):
        # Map a three-class joined-inheritance hierarchy with a
        # many-to-many 'users' relationship on the Group subclass, then
        # flush one group holding one user.
        class Principal(object):
            def __init__(self, **kwargs):
                for key, value in kwargs.items():
                    setattr(self, key, value)
        class User(Principal):
            pass
        class Group(Principal):
            pass
        mapper(Principal, principals)
        mapper(User, users, inherits=Principal)
        mapper(Group, groups, inherits=Principal, properties={
            'users': relationship(User, secondary=user_group_map,
                lazy='select', backref="groups")
        })
        g = Group(name="group1")
        g.users.append(User(name="user1", password="pw", email="foo@bar.com", login_id="lg1"))
        sess = create_session()
        sess.add(g)
        sess.flush()
        # TODO: put an assertion
class InheritTest2(fixtures.MappedTest):
    """deals with inheritance and many-to-many relationships"""
    @classmethod
    def define_tables(cls, metadata):
        # 'foo' is the base table, 'bar' joins to it for inheritance,
        # and 'foo_bar' is the association table behind Bar.foos.
        global foo, bar, foo_bar
        foo = Table('foo', metadata,
            Column('id', Integer, Sequence('foo_id_seq', optional=True),
                primary_key=True),
            Column('data', String(20)),
            )
        bar = Table('bar', metadata,
            Column('bid', Integer, ForeignKey('foo.id'), primary_key=True),
            #Column('fid', Integer, ForeignKey('foo.id'), )
            )
        foo_bar = Table('foo_bar', metadata,
            Column('foo_id', Integer, ForeignKey('foo.id')),
            Column('bar_id', Integer, ForeignKey('bar.bid')))
    def testget(self):
        # Verify Query.get() works on the subclass even though its
        # primary key column is named 'bid' rather than 'id'.
        class Foo(object):
            def __init__(self, data=None):
                self.data = data
        class Bar(Foo):pass
        mapper(Foo, foo)
        mapper(Bar, bar, inherits=Foo)
        print(foo.join(bar).primary_key)
        print(class_mapper(Bar).primary_key)
        b = Bar('somedata')
        sess = create_session()
        sess.add(b)
        sess.flush()
        sess.expunge_all()
        # test that "bar.bid" does not need to be referenced in a get
        # (ticket 185)
        assert sess.query(Bar).get(b.id).id == b.id
    def testbasic(self):
        # Subclass with an eagerly-loaded many-to-many back to the base
        # class; flush, reload, and compare the full result graph.
        class Foo(object):
            def __init__(self, data=None):
                self.data = data
        mapper(Foo, foo)
        class Bar(Foo):
            pass
        mapper(Bar, bar, inherits=Foo, properties={
            'foos': relationship(Foo, secondary=foo_bar, lazy='joined')
        })
        sess = create_session()
        b = Bar('barfoo')
        sess.add(b)
        sess.flush()
        f1 = Foo('subfoo1')
        f2 = Foo('subfoo2')
        b.foos.append(f1)
        b.foos.append(f2)
        sess.flush()
        sess.expunge_all()
        l = sess.query(Bar).all()
        print(l[0])
        print(l[0].foos)
        self.assert_unordered_result(l, Bar,
            # {'id':1, 'data':'barfoo', 'bid':1, 'foos':(Foo, [{'id':2,'data':'subfoo1'}, {'id':3,'data':'subfoo2'}])},
            {'id':b.id, 'data':'barfoo', 'foos':(Foo, [{'id':f1.id,'data':'subfoo1'}, {'id':f2.id,'data':'subfoo2'}])},
            )
class InheritTest3(fixtures.MappedTest):
    """deals with inheritance and many-to-many relationships"""
    @classmethod
    def define_tables(cls, metadata):
        # Three-level joined inheritance (foo -> bar -> blub) plus three
        # association tables linking the levels many-to-many.
        global foo, bar, blub, bar_foo, blub_bar, blub_foo
        # the 'data' columns are to appease SQLite which cant handle a blank INSERT
        foo = Table('foo', metadata,
            Column('id', Integer, Sequence('foo_seq', optional=True),
                primary_key=True),
            Column('data', String(20)))
        bar = Table('bar', metadata,
            Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
            Column('bar_data', String(20)))
        blub = Table('blub', metadata,
            Column('id', Integer, ForeignKey('bar.id'), primary_key=True),
            Column('blub_data', String(20)))
        bar_foo = Table('bar_foo', metadata,
            Column('bar_id', Integer, ForeignKey('bar.id')),
            Column('foo_id', Integer, ForeignKey('foo.id')))
        blub_bar = Table('bar_blub', metadata,
            Column('blub_id', Integer, ForeignKey('blub.id')),
            Column('bar_id', Integer, ForeignKey('bar.id')))
        blub_foo = Table('blub_foo', metadata,
            Column('blub_id', Integer, ForeignKey('blub.id')),
            Column('foo_id', Integer, ForeignKey('foo.id')))
    def testbasic(self):
        # Middle-level subclass Bar with a lazy many-to-many 'foos';
        # round-trip through a flush and compare reprs.
        class Foo(object):
            def __init__(self, data=None):
                self.data = data
            def __repr__(self):
                return "Foo id %d, data %s" % (self.id, self.data)
        mapper(Foo, foo)
        class Bar(Foo):
            def __repr__(self):
                return "Bar id %d, data %s" % (self.id, self.data)
        mapper(Bar, bar, inherits=Foo, properties={
            'foos' :relationship(Foo, secondary=bar_foo, lazy='select')
        })
        sess = create_session()
        b = Bar('bar #1')
        sess.add(b)
        b.foos.append(Foo("foo #1"))
        b.foos.append(Foo("foo #2"))
        sess.flush()
        compare = [repr(b)] + sorted([repr(o) for o in b.foos])
        sess.expunge_all()
        l = sess.query(Bar).all()
        print(repr(l[0]) + repr(l[0].foos))
        found = [repr(l[0])] + sorted([repr(o) for o in l[0].foos])
        eq_(found, compare)
    def testadvanced(self):
        # Bottom-level subclass Blub with eagerly-loaded many-to-many
        # collections to both ancestor levels; verify via repr both for
        # a full query and a filtered single-row fetch.
        class Foo(object):
            def __init__(self, data=None):
                self.data = data
            def __repr__(self):
                return "Foo id %d, data %s" % (self.id, self.data)
        mapper(Foo, foo)
        class Bar(Foo):
            def __repr__(self):
                return "Bar id %d, data %s" % (self.id, self.data)
        mapper(Bar, bar, inherits=Foo)
        class Blub(Bar):
            def __repr__(self):
                return "Blub id %d, data %s, bars %s, foos %s" % (
                    self.id, self.data, repr([b for b in self.bars]),
                    repr([f for f in self.foos]))
        mapper(Blub, blub, inherits=Bar, properties={
            'bars': relationship(Bar, secondary=blub_bar, lazy='joined'),
            'foos': relationship(Foo, secondary=blub_foo, lazy='joined'),
        })
        sess = create_session()
        f1 = Foo("foo #1")
        b1 = Bar("bar #1")
        b2 = Bar("bar #2")
        bl1 = Blub("blub #1")
        for o in (f1, b1, b2, bl1):
            sess.add(o)
        bl1.foos.append(f1)
        bl1.bars.append(b2)
        sess.flush()
        compare = repr(bl1)
        blubid = bl1.id
        sess.expunge_all()
        l = sess.query(Blub).all()
        print(l)
        self.assert_(repr(l[0]) == compare)
        sess.expunge_all()
        x = sess.query(Blub).filter_by(id=blubid).one()
        print(x)
        self.assert_(repr(x) == compare)
| 33.434263 | 119 | 0.543375 |
ace90c8ea58607efef63d7f13f6a64abab12e6d0 | 2,714 | py | Python | toggl_to_sqlite/utils.py | ryancheley/toggle-to-sqlite | 7dbc9786dbf7dcfbde8d687d2c961fd633cf162b | [
"Apache-2.0"
] | 4 | 2021-03-08T04:28:15.000Z | 2022-01-11T04:15:12.000Z | toggl_to_sqlite/utils.py | ryancheley/toggle-to-sqlite | 7dbc9786dbf7dcfbde8d687d2c961fd633cf162b | [
"Apache-2.0"
] | 14 | 2021-03-08T16:46:35.000Z | 2022-01-11T04:33:12.000Z | toggl_to_sqlite/utils.py | ryancheley/toggle-to-sqlite | 7dbc9786dbf7dcfbde8d687d2c961fd633cf162b | [
"Apache-2.0"
] | 1 | 2021-07-10T20:43:33.000Z | 2021-07-10T20:43:33.000Z | import requests
import json
import datetime
import math
def get_start_datetime(api_token, since: datetime = None):
    """Return the date from which time entries should be fetched.

    If *since* is given it is used directly, with no API call.  (Bug
    fix: previously a failed API call made the function return today's
    date even when *since* was supplied, and a successful-but-unneeded
    request was issued either way.)  Otherwise the Toggl profile is
    queried for the first workspace's creation timestamp; on a non-200
    response today's date is returned as a fallback.
    """
    if since is not None:
        return since.date()
    toggl = requests.get(
        "https://api.track.toggl.com/api/v8/me", auth=(api_token, "api_token")
    )
    if toggl.status_code == 200:
        data = json.loads(toggl.text)
        start_time = data["data"]["workspaces"][0]["at"]
        start_time = datetime.datetime.strptime(
            start_time, "%Y-%m-%dT%H:%M:%S+00:00"
        )
        return start_time.date()
    return datetime.date.today()
def get_workspaces(api_token):
    """Fetch the caller's Toggl workspaces.

    Returns a list containing one element (the decoded workspace list)
    on success, or an empty list on a non-200 response.  The sensitive
    'api_token' field is stripped from every workspace dict.
    """
    workspaces = []
    response = requests.get(
        "https://api.track.toggl.com/api/v8/workspaces", auth=(api_token, "api_token")
    )
    if response.status_code == 200:
        workspaces.append(json.loads(response.text))
    # Bug fix: the original indexed workspaces[0] unconditionally, which
    # raised IndexError whenever the request failed.
    if workspaces:
        for workspace in workspaces[0]:
            try:
                workspace.pop("api_token", None)
            except AttributeError:
                pass
    return workspaces
def get_projects(api_token):
    """Return the project list (active and archived) for every workspace.

    Each non-empty per-workspace project list is appended as one element
    of the returned list.
    """
    projects = []
    workspaces = get_workspaces(api_token)
    if not workspaces:
        return projects
    for workspace in workspaces[0]:
        url = 'https://api.track.toggl.com/api/v8/workspaces/{0}/projects'.format(workspace["id"])
        response = requests.get(
            url,
            params={"active": "both"},
            auth=(api_token, "api_token"),
        )
        workspace_projects = json.loads(response.text)
        if workspace_projects:
            projects.append(workspace_projects)
    return projects
def get_time_entries(api_token, days, since: datetime = None):
    """Fetch all time entries in consecutive windows of *days* days,
    from the start date (profile creation, or *since*) up to today.

    Returns a list with one decoded JSON payload per window; empty when
    *days* is not positive.
    """
    window_start = get_start_datetime(api_token, since)
    data = []
    if days <= 0:
        return data
    span = datetime.timedelta(days=days)
    windows = math.ceil((datetime.date.today() - window_start).days / days)
    for index in range(windows):
        lower = (window_start + span * index).strftime("%Y-%m-%dT00:00:00-00:00")
        upper = (window_start + span * (index + 1)).strftime("%Y-%m-%dT00:00:00-00:00")
        response = requests.get(
            "https://api.track.toggl.com/api/v8/time_entries",
            params=(("start_date", lower), ("end_date", upper)),
            auth=(api_token, "api_token"),
        )
        data.append(json.loads(response.text))
    return data
def save_items(items, table, db):
    """Upsert each batch of rows in *items* into db[table], keyed on
    'id', adding missing columns as needed."""
    for batch in items:
        db[table].insert_all(batch, pk="id", alter=True, replace=True)
| 31.195402 | 92 | 0.55969 |
ace90d3549c31c09e14ec1c2f86ac1cf3a6927fa | 5,839 | py | Python | archive/holyoke/people.py | jayktee/scrapers-us-municipal | ff52a331e91cb590a3eda7db6c688d75b77acacb | [
"MIT"
] | 67 | 2015-04-28T19:28:18.000Z | 2022-01-31T03:27:17.000Z | archive/holyoke/people.py | jayktee/scrapers-us-municipal | ff52a331e91cb590a3eda7db6c688d75b77acacb | [
"MIT"
] | 202 | 2015-01-15T18:43:12.000Z | 2021-11-23T15:09:10.000Z | archive/holyoke/people.py | jayktee/scrapers-us-municipal | ff52a331e91cb590a3eda7db6c688d75b77acacb | [
"MIT"
] | 54 | 2015-01-27T03:15:45.000Z | 2021-09-10T19:35:32.000Z | from pupa.scrape import Scraper, Person, Membership
from pupa.utils import make_pseudo_id
import lxml.html
# Department landing pages on holyoke.org that the scraper walks.
CITY_CLERK = "http://www.holyoke.org/departments/city-clerk/"
CITY_TREASURER = "http://www.holyoke.org/departments/treasurer/"
CITY_COUNCIL = "http://www.holyoke.org/departments/city-council/"
# Note: this one has no trailing slash, unlike the others.
CITY_MAYOR = "http://www.holyoke.org/departments/mayors-office"
class HolyokePersonScraper(Scraper):
    """Scrapes Holyoke, MA officials (council, clerk, treasurer, mayor)
    and their staff from holyoke.org."""

    def lxmlize(self, url):
        """Fetch *url* and return an lxml tree with links made absolute."""
        entry = self.urlopen(url)
        page = lxml.html.fromstring(entry)
        page.make_links_absolute(url)
        return page

    def scrape_council(self):
        """Yield a Person for each unique council-member page linked from
        the city-council index."""
        page = self.lxmlize(CITY_COUNCIL)
        seen = set()
        for member in page.xpath(
            "//a[contains(@href, 'holyoke.org/city-council/')]"
        ):
            url = member.attrib['href']
            if url in seen:
                continue
            seen.add(url)
            yield from self.scrape_counciler(member.attrib['href'])

    def scrape_counciler(self, url):
        """Scrape one council member's page into a Person with contact
        details."""
        page = self.lxmlize(url)
        who, = page.xpath("//h3[@class='subtitle']/text()")
        district, = page.xpath("//div[@class='right-bar']//h2/text()")
        image, = page.xpath(
            "//div[@class='left-bar']//a[@class='image lightbox']//img"
        )
        member = Person(
            primary_org='legislature',
            name=who, district=district,
            image=image.attrib['src']
        )
        member.add_source(url)
        details = page.xpath("//table[@align='center']//td")
        for detail in details:
            detail = detail.text_content().strip()
            if detail is None or detail == "":
                continue
            type_, value = detail.split(":", 1)
            # NOTE(review): an unrecognized label raises KeyError here --
            # confirm the pages only ever use these four labels.
            cdtype = {
                "Home Phone": "voice",
                "Address": "address",
                "Email": "email",
                "Cell Phone": "voice",
            }[type_]
            member.add_contact_detail(type=cdtype,
                                      note=type_,
                                      value=value)
        yield member

    def scrape_clerk(self):
        yield from self.scrape_staff(CITY_CLERK, 'clerk')

    def scrape_treasurer(self):
        # Bug fix: this previously scraped CITY_CLERK; CITY_TREASURER was
        # defined at module level but never used.
        yield from self.scrape_staff(CITY_TREASURER, 'treasurer')

    def scrape_mayor(self):
        yield from self.scrape_staff(CITY_MAYOR, 'mayor')

    def scrape_staff(self, url, role):
        """Scrape a department page: yield the department head's
        Membership and Person for *role*, then each staffer's Membership
        and Person."""
        page = self.lxmlize(url)
        bar, = page.xpath("//div[@class='right-bar']")
        head, office, contact, _ = bar.xpath(".//div[@class='module']")
        name, = head.xpath(".//h4")
        title, social = head.xpath(".//p")
        head = Person(name=name.text_content())
        head.add_source(url)
        membership = Membership(
            post_id=make_pseudo_id(role=role,),
            role=role,
            label=title.text_content(),
            person_id=head._id,
            organization_id=make_pseudo_id(
                classification="legislature"))
        yield membership
        emails = social.xpath(".//a[contains(@href, 'mailto:')]")
        for email in emails:
            # NOTE(review): the stored value keeps the 'mailto:' prefix --
            # confirm downstream consumers expect that.
            head.add_contact_detail(type='email',
                                    value=email.attrib['href'],
                                    note='Office Email')
        offices = office.xpath(".//p")
        for office in offices:
            head.add_contact_detail(type='address',
                                    value=office.text_content(),
                                    note='Office Address')
        contacts = contact.xpath(".//span")
        for contact in contacts:
            class_ = contact.attrib['class']
            type_ = {"icon-phone": "voice",
                     "icon-fax": "fax",
                     "icon-email": "email"}[class_]
            # The value may be the tail text of the icon span or the text
            # of the following element.
            value = contact.tail
            if value is None:
                value = contact.getnext()
                value = value.text_content() if value is not None else None
            if value is None:
                continue
            head.add_contact_detail(type=type_,
                                    value=value,
                                    note="Office Contact Detail")
        yield head
        staff, = page.xpath("//div[@id='staff']")
        for member in staff.xpath(
            "//div[@class='table-item clearfix remove-clickable']"
        ):
            name, = member.xpath(".//span[@class='title1']")
            name = name.text
            name, staff_role = name.rsplit("-", 1)
            name = name.strip()
            staff_role = staff_role.strip()
            staffer = Person(name=name)
            staffer.add_source(url)
            details = member.xpath(".//p/span")
            membership = Membership(
                role=staff_role,
                label="%s-staff" % (role),
                person_id=staffer._id,
                organization_id=make_pseudo_id(
                    classification="legislature",))
            yield membership
            for detail in details:
                type_ = {
                    "icon-phone marker": "voice",
                    "icon-email marker": "email",
                }[detail.attrib['class']]
                value = detail.tail
                if value is None:
                    value = detail.getnext()
                    value = value.text_content() if value is not None else None
                if value is None:
                    continue
                staffer.add_contact_detail(type=type_,
                                           value=value,
                                           note="Office")
            yield staffer

    def scrape(self):
        """Entry point: scrape every tracked office in turn."""
        yield from self.scrape_clerk()
        yield from self.scrape_treasurer()
        yield from self.scrape_mayor()
        yield from self.scrape_council()
| 34.146199 | 79 | 0.511731 |
ace90e1147c76eeb780028308895d3e3b4d8b797 | 14,190 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/ldpLabelAbortRequest_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/ldpLabelAbortRequest_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/ldpLabelAbortRequest_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LdpLabelAbortRequest(Base):
__slots__ = ()
_SDM_NAME = 'ldpLabelAbortRequest'
_SDM_ATT_MAP = {
'HeaderVersion': 'ldpLabelAbortRequest.header.version-1',
'HeaderPduLengthinOctets': 'ldpLabelAbortRequest.header.pduLengthinOctets-2',
'HeaderLsrID': 'ldpLabelAbortRequest.header.lsrID-3',
'HeaderLabelSpace': 'ldpLabelAbortRequest.header.labelSpace-4',
'HeaderUBit': 'ldpLabelAbortRequest.header.uBit-5',
'HeaderType': 'ldpLabelAbortRequest.header.type-6',
'HeaderLength': 'ldpLabelAbortRequest.header.length-7',
'HeaderMessageID': 'ldpLabelAbortRequest.header.messageID-8',
'FecTLVUBit': 'ldpLabelAbortRequest.header.fecTLV.uBit-9',
'FecTLVFBit': 'ldpLabelAbortRequest.header.fecTLV.fBit-10',
'FecTLVType': 'ldpLabelAbortRequest.header.fecTLV.type-11',
'FecTLVLength': 'ldpLabelAbortRequest.header.fecTLV.length-12',
'WildcardType': 'ldpLabelAbortRequest.header.fecTLV.fecElement.wildcard.type-13',
'PrefixType': 'ldpLabelAbortRequest.header.fecTLV.fecElement.prefix.type-14',
'Ipv4PrefixAddressFamily': 'ldpLabelAbortRequest.header.fecTLV.fecElement.prefix.addressFamily.ipv4Prefix.addressFamily-15',
'Ipv4PrefixPrelen': 'ldpLabelAbortRequest.header.fecTLV.fecElement.prefix.addressFamily.ipv4Prefix.prelen-16',
'Ipv4PrefixPrefix': 'ldpLabelAbortRequest.header.fecTLV.fecElement.prefix.addressFamily.ipv4Prefix.prefix-17',
'Ipv6PrefixAddressFamily': 'ldpLabelAbortRequest.header.fecTLV.fecElement.prefix.addressFamily.ipv6Prefix.addressFamily-18',
'Ipv6PrefixPrelen': 'ldpLabelAbortRequest.header.fecTLV.fecElement.prefix.addressFamily.ipv6Prefix.prelen-19',
'Ipv6PrefixPrefix': 'ldpLabelAbortRequest.header.fecTLV.fecElement.prefix.addressFamily.ipv6Prefix.prefix-20',
'HostAddressType': 'ldpLabelAbortRequest.header.fecTLV.fecElement.hostAddress.type-21',
'Ipv4HostAddressAddressFamily': 'ldpLabelAbortRequest.header.fecTLV.fecElement.hostAddress.addressFamily.ipv4HostAddress.addressFamily-22',
'Ipv4HostAddressHostAddressLength': 'ldpLabelAbortRequest.header.fecTLV.fecElement.hostAddress.addressFamily.ipv4HostAddress.hostAddressLength-23',
'Ipv4HostAddressHostAddress': 'ldpLabelAbortRequest.header.fecTLV.fecElement.hostAddress.addressFamily.ipv4HostAddress.hostAddress-24',
'Ipv6HostAddressAddressFamily': 'ldpLabelAbortRequest.header.fecTLV.fecElement.hostAddress.addressFamily.ipv6HostAddress.addressFamily-25',
'Ipv6HostAddressHostAddressLength': 'ldpLabelAbortRequest.header.fecTLV.fecElement.hostAddress.addressFamily.ipv6HostAddress.hostAddressLength-26',
'Ipv6HostAddressHostAddress': 'ldpLabelAbortRequest.header.fecTLV.fecElement.hostAddress.addressFamily.ipv6HostAddress.hostAddress-27',
'LabelRequestMessageIDTLVUBit': 'ldpLabelAbortRequest.header.labelRequestMessageIDTLV.uBit-28',
'LabelRequestMessageIDTLVFBit': 'ldpLabelAbortRequest.header.labelRequestMessageIDTLV.fBit-29',
'LabelRequestMessageIDTLVType': 'ldpLabelAbortRequest.header.labelRequestMessageIDTLV.type-30',
'LabelRequestMessageIDTLVLength': 'ldpLabelAbortRequest.header.labelRequestMessageIDTLV.length-31',
'LabelRequestMessageIDTLVMessageID': 'ldpLabelAbortRequest.header.labelRequestMessageIDTLV.messageID-32',
}
def __init__(self, parent, list_op=False):
super(LdpLabelAbortRequest, self).__init__(parent, list_op)
@property
def HeaderVersion(self):
"""
Display Name: Version
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderVersion']))
@property
def HeaderPduLengthinOctets(self):
"""
Display Name: PDU length(in octets)
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderPduLengthinOctets']))
@property
def HeaderLsrID(self):
"""
Display Name: LSR ID
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLsrID']))
@property
def HeaderLabelSpace(self):
"""
Display Name: Label space
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLabelSpace']))
@property
def HeaderUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderUBit']))
@property
def HeaderType(self):
"""
Display Name: Type
Default Value: 0x0404
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderType']))
@property
def HeaderLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLength']))
@property
def HeaderMessageID(self):
"""
Display Name: Message ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderMessageID']))
@property
def FecTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FecTLVUBit']))
@property
def FecTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FecTLVFBit']))
@property
def FecTLVType(self):
"""
Display Name: Type
Default Value: 0x0100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FecTLVType']))
@property
def FecTLVLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FecTLVLength']))
    # --- Generated multivalue field accessors --------------------------------
    # Each property below wraps one LDP FEC / Label Request TLV field in a
    # Multivalue, resolving the REST attribute name through self._SDM_ATT_MAP.
    # The Multivalue import is repeated inside every accessor -- presumably to
    # avoid a circular import at module load time; TODO confirm against the
    # ixnetwork_restpy package layout.
    @property
    def WildcardType(self):
        """
        Display Name: Type
        Default Value: 0x01
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['WildcardType']))
    @property
    def PrefixType(self):
        """
        Display Name: Type
        Default Value: 0x02
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PrefixType']))
    @property
    def Ipv4PrefixAddressFamily(self):
        """
        Display Name: Address family
        Default Value: 1
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4PrefixAddressFamily']))
    @property
    def Ipv4PrefixPrelen(self):
        """
        Display Name: Prelen
        Default Value: 4
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4PrefixPrelen']))
    @property
    def Ipv4PrefixPrefix(self):
        """
        Display Name: Prefix
        Default Value: 0.0.0.0
        Value Format: iPv4
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4PrefixPrefix']))
    @property
    def Ipv6PrefixAddressFamily(self):
        """
        Display Name: Address family
        Default Value: 2
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6PrefixAddressFamily']))
    @property
    def Ipv6PrefixPrelen(self):
        """
        Display Name: Prelen
        Default Value: 16
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6PrefixPrelen']))
    @property
    def Ipv6PrefixPrefix(self):
        """
        Display Name: Prefix
        Default Value: 0:0:0:0:0:0:0:0
        Value Format: iPv6
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6PrefixPrefix']))
    @property
    def HostAddressType(self):
        """
        Display Name: Type
        Default Value: 0x03
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HostAddressType']))
    @property
    def Ipv4HostAddressAddressFamily(self):
        """
        Display Name: Address family
        Default Value: 1
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4HostAddressAddressFamily']))
    @property
    def Ipv4HostAddressHostAddressLength(self):
        """
        Display Name: Host address length
        Default Value: 4
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4HostAddressHostAddressLength']))
    @property
    def Ipv4HostAddressHostAddress(self):
        """
        Display Name: Host address
        Default Value: 0*
        Value Format: iPv4
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4HostAddressHostAddress']))
    @property
    def Ipv6HostAddressAddressFamily(self):
        """
        Display Name: Address family
        Default Value: 2
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6HostAddressAddressFamily']))
    @property
    def Ipv6HostAddressHostAddressLength(self):
        """
        Display Name: Host address length
        Default Value: 16
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6HostAddressHostAddressLength']))
    @property
    def Ipv6HostAddressHostAddress(self):
        """
        Display Name: Host address
        Default Value: 0
        Value Format: iPv6
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6HostAddressHostAddress']))
    @property
    def LabelRequestMessageIDTLVUBit(self):
        """
        Display Name: U bit
        Default Value: 0
        Value Format: decimal
        Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelRequestMessageIDTLVUBit']))
    @property
    def LabelRequestMessageIDTLVFBit(self):
        """
        Display Name: F bit
        Default Value: 0
        Value Format: decimal
        Available enum values: Do not forward, 0, Forward, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelRequestMessageIDTLVFBit']))
    @property
    def LabelRequestMessageIDTLVType(self):
        """
        Display Name: Type
        Default Value: 0x0600
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelRequestMessageIDTLVType']))
    @property
    def LabelRequestMessageIDTLVLength(self):
        """
        Display Name: Length
        Default Value: 4
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelRequestMessageIDTLVLength']))
    @property
    def LabelRequestMessageIDTLVMessageID(self):
        """
        Display Name: Message ID
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelRequestMessageIDTLVMessageID']))
    # Create a new instance on the server from the locally set attribute
    # values (all locals are mapped back through _SDM_ATT_MAP).
    def add(self):
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 38.042895 | 155 | 0.681889 |
ace90f2162d3183a0e97affb34f72078c750595b | 4,235 | py | Python | examples/adwords/v201601/campaign_management/add_keywords_in_bulk.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | [
"Apache-2.0"
] | 1 | 2020-05-23T11:32:32.000Z | 2020-05-23T11:32:32.000Z | examples/adwords/v201601/campaign_management/add_keywords_in_bulk.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | [
"Apache-2.0"
] | null | null | null | examples/adwords/v201601/campaign_management/add_keywords_in_bulk.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | [
"Apache-2.0"
] | 2 | 2018-04-20T02:16:33.000Z | 2020-11-12T20:58:54.000Z | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code sample illustrates how to perform asynchronous requests.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import random
import re
import time
from googleads import adwords
from googleads import errors
RETRY_INTERVAL = 10
RETRIES_COUNT = 30
KEYWORD_NUMBER = 100
INDEX_REGEX = r'operations\[(\d+)\].operand'
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
  """Add keywords in bulk via MutateJobService and poll until completion.

  NOTE(review): this example is written for Python 2 (print statements) and
  AdWords API v201601.

  Args:
    client: an initialized adwords.AdWordsClient instance.
    ad_group_id: str/int ID of the ad group to receive the keywords.

  Raises:
    errors.GoogleAdsError: if the job cannot be submitted, fails, or does not
      finish within RETRY_INTERVAL * (RETRIES_COUNT - 1) seconds.
  """
  # Initialize appropriate service.
  mutate_job_service = client.GetService('MutateJobService', version='v201601')
  # Create list of all operations for the job.
  operations = []
  # Create AdGroupCriterionOperations to add keywords.
  for i in range(KEYWORD_NUMBER):
    keyword = 'mars%d' % i
    # Roughly 1 in 10 keywords is made invalid on purpose ('!!!') so the
    # example can demonstrate per-operation error reporting below.
    if random.randint(1, 10) == 1:
      keyword += '!!!'
    operations.append({
        'xsi_type': 'AdGroupCriterionOperation',
        'operator': 'ADD',
        'operand': {
            'xsi_type': 'BiddableAdGroupCriterion',
            'adGroupId': ad_group_id,
            'criterion': {
                'xsi_type': 'Keyword',
                'matchType': 'BROAD',
                'text': keyword
            }
        }
    })
  # You can specify up to 3 job IDs that must successfully complete before
  # this job can be processed.
  policy = {
      'prerequisiteJobIds': []
  }
  # Call mutate to create a new job.
  response = mutate_job_service.mutate(operations, policy)
  if not response:
    raise errors.GoogleAdsError('Failed to submit a job; aborting.')
  job_id = response['id']
  print 'Job with ID %s was successfully created.' % job_id
  # Create selector to retrieve job status and wait for it to complete.
  selector = {
      'xsi_type': 'BulkMutateJobSelector',
      'jobIds': [job_id]
  }
  time.sleep(RETRY_INTERVAL)
  # Poll for job status until it's finished.
  print 'Retrieving job status...'
  for i in range(RETRIES_COUNT):
    job_status_response = mutate_job_service.get(selector)
    status = job_status_response[0]['status']
    if status in ('COMPLETED', 'FAILED'):
      break
    print ('[%d] Current status is \'%s\', waiting %d seconds to retry...' %
           (i, status, RETRY_INTERVAL))
    time.sleep(RETRY_INTERVAL)
  if status == 'FAILED':
    raise errors.GoogleAdsError('Job failed with reason: \'%s\'' %
                                job_status_response[0]['failure_reason'])
  if status in ('PROCESSING', 'PENDING'):
    raise errors.GoogleAdsError('Job did not complete within %d seconds' %
                                (RETRY_INTERVAL * (RETRIES_COUNT - 1)))
  # Status must be COMPLETED.
  # Get the job result. Here we re-use the same selector.
  result_response = mutate_job_service.getResult(selector)
  # Output results.
  index = 0
  for result in result_response['SimpleMutateResult']['results']:
    # A 'PlaceHolder' entry marks an operation that did not succeed.
    if 'PlaceHolder' in result:
      print 'Operation [%d] - FAILED' % index
    else:
      print 'Operation [%d] - SUCCEEDED' % index
    index += 1
  # Output errors
  for error in result_response['SimpleMutateResult']['errors']:
    # The failing operation's index is embedded in the error's fieldPath,
    # e.g. 'operations[42].operand' -- extract it to map back to the keyword.
    index = int(re.search(INDEX_REGEX, error['fieldPath']).group(1))
    reason = error['reason']
    keyword = operations[index]['operand']['criterion']['text']
    print ('ERROR - keyword \'%s\' failed due to \'%s\'' %
           (keyword, reason))
# Entry point: credentials and properties are loaded from the googleads.yaml
# file in the user's home directory (see module docstring).
if __name__ == '__main__':
  # Initialize client object.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client, AD_GROUP_ID)
| 31.37037 | 79 | 0.674616 |
ace90f8237c74b2726acc762597b4369512be319 | 1,678 | py | Python | usage-based-subscriptions/server/python/report_usage.py | jamiehowarth0/subscription-use-cases | 90bce2f33da487f4d75fe9e12d4502540644dabb | [
"MIT"
] | 455 | 2020-06-11T04:33:59.000Z | 2022-03-31T16:33:31.000Z | usage-based-subscriptions/server/python/report_usage.py | jamiehowarth0/subscription-use-cases | 90bce2f33da487f4d75fe9e12d4502540644dabb | [
"MIT"
] | 122 | 2020-06-17T14:01:23.000Z | 2022-03-28T16:18:00.000Z | usage-based-subscriptions/server/python/report_usage.py | jamiehowarth0/subscription-use-cases | 90bce2f33da487f4d75fe9e12d4502540644dabb | [
"MIT"
] | 293 | 2020-06-11T20:34:26.000Z | 2022-03-22T17:29:55.000Z | #! /usr/bin/env python3.6
"""
report_usage.py
Stripe Recipe.
This code can be run on an interval (e.g., every 24 hours) for each active
metered subscription.
Python 3.6 or newer required.
"""
import os
import time
import uuid
import stripe
from dotenv import load_dotenv, find_dotenv
# Setup Stripe python client library
load_dotenv(find_dotenv())
stripe.api_key = os.getenv('STRIPE_SECRET_KEY')
#stripe.api_version = os.getenv('STRIPE_API_VERSION')
def report_usage():
    """Report the last day's metered usage for one subscription item.

    Pull the Stripe Subscription Item ID and the accumulated usage from your
    own records, then push a usage record to Stripe. Intended to be run on an
    interval (e.g. every 24 hours) per active metered subscription.
    """
    # Fill in from your own database: the Stripe Subscription Item ID for the
    # customer being reported. If you are not storing subscription item IDs,
    # retrieve the subscription and inspect its items:
    # https://stripe.com/docs/api/subscriptions/object#subscription_object-items.
    item_id = ''
    # The usage accumulated over the last 24 hours, from your own records.
    recorded_usage = 100
    # A fresh idempotency key makes this call safe to retry if it fails.
    retry_key = str(uuid.uuid4())
    reported_at = int(time.time())
    try:
        stripe.SubscriptionItem.create_usage_record(
            item_id,
            quantity=recorded_usage,
            timestamp=reported_at,
            action='set',
            idempotency_key=retry_key
        )
    except stripe.error.StripeError as err:
        print('Usage report failed for item ID %s with idempotency key %s: %s' % (item_id, retry_key, err))
# Entry point: report a single usage record when run as a script.
if __name__ == '__main__':
    report_usage()
| 32.901961 | 124 | 0.713349 |
ace91018ccd98f985fc6e8bb2facea9e44ec58c2 | 2,921 | py | Python | tests/components/test_input_boolean.py | davidedmundson/home-assistant | cd02563552ffc28239fa17c79a5d9bc0013bd5ac | [
"MIT"
] | null | null | null | tests/components/test_input_boolean.py | davidedmundson/home-assistant | cd02563552ffc28239fa17c79a5d9bc0013bd5ac | [
"MIT"
] | null | null | null | tests/components/test_input_boolean.py | davidedmundson/home-assistant | cd02563552ffc28239fa17c79a5d9bc0013bd5ac | [
"MIT"
] | 1 | 2018-11-20T17:44:08.000Z | 2018-11-20T17:44:08.000Z | """
tests.components.test_input_boolean
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests input_boolean component.
"""
# pylint: disable=too-many-public-methods,protected-access
import unittest
from homeassistant.components import input_boolean
from homeassistant.const import (
STATE_ON, STATE_OFF, ATTR_ICON, ATTR_FRIENDLY_NAME)
from tests.common import get_test_home_assistant
class TestInputBoolean(unittest.TestCase):
    """ Test the input boolean module. """
    def setUp(self):  # pylint: disable=invalid-name
        # Fresh Home Assistant test instance per test.
        self.hass = get_test_home_assistant()
    def tearDown(self):  # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        self.hass.stop()
    def test_config(self):
        """Test config."""
        # All three configs are invalid: missing body, empty dict, and an
        # entity slug containing a space.
        self.assertFalse(input_boolean.setup(self.hass, {
            'input_boolean': None
        }))
        self.assertFalse(input_boolean.setup(self.hass, {
            'input_boolean': {
            }
        }))
        self.assertFalse(input_boolean.setup(self.hass, {
            'input_boolean': {
                'name with space': None
            }
        }))
    def test_methods(self):
        """ Test is_on, turn_on, turn_off methods. """
        self.assertTrue(input_boolean.setup(self.hass, {
            'input_boolean': {
                'test_1': None,
            }
        }))
        entity_id = 'input_boolean.test_1'
        # Default state is off; toggle on then off and verify each transition.
        self.assertFalse(
            input_boolean.is_on(self.hass, entity_id))
        input_boolean.turn_on(self.hass, entity_id)
        self.hass.pool.block_till_done()
        self.assertTrue(
            input_boolean.is_on(self.hass, entity_id))
        input_boolean.turn_off(self.hass, entity_id)
        self.hass.pool.block_till_done()
        self.assertFalse(
            input_boolean.is_on(self.hass, entity_id))
    def test_config_options(self):
        """Test optional name/icon/initial config keys on entities."""
        count_start = len(self.hass.states.entity_ids())
        self.assertTrue(input_boolean.setup(self.hass, {
            'input_boolean': {
                'test_1': None,
                'test_2': {
                    'name': 'Hello World',
                    'icon': 'work',
                    'initial': True,
                },
            },
        }))
        # Exactly the two configured entities were added.
        self.assertEqual(count_start + 2, len(self.hass.states.entity_ids()))
        state_1 = self.hass.states.get('input_boolean.test_1')
        state_2 = self.hass.states.get('input_boolean.test_2')
        self.assertIsNotNone(state_1)
        self.assertIsNotNone(state_2)
        # Bare entity: off by default, no optional attributes.
        self.assertEqual(STATE_OFF, state_1.state)
        self.assertNotIn(ATTR_ICON, state_1.attributes)
        self.assertNotIn(ATTR_FRIENDLY_NAME, state_1.attributes)
        # Fully configured entity: initial state and attributes applied.
        self.assertEqual(STATE_ON, state_2.state)
        self.assertEqual('Hello World',
                         state_2.attributes.get(ATTR_FRIENDLY_NAME))
        self.assertEqual('work', state_2.attributes.get(ATTR_ICON))
ace910fd55d98ec9e157f14ad0b257552a82ffca | 28,737 | py | Python | designate/tests/unit/test_objects/test_base.py | ISCAS-VDI/designate-base | bd945607e3345fbef8645c3441e96b032b70b098 | [
"Apache-2.0"
] | null | null | null | designate/tests/unit/test_objects/test_base.py | ISCAS-VDI/designate-base | bd945607e3345fbef8645c3441e96b032b70b098 | [
"Apache-2.0"
] | null | null | null | designate/tests/unit/test_objects/test_base.py | ISCAS-VDI/designate-base | bd945607e3345fbef8645c3441e96b032b70b098 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from operator import attrgetter
import copy
import unittest
from oslo_log import log as logging
import mock
from oslo_serialization import jsonutils
import oslotest.base
import testtools
from designate import exceptions
from designate import objects
LOG = logging.getLogger(__name__)
class TestObject(objects.DesignateObject):
    """Minimal fixture object: two plain fields plus a self-referencing
    relation and a list relation, used to exercise (de)serialization and
    change tracking."""
    FIELDS = {
        'id': {},
        'name': {},
        'nested': {
            'relation': True,
            'relation_cls': 'TestObject',
        },
        'nested_list': {
            'relation': True,
            'relation_cls': 'TestObjectList',
        },
    }
class TestObjectDict(objects.DictObjectMixin, TestObject):
    """TestObject with dict-style access mixed in (no extra fields)."""
    pass
class TestObjectList(objects.ListObjectMixin, objects.DesignateObject):
    """List container whose items are TestObject instances."""
    LIST_ITEM_TYPE = TestObject
class TestValidatableObject(objects.DesignateObject):
    """Fixture with a JSON-schema-validated UUID 'id' (required) and a
    recursive 'nested' relation referencing this object's own schema."""
    FIELDS = {
        'id': {
            'schema': {
                'type': 'string',
                'format': 'uuid',
            },
            'required': True,
        },
        'nested': {
            'relation': True,
            'relation_cls': 'TestValidatableObject',
            'schema': {
                '$ref': 'obj://TestValidatableObject#/'
            }
        }
    }
class DesignateObjectTest(oslotest.base.BaseTestCase):
    """Exercise DesignateObject core behaviour: class lookup, primitive/dict
    (de)serialization, attribute access, change tracking, schema validation,
    deep copy, and equality."""
    def test_obj_cls_from_name(self):
        cls = objects.DesignateObject.obj_cls_from_name('TestObject')
        self.assertEqual(TestObject, cls)
        cls = objects.DesignateObject.obj_cls_from_name('TestObjectDict')
        self.assertEqual(TestObjectDict, cls)
        cls = objects.DesignateObject.obj_cls_from_name('TestObjectList')
        self.assertEqual(TestObjectList, cls)
    def test_from_primitive(self):
        primitive = {
            'designate_object.name': 'TestObject',
            'designate_object.data': {
                'id': 'MyID',
            },
            'designate_object.changes': [],
            'designate_object.original_values': {},
        }
        obj = objects.DesignateObject.from_primitive(primitive)
        # Validate it has been thawed correctly
        self.assertEqual('MyID', obj.id)
        # Ensure the ID field has a value
        self.assertTrue(obj.obj_attr_is_set('id'))
        # Ensure the name field has no value
        self.assertFalse(obj.obj_attr_is_set('name'))
        # Ensure the changes list is empty
        self.assertEqual(0, len(obj.obj_what_changed()))
    def test_from_primitive_recursive(self):
        primitive = {
            'designate_object.name': 'TestObject',
            'designate_object.data': {
                'id': 'MyID',
                'nested': {
                    'designate_object.name': 'TestObject',
                    'designate_object.data': {
                        'id': 'MyID-Nested',
                    },
                    'designate_object.changes': [],
                    'designate_object.original_values': {},
                }
            },
            'designate_object.changes': [],
            'designate_object.original_values': {},
        }
        obj = objects.DesignateObject.from_primitive(primitive)
        # Validate it has been thawed correctly
        self.assertEqual('MyID', obj.id)
        self.assertEqual('MyID-Nested', obj.nested.id)
    def test_from_dict(self):
        obj = TestObject.from_dict({
            'id': 'MyID',
        })
        # Validate it has been thawed correctly
        self.assertEqual('MyID', obj.id)
        # Ensure the ID field has a value
        self.assertTrue(obj.obj_attr_is_set('id'))
        # Ensure the name field has no value
        self.assertFalse(obj.obj_attr_is_set('name'))
        # Ensure the changes list has one entry for the id field
        self.assertEqual(set(['id']), obj.obj_what_changed())
    def test_from_dict_recursive(self):
        obj = TestObject.from_dict({
            'id': 'MyID',
            'nested': {
                'id': 'MyID-Nested',
            },
        })
        # Validate it has been thawed correctly
        self.assertEqual('MyID', obj.id)
        self.assertEqual('MyID-Nested', obj.nested.id)
        # Ensure the changes list has two entries, one for the id field and the
        # other for the nested field
        self.assertEqual(set(['id', 'nested']), obj.obj_what_changed())
        # Ensure the changes list has one entry for the id field
        self.assertEqual(set(['id']), obj.nested.obj_what_changed())
    def test_from_dict_nested_list(self):
        obj = TestObject.from_dict({
            'id': 'MyID',
            'nested_list': [{
                'id': 'MyID-Nested1',
            }, {
                'id': 'MyID-Nested2',
            }],
        })
        # Validate it has been thawed correctly
        self.assertEqual('MyID', obj.id)
        self.assertEqual('MyID-Nested1', obj.nested_list[0].id)
        self.assertEqual('MyID-Nested2', obj.nested_list[1].id)
        # Ensure the changes list has two entries, one for the id field and the
        # other for the nested field
        self.assertEqual(set(['id', 'nested_list']), obj.obj_what_changed())
    def test_from_list(self):
        # from_list is not implemented on non-list objects.
        with testtools.ExpectedException(NotImplementedError):
            TestObject.from_list([])
    def test_get_schema(self):
        obj = TestValidatableObject()
        obj.id = 'ffded5c4-e4f6-4e02-a175-48e13c5c12a0'
        obj.validate()
        # Validation lazily attaches the compiled schema validator.
        self.assertTrue(hasattr(obj, '_obj_validator'))
        expected = {
            'description': 'Designate TestValidatableObject Object',
            'title': 'TestValidatableObject', 'required': ['id'],
            'additionalProperties': False,
            '$schema': 'http://json-schema.org/draft-04/hyper-schema',
            'type': 'object',
            'properties': {
                'id': {
                    'type': 'string', 'format': 'uuid'
                }
            }
        }
        schema = obj._obj_validator.schema
        self.assertEqual(expected, schema)
        with testtools.ExpectedException(AttributeError):  # bug
            schema = obj.obj_get_schema()
    @unittest.expectedFailure  # bug
    def test__schema_ref_resolver(self):
        from designate.objects.base import _schema_ref_resolver
        _schema_ref_resolver(
            'obj://TestValidatableObject#/subpathA/subpathB')
    def test_init_invalid(self):
        # Unknown field names are rejected at construction time.
        with testtools.ExpectedException(TypeError):
            TestObject(extra_field='Fail')
    def test_hasattr(self):
        obj = TestObject()
        # Success Cases
        self.assertTrue(hasattr(obj, 'id'),
                        "Should have id attribute")
        self.assertTrue(hasattr(obj, 'name'),
                        "Should have name attribute")
        # Failure Cases
        self.assertFalse(hasattr(obj, 'email'),
                         "Should not have email attribute")
        self.assertFalse(hasattr(obj, 'names'),
                         "Should not have names attribute")
    def test_setattr(self):
        # Each assignment is recorded in the change set.
        obj = TestObject()
        obj.id = 'MyID'
        self.assertEqual('MyID', obj.id)
        self.assertEqual(1, len(obj.obj_what_changed()))
        obj.name = 'MyName'
        self.assertEqual('MyName', obj.name)
        self.assertEqual(2, len(obj.obj_what_changed()))
    def test_setattr_neg(self):
        # Assigning an undeclared field raises.
        obj = TestObject()
        with testtools.ExpectedException(AttributeError):
            obj.badthing = 'demons'
    def test_to_primitive(self):
        obj = TestObject(id='MyID')
        # Ensure only the id attribute is returned
        primitive = obj.to_primitive()
        expected = {
            'designate_object.name': 'TestObject',
            'designate_object.data': {
                'id': 'MyID',
            },
            'designate_object.changes': ['id'],
            'designate_object.original_values': {},
        }
        self.assertEqual(expected, primitive)
        # Set the name attribute to a None value
        obj.name = None
        # Ensure both the id and name attributes are returned
        primitive = obj.to_primitive()
        expected = {
            'designate_object.name': 'TestObject',
            'designate_object.data': {
                'id': 'MyID',
                'name': None,
            },
            'designate_object.changes': ['id', 'name'],
            'designate_object.original_values': {},
        }
        self.assertEqual(expected, primitive)
    def test_to_primitive_recursive(self):
        obj = TestObject(id='MyID', nested=TestObject(id='MyID-Nested'))
        # Ensure only the id attribute is returned
        primitive = obj.to_primitive()
        expected = {
            'designate_object.name': 'TestObject',
            'designate_object.data': {
                'id': 'MyID',
                'nested': {
                    'designate_object.name': 'TestObject',
                    'designate_object.data': {
                        'id': 'MyID-Nested',
                    },
                    'designate_object.changes': ['id'],
                    'designate_object.original_values': {},
                }
            },
            'designate_object.changes': ['id', 'nested'],
            'designate_object.original_values': {},
        }
        self.assertEqual(expected, primitive)
    def test_to_dict(self):
        obj = TestObject(id='MyID')
        # Ensure only the id attribute is returned
        dict_ = obj.to_dict()
        expected = {
            'id': 'MyID',
        }
        self.assertEqual(expected, dict_)
        # Set the name attribute to a None value
        obj.name = None
        # Ensure both the id and name attributes are returned
        dict_ = obj.to_dict()
        expected = {
            'id': 'MyID',
            'name': None,
        }
        self.assertEqual(expected, dict_)
    def test_to_dict_recursive(self):
        obj = TestObject(id='MyID', nested=TestObject(id='MyID-Nested'))
        # Ensure only the id attribute is returned
        dict_ = obj.to_dict()
        expected = {
            'id': 'MyID',
            'nested': {
                'id': 'MyID-Nested',
            },
        }
        self.assertEqual(expected, dict_)
    def test_update(self):
        obj = TestObject(id='MyID', name='test')
        obj.update({'id': 'new_id', 'name': 'new_name'})
        self.assertEqual('new_id', obj.id)
        self.assertEqual('new_name', obj.name)
    def test_update_unexpected_attribute(self):
        obj = TestObject(id='MyID', name='test')
        with testtools.ExpectedException(AttributeError):
            obj.update({'id': 'new_id', 'new_key': 3})
    def test_is_valid(self):
        obj = TestValidatableObject(id='MyID')
        # ID should be a UUID, So - Not Valid.
        self.assertFalse(obj.is_valid)
        # Correct the ID field
        obj.id = 'ffded5c4-e4f6-4e02-a175-48e13c5c12a0'
        # ID is now a UUID, So - Valid.
        self.assertTrue(obj.is_valid)
    def test_is_valid_recursive(self):
        obj = TestValidatableObject(
            id='MyID',
            nested=TestValidatableObject(id='MyID'))
        # ID should be a UUID, So - Not Valid.
        self.assertFalse(obj.is_valid)
        # Correct the outer objects ID field
        obj.id = 'ffded5c4-e4f6-4e02-a175-48e13c5c12a0'
        # Outer ID is now a UUID, Nested ID is Not. So - Invalid.
        self.assertFalse(obj.is_valid)
        # Correct the nested objects ID field
        obj.nested.id = 'ffded5c4-e4f6-4e02-a175-48e13c5c12a0'
        # Outer and Nested IDs are now UUIDs. So - Valid.
        self.assertTrue(obj.is_valid)
    def test_validate(self):
        obj = TestValidatableObject()
        # ID is required, so the object is not valid
        with testtools.ExpectedException(exceptions.InvalidObject):
            obj.validate()
        # Set the ID field to an invalid value
        obj.id = 'MyID'
        # ID is now set, but to an invalid value, still invalid
        with testtools.ExpectedException(exceptions.InvalidObject):
            obj.validate()
        # Set the ID field to a valid value
        obj.id = 'ffded5c4-e4f6-4e02-a175-48e13c5c12a0'
        obj.validate()
    def test_validate_recursive(self):
        obj = TestValidatableObject(
            id='MyID',
            nested=TestValidatableObject(id='MyID'))
        # ID should be a UUID, So - Invalid.
        with testtools.ExpectedException(exceptions.InvalidObject):
            obj.validate()
        # Correct the outer objects ID field
        obj.id = 'ffded5c4-e4f6-4e02-a175-48e13c5c12a0'
        # Outer ID is now set, Inner ID is not, still invalid.
        e = self.assertRaises(exceptions.InvalidObject, obj.validate)
        # Ensure we have exactly one error and fetch it
        self.assertEqual(1, len(e.errors))
        error = e.errors.pop(0)
        # Ensure the format validator has triggered the failure.
        self.assertEqual('format', error.validator)
        # Ensure the nested ID field has triggered the failure.
        # For some reason testtools turns lists into deques :/
        self.assertEqual(error.path, ['nested', 'id'])
        # Set the Nested ID field to a valid value
        obj.nested.id = 'ffded5c4-e4f6-4e02-a175-48e13c5c12a0'
        obj.validate()
    def test_obj_attr_is_set(self):
        obj = TestObject()
        self.assertFalse(obj.obj_attr_is_set('name'))
        obj.name = "My Name"
        self.assertTrue(obj.obj_attr_is_set('name'))
    def test_obj_what_changed(self):
        obj = TestObject()
        self.assertEqual(set([]), obj.obj_what_changed())
        obj.name = "My Name"
        self.assertEqual(set(['name']), obj.obj_what_changed())
    def test_obj_get_changes(self):
        obj = TestObject()
        self.assertEqual({}, obj.obj_get_changes())
        obj.name = "My Name"
        self.assertEqual({'name': "My Name"}, obj.obj_get_changes())
    def test_obj_reset_changes(self):
        obj = TestObject()
        obj.name = "My Name"
        self.assertEqual(1, len(obj.obj_what_changed()))
        obj.obj_reset_changes()
        self.assertEqual(0, len(obj.obj_what_changed()))
    def test_obj_reset_changes_subset(self):
        obj = TestObject()
        obj.id = "My ID"
        obj.name = "My Name"
        self.assertEqual(2, len(obj.obj_what_changed()))
        # Resetting a subset leaves the other field marked as changed.
        obj.obj_reset_changes(['id'])
        self.assertEqual(1, len(obj.obj_what_changed()))
        self.assertEqual({'name': "My Name"}, obj.obj_get_changes())
    def test_obj_get_original_value(self):
        # Create an object
        obj = TestObject()
        obj.id = "My ID"
        obj.name = "My Name"
        # Rset one of the changes
        obj.obj_reset_changes(['id'])
        # Update the reset field
        obj.id = "My New ID"
        # Ensure the "current" value is correct
        self.assertEqual("My New ID", obj.id)
        # Ensure the "original" value is correct
        self.assertEqual("My ID", obj.obj_get_original_value('id'))
        self.assertEqual("My Name", obj.obj_get_original_value('name'))
        # Update the reset field again
        obj.id = "My New New ID"
        # Ensure the "current" value is correct
        self.assertEqual("My New New ID", obj.id)
        # Ensure the "original" value is still correct
        self.assertEqual("My ID", obj.obj_get_original_value('id'))
        self.assertEqual("My Name", obj.obj_get_original_value('name'))
        # Ensure a KeyError is raised when value exists
        with testtools.ExpectedException(KeyError):
            obj.obj_get_original_value('nested')
    def test_deepcopy(self):
        # Create the Original object
        o_obj = TestObject()
        o_obj.id = "My ID"
        o_obj.name = "My Name"
        # Clear the "changed" flag for one of the two fields we set
        o_obj.obj_reset_changes(['name'])
        # Deepcopy the object
        c_obj = copy.deepcopy(o_obj)
        # Ensure the copy was successful
        self.assertEqual(o_obj.id, c_obj.id)
        self.assertEqual(o_obj.name, c_obj.name)
        self.assertEqual(o_obj.obj_attr_is_set('nested'),
                         c_obj.obj_attr_is_set('nested'))
        self.assertEqual(o_obj.obj_get_changes(), c_obj.obj_get_changes())
        self.assertEqual(o_obj.to_primitive(), c_obj.to_primitive())
    def test_eq(self):
        # Create two equal objects
        obj_one = TestObject(id="My ID", name="My Name")
        obj_two = TestObject(id="My ID", name="My Name")
        # Ensure they evaluate to equal
        self.assertEqual(obj_one, obj_two)
        # Change a value on one object
        obj_two.name = 'Other Name'
        # Ensure they do not evaluate to equal
        self.assertNotEqual(obj_one, obj_two)
    def test_eq_false(self):
        # Comparison against a non-DesignateObject must be False, not raise.
        obj = TestObject(id="My ID", name="My Name")
        self.assertFalse(obj == tuple())
        self.assertNotEqual(obj, tuple())
    # NOTE(review): test_ne is currently a line-for-line duplicate of test_eq;
    # it does not exercise __ne__ beyond what test_eq already covers.
    def test_ne(self):
        # Create two equal objects
        obj_one = TestObject(id="My ID", name="My Name")
        obj_two = TestObject(id="My ID", name="My Name")
        # Ensure they evaluate to equal
        self.assertEqual(obj_one, obj_two)
        # Change a value on one object
        obj_two.name = 'Other Name'
        # Ensure they do not evaluate to equal
        self.assertNotEqual(obj_one, obj_two)
class DictObjectMixinTest(oslotest.base.BaseTestCase):
    """Exercise DictObjectMixin: dict-style get/set/contains/iteration and
    JSON serialization of a DesignateObject."""
    def test_cast_to_dict(self):
        # Create an object
        obj = TestObjectDict()
        obj.id = "My ID"
        obj.name = "My Name"
        expected = {
            'id': 'My ID',
            'name': 'My Name',
        }
        self.assertEqual(expected, dict(obj))
    # NOTE(review): method name has a typo ('gititem' vs 'getitem'); left
    # unchanged to avoid renaming a discovered test.
    def test_gititem(self):
        obj = TestObjectDict(name=1)
        self.assertEqual(1, obj['name'])
    def test_setitem(self):
        obj = TestObjectDict()
        obj['name'] = 1
        self.assertEqual(1, obj.name)
    def test_contains(self):
        obj = TestObjectDict(name=1)
        self.assertIn('name', obj)
    def test_get(self):
        obj = TestObjectDict(name=1)
        v = obj.get('name')
        self.assertEqual(1, v)
    def test_get_missing(self):
        # get() on an undeclared field raises rather than returning None.
        obj = TestObjectDict(name=1)
        self.assertFalse(obj.obj_attr_is_set('foo'))
        with testtools.ExpectedException(AttributeError):
            obj.get('foo')
    def test_get_default(self):
        obj = TestObjectDict(name='n')
        v = obj.get('name', default='default')
        self.assertEqual('n', v)
    def test_get_default_with_patch(self):
        # Force obj_attr_is_set to report unset so the default is returned.
        obj = TestObjectDict(name='v')
        fname = 'designate.objects.base.DesignateObject.obj_attr_is_set'
        with mock.patch(fname) as attr_is_set:
            attr_is_set.return_value = False
            v = obj.get('name', default='default')
            self.assertEqual('default', v)
    def test_iteritems(self):
        obj = TestObjectDict(name=None, id=1)
        items = tuple(obj.items())
        self.assertEqual(
            [('id', 1), ('name', None)],
            sorted(items)
        )
    def test_jsonutils_to_primitive(self):
        # The mixin makes the object serializable by oslo jsonutils.
        obj = TestObjectDict(name="foo")
        dumped = jsonutils.to_primitive(obj, convert_instances=True)
        self.assertIsInstance(dumped, dict)
        self.assertEqual('foo', dumped['name'])
class ListObjectMixinTest(oslotest.base.BaseTestCase):
    def test_from_primitive(self):
        """Thawing a list primitive yields typed item objects in order."""
        primitive = {
            'designate_object.name': 'TestObjectList',
            'designate_object.data': {
                'objects': [
                    {'designate_object.changes': ['id'],
                     'designate_object.data': {'id': 'One'},
                     'designate_object.name': 'TestObject',
                     'designate_object.original_values': {}},
                    {'designate_object.changes': ['id'],
                     'designate_object.data': {'id': 'Two'},
                     'designate_object.name': 'TestObject',
                     'designate_object.original_values': {}},
                ],
            },
            'designate_object.changes': ['objects'],
            'designate_object.original_values': {},
        }
        obj = objects.DesignateObject.from_primitive(primitive)
        # len(list_obj) delegates to the wrapped .objects list.
        self.assertEqual(2, len(obj))
        self.assertEqual(2, len(obj.objects))
        self.assertIsInstance(obj[0], TestObject)
        self.assertIsInstance(obj[1], TestObject)
        self.assertEqual('One', obj[0].id)
        self.assertEqual('Two', obj[1].id)
    def test_cast_to_list(self):
        """list(list_obj) yields the contained item objects."""
        # Create a few objects
        obj_one = TestObject()
        obj_one.id = "One"
        obj_two = TestObject()
        obj_two.id = "Two"
        # Create a ListObject
        obj = TestObjectList(objects=[obj_one, obj_two])
        expected = [obj_one, obj_two]
        self.assertEqual(expected, list(obj))
    def test_to_primitive(self):
        # Create a few objects
        obj_one = TestObject()
        obj_one.id = "One"
        obj_two = TestObject()
        obj_two.id = "Two"
        # Create a ListObject
        obj = TestObjectList(objects=[obj_one, obj_two])
        primitive = obj.to_primitive()
        expected = {
            'designate_object.name': 'TestObjectList',
            'designate_object.data': {
                'objects': [
                    {'designate_object.changes': ['id'],
                     'designate_object.data': {'id': 'One'},
                     'designate_object.name': 'TestObject',
                     'designate_object.original_values': {}},
                    {'designate_object.changes': ['id'],
                     'designate_object.data': {'id': 'Two'},
                     'designate_object.name': 'TestObject',
                     'designate_object.original_values': {}},
                ],
            },
            'designate_object.changes': ['objects'],
            'designate_object.original_values': {},
        }
        self.assertEqual(expected, primitive)
    def test_to_primitive_nested_obj(self):
        """An object used as a field value is itself serialized recursively."""
        # Create a few objects
        obj_one = TestObject()
        obj_two = TestObject()
        obj_two.id = "Two"
        obj_one.id = obj_two
        # Create a ListObject
        obj = TestObjectList(objects=[obj_one, obj_two])
        primitive = obj.to_primitive()
        expected = {
            'designate_object.name': 'TestObjectList',
            'designate_object.changes': ['objects'],
            'designate_object.data': {
                'objects': [
                    {'designate_object.changes': ['id'],
                     'designate_object.data': {'id':
                         {'designate_object.changes': ['id'],
                          'designate_object.data': {'id': 'Two'},
                          'designate_object.name': 'TestObject',
                          'designate_object.original_values': {}}},
                     'designate_object.name': 'TestObject',
                     'designate_object.original_values': {}},
                    {'designate_object.changes': ['id'],
                     'designate_object.data': {'id': 'Two'},
                     'designate_object.name': 'TestObject',
                     'designate_object.original_values': {}}]},
            'designate_object.original_values': {}}
        self.assertEqual(expected, primitive)
def test_obj_what_changed(self):
# Create a few objects
obj_one = TestObject()
obj_two = TestObject()
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
# Make sure there are no changes
obj.obj_reset_changes()
changes = obj.obj_what_changed()
expected = set([])
self.assertEqual(expected, changes)
# Make some changes
obj_one.id = "One"
obj_two.id = "Two"
changes = obj.obj_what_changed()
expected = set(['objects'])
self.assertEqual(expected, changes)
def test_get_slice(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
theslice = obj[1:]
expected = TestObjectList(objects=[obj_two])
self.assertEqual(expected.objects, theslice.objects)
self.assertNotEqual(obj.objects, theslice.objects)
def test_setitem(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
obj[1] = obj_one
self.assertEqual(obj.objects, [obj_one, obj_one])
def test_contains(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
obj_three = TestObject(id="Three")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
self.assertIn(obj_one, obj)
self.assertIn(obj_two, obj)
self.assertNotIn(obj_three, obj)
def test_extend(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
obj_three = TestObject(id="Three")
# Create a ListObject
ext_obj = TestObjectList(objects=[obj_one])
obj = TestObjectList(objects=[obj_one, obj_two, obj_three])
ext_obj.extend([obj_two, obj_three])
self.assertEqual(obj.objects, ext_obj.objects)
def test_insert(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
obj_three = TestObject(id="Three")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_three])
obj.insert(1, obj_two)
self.assertEqual([obj_one, obj_two, obj_three], obj.objects)
def test_remove(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two])
obj.remove(obj_one)
self.assertEqual([obj_two], obj.objects)
def test_index(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
obj_three = TestObject(id="Three")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two, obj_three])
self.assertEqual(1, obj.index(obj_two))
def test_count(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_two = TestObject(id="Two")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_two, obj_two])
self.assertEqual(2, obj.count(obj_two))
def test_sort(self):
# Create a few objects
obj_one = TestObject(id=1)
obj_two = TestObject(id=2)
obj_three = TestObject(id=3)
# Create a ListObject
obj = TestObjectList(objects=[obj_two, obj_three, obj_one])
obj.sort(key=attrgetter('id'))
self.assertEqual([obj_one, obj_two, obj_three], obj.objects)
def test_to_dict(self):
# Create a ListObject containing a DesignateObject
obj_one = objects.DesignateObject()
obj = TestObjectList(objects=obj_one)
dict_ = obj.to_dict()
expected = {'objects': {}}
self.assertEqual(expected, dict_)
def test_to_dict_list_mixin(self):
# Create a ListObject containing an ObjectList
obj = TestObjectList(objects=TestObjectList())
dict_ = obj.to_dict()
expected = {'objects': []}
self.assertEqual(expected, dict_)
def test_to_list(self):
# Create a few objects
obj_one = TestObject(id="One")
obj_three = TestObject(id="Three")
# Create a ListObject
obj = TestObjectList(objects=[obj_one, obj_three])
li = obj.to_list()
self.assertEqual([{'id': 'One'}, {'id': 'Three'}], li)
| 31.613861 | 79 | 0.582176 |
ace9118d0dbfec0bedb3b72237f78bb3e9e2fbe5 | 17,280 | py | Python | MinkowskiEngine/MinkowskiTensorField.py | cnheider/MinkowskiEngine | ae6db31203ba012df2f695576e2d3819d49bf2d7 | [
"MIT"
] | null | null | null | MinkowskiEngine/MinkowskiTensorField.py | cnheider/MinkowskiEngine | ae6db31203ba012df2f695576e2d3819d49bf2d7 | [
"MIT"
] | null | null | null | MinkowskiEngine/MinkowskiTensorField.py | cnheider/MinkowskiEngine | ae6db31203ba012df2f695576e2d3819d49bf2d7 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import numpy as np
from collections import Sequence
from typing import Union, List, Tuple
import torch
from MinkowskiCommon import convert_to_int_list, StrideType
from MinkowskiEngineBackend._C import (
GPUMemoryAllocatorType,
MinkowskiAlgorithm,
CoordinateMapKey,
CoordinateMapType,
)
from MinkowskiCoordinateManager import CoordinateManager
from MinkowskiTensor import (
SparseTensorOperationMode,
SparseTensorQuantizationMode,
Tensor,
sparse_tensor_operation_mode,
global_coordinate_manager,
set_global_coordinate_manager,
COORDINATE_MANAGER_DIFFERENT_ERROR,
COORDINATE_KEY_DIFFERENT_ERROR,
)
from MinkowskiSparseTensor import SparseTensor
from sparse_matrix_functions import MinkowskiSPMMFunction, MinkowskiSPMMAverageFunction
from MinkowskiPooling import MinkowskiDirectMaxPoolingFunction
class TensorField(Tensor):
    def __init__(
        self,
        features: torch.Tensor,
        coordinates: torch.Tensor = None,
        # optional coordinate related arguments
        tensor_stride: StrideType = 1,
        coordinate_field_map_key: CoordinateMapKey = None,
        coordinate_manager: CoordinateManager = None,
        quantization_mode: SparseTensorQuantizationMode = SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
        # optional manager related arguments
        allocator_type: GPUMemoryAllocatorType = None,
        minkowski_algorithm: MinkowskiAlgorithm = None,
        requires_grad=None,
        device=None,
    ):
        r"""
        Args:
            :attr:`features` (:attr:`torch.FloatTensor`,
            :attr:`torch.DoubleTensor`, :attr:`torch.cuda.FloatTensor`, or
            :attr:`torch.cuda.DoubleTensor`): The features of a sparse
            tensor.
            :attr:`coordinates` (:attr:`torch.IntTensor`): The coordinates
            associated to the features. If not provided, :attr:`coordinate_map_key`
            must be provided.
            :attr:`tensor_stride` (:attr:`int`, :attr:`list`,
            :attr:`numpy.array`, or :attr:`tensor.Tensor`): The tensor stride
            of the current sparse tensor. By default, it is 1.
            :attr:`coordinate_field_map_key`
            (:attr:`MinkowskiEngine.CoordinateMapKey`): When the coordinates
            are already cached in the MinkowskiEngine, we could reuse the same
            coordinate map by simply providing the coordinate map key. In most
            case, this process is done automatically. When you provide a
            `coordinate_field_map_key`, `coordinates` will be be ignored.
            :attr:`coordinate_manager`
            (:attr:`MinkowskiEngine.CoordinateManager`): The MinkowskiEngine
            manages all coordinate maps using the `_C.CoordinateMapManager`. If
            not provided, the MinkowskiEngine will create a new computation
            graph. In most cases, this process is handled automatically and you
            do not need to use this.
            :attr:`quantization_mode`
            (:attr:`MinkowskiEngine.SparseTensorQuantizationMode`): Defines how
            continuous coordinates will be quantized to define a sparse tensor.
            Please refer to :attr:`SparseTensorQuantizationMode` for details.
            :attr:`allocator_type`
            (:attr:`MinkowskiEngine.GPUMemoryAllocatorType`): Defines the GPU
            memory allocator type. By default, it uses the c10 allocator.
            :attr:`minkowski_algorithm`
            (:attr:`MinkowskiEngine.MinkowskiAlgorithm`): Controls the mode the
            minkowski engine runs, Use
            :attr:`MinkowskiAlgorithm.MEMORY_EFFICIENT` if you want to reduce
            the memory footprint. Or use
            :attr:`MinkowskiAlgorithm.SPEED_OPTIMIZED` if you want to make it
            run fasterat the cost of more memory.
            :attr:`requires_grad` (:attr:`bool`): Set the requires_grad flag.
            :attr:`device` (:attr:`torch.device`): Set the device the sparse
            tensor is defined.
        """
        # Type checks
        assert isinstance(features, torch.Tensor), "Features must be a torch.Tensor"
        assert (
            features.ndim == 2
        ), f"The feature should be a matrix, The input feature is an order-{features.ndim} tensor."
        assert isinstance(quantization_mode, SparseTensorQuantizationMode)
        # Only a subset of the quantization modes makes sense for fields.
        assert quantization_mode in [
            SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
            SparseTensorQuantizationMode.UNWEIGHTED_SUM,
            SparseTensorQuantizationMode.RANDOM_SUBSAMPLE,
            SparseTensorQuantizationMode.MAX_POOL,
        ], "invalid quantization mode"
        self.quantization_mode = quantization_mode
        if coordinates is not None:
            assert isinstance(coordinates, torch.Tensor)
        if coordinate_field_map_key is not None:
            assert isinstance(coordinate_field_map_key, CoordinateMapKey)
        if coordinate_manager is not None:
            assert isinstance(coordinate_manager, CoordinateManager)
        # Without raw coordinates, a (key, manager) pair is mandatory to
        # locate the already-cached coordinate field.
        if coordinates is None and (
            coordinate_field_map_key is None or coordinate_manager is None
        ):
            raise ValueError(
                "Either coordinates or (coordinate_field_map_key, coordinate_manager) pair must be provided."
            )
        Tensor.__init__(self)
        # To device
        if device is not None:
            features = features.to(device)
            if coordinates is not None:
                # assertion check for the map key done later
                coordinates = coordinates.to(device)
        # Spatial dimension: derived from the coordinate matrix (first
        # column is the batch index) or taken from the manager.
        self._D = (
            coordinates.size(1) - 1 if coordinates is not None else coordinate_manager.D
        )
        ##########################
        # Setup CoordsManager
        ##########################
        if coordinate_manager is None:
            # If set to share the coords man, use the global coords man
            if (
                sparse_tensor_operation_mode()
                == SparseTensorOperationMode.SHARE_COORDINATE_MANAGER
            ):
                coordinate_manager = global_coordinate_manager()
                if coordinate_manager is None:
                    coordinate_manager = CoordinateManager(
                        D=self._D,
                        coordinate_map_type=CoordinateMapType.CUDA
                        if coordinates.is_cuda
                        else CoordinateMapType.CPU,
                        allocator_type=allocator_type,
                        minkowski_algorithm=minkowski_algorithm,
                    )
                    set_global_coordinate_manager(coordinate_manager)
            else:
                coordinate_manager = CoordinateManager(
                    D=coordinates.size(1) - 1,
                    coordinate_map_type=CoordinateMapType.CUDA
                    if coordinates.is_cuda
                    else CoordinateMapType.CPU,
                    allocator_type=allocator_type,
                    minkowski_algorithm=minkowski_algorithm,
                )
        self._manager = coordinate_manager
        ##########################
        # Initialize coords
        ##########################
        # Coordinate Management
        if coordinates is not None:
            assert (
                features.shape[0] == coordinates.shape[0]
            ), "The number of rows in features and coordinates must match."
            assert (
                features.is_cuda == coordinates.is_cuda
            ), "Features and coordinates must have the same backend."
            coordinate_field_map_key = CoordinateMapKey(
                convert_to_int_list(tensor_stride, self._D), ""
            )
            # Register the (float) coordinates with the manager; the returned
            # key identifies this coordinate field from now on.
            coordinate_field_map_key = self._manager.insert_field(
                coordinates.float(), convert_to_int_list(tensor_stride, self._D), ""
            )
        else:
            assert (
                coordinate_field_map_key.is_key_set()
            ), "The coordinate field map key must be valid."
        if requires_grad is not None:
            features.requires_grad_(requires_grad)
        self._F = features
        self._C = coordinates
        self.coordinate_field_map_key = coordinate_field_map_key
        self._batch_rows = None
        self._inverse_mapping = {}
@property
def C(self):
r"""The alias of :attr:`coords`."""
return self.coordinates
@property
def coordinates(self):
r"""
The coordinates of the current sparse tensor. The coordinates are
represented as a :math:`N \times (D + 1)` dimensional matrix where
:math:`N` is the number of points in the space and :math:`D` is the
dimension of the space (e.g. 3 for 3D, 4 for 3D + Time). Additional
dimension of the column of the matrix C is for batch indices which is
internally treated as an additional spatial dimension to disassociate
different instances in a batch.
"""
if self._C is None:
self._C = self._get_coordinate_field()
return self._C
@property
def _batchwise_row_indices(self):
if self._batch_rows is None:
batch_inds = torch.unique(self._C[:, 0])
self._batch_rows = [self._C[:, 0] == b for b in batch_inds]
return self._batch_rows
    def _get_coordinate_field(self):
        # Resolve this field's raw coordinates from the coordinate manager.
        return self._manager.get_coordinate_field(self.coordinate_field_map_key)
    def sparse(
        self,
        tensor_stride: Union[int, Sequence, np.array] = 1,
        coordinate_map_key: CoordinateMapKey = None,
        quantization_mode=None,
    ):
        r"""Converts the current sparse tensor field to a sparse tensor.

        Features of field points that quantize to the same voxel are merged
        according to ``quantization_mode`` (sum, average, random subsample,
        or max-pool). The field-to-sparse inverse mapping is cached under
        the resulting ``coordinate_map_key`` for later :meth:`inverse_mapping`
        / slice calls.
        """
        if quantization_mode is None:
            quantization_mode = self.quantization_mode
        if coordinate_map_key is None:
            # No target map given: quantize the field at the requested
            # tensor stride and create a new sparse coordinate map.
            tensor_stride = convert_to_int_list(tensor_stride, self.D)
            coordinate_map_key, (
                unique_index,
                inverse_mapping,
            ) = self._manager.field_to_sparse_insert_and_map(
                self.coordinate_field_map_key,
                tensor_stride,
            )
            N_rows = len(unique_index)
        else:
            # sparse index, field index
            inverse_mapping, unique_index = self._manager.field_to_sparse_map(
                self.coordinate_field_map_key,
                coordinate_map_key,
            )
            N_rows = self._manager.size(coordinate_map_key)
        self._inverse_mapping[coordinate_map_key] = inverse_mapping
        if quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_SUM:
            # Sum features per voxel via a sparse matrix multiply.
            spmm = MinkowskiSPMMFunction()
            N = len(self._F)
            cols = torch.arange(
                N,
                dtype=inverse_mapping.dtype,
                device=inverse_mapping.device,
            )
            vals = torch.ones(N, dtype=self._F.dtype, device=self._F.device)
            size = torch.Size([N_rows, len(inverse_mapping)])
            features = spmm.apply(inverse_mapping, cols, vals, size, self._F)
        elif quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
            # Average features per voxel.
            spmm_avg = MinkowskiSPMMAverageFunction()
            N = len(self._F)
            cols = torch.arange(
                N,
                dtype=inverse_mapping.dtype,
                device=inverse_mapping.device,
            )
            size = torch.Size([N_rows, len(inverse_mapping)])
            features = spmm_avg.apply(inverse_mapping, cols, size, self._F)
        elif quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
            # Keep one representative feature row per voxel.
            features = self._F[unique_index]
        elif quantization_mode == SparseTensorQuantizationMode.MAX_POOL:
            # Channel-wise max over the features mapping to each voxel.
            N = len(self._F)
            in_map = torch.arange(
                N,
                dtype=inverse_mapping.dtype,
                device=inverse_mapping.device,
            )
            features = MinkowskiDirectMaxPoolingFunction().apply(
                in_map, inverse_mapping, self._F, N_rows
            )
        else:
            # No quantization
            raise ValueError("Invalid quantization mode")
        sparse_tensor = SparseTensor(
            features,
            coordinate_map_key=coordinate_map_key,
            coordinate_manager=self._manager,
        )
        return sparse_tensor
    def inverse_mapping(self, sparse_tensor_map_key: CoordinateMapKey):
        """Return the field-to-sparse inverse mapping for the given sparse key.

        Results are cached in ``self._inverse_mapping``. If no direct
        field-to-sparse mapping exists, the method tries to compose one via
        a stride-1 sparse map; otherwise it raises ``ValueError`` asking the
        caller to run :meth:`sparse` first.
        """
        if sparse_tensor_map_key not in self._inverse_mapping:
            if not self._manager.exists_field_to_sparse(
                self.coordinate_field_map_key, sparse_tensor_map_key
            ):
                # No direct mapping: look for a stride-1 sparse map derived
                # from this field to route through.
                sparse_keys = self.coordinate_manager.field_to_sparse_keys(
                    self.coordinate_field_map_key
                )
                one_key = None
                for key in sparse_keys:
                    if np.prod(key.get_tensor_stride()) == 1:
                        one_key = key
                if one_key is not None:
                    if one_key not in self._inverse_mapping:
                        (
                            _,
                            self._inverse_mapping[one_key],
                        ) = self._manager.get_field_to_sparse_map(
                            self.coordinate_field_map_key, one_key
                        )
                    # Compose field->stride1 with stride1->target maps.
                    _, stride_map = self.coordinate_manager.stride_map(
                        one_key, sparse_tensor_map_key
                    )
                    field_map = self._inverse_mapping[one_key]
                    self._inverse_mapping[sparse_tensor_map_key] = stride_map[field_map]
                else:
                    raise ValueError(
                        f"The field to sparse tensor mapping does not exists for the key: {sparse_tensor_map_key}. Please run TensorField.sparse() before you call slice."
                    )
            else:
                # Extract the mapping
                (
                    self._inverse_mapping[sparse_tensor_map_key],
                    _,
                ) = self._manager.get_field_to_sparse_map(
                    self.coordinate_field_map_key, sparse_tensor_map_key
                )
        return self._inverse_mapping[sparse_tensor_map_key]
    def _is_same_key(self, other):
        # Guard for binary operations: both operands must be TensorFields
        # sharing the same manager and the same coordinate field.
        assert isinstance(other, self.__class__)
        assert self._manager == other._manager, COORDINATE_MANAGER_DIFFERENT_ERROR
        assert (
            self.coordinate_field_map_key == other.coordinate_field_map_key
        ), COORDINATE_KEY_DIFFERENT_ERROR
def _binary_functor(self, other, binary_fn):
assert isinstance(other, (self.__class__, torch.Tensor))
if isinstance(other, self.__class__):
self._is_same_key(other)
return self.__class__(
binary_fn(self._F, other.F),
coordinate_map_key=self.coordinate_map_key,
coordinate_manager=self._manager,
)
else: # when it is a torch.Tensor
return self.__class__(
binary_fn(self._F, other),
coordinate_field_map_key=self.coordinate_map_key,
coordinate_manager=self._manager,
)
    def __repr__(self):
        # Multi-line debug representation listing coordinates, features,
        # field map key, manager and spatial dimension.
        # NOTE(review): there is no os.linesep between the manager and the
        # "spatial dimension" entry, so they render on one line -- confirm
        # whether that is intentional.
        return (
            self.__class__.__name__
            + "("
            + os.linesep
            + "  coordinates="
            + str(self.C)
            + os.linesep
            + "  features="
            + str(self.F)
            + os.linesep
            + "  coordinate_field_map_key="
            + str(self.coordinate_field_map_key)
            + os.linesep
            + "  coordinate_manager="
            + str(self._manager)
            + "  spatial dimension="
            + str(self._D)
            + ")"
        )
__slots__ = (
"_C",
"_F",
"_D",
"coordinate_field_map_key",
"_manager",
"quantization_mode",
"_inverse_mapping",
"_batch_rows",
)
| 40.658824 | 170 | 0.612442 |
ace9128770697829befa2f9ca4b0264c1e6f6737 | 1,916 | py | Python | displayserver/DisplayServer.py | i7sid/Smart-Freezer-Project | b6e43225cbec5070745f66ad08981891db8451ab | [
"MIT"
] | 1 | 2022-03-18T17:23:23.000Z | 2022-03-18T17:23:23.000Z | displayserver/DisplayServer.py | i7sid/Smart-Freezer-Project | b6e43225cbec5070745f66ad08981891db8451ab | [
"MIT"
] | null | null | null | displayserver/DisplayServer.py | i7sid/Smart-Freezer-Project | b6e43225cbec5070745f66ad08981891db8451ab | [
"MIT"
] | null | null | null | #!/usr/bin/python
import zmq
import json
import time
from ssd1803a import ssd1803a
port = 5757
class DisplayServer:
msg_standby = 'DisplayServer ready on port %s' % port
msg_flash = None
flash_timeout = 0
prev_message = None
# ZeroMQ
socket = None
poller = None
ssd = None
def __init__(self, port):
self.ssd = ssd1803a()
context = zmq.Context()
self.socket = context.socket(zmq.REP)
self.socket.bind("tcp://127.0.0.1:%s" % port)
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
def update_message(self):
if int(time.time()) <= self.flash_timeout:
message = self.msg_flash
else:
message = self.msg_standby
if self.prev_message == message:
return
# Send message to display
if type(message) is list:
self.ssd.dis_print_lines(message)
else:
self.ssd.cmd_clear()
self.ssd.dis_print(message)
self.prev_message = message
def handle_request(self, message):
if message['type'] == 'standby':
self.msg_standby = message['message']
if message['type'] == 'flash':
self.msg_flash = message['message']
self.flash_timeout = int(time.time()) + message['timeout']
def loop(self):
self.update_message()
evts = self.poller.poll(1000)
if len(evts) > 0:
message_encoded = self.socket.recv()
message = json.loads(message_encoded)
if not type(message) is dict or not 'type' in message or not 'message' in message:
self.socket.send('invalid request')
else:
self.handle_request(message)
self.socket.send('okay')
if __name__ == '__main__':
    # Entry point: serve display requests forever on the configured port.
    serv = DisplayServer(port)
    while True:
        serv.loop()
| 25.891892 | 94 | 0.580376 |
ace912ea80375c0fcdd882d79e9043a30e0f16c2 | 136 | py | Python | App-9-Instant-Dictionary-API/main.py | JuanjoFernandez/udemyPythonOOP | 631e6c71ff0c05c9a332d690fcb7b033a416034d | [
"MIT"
] | null | null | null | App-9-Instant-Dictionary-API/main.py | JuanjoFernandez/udemyPythonOOP | 631e6c71ff0c05c9a332d690fcb7b033a416034d | [
"MIT"
] | null | null | null | App-9-Instant-Dictionary-API/main.py | JuanjoFernandez/udemyPythonOOP | 631e6c71ff0c05c9a332d690fcb7b033a416034d | [
"MIT"
] | null | null | null | import api
import documentation
import justpy as jp
# Register the dictionary API under /api and the documentation UI at the root.
jp.Route("/api", api.Api.serve)
jp.Route("/", documentation.Doc.serve)
jp.justpy() | 17 | 38 | 0.742647 |
ace9152d69c6309eb4b6a3fd1126179a1d6982dc | 5,406 | py | Python | forse/scripts/reproject2fullsky.py | ai4cmb/ForSE | 8ceab3b2e47f077b9d5dbaee879a5385c3a76073 | [
"MIT"
] | null | null | null | forse/scripts/reproject2fullsky.py | ai4cmb/ForSE | 8ceab3b2e47f077b9d5dbaee879a5385c3a76073 | [
"MIT"
] | null | null | null | forse/scripts/reproject2fullsky.py | ai4cmb/ForSE | 8ceab3b2e47f077b9d5dbaee879a5385c3a76073 | [
"MIT"
] | null | null | null | import healpy as hp
import pylab as pl
import astropy
from astropy import units as u
import collections
import reproject
import numpy as np
import astropy.io.fits as fits
import argparse
import time
import warnings
warnings.filterwarnings("ignore")
from projection_tools import (
get_lonlat, get_lonlat_adaptive,
reproject2fullsky, make_mosaic_from_healpix )
def main(args):
    """Run the flat<->HEALPix reprojection selected by the CLI flags.

    ``--flat2hpx`` reprojects square flat-sky patches (``.npy``) back onto a
    full-sky HEALPix map written as ``.fits``; ``--hpx2flat`` tiles a HEALPix
    map into overlapping square patches written as ``.npy``.
    """
    Npix = pl.int_(args.npix)
    pixel_size = args.pixelsize * u.arcmin
    overlap = args.overlap * u.deg
    nside_in = args.nside
    hpxsize = hp.nside2resol(nside_in, arcmin=True) * u.arcmin
    nside_out = pl.int_(nside_in)
    if args.flat2hpx:
        # Each set of square patches is assumed to encode a single Stokes
        # component (T, Q or U); assembling a TQU .fits map needs further
        # postprocessing.
        if args.verbose:
            print(f"reading patches from {args.flat_projection}")
        patches = pl.load(args.flat_projection, allow_pickle=True)
        size_patch = pixel_size.to(u.deg) * Npix
        # Patch centers on the sphere; the adaptive grid adjusts the
        # overlap with latitude.
        if args.adaptive_reprojection:
            lon, lat = get_lonlat_adaptive(size_patch, overlap)
        else:
            lon, lat = get_lonlat(size_patch, overlap)
        filename = args.flat_projection.replace('.npy', '.fits')
        if args.verbose:
            print("reprojecting back to HPX")
            print(f"files will be stored in (unknown)")
        # Bug fix: time.clock() was removed in Python 3.8; use the
        # monotonic perf_counter() for timing instead.
        s = time.perf_counter()
        newmap, weightsmap = reproject2fullsky(
            tiles=patches, lon=lon, lat=lat,
            nside_out=nside_out, pixel_size=pixel_size,
            apodization_file=args.apodization_file,
            Npix=Npix, verbose=True,
        )
        e = time.perf_counter()
        if args.apodization_file is not None:
            # Reuse a previously stored apodization map when available;
            # otherwise build it once by reprojecting unit tiles, and cache it.
            try:
                apomap = hp.read_map(
                    args.apodization_file.replace('.npy', '.fits'),
                    verbose=args.verbose)
                print('Apodized map already saved ')
            except FileNotFoundError:
                apomap, _ = reproject2fullsky(
                    tiles=np.ones_like(patches), lon=lon, lat=lat,
                    nside_out=nside_out, pixel_size=pixel_size,
                    apodization_file=args.apodization_file,
                    Npix=Npix, verbose=True,
                )
                hp.write_map(args.apodization_file.replace('.npy', '.fits'),
                             apomap, overwrite=True)
            hp.write_map(filename, [newmap / apomap, newmap, weightsmap],
                         overwrite=True)
        else:
            hp.write_map(filename, [newmap / weightsmap, newmap, weightsmap],
                         overwrite=True)
        if args.verbose:
            print(f"process took {e-s} sec ")
    elif args.hpx2flat:
        if args.has_polarization:
            inputmap = hp.read_map(args.hpxmap, verbose=args.verbose,
                                   field=[0, 1, 2])
            stringmap = 'TQU'
        else:
            stringmap = 'T'
            inputmap = [hp.read_map(args.hpxmap, verbose=args.verbose)]
        filename = args.hpxmap.replace('.fits', '.npy')
        assert len(stringmap) == len(inputmap)
        assert nside_in == hp.get_nside(inputmap)
        if args.verbose:
            print(f"Making square tile patches {pixel_size.to(u.deg) *Npix } x "
                  f"{pixel_size.to(u.deg) *Npix } from {args.hpxmap}")
            print(f"files will be stored in (unknown)")
        # One set of patches per Stokes component.
        for imap, maptype in zip(inputmap, stringmap):
            s = time.perf_counter()
            patches, lon, lat = make_mosaic_from_healpix(
                imap, Npix, pixel_size.to(u.deg), overlap=overlap,
                adaptive=args.adaptive_reprojection)
            e = time.perf_counter()
            pl.save(filename.replace('.npy', f'_{maptype}.npy'),
                    [patches, lon, lat])
            if args.verbose:
                print(f"process took {e-s} sec ")
if __name__=="__main__":
parser = argparse.ArgumentParser( description="Script to perform the projection in serial. This should take few minutes to \
project a healpix map into flat coordinates (nside=2048), and ~8 hours to reproject it back into healpix." )
parser.add_argument("--hpxmap" , help='path to the healpix map to be stacked' )
parser.add_argument("--pixelsize", help = 'pixel size in arcminutes of the input map', type=np.float , default = 3.75 )
parser.add_argument("--npix", help='size of patches', default = 320, type = np.int )
parser.add_argument("--nside", help='nside of output map ', default = 2048, type = np.int )
parser.add_argument("--overlap", help='partial patch overlap in deg', default=5, type=np.float)
parser.add_argument("--flat2hpx", action="store_true" , default=False )
parser.add_argument("--hpx2flat", action="store_true" , default=False )
parser.add_argument("--verbose", action="store_true" , default=False )
parser.add_argument("--flat-projection", help='path to the file with list of patches ', default ='' )
parser.add_argument("--has-polarization", help='include polarization', default =False, action="store_true" )
parser.add_argument("--apodization-file", help='path of the apodization mask', default =None )
args = parser.parse_args()
main( args)
| 47.008696 | 152 | 0.601924 |
ace91693745423cc548c5c85f0146d19bcbf3855 | 4,593 | py | Python | nicos/clients/flowui/panels/scans.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos/clients/flowui/panels/scans.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos/clients/flowui/panels/scans.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Michele Brambilla <michele.brambilla@psi.ch>
#
# *****************************************************************************
from nicos.clients.flowui.panels import get_icon
from nicos.clients.gui.panels.scans import ScansPanel as DefaultScansPanel
from nicos.guisupport.qt import QActionGroup, QCheckBox, QComboBox, QFrame, \
QHBoxLayout, QToolBar, QWidgetAction
class ScansPanel(DefaultScansPanel):
    """Scans panel variant for the flowui skin: rebuilds the default panel's
    toolbars inline (inserted at the top of the panel layout) instead of
    exposing them to the main window via getToolbars()."""

    def __init__(self, parent, client, options):
        DefaultScansPanel.__init__(self, parent, client, options)
        # Build the toolbars and embed them at the top of this panel.
        self.bars = self.createPanelToolbar()
        for index, bar in enumerate(self.bars):
            self.layout().insertWidget(index, bar)
        self.set_icons()

    def set_icons(self):
        """Replace the default action icons with the flowui icon set."""
        self.actionPrint.setIcon(get_icon('print-24px.svg'))
        self.actionSavePlot.setIcon(get_icon('save-24px.svg'))
        self.actionUnzoom.setIcon(get_icon('zoom_out-24px.svg'))
        # NOTE(review): actionClose reuses the zoom-out icon -- looks like a
        # copy-paste; confirm the intended asset.
        self.actionClose.setIcon(get_icon('zoom_out-24px.svg'))

    def createPanelToolbar(self):
        """Create and return the 'Scans' and 'Scan fitting' toolbars."""
        bar = QToolBar('Scans')
        bar.addAction(self.actionSavePlot)
        bar.addAction(self.actionPrint)
        bar.addSeparator()
        bar.addAction(self.actionXAxis)
        bar.addAction(self.actionYAxis)
        bar.addAction(self.actionNormalized)
        bar.addSeparator()
        bar.addAction(self.actionLogXScale)
        bar.addAction(self.actionLogScale)
        bar.addAction(self.actionUnzoom)
        bar.addSeparator()
        bar.addAction(self.actionAutoScale)
        bar.addAction(self.actionScaleX)
        bar.addAction(self.actionScaleY)
        bar.addAction(self.actionLegend)
        bar.addAction(self.actionErrors)
        bar.addAction(self.actionResetPlot)
        bar.addAction(self.actionDeletePlot)
        bar.addSeparator()
        bar.addAction(self.actionAutoDisplay)
        bar.addAction(self.actionCombine)
        fitbar = QToolBar('Scan fitting')
        fitbar.addAction(self.actionFitPeak)
        # "Pick" checkbox embedded in the toolbar, kept in sync with the
        # actionPickInitial toggle in both directions.
        wa = QWidgetAction(fitbar)
        self.fitPickCheckbox = QCheckBox(fitbar)
        self.fitPickCheckbox.setText('Pick')
        self.fitPickCheckbox.setChecked(True)
        self.actionPickInitial.setChecked(True)
        self.fitPickCheckbox.toggled.connect(self.actionPickInitial.setChecked)
        self.actionPickInitial.toggled.connect(self.fitPickCheckbox.setChecked)
        layout = QHBoxLayout()
        layout.setContentsMargins(10, 0, 10, 0)
        layout.addWidget(self.fitPickCheckbox)
        frame = QFrame(fitbar)
        frame.setLayout(layout)
        wa.setDefaultWidget(frame)
        fitbar.addAction(wa)
        # Exclusive group of fit-function actions, mirrored by a combo box.
        ag = QActionGroup(fitbar)
        ag.addAction(self.actionFitPeakGaussian)
        ag.addAction(self.actionFitPeakLorentzian)
        ag.addAction(self.actionFitPeakPV)
        ag.addAction(self.actionFitPeakPVII)
        ag.addAction(self.actionFitTc)
        ag.addAction(self.actionFitCosine)
        ag.addAction(self.actionFitSigmoid)
        ag.addAction(self.actionFitLinear)
        ag.addAction(self.actionFitExponential)
        wa = QWidgetAction(fitbar)
        self.fitComboBox = QComboBox(fitbar)
        for a in ag.actions():
            # Strip the menu accelerator markers for the combo box labels.
            itemtext = a.text().replace('&', '')
            self.fitComboBox.addItem(itemtext)
            self.fitfuncmap[itemtext] = a
        self.fitComboBox.currentIndexChanged.connect(
            self.on_fitComboBox_currentIndexChanged)
        wa.setDefaultWidget(self.fitComboBox)
        fitbar.addAction(wa)
        fitbar.addSeparator()
        fitbar.addAction(self.actionFitArby)
        bars = [bar, fitbar]
        return bars

    def getToolbars(self):
        # The toolbars live inside the panel itself (see __init__), so the
        # main window gets none.
        return []
| 39.93913 | 79 | 0.67015 |
ace916f038c6c5dd36e8659330606c11100973d2 | 7,367 | py | Python | metricq/sink.py | Daddelhai/metricq-python | ed3d4fdf7d7e3ea2326df79b62a1b5f0e9684bcf | [
"BSD-3-Clause"
] | null | null | null | metricq/sink.py | Daddelhai/metricq-python | ed3d4fdf7d7e3ea2326df79b62a1b5f0e9684bcf | [
"BSD-3-Clause"
] | null | null | null | metricq/sink.py | Daddelhai/metricq-python | ed3d4fdf7d7e3ea2326df79b62a1b5f0e9684bcf | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2018, ZIH, Technische Universitaet Dresden, Federal Republic of Germany
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of metricq nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import abstractmethod
from asyncio import CancelledError, Task
from typing import Optional, Set
import aio_pika
from aio_pika.queue import Queue
from .data_client import DataClient
from .datachunk_pb2 import DataChunk
from .logging import get_logger
from .types import Timestamp
logger = get_logger(__name__)
class SinkError(Exception):
pass
class SinkResubscribeError(SinkError):
pass
class Sink(DataClient):
def __init__(self, *args, add_uuid=True, **kwargs):
super().__init__(*args, add_uuid=add_uuid, **kwargs)
self._data_queue: Optional[Queue] = None
self._data_consumer_tag: Optional[str] = None
self._subscribed_metrics: Set[str] = set()
self._subscribe_args: dict = dict()
self._resubscribe_task: Optional[Task] = None
async def _declare_data_queue(self, name: str):
self._data_queue = await self.data_channel.declare_queue(
name=name, robust=False, passive=True
)
async def sink_config(self, dataQueue, **kwargs):
await self.data_config(**kwargs)
await self._declare_data_queue(dataQueue)
logger.info("starting sink consume")
self._data_consumer_tag = await self._data_queue.consume(self._on_data_message)
def _on_data_connection_reconnect(self, connection):
logger.info("Sink data connection ({}) reestablished!", connection)
if self._resubscribe_task is not None and not self._resubscribe_task.done():
logger.warning(
"Sink data connection was reestablished, but another resubscribe task is still running!"
)
self._resubscribe_task.cancel()
self._resubscribe_task = self.event_loop.create_task(
self._resubscribe(connection)
)
def resubscribe_done(task: Task):
try:
exception = task.exception()
if exception is None:
self._data_connection_watchdog.set_established()
else:
errmsg = "Resubscription failed with an unhandled exception"
logger.error("{}: {}", errmsg, exception)
raise SinkResubscribeError(errmsg) from exception
except CancelledError:
logger.warning("Resubscribe task was cancelled!")
self._resubscribe_task.add_done_callback(resubscribe_done)
async def _resubscribe(self, connection):
# Reuse manager-assigned data queue name for resubscription.
self._subscribe_args.update(dataQueue=self._data_queue.name)
metrics = tuple(self._subscribed_metrics)
logger.info(
"Resubscribing to {} metric(s) with RPC parameters {}...",
len(metrics),
self._subscribe_args,
)
response = await self.rpc(
"sink.subscribe", metrics=metrics, **self._subscribe_args
)
await self._declare_data_queue(response["dataQueue"])
logger.debug("Restarting consume...")
await self._data_queue.consume(
self._on_data_message, consumer_tag=self._data_consumer_tag
)
async def subscribe(self, metrics, **kwargs):
"""Subscribe to a list of metrics.
:param metrics:
names of the metrics to subscribe to
:param expires:
(optional) queue expiration time in seconds
:param metadata: bool
whether to return metric metadata in the response
:return: rpc response
"""
if self._data_queue is not None:
kwargs["dataQueue"] = self._data_queue.name
response = await self.rpc("sink.subscribe", metrics=metrics, **kwargs)
self._subscribed_metrics.update(metrics)
# Save the subscription RPC args in case we need to resubscribe (after a reconnect).
self._subscribe_args = kwargs
if self._data_queue is None:
await self.sink_config(**response)
return response
async def unsubscribe(self, metrics):
assert self._data_queue
await self.rpc(
"sink.unsubscribe", dataQueue=self._data_queue.name, metrics=metrics
)
self._subscribed_metrics.difference_update(metrics)
# If we just unsubscribed from all metrics, reset the subscription args
# to their defaults.
if not self._subscribed_metrics:
self._subscribe_args = dict()
async def _on_data_message(self, message: aio_pika.IncomingMessage):
with message.process(requeue=True):
body = message.body
from_token = message.app_id
metric = message.routing_key
logger.debug("received message from {}", from_token)
data_response = DataChunk()
data_response.ParseFromString(body)
await self._on_data_chunk(metric, data_response)
async def _on_data_chunk(self, metric, data_chunk: DataChunk):
""" Only override this if absolutely necessary for performance """
last_timed = 0
zipped_tv = zip(data_chunk.time_delta, data_chunk.value)
for time_delta, value in zipped_tv:
last_timed += time_delta
await self.on_data(metric, Timestamp(last_timed), value)
@abstractmethod
async def on_data(self, metric, timestamp, value):
pass
class DurableSink(Sink):
    """A sink with a stable client token (no UUID suffix) that registers
    itself at the manager on connect."""

    def __init__(self, *args, **kwargs):
        # add_uuid=False keeps the token identical across restarts.
        super().__init__(*args, add_uuid=False, **kwargs)

    async def connect(self):
        """Connect, register at the manager and apply the received config."""
        await super().connect()
        response = await self.rpc("sink.register")
        assert response is not None
        logger.info("register response: {}", response)
        await self.rpc_dispatch("config", **response["config"])
| 37.974227 | 104 | 0.676395 |
ace917f6f9cc7a58e0872c3901f7967aa82dd348 | 854 | py | Python | pythonteste/aula08.py | WESLLEYROCHA87/Curso_Python | 695de1bbbdbc0b711c9e2a6cd39a561bdabcf44d | [
"MIT"
] | 1 | 2022-01-09T05:09:31.000Z | 2022-01-09T05:09:31.000Z | pythonteste/aula08.py | WESLLEYROCHA87/Curso_Python | 695de1bbbdbc0b711c9e2a6cd39a561bdabcf44d | [
"MIT"
] | null | null | null | pythonteste/aula08.py | WESLLEYROCHA87/Curso_Python | 695de1bbbdbc0b711c9e2a6cd39a561bdabcf44d | [
"MIT"
] | null | null | null | # https://www.youtube.com/watch?v=oOUyhGNib2Q&list=PLvE-ZAFRgX8hnECDn1v9HNTI71veL3oW0&index=24
# Lesson 08 - using modules.
# Ways to bring a module into a program:
#   1. import module              -> everything, accessed as module.name
#   2. from module import name    -> one specific name
#   3. from module import a, b    -> several specific names
# Handy functions in the standard math module:
#   ceil      -> round up
#   floor     -> round down
#   trunc     -> drop everything after the decimal point (no rounding)
#   pow       -> exponentiation
#   sqrt      -> square root
#   factorial -> factorial
from math import floor, sqrt

value = int(input('Digite um número: '))
root = sqrt(value)
print('A raiz de {} é igual a {:.2f}'.format(value, floor(root)))
| 38.818182 | 94 | 0.694379 |
ace918ec488b1cb7abf8bf2a9e4dd7afb3660380 | 25,816 | py | Python | examples/flax/language-modeling/run_clm_flax.py | madlag/transformers | ff5cdc086be1e0c3e2bbad8e3469b34cffb55a85 | [
"Apache-2.0"
] | 1 | 2021-06-15T14:38:44.000Z | 2021-06-15T14:38:44.000Z | examples/flax/language-modeling/run_clm_flax.py | madlag/transformers | ff5cdc086be1e0c3e2bbad8e3469b34cffb55a85 | [
"Apache-2.0"
] | null | null | null | examples/flax/language-modeling/run_clm_flax.py | madlag/transformers | ff5cdc086be1e0c3e2bbad8e3469b34cffb55a85 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pre-training/Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=causal-lm
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
logger = logging.getLogger(__name__)
# Cache the result of the availability probe so it only runs once at import time.
has_tensorboard = is_tensorboard_available()
if has_tensorboard:
    try:
        from flax.metrics.tensorboard import SummaryWriter
    except ImportError as ie:
        # tensorboard itself is available but flax's writer could not be imported.
        has_tensorboard = False
        print(f"Unable to display metrics through TensorBoard because some package are not installed: {ie}")
else:
    print(
        "Unable to display metrics through TensorBoard because the package is not installed: "
        "Please run pip install tensorboard to enable."
    )
# All config classes / model types usable with the Flax causal-LM auto class
# (listed in the --model_type help string below).
MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    # The "help" metadata doubles as the CLI --help text via HfArgumentParser.
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "The model checkpoint for weights initialization."
            "Don't set if you want to train a model from scratch."
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    # Must name an attribute of jax.numpy; resolved via getattr(jnp, dtype) in main().
    dtype: Optional[str] = field(
        default="float32",
        metadata={
            "help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Raises:
        ValueError: in ``__post_init__`` if neither a dataset name nor any
            train/validation file was provided.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    # Fix: this field was previously declared twice with identical definitions;
    # the redundant second declaration has been removed.
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    block_size: Optional[int] = field(
        default=None,
        metadata={
            "help": "Optional input sequence length after tokenization. "
            "The training dataset will be truncated in block of this size for training. "
            "Default to the model max input length for single sentence inputs (take into account special tokens)."
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )

    def __post_init__(self):
        """Validate that a data source was given and that file extensions are supported."""
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
class TrainState(train_state.TrainState):
    # Dropout PRNG key carried with the optimizer state so it can be split
    # deterministically at every train step.
    dropout_rng: jnp.ndarray

    def replicate(self):
        """Replicate the state across local devices, sharding the dropout key."""
        return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
    """
    Yield batches of size `batch_size` from `dataset`, sharded over all local devices.

    The trailing incomplete batch is dropped. When `shuffle` is `True`, sample
    order is permuted with `rng`.
    """
    num_steps = len(dataset) // batch_size
    indices = jax.random.permutation(rng, len(dataset)) if shuffle else jnp.arange(len(dataset))
    # Drop the incomplete tail, then arrange one row of indices per step.
    indices = indices[: num_steps * batch_size]
    indices = indices.reshape((num_steps, batch_size))
    for step_indices in indices:
        samples = dataset[step_indices]
        arrays = {key: jnp.array(value) for key, value in samples.items()}
        yield shard(arrays)
def write_metric(summary_writer, train_metrics, eval_metrics, train_time, step):
    """Write one epoch's train/eval metrics to the TensorBoard summary writer.

    Per-step train metrics are logged backwards from `step`; eval metrics and
    the cumulative `train_time` are logged at `step` itself.
    """
    summary_writer.scalar("train_time", train_time, step)
    stacked = get_metrics(train_metrics)
    for key, values in stacked.items():
        tag = f"train_{key}"
        # The i-th value belongs to step (step - len(values) + 1 + i).
        first_step = step - len(values) + 1
        for offset, value in enumerate(values):
            summary_writer.scalar(tag, value, first_step + offset)
    for metric_name, value in eval_metrics.items():
        summary_writer.scalar(f"eval_{metric_name}", value, step)
def create_learning_rate_fn(
    train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
    """Returns a linear warmup, linear_decay learning rate function."""
    total_train_steps = (train_ds_size // train_batch_size) * num_train_epochs
    # Ramp linearly from 0 up to the peak rate, then decay linearly back to 0.
    warmup = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
    decay = optax.linear_schedule(
        init_value=learning_rate, end_value=0, transition_steps=total_train_steps - num_warmup_steps
    )
    return optax.join_schedules(schedules=[warmup, decay], boundaries=[num_warmup_steps])
def main():
    """Entry point: parse model/data/training arguments, load and chunk the
    dataset, build the Flax causal-LM model and run the pmap-parallel
    training/evaluation loop, saving the final checkpoint on process 0."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        # NOTE(review): the two concatenated literals below are missing a
        # separating space in the rendered message.
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty."
            "Use --overwrite_output_dir to overcome."
        )
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    # Setup logging, we only want one process per machine to log things on the screen.
    logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
    if jax.process_index() == 0:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # Set the verbosity to info of the Transformers logger (on main process only):
    logger.info(f"Training/evaluation parameters {training_args}")
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False
        )
        if "validation" not in dataset.keys():
            # No validation split shipped with the dataset: carve one out of train.
            dataset["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
                cache_dir=model_args.cache_dir,
            )
            dataset["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
                cache_dir=model_args.cache_dir,
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
        )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
        )
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
    if model_args.model_name_or_path:
        model = FlaxAutoModelForCausalLM.from_pretrained(
            model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
        )
    else:
        model = FlaxAutoModelForCausalLM.from_config(
            config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
        )
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = dataset["train"].column_names
    else:
        column_names = dataset["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]
    # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
    tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")

    def tokenize_function(examples):
        with CaptureLogger(tok_logger) as cl:
            output = tokenizer(examples[text_column_name])
        # clm input could be much much longer than block_size
        if "Token indices sequence length is longer than the" in cl.out:
            tok_logger.warning(
                "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
            )
        return output

    tokenized_datasets = dataset.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=column_names,
        load_from_cache_file=not data_args.overwrite_cache,
    )
    if data_args.block_size is None:
        block_size = tokenizer.model_max_length
        if block_size > config.max_position_embeddings:
            logger.warning(
                f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                "Picking 1024 instead. You can change that default value by passing --block_size xxx."
            )
            block_size = 1024
    else:
        if data_args.block_size > tokenizer.model_max_length:
            logger.warning(
                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
                f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
            )
        block_size = min(data_args.block_size, tokenizer.model_max_length)

    # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
        # customize this part to your needs.
        total_length = (total_length // block_size) * block_size
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
        result["labels"] = result["input_ids"].copy()
        return result

    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
    # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
    # to preprocess.
    #
    # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
    # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
    lm_datasets = tokenized_datasets.map(
        group_texts,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
    )
    # NOTE(review): the presence checks below look at tokenized_datasets while
    # the splits are taken from lm_datasets; the two share split names, but
    # confirm this is intentional.
    if training_args.do_train:
        if "train" not in tokenized_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = lm_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in tokenized_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = lm_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    # Enable tensorboard only on the master node
    if has_tensorboard and jax.process_index() == 0:
        summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir).joinpath("logs").as_posix())
    # Initialize our training
    rng = jax.random.PRNGKey(training_args.seed)
    rng, dropout_rng = jax.random.split(rng)
    # Store some constant
    num_epochs = int(training_args.num_train_epochs)
    train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
    eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
    steps_per_epoch = len(train_dataset) // train_batch_size
    total_train_steps = steps_per_epoch * num_epochs
    # Create learning rate schedule
    linear_decay_lr_schedule_fn = create_learning_rate_fn(
        len(train_dataset),
        train_batch_size,
        training_args.num_train_epochs,
        training_args.warmup_steps,
        training_args.learning_rate,
    )

    # We use Optax's "masking" functionality to not apply weight decay
    # to bias and LayerNorm scale parameters. decay_mask_fn returns a
    # mask boolean with the same structure as the parameters.
    # The mask is True for parameters that should be decayed.
    def decay_mask_fn(params):
        flat_params = traverse_util.flatten_dict(params)
        flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params}
        return traverse_util.unflatten_dict(flat_mask)

    # create adam optimizer
    adamw = optax.adamw(
        learning_rate=linear_decay_lr_schedule_fn,
        b1=training_args.adam_beta1,
        b2=training_args.adam_beta2,
        eps=training_args.adam_epsilon,
        weight_decay=training_args.weight_decay,
        mask=decay_mask_fn,
    )
    # Setup train state
    state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng)

    def loss_fn(logits, labels):
        # Standard causal-LM shift: predict token t+1 from positions <= t.
        shift_logits = logits[..., :-1, :]
        shift_labels = labels[..., 1:]
        loss = optax.softmax_cross_entropy(shift_logits, onehot(shift_labels, shift_logits.shape[-1]))
        return loss.mean()

    # Define gradient update step fn
    def train_step(state, batch):
        dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)

        def compute_loss(params):
            labels = batch.pop("labels")
            logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
            loss = loss_fn(logits, labels)
            return loss

        grad_fn = jax.value_and_grad(compute_loss)
        loss, grad = grad_fn(state.params)
        grad = jax.lax.pmean(grad, "batch")
        new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
        metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
        metrics = jax.lax.pmean(metrics, axis_name="batch")
        return new_state, metrics

    # Define eval fn
    def eval_step(params, batch):
        labels = batch.pop("labels")
        logits = model(**batch, params=params, train=False)[0]
        loss = loss_fn(logits, labels)
        # summarize metrics
        metrics = {"loss": loss}
        metrics = jax.lax.pmean(metrics, axis_name="batch")
        return metrics

    # Create parallel version of the train and eval step
    p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
    p_eval_step = jax.pmap(eval_step, "batch")
    # Replicate the train state on each device
    state = state.replicate()
    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {num_epochs}")
    logger.info(f"  Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel & distributed) = {train_batch_size}")
    logger.info(f"  Total optimization steps = {total_train_steps}")
    train_time = 0
    epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
    for epoch in epochs:
        # ======================== Training ================================
        train_start = time.time()
        # Create sampling rng
        rng, input_rng = jax.random.split(rng)
        train_metrics = []
        # Generate an epoch by shuffling sampling indices from the train dataset
        train_loader = data_loader(input_rng, train_dataset, train_batch_size, shuffle=True)
        steps_per_epoch = len(train_dataset) // train_batch_size
        # train
        for _ in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False):
            batch = next(train_loader)
            state, train_metric = p_train_step(state, batch)
            train_metrics.append(train_metric)
        train_time += time.time() - train_start
        train_metric = unreplicate(train_metric)
        epochs.write(
            f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate: {train_metric['learning_rate']})"
        )
        # ======================== Evaluating ==============================
        eval_metrics = []
        eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size)
        eval_steps = len(eval_dataset) // eval_batch_size
        for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
            # Model forward
            batch = next(eval_loader)
            metrics = p_eval_step(state.params, batch)
            eval_metrics.append(metrics)
        # normalize eval metrics
        eval_metrics = get_metrics(eval_metrics)
        eval_metrics = jax.tree_map(jnp.mean, eval_metrics)
        try:
            eval_metrics["perplexity"] = math.exp(eval_metrics["loss"])
        except OverflowError:
            eval_metrics["perplexity"] = float("inf")
        # Print metrics and update progress bar
        desc = f"Epoch... ({epoch + 1}/{num_epochs} | Eval Loss: {eval_metrics['loss']} | Eval Perplexity: {eval_metrics['perplexity']})"
        epochs.write(desc)
        epochs.desc = desc
        # Save metrics
        if has_tensorboard and jax.process_index() == 0:
            cur_step = epoch * (len(train_dataset) // train_batch_size)
            write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step)
    # save last checkpoint
    if jax.process_index() == 0:
        params = jax.device_get(unreplicate(state.params))
        model.save_pretrained(training_args.output_dir, params=params)
# Script entry point: only run training when executed directly.
if __name__ == "__main__":
    main()
| 41.977236 | 152 | 0.678843 |
ace919a064f39abb8f33d171eee43dbfa0bd3f29 | 305 | py | Python | setup.py | garethsion/UCL_RSD_Assessment_1 | 9c1d0db55ce381586c44967de882ad390d159af2 | [
"MIT"
] | null | null | null | setup.py | garethsion/UCL_RSD_Assessment_1 | 9c1d0db55ce381586c44967de882ad390d159af2 | [
"MIT"
] | null | null | null | setup.py | garethsion/UCL_RSD_Assessment_1 | 9c1d0db55ce381586c44967de882ad390d159af2 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
    name = "greengraph",
    version = "0.1.0",
    packages = find_packages(exclude=['*test']),
    scripts = ['scripts/greengraph'],
    # Runtime dependencies installed alongside the package.
    install_requires = ['numpy', 'geopy', 'matplotlib', 'requests', 'argparse']
) | 30.5 | 79 | 0.652459 |
ace91c9ace25f90e9643d664b35c4e759c2eabb3 | 19,985 | py | Python | flash/image/classification/adapters.py | voxel51/lightning-flash | d190c7675ccc2198fe954828a50a29c9a96006f2 | [
"Apache-2.0"
] | 2 | 2021-05-25T20:06:10.000Z | 2022-02-13T22:08:59.000Z | flash/image/classification/adapters.py | voxel51/lightning-flash | d190c7675ccc2198fe954828a50a29c9a96006f2 | [
"Apache-2.0"
] | 1 | 2021-06-16T14:46:06.000Z | 2021-06-16T14:46:06.000Z | flash/image/classification/adapters.py | voxel51/lightning-flash | d190c7675ccc2198fe954828a50a29c9a96006f2 | [
"Apache-2.0"
] | 3 | 2021-06-03T10:03:04.000Z | 2021-08-08T21:49:16.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
from collections import defaultdict
from functools import partial
from typing import Any, Callable, List, Optional, Type
import torch
from pytorch_lightning import LightningModule
from pytorch_lightning.plugins import DataParallelPlugin, DDPPlugin, DDPSpawnPlugin
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.warnings import WarningCache
from torch.utils.data import DataLoader, IterableDataset, Sampler
import flash
from flash.core.adapter import Adapter, AdapterTask
from flash.core.data.auto_dataset import BaseAutoDataset
from flash.core.data.data_source import DefaultDataKeys
from flash.core.model import Task
from flash.core.registry import FlashRegistry
from flash.core.utilities.imports import _LEARN2LEARN_AVAILABLE
from flash.core.utilities.providers import _LEARN2LEARN
from flash.core.utilities.url_error import catch_url_error
from flash.image.classification.integrations.learn2learn import TaskDataParallel, TaskDistributedDataParallel
warning_cache = WarningCache()
if _LEARN2LEARN_AVAILABLE:
    import learn2learn as l2l
    from learn2learn.data.transforms import RemapLabels as Learn2LearnRemapLabels
else:
    # Fallback stub so the RemapLabels subclass below can still be defined
    # when learn2learn is not installed.
    class Learn2LearnRemapLabels:
        pass
class RemapLabels(Learn2LearnRemapLabels):
    """learn2learn's ``RemapLabels`` transform adapted to Flash's dict-shaped samples."""

    def remap(self, data, mapping):
        # remap needs to be adapted to Flash API: samples are dicts, so only
        # the target entry is remapped.
        target_key = DefaultDataKeys.TARGET
        data[target_key] = mapping(data[target_key])
        return data
class NoModule:
    """Hides ``task`` behind plain attribute delegation to prevent nn.Module infinite recursion."""
    def __init__(self, task):
        self.task = task
    def __getattr__(self, key):
        # Only invoked when normal lookup fails; everything except "task" is
        # forwarded to the wrapped task object.
        if key == "task":
            return self.task
        return getattr(self.task, key)
    def __setattr__(self, key: str, value: Any) -> None:
        # "task" itself lives on the wrapper; all other writes are forwarded.
        if key == "task":
            object.__setattr__(self, key, value)
        else:
            setattr(self.task, key, value)
class Model(torch.nn.Module):
    """Backbone plus optional head wrapper handed to the learn2learn algorithms."""
    def __init__(self, backbone: torch.nn.Module, head: Optional[torch.nn.Module]):
        super().__init__()
        self.backbone = backbone
        self.head = head
    def forward(self, x):
        features = self.backbone(x)
        # Global-average-pool the spatial dimensions when the backbone
        # returns a 4D feature map (N, C, H, W) -> (N, C).
        if features.dim() == 4:
            features = features.mean(dim=(-2, -1))
        return features if self.head is None else self.head(features)
class Learn2LearnAdapter(Adapter):
    # Flash "extra" that must be installed for this adapter to be usable.
    required_extras: str = "image"
    def __init__(
        self,
        task: AdapterTask,
        backbone: torch.nn.Module,
        head: torch.nn.Module,
        algorithm_cls: Type[LightningModule],
        ways: int,
        shots: int,
        meta_batch_size: int,
        queries: int = 1,
        num_task: int = -1,
        epoch_length: Optional[int] = None,
        test_epoch_length: Optional[int] = None,
        test_ways: Optional[int] = None,
        test_shots: Optional[int] = None,
        test_queries: Optional[int] = None,
        test_num_task: Optional[int] = None,
        default_transforms_fn: Optional[Callable] = None,
        seed: int = 42,
        **algorithm_kwargs,
    ):
        """The ``Learn2LearnAdapter`` is an :class:`~flash.core.adapter.Adapter` for integrating with `learn 2
        learn` library (https://github.com/learnables/learn2learn).

        Args:
            task: Task to be used. This adapter should work with any Flash Classification task
            backbone: Feature extractor to be used.
            head: Predictive head.
            algorithm_cls: Algorithm class coming
                from: https://github.com/learnables/learn2learn/tree/master/learn2learn/algorithms/lightning
            ways: Number of classes conserved for generating the task.
            shots: Number of samples used for adaptation.
            meta_batch_size: Number of task to be sampled and optimized over before doing a meta optimizer step.
            queries: Number of samples used for computing the meta loss after the adaption on the `shots` samples.
            num_task: Total number of tasks to be sampled during training. If -1, a new task will always be sampled.
            epoch_length: Total number of tasks to be sampled to make an epoch.
            test_ways: Number of classes conserved for generating the validation and testing task.
            test_shots: Number of samples used for adaptation during validation and testing phase.
            test_queries: Number of samples used for computing the meta loss during validation or testing
                after the adaption on `shots` samples.
            test_epoch_length: Total number of tasks to be sampled to make an epoch during validation and
                testing phase.
            test_num_task: Total number of tasks sampled during validation/testing; defaults to ``num_task``.
            default_transforms_fn: A Callable to create the task transform.
                The callable should take the dataset, ways and shots as arguments.
            seed: Seed forwarded to the distributed task sampler.
            algorithm_kwargs: Keyword arguments to be provided to the algorithm class from learn2learn
        """
        super().__init__()
        # NoModule keeps a handle on the task without registering it as a submodule.
        self._task = NoModule(task)
        self.backbone = backbone
        self.head = head
        self.algorithm_cls = algorithm_cls
        self.meta_batch_size = meta_batch_size
        self.num_task = num_task
        self.default_transforms_fn = default_transforms_fn
        self.seed = seed
        # Validation/test hyper-parameters fall back to their training counterparts.
        self.epoch_length = epoch_length or meta_batch_size
        self.ways = ways
        self.shots = shots
        self.queries = queries
        self.test_ways = test_ways or ways
        self.test_shots = test_shots or shots
        self.test_queries = test_queries or queries
        self.test_num_task = test_num_task or num_task
        self.test_epoch_length = test_epoch_length or self.epoch_length
        # Only forward the constructor arguments the chosen algorithm actually accepts.
        params = inspect.signature(self.algorithm_cls).parameters
        algorithm_kwargs["train_ways"] = ways
        algorithm_kwargs["train_shots"] = shots
        algorithm_kwargs["train_queries"] = queries
        algorithm_kwargs["test_ways"] = self.test_ways
        algorithm_kwargs["test_shots"] = self.test_shots
        algorithm_kwargs["test_queries"] = self.test_queries
        if "model" in params:
            algorithm_kwargs["model"] = Model(backbone=backbone, head=head)
        if "features" in params:
            algorithm_kwargs["features"] = Model(backbone=backbone, head=None)
        if "classifier" in params:
            algorithm_kwargs["classifier"] = head
        self.model = self.algorithm_cls(**algorithm_kwargs)
        # this algorithm requires a special treatment:
        # LightningPrototypicalNetworks must be validated before predicting.
        self._algorithm_has_validated = self.algorithm_cls != l2l.algorithms.LightningPrototypicalNetworks
    def _default_transform(self, dataset, ways: int, shots: int, queries) -> List[Callable]:
        # Episodic task pipeline: sample `ways` classes with (shots + queries)
        # samples each, load the data, remap labels into the Flash sample dict
        # and make the labels consecutive integers.
        return [
            l2l.data.transforms.FusedNWaysKShots(dataset, n=ways, k=shots + queries),
            l2l.data.transforms.LoadData(dataset),
            RemapLabels(dataset),
            l2l.data.transforms.ConsecutiveLabels(dataset),
        ]
    @staticmethod
    def _labels_to_indices(data):
        """Map each label value to the list of sample indices carrying that label."""
        out = defaultdict(list)
        for idx, sample in enumerate(data):
            label = sample[DefaultDataKeys.TARGET]
            if torch.is_tensor(label):
                label = label.item()
            out[label].append(idx)
        return out
    def _convert_dataset(
        self,
        trainer: flash.Trainer,
        dataset: BaseAutoDataset,
        ways: int,
        shots: int,
        queries: int,
        num_workers: int,
        num_task: int,
        epoch_length: int,
    ):
        """Wrap ``dataset`` into a learn2learn episodic task sampler matching the trainer's plugin."""
        if isinstance(dataset, BaseAutoDataset):
            metadata = getattr(dataset, "data", None)
            if metadata is None or (metadata is not None and not isinstance(dataset.data, list)):
                raise MisconfigurationException("Only dataset built out of metadata is supported.")
            labels_to_indices = self._labels_to_indices(dataset.data)
            if len(labels_to_indices) < ways:
                raise MisconfigurationException(
                    "Provided `ways` should be lower or equal to number of classes within your dataset."
                )
            if min(len(indice) for indice in labels_to_indices.values()) < (shots + queries):
                raise MisconfigurationException(
                    "Provided `shots + queries` should be lower than the lowest number of sample per class."
                )
            # convert the dataset to MetaDataset
            dataset = l2l.data.MetaDataset(dataset, indices_to_labels=None, labels_to_indices=labels_to_indices)
        transform_fn = self.default_transforms_fn or self._default_transform
        taskset = l2l.data.TaskDataset(
            dataset=dataset,
            task_transforms=transform_fn(dataset, ways=ways, shots=shots, queries=queries),
            num_tasks=num_task,
            task_collate=self._identity_task_collate_fn,
        )
        if isinstance(
            trainer.training_type_plugin,
            (
                DDPPlugin,
                DDPSpawnPlugin,
            ),
        ):
            # when running in a distributed data parallel way,
            # we are actually sampling one task per device.
            dataset = TaskDistributedDataParallel(
                taskset=taskset,
                global_rank=trainer.global_rank,
                world_size=trainer.world_size,
                num_workers=num_workers,
                epoch_length=epoch_length,
                # NOTE(review): os.getenv returns a str when PL_GLOBAL_SEED is
                # set, otherwise the int ``self.seed`` — confirm the sampler
                # accepts both types.
                seed=os.getenv("PL_GLOBAL_SEED", self.seed),
                requires_divisible=trainer.training,
            )
            # Accumulate gradients so that the effective meta batch spans all devices.
            self.trainer.accumulated_grad_batches = self.meta_batch_size / trainer.world_size
        else:
            devices = 1
            if isinstance(trainer.training_type_plugin, DataParallelPlugin):
                # when using DP, we need to sample n tasks, so it can splitted across multiple devices.
                devices = trainer.accelerator_connector.devices
            dataset = TaskDataParallel(taskset, epoch_length=epoch_length, devices=devices, collate_fn=None)
            self.trainer.accumulated_grad_batches = self.meta_batch_size / devices
        return dataset
    @staticmethod
    def _identity_task_collate_fn(x: Any) -> Any:
        # Tasks are already collated by learn2learn; pass them through untouched.
        return x
    @classmethod
    @catch_url_error
    def from_task(
        cls,
        *args,
        task: AdapterTask,
        backbone: torch.nn.Module,
        head: torch.nn.Module,
        algorithm: Type[LightningModule],
        **kwargs,
    ) -> Adapter:
        """Build the adapter from a task; ``meta_batch_size`` and ``shots`` are mandatory kwargs."""
        if "meta_batch_size" not in kwargs:
            raise MisconfigurationException(
                "The `meta_batch_size` should be provided as training_strategy_kwargs={'meta_batch_size'=...}. "
                "This is equivalent to the epoch length."
            )
        if "shots" not in kwargs:
            raise MisconfigurationException(
                "The `shots` should be provided training_strategy_kwargs={'shots'=...}. "
                "This is equivalent to the number of sample per label to select within a task."
            )
        return cls(task, backbone, head, algorithm, **kwargs)
    def training_step(self, batch, batch_idx) -> Any:
        # The l2l algorithms expect a plain (input, target) tuple.
        input = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])
        return self.model.training_step(input, batch_idx)
    def validation_step(self, batch, batch_idx):
        # Should be True only for trainer.validate
        if self.trainer.state.fn == TrainerFn.VALIDATING:
            self._algorithm_has_validated = True
        input = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])
        return self.model.validation_step(input, batch_idx)
    def validation_epoch_end(self, outpus: Any):
        self.model.validation_epoch_end(outpus)
    def test_step(self, batch, batch_idx):
        input = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])
        return self.model.test_step(input, batch_idx)
    def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
        return self.model.predict_step(batch[DefaultDataKeys.INPUT], batch_idx, dataloader_idx=dataloader_idx)
    def _sanetize_batch_size(self, batch_size: int) -> int:
        # Meta-learning samples whole tasks, so the DataLoader batch size is
        # forced to 1; warn once if the user asked for anything else.
        if batch_size != 1:
            warning_cache.warn(
                "When using a meta-learning training_strategy, the batch_size should be set to 1. "
                "HINT: You can modify the `meta_batch_size` to 100 for example by doing "
                f"{type(self._task.task)}" + "(training_strategies_kwargs={'meta_batch_size': 100})"
            )
        return 1
    def process_train_dataset(
        self,
        dataset: BaseAutoDataset,
        trainer: flash.Trainer,
        batch_size: int,
        num_workers: int,
        pin_memory: bool,
        collate_fn: Callable,
        shuffle: bool,
        drop_last: bool,
        sampler: Optional[Sampler],
    ) -> DataLoader:
        """Convert the dataset into an episodic task sampler before building the train DataLoader."""
        dataset = self._convert_dataset(
            trainer=trainer,
            dataset=dataset,
            ways=self.ways,
            shots=self.shots,
            queries=self.queries,
            num_workers=num_workers,
            num_task=self.num_task,
            epoch_length=self.epoch_length,
        )
        if isinstance(dataset, IterableDataset):
            # Iterable task samplers are incompatible with shuffling/samplers.
            shuffle = False
            sampler = None
        return super().process_train_dataset(
            dataset,
            trainer,
            self._sanetize_batch_size(batch_size),
            num_workers,
            False,
            collate_fn,
            shuffle=shuffle,
            drop_last=drop_last,
            sampler=sampler,
            persistent_workers=True,
        )
    def process_val_dataset(
        self,
        dataset: BaseAutoDataset,
        trainer: flash.Trainer,
        batch_size: int,
        num_workers: int,
        pin_memory: bool,
        collate_fn: Callable,
        shuffle: bool = False,
        drop_last: bool = False,
        sampler: Optional[Sampler] = None,
    ) -> DataLoader:
        """Build the validation DataLoader over an episodic task sampler (test_* hyper-parameters)."""
        dataset = self._convert_dataset(
            trainer=trainer,
            dataset=dataset,
            ways=self.test_ways,
            shots=self.test_shots,
            queries=self.test_queries,
            num_workers=num_workers,
            num_task=self.test_num_task,
            epoch_length=self.test_epoch_length,
        )
        if isinstance(dataset, IterableDataset):
            shuffle = False
            sampler = None
        # NOTE(review): deliberately routes through process_train_dataset —
        # validation also consumes the episodic task dataset; confirm against
        # the Adapter base API.
        return super().process_train_dataset(
            dataset,
            trainer,
            self._sanetize_batch_size(batch_size),
            num_workers,
            False,
            collate_fn,
            shuffle=shuffle,
            drop_last=drop_last,
            sampler=sampler,
            persistent_workers=True,
        )
    def process_test_dataset(
        self,
        dataset: BaseAutoDataset,
        trainer: flash.Trainer,
        batch_size: int,
        num_workers: int,
        pin_memory: bool,
        collate_fn: Callable,
        shuffle: bool = False,
        drop_last: bool = False,
        sampler: Optional[Sampler] = None,
    ) -> DataLoader:
        """Build the test DataLoader over an episodic task sampler (test_* hyper-parameters)."""
        dataset = self._convert_dataset(
            trainer=trainer,
            dataset=dataset,
            ways=self.test_ways,
            shots=self.test_shots,
            queries=self.test_queries,
            num_workers=num_workers,
            num_task=self.test_num_task,
            epoch_length=self.test_epoch_length,
        )
        if isinstance(dataset, IterableDataset):
            shuffle = False
            sampler = None
        # NOTE(review): same process_train_dataset routing as validation above.
        return super().process_train_dataset(
            dataset,
            trainer,
            self._sanetize_batch_size(batch_size),
            num_workers,
            False,
            collate_fn,
            shuffle=shuffle,
            drop_last=drop_last,
            sampler=sampler,
            persistent_workers=True,
        )
    def process_predict_dataset(
        self,
        dataset: BaseAutoDataset,
        batch_size: int = 1,
        num_workers: int = 0,
        pin_memory: bool = False,
        collate_fn: Callable = lambda x: x,
        shuffle: bool = False,
        drop_last: bool = True,
        sampler: Optional[Sampler] = None,
    ) -> DataLoader:
        """Build the predict DataLoader; requires the algorithm to have been validated first."""
        if not self._algorithm_has_validated:
            raise MisconfigurationException(
                "This training_strategies requires to be validated. Call trainer.validate(...)."
            )
        return super().process_predict_dataset(
            dataset,
            batch_size,
            num_workers,
            pin_memory,
            collate_fn,
            shuffle=shuffle,
            drop_last=drop_last,
            sampler=sampler,
        )
class DefaultAdapter(Adapter):
    """The ``DefaultAdapter`` is an :class:`~flash.core.adapter.Adapter`."""
    required_extras: str = "image"
    def __init__(self, task: AdapterTask, backbone: torch.nn.Module, head: torch.nn.Module):
        super().__init__()
        # Keep the task reachable without registering it as a submodule.
        self._task = NoModule(task)
        self.backbone = backbone
        self.head = head
    @classmethod
    @catch_url_error
    def from_task(
        cls,
        *args,
        task: AdapterTask,
        backbone: torch.nn.Module,
        head: torch.nn.Module,
        **kwargs,
    ) -> Adapter:
        """Construct the adapter from a task plus its backbone and head."""
        return cls(task, backbone, head)
    def training_step(self, batch: Any, batch_idx: int) -> Any:
        inputs = batch[DefaultDataKeys.INPUT]
        targets = batch[DefaultDataKeys.TARGET]
        return Task.training_step(self._task.task, (inputs, targets), batch_idx)
    def validation_step(self, batch: Any, batch_idx: int) -> Any:
        inputs = batch[DefaultDataKeys.INPUT]
        targets = batch[DefaultDataKeys.TARGET]
        return Task.validation_step(self._task.task, (inputs, targets), batch_idx)
    def test_step(self, batch: Any, batch_idx: int) -> Any:
        inputs = batch[DefaultDataKeys.INPUT]
        targets = batch[DefaultDataKeys.TARGET]
        return Task.test_step(self._task.task, (inputs, targets), batch_idx)
    def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
        predictions = Task.predict_step(
            self._task.task, (batch[DefaultDataKeys.INPUT]), batch_idx, dataloader_idx=dataloader_idx
        )
        batch[DefaultDataKeys.PREDS] = predictions
        return batch
    def forward(self, x) -> torch.Tensor:
        # TODO: Resolve this hack
        if x.dim() == 3:
            x = x.unsqueeze(0)
        features = self.backbone(x)
        # Global-average-pool spatial dims of a 4D feature map before the head.
        if features.dim() == 4:
            features = features.mean(dim=(-2, -1))
        return self.head(features)
# Registry mapping training-strategy names to adapter factory callables.
TRAINING_STRATEGIES = FlashRegistry("training_strategies")
TRAINING_STRATEGIES(name="default", fn=partial(DefaultAdapter.from_task))
if _LEARN2LEARN_AVAILABLE:
    from learn2learn import algorithms
    # Auto-register every Lightning-based learn2learn algorithm, e.g.
    # "LightningMAML" becomes the strategy name "maml".
    for algorithm in dir(algorithms):
        # skip base class
        if algorithm == "LightningEpisodicModule":
            continue
        try:
            if "lightning" in algorithm.lower() and issubclass(getattr(algorithms, algorithm), LightningModule):
                TRAINING_STRATEGIES(
                    name=algorithm.lower().replace("lightning", ""),
                    fn=partial(Learn2LearnAdapter.from_task, algorithm=getattr(algorithms, algorithm)),
                    providers=[_LEARN2LEARN],
                )
        # NOTE(review): broad best-effort guard — issubclass can raise on
        # non-class attributes of the module; failures are intentionally ignored.
        except Exception:
            pass
| 36.468978 | 116 | 0.631824 |
ace91fc0ca8d5ab663112a65eb4692bc7f08c3bd | 802 | py | Python | DjangoProject/urls.py | duythanhcn/Django-Template | 5fb46c08874b168f19196a592f8bded0de360b3e | [
"MIT"
] | null | null | null | DjangoProject/urls.py | duythanhcn/Django-Template | 5fb46c08874b168f19196a592f8bded0de360b3e | [
"MIT"
] | null | null | null | DjangoProject/urls.py | duythanhcn/Django-Template | 5fb46c08874b168f19196a592f8bded0de360b3e | [
"MIT"
] | null | null | null | """DjangoProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib import admin
# URL routes: the Django admin plus the project's API app.
urlpatterns = [
    # NOTE(review): no trailing slash on "admin" — the Django convention is
    # "admin/" so APPEND_SLASH redirects work; confirm this is intended.
    path("admin", admin.site.urls),
    path('api/', include('api.urls')),
]
| 34.869565 | 77 | 0.704489 |
ace9212d17d059464a1870c5e2cd32d48aa428d8 | 6,262 | py | Python | Thesis Janne/Full image capsnet/Data augmentation/Full-image capsnet augmentation.py | janne1994/CapsNet-Keras | 0522f4250f63cafeb55d8e6948a9f65d274f2e0d | [
"MIT"
] | null | null | null | Thesis Janne/Full image capsnet/Data augmentation/Full-image capsnet augmentation.py | janne1994/CapsNet-Keras | 0522f4250f63cafeb55d8e6948a9f65d274f2e0d | [
"MIT"
] | null | null | null | Thesis Janne/Full image capsnet/Data augmentation/Full-image capsnet augmentation.py | janne1994/CapsNet-Keras | 0522f4250f63cafeb55d8e6948a9f65d274f2e0d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import tensorflow as tf
import keras
import numpy as np
import matplotlib.pyplot as plt
import cv2
from keras import layers
from keras import models
from keras.models import Sequential
import h5py
from pathlib import Path
import os
import os
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
import tensorflow as tf
import keras
import keras.backend as K
from capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask
from keras import layers, models, optimizers
from keras.applications import vgg16
from keras.layers import Conv2D, MaxPooling2D
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import classification_report
"""## Loading images into Python and displaying them"""
# Working directory and location of the HDF5 image archives (Google Drive mount
# used from Colab).
wd = os.getcwd()
hdf5_dir = Path(wd + '/drive/My Drive/Colab Notebooks/Thesis/')
#https://realpython.com/storing-images-in-python/#storing-with-hdf5
def read_many_hdf5(num_images):
    """Read an image/label archive from HDF5.

    Parameters
    ----------
    num_images : int
        Number of images in the archive; also the stem of the ``.h5`` file name.

    Returns
    -------
    images : np.ndarray
        uint8 image array (N, H, W, 3) read from the "/images" dataset.
    labels : np.ndarray
        uint8 label array read from the "/meta" dataset.
    """
    # BUG FIX: the file was opened with "r+" (read/write) and never closed.
    # Open read-only and use a context manager so the handle is always released.
    with h5py.File(hdf5_dir / f"{num_images}.h5", "r") as file:
        images = np.array(file["/images"]).astype("uint8")
        labels = np.array(file["/meta"]).astype("uint8")
    return images, labels
# Load the train/test/validation splits from their HDF5 archives.
x_train1, y_train = read_many_hdf5(161)
x_test1, y_test = read_many_hdf5(41)
x_val1, y_val = read_many_hdf5(51)
# BUG FIX: the original printed/imshowed x_train/x_test/x_val, which are only
# defined *after* the grayscale conversion below — that raised NameError here.
# Inspect the freshly loaded RGB arrays instead.
print(x_train1.shape)
print(x_test1.shape)
print(x_val1.shape)
plt.imshow(x_val1[0])
from skimage import color
# Convert the RGB uint8 images to single-channel float grayscale.
x_train = color.rgb2gray(x_train1)
x_test = color.rgb2gray(x_test1)
x_val = color.rgb2gray(x_val1)
"""Resize to model can train"""
def resizing_more(data, size=(64, 64)):
    """Resize every image in ``data`` to ``size``.

    Parameters
    ----------
    data : iterable of np.ndarray
        Images to resize.
    size : tuple of int, optional
        Target (width, height) passed to ``cv2.resize``; defaults to (64, 64)
        (generalized from the previously hard-coded constant — callers without
        the argument behave exactly as before).

    Returns
    -------
    np.ndarray
        Array stacking the resized images.
    """
    return np.array([cv2.resize(image, size) for image in data])
# Downscale all splits to 64x64 so they match the CapsNet input size.
x_train = resizing_more(x_train)
x_test = resizing_more(x_test)
x_val = resizing_more(x_val)
"""Adapted from Xifeng Guo (GITHUB)"""
path = wd + '/drive/My Drive/Colab Notebooks/Thesis/'
os.chdir(path)
# Fix the numpy seed for reproducibility; use NHWC (channels-last) layout.
np.random.seed(4)
K.set_image_data_format('channels_last')
def CapsNet(input_shape, n_class, routings):
    """Build the Capsule Network and return (train_model, eval_model, manipulate_model).

    Args:
        input_shape: shape of a single input image, e.g. (64, 64, 1).
        n_class: number of output capsules/classes.
        routings: number of dynamic-routing iterations in the capsule layer.
    """
    x = layers.Input(shape=input_shape)
    # Layer 1: Just a conventional Conv2D layer
    conv1 = Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)
    # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
    primarycaps = PrimaryCap(conv1, dim_capsule=8, n_channels=32, kernel_size=9, strides=2, padding='valid')
    # Layer 3: Capsule layer. Routing algorithm works here.
    digitcaps = CapsuleLayer(num_capsule=n_class, dim_capsule=16, routings=routings,
                             name='digitcaps')(primarycaps)
    # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
    # If using tensorflow, this will not be necessary. :)
    out_caps = Length(name='capsnet')(digitcaps)
    # Decoder network.
    y = layers.Input(shape=(n_class,))
    masked_by_y = Mask()([digitcaps, y])  # The true label is used to mask the output of capsule layer. For training
    masked = Mask()(digitcaps)  # Mask using the capsule with maximal length. For prediction
    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(512, activation='relu', input_dim=16*n_class))  # YES
    decoder.add(layers.Dense(1024, activation='relu'))  # YES
    decoder.add(layers.Dense(4096, activation='relu'))  # YES
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))
    # Models for training and evaluation (prediction)
    train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
    eval_model = models.Model(x, [out_caps, decoder(masked)])
    # manipulate model: perturb capsule dimensions via an additive noise input.
    noise = layers.Input(shape=(n_class, 16))
    noised_digitcaps = layers.Add()([digitcaps, noise])
    masked_noised_y = Mask()([noised_digitcaps, y])
    manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
    return train_model, eval_model, manipulate_model
def margin_loss(y_true, y_pred):
    """
    Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this loss should work too. Not test it.
    :param y_true: [None, n_classes]
    :param y_pred: [None, num_capsule]
    :return: a scalar loss value.
    """
    # Penalty when the class is present but the capsule length is below 0.9.
    present_term = y_true * K.square(K.maximum(0., 0.9 - y_pred))
    # Down-weighted penalty when the class is absent but the length exceeds 0.1.
    absent_term = 0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
    return K.mean(K.sum(present_term + absent_term, 1))
# Add the trailing channel axis expected by the Conv2D input.
# BUG FIX: the sample counts were hard-coded (2995/749/40), which contradicts
# the split sizes loaded above and raises on any size mismatch; -1 lets numpy
# infer the count, preserving behavior when the counts happen to match.
x_train = x_train.reshape((-1, 64, 64, 1))
x_val = x_val.reshape((-1, 64, 64, 1))
x_test = x_test.reshape((-1, 64, 64, 1))
model, eval_model, manipulate_model = CapsNet(input_shape=x_train.shape[1:],
                                              n_class=1,
                                              routings=2)
# compile the model: margin loss on the capsule output, MSE on the reconstruction.
model.compile(optimizer=optimizers.Adam(lr=3e-3),
              loss=[margin_loss, 'mse'],
              metrics={'capsnet': 'accuracy'})
model.summary()
history = model.fit(
[x_train, y_train],[y_train,x_train],
batch_size=128,
epochs=30,
validation_data=([x_val, y_val], [y_val, x_val]),
shuffle=True)
print(history.history.keys())
#version 2.5
plt.plot(history.history['capsnet_acc'])
plt.plot(history.history['val_capsnet_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
plt.plot(history.history['capsnet_loss'])
plt.plot(history.history['val_capsnet_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#Metrics
y_pred, x_recon = eval_model.predict(x_test)
predictions = [1 if x>0.5 else 0 for x in y_pred]
print(np.array(predictions).shape)
confusion_mtx = confusion_matrix(y_test, predictions)
print(confusion_mtx)
print(f1_score(y_test, predictions ))
print(classification_report(y_test, predictions))
print(accuracy_score(y_test, predictions))
| 28.990741 | 119 | 0.709677 |
ace9218b5a18ffac745a6a6fdd23b82e420af4ee | 9,652 | py | Python | logdevice/ops/ldops/types/maintenance_view.py | yzhdanov/LogDevice | 809262310df981eb8ed1e2b3252b1ccf4f6aa875 | [
"BSD-3-Clause"
] | 1 | 2020-03-03T05:46:28.000Z | 2020-03-03T05:46:28.000Z | logdevice/ops/ldops/types/maintenance_view.py | yzhdanov/LogDevice | 809262310df981eb8ed1e2b3252b1ccf4f6aa875 | [
"BSD-3-Clause"
] | null | null | null | logdevice/ops/ldops/types/maintenance_view.py | yzhdanov/LogDevice | 809262310df981eb8ed1e2b3252b1ccf4f6aa875 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import operator
from datetime import datetime, timedelta
from typing import Any, Collection, Dict, Optional, Tuple
from ldops.const import INTERNAL_USER
from ldops.exceptions import NodeIsNotASequencerError
from ldops.types.node_view import NodeView
from ldops.types.sequencer_maintenance_progress import SequencerMaintenanceProgress
from ldops.types.shard_maintenance_progress import ShardMaintenanceProgress
from logdevice.admin.maintenance.types import MaintenanceDefinition, MaintenanceProgress
from logdevice.admin.nodes.types import (
MaintenanceStatus,
SequencerState,
SequencingState,
ShardOperationalState,
ShardState,
)
from logdevice.common.types import NodeID, ShardID
class MaintenanceView:
    """Read-side view joining a thrift ``MaintenanceDefinition`` with per-node cluster state.

    Unknown attribute access falls through to the wrapped maintenance definition.
    """
    def __init__(
        self,
        maintenance: MaintenanceDefinition,
        node_index_to_node_view: Dict[int, NodeView],
    ) -> None:
        self._maintenance = maintenance
        self._node_index_to_node_view = node_index_to_node_view
    # pyre-ignore
    def __getattr__(self, name: str) -> Any:
        # Delegate attributes not defined on the view to the thrift definition.
        return getattr(self._maintenance, name)
    @property
    def affected_sequencer_node_ids(self) -> Tuple[NodeID, ...]:
        """Deduplicated sequencer NodeIDs targeted by this maintenance, sorted by node_index."""
        return tuple(
            sorted(set(self.sequencer_nodes), key=operator.attrgetter("node_index"))
        )
    @property
    def affected_sequencer_node_indexes(self) -> Tuple[int, ...]:
        return tuple(
            n.node_index
            for n in self.affected_sequencer_node_ids
            if n.node_index is not None
        )
    @property
    def affected_storage_node_ids(self) -> Tuple[NodeID, ...]:
        """Deduplicated storage NodeIDs whose shards are targeted, sorted by node_index."""
        return tuple(
            sorted({s.node for s in self.shards}, key=operator.attrgetter("node_index"))
        )
    @property
    def affected_storage_node_indexes(self) -> Tuple[int, ...]:
        return tuple(
            n.node_index
            for n in self.affected_storage_node_ids
            if n.node_index is not None
        )
    @property
    def affected_node_ids(self) -> Tuple[NodeID, ...]:
        """Union of sequencer and storage NodeIDs affected by this maintenance."""
        return tuple(
            sorted(
                set(self.sequencer_nodes).union({s.node for s in self.shards}),
                key=operator.attrgetter("node_index"),
            )
        )
    @property
    def affected_node_indexes(self) -> Tuple[int, ...]:
        return tuple(
            n.node_index for n in self.affected_node_ids if n.node_index is not None
        )
    @property
    def shard_target_state(self) -> Optional[ShardOperationalState]:
        # Only meaningful when the maintenance actually targets shards.
        if self.affects_shards:
            return self._maintenance.shard_target_state
        else:
            return None
    @property
    def sequencer_target_state(self) -> Optional[SequencingState]:
        # Only meaningful when the maintenance actually targets sequencers.
        if self.affects_sequencers:
            return self._maintenance.sequencer_target_state
        else:
            return None
    @property
    def sequencer_nodes(self) -> Collection[NodeID]:
        return self._maintenance.sequencer_nodes or []
    @property
    def ttl(self) -> Optional[timedelta]:
        # A ttl_seconds of 0 means "no TTL".
        if self._maintenance.ttl_seconds == 0:
            return None
        else:
            return timedelta(seconds=self._maintenance.ttl_seconds)
    @property
    def created_on(self) -> Optional[datetime]:
        # Thrift timestamps are in milliseconds; convert to seconds.
        if self._maintenance.created_on is None:
            return None
        else:
            # pyre-fixme[6]: Expected `int` for 1st param but got `Optional[int]`.
            return datetime.fromtimestamp(self._maintenance.created_on // 1000)
    @property
    def expires_on(self) -> Optional[datetime]:
        # Thrift timestamps are in milliseconds; convert to seconds.
        if self._maintenance.expires_on is None:
            return None
        else:
            # pyre-fixme[6]: Expected `int` for 1st param but got `Optional[int]`.
            return datetime.fromtimestamp(self._maintenance.expires_on // 1000)
    @property
    def expires_in(self) -> Optional[timedelta]:
        """Remaining time until expiry, or None when there is no expiry."""
        if self.expires_on is None:
            return None
        else:
            # pyre-fixme[16]: `Optional` has no attribute `__sub__`.
            return self.expires_on - datetime.now()
    @property
    def affects_shards(self) -> bool:
        return len(self._maintenance.shards) > 0
    @property
    def affects_sequencers(self) -> bool:
        return len(self._maintenance.sequencer_nodes) > 0
    @property
    def num_shards_total(self) -> int:
        return len(self._maintenance.shards)
    @property
    def num_shards_done(self) -> int:
        """Number of targeted shards whose maintenance status is COMPLETED."""
        return sum(
            1
            if self.get_shard_maintenance_status(s) == MaintenanceStatus.COMPLETED
            else 0
            for s in self.shards
        )
    @property
    def are_all_shards_done(self) -> bool:
        return self.num_shards_done == self.num_shards_total
    @property
    def num_sequencers_total(self) -> int:
        return len(self._maintenance.sequencer_nodes)
    @property
    def num_sequencers_done(self) -> int:
        """Number of targeted sequencers whose maintenance status is COMPLETED."""
        return sum(
            1
            if self.get_sequencer_maintenance_status(n) == MaintenanceStatus.COMPLETED
            else 0
            for n in self.sequencer_nodes
        )
    @property
    def are_all_sequencers_done(self) -> bool:
        return self.num_sequencers_done == self.num_sequencers_total
    @property
    def is_everything_done(self) -> bool:
        return self._maintenance.progress == MaintenanceProgress.COMPLETED
    @property
    def is_blocked(self) -> bool:
        # UNKNOWN is treated as blocked alongside BLOCKED_UNTIL_SAFE.
        return self._maintenance.progress in [
            MaintenanceProgress.BLOCKED_UNTIL_SAFE,
            MaintenanceProgress.UNKNOWN,
        ]
    @property
    def is_completed(self) -> bool:
        return self._maintenance.progress == MaintenanceProgress.COMPLETED
    @property
    def is_in_progress(self) -> bool:
        return self._maintenance.progress == MaintenanceProgress.IN_PROGRESS
    @property
    def is_internal(self) -> bool:
        # Internal maintenances are those created by the internal system user.
        return self.user == INTERNAL_USER
    def get_shard_state(self, shard: ShardID) -> Optional[ShardState]:
        """Return the ShardState for ``shard``, or None when its node is not a storage node."""
        assert shard.node.node_index is not None
        node = self._node_index_to_node_view[shard.node.node_index]
        if node.is_storage:
            return node.shard_states[shard.shard_index]
        return None
    def get_sequencer_state(self, sequencer: NodeID) -> Optional[SequencerState]:
        assert sequencer.node_index is not None
        return self._node_index_to_node_view[sequencer.node_index].sequencer_state
    def get_shards_by_node_index(self, node_index: int) -> Tuple[ShardID, ...]:
        return tuple(s for s in self.shards if s.node.node_index == node_index)
    @property
    def shards(self) -> Tuple[ShardID, ...]:
        """Targeted shards sorted by (node_index, shard_index)."""
        return tuple(
            sorted(
                self._maintenance.shards,
                key=lambda x: (x.node.node_index, x.shard_index),
            )
        )
    def get_shard_maintenance_status(self, shard: ShardID) -> MaintenanceStatus:
        """Derive the per-shard maintenance status from its current operational state."""
        shard_state = self.get_shard_state(shard)
        if shard_state is None:
            # This is not a storage node, we assume that these shards are
            # COMPLETED already since there is nothing to be done.
            return MaintenanceStatus.COMPLETED
        if self.shard_target_state == ShardOperationalState.MAY_DISAPPEAR:
            # Any of these states already satisfies a MAY_DISAPPEAR target.
            if shard_state.current_operational_state in {
                ShardOperationalState.DRAINED,
                ShardOperationalState.MAY_DISAPPEAR,
                ShardOperationalState.MIGRATING_DATA,
                ShardOperationalState.PROVISIONING,
            }:
                return MaintenanceStatus.COMPLETED
        if self.shard_target_state == ShardOperationalState.DRAINED:
            if shard_state.current_operational_state == ShardOperationalState.DRAINED:
                return MaintenanceStatus.COMPLETED
        # Not at target yet: report the in-flight status if one exists.
        if shard_state.maintenance is not None:
            return shard_state.maintenance.status
        return MaintenanceStatus.NOT_STARTED
    def get_shard_last_updated_at(self, shard: ShardID) -> Optional[datetime]:
        shard_state = self.get_shard_state(shard)
        if shard_state is None:
            return None
        if shard_state.maintenance is not None:
            return ShardMaintenanceProgress.from_thrift(
                shard_state.maintenance
            ).last_updated_at
        else:
            return None
    def get_sequencer_maintenance_status(self, sequencer: NodeID) -> MaintenanceStatus:
        """Derive the per-sequencer maintenance status; raises if the node is not a sequencer."""
        sequencer_state = self.get_sequencer_state(sequencer)
        if sequencer_state is None:
            raise NodeIsNotASequencerError(f"{sequencer}")
        if self.sequencer_target_state == sequencer_state.state:
            return MaintenanceStatus.COMPLETED
        elif sequencer_state.maintenance is not None:
            return sequencer_state.maintenance.status
        else:
            return MaintenanceStatus.NOT_STARTED
    def get_sequencer_last_updated_at(self, sequencer: NodeID) -> Optional[datetime]:
        sequencer_state = self.get_sequencer_state(sequencer)
        if sequencer_state is None:
            raise NodeIsNotASequencerError(f"{sequencer}")
        elif sequencer_state.maintenance is not None:
            return SequencerMaintenanceProgress.from_thrift(
                sequencer_state.maintenance
            ).last_updated_at
        else:
            return None
    @property
    def overall_status(self) -> MaintenanceProgress:
        return self._maintenance.progress
| 33.985915 | 88 | 0.660174 |
ace92232470d90936925285d4aaaa71d0fa0f667 | 9,218 | py | Python | virtual/lib/python3.8/site-packages/django_registration/validators.py | Maureen-1998DEV/watch_Hood | 44b644dc8a5c4dfbea7a1e90ac7fe79c5dbf9abb | [
"MIT"
] | 859 | 2015-02-06T14:54:22.000Z | 2022-03-28T21:42:31.000Z | venv/lib/python3.6/site-packages/django_registration/validators.py | alvynah/Awwards-Clone | b5fa6075e97df3595e46ad5e8c214ee0af606d84 | [
"MIT"
] | 190 | 2015-03-21T14:41:04.000Z | 2021-10-03T22:36:40.000Z | venv/lib/python3.6/site-packages/django_registration/validators.py | alvynah/Awwards-Clone | b5fa6075e97df3595e46ad5e8c214ee0af606d84 | [
"MIT"
] | 315 | 2015-03-03T09:19:48.000Z | 2022-01-20T18:44:22.000Z | """
Error messages, data and custom validation code used in
django-registration's various user-registration form classes.
"""
import re
import unicodedata
from confusable_homoglyphs import confusables
from django.core.exceptions import ValidationError
from django.core.validators import EmailValidator, RegexValidator
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _
# Translatable error messages raised by the validators in this module.
CONFUSABLE = _("This name cannot be registered. " "Please choose a different name.")
CONFUSABLE_EMAIL = _(
    "This email address cannot be registered. "
    "Please supply a different email address."
)
DUPLICATE_EMAIL = _(
    "This email address is already in use. " "Please supply a different email address."
)
DUPLICATE_USERNAME = _("A user with that username already exists.")
FREE_EMAIL = _(
    "Registration using free email addresses is prohibited. "
    "Please supply a different email address."
)
RESERVED_NAME = _("This name is reserved and cannot be registered.")
TOS_REQUIRED = _("You must agree to the terms to register")
# HTML5's deliberately-simplified email regex (a "willful violation" of
# RFC 5322). WHATWG HTML5 spec, section 4.10.5.1.5.
HTML5_EMAIL_RE = (
    r"^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]"
    r"+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}"
    r"[a-zA-Z0-9])?(?:\.[a-zA-Z0-9]"
    r"(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$"
)
# Below we construct a large but non-exhaustive list of names which
# users probably should not be able to register with, due to various
# risks:
#
# * For a site which creates email addresses from username, important
# common addresses must be reserved.
#
# * For a site which creates subdomains from usernames, important
# common hostnames/domain names must be reserved.
#
# * For a site which uses the username to generate a URL to the user's
# profile, common well-known filenames must be reserved.
#
# etc., etc.
#
# Credit for basic idea and most of the list to Geoffrey Thomas's blog
# post about names to reserve:
# https://ldpreload.com/blog/names-to-reserve
SPECIAL_HOSTNAMES = [
    # Hostnames with special/reserved meaning.
    "autoconfig", # Thunderbird autoconfig
    "autodiscover", # MS Outlook/Exchange autoconfig
    "broadcasthost", # Network broadcast hostname
    "isatap", # IPv6 tunnel autodiscovery
    "localdomain", # Loopback
    "localhost", # Loopback
    "wpad", # Proxy autodiscovery
]
PROTOCOL_HOSTNAMES = [
    # Common protocol hostnames.
    "ftp",
    "imap",
    "mail",
    "news",
    "pop",
    "pop3",
    "smtp",
    "usenet",
    "uucp",
    "webmail",
    "www",
]
CA_ADDRESSES = [
    # Email addresses known used by certificate authorities during
    # verification.
    "admin",
    "administrator",
    "hostmaster",
    "info",
    "is",
    "it",
    "mis",
    "postmaster",
    "root",
    "ssladmin",
    "ssladministrator",
    "sslwebmaster",
    "sysadmin",
    "webmaster",
]
RFC_2142 = [
    # RFC-2142-defined names not already covered.
    "abuse",
    "marketing",
    "noc",
    "sales",
    "security",
    "support",
]
NOREPLY_ADDRESSES = [
    # Common no-reply email addresses.
    "mailer-daemon",
    "nobody",
    "noreply",
    "no-reply",
]
SENSITIVE_FILENAMES = [
    # Sensitive filenames.
    "clientaccesspolicy.xml", # Silverlight cross-domain policy file.
    "crossdomain.xml", # Flash cross-domain policy file.
    "favicon.ico",
    "humans.txt",
    "keybase.txt", # Keybase ownership-verification URL.
    "robots.txt",
    ".htaccess",
    ".htpasswd",
]
OTHER_SENSITIVE_NAMES = [
    # Other names which could be problems depending on URL/subdomain
    # structure.
    "account",
    "accounts",
    "auth",
    "authorize",
    "blog",
    "buy",
    "cart",
    "clients",
    "contact",
    "contactus",
    "contact-us",
    "copyright",
    "dashboard",
    "doc",
    "docs",
    "download",
    "downloads",
    "enquiry",
    "faq",
    "help",
    "inquiry",
    "license",
    "login",
    "logout",
    "me",
    "myaccount",
    "oauth",
    "pay",
    "payment",
    "payments",
    "plans",
    "portfolio",
    "preferences",
    "pricing",
    "privacy",
    "profile",
    "register",
    "secure",
    "settings",
    "signin",
    "signup",
    "ssl",
    "status",
    "store",
    "subscribe",
    "terms",
    "tos",
    "user",
    "users",
    "weblog",
    "work",
]
# Default value of ReservedNameValidator's ``reserved_names`` argument.
# NOTE: the concatenation order is part of the public default (validators
# compare these lists with ==), so do not reorder or deduplicate it.
DEFAULT_RESERVED_NAMES = (
    SPECIAL_HOSTNAMES
    + PROTOCOL_HOSTNAMES
    + CA_ADDRESSES
    + RFC_2142
    + NOREPLY_ADDRESSES
    + SENSITIVE_FILENAMES
    + OTHER_SENSITIVE_NAMES
)
@deconstructible
class ReservedNameValidator:
    """
    Validator which disallows many reserved names as form field
    values.

    Raises ValidationError(RESERVED_NAME) for any value found in
    ``reserved_names`` or beginning with ".well-known" (RFC 8615 paths).
    """

    def __init__(self, reserved_names=DEFAULT_RESERVED_NAMES):
        self.reserved_names = reserved_names

    def __call__(self, value):
        # GH issue 82: this validator only makes sense when the
        # username field is a string type.
        if not isinstance(value, str):
            return
        if value in self.reserved_names or value.startswith(".well-known"):
            raise ValidationError(RESERVED_NAME, code="invalid")

    def __eq__(self, other):
        # Fix: comparing with an arbitrary object used to raise
        # AttributeError (other.reserved_names); per the data-model
        # protocol, return NotImplemented instead so Python falls back
        # to the default identity comparison.
        if not isinstance(other, ReservedNameValidator):
            return NotImplemented
        return self.reserved_names == other.reserved_names
@deconstructible
class CaseInsensitiveUnique:
    """
    Validator which performs a case-insensitive uniqueness check.

    Queries ``model`` for an existing row whose ``field_name`` matches the
    given value case-insensitively; raises ValidationError(error_message)
    when one exists.
    """

    def __init__(self, model, field_name, error_message):
        self.model = model
        self.field_name = field_name
        self.error_message = error_message

    def __call__(self, value):
        # Only run if the username is a string.
        if not isinstance(value, str):
            return
        # NFKC-normalize and casefold so visually/canonically equivalent
        # Unicode values compare equal before hitting the database.
        value = unicodedata.normalize("NFKC", value).casefold()
        if self.model._default_manager.filter(
            **{"{}__iexact".format(self.field_name): value}
        ).exists():
            raise ValidationError(self.error_message, code="unique")

    def __eq__(self, other):
        # Fix: return NotImplemented for foreign types instead of raising
        # AttributeError on the attribute accesses below.
        if not isinstance(other, CaseInsensitiveUnique):
            return NotImplemented
        return (
            self.model == other.model
            and self.field_name == other.field_name
            and self.error_message == other.error_message
        )
@deconstructible
class HTML5EmailValidator(RegexValidator):
    """
    Validator which applies HTML5's email address rules.
    """

    # Reuse Django's standard "invalid email" message; only the regex differs
    # from Django's EmailValidator.
    message = EmailValidator.message
    regex = re.compile(HTML5_EMAIL_RE)
def validate_confusables(value):
    """
    Reject 'dangerous' usernames that could be used for homograph attacks.

    A value is dangerous when it mixes Unicode scripts and contains at
    least one character from the Unicode Visually Confusable Characters
    file. Non-string values are ignored.
    """
    if isinstance(value, str) and confusables.is_dangerous(value):
        raise ValidationError(CONFUSABLE, code="invalid")
def validate_confusables_email(value):
    """
    Validator which disallows 'dangerous' email addresses likely to
    represent homograph attacks.

    An email address is 'dangerous' if either the local-part or the
    domain, considered on their own, are mixed-script and contain one
    or more characters appearing in the Unicode Visually Confusable
    Characters file.
    """
    # Email addresses are extremely difficult.
    #
    # The current RFC governing syntax of email addresses is RFC 5322
    # which, as the HTML5 specification succinctly states, "defines a
    # syntax for e-mail addresses that is simultaneously too strict
    # ... too vague ... and too lax ... to be of practical use".
    #
    # In order to be useful, this validator must consider only the
    # addr-spec portion of an email address, and must examine the
    # local-part and the domain of that addr-spec
    # separately. Unfortunately, there are no good general-purpose
    # Python libraries currently available (that the author of
    # django-registration is aware of), supported on all versions of
    # Python django-registration supports, which can reliably provide
    # an RFC-compliant parse of either a full address or an addr-spec
    # which allows the local-part and domain to be treated separately.
    #
    # To work around this shortcoming, RegistrationForm applies the
    # HTML5 email validation rule, which HTML5 admits (in section
    # 4.10.5.1.5) is a "willful violation" of RFC 5322, to the
    # submitted email address. This will reject many technically-valid
    # but problematic email addresses, including those which make use
    # of comments, or which embed otherwise-illegal characters via
    # quoted-string.
    #
    # That in turn allows this validator to take a much simpler
    # approach: it considers any value containing exactly one '@'
    # (U+0040) to be an addr-spec, and considers everything prior to
    # the '@' to be the local-part and everything after to be the
    # domain, and performs validation on them. Any value not
    # containing exactly one '@' is assumed not to be an addr-spec,
    # and is thus "accepted" by not being validated at all.
    if value.count("@") != 1:
        return
    local_part, domain = value.split("@")
    if confusables.is_dangerous(local_part) or confusables.is_dangerous(domain):
        raise ValidationError(CONFUSABLE_EMAIL, code="invalid")
| 27.516418 | 87 | 0.663484 |
ace922d4a0589cef10f61f58c8e0026aa2dfec4a | 157 | py | Python | hardware/lcd/driver.py | jpalczewski/pills | ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26 | [
"MIT"
] | null | null | null | hardware/lcd/driver.py | jpalczewski/pills | ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26 | [
"MIT"
] | null | null | null | hardware/lcd/driver.py | jpalczewski/pills | ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26 | [
"MIT"
] | null | null | null | import time
from RPLCD.i2c import CharLCD
from typing import AnyStr
def update_LCD(msg: str):
lcd = CharLCD('PCF8574', 0x27)
lcd.write_string(msg)
| 17.444444 | 34 | 0.732484 |
ace922e1db5cfa0dc08a4e237e422c824db97c3f | 7,923 | py | Python | Code/BaselineAlgorithms/sumy/evaluation/__main__.py | tafseer-nayeem/BengaliSummarization | 31334d12bd6bbd2db92f4b0ecc436e006cb96fd8 | [
"MIT"
] | 4 | 2021-07-11T05:08:12.000Z | 2022-01-23T08:34:51.000Z | Code/BaselineAlgorithms/sumy/evaluation/__main__.py | tafseer-nayeem/BengaliSummarization | 31334d12bd6bbd2db92f4b0ecc436e006cb96fd8 | [
"MIT"
] | null | null | null | Code/BaselineAlgorithms/sumy/evaluation/__main__.py | tafseer-nayeem/BengaliSummarization | 31334d12bd6bbd2db92f4b0ecc436e006cb96fd8 | [
"MIT"
] | 4 | 2021-03-07T12:32:58.000Z | 2022-02-04T19:11:19.000Z | # -*- coding: utf-8 -*-
"""
Sumy - evaluation of automatic text summary.
Usage:
sumy_eval (random | luhn | edmundson | lsa | text-rank | lex-rank | sum-basic | kl) <reference_summary> [--length=<length>] [--language=<lang>]
sumy_eval (random | luhn | edmundson | lsa | text-rank | lex-rank | sum-basic | kl) <reference_summary> [--length=<length>] [--language=<lang>] --url=<url>
sumy_eval (random | luhn | edmundson | lsa | text-rank | lex-rank | sum-basic | kl) <reference_summary> [--length=<length>] [--language=<lang>] --file=<file_path> --format=<file_format>
sumy_eval --version
sumy_eval --help
Options:
<reference_summary> Path to the file with reference summary.
--url=<url> URL address of summarized message.
--file=<file> Path to file with summarized text.
--format=<format> Format of input file. [default: plaintext]
--length=<length> Length of summarized text. It may be count of sentences
or percentage of input text. [default: 20%]
--language=<lang> Natural language of summarized text. [default: english]
--version Displays version of application.
--help Displays this text.
"""
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import sys
from itertools import chain
from docopt import docopt
from Python.baselineAlgorithm.sumy import __version__
from Python.baselineAlgorithm.sumy.utils import ItemsCount, get_stop_words, fetch_url
from Python.baselineAlgorithm.sumy.models import TfDocumentModel
from Python.baselineAlgorithm.sumy._compat import to_string
from Python.baselineAlgorithm.sumy.nlp.tokenizers import Tokenizer
from Python.baselineAlgorithm.sumy.parsers.html import HtmlParser
from Python.baselineAlgorithm.sumy.parsers.plaintext import PlaintextParser
from Python.baselineAlgorithm.sumy.summarizers.random import RandomSummarizer
from Python.baselineAlgorithm.sumy.summarizers.luhn import LuhnSummarizer
from Python.baselineAlgorithm.sumy.summarizers.edmundson import EdmundsonSummarizer
from Python.baselineAlgorithm.sumy.summarizers.lsa import LsaSummarizer
from Python.baselineAlgorithm.sumy.summarizers.text_rank import TextRankSummarizer
from Python.baselineAlgorithm.sumy.summarizers.lex_rank import LexRankSummarizer
from Python.baselineAlgorithm.sumy.summarizers.sum_basic import SumBasicSummarizer
from Python.baselineAlgorithm.sumy.summarizers.kl import KLSummarizer
from Python.baselineAlgorithm.sumy.nlp.stemmers import Stemmer
from . import precision, recall, f_score, cosine_similarity, unit_overlap
from . import rouge_1, rouge_2, rouge_l_sentence_level, rouge_l_summary_level
# Maps the value of the ``--format`` CLI option to the parser class
# that handles it.
PARSERS = {
    "html": HtmlParser,
    "plaintext": PlaintextParser,
}
def build_random(parser, language):
    """Return a summarizer that selects sentences at random."""
    return RandomSummarizer()
def build_luhn(parser, language):
    """Return a Luhn summarizer configured with *language* stop words."""
    stemmer = Stemmer(language)
    summarizer = LuhnSummarizer(stemmer)
    summarizer.stop_words = get_stop_words(language)
    return summarizer
def build_edmundson(parser, language):
    """Return an Edmundson summarizer seeded from the parsed document."""
    stemmer = Stemmer(language)
    summarizer = EdmundsonSummarizer(stemmer)
    # Stop words act as null words; bonus/stigma words come from the parser.
    summarizer.null_words = get_stop_words(language)
    summarizer.bonus_words = parser.significant_words
    summarizer.stigma_words = parser.stigma_words
    return summarizer
def build_lsa(parser, language):
    """Return an LSA summarizer configured with *language* stop words."""
    stemmer = Stemmer(language)
    summarizer = LsaSummarizer(stemmer)
    summarizer.stop_words = get_stop_words(language)
    return summarizer
def build_text_rank(parser, language):
    """Return a TextRank summarizer configured with *language* stop words."""
    stemmer = Stemmer(language)
    summarizer = TextRankSummarizer(stemmer)
    summarizer.stop_words = get_stop_words(language)
    return summarizer
def build_lex_rank(parser, language):
    """Return a LexRank summarizer configured with *language* stop words."""
    stemmer = Stemmer(language)
    summarizer = LexRankSummarizer(stemmer)
    summarizer.stop_words = get_stop_words(language)
    return summarizer
def build_sum_basic(parser, language):
    """Return a SumBasic summarizer configured with *language* stop words."""
    stemmer = Stemmer(language)
    summarizer = SumBasicSummarizer(stemmer)
    summarizer.stop_words = get_stop_words(language)
    return summarizer
def build_kl(parser, language):
    """Return a KL-divergence summarizer configured with *language* stop words."""
    stemmer = Stemmer(language)
    summarizer = KLSummarizer(stemmer)
    summarizer.stop_words = get_stop_words(language)
    return summarizer
def evaluate_cosine_similarity(evaluated_sentences, reference_sentences):
    """Cosine similarity between TF models built from the two sentence sets."""
    def _tf_model(sentences):
        # Flatten the sentences into one word sequence for the TF model.
        return TfDocumentModel(tuple(chain(*(s.words for s in sentences))))

    return cosine_similarity(_tf_model(evaluated_sentences),
                             _tf_model(reference_sentences))
def evaluate_unit_overlap(evaluated_sentences, reference_sentences):
    """Unit-overlap score between TF models built from the two sentence sets."""
    def _tf_model(sentences):
        # Flatten the sentences into one word sequence for the TF model.
        return TfDocumentModel(tuple(chain(*(s.words for s in sentences))))

    return unit_overlap(_tf_model(evaluated_sentences),
                        _tf_model(reference_sentences))
# Maps each docopt command name to the factory building that summarizer.
AVAILABLE_METHODS = {
    "random": build_random,
    "luhn": build_luhn,
    "edmundson": build_edmundson,
    "lsa": build_lsa,
    "text-rank": build_text_rank,
    "lex-rank": build_lex_rank,
    "sum-basic": build_sum_basic,
    "kl": build_kl,
}
# Triples of (display name, compare against the full document instead of the
# reference summary?, evaluation function).
AVAILABLE_EVALUATIONS = (
    ("Precision", False, precision),
    ("Recall", False, recall),
    ("F-score", False, f_score),
    ("Cosine similarity", False, evaluate_cosine_similarity),
    ("Cosine similarity (document)", True, evaluate_cosine_similarity),
    ("Unit overlap", False, evaluate_unit_overlap),
    ("Unit overlap (document)", True, evaluate_unit_overlap),
    ("Rouge-1", False, rouge_1),
    ("Rouge-2", False, rouge_2),
    ("Rouge-L (Sentence Level)", False, rouge_l_sentence_level),
    ("Rouge-L (Summary Level)", False, rouge_l_summary_level)
)
def main(args=None):
    """CLI entry point: summarize the input and print every evaluation metric."""
    args = docopt(to_string(__doc__), args, version=__version__)
    summarizer, document, items_count, reference_summary = handle_arguments(args)

    evaluated_sentences = summarizer(document, items_count)
    reference_document = PlaintextParser.from_string(reference_summary,
        Tokenizer(args["--language"]))
    reference_sentences = reference_document.document.sentences

    for name, against_document, evaluate in AVAILABLE_EVALUATIONS:
        # Some metrics compare against the whole source document rather
        # than the human reference summary.
        target = document.sentences if against_document else reference_sentences
        print("%s: %f" % (name, evaluate(evaluated_sentences, target)))

    return 0
def handle_arguments(args):
    """Turn parsed docopt *args* into (summarizer builder applied, document,
    items count, reference summary text).

    Input priority: --url, then --file, then stdin.
    """
    document_format = args["--format"]
    if document_format is not None and document_format not in PARSERS:
        raise ValueError("Unsupported format of input document. Possible values are: %s. Given: %s." % (
            ", ".join(PARSERS.keys()),
            document_format,
        ))
    if args["--url"] is not None:
        # URLs are always treated as HTML regardless of --format.
        parser = PARSERS["html"]
        document_content = fetch_url(args["--url"])
    elif args["--file"] is not None:
        parser = PARSERS.get(document_format, PlaintextParser)
        with open(args["--file"], "rb") as file:
            document_content = file.read()
    else:
        parser = PARSERS["plaintext"]
        document_content = sys.stdin.read()
    # Default to Luhn when no method flag matched (docopt guarantees one is
    # set, so the loop normally overrides this).
    summarizer_builder = AVAILABLE_METHODS["luhn"]
    for method, builder in AVAILABLE_METHODS.items():
        if args[method]:
            summarizer_builder = builder
            break
    items_count = ItemsCount(args["--length"])
    parser = parser(document_content, Tokenizer(args["--language"]))
    with open(args["<reference_summary>"], "rb") as file:
        # NOTE: variable name has a historical typo ("summmary"); kept as-is.
        reference_summmary = file.read().decode("utf-8")
    return summarizer_builder(parser, args["--language"]), parser.document, items_count, reference_summmary
if __name__ == "__main__":
    try:
        exit_code = main()
        exit(exit_code)
    except KeyboardInterrupt:
        # Quiet exit on Ctrl+C.
        exit(1)
    except Exception as e:
        # Show a short error message instead of a traceback to end users.
        print(e)
        exit(1)
| 35.850679 | 189 | 0.728638 |
ace924b0e0a5f08556fbcc5566f44db9942aec61 | 13,744 | py | Python | utils/py27/Lib/test/test_ordered_dict.py | xahmol/8bit-Unity | b4f3bee00e012ca1755afba550a5270dce0a1054 | [
"BSD-2-Clause"
] | 42 | 2018-12-12T01:00:59.000Z | 2022-03-27T07:32:29.000Z | utils/py27/Lib/test/test_ordered_dict.py | xahmol/8bit-Unity | b4f3bee00e012ca1755afba550a5270dce0a1054 | [
"BSD-2-Clause"
] | 13 | 2020-11-06T13:50:45.000Z | 2022-01-25T07:17:37.000Z | utils/py27/Lib/test/test_ordered_dict.py | xahmol/8bit-Unity | b4f3bee00e012ca1755afba550a5270dce0a1054 | [
"BSD-2-Clause"
] | 8 | 2020-11-14T04:30:26.000Z | 2021-01-16T17:55:19.000Z | import copy
import pickle
from random import shuffle
import unittest
from collections import OrderedDict
from collections import MutableMapping
from test import mapping_tests, test_support
class TestOrderedDict(unittest.TestCase):
    """Behavioural tests for collections.OrderedDict (Python 2 API:
    iterkeys/itervalues/iteritems and the view* methods)."""
    def test_init(self):
        with self.assertRaises(TypeError):
            OrderedDict([('a', 1), ('b', 2)], None) # too many args
        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
        self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs) # dict input
        self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs) # kwds input
        self.assertEqual(list(OrderedDict(pairs).items()), pairs) # pairs input
        self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
                                          c=3, e=5).items()), pairs) # mixed input
        # make sure no positional args conflict with possible kwdargs
        self.assertEqual(list(OrderedDict(self=42).items()), [('self', 42)])
        self.assertEqual(list(OrderedDict(other=42).items()), [('other', 42)])
        self.assertRaises(TypeError, OrderedDict, 42)
        self.assertRaises(TypeError, OrderedDict, (), ())
        self.assertRaises(TypeError, OrderedDict.__init__)
        # Make sure that direct calls to __init__ do not clear previous contents
        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
        d.__init__([('e', 5), ('f', 6)], g=7, d=4)
        self.assertEqual(list(d.items()),
            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
    def test_update(self):
        with self.assertRaises(TypeError):
            OrderedDict().update([('a', 1), ('b', 2)], None) # too many args
        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
        od = OrderedDict()
        od.update(dict(pairs))
        self.assertEqual(sorted(od.items()), pairs) # dict input
        od = OrderedDict()
        od.update(**dict(pairs))
        self.assertEqual(sorted(od.items()), pairs) # kwds input
        od = OrderedDict()
        od.update(pairs)
        self.assertEqual(list(od.items()), pairs) # pairs input
        od = OrderedDict()
        od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
        self.assertEqual(list(od.items()), pairs) # mixed input
        # Issue 9137: Named argument called 'other' or 'self'
        # shouldn't be treated specially.
        od = OrderedDict()
        od.update(self=23)
        self.assertEqual(list(od.items()), [('self', 23)])
        od = OrderedDict()
        od.update(other={})
        self.assertEqual(list(od.items()), [('other', {})])
        od = OrderedDict()
        od.update(red=5, blue=6, other=7, self=8)
        self.assertEqual(sorted(list(od.items())),
                         [('blue', 6), ('other', 7), ('red', 5), ('self', 8)])
        # Make sure that direct calls to update do not clear previous contents
        # add that updates items are not moved to the end
        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
        d.update([('e', 5), ('f', 6)], g=7, d=4)
        self.assertEqual(list(d.items()),
            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
        self.assertRaises(TypeError, OrderedDict().update, 42)
        self.assertRaises(TypeError, OrderedDict().update, (), ())
        self.assertRaises(TypeError, OrderedDict.update)
    def test_abc(self):
        # OrderedDict must satisfy the MutableMapping ABC.
        self.assertIsInstance(OrderedDict(), MutableMapping)
        self.assertTrue(issubclass(OrderedDict, MutableMapping))
    def test_clear(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        self.assertEqual(len(od), len(pairs))
        od.clear()
        self.assertEqual(len(od), 0)
    def test_delitem(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        del od['a']
        self.assertNotIn('a', od)
        with self.assertRaises(KeyError):
            del od['a']
        self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
    def test_setitem(self):
        od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
        od['c'] = 10 # existing element
        od['f'] = 20 # new element
        self.assertEqual(list(od.items()),
                         [('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])
    def test_iterators(self):
        # Every iteration flavour must preserve insertion order.
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        self.assertEqual(list(od), [t[0] for t in pairs])
        self.assertEqual(od.keys()[:], [t[0] for t in pairs])
        self.assertEqual(od.values()[:], [t[1] for t in pairs])
        self.assertEqual(od.items()[:], pairs)
        self.assertEqual(list(od.iterkeys()), [t[0] for t in pairs])
        self.assertEqual(list(od.itervalues()), [t[1] for t in pairs])
        self.assertEqual(list(od.iteritems()), pairs)
        self.assertEqual(list(reversed(od)),
                         [t[0] for t in reversed(pairs)])
    def test_popitem(self):
        # popitem() pops in LIFO (last-inserted-first) order.
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        while pairs:
            self.assertEqual(od.popitem(), pairs.pop())
        with self.assertRaises(KeyError):
            od.popitem()
        self.assertEqual(len(od), 0)
    def test_pop(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        shuffle(pairs)
        while pairs:
            k, v = pairs.pop()
            self.assertEqual(od.pop(k), v)
        with self.assertRaises(KeyError):
            od.pop('xyz')
        self.assertEqual(len(od), 0)
        self.assertEqual(od.pop(k, 12345), 12345)
        # make sure pop still works when __missing__ is defined
        class Missing(OrderedDict):
            def __missing__(self, key):
                return 0
        m = Missing(a=1)
        self.assertEqual(m.pop('b', 5), 5)
        self.assertEqual(m.pop('a', 6), 1)
        self.assertEqual(m.pop('a', 6), 6)
        with self.assertRaises(KeyError):
            m.pop('a')
    def test_equality(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od1 = OrderedDict(pairs)
        od2 = OrderedDict(pairs)
        self.assertEqual(od1, od2) # same order implies equality
        pairs = pairs[2:] + pairs[:2]
        od2 = OrderedDict(pairs)
        self.assertNotEqual(od1, od2) # different order implies inequality
        # comparison to regular dict is not order sensitive
        self.assertEqual(od1, dict(od2))
        self.assertEqual(dict(od2), od1)
        # different length implied inequality
        self.assertNotEqual(od1, OrderedDict(pairs[:-1]))
    def test_copying(self):
        # Check that ordered dicts are copyable, deepcopyable, picklable,
        # and have a repr/eval round-trip
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        update_test = OrderedDict()
        update_test.update(od)
        for i, dup in enumerate([
                    od.copy(),
                    copy.copy(od),
                    copy.deepcopy(od),
                    pickle.loads(pickle.dumps(od, 0)),
                    pickle.loads(pickle.dumps(od, 1)),
                    pickle.loads(pickle.dumps(od, 2)),
                    pickle.loads(pickle.dumps(od, -1)),
                    eval(repr(od)),
                    update_test,
                    OrderedDict(od),
                    ]):
            self.assertTrue(dup is not od)
            self.assertEqual(dup, od)
            self.assertEqual(list(dup.items()), list(od.items()))
            self.assertEqual(len(dup), len(od))
            self.assertEqual(type(dup), type(od))
    def test_yaml_linkage(self):
        # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature.
        # In yaml, lists are native but tuples are not.
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        # yaml.dump(od) -->
        # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n'
        self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1]))
    def test_reduce_not_too_fat(self):
        # do not save instance dictionary if not needed
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        self.assertEqual(len(od.__reduce__()), 2)
        od.x = 10
        self.assertEqual(len(od.__reduce__()), 3)
    def test_repr(self):
        od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
        self.assertEqual(repr(od),
            "OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
        self.assertEqual(eval(repr(od)), od)
        self.assertEqual(repr(OrderedDict()), "OrderedDict()")
    def test_repr_recursive(self):
        # See issue #9826
        od = OrderedDict.fromkeys('abc')
        od['x'] = od
        self.assertEqual(repr(od),
            "OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])")
    def test_repr_recursive_values(self):
        od = OrderedDict()
        od[42] = od.viewvalues()
        r = repr(od)
        # Cannot perform a stronger test, as the contents of the repr
        # are implementation-dependent. All we can say is that we
        # want a str result, not an exception of any sort.
        self.assertIsInstance(r, str)
        od[42] = od.viewitems()
        r = repr(od)
        # Again.
        self.assertIsInstance(r, str)
    def test_setdefault(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        pair_order = list(od.items())
        self.assertEqual(od.setdefault('a', 10), 3)
        # make sure order didn't change
        self.assertEqual(list(od.items()), pair_order)
        self.assertEqual(od.setdefault('x', 10), 10)
        # make sure 'x' is added to the end
        self.assertEqual(list(od.items())[-1], ('x', 10))
        # make sure setdefault still works when __missing__ is defined
        class Missing(OrderedDict):
            def __missing__(self, key):
                return 0
        self.assertEqual(Missing().setdefault(5, 9), 9)
    def test_reinsert(self):
        # Given insert a, insert b, delete a, re-insert a,
        # verify that a is now later than b.
        od = OrderedDict()
        od['a'] = 1
        od['b'] = 2
        del od['a']
        od['a'] = 1
        self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])
    def test_views(self):
        s = 'the quick brown fox jumped over a lazy dog yesterday before dawn'.split()
        od = OrderedDict.fromkeys(s)
        self.assertEqual(list(od.viewkeys()), s)
        self.assertEqual(list(od.viewvalues()), [None for k in s])
        self.assertEqual(list(od.viewitems()), [(k, None) for k in s])
        # See http://bugs.python.org/issue24286
        self.assertEqual(od.viewkeys(), dict(od).viewkeys())
        self.assertEqual(od.viewitems(), dict(od).viewitems())
    def test_override_update(self):
        # Verify that subclasses can override update() without breaking __init__()
        class MyOD(OrderedDict):
            def update(self, *args, **kwds):
                raise Exception()
        items = [('a', 1), ('c', 3), ('b', 2)]
        self.assertEqual(list(MyOD(items).items()), items)
    def test_free_after_iterating(self):
        # Abandoned iterators must not keep the dict alive (refleak check).
        test_support.check_free_after_iterating(self, iter, OrderedDict)
        test_support.check_free_after_iterating(self, lambda d: d.iterkeys(), OrderedDict)
        test_support.check_free_after_iterating(self, lambda d: d.itervalues(), OrderedDict)
        test_support.check_free_after_iterating(self, lambda d: d.iteritems(), OrderedDict)
        test_support.check_free_after_iterating(self, lambda d: iter(d.viewkeys()), OrderedDict)
        test_support.check_free_after_iterating(self, lambda d: iter(d.viewvalues()), OrderedDict)
        test_support.check_free_after_iterating(self, lambda d: iter(d.viewitems()), OrderedDict)
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    """Run the generic mapping-protocol suite against OrderedDict."""
    type2test = OrderedDict
    def test_popitem(self):
        # Overridden: an empty OrderedDict raises KeyError from popitem().
        d = self._empty_mapping()
        self.assertRaises(KeyError, d.popitem)
class MyOrderedDict(OrderedDict):
    """Trivial subclass used to check that subclassing preserves behaviour."""
    pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    """Run the generic mapping-protocol suite against an OrderedDict subclass."""
    type2test = MyOrderedDict
    def test_popitem(self):
        # Overridden: an empty mapping raises KeyError from popitem().
        d = self._empty_mapping()
        self.assertRaises(KeyError, d.popitem)
def test_main(verbose=None):
    """Run every test class in this module through the regrtest driver."""
    test_support.run_unittest(TestOrderedDict, GeneralMappingTests,
                              SubclassMappingTests)
if __name__ == "__main__":
    # Verbose when invoked directly (regrtest passes its own verbosity).
    test_main(verbose=True)
| 43.493671 | 100 | 0.528376 |
ace924b8007d40fd87aa830a20148e08e78f5453 | 162 | py | Python | lind/analysis/__init__.py | James-Montgomery/lind | 53284c2c2c121482b56d6a6bc1a30d49a8db1e5c | [
"MIT"
] | 1 | 2021-08-12T03:37:43.000Z | 2021-08-12T03:37:43.000Z | lind/analysis/__init__.py | James-Montgomery/lind | 53284c2c2c121482b56d6a6bc1a30d49a8db1e5c | [
"MIT"
] | 1 | 2021-04-27T17:01:24.000Z | 2021-04-27T17:01:24.000Z | lind/analysis/__init__.py | James-Montgomery/lind | 53284c2c2c121482b56d6a6bc1a30d49a8db1e5c | [
"MIT"
] | null | null | null | """
Analysis
========
This module is dedicated to analysis of experiments (AOE).
"""
from lind.analysis import (
freq,
multiple_comparison_procedures
)
| 13.5 | 58 | 0.685185 |
ace925286268022df7849912665aaef161bd7270 | 6,860 | py | Python | lyrics_extractor/lyrics.py | JasonLCY3/PyLyrics-Extractor | 8dde6d2acc8bc89450535060e7438945c6ac196e | [
"MIT"
] | null | null | null | lyrics_extractor/lyrics.py | JasonLCY3/PyLyrics-Extractor | 8dde6d2acc8bc89450535060e7438945c6ac196e | [
"MIT"
] | null | null | null | lyrics_extractor/lyrics.py | JasonLCY3/PyLyrics-Extractor | 8dde6d2acc8bc89450535060e7438945c6ac196e | [
"MIT"
] | null | null | null | import time
import urllib.parse
import urllib.request
import json
from bs4 import BeautifulSoup
class LyricScraperException(Exception):
    """Handles all lyrics extractor exceptions."""
class _ScraperFactory:
    """All scrapers are defined here.

    Call the instance with (source_code, title) to bind the parsed page
    before invoking one of the site-specific ``*_scraper`` methods.
    Each scraper returns the lyrics text, or None when the expected
    markup is not found on the page.
    """
    PARAGRAPH_BREAK = '\n\n'
    # Shared mutable state, set via __call__ before each scrape.
    source_code = None  # BeautifulSoup-parsed page
    title = None        # search-result title; scrapers may trim site suffixes
    def __call__(self, source_code, title):
        # Bind the page and title for the subsequent scraper call.
        self.source_code = source_code
        self.title = title
    def _update_title(self, title):
        self.title = title
    def _genius_scraper_method_1(self):
        # Old Genius layout: a single ".lyrics" container.
        extract = self.source_code.select(".lyrics")
        if not extract:
            return None
        lyrics = (extract[0].get_text()).replace('<br>', '\n').strip()
        return lyrics
    def _genius_scraper_method_2(self):
        # New Genius layout: multiple styled "Lyrics__Container-sc-*" divs.
        all_extracts = self.source_code.select(
            'div[class*="Lyrics__Container-sc-"]')
        if not all_extracts:
            return None
        lyrics = ''
        for extract in all_extracts:
            for br in extract.find_all("br"):
                br.replace_with("\n")
            lyrics += extract.get_text()
        return lyrics.strip()
    def genius_scraper(self):
        lyrics = self._genius_scraper_method_1() or self._genius_scraper_method_2()
        # Drop the 16-character trailing site suffix from the result title
        # (presumably " | Genius Lyrics" — TODO confirm against live titles).
        self._update_title(self.title[:-16])
        return lyrics
    def glamsham_scraper(self):
        # NOTE(review): assumes the lyrics are always in the 6th
        # 'font.general' element; raises IndexError on fewer — confirm.
        extract = self.source_code.find_all('font', class_='general')[5]
        if not extract:
            return None
        for br in extract.find_all("br"):
            br.replace_with("\n")
        lyrics = extract.get_text()
        # Drop the 14-character trailing site suffix from the title.
        self._update_title(self.title[:-14].strip())
        return lyrics
    def lyricsbell_scraper(self):
        extract = self.source_code.select(".lyrics-col p")
        if not extract:
            return None
        lyrics = ''
        for i in range(len(extract)):
            lyrics += extract[i].get_text() + self.PARAGRAPH_BREAK
        lyrics = lyrics.replace('<br>', '\n').strip()
        # Drop the 13-character trailing site suffix from the title.
        self._update_title(self.title[:-13])
        return lyrics
    def lyricsted_scraper(self):
        extract = self.source_code.select(".lyric-content p")
        if not extract:
            return None
        lyrics = ''
        for i in range(len(extract)):
            lyrics += extract[i].get_text().strip() + self.PARAGRAPH_BREAK
        lyrics = lyrics.replace('<br>', '\n').strip()
        return lyrics
    def lyricsoff_scraper(self):
        extract = self.source_code.select("#main_lyrics p")
        if not extract:
            return None
        lyrics = ''
        for i in range(len(extract)):
            lyrics += extract[i].get_text(separator="\n").strip() + \
                self.PARAGRAPH_BREAK
        return lyrics.strip()
    def lyricsmint_scraper(self):
        extract = self.source_code.find(
            'section', {'id': 'lyrics'}).find_all('p')
        if not extract:
            return None
        lyrics = ''
        for i in range(len(extract)):
            lyrics += extract[i].get_text().strip() + \
                self.PARAGRAPH_BREAK
        return lyrics.strip()
    def kkbox_scraper(self):
        # Lyrics live in the second <p> of the "div.lyrics" container.
        extract = self.source_code.select("div.lyrics > p:nth-of-type(2)")
        if not extract:
            return None
        return extract[0].get_text().strip()
    def musixmatch_scraper(self):
        extract = self.source_code.select("div.mxm-lyrics > span")
        if not extract:
            return None
        return extract[0].get_text().strip()
class SongLyrics:
    """
    Takes in Google Custom Search API key & Google Engine ID in constructor args.
    Call get_lyrics function with song_name as args to get started.
    Handle raised LyricScraperException by importing it alongside.
    """

    scraper_factory = _ScraperFactory()
    # Maps a domain substring of the result URL to the scraper that can
    # extract lyrics from that site's markup.
    SCRAPERS = {
        "genius": scraper_factory.genius_scraper,
        'glamsham': scraper_factory.glamsham_scraper,
        'lyricsbell': scraper_factory.lyricsbell_scraper,
        'lyricsted': scraper_factory.lyricsted_scraper,
        'lyricsoff': scraper_factory.lyricsoff_scraper,
        'lyricsmint': scraper_factory.lyricsmint_scraper,
        'kkbox': scraper_factory.kkbox_scraper,
        'musixmatch': scraper_factory.musixmatch_scraper,
    }

    def __init__(self, gcs_api_key: str, gcs_engine_id: str):
        # isinstance() is the idiomatic type check and, unlike the former
        # ``type(...) != str`` comparison, also accepts str subclasses.
        if not isinstance(gcs_api_key, str) or not isinstance(gcs_engine_id, str):
            raise TypeError("API key and engine ID must be a string.")
        self.GCS_API_KEY = gcs_api_key
        self.GCS_ENGINE_ID = gcs_engine_id

    def __handle_search_request(self, song_name):
        """Query the Google Custom Search (siterestrict) endpoint; return parsed JSON."""
        url = "https://www.googleapis.com/customsearch/v1/siterestrict"
        params = {
            'key': self.GCS_API_KEY,
            'cx': self.GCS_ENGINE_ID,
            'q': '{}'.format(song_name),
        }
        response = urllib.request.urlopen(url + '?' + urllib.parse.urlencode(params))
        return json.loads(response.read().decode())

    def __extract_lyrics(self, result_url, title):
        """Download *result_url* and run the scraper matching its domain.

        Returns the lyrics text, or None when no scraper matched or the
        matching scraper found nothing on the page.
        """
        # Browser-like User-Agent: some lyrics sites reject urllib's default.
        req = urllib.request.Request(
            result_url,
            data=None,
            headers={
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
            }
        )
        page = urllib.request.urlopen(req)
        source_code = BeautifulSoup(page.read().decode(), 'lxml')

        self.scraper_factory(source_code, title)
        for domain, scraper in self.SCRAPERS.items():
            if domain in result_url:
                return scraper()
        return None

    def get_lyrics(self, song_name: str) -> dict:
        """
        Fetches and autocorrects (if incorrect) song name.
        Gets URL and title of the top Results.
        Extracts lyrics by using one of the available scrapers.
        Raises LyricScraperException on handling errors.
        Returns dict with title and lyrics.
        """
        data = self.__handle_search_request(song_name)

        # Re-run the search with Google's spelling correction when offered.
        spell = data.get('spelling', {}).get('correctedQuery')
        data = (spell and self.__handle_search_request(spell)) or data
        query_results = data.get('items', [])

        # Try scraping lyrics from top results
        for result in query_results:
            result_url = result["link"]
            title = result["title"]
            try:
                lyrics = self.__extract_lyrics(result_url, title)
            except Exception as err:
                # Chain the original error so the traceback stays useful.
                raise LyricScraperException(err) from err
            if lyrics:
                return {
                    "title": self.scraper_factory.title,
                    "lyrics": lyrics
                }

        raise LyricScraperException({"error": "No results found"})
| 31.324201 | 151 | 0.597085 |
ace9265d8a5d24ee1324a843bf4700ab72a89754 | 569 | py | Python | module_build_service/resolver/__init__.py | zhongtianxie/fm-orchestrator | 5ab39bf1981cf4abdf7ca4c2a7d4a6120f1bea2f | [
"MIT"
] | null | null | null | module_build_service/resolver/__init__.py | zhongtianxie/fm-orchestrator | 5ab39bf1981cf4abdf7ca4c2a7d4a6120f1bea2f | [
"MIT"
] | null | null | null | module_build_service/resolver/__init__.py | zhongtianxie/fm-orchestrator | 5ab39bf1981cf4abdf7ca4c2a7d4a6120f1bea2f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import pkg_resources
from module_build_service.resolver.base import GenericResolver
# NOTE: a new MBS resolver must also be declared in setup.py (and the
# egg-info regenerated) before it becomes discoverable as an entry point.
for entry_point in pkg_resources.iter_entry_points("mbs.resolver_backends"):
    backend_class = entry_point.load()
    GenericResolver.register_backend_class(backend_class)
# Fail fast at import time if no resolver backend was discovered.
if not GenericResolver.backends:
    raise ValueError("No resolver plugins are installed or available.")
| 35.5625 | 85 | 0.796134 |
ace926d0b64ce840f75e92091f60d80e0b018e01 | 4,722 | py | Python | kubails/services/infra.py | DevinSit/kubails | b3b2f9487d815868f0fbe9fae649789a40b50ad8 | [
"MIT"
] | 2 | 2019-05-28T00:26:52.000Z | 2019-08-02T23:02:19.000Z | kubails/services/infra.py | DevinSit/kubails | b3b2f9487d815868f0fbe9fae649789a40b50ad8 | [
"MIT"
] | 51 | 2019-12-23T04:34:40.000Z | 2022-02-12T02:28:44.000Z | kubails/services/infra.py | DevinSit/kubails | b3b2f9487d815868f0fbe9fae649789a40b50ad8 | [
"MIT"
] | 1 | 2019-09-11T20:12:18.000Z | 2019-09-11T20:12:18.000Z | import logging
import sys
from typing import List
from kubails.external_services import dependency_checker, gcloud, terraform
from kubails.services import config_store, cluster
logger = logging.getLogger(__name__)
@dependency_checker.check_dependencies()
class Infra:
    """Orchestrates the GCP + Terraform infrastructure for a Kubails project.

    Wraps the GoogleCloud and Terraform service wrappers and the project
    config store; each public method is one infrastructure operation.
    """
    def __init__(self):
        # Project configuration plus GCP/Terraform/cluster service wrappers.
        self.config = config_store.ConfigStore()
        self.gcloud = gcloud.GoogleCloud(
            self.config.gcp_project_id,
            self.config.gcp_project_region,
            self.config.gcp_project_zone
        )
        self.terraform = terraform.Terraform(self.config.get_flattened_config(), root_folder=self.config.config_dir)
        self.cluster = cluster.Cluster()
    def setup(self) -> None:
        """One-time project setup: APIs, service accounts, Terraform state.

        Calls ``sys.exit(1)`` when the configured Terraform state bucket
        already exists in a different project.
        """
        # Enable the APIs first before anything else so that subsequent commands can use those resources.
        self.gcloud.enable_apis(self.config.apis_to_enable)
        self.gcloud.deploy_builder_image()
        # Create the service account that will be used for Terraform and whatnot.
        self.gcloud.create_service_account(self.config.service_account, self.config.project_title)
        self.gcloud.add_role_to_service_account(self.config.service_account, self.config.service_account_role)
        self.gcloud.add_role_to_service_account(self.config.service_account, self.config.repo_admin_role)
        self.gcloud.add_role_to_service_account(self.config.service_account, self.config.logs_configuration_writer_role)
        self.gcloud.add_role_to_service_account(self.config.service_account, self.config.project_iam_admin_role)
        self.gcloud.create_key_for_service_account(self.config.service_account)
        # Enable the Cloud Build service account to be able to administer GKE and generate service account keys.
        cloud_build_service_account = self.gcloud.get_cloud_build_service_account()
        self.gcloud.add_role_to_entity(
            "serviceAccount", cloud_build_service_account, self.config.container_admin_role
        )
        self.gcloud.add_role_to_entity(
            "serviceAccount", cloud_build_service_account, self.config.service_account_key_admin_role
        )
        self.gcloud.add_role_to_entity(
            "serviceAccount", cloud_build_service_account, self.config.crypto_key_decrypter_role
        )
        self.gcloud.add_role_to_entity(
            "serviceAccount", cloud_build_service_account, self.config.secret_manager_role
        )
        # Create the Terraform state bucket (if it doesn't already exist) and initialize Terraform to use it.
        terraform_bucket = self.config.terraform_state_bucket
        if self.gcloud.does_bucket_exist_in_another_project(terraform_bucket):
            print()
            logger.info(
                "Sorry, bucket '{}' already exists in another project. "
                "Please add/change the '__terraform_bucket' value in 'kubails.json' "
                "to a different bucket name.".format(terraform_bucket)
            )
            sys.exit(1)
        elif not self.gcloud.does_bucket_exist_in_project(terraform_bucket):
            self.gcloud.create_bucket(terraform_bucket)
        else:
            print()
            logger.info("Terraform bucket '{}' already exists in project.".format(terraform_bucket))
        self.terraform.init()
    def cleanup(self) -> None:
        """Tear down the resources created by ``setup`` (image, SA, bucket)."""
        self.gcloud.delete_builder_image()
        self.gcloud.delete_role_from_service_account(self.config.service_account, self.config.service_account_role)
        self.gcloud.delete_service_account(self.config.service_account)
        self.gcloud.delete_bucket(self.config.terraform_state_bucket)
    def authenticate(self) -> bool:
        """Create a local service-account key and (re)initialize Terraform.

        Returns True only when both the key creation and ``terraform init``
        succeed.
        """
        result = self.gcloud.create_key_for_service_account(
            self.config.service_account,
            key_folder=self.config.config_dir
        )
        if result:
            result = result and self.terraform.init()
        return result
    def unauthenticate(self) -> bool:
        """Delete the local service-account key created by ``authenticate``."""
        return self.gcloud.delete_key_for_service_account(
            self.config.service_account,
            key_folder=self.config.config_dir
        )
    def deploy(self) -> bool:
        """Apply Terraform; on success, refresh cluster manifests from it."""
        result = self.terraform.deploy()
        if result:
            self.cluster.update_manifests_from_terraform()
        return result
    def deploy_builder(self):
        """Build and push the Cloud Build builder image."""
        self.gcloud.deploy_builder_image()
    def destroy(self) -> None:
        """Destroy the cluster ingress first, then all Terraform resources."""
        self.cluster.destroy_ingress()
        self.terraform.destroy()
    def terraform_command(self, command: str, arguments: List[str], with_vars=False) -> None:
        """Pass an arbitrary command straight through to the Terraform CLI."""
        self.terraform.run_command(command, arguments, with_vars=with_vars)
    def get_name_servers(self) -> str:
        """Return the DNS name servers reported by the Terraform state."""
        return self.terraform.get_name_servers()
| 37.47619 | 120 | 0.704151 |
ace92770bc64b0e13c3b1e617ab497754db770ac | 12,101 | py | Python | magical/benchmarks/cluster.py | SeanJia/magical | 34b95bd968fa7517581890a38cf8fbb00ff42f1b | [
"0BSD"
] | 56 | 2020-11-04T04:58:15.000Z | 2022-03-07T08:10:59.000Z | magical/benchmarks/cluster.py | SeanJia/magical | 34b95bd968fa7517581890a38cf8fbb00ff42f1b | [
"0BSD"
] | null | null | null | magical/benchmarks/cluster.py | SeanJia/magical | 34b95bd968fa7517581890a38cf8fbb00ff42f1b | [
"0BSD"
] | 8 | 2020-11-11T01:55:57.000Z | 2021-08-12T03:22:46.000Z | import abc
import enum
from gym.utils import EzPickle
import numpy as np
from magical.base_env import BaseEnv, ez_init
import magical.entities as en
import magical.geom as geom
class BaseClusterEnv(BaseEnv, abc.ABC):
    """There are blocks of many colours and types. You must arrange them into
    distinct clusters. Depending on the demo, cluster membership must either be
    determined by shape type or shape colour (but if it's determined by one
    characteristic in the demo then it should be independent of the other
    characteristic). There should be exactly one cluster for each value of the
    membership characteristic (e.g. if clustering on colour, there should be
    one blue cluster, one red cluster, etc.).
    This class should not be instantiated directly. Instead, you should use
    ClusterColourEnv or ClusterShapeEnv."""
    class ClusterBy(str, enum.Enum):
        """What characteristic should blocks be clustered by?"""
        COLOUR = 'colour'
        TYPE = 'type'
        # TODO: add a 'both' option! (will require another demo scenario)
    def __init__(
            self,
            # should we randomise assignment of colours to blocks, or use
            # default ordering?
            rand_shape_colour=False,
            # should we randomise assignment of types to blocks, or use default
            # ordering?
            rand_shape_type=False,
            # should we jitter the positions of blocks and the robot?
            rand_layout_minor=False,
            # should we fully randomise the positions of blocks and the robot?
            rand_layout_full=False,
            # should we randomise number of blocks? (this requires us to
            # randomise everything else, too)
            rand_shape_count=False,
            # which block characteristic do we want the user to pay attention
            # to for clustering? (colour vs. shape type)
            cluster_by=ClusterBy.COLOUR,
            **kwargs):
        """Store the randomisation flags and validate their combinations.

        ``rand_layout_minor`` and ``rand_layout_full`` are mutually
        exclusive; ``rand_shape_count=True`` additionally requires full
        layout, shape-type, and colour randomisation (see asserts below).
        """
        super().__init__(**kwargs)
        self.rand_shape_colour = rand_shape_colour
        self.rand_shape_type = rand_shape_type
        self.rand_shape_count = rand_shape_count
        assert not (rand_layout_minor and rand_layout_full)
        self.rand_layout_minor = rand_layout_minor
        self.rand_layout_full = rand_layout_full
        self.cluster_by = cluster_by
        if self.rand_shape_count:
            assert self.rand_layout_full, \
                "if shape count is randomised then layout must also be " \
                "fully randomised"
            assert self.rand_shape_type, \
                "if shape count is randomised then shape type must also be " \
                "randomised"
            assert self.rand_shape_colour, \
                "if shape count is randomised then colour must be " \
                "randomised too"
    def on_reset(self):
        """Build the episode: robot + blocks, cluster index, layout noise.

        Uses the subclass DEFAULT_* tables unless the corresponding
        ``rand_*`` flag was set, then records which blocks belong to each
        characteristic value for scoring.
        """
        # make the robot at default position (will be randomised at end if
        # rand_layout is true)
        robot = self._make_robot(*self.DEFAULT_ROBOT_POSE)
        # 3x blue & 2x of each other colour
        default_colours = self.DEFAULT_BLOCK_COLOURS
        # 3x pentagon & 2x of each other shape type
        default_shape_types = self.DEFAULT_BLOCK_SHAPES
        # these were generated by randomly scattering shapes about the chosen
        # default robot position and then rounding down values a bit
        default_poses = self.DEFAULT_BLOCK_POSES
        default_n_shapes = len(default_colours)
        if self.rand_shape_count:
            n_shapes = self.rng.randint(7, 10 + 1)
            # rand_shape_count=True implies rand_layout=True, so these MUST be
            # randomised at the end
            poses = [((0, 0), 0)] * n_shapes
        else:
            n_shapes = default_n_shapes
            # if rand_layout=True, these will be randomised at the end
            poses = default_poses
        if self.rand_shape_colour:
            # make sure we have at least one of each colour
            colours = list(en.SHAPE_COLOURS)
            colours.extend([
                self.rng.choice(en.SHAPE_COLOURS)
                for _ in range(n_shapes - len(colours))
            ])
            self.rng.shuffle(colours)
        else:
            colours = default_colours
        if self.rand_shape_type:
            # make sure we have at least one of each type, too
            shape_types = list(en.SHAPE_TYPES)
            shape_types.extend([
                self.rng.choice(en.SHAPE_TYPES)
                for _ in range(n_shapes - len(shape_types))
            ])
            self.rng.shuffle(shape_types)
        else:
            shape_types = default_shape_types
        assert len(poses) == n_shapes
        assert len(colours) == n_shapes
        assert len(shape_types) == n_shapes
        shape_ents = []
        for ((x, y), angle), colour, shape_type \
                in zip(poses, colours, shape_types):
            shape = self._make_shape(shape_type=shape_type,
                                     colour_name=colour,
                                     init_pos=(x, y),
                                     init_angle=angle)
            shape_ents.append(shape)
        self.add_entities(shape_ents)
        # make index mapping characteristic values to blocks
        if self.cluster_by == self.ClusterBy.COLOUR:
            c_values_list = np.asarray(colours, dtype='object')
            self.__characteristic_values = np.unique(c_values_list)
        elif self.cluster_by == self.ClusterBy.TYPE:
            c_values_list = np.asarray(shape_types, dtype='object')
            self.__characteristic_values = np.unique(c_values_list)
        else:
            raise NotImplementedError(
                f"don't know how to cluster by '{self.cluster_by}'")
        self.__blocks_by_characteristic = {}
        assert len(c_values_list) == len(shape_ents)
        for shape, c_value in zip(shape_ents, c_values_list):
            c_list = self.__blocks_by_characteristic.setdefault(c_value, [])
            c_list.append(shape)
        # as in match_regions.py, this should be added after all shapes so it
        # appears on top, but before layout randomisation so that it gets added
        # to the space correctly
        self.add_entities([robot])
        if self.rand_layout_full or self.rand_layout_minor:
            if self.rand_layout_full:
                pos_limit = rot_limit = None
            else:
                pos_limit = self.JITTER_POS_BOUND
                rot_limit = self.JITTER_ROT_BOUND
            geom.pm_randomise_all_poses(space=self._space,
                                        entities=[robot, *shape_ents],
                                        arena_lrbt=self.ARENA_BOUNDS_LRBT,
                                        rng=self.rng,
                                        rand_pos=True,
                                        rand_rot=True,
                                        rel_pos_linf_limits=pos_limit,
                                        rel_rot_limits=rot_limit)
        # set up index for lookups
        self.__ent_index = en.EntityIndex(shape_ents)
    def score_on_end_of_traj(self):
        """Score the final layout by per-characteristic clustering quality.

        Returns a float in [0, 1]: the fraction of correctly-clustered
        blocks above a 0.75 threshold, rescaled so that 1.0 means all
        blocks correct.
        """
        # Compute centroids for each value of the relevant characteristic
        # (either colour or shape type). Also compute mean squared distance
        # from centroid for each block in the cluster.
        nvals = len(self.__characteristic_values)
        centroids = np.zeros((nvals, 2))
        for c_idx, c_value in enumerate(self.__characteristic_values):
            c_blocks = self.__blocks_by_characteristic.get(c_value)
            if not c_blocks:
                centroid = (0, 0)
            else:
                positions = np.asarray([(b.shape_body.position.x,
                                         b.shape_body.position.y)
                                        for b in c_blocks])
                centroid = np.mean(positions, axis=0)
            centroids[c_idx] = centroid
        # Now for each block compute whether squared distance to nearest
        # incorrect centroid. A block is correctly clustered if the true
        # centroid is closer than the next-nearest centroid by a margin of at
        # least min_margin * (mean variation within true centroid). This
        # rewards tight clusterings.
        # NOTE(review): the margin actually computed below is
        # min_margin * (this block's own squared distance to its true
        # centroid), not the cluster's mean variation -- confirm intent.
        min_margin = 2.0  # higher = more strict
        n_blocks = 0
        n_correct = 0
        for c_idx, c_value in enumerate(self.__characteristic_values):
            for block in self.__blocks_by_characteristic.get(c_value, []):
                n_blocks += 1
                block_pos = np.array([[
                    block.shape_body.position.x,
                    block.shape_body.position.y,
                ]])
                centroid_sses = np.sum((block_pos - centroids)**2, axis=1)
                indices = np.arange(len(self.__characteristic_values))
                true_sse, = centroid_sses[indices == c_idx]
                bad_sses = centroid_sses[indices != c_idx]
                nearest_bad_centroid = np.min(bad_sses)
                true_centroid_sse = centroid_sses[c_idx]
                margin = min_margin * true_centroid_sse
                n_correct += int(
                    np.sqrt(true_sse) < np.sqrt(nearest_bad_centroid) - margin)
        # rescale so that frac_correct <= thresh gives 0 score, frac_correct ==
        # 1.0 gives 1 score. I've found it's common to frac_correct ranging
        # from 0.2 up to 0.4 just from random init; this clipping process means
        # that random init gives close to 0 average score.
        frac_correct = float(n_correct) / max(n_blocks, 1)
        thresh = 0.75
        score = max(frac_correct - thresh, 0) / (1 - thresh)
        return score
class ClusterColourEnv(BaseClusterEnv, EzPickle):
    """Cluster-by-colour variant: blocks must be grouped by colour."""
    # Default robot pose: ((x, y), angle).
    DEFAULT_ROBOT_POSE = ((0.71692, -0.34374), 0.83693)
    # The three tables below are parallel: entry i of each describes block i.
    DEFAULT_BLOCK_COLOURS = [
        en.ShapeColour.BLUE,
        en.ShapeColour.BLUE,
        en.ShapeColour.BLUE,
        en.ShapeColour.GREEN,
        en.ShapeColour.GREEN,
        en.ShapeColour.RED,
        en.ShapeColour.YELLOW,
        en.ShapeColour.YELLOW,
    ]
    DEFAULT_BLOCK_SHAPES = [
        en.ShapeType.CIRCLE,
        en.ShapeType.STAR,
        en.ShapeType.SQUARE,
        en.ShapeType.PENTAGON,
        en.ShapeType.PENTAGON,
        en.ShapeType.SQUARE,
        en.ShapeType.STAR,
        en.ShapeType.PENTAGON,
    ]
    # Default block poses: ((x, y), angle) per block.
    DEFAULT_BLOCK_POSES = [
        ((-0.5147, 0.14149), -0.38871),
        ((-0.1347, -0.71414), 1.0533),
        ((-0.74247, -0.097592), 1.1571),
        ((-0.077363, -0.42964), -0.64379),
        ((0.51978, 0.1853), -1.1762),
        ((-0.5278, -0.21642), 2.9356),
        ((-0.54039, 0.48292), 0.072818),
        ((-0.16761, 0.64303), -2.3255),
    ]
    @ez_init()
    def __init__(self, *args, **kwargs):
        """Forward to BaseClusterEnv with clustering fixed to COLOUR."""
        super().__init__(*args,
                         cluster_by=BaseClusterEnv.ClusterBy.COLOUR,
                         **kwargs)
class ClusterShapeEnv(BaseClusterEnv, EzPickle):
    """Cluster-by-shape-type variant: blocks must be grouped by shape."""
    # demo variant
    # Default robot pose: ((x, y), angle).
    DEFAULT_ROBOT_POSE = ((0.286, -0.202), -1.878)
    # The three tables below are parallel: entry i of each describes block i.
    DEFAULT_BLOCK_COLOURS = [
        en.ShapeColour.YELLOW,
        en.ShapeColour.BLUE,
        en.ShapeColour.RED,
        en.ShapeColour.RED,
        en.ShapeColour.GREEN,
        en.ShapeColour.YELLOW,
        en.ShapeColour.BLUE,
        en.ShapeColour.GREEN,
    ]
    DEFAULT_BLOCK_SHAPES = [
        en.ShapeType.SQUARE,
        en.ShapeType.PENTAGON,
        en.ShapeType.PENTAGON,
        en.ShapeType.PENTAGON,
        en.ShapeType.CIRCLE,
        en.ShapeType.STAR,
        en.ShapeType.STAR,
        en.ShapeType.CIRCLE,
    ]
    # Default block poses: ((x, y), angle) per block.
    DEFAULT_BLOCK_POSES = [
        ((-0.414, 0.297), -1.731),
        ((0.068, 0.705), 2.184),
        ((0.821, 0.220), 0.650),
        ((-0.461, -0.749), -2.673),
        ((0.867, -0.149), -2.215),
        ((-0.785, -0.140), -0.405),
        ((-0.305, -0.226), 1.341),
        ((0.758, -0.708), -2.140),
    ]
    @ez_init()
    def __init__(self, *args, **kwargs):
        """Forward to BaseClusterEnv with clustering fixed to TYPE."""
        super().__init__(*args,
                         cluster_by=BaseClusterEnv.ClusterBy.TYPE,
                         **kwargs)
| 40.607383 | 79 | 0.586646 |
ace9277a0ba68f377b1cb98e1cd8354914cfc8db | 9,677 | py | Python | tensorflow/python/ops/tensor_array_ops.py | jylinman/tensorflow | 5248d111c3aeaf9f560cd77bff0f183f38e31e0b | [
"Apache-2.0"
] | 21 | 2016-03-10T11:55:45.000Z | 2021-02-03T02:49:11.000Z | tensorflow/python/ops/tensor_array_ops.py | jylinman/tensorflow | 5248d111c3aeaf9f560cd77bff0f183f38e31e0b | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/tensor_array_ops.py | jylinman/tensorflow | 5248d111c3aeaf9f560cd77bff0f183f38e31e0b | [
"Apache-2.0"
] | 39 | 2016-03-25T05:13:09.000Z | 2020-06-16T01:30:53.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data Flow Operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
# pylint: disable=protected-access
class TensorArray(object):
  """Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.
  This class is meant to be used with dynamic iteration primitives such as
  `While` loops, and supports gradient back-propagation via special "flow"
  control flow dependencies.
  @@handle
  @@flow
  @@read
  @@unpack
  @@split
  @@write
  @@pack
  @@concat
  @@grad
  """
  def __init__(self, dtype, size=None, dynamic_size=None,
               tensor_array_name=None,
               handle=None, flow=None, name=None):
    """Construct a new TensorArray or wrap an existing TensorArray handle.
    Args:
      dtype: (required) data type of the TensorArray.
      size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
        Required if handle is not provided.
      dynamic_size: (optional) Python bool: If true, writes to the TensorArray
        can grow the TensorArray past its initial size. Default: False.
      tensor_array_name: (optional) Python string: the name of the TensorArray.
        This is used when creating the TensorArray handle. If this value is
        set, handle should be None.
      handle: (optional) A `Tensor` handle to an existing TensorArray. If this
        is set, tensor_array_name should be None.
      flow: (optional) A float `Tensor` scalar coming from an existing
        TensorArray.flow.
      name: A name for the operation (optional).
    Raises:
      ValueError: if both handle and tensor_array_name are provided.
      TypeError: if handle is provided but is not a Tensor.
    """
    if handle and tensor_array_name:
      raise ValueError(
          "Cannot construct with both handle and tensor_array_name")
    if handle and not isinstance(handle, ops.Tensor):
      raise TypeError("Handle must be a Tensor")
    if handle is None and size is None:
      raise ValueError("Size must be provided if handle is not provided")
    if handle and size is not None:
      raise ValueError("Cannot provide both a handle and size "
                       "at the same time")
    if handle and dynamic_size is not None:
      raise ValueError("Cannot provide both a handle and dynamic_size "
                       "at the same time")
    dynamic_size = dynamic_size or False
    self._dtype = dtype
    with ops.op_scope([handle, size, flow], name, "TensorArray") as scope:
      if handle:
        self._handle = handle
      else:
        # Create a fresh TensorArray resource op when no handle was given.
        self._handle = gen_data_flow_ops._tensor_array(
            dtype=dtype, size=size, dynamic_size=dynamic_size,
            tensor_array_name=tensor_array_name, name=scope)
    self._flow = flow or constant_op.constant(0, dtype=_dtypes.float32)
  @property
  def flow(self):
    """The flow `Tensor` forcing ops leading to this TensorArray state."""
    return self._flow
  @property
  def dtype(self):
    """The data type of this TensorArray."""
    return self._dtype
  @property
  def handle(self):
    """The reference to the TensorArray."""
    return self._handle
  def grad(self, source, flow=None):
    """Return a TensorArray wrapping the gradient TensorArray for `source`.

    Args:
      source: the gradient source string passed to the TensorArrayGrad op.
      flow: (optional) flow `Tensor` to gate creation of the grad array;
        defaults to this array's current flow.
    """
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized. This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    g_handle = gen_data_flow_ops._tensor_array_grad(
        handle=self._handle, source=source, flow_in=flow or self.flow)
    g = TensorArray(dtype=self._dtype, handle=g_handle, flow=flow or self.flow)
    return g
  def read(self, index, name=None):
    """Read the value at location `index` in the TensorArray."""
    value = gen_data_flow_ops._tensor_array_read(
        handle=self._handle, index=index, flow_in=self._flow, dtype=self._dtype,
        name=name)
    return value
  def write(self, index, value, name=None):
    """Write `value` into index `index` of the TensorArray.

    Returns a new TensorArray object (sharing this handle) whose flow
    output forces the write to happen before downstream reads.
    """
    flow_out = gen_data_flow_ops._tensor_array_write(
        handle=self._handle, index=index, value=value, flow_in=self._flow,
        name=name)
    # Size below is ignored
    ta = TensorArray(dtype=self._dtype, handle=self._handle)
    ta._flow = flow_out
    return ta
  def pack(self, name=None):
    """Return the values in the TensorArray as a packed `Tensor`."""
    value = gen_data_flow_ops._tensor_array_pack(
        handle=self._handle, flow_in=self._flow, dtype=self._dtype,
        name=name)
    return value
  def concat(self, name=None):
    """Return the values in the TensorArray as a concatenated `Tensor`."""
    # The op also returns per-element lengths, which are discarded here.
    value, _ = gen_data_flow_ops._tensor_array_concat(
        handle=self._handle, flow_in=self._flow, dtype=self._dtype,
        name=name)
    return value
  def unpack(self, value, name=None):
    """Pack the values of a `Tensor` in the TensorArray.

    Returns a new TensorArray object carrying the resulting flow.
    """
    flow_out = gen_data_flow_ops._tensor_array_unpack(
        handle=self._handle, value=value, flow_in=self._flow,
        name=name)
    ta = TensorArray(dtype=self._dtype, handle=self._handle)
    ta._flow = flow_out
    return ta
  def split(self, value, lengths, name=None):
    """Split the values of a `Tensor` into the TensorArray."""
    with ops.op_scope(
        [self._handle, value, lengths], name, "TensorArraySplit"):
      # The split op expects int64 lengths.
      lengths = math_ops.to_int64(lengths)
      flow_out = gen_data_flow_ops._tensor_array_split(
          handle=self._handle, value=value, lengths=lengths, flow_in=self._flow,
          name=name)
      ta = TensorArray(dtype=self._dtype, handle=self._handle)
      ta._flow = flow_out
      return ta
  def size(self, name=None):
    """Return the size of the TensorArray."""
    return gen_data_flow_ops._tensor_array_size(
        handle=self._handle, flow_in=self.flow, name=name)
  def close(self, name=None):
    """Close the current TensorArray."""
    return gen_data_flow_ops._tensor_array_close(
        handle=self._handle, name=name)
@ops.RegisterShape("TensorArray")
def _TensorArrayShape(op):
  """Shape function for TensorArray: the handle output is a 2-vector."""
  # The single `size` input must be a scalar.
  size_shape = op.inputs[0].get_shape()
  size_shape.merge_with(tensor_shape.scalar())
  return [tensor_shape.vector(2)]
@ops.RegisterShape("TensorArrayRead")
def _TensorArrayReadShape(op):
  """Shape function for TensorArrayRead; the read value has unknown shape."""
  handle = op.inputs[0]
  index = op.inputs[1]
  flow_in = op.inputs[2]
  handle.get_shape().merge_with(tensor_shape.vector(2))
  index.get_shape().merge_with(tensor_shape.scalar())
  flow_in.get_shape().merge_with(tensor_shape.scalar())
  return [tensor_shape.unknown_shape()]
@ops.RegisterShape("TensorArrayWrite")
def _TensorArrayWriteShape(op):
  """Shape function for TensorArrayWrite; flow_out is a scalar."""
  handle = op.inputs[0]
  index = op.inputs[1]
  flow_in = op.inputs[3]
  handle.get_shape().merge_with(tensor_shape.vector(2))
  index.get_shape().merge_with(tensor_shape.scalar())
  flow_in.get_shape().merge_with(tensor_shape.scalar())
  return [tensor_shape.scalar()]
@ops.RegisterShape("TensorArraySize")
def _TensorArraySizeShape(op):
  """Shape function for TensorArraySize; the size output is a scalar."""
  handle_shape = op.inputs[0].get_shape()
  handle_shape.merge_with(tensor_shape.vector(2))
  return [tensor_shape.scalar()]
@ops.RegisterShape("TensorArrayClose")
def _TensorArrayCloseShape(op):
  """Shape function for TensorArrayClose, which produces no outputs."""
  # The only input is the 2-vector TensorArray handle.
  handle_shape = op.inputs[0].get_shape()
  handle_shape.merge_with(tensor_shape.vector(2))
  return []
@ops.RegisterShape("TensorArrayGrad")
def _TensorArrayGradShape(op):
  """Shape function for TensorArrayGrad; outputs a new 2-vector handle."""
  # Input 0 is the 2-vector handle of the forward TensorArray.
  handle_shape = op.inputs[0].get_shape()
  handle_shape.merge_with(tensor_shape.vector(2))
  return [tensor_shape.vector(2)]
@ops.RegisterShape("TensorArrayPack")
def _TensorArrayPackShape(op):
  """Shape function for TensorArrayPack; the packed value shape is unknown."""
  handle = op.inputs[0]
  flow_in = op.inputs[1]
  handle.get_shape().merge_with(tensor_shape.vector(2))
  flow_in.get_shape().merge_with(tensor_shape.scalar())
  return [tensor_shape.unknown_shape()]
@ops.RegisterShape("TensorArrayConcat")
def _TensorArrayConcatShape(op):
  """Shape function for TensorArrayConcat; outputs are (value, lengths)."""
  handle = op.inputs[0]
  flow_in = op.inputs[1]
  handle.get_shape().merge_with(tensor_shape.vector(2))
  flow_in.get_shape().merge_with(tensor_shape.scalar())
  # The concatenated value has unknown shape; lengths is a 1-D vector.
  return [tensor_shape.unknown_shape(), tensor_shape.vector(None)]
@ops.RegisterShape("TensorArraySplit")
def _TensorArraySplitShape(op):
  """Shape function for TensorArraySplit; flow_out is a scalar."""
  handle = op.inputs[0]
  lengths = op.inputs[2]
  flow_in = op.inputs[3]
  handle.get_shape().merge_with(tensor_shape.vector(2))
  lengths.get_shape().merge_with(tensor_shape.vector(None))
  flow_in.get_shape().merge_with(tensor_shape.scalar())
  return [tensor_shape.scalar()]
@ops.RegisterShape("TensorArrayUnpack")
def _TensorArrayUnpackShape(op):
  """Shape function for TensorArrayUnpack; flow_out is a scalar."""
  handle = op.inputs[0]
  flow_in = op.inputs[2]
  handle.get_shape().merge_with(tensor_shape.vector(2))
  flow_in.get_shape().merge_with(tensor_shape.scalar())
  return [tensor_shape.scalar()]
# pylint: enable=protected-access
| 35.317518 | 80 | 0.710447 |
ace927c4fc1e25bbda7ec5e5f7a33fa84304d5ec | 9,388 | py | Python | videoServer.py | Hugoargui/eyeDetector | 8c0361f90dacc2e5d8262cca40b34165fdda841a | [
"MIT"
] | null | null | null | videoServer.py | Hugoargui/eyeDetector | 8c0361f90dacc2e5d8262cca40b34165fdda841a | [
"MIT"
] | null | null | null | videoServer.py | Hugoargui/eyeDetector | 8c0361f90dacc2e5d8262cca40b34165fdda841a | [
"MIT"
] | 3 | 2015-04-11T15:23:22.000Z | 2021-02-09T07:19:07.000Z | ## MIT LICENSE
#Copyright (c) 2014 Hugo Arguinariz.
#http://www.hugoargui.com
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
## This module requires the SimpleWebSocketServer module by Opiate
## http://opiate.github.io/SimpleWebSocketServer/
## That software is also distributed under MIT license
## I am in not the author of SimpleWebSocketServer.py
####################################################################################################!/
## videoServer.py
## Inputs: NONE
## Outputs: NONE
## Non standard modules: eyeDetector, SimpleWebSocketServer
####################################################################################################!/usr/bin/env python
## This module runs on the server side
## It is is expected to continuously run on the background
## This is not a web server, a web app will need a real web server (Apache?) running in parallel
## On the client side (website) the browser is expected to open a WebSocket to this server
## The browser can capture webcam images from the user using Javascript + WebRTC
## The browser then sends several video frames per second to this server via the WebRTC socket
## For each video frame, this server uses the eyeDetector module to detect the eyes on the image
## This is done in 3 steps:
## A) The received image is decoded (it had been encoded by the client javascript before sending it over websocket
## B) The eyes are detected on the image.
## ## This returns: Eye coordinates (int X, int Y)
## Image modified to include green rectangles around the person eyes
## C) The new image is encoded to a format suitable to be sent back to the client via websockets
## Once the video frames have been processed, the data can be sent back to the browser via the same websocket connection
## In addition to the eye coordinates (X, Y)
## The image from step C can be sent too.
## This last step is optional, it may be enough to send only the eye coordinate variables (X, Y)
## This coordinates could be used on the client side to draw the exact same rectangles
## If the image is not going to be sent, step C should be removed in order to improve performace.
####################################################################################################
####################################################################################################
import signal, sys, ssl, logging
import time
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer, SimpleSSLWebSocketServer
from optparse import OptionParser
import cv2
import numpy as np
import base64
## Import custom packages
import eyeDetector
import clientAnimation
try:
    import simplejson as json
except ImportError:
    # Bug fix: the bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit; only a failed import should trigger the fallback to the
    # standard-library json module (same dumps/loads API).
    import json
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
##################################################################################################
class VideoServer(WebSocket):
##############################################################################################
def handleMessage(self):
## STEP A
# Handle incoming video frame
if self.data is None:
self.data = ''
decImg = None ## Image after being decoded
procImg = None ## Image with rectangles around the eyes
encImg = None ## Image encoded in a format suitable to be sent over websocket
# #################################################
# Try processing the frame
try:
#########################################
# Decode image
# The image should have been received from the client in binary form
img = str(self.data)
img = np.fromstring(img, dtype=np.uint8)
decImg = eyeDetector.decodeImage(img)
if (decImg == None):
print self.address, 'ERROR: Could not decode image. System time: '+ str(time.clock())
if ( decImg != None):
## STEP B
## Nothing wrong, detect eyes in the image
procImg, eyesX, eyesY = eyeDetector.detectEyes(decImg)
else:
# Neither None nor !None... no image in the first place!
print self.address, 'ERROR: Could not find an image to process! '+ str(time.clock())
#########################################
# Encode image to send it back
if (procImg != None):
## STEP C
retval, encImg = eyeDetector.encodeImage(procImg)
if False == retval:
print self.address, ('ERROR: Could not encode image!'+ str(time.clock()))
else:
encImg = base64.b64encode(encImg)
else:
print self.address, 'ERROR: Could not find an image to encode!'
except Exception as n:
print 'OpenCV catch fail' + str(n)
# #################################################
# Try sending the frame back to the client
try:
if (encImg != None):
# eyesX and eyesY are of numpy.int type, which is not json serializable
# We get them back to normal python int
eyesX = np.asscalar(np.int16(eyesX))
eyesY = np.asscalar(np.int16(eyesY))
#jsonize all data to send
## If we don't wish to send encImage it should be removed from here
out = {'frame': encImg, 'eyesX': eyesX, 'eyesY': eyesY}
jsonMessage = json.dumps(out, default=lambda obj: obj.__dict__)
message = encImg
else:
print self.address, 'ERROR: Something went wrong, NOT sending any image. '+ str(time.clock())
self.sendMessage( jsonMessage )
except Exception as n:
print n
##############################################################################################
def handleConnected(self):
## Incoming websocket connection from a browser
## Several connections can be handled at the same time from different browsers
print self.address, 'Video Server: Connection received from client at system time: '+ str(time.clock())
##############################################################################################
def handleClose(self):
## The client closed the connection with the server
print self.address, 'Video Server: Connection closed at system time: '+ str(time.clock())
##################################################################################################
if __name__ == "__main__":
print ' '
print 'Video server waiting for requests. System time: '+ str(time.clock())
print '*****************************************************************'
## When launched from command line we parse OPTIONAL input arguments
## The defaults will work just fine most times
## The http port used by websocket connections is set by --port
parser = OptionParser(usage="usage: %prog [options]", version="%prog 1.0")
parser.add_option("--host", default='', type='string', action="store", dest="host", help="hostname (localhost)")
parser.add_option("--port", default=8090, type='int', action="store", dest="port", help="port (8000)")
parser.add_option("--example", default='VideoServer', type='string', action="store", dest="example", help="VideoServer, others")
parser.add_option("--ssl", default=0, type='int', action="store", dest="ssl", help="ssl (1: on, 0: off (default))")
parser.add_option("--cert", default='./cert.pem', type='string', action="store", dest="cert", help="cert (./cert.pem)")
parser.add_option("--ver", default=ssl.PROTOCOL_TLSv1, type=int, action="store", dest="ver", help="ssl version")
(options, args) = parser.parse_args()
cls = VideoServer
## If we wish to encode the websocket data stream
if options.ssl == 1:
server = SimpleSSLWebSocketServer(options.host, options.port, cls, options.cert, options.cert, version=options.ver)
else:
server = SimpleWebSocketServer(options.host, options.port, cls)
## Handle when shooting this server down
def close_sig_handler(signal, frame):
server.close()
sys.exit()
## START the server
signal.signal(signal.SIGINT, close_sig_handler)
server.serveforever()
| 46.246305 | 132 | 0.586387 |
ace92815d8d90993c7378b71ba05c9e46c70b570 | 9,499 | py | Python | oauth_provider/views.py | ovidioreyna/django-oauth-plus | b9b64a3ac24fd11f471763c88462bbf3c53e46e6 | [
"BSD-3-Clause"
] | null | null | null | oauth_provider/views.py | ovidioreyna/django-oauth-plus | b9b64a3ac24fd11f471763c88462bbf3c53e46e6 | [
"BSD-3-Clause"
] | 4 | 2018-01-11T20:59:12.000Z | 2020-05-12T12:48:53.000Z | oauth_provider/views.py | ovidioreyna/django-oauth-plus | b9b64a3ac24fd11f471763c88462bbf3c53e46e6 | [
"BSD-3-Clause"
] | 3 | 2017-12-18T20:01:36.000Z | 2018-12-17T05:35:53.000Z | from __future__ import absolute_import
import oauth2 as oauth
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import get_callable
from django.http import HttpResponse, HttpResponseBadRequest
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt
from six.moves.urllib.parse import urlencode
from oauth_provider.compat import UnsafeRedirect
from .consts import OUT_OF_BAND
from .decorators import oauth_required
from .forms import AuthorizeRequestTokenForm
from .responses import (COULD_NOT_VERIFY_OAUTH_REQUEST_RESPONSE,
INVALID_CONSUMER_RESPONSE,
INVALID_PARAMS_RESPONSE)
from .store import InvalidConsumerError, InvalidTokenError, store
from .utils import (get_oauth_request,
is_xauth_request,
require_params,
send_oauth_error,
verify_oauth_request)
OAUTH_AUTHORIZE_VIEW = 'OAUTH_AUTHORIZE_VIEW'
OAUTH_CALLBACK_VIEW = 'OAUTH_CALLBACK_VIEW'
UNSAFE_REDIRECTS = getattr(settings, "OAUTH_UNSAFE_REDIRECTS", False)
@csrf_exempt
def request_token(request):
oauth_request = get_oauth_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
missing_params = require_params(oauth_request, ('oauth_callback',))
if missing_params is not None:
return missing_params
if is_xauth_request(oauth_request):
return HttpResponseBadRequest('xAuth not allowed for this method.')
try:
consumer = store.get_consumer(request, oauth_request, oauth_request['oauth_consumer_key'])
except InvalidConsumerError:
return INVALID_CONSUMER_RESPONSE
if not verify_oauth_request(request, oauth_request, consumer):
return COULD_NOT_VERIFY_OAUTH_REQUEST_RESPONSE
try:
request_token = store.create_request_token(request, oauth_request, consumer, oauth_request['oauth_callback'])
except oauth.Error as err:
return send_oauth_error(err)
ret = urlencode({
'oauth_token': request_token.key,
'oauth_token_secret': request_token.secret,
'oauth_callback_confirmed': 'true'
})
return HttpResponse(ret, content_type='application/x-www-form-urlencoded')
@login_required
def user_authorization(request, form_class=AuthorizeRequestTokenForm):
oauth_token = request.POST.get('oauth_token', request.GET.get('oauth_token'))
if not oauth_token:
return HttpResponseBadRequest('No request token specified.')
oauth_request = get_oauth_request(request)
try:
request_token = store.get_request_token(request, oauth_request, oauth_token)
except InvalidTokenError:
return HttpResponseBadRequest('Invalid request token.')
consumer = store.get_consumer_for_request_token(request, oauth_request, request_token)
if request.method == 'POST':
form = form_class(request.POST)
if request.session.get('oauth', '') == request_token.key and form.is_valid():
request.session['oauth'] = ''
if form.cleaned_data['authorize_access']:
request_token = store.authorize_request_token(request, oauth_request, request_token)
args = {'oauth_token': request_token.key}
else:
args = {'error': _('Access not granted by user.')}
if request_token.callback is not None and request_token.callback != OUT_OF_BAND:
callback_url = request_token.get_callback_url(args)
if UNSAFE_REDIRECTS:
response = UnsafeRedirect(callback_url)
else:
response = HttpResponseRedirect(callback_url)
else:
# try to get custom callback view
callback_view_str = getattr(settings, OAUTH_CALLBACK_VIEW,
'oauth_provider.views.fake_callback_view')
try:
view_callable = get_callable(callback_view_str)
except AttributeError:
raise Exception("%s view doesn't exist." % callback_view_str)
# try to treat it as Class Based View (CBV)
try:
callback_view = view_callable.as_view()
except AttributeError:
# if it appears not to be CBV treat it like FBV
callback_view = view_callable
response = callback_view(request, **args)
else:
response = send_oauth_error(oauth.Error(_('Action not allowed.')))
else:
# try to get custom authorize view
authorize_view_str = getattr(settings, OAUTH_AUTHORIZE_VIEW,
'oauth_provider.views.fake_authorize_view')
try:
view_callable = get_callable(authorize_view_str)
except AttributeError:
raise Exception("%s view doesn't exist." % authorize_view_str)
# try to treat it as Class Based View (CBV)
try:
authorize_view = view_callable.as_view()
except AttributeError:
# if it appears not to be CBV treat it like FBV
authorize_view = view_callable
params = oauth_request.get_normalized_parameters()
# set the oauth flag
request.session['oauth'] = request_token.key
response = authorize_view(request, request_token, request_token.get_callback_url(), params)
return response
@csrf_exempt
def access_token(request):
oauth_request = get_oauth_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
# Consumer
try:
consumer = store.get_consumer(request, oauth_request, oauth_request['oauth_consumer_key'])
except InvalidConsumerError:
return HttpResponseBadRequest('Invalid consumer.')
is_xauth = is_xauth_request(oauth_request)
if not is_xauth:
# Check Parameters
missing_params = require_params(oauth_request, ('oauth_token', 'oauth_verifier'))
if missing_params is not None:
return missing_params
# Check Request Token
try:
request_token = store.get_request_token(request, oauth_request, oauth_request['oauth_token'])
except InvalidTokenError:
return HttpResponseBadRequest('Invalid request token.')
if not request_token.is_approved:
return HttpResponseBadRequest('Request Token not approved by the user.')
# Verify Signature
if not verify_oauth_request(request, oauth_request, consumer, request_token):
return HttpResponseBadRequest('Could not verify OAuth request.')
# Check Verifier
if oauth_request.get('oauth_verifier', None) != request_token.verifier:
return HttpResponseBadRequest('Invalid OAuth verifier.')
else: # xAuth
# Check Parameters
missing_params = require_params(oauth_request, ('x_auth_username', 'x_auth_password', 'x_auth_mode'))
if missing_params is not None:
return missing_params
# Check if Consumer allows xAuth
if not consumer.xauth_allowed:
return HttpResponseBadRequest('xAuth not allowed for this method')
# Check Signature
if not verify_oauth_request(request, oauth_request, consumer):
return HttpResponseBadRequest('Could not verify xAuth request.')
user = authenticate(
x_auth_username=oauth_request.get_parameter('x_auth_username'),
x_auth_password=oauth_request.get_parameter('x_auth_password'),
x_auth_mode=oauth_request.get_parameter('x_auth_mode')
)
if not user:
return HttpResponseBadRequest('xAuth username or password is not valid')
else:
request.user = user
# Handle Request Token
try:
# request_token = store.create_request_token(request, oauth_request, consumer, oauth_request.get('oauth_callback'))
request_token = store.create_request_token(request, oauth_request, consumer, OUT_OF_BAND)
request_token = store.authorize_request_token(request, oauth_request, request_token)
except oauth.Error as err:
return send_oauth_error(err)
access_token = store.create_access_token(request, oauth_request, consumer, request_token)
ret = urlencode({
'oauth_token': access_token.key,
'oauth_token_secret': access_token.secret
})
return HttpResponse(ret, content_type='application/x-www-form-urlencoded')
@oauth_required
def protected_resource_example(request):
"""
Test view for accessing a Protected Resource.
"""
return HttpResponse('Protected Resource access!')
@login_required
def fake_authorize_view(request, token, callback, params):
"""
Fake view for tests. It must return an ``HttpResponse``.
You need to define your own in ``settings.OAUTH_AUTHORIZE_VIEW``.
"""
return HttpResponse('Fake authorize view for %s with params: %s.' % (token.consumer.name, params))
def fake_callback_view(request, **args):
"""
Fake view for tests. It must return an ``HttpResponse``.
You can define your own in ``settings.OAUTH_CALLBACK_VIEW``.
"""
return HttpResponse('Fake callback view.')
| 38.148594 | 127 | 0.68123 |
ace928adca3815b57f3c5716a4574ad78f93db66 | 3,199 | py | Python | doc/tutorials/pymunk_platformer/pymunk_demo_platformer_04.py | LiorAvrahami/arcade | fce254a9eb89629de1f99d57a63759a2953184e9 | [
"MIT"
] | 1 | 2021-03-04T14:02:29.000Z | 2021-03-04T14:02:29.000Z | doc/tutorials/pymunk_platformer/pymunk_demo_platformer_04.py | LiorAvrahami/arcade | fce254a9eb89629de1f99d57a63759a2953184e9 | [
"MIT"
] | 1 | 2019-08-11T18:47:27.000Z | 2019-08-12T03:02:11.000Z | doc/tutorials/pymunk_platformer/pymunk_demo_platformer_04.py | LiorAvrahami/arcade | fce254a9eb89629de1f99d57a63759a2953184e9 | [
"MIT"
] | null | null | null | """
Example of Pymunk Physics Engine Platformer
"""
import math
from typing import Optional
import arcade
SCREEN_TITLE = "PyMunk Platformer"
# How big are our image tiles?
SPRITE_IMAGE_SIZE = 128
# Scale sprites up or down
SPRITE_SCALING_PLAYER = 0.5
SPRITE_SCALING_TILES = 0.5
# Scaled sprite size for tiles
SPRITE_SIZE = int(SPRITE_IMAGE_SIZE * SPRITE_SCALING_PLAYER)
# Size of grid to show on screen, in number of tiles
SCREEN_GRID_WIDTH = 25
SCREEN_GRID_HEIGHT = 15
# Size of screen to show, in pixels
SCREEN_WIDTH = SPRITE_SIZE * SCREEN_GRID_WIDTH
SCREEN_HEIGHT = SPRITE_SIZE * SCREEN_GRID_HEIGHT
class GameWindow(arcade.Window):
""" Main Window """
def __init__(self, width, height, title):
""" Create the variables """
# Init the parent class
super().__init__(width, height, title)
# Player sprite
self.player_sprite: Optional[arcade.Sprite] = None
# Sprite lists we need
self.player_list: Optional[arcade.SpriteList] = None
self.wall_list: Optional[arcade.SpriteList] = None
self.bullet_list: Optional[arcade.SpriteList] = None
self.item_list: Optional[arcade.SpriteList] = None
# Track the current state of what key is pressed
self.left_pressed: bool = False
self.right_pressed: bool = False
# Set background color
arcade.set_background_color(arcade.color.AMAZON)
def setup(self):
""" Set up everything with the game """
# Create the sprite lists
self.player_list = arcade.SpriteList()
self.bullet_list = arcade.SpriteList()
# Read in the tiled map
map_name = "pymunk_test_map.tmx"
my_map = arcade.tilemap.read_tmx(map_name)
# Read in the map layers
self.wall_list = arcade.tilemap.process_layer(my_map, 'Platforms', SPRITE_SCALING_TILES)
self.item_list = arcade.tilemap.process_layer(my_map, 'Dynamic Items', SPRITE_SCALING_TILES)
# Create player sprite
self.player_sprite = arcade.Sprite(":resources:images/animated_characters/female_person/femalePerson_idle.png",
SPRITE_SCALING_PLAYER)
# Set player location
grid_x = 1
grid_y = 1
self.player_sprite.center_x = SPRITE_SIZE * grid_x + SPRITE_SIZE / 2
self.player_sprite.center_y = SPRITE_SIZE * grid_y + SPRITE_SIZE / 2
# Add to player sprite list
self.player_list.append(self.player_sprite)
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
pass
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
pass
def on_update(self, delta_time):
""" Movement and game logic """
pass
def on_draw(self):
""" Draw everything """
arcade.start_render()
self.wall_list.draw()
self.bullet_list.draw()
self.item_list.draw()
self.player_list.draw()
def main():
""" Main method """
window = GameWindow(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 29.348624 | 119 | 0.659581 |
ace928f631faa64da68156736a1ef9aa0200805c | 3,268 | py | Python | examples/slash_commands/injections.py | ironman9356/discord.py | 65084a52df071dd2cabb806321a748a1b7e2af24 | [
"MIT"
] | 1 | 2021-08-28T04:50:31.000Z | 2021-08-28T04:50:31.000Z | examples/slash_commands/injections.py | ironman9356/discord.py | 65084a52df071dd2cabb806321a748a1b7e2af24 | [
"MIT"
] | null | null | null | examples/slash_commands/injections.py | ironman9356/discord.py | 65084a52df071dd2cabb806321a748a1b7e2af24 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Any, Literal, Optional
import discord
# this file uses pytz in one of its examples but it is completely optional
import pytz
from discord.ext import commands
bot = commands.Bot("!")
# Instead of repeating boiler-plate code you may use injections
# Here we give each command a config and a few options in case they're not set
# very useful with sub commands
@dataclass
class Config:
locale: str
timezone: pytz.BaseTzInfo
theme: str
async def get_config(
inter: discord.CommandInteraction,
locale: Optional[str] = None,
timezone: str = "UTC",
theme: Literal["light", "dark", "amoled"] = "dark",
) -> Config:
"""Let the user enter a config
Note:
The docstring header of injections does not show up in the final command description,
only the option descriptions matter
Parameters
----------
locale: The prefered locale, defaults to the server's locale
timezone: Your current timezone, must be in the format of "US/Eastern" or "Europe/London"
theme: Your prefered theme, defaults to the dark theme
"""
# if a locale is not provided use the guild's locale
if locale is None:
locale = inter.guild and inter.guild.preferred_locale or "en-US"
# parse a timezone from a string using pytz (maybe even use the locale if you feel like it)
tzinfo = pytz.timezone(timezone)
return Config(locale, tzinfo, theme)
# Note that the following command will have 4 options: `number`, `locale`, `timezone` and `theme`.
# `config` will be whatever `get_config()` returns.
@bot.slash_command()
async def injected1(
inter: discord.CommandInteraction,
number: int,
config: Config = commands.inject(get_config),
):
"""A command which takes in a number and some config parameters
Parameters
----------
number: A number
"""
@bot.slash_command()
async def injected2(
inter: discord.CommandInteraction,
string: str,
config: Config = commands.inject(get_config),
):
"""A command which takes in a string and some config parameters
Parameters
----------
string: A string
"""
# If the injection returns a custom object and has a return type annotation
# then even the `commands.inject()` can be left out of the command signature
class GameUser:
username: str
level: int
...
conn: Any = ... # a placeholder for an actual database connection
@commands.register_injection
async def get_game_user(
inter: discord.CommandInteraction,
user: str = None,
server: Literal["eu", "us", "cn"] = None,
) -> GameUser:
"""Search a game user from the database
Parameters
----------
user: The username of the user, uses the author by default
server: The server to search
"""
if user is None:
return await conn.get_game_user(id=inter.author.id)
game_user: GameUser = await conn.search_game_user(username=user, server=server)
if game_user is None:
raise commands.CommandError(f"User with username {user!r} could not be found")
return game_user
@bot.slash_command()
async def implicit_injection(inter: discord.CommandInteraction, user: GameUser):
"""A command which uses an implicit injection""" | 28.920354 | 98 | 0.69339 |
ace92992fca6a41b8b5607c8d4dfea4617b299cf | 6,218 | py | Python | BaseTools/Tests/TestTools.py | GlovePuppet/edk2 | 8028f03032182f2c72e7699e1d14322bb5586581 | [
"BSD-2-Clause"
] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | BaseTools/Tests/TestTools.py | HouQiming/edk2 | ba07eef98ec49068d6453aba2aed73f6e7d7f600 | [
"BSD-2-Clause"
] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | BaseTools/Tests/TestTools.py | HouQiming/edk2 | ba07eef98ec49068d6453aba2aed73f6e7d7f600 | [
"BSD-2-Clause"
] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z | from __future__ import print_function
## @file
# Utility functions and classes for BaseTools unit tests
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import base64
import os
import os.path
import random
import shutil
import subprocess
import sys
import unittest
import codecs
TestsDir = os.path.realpath(os.path.split(sys.argv[0])[0])
BaseToolsDir = os.path.realpath(os.path.join(TestsDir, '..'))
CSourceDir = os.path.join(BaseToolsDir, 'Source', 'C')
PythonSourceDir = os.path.join(BaseToolsDir, 'Source', 'Python')
TestTempDir = os.path.join(TestsDir, 'TestTempDir')
if PythonSourceDir not in sys.path:
#
# Allow unit tests to import BaseTools python modules. This is very useful
# for writing unit tests.
#
sys.path.append(PythonSourceDir)
def MakeTheTestSuite(localItems):
tests = []
for name, item in localItems.items():
if isinstance(item, type):
if issubclass(item, unittest.TestCase):
tests.append(unittest.TestLoader().loadTestsFromTestCase(item))
elif issubclass(item, unittest.TestSuite):
tests.append(item())
return lambda: unittest.TestSuite(tests)
def GetBaseToolsPaths():
if sys.platform in ('win32', 'win64'):
return [ os.path.join(BaseToolsDir, 'Bin', sys.platform.title()) ]
else:
uname = os.popen('uname -sm').read().strip()
for char in (' ', '/'):
uname = uname.replace(char, '-')
return [
os.path.join(BaseToolsDir, 'Bin', uname),
os.path.join(BaseToolsDir, 'BinWrappers', uname),
os.path.join(BaseToolsDir, 'BinWrappers', 'PosixLike')
]
BaseToolsBinPaths = GetBaseToolsPaths()
class BaseToolsTest(unittest.TestCase):
def cleanOutDir(self, dir):
for dirItem in os.listdir(dir):
if dirItem in ('.', '..'): continue
dirItem = os.path.join(dir, dirItem)
self.RemoveFileOrDir(dirItem)
def CleanUpTmpDir(self):
if os.path.exists(self.testDir):
self.cleanOutDir(self.testDir)
def HandleTreeDeleteError(self, function, path, excinfo):
os.chmod(path, stat.S_IWRITE)
function(path)
def RemoveDir(self, dir):
shutil.rmtree(dir, False, self.HandleTreeDeleteError)
def RemoveFileOrDir(self, path):
if not os.path.exists(path):
return
elif os.path.isdir(path):
self.RemoveDir(path)
else:
os.remove(path)
def DisplayBinaryData(self, description, data):
print(description, '(base64 encoded):')
b64data = base64.b64encode(data)
print(b64data)
def DisplayFile(self, fileName):
sys.stdout.write(self.ReadTmpFile(fileName))
sys.stdout.flush()
def FindToolBin(self, toolName):
for binPath in BaseToolsBinPaths:
bin = os.path.join(binPath, toolName)
if os.path.exists(bin):
break
assert os.path.exists(bin)
return bin
def RunTool(self, *args, **kwd):
if 'toolName' in kwd: toolName = kwd['toolName']
else: toolName = None
if 'logFile' in kwd: logFile = kwd['logFile']
else: logFile = None
if toolName is None: toolName = self.toolName
bin = self.FindToolBin(toolName)
if logFile is not None:
logFile = open(os.path.join(self.testDir, logFile), 'w')
popenOut = logFile
else:
popenOut = subprocess.PIPE
args = [toolName] + list(args)
Proc = subprocess.Popen(
args, executable=bin,
stdout=popenOut, stderr=subprocess.STDOUT
)
if logFile is None:
Proc.stdout.read()
return Proc.wait()
def GetTmpFilePath(self, fileName):
return os.path.join(self.testDir, fileName)
def OpenTmpFile(self, fileName, mode = 'r'):
return open(os.path.join(self.testDir, fileName), mode)
def ReadTmpFile(self, fileName):
f = open(self.GetTmpFilePath(fileName), 'r')
data = f.read()
f.close()
return data
def WriteTmpFile(self, fileName, data):
if isinstance(data, bytes):
with open(self.GetTmpFilePath(fileName), 'wb') as f:
f.write(data)
else:
with codecs.open(self.GetTmpFilePath(fileName), 'w', encoding='utf-8') as f:
f.write(data)
def GenRandomFileData(self, fileName, minlen = None, maxlen = None):
if maxlen is None: maxlen = minlen
f = self.OpenTmpFile(fileName, 'w')
f.write(self.GetRandomString(minlen, maxlen))
f.close()
def GetRandomString(self, minlen = None, maxlen = None):
if minlen is None: minlen = 1024
if maxlen is None: maxlen = minlen
return ''.join(
[chr(random.randint(0, 255))
for x in range(random.randint(minlen, maxlen))
])
def setUp(self):
self.savedEnvPath = os.environ['PATH']
self.savedSysPath = sys.path[:]
for binPath in BaseToolsBinPaths:
os.environ['PATH'] = \
os.path.pathsep.join((os.environ['PATH'], binPath))
self.testDir = TestTempDir
if not os.path.exists(self.testDir):
os.mkdir(self.testDir)
else:
self.cleanOutDir(self.testDir)
def tearDown(self):
self.RemoveFileOrDir(self.testDir)
os.environ['PATH'] = self.savedEnvPath
sys.path = self.savedSysPath
| 32.554974 | 89 | 0.600193 |
ace929b071fc3f0dee3d59782c9bda64f756d8a5 | 18 | py | Python | pigeonpost/__init__.py | dragonfly-science/django-pigeonpost | 5630e03cf536ec1b66dfd8481a85e9a78deab3ba | [
"MIT"
] | null | null | null | pigeonpost/__init__.py | dragonfly-science/django-pigeonpost | 5630e03cf536ec1b66dfd8481a85e9a78deab3ba | [
"MIT"
] | null | null | null | pigeonpost/__init__.py | dragonfly-science/django-pigeonpost | 5630e03cf536ec1b66dfd8481a85e9a78deab3ba | [
"MIT"
] | null | null | null | version = '0.3.5'
| 9 | 17 | 0.555556 |
ace92a2509b3f2cc37818b18b1c365606b10ad8c | 63 | py | Python | enthought/logger/custom_excepthook.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/logger/custom_excepthook.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/logger/custom_excepthook.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | null | null | null | # proxy module
from apptools.logger.custom_excepthook import *
| 21 | 47 | 0.825397 |
ace92aec0280f3f2d518a1a2a8f7725a200f4005 | 33,603 | py | Python | darts/models/forecasting/nbeats.py | gdevos010/darts | 96c97c1e241500ae7b91d32bbfa21d811e4a7d71 | [
"Apache-2.0"
] | null | null | null | darts/models/forecasting/nbeats.py | gdevos010/darts | 96c97c1e241500ae7b91d32bbfa21d811e4a7d71 | [
"Apache-2.0"
] | null | null | null | darts/models/forecasting/nbeats.py | gdevos010/darts | 96c97c1e241500ae7b91d32bbfa21d811e4a7d71 | [
"Apache-2.0"
] | 1 | 2022-02-15T11:06:29.000Z | 2022-02-15T11:06:29.000Z | """
N-BEATS
-------
"""
from enum import Enum
from typing import List, NewType, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from darts.logging import get_logger, raise_if_not, raise_log
from darts.models.forecasting.pl_forecasting_module import PLPastCovariatesModule
from darts.models.forecasting.torch_forecasting_model import PastCovariatesTorchModel
from darts.utils.torch import MonteCarloDropout
logger = get_logger(__name__)
ACTIVATIONS = [
"ReLU",
"RReLU",
"PReLU",
"ELU",
"Softplus",
"Tanh",
"SELU",
"LeakyReLU",
"Sigmoid",
"GELU",
]
class _GType(Enum):
GENERIC = 1
TREND = 2
SEASONALITY = 3
GTypes = NewType("GTypes", _GType)
class _TrendGenerator(nn.Module):
def __init__(self, expansion_coefficient_dim, target_length):
super().__init__()
# basis is of size (expansion_coefficient_dim, target_length)
basis = torch.stack(
[
(torch.arange(target_length) / target_length) ** i
for i in range(expansion_coefficient_dim)
],
dim=1,
).T
self.basis = nn.Parameter(basis, requires_grad=False)
def forward(self, x):
return torch.matmul(x, self.basis)
class _SeasonalityGenerator(nn.Module):
def __init__(self, target_length):
super().__init__()
half_minus_one = int(target_length / 2 - 1)
cos_vectors = [
torch.cos(torch.arange(target_length) / target_length * 2 * np.pi * i)
for i in range(1, half_minus_one + 1)
]
sin_vectors = [
torch.sin(torch.arange(target_length) / target_length * 2 * np.pi * i)
for i in range(1, half_minus_one + 1)
]
# basis is of size (2 * int(target_length / 2 - 1) + 1, target_length)
basis = torch.stack(
[torch.ones(target_length)] + cos_vectors + sin_vectors, dim=1
).T
self.basis = nn.Parameter(basis, requires_grad=False)
def forward(self, x):
return torch.matmul(x, self.basis)
class _Block(nn.Module):
def __init__(
self,
num_layers: int,
layer_width: int,
nr_params: int,
expansion_coefficient_dim: int,
input_chunk_length: int,
target_length: int,
g_type: GTypes,
batch_norm: bool,
dropout: float,
activation: str,
):
"""PyTorch module implementing the basic building block of the N-BEATS architecture.
The blocks produce outputs of size (target_length, nr_params); i.e.
"one vector per parameter". The parameters are predicted only for forecast outputs.
Backcast outputs are in the original "domain".
Parameters
----------
num_layers
The number of fully connected layers preceding the final forking layers.
layer_width
The number of neurons that make up each fully connected layer.
nr_params
The number of parameters of the likelihood (or 1 if no likelihood is used)
expansion_coefficient_dim
The dimensionality of the waveform generator parameters, also known as expansion coefficients.
Used in the generic architecture and the trend module of the interpretable architecture, where it determines
the degree of the polynomial basis.
input_chunk_length
The length of the input sequence fed to the model.
target_length
The length of the forecast of the model.
g_type
The type of function that is implemented by the waveform generator.
batch_norm
Whether to use batch norm
dropout
Dropout probability
activation
The activation function of encoder/decoder intermediate layer.
Inputs
------
x of shape `(batch_size, input_chunk_length)`
Tensor containing the input sequence.
Outputs
-------
x_hat of shape `(batch_size, input_chunk_length)`
Tensor containing the 'backcast' of the block, which represents an approximation of `x`
given the constraints of the functional space determined by `g`.
y_hat of shape `(batch_size, output_chunk_length)`
Tensor containing the forward forecast of the block.
"""
super().__init__()
self.num_layers = num_layers
self.layer_width = layer_width
self.target_length = target_length
self.nr_params = nr_params
self.g_type = g_type
self.dropout = dropout
self.batch_norm = batch_norm
raise_if_not(
activation in ACTIVATIONS, f"'{activation}' is not in {ACTIVATIONS}"
)
self.activation = getattr(nn, activation)()
# fully connected stack before fork
self.linear_layer_stack_list = [nn.Linear(input_chunk_length, layer_width)]
for _ in range(num_layers - 1):
self.linear_layer_stack_list.append(nn.Linear(layer_width, layer_width))
if self.batch_norm:
self.linear_layer_stack_list.append(
nn.BatchNorm1d(num_features=self.layer_width)
)
if self.dropout > 0:
self.linear_layer_stack_list.append(MonteCarloDropout(p=self.dropout))
self.fc_stack = nn.ModuleList(self.linear_layer_stack_list)
# Fully connected layer producing forecast/backcast expansion coeffcients (waveform generator parameters).
# The coefficients are emitted for each parameter of the likelihood.
if g_type == _GType.SEASONALITY:
self.backcast_linear_layer = nn.Linear(
layer_width, 2 * int(input_chunk_length / 2 - 1) + 1
)
self.forecast_linear_layer = nn.Linear(
layer_width, nr_params * (2 * int(target_length / 2 - 1) + 1)
)
else:
self.backcast_linear_layer = nn.Linear(
layer_width, expansion_coefficient_dim
)
self.forecast_linear_layer = nn.Linear(
layer_width, nr_params * expansion_coefficient_dim
)
# waveform generator functions
if g_type == _GType.GENERIC:
self.backcast_g = nn.Linear(expansion_coefficient_dim, input_chunk_length)
self.forecast_g = nn.Linear(expansion_coefficient_dim, target_length)
elif g_type == _GType.TREND:
self.backcast_g = _TrendGenerator(
expansion_coefficient_dim, input_chunk_length
)
self.forecast_g = _TrendGenerator(expansion_coefficient_dim, target_length)
elif g_type == _GType.SEASONALITY:
self.backcast_g = _SeasonalityGenerator(input_chunk_length)
self.forecast_g = _SeasonalityGenerator(target_length)
else:
raise_log(ValueError("g_type not supported"), logger)
def forward(self, x):
batch_size = x.shape[0]
# fully connected layer stack
for layer in self.linear_layer_stack_list:
x = self.activation(layer(x))
# forked linear layers producing waveform generator parameters
theta_backcast = self.backcast_linear_layer(x)
theta_forecast = self.forecast_linear_layer(x)
# set the expansion coefs in last dimension for the forecasts
theta_forecast = theta_forecast.view(batch_size, self.nr_params, -1)
# waveform generator applications (project the expansion coefs onto basis vectors)
x_hat = self.backcast_g(theta_backcast)
y_hat = self.forecast_g(theta_forecast)
# Set the distribution parameters as the last dimension
y_hat = y_hat.reshape(x.shape[0], self.target_length, self.nr_params)
return x_hat, y_hat
class _Stack(nn.Module):
    def __init__(
        self,
        num_blocks: int,
        num_layers: int,
        layer_width: int,
        nr_params: int,
        expansion_coefficient_dim: int,
        input_chunk_length: int,
        target_length: int,
        g_type: GTypes,
        batch_norm: bool,
        dropout: float,
        activation: str,
    ):
        """PyTorch module implementing one stack of the N-BEATS architecture.

        A stack chains several basic blocks: every block consumes the running
        residual, emits a backcast (subtracted from the residual) and a
        forecast (summed into the stack forecast).

        Parameters
        ----------
        num_blocks
            The number of blocks making up this stack.
        num_layers
            The number of fully connected layers preceding the final forking layers in each block.
        layer_width
            The number of neurons that make up each fully connected layer in each block.
        nr_params
            The number of parameters of the likelihood (or 1 if no likelihood is used).
        expansion_coefficient_dim
            The dimensionality of the waveform generator parameters (expansion coefficients).
        input_chunk_length
            The length of the input sequence fed to the model.
        target_length
            The length of the forecast of the model.
        g_type
            The function implemented by the waveform generators in each block.
        batch_norm
            Whether to apply batch norm on the first block of this stack.
        dropout
            Dropout probability.
        activation
            The activation function of encoder/decoder intermediate layer.

        Inputs
        ------
        stack_input of shape `(batch_size, input_chunk_length)`
            Tensor containing the input sequence.

        Outputs
        -------
        stack_residual of shape `(batch_size, input_chunk_length)`
            The residual left after subtracting every block's backcast from the input.
        stack_forecast of shape `(batch_size, output_chunk_length)`
            The sum of every block's forward forecast.
        """
        super().__init__()
        self.input_chunk_length = input_chunk_length
        self.target_length = target_length
        self.nr_params = nr_params
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.activation = activation

        def make_block(use_batch_norm: bool):
            # Build one basic block with this stack's shared settings.
            return _Block(
                num_layers,
                layer_width,
                nr_params,
                expansion_coefficient_dim,
                input_chunk_length,
                target_length,
                g_type,
                batch_norm=use_batch_norm,
                dropout=self.dropout,
                activation=self.activation,
            )

        if g_type == _GType.GENERIC:
            # Independent weights per block; batch norm only ever applies to the
            # first block of the first stack.
            self.blocks_list = [
                make_block(self.batch_norm and block_index == 0)
                for block_index in range(num_blocks)
            ]
        else:
            # Interpretable stacks reuse a single block instance (weight sharing).
            shared_block = make_block(self.batch_norm)
            self.blocks_list = [shared_block] * num_blocks
        self.blocks = nn.ModuleList(self.blocks_list)

    def forward(self, x):
        # One forecast vector per parameter of the likelihood.
        forecast_sum = torch.zeros(
            x.shape[0],
            self.target_length,
            self.nr_params,
            device=x.device,
            dtype=x.dtype,
        )
        residual = x
        for block in self.blocks_list:
            backcast, block_forecast = block(residual)
            forecast_sum = forecast_sum + block_forecast
            residual = residual - backcast
        return residual, forecast_sum
class _NBEATSModule(PLPastCovariatesModule):
    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        nr_params: int,
        generic_architecture: bool,
        num_stacks: int,
        num_blocks: int,
        num_layers: int,
        layer_widths: List[int],
        expansion_coefficient_dim: int,
        trend_polynomial_degree: int,
        batch_norm: bool,
        dropout: float,
        activation: str,
        **kwargs,
    ):
        """PyTorch module implementing the N-BEATS architecture.
        Parameters
        ----------
        output_dim
            Number of output components in the target
        nr_params
            The number of parameters of the likelihood (or 1 if no likelihood is used).
        generic_architecture
            Boolean value indicating whether the generic architecture of N-BEATS is used.
            If not, the interpretable architecture outlined in the paper (consisting of one trend
            and one seasonality stack with appropriate waveform generator functions).
        num_stacks
            The number of stacks that make up the whole model. Only used if `generic_architecture` is set to `True`.
        num_blocks
            The number of blocks making up every stack.
        num_layers
            The number of fully connected layers preceding the final forking layers in each block of every stack.
            Only used if `generic_architecture` is set to `True`.
        layer_widths
            Determines the number of neurons that make up each fully connected layer in each block of every stack.
            If a list is passed, it must have a length equal to `num_stacks` and every entry in that list corresponds
            to the layer width of the corresponding stack. If an integer is passed, every stack will have blocks
            with FC layers of the same width.
        expansion_coefficient_dim
            The dimensionality of the waveform generator parameters, also known as expansion coefficients.
            Only used if `generic_architecture` is set to `True`.
        trend_polynomial_degree
            The degree of the polynomial used as waveform generator in trend stacks. Only used if
            `generic_architecture` is set to `False`.
        batch_norm
            Whether to apply batch norm on first block of the first stack
        dropout
            Dropout probability
        activation
            The activation function of encoder/decoder intermediate layer.
        **kwargs
            all parameters required for :class:`darts.model.forecasting_models.PLForecastingModule` base class.
        Inputs
        ------
        x of shape `(batch_size, input_chunk_length)`
            Tensor containing the input sequence.
        Outputs
        -------
        y of shape `(batch_size, output_chunk_length, target_size/output_dim, nr_params)`
            Tensor containing the output of the NBEATS module.
        """
        super().__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.nr_params = nr_params
        # Multivariate series are flattened to a single univariate sequence, so
        # the effective input length is chunk length x number of components.
        self.input_chunk_length_multi = self.input_chunk_length * input_dim
        # NOTE(review): the forecast length also scales with input_dim (not
        # output_dim); the extra covariate columns are stripped again at the
        # end of forward() below.
        self.target_length = self.output_chunk_length * input_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.activation = activation
        if generic_architecture:
            self.stacks_list = [
                _Stack(
                    num_blocks,
                    num_layers,
                    layer_widths[i],
                    nr_params,
                    expansion_coefficient_dim,
                    self.input_chunk_length_multi,
                    self.target_length,
                    _GType.GENERIC,
                    batch_norm=(
                        self.batch_norm and i == 0
                    ),  # batch norm only on first block of first stack
                    dropout=self.dropout,
                    activation=self.activation,
                )
                for i in range(num_stacks)
            ]
        else:
            # Interpretable architecture: exactly one trend + one seasonality stack.
            num_stacks = 2
            trend_stack = _Stack(
                num_blocks,
                num_layers,
                layer_widths[0],
                nr_params,
                trend_polynomial_degree + 1,
                self.input_chunk_length_multi,
                self.target_length,
                _GType.TREND,
                batch_norm=self.batch_norm,
                dropout=self.dropout,
                activation=self.activation,
            )
            seasonality_stack = _Stack(
                num_blocks,
                num_layers,
                layer_widths[1],
                nr_params,
                # expansion_coefficient_dim is unused for seasonality stacks:
                # _Block derives the seasonality head sizes from the chunk and
                # target lengths, so -1 is just a placeholder.
                -1,
                self.input_chunk_length_multi,
                self.target_length,
                _GType.SEASONALITY,
                batch_norm=self.batch_norm,
                dropout=self.dropout,
                activation=self.activation,
            )
            self.stacks_list = [trend_stack, seasonality_stack]
        self.stacks = nn.ModuleList(self.stacks_list)
        # setting the last backcast "branch" to be not trainable (without next block/stack, it doesn't need to be
        # backpropagated). Removing this lines would cause logtensorboard to crash, since no gradient is stored
        # on this params (the last block backcast is not part of the final output of the net).
        self.stacks_list[-1].blocks[-1].backcast_linear_layer.requires_grad_(False)
        self.stacks_list[-1].blocks[-1].backcast_g.requires_grad_(False)
    def forward(self, x_in: Tuple):
        # Only the past target/covariates tensor is used; the second tuple
        # element (future covariates placeholder) is ignored.
        x, _ = x_in
        # if x1, x2,... y1, y2... is one multivariate ts containing x and y, and a1, a2... one covariate ts
        # we reshape into x1, y1, a1, x2, y2, a2... etc
        x = torch.reshape(x, (x.shape[0], self.input_chunk_length_multi, 1))
        # squeeze last dimension (because model is univariate)
        x = x.squeeze(dim=2)
        # One vector of length target_length per parameter in the distribution
        y = torch.zeros(
            x.shape[0],
            self.target_length,
            self.nr_params,
            device=x.device,
            dtype=x.dtype,
        )
        for stack in self.stacks_list:
            # compute stack output
            stack_residual, stack_forecast = stack(x)
            # add stack forecast to final output
            y = y + stack_forecast
            # set current stack residual as input for next stack
            x = stack_residual
        # In multivariate case, we get a result [x1_param1, x1_param2], [y1_param1, y1_param2], [x2..], [y2..], ...
        # We want to reshape to original format. We also get rid of the covariates and keep only the target dimensions.
        # The covariates are by construction added as extra time series on the right side. So we need to get rid of this
        # right output (keeping only :self.output_dim).
        y = y.view(
            y.shape[0], self.output_chunk_length, self.input_dim, self.nr_params
        )[:, :, : self.output_dim, :]
        return y
class NBEATSModel(PastCovariatesTorchModel):
    def __init__(
        self,
        input_chunk_length: int,
        output_chunk_length: int,
        generic_architecture: bool = True,
        num_stacks: int = 30,
        num_blocks: int = 1,
        num_layers: int = 4,
        layer_widths: Union[int, List[int]] = 256,
        expansion_coefficient_dim: int = 5,
        trend_polynomial_degree: int = 2,
        dropout: float = 0.0,
        activation: str = "ReLU",
        **kwargs,
    ):
        """Neural Basis Expansion Analysis Time Series Forecasting (N-BEATS).
        This is an implementation of the N-BEATS architecture, as outlined in [1]_.
        In addition to the univariate version presented in the paper, our implementation also
        supports multivariate series (and covariates) by flattening the model inputs to a 1-D series
        and reshaping the outputs to a tensor of appropriate dimensions. Furthermore, it also
        supports producing probabilistic forecasts (by specifying a `likelihood` parameter).
        This model supports past covariates (known for `input_chunk_length` points before prediction time).
        Parameters
        ----------
        input_chunk_length
            The length of the input sequence fed to the model.
        output_chunk_length
            The length of the forecast of the model.
        generic_architecture
            Boolean value indicating whether the generic architecture of N-BEATS is used.
            If not, the interpretable architecture outlined in the paper (consisting of one trend
            and one seasonality stack with appropriate waveform generator functions).
        num_stacks
            The number of stacks that make up the whole model. Only used if `generic_architecture` is set to `True`.
            The interpretable architecture always uses two stacks - one for trend and one for seasonality.
        num_blocks
            The number of blocks making up every stack.
        num_layers
            The number of fully connected layers preceding the final forking layers in each block of every stack.
            Only used if `generic_architecture` is set to `True`.
        layer_widths
            Determines the number of neurons that make up each fully connected layer in each block of every stack.
            If a list is passed, it must have a length equal to `num_stacks` and every entry in that list corresponds
            to the layer width of the corresponding stack. If an integer is passed, every stack will have blocks
            with FC layers of the same width.
        expansion_coefficient_dim
            The dimensionality of the waveform generator parameters, also known as expansion coefficients.
            Only used if `generic_architecture` is set to `True`.
        trend_polynomial_degree
            The degree of the polynomial used as waveform generator in trend stacks. Only used if
            `generic_architecture` is set to `False`.
        dropout
            The dropout probability to be used in fully connected layers. This is compatible with Monte Carlo dropout
            at inference time for model uncertainty estimation (enabled with ``mc_dropout=True`` at
            prediction time).
        activation
            The activation function of encoder/decoder intermediate layer (default='ReLU').
            Supported activations: ['ReLU','RReLU', 'PReLU', 'Softplus', 'Tanh', 'SELU', 'LeakyReLU', 'Sigmoid']
        **kwargs
            Optional arguments to initialize the pytorch_lightning.Module, pytorch_lightning.Trainer, and
            Darts' :class:`TorchForecastingModel`.
        loss_fn
            PyTorch loss function used for training.
            This parameter will be ignored for probabilistic models if the ``likelihood`` parameter is specified.
            Default: ``torch.nn.MSELoss()``.
        torch_metrics
            A torch metric or a ``MetricCollection`` used for evaluation. A full list of available metrics can be found
            at https://torchmetrics.readthedocs.io/en/latest/. Default: ``None``.
        likelihood
            One of Darts' :meth:`Likelihood <darts.utils.likelihood_models.Likelihood>` models to be used for
            probabilistic forecasts. Default: ``None``.
        optimizer_cls
            The PyTorch optimizer class to be used. Default: ``torch.optim.Adam``.
        optimizer_kwargs
            Optionally, some keyword arguments for the PyTorch optimizer (e.g., ``{'lr': 1e-3}``
            for specifying a learning rate). Otherwise the default values of the selected ``optimizer_cls``
            will be used. Default: ``None``.
        lr_scheduler_cls
            Optionally, the PyTorch learning rate scheduler class to be used. Specifying ``None`` corresponds
            to using a constant learning rate. Default: ``None``.
        lr_scheduler_kwargs
            Optionally, some keyword arguments for the PyTorch learning rate scheduler. Default: ``None``.
        batch_size
            Number of time series (input and output sequences) used in each training pass. Default: ``32``.
        n_epochs
            Number of epochs over which to train the model. Default: ``100``.
        model_name
            Name of the model. Used for creating checkpoints and saving tensorboard data. If not specified,
            defaults to the following string ``"YYYY-mm-dd_HH:MM:SS_torch_model_run_PID"``, where the initial part
            of the name is formatted with the local date and time, while PID is the processed ID (preventing models
            spawned at the same time by different processes to share the same model_name). E.g.,
            ``"2021-06-14_09:53:32_torch_model_run_44607"``.
        work_dir
            Path of the working directory, where to save checkpoints and Tensorboard summaries.
            Default: current working directory.
        log_tensorboard
            If set, use Tensorboard to log the different parameters. The logs will be located in:
            ``"{work_dir}/darts_logs/{model_name}/logs/"``. Default: ``False``.
        nr_epochs_val_period
            Number of epochs to wait before evaluating the validation loss (if a validation
            ``TimeSeries`` is passed to the :func:`fit()` method). Default: ``1``.
        torch_device_str
            Optionally, a string indicating the torch device to use. By default, ``torch_device_str`` is ``None``
            which will run on CPU. Set it to ``"cuda"`` to use all available GPUs or ``"cuda:i"`` to only use
            GPU ``i`` (``i`` must be an integer). For example "cuda:0" will use the first GPU only.
            .. deprecated:: v0.17.0
                ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version.
                Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your
                ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs``
                dict:
                - ``{"accelerator": "cpu"}`` for CPU,
                - ``{"accelerator": "gpu", "gpus": [i]}`` to use only GPU ``i`` (``i`` must be an integer),
                - ``{"accelerator": "gpu", "gpus": -1, "auto_select_gpus": True}`` to use all available GPUS.
                For more info, see here:
                https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and
                https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices
        force_reset
            If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will
            be discarded). Default: ``False``.
        save_checkpoints
            Whether or not to automatically save the untrained model and checkpoints from training.
            To load the model from checkpoint, call :func:`MyModelClass.load_from_checkpoint()`, where
            :class:`MyModelClass` is the :class:`TorchForecastingModel` class that was used (such as :class:`TFTModel`,
            :class:`NBEATSModel`, etc.). If set to ``False``, the model can still be manually saved using
            :func:`save_model()` and loaded using :func:`load_model()`. Default: ``False``.
        add_encoders
            A large number of past and future covariates can be automatically generated with `add_encoders`.
            This can be done by adding multiple pre-defined index encoders and/or custom user-made functions that
            will be used as index encoders. Additionally, a transformer such as Darts' :class:`Scaler` can be added to
            transform the generated covariates. This happens all under one hood and only needs to be specified at
            model creation.
            Read :meth:`SequentialEncoder <darts.utils.data.encoders.SequentialEncoder>` to find out more about
            ``add_encoders``. Default: ``None``. An example showing some of ``add_encoders`` features:
            .. highlight:: python
            .. code-block:: python
                add_encoders={
                    'cyclic': {'future': ['month']},
                    'datetime_attribute': {'future': ['hour', 'dayofweek']},
                    'position': {'past': ['absolute'], 'future': ['relative']},
                    'custom': {'past': [lambda idx: (idx.year - 1950) / 50]},
                    'transformer': Scaler()
                }
            ..
        random_state
            Control the randomness of the weights initialization. Check this
            `link <https://scikit-learn.org/stable/glossary.html#term-random_state>`_ for more details.
            Default: ``None``.
        pl_trainer_kwargs
            By default :class:`TorchForecastingModel` creates a PyTorch Lightning Trainer with several useful presets
            that performs the training, validation and prediction processes. These presets include automatic
            checkpointing, tensorboard logging, setting the torch device and more.
            With ``pl_trainer_kwargs`` you can add additional kwargs to instantiate the PyTorch Lightning trainer
            object. Check the `PL Trainer documentation
            <https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html>`_ for more information about the
            supported kwargs. Default: ``None``.
            With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts'
            :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process.
            The model will stop training early if the validation loss `val_loss` does not improve beyond
            specifications. For more information on callbacks, visit:
            `PyTorch Lightning Callbacks
            <https://pytorch-lightning.readthedocs.io/en/stable/extensions/callbacks.html>`_
            .. highlight:: python
            .. code-block:: python
                from pytorch_lightning.callbacks.early_stopping import EarlyStopping
                # stop training when validation loss does not decrease more than 0.05 (`min_delta`) over
                # a period of 5 epochs (`patience`)
                my_stopper = EarlyStopping(
                    monitor="val_loss",
                    patience=5,
                    min_delta=0.05,
                    mode='min',
                )
                pl_trainer_kwargs={"callbacks": [my_stopper]}
            ..
            Note that you can also use a custom PyTorch Lightning Trainer for training and prediction with optional
            parameter ``trainer`` in :func:`fit()` and :func:`predict()`.
        show_warnings
            whether to show warnings raised from PyTorch Lightning. Useful to detect potential issues of
            your forecasting use case. Default: ``False``.
        References
        ----------
        .. [1] https://openreview.net/forum?id=r1ecqn4YwB
        """
        # self.model_params is populated by the base machinery from this
        # constructor's arguments; the torch-model subset is forwarded upward.
        super().__init__(**self._extract_torch_model_params(**self.model_params))
        # extract pytorch lightning module kwargs
        self.pl_module_params = self._extract_pl_module_params(**self.model_params)
        raise_if_not(
            isinstance(layer_widths, int) or len(layer_widths) == num_stacks,
            # FIX: the two concatenated fragments were missing a separating
            # space, rendering "...`num_stacks`as value...".
            "Please pass an integer or a list of integers with length `num_stacks` "
            "as value for the `layer_widths` argument.",
            logger,
        )
        self.generic_architecture = generic_architecture
        self.num_stacks = num_stacks
        self.num_blocks = num_blocks
        self.num_layers = num_layers
        self.layer_widths = layer_widths
        self.expansion_coefficient_dim = expansion_coefficient_dim
        self.trend_polynomial_degree = trend_polynomial_degree
        # Currently batch norm is not an option as it seems to perform badly
        self.batch_norm = False
        self.dropout = dropout
        self.activation = activation
        if not generic_architecture:
            # The interpretable architecture always uses exactly two stacks.
            self.num_stacks = 2
        if isinstance(layer_widths, int):
            # NOTE(review): this deliberately uses the *local* num_stacks (the
            # user-passed value), not the possibly-adjusted self.num_stacks;
            # for the interpretable architecture only the first two entries
            # are consumed, so the extra entries are harmless.
            self.layer_widths = [layer_widths] * num_stacks
    def _create_model(self, train_sample: Tuple[torch.Tensor]) -> torch.nn.Module:
        """Instantiate the inner PyTorch module from one training sample.

        The sample shapes determine the flattened input width (target plus
        optional past covariates) and the output width.
        """
        # samples are made of (past_target, past_covariates, future_target)
        input_dim = train_sample[0].shape[1] + (
            train_sample[1].shape[1] if train_sample[1] is not None else 0
        )
        output_dim = train_sample[-1].shape[1]
        # One parameter set per likelihood parameter (1 for deterministic models).
        nr_params = 1 if self.likelihood is None else self.likelihood.num_parameters
        return _NBEATSModule(
            input_dim=input_dim,
            output_dim=output_dim,
            nr_params=nr_params,
            generic_architecture=self.generic_architecture,
            num_stacks=self.num_stacks,
            num_blocks=self.num_blocks,
            num_layers=self.num_layers,
            layer_widths=self.layer_widths,
            expansion_coefficient_dim=self.expansion_coefficient_dim,
            trend_polynomial_degree=self.trend_polynomial_degree,
            batch_norm=self.batch_norm,
            dropout=self.dropout,
            activation=self.activation,
            **self.pl_module_params,
        )
| 42.589354 | 120 | 0.621611 |
ace92b0abdbae5f778b82863b87fe5fea21a657c | 2,065 | py | Python | userbot/modules/salam.py | brut69/Hexxa-Userbot | 0fbd633159e54c64394ebe66a17ea6131f4be631 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5 | 2021-08-13T05:17:12.000Z | 2021-11-15T12:12:47.000Z | userbot/modules/salam.py | brut69/Hexxa-Userbot | 0fbd633159e54c64394ebe66a17ea6131f4be631 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 62 | 2021-09-02T22:18:59.000Z | 2022-03-28T22:22:10.000Z | userbot/modules/salam.py | brut69/Hexxa-Userbot | 0fbd633159e54c64394ebe66a17ea6131f4be631 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 8 | 2021-08-14T19:49:16.000Z | 2021-08-24T03:47:11.000Z | from platform import uname
from time import sleep
from userbot import ALIVE_NAME, CMD_HELP
from userbot.events import register
# ================= CONSTANT =================
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
# ============================================
@register(outgoing=True, pattern="^.P(?: |$)(.*)")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("`Assalamu'alaikum...`")
@register(outgoing=True, pattern=r"^\.p(?: |$)(.*)")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("`Assalamu'alaikum...`")
@register(outgoing=True, pattern="^.Ass(?: |$)(.*)")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit(f"**Halo bro saya {DEFAULTUSER} salam kenal 😁**")
sleep(2)
await typew.edit("`Assalamualaikum Waruhmatulahi Wabarukatuh...`")
@register(outgoing=True, pattern="^.Waa(?: |$)(.*)")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("**iyaa bro**")
sleep(2)
await typew.edit("`Walaikumsalam Waruhmatulahi Wabarukatuh...`")
@register(outgoing=True, pattern="^.L(?: |$)(.*)")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("`Wa'alaikumsalam...`")
@register(outgoing=True, pattern=r"^\.l(?: |$)(.*)")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("`Wa'alaikumsalam...`")
CMD_HELP.update(
{
"salam": "**✘ Plugin : **`salam`\
\n\n • **Perintah :** `.P` | `.p`\
\n • **Function :** Untuk salam ke semua orang\
\n\n • **Perintah :** `.Ass`\
\n • **Function :** Salam kenal dan salam\
\n\n • **Perintah :** `.Waa`\
\n • **Function :** Menjawab salam panjang\
\n\n • **Perintah :** `.L` | `.l`\
\n • **Function :** Untuk menjawab salam\
\n\n\n • **Pesan untuk salam dan menjawab salam ke semua orang , dimanapun Hexxa berada.**\n➠ **Pesan dari developer Hexxa Apis , enjoy userbot:D**\
"
}
)
| 31.287879 | 158 | 0.590799 |
ace92d3087744e8a0686662313a90867b055ac19 | 549 | py | Python | demosys/loaders/data/binary.py | Contraz/demosys-py | 0479e0f3b0a3901f601bffd2d11e155f97b47555 | [
"0BSD"
] | 70 | 2017-03-31T12:01:41.000Z | 2022-01-05T06:30:57.000Z | demosys/loaders/data/binary.py | Contraz/demosys-py | 0479e0f3b0a3901f601bffd2d11e155f97b47555 | [
"0BSD"
] | 69 | 2017-06-18T22:37:46.000Z | 2020-01-23T04:02:22.000Z | demosys/loaders/data/binary.py | Contraz/demosys-py | 0479e0f3b0a3901f601bffd2d11e155f97b47555 | [
"0BSD"
] | 9 | 2017-05-13T21:13:02.000Z | 2020-10-01T18:09:49.000Z | from demosys.loaders.base import BaseLoader
from demosys.exceptions import ImproperlyConfigured
class Loader(BaseLoader):
    """Resource loader that returns a data file's raw bytes."""

    name = 'binary'

    def load(self):
        """Load a file in binary mode"""
        # Resolve and record the path first; downstream code can read
        # meta.resolved_path even though resolution may have failed.
        resolved = self.find_data(self.meta.path)
        self.meta.resolved_path = resolved
        if not resolved:
            raise ImproperlyConfigured(
                "Data file '{}' not found".format(self.meta.path)
            )
        print("Loading:", self.meta.path)
        with open(self.meta.resolved_path, 'rb') as handle:
            return handle.read()
| 28.894737 | 90 | 0.637523 |
ace92e2e2864d0d6dd5d33a5dc94a259b0ed69b7 | 1,340 | py | Python | viz/runtime_compare.py | Avaneesh-Ramesh-07/Dashing | 4e8d5a28ade1d627d51ad3de5d360a57b763b323 | [
"MIT"
] | null | null | null | viz/runtime_compare.py | Avaneesh-Ramesh-07/Dashing | 4e8d5a28ade1d627d51ad3de5d360a57b763b323 | [
"MIT"
] | 1 | 2020-09-14T18:44:09.000Z | 2020-11-10T22:04:12.000Z | viz/runtime_compare.py | Avaneesh-Ramesh-07/Dashing | 4e8d5a28ade1d627d51ad3de5d360a57b763b323 | [
"MIT"
] | 2 | 2021-12-20T21:53:25.000Z | 2022-02-16T22:22:45.000Z |
import numpy as np
import plotly.graph_objects as go
import os
def create_runtime_comparison(data_loaders, global_options):
    """Draw one runtime line chart per configured "app:region,app:region" pair.

    Each entry of ``global_options['runtime_pairs']`` names two app/region
    combinations to compare.
    """
    for pair_spec in global_options['runtime_pairs']:
        left, right = pair_spec.split(',')
        first_app, first_region = left.split(':', 1)
        second_app, second_region = right.split(':', 1)
        create_runtime_linechart(
            data_loaders, first_app, second_app, first_region, second_region
        )
def create_runtime_linechart(data_loaders, app1, app2, reg1, reg2):
    """Plot total runtime (per-proc runtime x proc count) of two app regions
    on one figure, show it, and append it to both loaders' chart lists."""
    loader_a = data_loaders[app1]
    loader_b = data_loaders[app2]
    # Per-processor runtimes, scaled by the processor count to get totals.
    proc_counts = loader_a.proc_configs
    totals_a = np.multiply(loader_a.get_app_runtime(reg1, rescale=False), proc_counts)
    totals_b = np.multiply(loader_b.get_app_runtime(reg2, rescale=False), proc_counts)
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=proc_counts, y=totals_a, name=app1))
    fig.add_trace(go.Scatter(x=proc_counts, y=totals_b, name=app2))
    fig.update_layout(title='%s-%s vs. %s-%s' % (app1, reg1, app2, reg2))
    fig.layout.xaxis.title = "Processor Configurations"
    fig.layout.yaxis.title = "Runtime"
    fig.update_xaxes(title_font=dict(size=18))
    fig.update_yaxes(title_font=dict(size=18))
    fig.show()
    # NOTE(review): loaders are read via attribute access above but written via
    # item access here — presumably the loader type supports both; confirm.
    loader_a['charts'].append(fig)
    loader_b['charts'].append(fig)
| 30.454545 | 70 | 0.691045 |
ace92f1f871b34c41fabbd4562b9209cfeb528ad | 4,839 | py | Python | hippocampe/komand_hippocampe/actions/more/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | hippocampe/komand_hippocampe/actions/more/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | hippocampe/komand_hippocampe/actions/more/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
    """Names of the More action's input parameters (Komand SDK generated)."""
    OBSERVABLES = "observables"
class Output:
    """Names of the More action's output fields (Komand SDK generated)."""
    RESULTS = "results"
class MoreInput(komand.Input):
    """JSON-schema wrapper for the More action's input (generated code).

    The schema requires an `observables` array whose items each carry a
    `value` and a `type`.
    """
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "observables": {
      "type": "array",
      "title": "Observables",
      "description": "Observables to get intelligence about",
      "items": {
        "$ref": "#/definitions/observable"
      },
      "order": 1
    }
  },
  "required": [
    "observables"
  ],
  "definitions": {
    "observable": {
      "type": "object",
      "title": "observable",
      "properties": {
        "type": {
          "type": "string",
          "title": "Type",
          "description": "Type of the observable (e.g. domain, ip)",
          "order": 2
        },
        "value": {
          "type": "string",
          "title": "Value",
          "description": "Value of the observable",
          "order": 1
        }
      },
      "required": [
        "value",
        "type"
      ]
    }
  }
}
    """)
    def __init__(self):
        # NOTE(review): super(self.__class__, ...) recurses if this class is
        # ever subclassed; generated SDK code, left as-is.
        super(self.__class__, self).__init__(self.schema)
class MoreOutput(komand.Output):
    """JSON-schema wrapper for the More action's output (generated code).

    The schema describes a required `results` array of per-observable
    intelligence objects, each containing zero or more feed records.
    """
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "results": {
      "type": "array",
      "title": "Results",
      "description": "List of results, one per observable",
      "items": {
        "$ref": "#/definitions/intelligence"
      },
      "order": 1
    }
  },
  "required": [
    "results"
  ],
  "definitions": {
    "intelligence": {
      "type": "object",
      "title": "intelligence",
      "properties": {
        "observable": {
          "type": "string",
          "title": "Observable",
          "description": "Observable",
          "order": 1
        },
        "records": {
          "type": "array",
          "title": "Records",
          "description": "Record found in Hippocampe feed (empty if the observable is unknown)",
          "items": {
            "$ref": "#/definitions/intelligence_report"
          },
          "order": 2
        }
      },
      "definitions": {
        "intelligence_report": {
          "type": "object",
          "title": "intelligence_report",
          "properties": {
            "firstAppearance": {
              "type": "string",
              "title": "First Appearance",
              "displayType": "date",
              "description": "First appearance",
              "format": "date-time",
              "order": 1
            },
            "hipposcore": {
              "type": "number",
              "title": "Hipposcore",
              "description": "Hipposcore",
              "order": 6
            },
            "idSource": {
              "type": "string",
              "title": "ID Source",
              "description": "Source ID",
              "order": 5
            },
            "ip": {
              "type": "string",
              "title": "IP",
              "description": "IP",
              "order": 3
            },
            "lastAppearance": {
              "type": "string",
              "title": "Last Appearance",
              "displayType": "date",
              "description": "Last appearance",
              "format": "date-time",
              "order": 2
            },
            "source": {
              "type": "string",
              "title": "Source",
              "description": "Source",
              "order": 4
            }
          }
        }
      }
    },
    "intelligence_report": {
      "type": "object",
      "title": "intelligence_report",
      "properties": {
        "firstAppearance": {
          "type": "string",
          "title": "First Appearance",
          "displayType": "date",
          "description": "First appearance",
          "format": "date-time",
          "order": 1
        },
        "hipposcore": {
          "type": "number",
          "title": "Hipposcore",
          "description": "Hipposcore",
          "order": 6
        },
        "idSource": {
          "type": "string",
          "title": "ID Source",
          "description": "Source ID",
          "order": 5
        },
        "ip": {
          "type": "string",
          "title": "IP",
          "description": "IP",
          "order": 3
        },
        "lastAppearance": {
          "type": "string",
          "title": "Last Appearance",
          "displayType": "date",
          "description": "Last appearance",
          "format": "date-time",
          "order": 2
        },
        "source": {
          "type": "string",
          "title": "Source",
          "description": "Source",
          "order": 4
        }
      }
    }
  }
}
    """)
    def __init__(self):
        # NOTE(review): super(self.__class__, ...) recurses if this class is
        # ever subclassed; generated SDK code, left as-is.
        super(self.__class__, self).__init__(self.schema)
| 23.604878 | 96 | 0.424468 |
ace9301d17e33809b58e09b103ae31f4dcc0e5f4 | 3,235 | py | Python | temboo/core/Library/Facebook/Actions/Books/Reads/DeleteReading.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/Facebook/Actions/Books/Reads/DeleteReading.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/Facebook/Actions/Books/Reads/DeleteReading.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | # -*- coding: utf-8 -*-
###############################################################################
#
# DeleteReading
# Deletes a given book reading action.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteReading(Choreography):
    # Temboo-generated Choreo wrapper: deletes a Facebook "book reading" action.
    def __init__(self, temboo_session):
        """
        Create a new instance of the DeleteReading Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(DeleteReading, self).__init__(temboo_session, '/Library/Facebook/Actions/Books/Reads/DeleteReading')
    def new_input_set(self):
        # Factory for the input container used to parameterise an execution.
        return DeleteReadingInputSet()
    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result-set helper.
        return DeleteReadingResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Build the handle used to track an asynchronous execution.
        return DeleteReadingChoreographyExecution(session, exec_id, path)
class DeleteReadingInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the DeleteReading
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Both inputs are required strings; each setter simply delegates to the
    # generic _set_input machinery of the InputSet base class.
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final step of the OAuth process.)
        """
        super(DeleteReadingInputSet, self)._set_input('AccessToken', value)
    def set_ActionID(self, value):
        """
        Set the value of the ActionID input for this Choreo. ((required, string) The id of an action to delete.)
        """
        super(DeleteReadingInputSet, self)._set_input('ActionID', value)
class DeleteReadingResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the DeleteReading Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`; renaming
        # is avoided here to keep the generated public signature untouched.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((boolean) The response from Facebook. Returns "true" on success.)
        """
        return self._output.get('Response', None)
class DeleteReadingChoreographyExecution(ChoreographyExecution):
    # Execution handle; converts polled responses into DeleteReadingResultSet objects.
    def _make_result_set(self, response, path):
        return DeleteReadingResultSet(response, path)
| 36.761364 | 153 | 0.687172 |
ace9303866851690a4e9818ef7858f6a538f3c41 | 1,430 | py | Python | dbquery/query.py | Caleb-Shepard/auto-registrar | fab625a463ebca02fad3b5e944bf8ce5c5104096 | [
"MIT"
] | 2 | 2018-10-23T15:15:02.000Z | 2018-10-29T18:03:01.000Z | dbquery/query.py | Caleb-Shepard/auto-registrar | fab625a463ebca02fad3b5e944bf8ce5c5104096 | [
"MIT"
] | null | null | null | dbquery/query.py | Caleb-Shepard/auto-registrar | fab625a463ebca02fad3b5e944bf8ce5c5104096 | [
"MIT"
] | null | null | null | """ *********************************************************************** """
""" """
""" |\ """
""" query.py ------| \---- """
""" | \` \ | p """
""" By: mihirlad55 | \`-\ \ | o """
""" |---\ \ `| l """
""" Created: 2018/10/29 13:11:01 by mihirlad55 | ` .\ \ | y """
""" Updated: 2018/10/29 20:31:57 by mihirlad55 ------------- """
""" """
""" *********************************************************************** """
# Minimal interactive SQLite shell: read SQL statements from stdin and run
# them against the bundled course database until the user types "exit".
import sqlite3
# Open the database file and grab a cursor for executing statements.
conn = sqlite3.connect('course_database.db')
cur = conn.cursor()
query = ""
while True:
    # Prompt for the next SQL statement.
    query = input("Enter SQL Command (type exit to exit): ")
    if query == "exit":
        break
    try:
        cur.execute(query)
    except sqlite3.OperationalError as err:
        # Malformed SQL: report the error but keep the shell alive.
        print(type(err), err)
    # Show whatever rows the cursor currently holds (empty for non-SELECTs).
    print(cur.fetchall())
conn.commit()
conn.close()
ace930f61543a1b7b469427703b348cc391aa345 | 4,753 | py | Python | ai_ct_scans/GUI/image_viewer.py | nhsx-mirror/skunkworks-ct-alignment-lesion-detection | d208ae5047ced840a181cd4c6f827dbbcafcdaf7 | [
"MIT"
] | 2 | 2021-11-15T09:52:00.000Z | 2021-11-30T11:33:15.000Z | ai_ct_scans/GUI/image_viewer.py | nhsx-mirror/skunkworks-ct-alignment-lesion-detection | d208ae5047ced840a181cd4c6f827dbbcafcdaf7 | [
"MIT"
] | null | null | null | ai_ct_scans/GUI/image_viewer.py | nhsx-mirror/skunkworks-ct-alignment-lesion-detection | d208ae5047ced840a181cd4c6f827dbbcafcdaf7 | [
"MIT"
] | 1 | 2022-03-25T12:22:55.000Z | 2022-03-25T12:22:55.000Z | import numpy as np
import cv2
from PySide2.QtWidgets import QLabel, QSizePolicy
from PySide2.QtGui import QImage, QPixmap, QPainter, QColor, QFont
from ai_ct_scans.image_processing_utils import normalise
# TODO: Test this class properly
class ImageViewer(QLabel):
"""Class to utilise the QLabel pixmap for displaying images in the GUI.
Attributes:
pixmap (QPixmap): Current value of the displayed pixmap.
"""
def __init__(self):
super().__init__()
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.pixmap = QPixmap()
self.info_text = None
def set_image(self, input_image, info_text=None, point=None, ellipse=None):
"""Convert the image for displaying within the ImageViewer as a QPixmap.
Args:
input_image (np.array): image to display.
info_text (str): information text string to display on corner of image.
point (list): coordinates of a point to overlay on the image as [x, y], or None.
ellipse (dict): dictionary describing ellipse to overlay on image.
"""
image = np.array(input_image)
if image.dtype != np.uint8 and not np.any((image < 0) | (image > 1)):
image *= 255
image = np.uint8(image)
else:
image = normalise(image)
# Make sure image has three channels (covert to BGR if nessessary)
if image.ndim == 3:
image = image
else:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
# Copy the image
image = np.copy(image)
if point:
image = cv2.circle(image, point, radius=5, color=(0, 0, 255), thickness=-1)
if ellipse is not None:
# Ensure accurate integer arguments to cv2.ellipse, use shift to maintain
# accuracy of non-integer inputs.
center = (
int(round(ellipse["center"][0] * 2 ** 10)),
int(round(ellipse["center"][1] * 2 ** 10)),
)
axes = (
int(round((ellipse["axis"][0] / 2) * 2 ** 10)),
int(round((ellipse["axis"][1] / 2) * 2 ** 10)),
)
image = cv2.ellipse(
image,
center,
axes,
int(round(ellipse["angle"])),
0,
360,
(0, 255, 0),
1,
cv2.LINE_AA,
10,
)
# Create QImage from image data
image = (
QImage(
image.data,
image.shape[1],
image.shape[0],
image.shape[1] * 3,
QImage.Format_RGB888,
)
.rgbSwapped()
.rgbSwapped()
)
# Create QPixmap from QImage
self.pixmap = QPixmap.fromImage(image)
self.info_text = info_text
self.update()
def scale_factor(self):
"""Find a suitable scale factor.
Returns:
(float): scale factor to fit image to window.
"""
# Check if there is an empty pixmap
if self.pixmap.isNull():
return 1
else:
# Determine scaling to fit the window without overlap
return min(
self.width() / self.pixmap.width(), self.height() / self.pixmap.height()
)
def painter_element(self):
"""Create painter instance for rendering image.
Returns:
(QPainter): QPainter instance with correect scaling and rendering options.
"""
# Create painter instance
painter_instance = QPainter(self)
# Determine the scale factor
painter_instance.scale(self.scale_factor(), self.scale_factor())
# Set rendering setttings for better rendering
painter_instance.setRenderHint(QPainter.SmoothPixmapTransform)
painter_instance.setRenderHint(QPainter.Antialiasing)
return painter_instance
def paintEvent(self, event):
"""Paint the existing pixmap when the window changes.
Args:
event (QEvent): automatically populated Qt event information.
"""
# Get an instance of the painter
painter = self.painter_element()
# Draw pixmap
painter.drawPixmap(0, 0, self.pixmap)
# Write info text
painter.setPen(QColor("white"))
painter.setFont(QFont("Arial", 15))
if self.info_text is not None:
# TODO: Consider moving this to another overlay method so that it has a fixed font size with any image
painter.drawText(10, 20, self.info_text)
# Finish drawing
painter.end()
| 32.333333 | 114 | 0.562171 |
ace931092d7fabbf582390133bc27da38ede936f | 3,722 | py | Python | sdk/python/pulumi_azure_native/datamigration/v20180715preview/get_file.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/datamigration/v20180715preview/get_file.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/datamigration/v20180715preview/get_file.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetFileResult',
'AwaitableGetFileResult',
'get_file',
]
@pulumi.output_type
class GetFileResult:
"""
A file resource
"""
def __init__(__self__, etag=None, id=None, name=None, properties=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
HTTP strong entity tag value. This is ignored if submitted.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ProjectFilePropertiesResponse':
"""
Custom file properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetFileResult(GetFileResult):
    # Awaitable wrapper: lets the result of get_file(...) be used with `await`
    # while still resolving synchronously (the generator never actually
    # suspends -- the `if False: yield` only marks __await__ as a generator).
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetFileResult(
            etag=self.etag,
            id=self.id,
            name=self.name,
            properties=self.properties,
            type=self.type)
def get_file(file_name: Optional[str] = None,
             group_name: Optional[str] = None,
             project_name: Optional[str] = None,
             service_name: Optional[str] = None,
             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFileResult:
    """
    A file resource
    :param str file_name: Name of the File
    :param str group_name: Name of the resource group
    :param str project_name: Name of the project
    :param str service_name: Name of the service
    """
    # Map the snake_case keyword arguments onto the provider's camelCase
    # invoke arguments.
    __args__ = {
        'fileName': file_name,
        'groupName': group_name,
        'projectName': project_name,
        'serviceName': service_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the invoke to this SDK's provider version unless overridden.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:datamigration/v20180715preview:getFile', __args__, opts=opts, typ=GetFileResult).value
    return AwaitableGetFileResult(
        etag=__ret__.etag,
        id=__ret__.id,
        name=__ret__.name,
        properties=__ret__.properties,
        type=__ret__.type)
ace931c6b42b7000b2551d35b561356d7cf4805e | 3,325 | py | Python | tools/create_datasets.py | Lornatang/YOLOv3-PyTorch | c41040169aedfb3633f3a5af42c8e5a309e92548 | [
"Apache-2.0"
] | 11 | 2020-03-13T13:16:58.000Z | 2021-06-26T17:54:35.000Z | tools/create_datasets.py | Lornatang/PyTorch-yolov3 | c41040169aedfb3633f3a5af42c8e5a309e92548 | [
"Apache-2.0"
] | 6 | 2020-03-13T06:04:22.000Z | 2021-03-30T02:39:23.000Z | tools/create_datasets.py | Lornatang/YOLOv3-PyTorch | c41040169aedfb3633f3a5af42c8e5a309e92548 | [
"Apache-2.0"
] | 8 | 2020-02-22T10:38:59.000Z | 2021-06-26T17:54:36.000Z | # Copyright 2020 Lorna Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Source: https://github.com/pjreddie/darknet/blob/master/scripts/voc_label.py
import os
import xml.etree.ElementTree
import argparse
from PIL import Image
# Dataset splits to process, and the 20 Pascal VOC classes whose list index
# becomes the YOLO class id in the generated label files.
sets = ["train", "val", "trainval"]
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
           "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
def convert(size, box):
    """Convert a VOC box (xmin, xmax, ymin, ymax), in pixels, into YOLO
    format (x_center, y_center, width, height) normalised by the image size.

    The "- 1" offset compensates for VOC's 1-based pixel coordinates.
    """
    dw, dh = 1. / size[0], 1. / size[1]
    xmin, xmax, ymin, ymax = box
    x = (xmin + xmax) / 2.0 - 1
    y = (ymin + ymax) / 2.0 - 1
    w = xmax - xmin
    h = ymax - ymin
    return x * dw, y * dh, w * dw, h * dh
def convert_annotation(xml_path, image_index):
    """Translate one VOC XML annotation into a YOLO label file under labels/.

    Image dimensions are taken from the <size> element; if it is missing or
    malformed we fall back to reading the image itself with PIL.

    Fixes over the previous version:
      * the try/except/else was inverted -- the XML dimensions were parsed and
        then unconditionally overwritten by the image's, and a parse failure
        left w = h = 0, which later caused a ZeroDivisionError in convert();
      * a missing <size>/<width>/<height> raises AttributeError, not
        ValueError, so both are caught now;
      * both files are closed deterministically via context managers.
    """
    with open(f"{xml_path}/{image_index}.xml") as in_file:
        tree = xml.etree.ElementTree.parse(in_file)
    root = tree.getroot()
    try:
        size = root.find("size")
        w = int(size.find("width").text)
        h = int(size.find("height").text)
    except (AttributeError, ValueError):
        # <size> absent or unparsable: read the actual image for dimensions.
        path = os.path.join(os.getcwd(), "JPEGImages", image_index + ".jpg")
        img = Image.open(path)
        w, h = img.size
    with open(f"labels/{image_index}.txt", "w") as out_file:
        for obj in root.iter("object"):
            difficult = obj.find("difficult").text
            cls = obj.find("name").text
            # Skip classes we do not train on and boxes flagged "difficult".
            if cls not in classes or int(difficult) == 1:
                continue
            cls_id = classes.index(cls)
            xmlbox = obj.find("bndbox")
            box = (float(xmlbox.find("xmin").text), float(xmlbox.find("xmax").text),
                   float(xmlbox.find("ymin").text), float(xmlbox.find("ymax").text))
            bbox = convert((w, h), box)
            out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bbox]) + "\n")
def main(args):
    """Generate YOLO label files and one image-list file per dataset split.

    For every split in `sets`, writes "<split>.txt" listing image paths and
    converts every listed annotation via convert_annotation().
    """
    # exist_ok replaces the old try/except OSError, which also swallowed
    # unrelated failures such as permission errors.
    os.makedirs("labels", exist_ok=True)
    for image_set in sets:
        # Read the split's image ids, closing the file deterministically.
        with open(f"ImageSets/Main/{image_set}.txt") as split_file:
            image_indexes = split_file.read().strip().split()
        with open(f"{image_set}.txt", "w") as list_file:
            for image_index in image_indexes:
                list_file.write(f"data/{args.dataroot}/images/{image_index}.jpg\n")
                convert_annotation(args.xml_path, image_index)
if __name__ == '__main__':
    # CLI entry point: --xml-path points at the VOC Annotations directory,
    # --dataroot is the dataset folder name embedded in the generated lists.
    parser = argparse.ArgumentParser("Script tool for dividing training set and verification set in dataset.")
    parser.add_argument('--xml-path', type=str, default="./Annotations", help="Location of dimension files in dataset.")
    parser.add_argument('--dataroot', type=str, required=True, help='Dataset name')
    args = parser.parse_args()
    main(args)
ace9338675351202a1546ed23d1824e8578d1d9b | 2,310 | py | Python | mixer/constellation.py | ooojustin/mixer.py | 9195ecb3f30772c0e982cd92a27f7f56ac05a2a8 | [
"MIT"
] | 3 | 2019-08-28T00:51:23.000Z | 2020-03-26T01:19:19.000Z | mixer/constellation.py | ooojustin/mixer.py | 9195ecb3f30772c0e982cd92a27f7f56ac05a2a8 | [
"MIT"
] | null | null | null | mixer/constellation.py | ooojustin/mixer.py | 9195ecb3f30772c0e982cd92a27f7f56ac05a2a8 | [
"MIT"
] | null | null | null | from .ws import MixerWS
class MixerConstellation:
CONSTELLATION_URL = "wss://constellation.mixer.com"
websocket = None
def __init__(self, on_connected):
self.on_connected = on_connected
self.callbacks = dict()
self.packet_id = 0
async def start(self):
"""Initializes the Constellation websocket and begins to listen for events."""
self.websocket = MixerWS(self.CONSTELLATION_URL)
await self.websocket.connect()
await self.on_connected(self) # call on_connected func (we should probably subscribe to events)
while True:
# receive a packet from server
packet = await self.websocket.receive_packet()
# make sure it's an event we're subscribed to
if packet["type"] != "event": continue
if packet["event"] != "live": continue
# find and invoke the callback function with the packet & payload
event_name = packet["data"]["channel"]
payload = packet["data"]["payload"]
callback = self.callbacks.get(event_name, None)
if callback is not None:
await callback(packet, payload)
async def subscribe(self, events, callback):
"""Subcribes the Constellation websocket to a list of provided events.
Args:
events (list): A list of events to subscribe to.
callback (function): A callable function to trigger with constellation packet payload when event is triggered.
Returns:
int: The unique id used to identify the "livesubscribe" method packet.
"""
# if a single event is provided, wrap it in a list automatically
if isinstance(events, str):
events = [events]
# build livesubscribe packet
packet = {
"type": "method",
"method": "livesubscribe",
"params": {
"events": events
},
"id": self.packet_id
}
# send packet to server and determine callback
await self.websocket.send_packet(packet)
for event in events:
self.callbacks[event] = callback
# increment packet id and return unique packet id
self.packet_id += 1
return self.packet_id
| 33.478261 | 122 | 0.604329 |
ace933d1bc4cb64302745b858922ce858b680ae2 | 8,290 | py | Python | models/res_unet_adrian.py | jankukacka/lwnet | 5b91c1897e68021c1dac263645d1d3050190ab35 | [
"MIT"
] | 39 | 2020-09-03T01:45:25.000Z | 2022-03-18T03:48:04.000Z | models/res_unet_adrian.py | jankukacka/lwnet | 5b91c1897e68021c1dac263645d1d3050190ab35 | [
"MIT"
] | 3 | 2020-09-08T17:52:43.000Z | 2022-02-20T17:34:34.000Z | models/res_unet_adrian.py | jankukacka/lwnet | 5b91c1897e68021c1dac263645d1d3050190ab35 | [
"MIT"
] | 15 | 2020-09-08T07:43:37.000Z | 2022-02-23T08:29:43.000Z | # Loosely inspired on https://github.com/jvanvugt/pytorch-unet
# Improvements (conv_bridge, shortcut) added by A. Galdran (Dec. 2019)
import torch
import torch.nn as nn
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution without bias, used to project channels in shortcuts."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class ConvBlock(torch.nn.Module):
    """Two conv+ReLU+BN layers, optionally preceded by 2x2 max-pooling and
    optionally wrapped with a 1x1-projected residual shortcut.

    pool_mode can be False (no pooling) or True ('maxpool')
    """
    def __init__(self, in_c, out_c, k_sz=3, shortcut=False, pool=True):
        super(ConvBlock, self).__init__()
        if shortcut == True:
            # Project the input to out_c channels so it can be added to the output.
            self.shortcut = nn.Sequential(conv1x1(in_c, out_c), nn.BatchNorm2d(out_c))
        else:
            self.shortcut = False
        padding = (k_sz - 1) // 2
        self.pool = nn.MaxPool2d(kernel_size=2) if pool else False
        self.block = nn.Sequential(
            nn.Conv2d(in_c, out_c, kernel_size=k_sz, padding=padding),
            nn.ReLU(),
            nn.BatchNorm2d(out_c),
            nn.Conv2d(out_c, out_c, kernel_size=k_sz, padding=padding),
            nn.ReLU(),
            nn.BatchNorm2d(out_c),
        )

    def forward(self, x):
        if self.pool:
            x = self.pool(x)
        out = self.block(x)
        if self.shortcut:
            return out + self.shortcut(x)
        return out
class UpsampleBlock(torch.nn.Module):
    """Doubles spatial resolution, either with a transposed convolution
    ('transp_conv') or bilinear upsampling followed by a 1x1 conv ('up_conv').
    """
    def __init__(self, in_c, out_c, up_mode='transp_conv'):
        super(UpsampleBlock, self).__init__()
        if up_mode == 'transp_conv':
            layers = [nn.ConvTranspose2d(in_c, out_c, kernel_size=2, stride=2)]
        elif up_mode == 'up_conv':
            layers = [nn.Upsample(mode='bilinear', scale_factor=2, align_corners=False),
                      nn.Conv2d(in_c, out_c, kernel_size=1)]
        else:
            raise Exception('Upsampling mode not supported')
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)
class ConvBridgeBlock(torch.nn.Module):
    """Single conv+ReLU+BN refinement applied to a skip connection; the
    channel count is preserved."""
    def __init__(self, channels, k_sz=3):
        super(ConvBridgeBlock, self).__init__()
        padding = (k_sz - 1) // 2
        self.block = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=k_sz, padding=padding),
            nn.ReLU(),
            nn.BatchNorm2d(channels),
        )

    def forward(self, x):
        return self.block(x)
class UpConvBlock(torch.nn.Module):
    """Decoder stage: upsample the input, optionally refine the skip tensor
    with a ConvBridgeBlock, concatenate both, and fuse with a ConvBlock
    (no pooling)."""
    def __init__(self, in_c, out_c, k_sz=3, up_mode='up_conv', conv_bridge=False, shortcut=False):
        super(UpConvBlock, self).__init__()
        self.conv_bridge = conv_bridge
        self.up_layer = UpsampleBlock(in_c, out_c, up_mode=up_mode)
        self.conv_layer = ConvBlock(2 * out_c, out_c, k_sz=k_sz, shortcut=shortcut, pool=False)
        if self.conv_bridge:
            self.conv_bridge_layer = ConvBridgeBlock(out_c, k_sz=k_sz)

    def forward(self, x, skip):
        up = self.up_layer(x)
        skip_feats = self.conv_bridge_layer(skip) if self.conv_bridge else skip
        out = torch.cat([up, skip_feats], dim=1)
        return self.conv_layer(out)
class UNet(nn.Module):
    # U-Net with optional residual shortcuts inside each ConvBlock and
    # optional conv bridges on the skip connections. `layers` lists the
    # channel widths, e.g. [8, 16, 32]: first entry is the stem width and
    # each further entry is one down-sampling stage.
    def __init__(self, in_c, n_classes, layers, k_sz=3, up_mode='transp_conv', conv_bridge=True, shortcut=True):
        super(UNet, self).__init__()
        self.n_classes = n_classes
        # Stem: full-resolution feature extraction (no pooling).
        self.first = ConvBlock(in_c=in_c, out_c=layers[0], k_sz=k_sz,
                               shortcut=shortcut, pool=False)
        self.down_path = nn.ModuleList()
        for i in range(len(layers) - 1):
            block = ConvBlock(in_c=layers[i], out_c=layers[i + 1], k_sz=k_sz,
                              shortcut=shortcut, pool=True)
            self.down_path.append(block)
        self.up_path = nn.ModuleList()
        reversed_layers = list(reversed(layers))
        for i in range(len(layers) - 1):
            block = UpConvBlock(in_c=reversed_layers[i], out_c=reversed_layers[i + 1], k_sz=k_sz,
                                up_mode=up_mode, conv_bridge=conv_bridge, shortcut=shortcut)
            self.up_path.append(block)
        # init, shamelessly lifted from torchvision/models/resnet.py
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # NOTE(review): `final` is created *after* the init loop above, so it
        # keeps PyTorch's default initialisation rather than kaiming --
        # confirm whether that is intentional.
        self.final = nn.Conv2d(layers[0], n_classes, kernel_size=1)
    def forward(self, x):
        x = self.first(x)
        # Record each pre-pooling activation for the skip connections.
        down_activations = []
        for i, down in enumerate(self.down_path):
            down_activations.append(x)
            x = down(x)
        # Reverse so skips pair with decoder stages coarse-to-fine.
        down_activations.reverse()
        for i, up in enumerate(self.up_path):
            x = up(x, down_activations[i])
        # Per-pixel class logits at full resolution.
        return self.final(x)
class WNet(nn.Module):
    # Two cascaded U-Nets ("W" shape). The second U-Net receives the original
    # input concatenated with the sigmoid of the first net's prediction, and
    # its encoder additionally consumes the first decoder's activations.
    # Returns both predictions so each can be supervised.
    def __init__(self, in_c, n_classes, layers, k_sz=3, up_mode='transp_conv', conv_bridge=True, shortcut=True):
        super(WNet, self).__init__()
        self.n_classes = n_classes
        # ---- first U-Net ----
        self.first = ConvBlock(in_c=in_c, out_c=layers[0], k_sz=k_sz,
                               shortcut=shortcut, pool=False)
        self.down_path = nn.ModuleList()
        for i in range(len(layers) - 1):
            block = ConvBlock(in_c=layers[i], out_c=layers[i + 1], k_sz=k_sz,
                              shortcut=shortcut, pool=True)
            self.down_path.append(block)
        self.up_path = nn.ModuleList()
        reversed_layers = list(reversed(layers))
        for i in range(len(layers) - 1):
            block = UpConvBlock(in_c=reversed_layers[i], out_c=reversed_layers[i + 1], k_sz=k_sz,
                                up_mode=up_mode, conv_bridge=conv_bridge, shortcut=shortcut)
            self.up_path.append(block)
        self.final = nn.Conv2d(layers[0], n_classes, kernel_size=1)
        ############################
        # ---- second U-Net: input gains one channel (the first prediction),
        # and each encoder stage gets 2x channels from concatenating the
        # first decoder's activations. ----
        self.first_2 = ConvBlock(in_c=in_c+1, out_c=layers[0], k_sz=k_sz,
                                 shortcut=shortcut, pool=False)
        self.down_path_2 = nn.ModuleList()
        for i in range(len(layers) - 1):
            block = ConvBlock(in_c=2 * layers[i], out_c=layers[i + 1], k_sz=k_sz,
                              shortcut=shortcut, pool=True)
            self.down_path_2.append(block)
        self.up_path_2 = nn.ModuleList()
        reversed_layers = list(reversed(layers))
        for i in range(len(layers) - 1):
            block = UpConvBlock(in_c=reversed_layers[i], out_c=reversed_layers[i + 1], k_sz=k_sz,
                                up_mode=up_mode, conv_bridge=conv_bridge, shortcut=shortcut)
            self.up_path_2.append(block)
        self.final_2 = nn.Conv2d(layers[0], n_classes, kernel_size=1)
        # init, shamelessly lifted from torchvision/models/resnet.py
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, data):
        # ---- first U-Net pass ----
        x = self.first(data)
        down_activations = []
        up_activations = []
        for i, down in enumerate(self.down_path):
            down_activations.append(x)
            x = down(x)
        down_activations.reverse()
        for i, up in enumerate(self.up_path):
            x = up(x, down_activations[i])
            up_activations.append(x)
        out1 = self.final(x)
        # ---- second U-Net pass: condition on the first prediction ----
        new_data = torch.cat([data, torch.sigmoid(out1)], dim=1)
        x = self.first_2(new_data)
        down_activations = []
        # Reverse so the first decoder's activations pair fine-to-coarse with
        # the second encoder's stages.
        up_activations.reverse()
        for i, down in enumerate(self.down_path_2):
            down_activations.append(x)
            x = down(torch.cat([x, up_activations[i]], dim=1))
        down_activations.reverse()
        up_activations = []
        for i, up in enumerate(self.up_path_2):
            x = up(x, down_activations[i])
            up_activations.append(x)
        out2 = self.final_2(x)
        return out1, out2
| 38.37963 | 112 | 0.597708 |
ace93464a0e9ff210e3786a7109475d41e60cbab | 3,366 | py | Python | education/forms.py | berylvirgo/mg03-project | 98cf21ec7466a3e1631ab7b29c0b798490a983c3 | [
"MIT"
] | null | null | null | education/forms.py | berylvirgo/mg03-project | 98cf21ec7466a3e1631ab7b29c0b798490a983c3 | [
"MIT"
] | null | null | null | education/forms.py | berylvirgo/mg03-project | 98cf21ec7466a3e1631ab7b29c0b798490a983c3 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth import models
from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm, UserCreationForm
from django.forms import fields
from .models import *
from users.models import User
class LoginForm(AuthenticationForm):
    """Login form with a "remember me" checkbox and template-specific widget
    styling."""

    remember_me = forms.BooleanField(required=False)

    class Meta:
        model = User
        fields = [
            'email',
            'password',
        ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Attach the template's CSS classes / placeholders to each widget.
        widget_attrs = {
            'username': {'class': 'form-control form-control-user', 'placeholder': 'Enter Email Address...'},
            'password': {'class': 'form-control form-control-user', 'placeholder': 'Enter Password'},
            'remember_me': {'class': 'custom-control-input', 'id': 'customCheck'},
        }
        for field_name, attrs in widget_attrs.items():
            self.fields[field_name].widget.attrs.update(attrs)
class SignUpForm(UserCreationForm):
    """Registration form collecting full name, email and a password pair, with
    widgets restyled for the template."""

    name = forms.CharField(max_length=30, required=True)
    email = forms.EmailField(max_length=254, help_text='Enter a valid email address')

    class Meta:
        model = User
        fields = [
            'name',
            'email',
            'password1',
            'password2',
        ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Attach the template's CSS classes / placeholders to each widget.
        widget_attrs = {
            'name': {'class': 'form-control form-control-user', 'placeholder': 'Full Name', 'required': True},
            'email': {'class': 'form-control form-control-user', 'placeholder': 'Email Address'},
            'password1': {'class': 'form-control form-control-user', 'placeholder': 'Password'},
            'password2': {'class': 'form-control form-control-user', 'placeholder': 'Repeat Password'},
        }
        for field_name, attrs in widget_attrs.items():
            self.fields[field_name].widget.attrs.update(attrs)
class CustomPasswordResetForm(PasswordResetForm):
    """Password-reset form with the email widget restyled for the template."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        email_widget = self.fields['email'].widget
        email_widget.attrs.update(
            {'class': 'form-control form-control-user', 'placeholder': 'Enter Email Address...'})
class UserForm(forms.ModelForm):
    # Minimal account form exposing only the username and email fields.
    class Meta:
        model = User
        fields = [
            'username',
            'email',
        ]
class ProfileForm(forms.ModelForm):
    # Profile-editing form for the extra user attributes (bio, phone, etc.).
    class Meta:
        model = User
        fields = [
            'bio',
            'phone_number',
            'date_of_birth',
            'profile_image'
        ]
class MessageForm(forms.ModelForm):
    """Contact form built from the Message model. `is_read` is managed by the
    application rather than the sender, so it is excluded."""

    class Meta:
        model = Message
        fields = '__all__'
        exclude = ['is_read']

    def __init__(self, *args, **kwargs):
        super(MessageForm, self).__init__(*args, **kwargs)
        # Attach the template's CSS classes / placeholders to each widget.
        widget_attrs = {
            'name': {'class': 'form-control', 'placeholder': 'Your Name *', },
            'email': {'class': 'form-control', 'placeholder': 'Your Email *', },
            'phone_number': {'class': 'form-control', 'placeholder': 'Your Phone Number *', },
            'body': {'class': 'form-control', 'placeholder': 'Your Message *', },
        }
        for field_name, attrs in widget_attrs.items():
            self.fields[field_name].widget.attrs.update(attrs)
| 32.365385 | 102 | 0.596554 |
ace9349b24c67affb1770eae106180efd6e13749 | 1,001 | py | Python | basic_sockets/interactive-socket/client.py | balam909/python | e29041ff6771c0a6be77f5e8cb772cd69fd39f4c | [
"CC0-1.0"
] | null | null | null | basic_sockets/interactive-socket/client.py | balam909/python | e29041ff6771c0a6be77f5e8cb772cd69fd39f4c | [
"CC0-1.0"
] | null | null | null | basic_sockets/interactive-socket/client.py | balam909/python | e29041ff6771c0a6be77f5e8cb772cd69fd39f4c | [
"CC0-1.0"
] | null | null | null | import sys, os
from socket import *
if(len(sys.argv)>2):
host=sys.argv[1]
port=int(sys.argv[2])
else:
print("Unable to create connection, required parameters 'Host' and/or 'Port' where not provided")
sys.exit(1)
server_address=gethostbyname(host)
connection_socket=socket(AF_INET,SOCK_STREAM)
connection_socket.connect((server_address,port))
pid=os.fork()
if pid!=0:
incoming_stream=connection_socket.makefile("r")
print("Client - Client is accepting server messages")
while True:
msg=incoming_stream.readline()
print(msg)
if msg=="salir\n":
break
incoming_stream.close()
connection_socket.close()
print("Server disconnected, if you are not disconnected type 'salir'")
os.waitpid(pid,0)
else:
outgoing_stream=connection_socket.makefile("w")
print("Client - Server is accepting client messages")
while True:
msg=input()
outgoing_stream.write(msg+"\n")
outgoing_stream.flush()
if msg=="salir\n":
break
outgoing_stream.close()
connection_socket.close()
sys.exit(0)
| 25.666667 | 98 | 0.749251 |
ace935425089f4cbe7a578bf25ba6b4f1cea8c8d | 3,161 | py | Python | neural_admixture/model/modules.py | AlbertDominguez/neural-admixture | 2f277a4e7bcdf920117bec85a381d2147d64c0be | [
"AFL-1.1"
] | 13 | 2021-06-24T16:12:27.000Z | 2022-03-05T14:23:14.000Z | neural_admixture/model/modules.py | AlbertDominguez/neural-admixture | 2f277a4e7bcdf920117bec85a381d2147d64c0be | [
"AFL-1.1"
] | 6 | 2021-12-24T14:44:21.000Z | 2022-03-24T09:42:10.000Z | neural_admixture/model/modules.py | AlbertDominguez/neural-admixture | 2f277a4e7bcdf920117bec85a381d2147d64c0be | [
"AFL-1.1"
] | 7 | 2021-06-28T20:52:58.000Z | 2022-03-23T18:05:37.000Z | import logging
import sys
import torch
import torch.nn as nn
# Module-level logger: INFO and above are written to stdout.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
log = logging.getLogger(__name__)
class ZeroOneClipper(object):
    """Callable weight constraint: clamps a module's weights into [0, 1].

    Intended for use with ``model.apply(clipper)`` after an optimiser step so
    the decoder's allele-frequency matrix stays a valid probability
    (0 <= F_ij <= 1).
    """
    def __call__(self, module):
        if hasattr(module, 'weight'):
            with torch.no_grad():
                module.weight.data.clamp_(0., 1.)
class NeuralEncoder(nn.Module):
def __init__(self, input_size, ks):
super().__init__()
assert sum([k < 2 for k in ks]) == 0, 'Invalid number of clusters. Requirement: k >= 2'
self.ks = ks
self.heads = nn.ModuleList([
nn.Sequential(
nn.Linear(input_size, k, bias=True)
# nn.BatchNorm1d(k)
) for k in ks])
def _get_head_for_k(self, k):
return self.heads[k-min(self.ks)]
def forward(self, X):
outputs = [self._get_head_for_k(k)(X) for k in self.ks]
return outputs
class NeuralDecoder(nn.Module):
    """Multi-head decoder: one linear layer per cluster count K, each mapping
    K cluster assignments back to `output_size` features, clamped to [0, 1].

    Args:
        ks: cluster counts, one decoder head per entry (assumed consecutive).
        output_size: number of output features per head.
        bias: whether the linear heads use a bias term.
        inits: optional tensor of shape (sum(ks), output_size) whose
            consecutive row-slices initialise each head's weights.
        freeze: if True (and `inits` is given), decoder weights are excluded
            from gradient computation.
    """
    def __init__(self, ks, output_size, bias=False, inits=None, freeze=False):
        super().__init__()
        self.ks = ks
        self.freeze = freeze
        if inits is None:
            self.decoders = nn.ModuleList(
                [nn.Linear(k, output_size, bias=bias) for k in self.ks]
            )
            if self.freeze:
                # log.warn is a deprecated alias of log.warning.
                log.warning('Not going to freeze weights as no initialization was provided.')
        else:
            layers = []
            offset = 0  # running row offset into the stacked init matrix
            for k in self.ks:
                layer = nn.Linear(k, output_size, bias=bias)
                init_slice = inits[offset:offset + k].clone().detach().T
                # BUG FIX: nn.Parameter(t) defaults to requires_grad=True no
                # matter what t.requires_grad is, so the old
                # `.requires_grad_(not freeze)` call was silently ignored and
                # "frozen" weights still received gradients. Pass the flag
                # explicitly instead.
                layer.weight = nn.Parameter(init_slice, requires_grad=not self.freeze)
                layers.append(layer)
                offset += k
            self.decoders = nn.ModuleList(layers)
            if self.freeze:
                log.info('Decoder weights will be frozen.')
        assert len(self.decoders) == len(self.ks)

    def _get_decoder_for_k(self, k):
        # Decoders are stored in the same order as self.ks, starting at min(ks).
        return self.decoders[k - min(self.ks)]

    def forward(self, hid_states):
        # One hidden state per head, decoded and clamped to valid frequencies.
        return [torch.clamp(self._get_decoder_for_k(k)(h), 0, 1)
                for k, h in zip(self.ks, hid_states)]
# class NonLinearMultiHeadDecoder(nn.Module):
# def __init__(self, ks, output_size, bias=False,
# hidden_size=512, hidden_activation=nn.ReLU(),
# inits=None):
# super().__init__()
# self.ks = ks
# self.hidden_size = hidden_size
# self.output_size = output_size
# self.heads_decoder = nn.Linear(sum(self.ks), self.hidden_size, bias=bias)
# self.common_decoder = nn.Linear(self.hidden_size, self.output_size)
# self.nonlinearity = hidden_activation
# self.sigmoid = nn.Sigmoid()
# def forward(self, hid_states):
# if len(hid_states) > 1:
# concat_states = torch.cat(hid_states, 1)
# else:
# concat_states = hid_states[0]
# dec = self.nonlinearity(self.heads_decoder(concat_states))
# rec = self.sigmoid(self.common_decoder(dec))
# return rec
| 36.755814 | 120 | 0.585891 |
ace935509094d41fedb7a711f5dbbcc3f85284cb | 4,541 | py | Python | tests/flattrs_test/AllScalarsWithDefaults.py | Tinche/flattrs | 6c5725aaaaa89f0bbdf444178dc7c626ec95614a | [
"MIT"
] | 7 | 2018-08-27T16:59:55.000Z | 2022-01-20T19:11:28.000Z | tests/flattrs_test/AllScalarsWithDefaults.py | Tinche/flattrs | 6c5725aaaaa89f0bbdf444178dc7c626ec95614a | [
"MIT"
] | 1 | 2019-12-04T12:17:45.000Z | 2020-01-14T13:37:32.000Z | tests/flattrs_test/AllScalarsWithDefaults.py | Tinche/flattrs | 6c5725aaaaa89f0bbdf444178dc7c626ec95614a | [
"MIT"
] | 2 | 2019-12-04T12:08:14.000Z | 2019-12-04T12:51:57.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: flattrs_test
import flatbuffers
class AllScalarsWithDefaults(object):
    """FlatBuffers table wrapper with one scalar field of every scalar type.

    Generated code: each accessor returns the value stored in the buffer, or
    the schema default (1 for integers, 1.0 for floats, True for the boolean)
    when the field is absent (a vtable offset of 0 means "not written").
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsAllScalarsWithDefaults(cls, buf, offset):
        # Resolve the root table position inside the buffer and wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = AllScalarsWithDefaults()
        x.Init(buf, n + offset)
        return x
    # AllScalarsWithDefaults
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # AllScalarsWithDefaults
    def Boolean(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return True
    # AllScalarsWithDefaults
    def Uint8(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
        return 1
    # AllScalarsWithDefaults
    def Uint16(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint16Flags, o + self._tab.Pos)
        return 1
    # AllScalarsWithDefaults
    def Uint32(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
        return 1
    # AllScalarsWithDefaults
    def Uint64(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 1
    # AllScalarsWithDefaults
    def Int8(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 1
    # AllScalarsWithDefaults
    def Int16(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
        return 1
    # AllScalarsWithDefaults
    def Int32(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 1
    # AllScalarsWithDefaults
    def Int64(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 1
    # AllScalarsWithDefaults
    def Float32(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 1.0
    # AllScalarsWithDefaults
    def Float64(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
        return 1.0
# Builder helpers for AllScalarsWithDefaults. Slot indices (0..10) and the
# schema defaults (1 / 1.0) must match the accessors above.

def AllScalarsWithDefaultsStart(builder):
    """Begin a new AllScalarsWithDefaults table with 11 field slots."""
    builder.StartObject(11)


def AllScalarsWithDefaultsAddBoolean(builder, boolean):
    builder.PrependBoolSlot(0, boolean, 1)


def AllScalarsWithDefaultsAddUint8(builder, uint8):
    builder.PrependUint8Slot(1, uint8, 1)


def AllScalarsWithDefaultsAddUint16(builder, uint16):
    builder.PrependUint16Slot(2, uint16, 1)


def AllScalarsWithDefaultsAddUint32(builder, uint32):
    builder.PrependUint32Slot(3, uint32, 1)


def AllScalarsWithDefaultsAddUint64(builder, uint64):
    builder.PrependUint64Slot(4, uint64, 1)


def AllScalarsWithDefaultsAddInt8(builder, int8):
    builder.PrependInt8Slot(5, int8, 1)


def AllScalarsWithDefaultsAddInt16(builder, int16):
    builder.PrependInt16Slot(6, int16, 1)


def AllScalarsWithDefaultsAddInt32(builder, int32):
    builder.PrependInt32Slot(7, int32, 1)


def AllScalarsWithDefaultsAddInt64(builder, int64):
    builder.PrependInt64Slot(8, int64, 1)


def AllScalarsWithDefaultsAddFloat32(builder, float32):
    builder.PrependFloat32Slot(9, float32, 1.0)


def AllScalarsWithDefaultsAddFloat64(builder, float64):
    builder.PrependFloat64Slot(10, float64, 1.0)


def AllScalarsWithDefaultsEnd(builder):
    """Finish the table and return its offset."""
    return builder.EndObject()
| 40.90991 | 100 | 0.70425 |
ace9356246bc7d0ea9b164ef2ff33dd25af54d5b | 1,140 | py | Python | bundle-workflow/tests/tests_manifests/test_manifest.py | qreshi/opensearch-build | ed0f26a7f80562d16793ee899fae761a23ce6361 | [
"Apache-2.0"
] | null | null | null | bundle-workflow/tests/tests_manifests/test_manifest.py | qreshi/opensearch-build | ed0f26a7f80562d16793ee899fae761a23ce6361 | [
"Apache-2.0"
] | null | null | null | bundle-workflow/tests/tests_manifests/test_manifest.py | qreshi/opensearch-build | ed0f26a7f80562d16793ee899fae761a23ce6361 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import os
import unittest
from manifests.manifest import Manifest
class TestManifest(unittest.TestCase):
    """Tests for the abstract Manifest base class."""

    def setUp(self):
        # Test fixtures live next to this file in ./data.
        self.data_path = os.path.join(os.path.dirname(__file__), "data")

    def test_manifest_is_abstract(self):
        """Manifest cannot be instantiated directly."""
        with self.assertRaises(TypeError) as context:
            Manifest(None)
        self.assertEqual(
            "Can't instantiate abstract class Manifest with abstract methods __init__",
            # str(exc) is the idiomatic spelling of exc.__str__()
            str(context.exception),
        )

    def test_invalid_version(self):
        """A manifest file with an unsupported schema version raises ValueError."""
        class TestManifest(Manifest):
            def __init__(self, data):
                super().__init__(data)

        manifest_path = os.path.join(self.data_path, "invalid-schema-version.yml")
        with self.assertRaises(ValueError) as context:
            TestManifest.from_path(manifest_path)
        self.assertEqual(
            "Unsupported schema version: invalid", str(context.exception)
        )
| 30.810811 | 87 | 0.67807 |
ace936532194fbf2b8b05c028ffc120f1ca741ea | 11,670 | py | Python | src/robot/running/testlibraries.py | yahman72/robotframework | 9f82d9a2bf088073859eb23a33d275c6a8c0b975 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2015-03-11T14:59:20.000Z | 2015-03-11T14:59:20.000Z | src/robot/running/testlibraries.py | yahman72/robotframework | 9f82d9a2bf088073859eb23a33d275c6a8c0b975 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robot/running/testlibraries.py | yahman72/robotframework | 9f82d9a2bf088073859eb23a33d275c6a8c0b975 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
from robot.errors import DataError
from robot.libraries import STDLIBS, DEPRECATED_STDLIBS
from robot.output import LOGGER
from robot.utils import (getdoc, get_error_details, Importer, is_java_init,
is_java_method, JYTHON, normalize, seq2str2, unic)
from .dynamicmethods import (GetKeywordArguments, GetKeywordDocumentation,
GetKeywordNames, RunKeyword)
from .handlers import Handler, InitHandler, DynamicHandler
from .handlerstore import HandlerStore
from .outputcapture import OutputCapturer
# java.lang.Object exists only when running on Jython; on CPython use None
# as a sentinel so later identity checks against Object never match.
if JYTHON:
    from java.lang import Object
else:
    Object = None
def TestLibrary(name, args=None, variables=None, create_handlers=True):
    """Import a test library and wrap it in the matching wrapper class.

    `name` is a library name or a path; standard library names (including
    deprecated ones) are resolved inside the `robot.libraries` package.
    Output produced during the import is captured by OutputCapturer.
    """
    if name in STDLIBS or name in DEPRECATED_STDLIBS:
        import_name = 'robot.libraries.' + name
    else:
        import_name = name
    with OutputCapturer(library_import=True):
        importer = Importer('test library')
        libcode = importer.import_class_or_module(import_name)
    libclass = _get_lib_class(libcode)
    lib = libclass(libcode, name, args or [], variables)
    if create_handlers:
        lib.create_handlers()
    return lib
def _get_lib_class(libcode):
    """Select the wrapper class matching the imported library's API style."""
    if inspect.ismodule(libcode):
        return _ModuleLibrary
    if not GetKeywordNames(libcode):
        return _ClassLibrary
    # A dynamic library exposes run_keyword in addition to get_keyword_names;
    # without run_keyword the library is only "hybrid".
    return _DynamicLibrary if RunKeyword(libcode) else _HybridLibrary
class _BaseTestLibrary(object):
    """Common base for imported test libraries.

    Owns the library's metadata (name, version, doc format, scope), its
    keyword handler store, and instance lifecycle management for the three
    scopes (GLOBAL, TESTSUITE, TESTCASE).
    """
    # Logging hooks; subclasses raise failures to warnings where a library
    # author mistake is the likely cause.
    _log_success = LOGGER.debug
    _log_failure = LOGGER.info
    _log_failure_details = LOGGER.debug
    def __init__(self, libcode, name, args, variables):
        # If the library was imported by path, use the file's base name.
        if os.path.exists(name):
            name = os.path.splitext(os.path.basename(os.path.abspath(name)))[0]
        self.version = self._get_version(libcode)
        self.name = name
        self.orig_name = name # Stores original name when importing WITH NAME
        self.handlers = HandlerStore(self.name)
        self._instance_cache = []
        self.has_listener = None # Set when first instance is created
        self._doc = None
        self.doc_format = self._get_doc_format(libcode)
        self.scope = self._get_scope(libcode)
        self.init = self._create_init_handler(libcode)
        self.positional_args, self.named_args \
            = self.init.resolve_arguments(args, variables)
        self._libcode = libcode
        self._libinst = None
    def __len__(self):
        return len(self.handlers)
    @property
    def doc(self):
        # Lazily read the docstring from the (possibly just created) instance.
        if self._doc is None:
            self._doc = getdoc(self.get_instance())
        return self._doc
    @property
    def listener(self):
        if self.has_listener:
            return self._get_listener(self.get_instance())
        return None
    def _get_listener(self, inst):
        return getattr(inst, 'ROBOT_LIBRARY_LISTENER', None)
    def create_handlers(self):
        self._create_handlers(self.get_instance())
        self.init_scope_handling()
    # The suite/test hooks are no-ops for GLOBAL scope; init_scope_handling
    # rebinds them per instance for the narrower scopes.
    def start_suite(self):
        pass
    def end_suite(self):
        pass
    def start_test(self):
        pass
    def end_test(self):
        pass
    def _get_version(self, libcode):
        return self._get_attr(libcode, 'ROBOT_LIBRARY_VERSION') \
            or self._get_attr(libcode, '__version__')
    def _get_attr(self, object, attr, default='', upper=False):
        # NOTE: the parameter name shadows the builtin `object`; kept as-is
        # for backwards compatibility of the internal signature.
        value = unic(getattr(object, attr, default))
        if upper:
            value = normalize(value, ignore='_').upper()
        return value
    def _get_doc_format(self, libcode):
        return self._get_attr(libcode, 'ROBOT_LIBRARY_DOC_FORMAT', upper=True)
    def _get_scope(self, libcode):
        # Anything other than the two recognized values falls back to TESTCASE.
        scope = self._get_attr(libcode, 'ROBOT_LIBRARY_SCOPE', upper=True)
        return scope if scope in ['GLOBAL','TESTSUITE'] else 'TESTCASE'
    def _create_init_handler(self, libcode):
        return InitHandler(self, self._resolve_init_method(libcode))
    def _resolve_init_method(self, libcode):
        init_method = getattr(libcode, '__init__', None)
        return init_method if self._valid_init(init_method) else lambda: None
    def _valid_init(self, method):
        return inspect.ismethod(method) or is_java_init(method)
    def init_scope_handling(self):
        """Rebind the suite/test hooks so instance lifetime matches the scope."""
        if self.scope == 'GLOBAL':
            return
        self._libinst = None
        self.start_suite = self._caching_start
        self.end_suite = self._restoring_end
        if self.scope == 'TESTCASE':
            self.start_test = self._caching_start
            self.end_test = self._restoring_end
    def _caching_start(self):
        # Push the outer scope's instance and start the new scope fresh.
        self._instance_cache.append(self._libinst)
        self._libinst = None
    def _restoring_end(self):
        self._libinst = self._instance_cache.pop()
    def get_instance(self):
        # The instance is created lazily; listener presence is detected once.
        if self._libinst is None:
            self._libinst = self._get_instance(self._libcode)
        if self.has_listener is None:
            self.has_listener = self._get_listener(self._libinst) is not None
        return self._libinst
    def _get_instance(self, libcode):
        with OutputCapturer(library_import=True):
            try:
                return libcode(*self.positional_args, **self.named_args)
            except:
                # Bare except on purpose: any failure in user __init__ is
                # reported uniformly as a library initialization error.
                self._raise_creating_instance_failed()
    def _create_handlers(self, libcode):
        for name in self._get_handler_names(libcode):
            method = self._try_to_get_handler_method(libcode, name)
            if method:
                handler = self._try_to_create_handler(name, method)
                if handler:
                    self.handlers.add(handler)
                    self._log_success("Created keyword '%s'" % handler.name)
    def _get_handler_names(self, libcode):
        # Private names and library configuration attributes are not keywords.
        return [name for name in dir(libcode)
                if not name.startswith(('_', 'ROBOT_LIBRARY_'))]
    def _try_to_get_handler_method(self, libcode, name):
        try:
            return self._get_handler_method(libcode, name)
        except:
            # Bare except on purpose: user code may raise anything from
            # attribute access; the failure is logged, not propagated.
            self._report_adding_keyword_failed(name)
    def _report_adding_keyword_failed(self, name):
        msg, details = get_error_details()
        self._log_failure("Adding keyword '%s' to library '%s' failed: %s"
                          % (name, self.name, msg))
        if details:
            self._log_failure_details('Details:\n%s' % details)
    def _get_handler_method(self, libcode, name):
        method = getattr(libcode, name)
        if not inspect.isroutine(method):
            raise DataError('Not a method or function')
        return method
    def _try_to_create_handler(self, name, method):
        try:
            return self._create_handler(name, method)
        except:
            self._report_adding_keyword_failed(name)
    def _create_handler(self, handler_name, handler_method):
        return Handler(self, handler_name, handler_method)
    def _raise_creating_instance_failed(self):
        msg, details = get_error_details()
        if self.positional_args or self.named_args:
            args = self.positional_args \
                + ['%s=%s' % item for item in self.named_args.items()]
            args_text = 'arguments %s' % seq2str2(args)
        else:
            args_text = 'no arguments'
        raise DataError("Initializing test library '%s' with %s failed: %s\n%s"
                        % (self.name, args_text, msg, details))
class _ClassLibrary(_BaseTestLibrary):
    """Standard library implemented as a class whose methods are keywords."""
    def _get_handler_method(self, libinst, name):
        # Type is checked before using getattr to avoid calling properties,
        # most importantly bean properties generated by Jython (issue 188).
        for item in (libinst,) + inspect.getmro(libinst.__class__):
            if item in (object, Object):
                continue
            if not (hasattr(item, '__dict__') and name in item.__dict__):
                continue
            self._validate_handler(item.__dict__[name])
            return getattr(libinst, name)
        raise DataError('No non-implicit implementation found')
    def _validate_handler(self, handler):
        if not self._is_routine(handler):
            raise DataError('Not a method or function')
        if self._is_implicit_java_or_jython_method(handler):
            raise DataError('Implicit methods are ignored')
    def _is_routine(self, handler):
        return inspect.isroutine(handler) or is_java_method(handler)
    def _is_implicit_java_or_jython_method(self, handler):
        # Only Java methods can be "implicit"; a method is implicit when every
        # overload is declared on java.lang.Object or generated by Jython.
        if not is_java_method(handler):
            return False
        for signature in handler.argslist[:handler.nargs]:
            cls = signature.declaringClass
            if not (cls is Object or self._is_created_by_jython(handler, cls)):
                return False
        return True
    def _is_created_by_jython(self, handler, cls):
        proxy_methods = getattr(cls, '__supernames__', []) + ['classDictInit']
        return handler.__name__ in proxy_methods
class _ModuleLibrary(_BaseTestLibrary):
    """Wraps a plain Python module used as a test library.

    A module cannot be re-instantiated, so it always has global scope and
    takes no constructor arguments; the module object itself acts as the
    library "instance".
    """
    def _get_scope(self, libcode):
        return 'GLOBAL'
    def _get_handler_method(self, libcode, name):
        candidate = super(_ModuleLibrary, self)._get_handler_method(libcode, name)
        # Respect an explicit public API declaration on the module.
        if hasattr(libcode, '__all__') and name not in libcode.__all__:
            raise DataError('Not exposed as a keyword')
        return candidate
    def get_instance(self):
        if self.has_listener is None:
            self.has_listener = self._get_listener(self._libcode) is not None
        return self._libcode
    def _create_init_handler(self, libcode):
        return InitHandler(self, lambda: None)
class _HybridLibrary(_BaseTestLibrary):
    """Library that lists its keywords dynamically but implements them as
    normal methods looked up with regular attribute access."""
    # Warn (not just log) when a name returned by the library does not
    # resolve to a usable method.
    _log_failure = LOGGER.warn
    def _get_handler_names(self, instance):
        # Prefer the Pythonic method name, falling back to the Java-style one.
        try:
            return instance.get_keyword_names()
        except AttributeError:
            return instance.getKeywordNames()
class _DynamicLibrary(_BaseTestLibrary):
    """Library implementing the dynamic API: `get_keyword_names` plus
    `run_keyword`, with optional documentation/argument getters."""
    _log_failure = LOGGER.warn
    def __init__(self, libcode, name, args, variables=None):
        _BaseTestLibrary.__init__(self, libcode, name, args, variables)
    @property
    def doc(self):
        # Dynamic libraries may provide their intro text via '__intro__';
        # otherwise fall back to the instance docstring.
        if self._doc is None:
            self._doc = (self._get_kw_doc('__intro__') or
                         _BaseTestLibrary.doc.fget(self))
        return self._doc
    def _get_kw_doc(self, name, instance=None):
        getter = GetKeywordDocumentation(instance or self.get_instance())
        return getter(name)
    def _get_kw_args(self, name, instance=None):
        getter = GetKeywordArguments(instance or self.get_instance())
        return getter(name)
    def _get_handler_names(self, instance):
        return GetKeywordNames(instance)()
    def _get_handler_method(self, instance, name):
        # Every keyword is executed through the single run_keyword entry point.
        return RunKeyword(instance)
    def _create_handler(self, name, method):
        doc = self._get_kw_doc(name)
        argspec = self._get_kw_args(name)
        return DynamicHandler(self, name, method, doc, argspec)
    def _create_init_handler(self, libcode):
        # '__init__' documentation is also queried from the library itself.
        docgetter = lambda: self._get_kw_doc('__init__')
        return InitHandler(self, self._resolve_init_method(libcode), docgetter)
| 35.150602 | 79 | 0.661611 |
ace93677fc1f9d44329779abfe7064b00f0ad61a | 643 | py | Python | __init__.py | dmwesterhoff/slackd | ec87abc693d65fcedb2233b97f84b604c37b5930 | [
"MIT"
] | 1 | 2016-03-18T21:35:54.000Z | 2016-03-18T21:35:54.000Z | __init__.py | dmwesterhoff/slackd | ec87abc693d65fcedb2233b97f84b604c37b5930 | [
"MIT"
] | null | null | null | __init__.py | dmwesterhoff/slackd | ec87abc693d65fcedb2233b97f84b604c37b5930 | [
"MIT"
] | null | null | null | # .::::::. ::: :::. .,-::::: ::: . :::::::-.
# ;;;` ` ;;; ;;`;; ,;;;'````' ;;; .;;,.;;, `';,
# '[==/[[[[, [[[ ,[[ '[[, [[[ [[[[[/' `[[ [[
# ''' $ $$' c$$$cc$$$c $$$ _$$$$, $$, $$
# 88b dPo88oo,.__888 888,`88bo,__,o,"888"88o, 888_,o8P'
# "YMmMY" """"YUMMMYMM ""` "YUMMMMMP"MMM "MMP" MMMMP"`
"""
slackd
~~~~~~
:copyright: (c) 2016 David M. Westerhoff
:license: All rights reserved
"""
# Package metadata constants (read by packaging and introspection tools).
__title__ = 'slackd'
__version__ = '1.0.0'
__author__ = 'David Westerhoff'
__license__ = 'All rights reserved'
__copyright__ = 'Copyright 2016 David M. Westerhoff'
| 29.227273 | 64 | 0.402799 |
ace937cea3dd5e4b2cb27da52c1e1b7009c00b21 | 447 | py | Python | daiquiri/query/migrations/0016_rename_database_to_schema.py | agy-why/daiquiri | 4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d | [
"Apache-2.0"
] | 14 | 2018-12-23T18:35:02.000Z | 2021-12-15T04:55:12.000Z | daiquiri/query/migrations/0016_rename_database_to_schema.py | agy-why/daiquiri | 4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d | [
"Apache-2.0"
] | 40 | 2018-12-20T12:44:05.000Z | 2022-03-21T11:35:20.000Z | daiquiri/query/migrations/0016_rename_database_to_schema.py | agy-why/daiquiri | 4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d | [
"Apache-2.0"
] | 5 | 2019-05-16T08:03:35.000Z | 2021-08-23T20:03:11.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-16 16:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames QueryJob.database_name to schema_name; RenameField keeps the
    # existing column data.
    dependencies = [
        ('daiquiri_query', '0015_queryarchivejob'),
    ]
    operations = [
        migrations.RenameField(
            model_name='queryjob',
            old_name='database_name',
            new_name='schema_name',
        ),
    ]
| 21.285714 | 51 | 0.621924 |
ace93ada19a8eced6ed56b0fa80cdeb908402f7d | 3,005 | py | Python | deployApp.py | mjstriffler/housingdata_project | 939a251a4a6aa65143773756052031c33bd3832d | [
"MIT"
] | null | null | null | deployApp.py | mjstriffler/housingdata_project | 939a251a4a6aa65143773756052031c33bd3832d | [
"MIT"
] | null | null | null | deployApp.py | mjstriffler/housingdata_project | 939a251a4a6aa65143773756052031c33bd3832d | [
"MIT"
] | null | null | null | # Import libraries
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
import pandas as pd
from tensorflow.keras.models import load_model
import joblib
app = Flask(__name__)
# Load the pickled model once at import time so every request reuses it.
# NOTE(review): pickle.load executes arbitrary code from the file — only
# load predict.pkl from a trusted source.
with open('predict.pkl', 'rb') as file:
    testrun = pickle.load(file)
#testrun = pickle.load(open('model.pkl','rb'))
# testrun = load_model("migration_trained.h5")
##Define app routoe
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the landing page; on POST, score the submitted form with the model.

    Expects the numeric form fields 'Median Income', 'Avg Home Price',
    'Unemployment Rate' and 'Total Population', plus a 'State' selection.
    """
    if request.method == 'POST':
        # Form values arrive as strings; the model expects numeric features,
        # so convert before building the frame. Keep the original column
        # order — it must match the order used during training.
        features = pd.DataFrame({
            "Median Income": [float(request.form['Median Income'])],
            "Avg Home Price": [float(request.form.get('Avg Home Price'))],
            "Unemployment Rate": [float(request.form.get("Unemployment Rate"))],
            "Total Population": [float(request.form.get("Total Population"))],
        })
        us_state = request.form.get("State")
        prediction = testrun.predict(features)
        return render_template('tableau.html', state=us_state, predMigrate=prediction[0])
    return render_template('tableau.html')
# let data = {element: "barium"};
# fetch("/post/data/here", {
# method: "POST", zåß
# body: JSON.stringify(data)
# }).then(res => {
# console.log("Request complete! response:", res);
# })
@app.route('/about')
def about():
    """Render the static About page."""
    return render_template('about.html')
if __name__ == '__main__':
app.run(debug=True) | 22.259259 | 145 | 0.545757 |
ace93be6a74f16c12b63919833a799c2a9d9cde2 | 471 | py | Python | mat_db/main/migrations/0008_remove_hosedynamic_dyn_name_and_more.py | tkminek/material_database | 8661617077192d20e8d9445cd6560bf1266f0582 | [
"MIT"
] | null | null | null | mat_db/main/migrations/0008_remove_hosedynamic_dyn_name_and_more.py | tkminek/material_database | 8661617077192d20e8d9445cd6560bf1266f0582 | [
"MIT"
] | null | null | null | mat_db/main/migrations/0008_remove_hosedynamic_dyn_name_and_more.py | tkminek/material_database | 8661617077192d20e8d9445cd6560bf1266f0582 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.2 on 2022-02-25 22:06
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the HoseDynamic.Dyn_name and HoseStatic.Stat_name fields.
    # Auto-generated by `makemigrations`; avoid editing operations by hand.
    dependencies = [
        ('main', '0007_rename_e_min40_hosedynamic_dyn_e_min40_and_more'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='hosedynamic',
            name='Dyn_name',
        ),
        migrations.RemoveField(
            model_name='hosestatic',
            name='Stat_name',
        ),
    ]
| 21.409091 | 73 | 0.596603 |
ace93c94a313d2ddcf1666e50742712ea0798687 | 1,362 | py | Python | restfulpy/tests/test_documentaion.py | mehrdad1373pedramfar/restfulpy | 19757dc485f5477cdbf2a7033cd1c7c79ef97647 | [
"MIT"
] | null | null | null | restfulpy/tests/test_documentaion.py | mehrdad1373pedramfar/restfulpy | 19757dc485f5477cdbf2a7033cd1c7c79ef97647 | [
"MIT"
] | null | null | null | restfulpy/tests/test_documentaion.py | mehrdad1373pedramfar/restfulpy | 19757dc485f5477cdbf2a7033cd1c7c79ef97647 | [
"MIT"
] | null | null | null | from os import path, mkdir
from bddrest.authoring import response, status
from nanohttp import action, RegexRouteController
from restfulpy.controllers import RootController
from restfulpy.testing import ApplicableTestCase
HERE = path.abspath(path.dirname(__file__))
DATA_DIRECTORY = path.abspath(path.join(HERE, '../../data'))
class Root(RegexRouteController):
    """Minimal controller exposing the two routes used by the documentation tests."""

    def __init__(self):
        # __init__ must not return a value; the original `return super().__init__(...)`
        # only worked because __init__ returns None.
        super().__init__([
            ('/apiv1/documents', self.get), ('/', self.index)
        ])

    @action
    def get(self):
        """Handle requests routed to /apiv1/documents."""
        return 'Index'

    @action
    def index(self):
        """Handle requests routed to /."""
        return 'Index'
class TestApplication(ApplicableTestCase):
    """Exercises story recording and API-documentation generation directories."""
    __controller_factory__ = Root
    __story_directory__ = path.join(DATA_DIRECTORY, 'stories')
    __api_documentation_directory__ = path.join(DATA_DIRECTORY, 'markdown')
    # Field 'a' is declared mandatory for requests hitting '/'.
    __metadata__ = {
        '/': dict(a=dict(not_none=True, required=True))
    }
    def test_index(self):
        # Title contains a '/', which the story/markdown writers must handle.
        with self.given(
            'There is a / in the title',
            '/apiv1/documents',
            'GET'
        ):
            assert status == 200
            assert response.body == b'Index'
    def test_root_request(self):
        # Custom verb INDEX against the root route, with the required form field.
        with self.given(
            'Requesting on the root controller',
            '/',
            'INDEX',
            form=dict(a=1)
        ):
            assert status == 200
| 24.321429 | 75 | 0.613069 |
ace93d4d53690b2e4103a3c56de5f412fa20d5ea | 2,329 | py | Python | mmsegmentation/mmseg/datasets/pipelines/formating_clip.py | jfzhuang/ST_Memory | f253c05b7ecb37a1cbe9f312a628ba30b4555230 | [
"MIT"
] | null | null | null | mmsegmentation/mmseg/datasets/pipelines/formating_clip.py | jfzhuang/ST_Memory | f253c05b7ecb37a1cbe9f312a628ba30b4555230 | [
"MIT"
] | null | null | null | mmsegmentation/mmseg/datasets/pipelines/formating_clip.py | jfzhuang/ST_Memory | f253c05b7ecb37a1cbe9f312a628ba30b4555230 | [
"MIT"
] | null | null | null | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
    """Convert ``data`` to a :obj:`torch.Tensor`.

    Accepted inputs: :class:`torch.Tensor` (returned unchanged),
    :class:`numpy.ndarray`, a non-string :class:`Sequence`, :class:`int`
    and :class:`float`.

    Raises:
        TypeError: For any other input type (including ``str``).
    """
    # Guard-clause form; the check order matters: tensors pass through,
    # arrays share memory via from_numpy, and strings are rejected even
    # though they are Sequences.
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class DefaultFormatBundle_Clip(object):
    """Format a clip (list of frames) and its segmentation map for collation.

    Each frame in ``results['clip']`` is transposed HWC -> CHW and wrapped in
    a ``DataContainer`` with ``stack=True``; ``results['gt_semantic_seg']``
    gains a leading channel dimension and is cast to int64.
    """
    def __call__(self, results):
        if 'clip' in results:
            clip = results['clip']
            for i in range(len(clip)):
                # Promote grayscale HW frames to HWC before transposing.
                if len(clip[i].shape) < 3:
                    clip[i] = np.expand_dims(clip[i], -1)
                clip[i] = np.ascontiguousarray(clip[i].transpose(2, 0, 1))
                clip[i] = DC(to_tensor(clip[i]), stack=True)
            results['clip'] = clip
        if 'gt_semantic_seg' in results:
            # convert to long
            results['gt_semantic_seg'] = DC(
                to_tensor(results['gt_semantic_seg'][None, ...].astype(np.int64)), stack=True
            )
        return results
    def __repr__(self):
        return self.__class__.__name__
@PIPELINES.register_module()
class ImageToTensor_Clip(object):
    """Convert every frame stored under each key in ``keys`` to a CHW tensor."""

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Transpose each HWC frame to CHW and convert it to a tensor in place."""
        for key in self.keys:
            frames = results[key]
            for idx, frame in enumerate(frames):
                # Promote grayscale HW frames to HWC before transposing.
                if len(frame.shape) < 3:
                    frame = np.expand_dims(frame, -1)
                frames[idx] = to_tensor(frame.transpose(2, 0, 1))
            results[key] = frames
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'
| 31.053333 | 93 | 0.597252 |
ace93f7348d9f6fba0660f4549e928e55494c344 | 1,033 | py | Python | KaggleTitanic/models/model_2017_09_17_18_25_35-0.9877.py | deo1/deo1 | 36671f12269d3bd662d746e8b9f66c22255c9df7 | [
"MIT"
] | null | null | null | KaggleTitanic/models/model_2017_09_17_18_25_35-0.9877.py | deo1/deo1 | 36671f12269d3bd662d746e8b9f66c22255c9df7 | [
"MIT"
] | null | null | null | KaggleTitanic/models/model_2017_09_17_18_25_35-0.9877.py | deo1/deo1 | 36671f12269d3bd662d746e8b9f66c22255c9df7 | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
from sklearn.preprocessing import FunctionTransformer
from copy import copy
# NOTE: Make sure that the class is labeled 'class' in the data file
# Template placeholders: replace the path and COLUMN_SEPARATOR before running.
tpot_data = np.recfromcsv('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.float64)
# Drop the target column to obtain the feature matrix.
features = np.delete(tpot_data.view(np.float64).reshape(tpot_data.size, -1), tpot_data.dtype.names.index('class'), axis=1)
training_features, testing_features, training_target, testing_target = \
    train_test_split(features, tpot_data['class'], random_state=42)
# TPOT-exported pipeline: raw features are concatenated with the predictions
# of a first-stage logistic regression, then fed to a second one.
exported_pipeline = make_pipeline(
    make_union(
        FunctionTransformer(copy),
        StackingEstimator(estimator=LogisticRegression(C=10.0))
    ),
    LogisticRegression(C=10.0)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| 39.730769 | 122 | 0.801549 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.