hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8e33622f2e93a45dd032a9f590ba30c99f8ec7de | 850 | py | Python | setup.py | appchoose/choixpeau | 936831fd47212f4a81f7ffe0970319e30a31a072 | [
"MIT"
] | null | null | null | setup.py | appchoose/choixpeau | 936831fd47212f4a81f7ffe0970319e30a31a072 | [
"MIT"
] | null | null | null | setup.py | appchoose/choixpeau | 936831fd47212f4a81f7ffe0970319e30a31a072 | [
"MIT"
] | null | null | null | import setuptools
# Use the README verbatim as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Distribution metadata for the "choixpeau" package (the closing paren of
# this call follows on the next line).
setuptools.setup(
    name="choixpeau",
    version="0.0.9",
    author="Keurcien Luu",
    author_email="keurcien@appchoose.io",
    description="Efficiently assign users to buckets.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # Ship every package found in the tree except the test suite.
    packages=setuptools.find_packages(exclude=['tests']),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    install_requires=[
        "redis"
    ]
) | 26.5625 | 57 | 0.647059 | import setuptools
# Load the README to use as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
def dependencies():
    """Return the dependency list read from requirements.txt.

    Returns:
        list[str]: one requirement specifier per line, in file order
        (blank lines, if any, are preserved as empty strings).
    """
    # The original placed this docstring after an unused `import os`,
    # turning it into a dead string expression; both issues fixed here.
    # NOTE: the path is relative, so this must be called from the project
    # root (where setup.py is normally invoked).
    with open('requirements.txt') as reqs:
        return reqs.read().splitlines()
# Distribution metadata. NOTE(review): install_requires is hard-coded;
# the dependencies() helper defined above is never used -- verify which
# source of truth (requirements.txt vs this list) is intended.
setuptools.setup(
    name="choixpeau",
    version="0.0.9",
    author="Keurcien Luu",
    author_email="keurcien@appchoose.io",
    description="Efficiently assign users to buckets.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # Ship every package found in the tree except the test suite.
    packages=setuptools.find_packages(exclude=['tests']),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    install_requires=[
        "redis"
    ]
) | 162 | 0 | 23 |
8c35737c2c0e2b1af5a4b10af8ccba507d65c08e | 2,531 | py | Python | sc2g/sc2g/env/attack/multi_attack.py | kiriphorito/COMP3096---MARL | 5e05413b0980d60f4a3f2a17123178c93bb0b763 | [
"Apache-2.0"
] | 1 | 2018-07-07T09:02:27.000Z | 2018-07-07T09:02:27.000Z | sc2g/sc2g/env/attack/multi_attack.py | kiriphorito/COMP3096---MARL | 5e05413b0980d60f4a3f2a17123178c93bb0b763 | [
"Apache-2.0"
] | null | null | null | sc2g/sc2g/env/attack/multi_attack.py | kiriphorito/COMP3096---MARL | 5e05413b0980d60f4a3f2a17123178c93bb0b763 | [
"Apache-2.0"
] | null | null | null | #================================
# RESEARCH GROUP PROJECT [RGP]
#================================
# This file is part of the COMP3096 Research Group Project.
# System
import logging
# Gym Imports
import gym
from gym.spaces import Box, Discrete, Tuple
# PySC2 Imports
from pysc2.lib.actions import FUNCTIONS, FunctionCall
from pysc2.lib.features import SCREEN_FEATURES
# Numpy
import numpy as np
# Typing
from typing import List
from sc2g.env.unit_tracking import UnitTrackingEnv
# Setup
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
| 35.152778 | 145 | 0.667325 | #================================
# RESEARCH GROUP PROJECT [RGP]
#================================
# This file is part of the COMP3096 Research Group Project.
# System
import logging
# Gym Imports
import gym
from gym.spaces import Box, Discrete, Tuple
# PySC2 Imports
from pysc2.lib.actions import FUNCTIONS, FunctionCall
from pysc2.lib.features import SCREEN_FEATURES
# Numpy
import numpy as np
# Typing
from typing import List
from sc2g.env.unit_tracking import UnitTrackingEnv
# Setup
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class TupleEx(Tuple):
    """gym Tuple space that additionally exposes ``n``: the product of the
    ``n`` attributes of its sub-spaces (the size of the joint action space)."""

    def __init__(self, spaces):
        super().__init__(spaces)
        total = 1
        for space in spaces:
            total *= space.n
        self.n = total
class MultiAttackEnv(UnitTrackingEnv):
    """SC2 environment where one flat Discrete action encodes both which
    marine to command (index) and the screen coordinate to send it to."""

    def __init__(self, sc2_env, **kwargs):
        """Set up observation/action spaces for a fixed squad of 9 marines."""
        super().__init__(sc2_env, **kwargs)
        # Number of marines (hardcoded)
        self.number_of_marines = 9
        # Specify observation and action space
        screen_shape_observation = self.screen_shape + (1,)
        self.observation_space = Box(low=0, high=SCREEN_FEATURES.player_relative.scale, shape=screen_shape_observation)
        self.resolution = self.screen_shape[0] * self.screen_shape[1]  # (width x height)
        # One Discrete slot per (marine, pixel) pair.
        self.action_space = Discrete(self.resolution * self.number_of_marines)
        self.unravel_shape = (self.screen_shape[0], self.screen_shape[1]) * self.number_of_marines

    def get_sc2_action(self, gym_action) -> List[FunctionCall]:
        """Decode the flat integer action into a per-unit move order.

        Returns a one-element list: either a no_op (when the targeted unit
        is no longer alive) or a move_unit command for the decoded coords.
        """
        # Get coords by unravelling action. DQN only supports returning an integer as action.
        # How unravel works:
        # Ref: https://www.quora.com/What-is-a-simple-intuitive-example-for-the-unravel_index-in-Python
        # idx selects the marine; the remainder is the flattened pixel index.
        idx = int(gym_action / self.resolution)
        coords = gym_action % self.resolution
        # NOTE(review): unravel_shape repeats (h, w) per marine but only the
        # first two components of the result are used -- verify intended.
        coords = np.unravel_index(coords, self.unravel_shape)
        coords = (coords[0], coords[1])
        target_unit = self.state['player_units_stable'][idx]
        target_tag = target_unit.tag.item()
        player_unit_tags = [unit.tag.item() for unit in self.state["player_units"]]  # .item() to convert numpy.int64 to native python type (int)
        # PySC2 uses different conventions for observations (y,x) and actions (x,y)
        # ::-1 reverses the tuple i.e. (1,2) becomes (2,1)
        if target_tag not in player_unit_tags:
            actions = [FUNCTIONS.no_op()]
        else:
            actions = [FUNCTIONS.move_unit(target_tag, "now", coords[::-1])]
        return actions
| 1,826 | 17 | 125 |
e041767572c63ac66e2d0df131aae92a56d35ce7 | 776 | py | Python | scripts/initialize_session.py | jspanos/ttrv | 45838659a8184a33d02560e265ad7c3ce05e83e2 | [
"MIT"
] | 248 | 2019-12-12T08:09:27.000Z | 2021-11-14T22:28:01.000Z | scripts/initialize_session.py | jspanos/ttrv | 45838659a8184a33d02560e265ad7c3ce05e83e2 | [
"MIT"
] | 29 | 2019-12-29T20:43:25.000Z | 2021-05-14T07:06:55.000Z | scripts/initialize_session.py | jspanos/ttrv | 45838659a8184a33d02560e265ad7c3ce05e83e2 | [
"MIT"
] | 30 | 2020-03-05T12:59:59.000Z | 2021-08-08T07:02:53.000Z | """
Initialize an authenticated instance of PRAW to interact with.
$ python -i initialize_session.py
"""
from ttrv.docs import AGENT
from ttrv.packages import praw
from ttrv.content import RequestHeaderRateLimiter
from ttrv.config import Config
# Load TTRV's config and the previously saved OAuth refresh token.
config = Config()
config.load_refresh_token()
# Build a PRAW session with TTRV's user agent and rate limiter.
reddit = praw.Reddit(
    user_agent=AGENT.format(version='test_session'),
    decode_html_entities=False,
    disable_update_check=True,
    timeout=10,  # 10 second request timeout
    handler=RequestHeaderRateLimiter())
# Authenticate with the stored OAuth app credentials and refresh token.
reddit.set_oauth_app_info(
    config['oauth_client_id'],
    config['oauth_client_secret'],
    config['oauth_redirect_uri'])
reddit.refresh_access_information(config.refresh_token)
# Pre-fetch the first 20 inbox items so they are handy when this script
# is run with `python -i` for interactive exploration.
inbox = reddit.get_inbox()
items = [next(inbox) for _ in range(20)]
pass
| 25.032258 | 62 | 0.771907 | """
Initialize an authenticated instance of PRAW to interact with.
$ python -i initialize_session.py
"""
from ttrv.docs import AGENT
from ttrv.packages import praw
from ttrv.content import RequestHeaderRateLimiter
from ttrv.config import Config
# Load TTRV's config and the previously saved OAuth refresh token.
config = Config()
config.load_refresh_token()
# Build a PRAW session with TTRV's user agent and rate limiter.
reddit = praw.Reddit(
    user_agent=AGENT.format(version='test_session'),
    decode_html_entities=False,
    disable_update_check=True,
    timeout=10,  # 10 second request timeout
    handler=RequestHeaderRateLimiter())
# Authenticate with the stored OAuth app credentials and refresh token.
reddit.set_oauth_app_info(
    config['oauth_client_id'],
    config['oauth_client_secret'],
    config['oauth_redirect_uri'])
reddit.refresh_access_information(config.refresh_token)
# Pre-fetch the first 20 inbox items so they are handy when this script
# is run with `python -i` for interactive exploration.
inbox = reddit.get_inbox()
items = [next(inbox) for _ in range(20)]
pass
| 0 | 0 | 0 |
ce24a308eade4585492f2aa251d512f761a3c6c0 | 963 | py | Python | examples/plot_skewt.py | zssherman/ACT | db87008aa6649d3d21b79ae97ea0f11d7f1f1935 | [
"BSD-3-Clause"
] | 62 | 2020-01-13T19:48:49.000Z | 2022-03-22T07:56:37.000Z | examples/plot_skewt.py | zssherman/ACT | db87008aa6649d3d21b79ae97ea0f11d7f1f1935 | [
"BSD-3-Clause"
] | 215 | 2020-01-07T20:17:11.000Z | 2022-03-31T18:49:57.000Z | examples/plot_skewt.py | zssherman/ACT | db87008aa6649d3d21b79ae97ea0f11d7f1f1935 | [
"BSD-3-Clause"
] | 16 | 2020-01-13T21:25:55.000Z | 2022-03-26T18:01:29.000Z | """
Example on how to plot a Skew-T plot of a sounding
--------------------------------------------------
This example shows how to make a Skew-T plot from a sounding
and calculate stability indicies. METPy needs to be installed
in order to run this example
"""
import act
from matplotlib import pyplot as plt
# MetPy is an optional dependency for this example; skip the whole demo
# gracefully when it is not installed.
try:
    import metpy
    METPY = True
except ImportError:
    METPY = False
if METPY:
    # Read data
    sonde_ds = act.io.armfiles.read_netcdf(
        act.tests.sample_files.EXAMPLE_SONDE1)
    print(list(sonde_ds))
    # Calculate stability indicies
    sonde_ds = act.retrievals.calculate_stability_indicies(
        sonde_ds, temp_name="tdry", td_name="dp", p_name="pres",
        rh_name='rh')
    print(sonde_ds["lifted_index"])
    # Set up plot
    skewt = act.plotting.SkewTDisplay(sonde_ds, figsize=(15, 10))
    # Add data
    skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp')
    sonde_ds.close()
    plt.show()
| 24.075 | 69 | 0.650052 | """
Example on how to plot a Skew-T plot of a sounding
--------------------------------------------------
This example shows how to make a Skew-T plot from a sounding
and calculate stability indicies. METPy needs to be installed
in order to run this example
"""
import act
from matplotlib import pyplot as plt
# MetPy is an optional dependency for this example; skip the whole demo
# gracefully when it is not installed.
try:
    import metpy
    METPY = True
except ImportError:
    METPY = False
if METPY:
    # Read data
    sonde_ds = act.io.armfiles.read_netcdf(
        act.tests.sample_files.EXAMPLE_SONDE1)
    print(list(sonde_ds))
    # Calculate stability indicies
    sonde_ds = act.retrievals.calculate_stability_indicies(
        sonde_ds, temp_name="tdry", td_name="dp", p_name="pres",
        rh_name='rh')
    print(sonde_ds["lifted_index"])
    # Set up plot
    skewt = act.plotting.SkewTDisplay(sonde_ds, figsize=(15, 10))
    # Add data
    skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp')
    sonde_ds.close()
    plt.show()
| 0 | 0 | 0 |
bbfa359d168f09d85503210267ca9eeee11d12a2 | 1,555 | py | Python | geneClassification/predotarOutput.py | jsharbrough/CyMIRA_gene_classification | 8c2766b970a2441dae26c35fefa28319f7aa96ef | [
"MIT"
] | null | null | null | geneClassification/predotarOutput.py | jsharbrough/CyMIRA_gene_classification | 8c2766b970a2441dae26c35fefa28319f7aa96ef | [
"MIT"
] | null | null | null | geneClassification/predotarOutput.py | jsharbrough/CyMIRA_gene_classification | 8c2766b970a2441dae26c35fefa28319f7aa96ef | [
"MIT"
] | null | null | null | import sys
predotarOutput(sys.argv[1],sys.argv[2])
| 35.340909 | 87 | 0.470096 | import sys
def predotarOutput(predotar, fasta):
    """Print per-gene Predotar localization calls as tab-separated text.

    Reads gene IDs (in order) from *fasta* -- the first space-delimited
    token of each '>' header, minus the '>' -- and scores from *predotar*,
    a tab-separated file of (gene, mito, plastid, ER, none) rows. Writes a
    '#Gene\\tpredotar Prediction' header plus one line per gene to stdout.

    Predictions: 'dual' (mito and plastid both beat ER/none),
    'mitochondrial', 'plastid', 'non-organellar', or 'N/A' for rows that
    Predotar flagged with 'Discarding'.
    """
    # Collect gene IDs from the FASTA headers, preserving file order.
    # rstrip replaces the original char-by-char strip loop, which raised
    # IndexError on blank lines.
    geneList = []
    with open(fasta, 'r') as infile:
        for line in infile:
            if line.startswith('>'):
                gene = line.rstrip('\t\n\r').split(' ')[0]
                geneList.append(gene[1:])
    genes = set(geneList)  # O(1) membership; geneList keeps the order
    # Map each known gene to its predicted localization.
    predDict = {}
    with open(predotar, 'r') as infile:
        for line in infile:
            lineSplit = line.rstrip('\t\n\r').split('\t')
            if lineSplit[0] not in genes:
                continue
            pred = 'N/A'  # rows flagged 'Discarding' keep this placeholder
            if 'Discarding' not in line:
                mt = float(lineSplit[1])
                pt = float(lineSplit[2])
                er = float(lineSplit[3])
                none = float(lineSplit[4])
                mito = mt > er and mt > none
                plastid = pt > er and pt > none
                if mito and plastid:
                    pred = 'dual'
                elif mito:
                    pred = 'mitochondrial'
                elif plastid:
                    pred = 'plastid'
                else:
                    pred = 'non-organellar'
            predDict[lineSplit[0]] = pred
    # Emit the table. A gene present in the FASTA but absent from the
    # Predotar file raises KeyError, matching the original behaviour.
    sys.stdout.write('#Gene\tpredotar Prediction\n')
    for gene in geneList:
        sys.stdout.write(gene + '\t' + predDict[gene] + '\n')
predotarOutput(sys.argv[1],sys.argv[2])
| 1,481 | 0 | 22 |
1651ce50f2518d0d6646b45b02486f4b9c277ef0 | 2,634 | py | Python | preprocess_reddit_nov.py | sara-02/TiDeH | 1c5e58bf1b64128a6d2d538dcc71abac98c4c358 | [
"MIT"
] | 1 | 2019-09-19T17:56:54.000Z | 2019-09-19T17:56:54.000Z | preprocess_reddit_nov.py | sara-02/TiDeH | 1c5e58bf1b64128a6d2d538dcc71abac98c4c358 | [
"MIT"
] | null | null | null | preprocess_reddit_nov.py | sara-02/TiDeH | 1c5e58bf1b64128a6d2d538dcc71abac98c4c358 | [
"MIT"
] | null | null | null | """
Author: Sarah Masud
Copyright (c): Sarah Masud
"""
import json
import numpy as np
from datetime import datetime, timedelta
import os
import sys
# Convert the selected November Reddit discussions into per-post event
# files -- one "<hours-since-post> 1" line per comment within 30 days of
# its post -- and record per-subreddit qualification counts.
main_dir = os.path.join("data", "reddit_data")
with open(os.path.join(main_dir, "selected_discussion_nov.jsonlist"),
          "r") as f:
    data = f.readlines()
n = len(data)  # one JSON object per line
print(n)
sys.stdout.flush()
subreddit_stats = {}
input_dir_path = os.path.join("data", "reddit_data", "NOV_INPUT")
output_dir_path = os.path.join("data", "reddit_data", "NOV_OUTPUT")
output_hour_dir_path = os.path.join("data", "reddit_data", "NOV_OUTPUT_HOUR")
for i in range(n):
    print(i)
    sys.stdout.flush()
    # Each line holds a single top-level key (presumably the subreddit
    # name) mapping to its list of posts -- TODO confirm against the data.
    each_reddit = json.loads(data[i])
    key = list(each_reddit.keys())[0]
    each_reddit = each_reddit[key]
    subreddit_stats[i] = {}
    subreddit_stats[i]['total'] = len(each_reddit)
    atleast_1 = 0   # posts with >= 10 raw comments
    atleast_10 = 0  # posts with >= 10 comments inside the 30-day window
    # Pre-create per-subreddit directories (os.mkdir fails if they exist).
    sub_dir_path = os.path.join(input_dir_path, str(i))
    os.mkdir(sub_dir_path)
    out_sub_dir_path = os.path.join(output_dir_path, str(i))
    os.mkdir(out_sub_dir_path)
    out_hour_sub_dir_path = os.path.join(output_hour_dir_path, str(i))
    os.mkdir(out_hour_sub_dir_path)
    for index, each_post in enumerate(each_reddit):
        event_list_temp = []  # (re-initialised below; this one is redundant)
        # Cheap pre-filter on the raw comment count.
        if len(each_post['comments']) < 10:
            continue
        d1 = datetime.fromtimestamp(each_post['created_utc'])
        event_list_temp = []
        for each_comment in each_post['comments']:
            d2 = datetime.fromtimestamp(each_comment['created_utc'])
            # Only keep comments within 30 days of the post. The `break`
            # assumes comments are time-ordered -- TODO verify.
            if d2 > d1 + timedelta(days=30):
                break
            td = d2 - d1
            td = td.total_seconds() / 3600  # in hours
            event_list_temp.append(str(td) + " " + "1")
        l = len(event_list_temp)
        if l < 10:
            atleast_1 += 1
            continue
        # Header: event count (post + comments) and post timestamp, then
        # the post itself at offset 0.0, then the comment events.
        event_list = []
        event_list.append(str(l + 1) + " " + str(each_post['created_utc']))
        event_list.append("0.0 1")
        event_list.extend(event_list_temp)
        atleast_1 += 1
        atleast_10 += 1
        file_path = os.path.join(sub_dir_path, str(index) + ".txt")
        with open(file_path, "w") as f:
            for each_line in event_list:
                f.write(each_line + "\n")
    if atleast_10 == 0:
        # No post qualified: remove the (still empty) directories again.
        print("10-0", i)
        sys.stdout.flush()
        os.rmdir(sub_dir_path)
        os.rmdir(out_sub_dir_path)
        os.rmdir(out_hour_sub_dir_path)
    subreddit_stats[i]['atleast_1'] = atleast_1
    subreddit_stats[i]['atleast_10'] = atleast_10
with open(os.path.join(main_dir, "subreddit_stats_nov_10.json"), "w") as f:
    json.dump(subreddit_stats, f, indent=True)
| 33.341772 | 77 | 0.628702 | """
Author: Sarah Masud
Copyright (c): Sarah Masud
"""
import json
import numpy as np
from datetime import datetime, timedelta
import os
import sys
# Convert the selected November Reddit discussions into per-post event
# files -- one "<hours-since-post> 1" line per comment within 30 days of
# its post -- and record per-subreddit qualification counts.
main_dir = os.path.join("data", "reddit_data")
with open(os.path.join(main_dir, "selected_discussion_nov.jsonlist"),
          "r") as f:
    data = f.readlines()
n = len(data)  # one JSON object per line
print(n)
sys.stdout.flush()
subreddit_stats = {}
input_dir_path = os.path.join("data", "reddit_data", "NOV_INPUT")
output_dir_path = os.path.join("data", "reddit_data", "NOV_OUTPUT")
output_hour_dir_path = os.path.join("data", "reddit_data", "NOV_OUTPUT_HOUR")
for i in range(n):
    print(i)
    sys.stdout.flush()
    # Each line holds a single top-level key (presumably the subreddit
    # name) mapping to its list of posts -- TODO confirm against the data.
    each_reddit = json.loads(data[i])
    key = list(each_reddit.keys())[0]
    each_reddit = each_reddit[key]
    subreddit_stats[i] = {}
    subreddit_stats[i]['total'] = len(each_reddit)
    atleast_1 = 0   # posts with >= 10 raw comments
    atleast_10 = 0  # posts with >= 10 comments inside the 30-day window
    # Pre-create per-subreddit directories (os.mkdir fails if they exist).
    sub_dir_path = os.path.join(input_dir_path, str(i))
    os.mkdir(sub_dir_path)
    out_sub_dir_path = os.path.join(output_dir_path, str(i))
    os.mkdir(out_sub_dir_path)
    out_hour_sub_dir_path = os.path.join(output_hour_dir_path, str(i))
    os.mkdir(out_hour_sub_dir_path)
    for index, each_post in enumerate(each_reddit):
        event_list_temp = []  # (re-initialised below; this one is redundant)
        # Cheap pre-filter on the raw comment count.
        if len(each_post['comments']) < 10:
            continue
        d1 = datetime.fromtimestamp(each_post['created_utc'])
        event_list_temp = []
        for each_comment in each_post['comments']:
            d2 = datetime.fromtimestamp(each_comment['created_utc'])
            # Only keep comments within 30 days of the post. The `break`
            # assumes comments are time-ordered -- TODO verify.
            if d2 > d1 + timedelta(days=30):
                break
            td = d2 - d1
            td = td.total_seconds() / 3600  # in hours
            event_list_temp.append(str(td) + " " + "1")
        l = len(event_list_temp)
        if l < 10:
            atleast_1 += 1
            continue
        # Header: event count (post + comments) and post timestamp, then
        # the post itself at offset 0.0, then the comment events.
        event_list = []
        event_list.append(str(l + 1) + " " + str(each_post['created_utc']))
        event_list.append("0.0 1")
        event_list.extend(event_list_temp)
        atleast_1 += 1
        atleast_10 += 1
        file_path = os.path.join(sub_dir_path, str(index) + ".txt")
        with open(file_path, "w") as f:
            for each_line in event_list:
                f.write(each_line + "\n")
    if atleast_10 == 0:
        # No post qualified: remove the (still empty) directories again.
        print("10-0", i)
        sys.stdout.flush()
        os.rmdir(sub_dir_path)
        os.rmdir(out_sub_dir_path)
        os.rmdir(out_hour_sub_dir_path)
    subreddit_stats[i]['atleast_1'] = atleast_1
    subreddit_stats[i]['atleast_10'] = atleast_10
with open(os.path.join(main_dir, "subreddit_stats_nov_10.json"), "w") as f:
    json.dump(subreddit_stats, f, indent=True)
| 0 | 0 | 0 |
0502816af4a4b97daf2785ad8619224dca136413 | 1,538 | py | Python | diventi/products/migrations/0025_auto_20191006_1746.py | flavoi/diven | 3173ca3ca3fbedc191b8eab3639a6bceb3c442c4 | [
"Apache-2.0"
] | 2 | 2019-06-27T16:00:17.000Z | 2020-08-14T07:46:05.000Z | diventi/products/migrations/0025_auto_20191006_1746.py | flavoi/diven | 3173ca3ca3fbedc191b8eab3639a6bceb3c442c4 | [
"Apache-2.0"
] | 26 | 2020-02-15T22:39:35.000Z | 2022-02-19T21:09:01.000Z | diventi/products/migrations/0025_auto_20191006_1746.py | flavoi/diven | 3173ca3ca3fbedc191b8eab3639a6bceb3c442c4 | [
"Apache-2.0"
] | 1 | 2021-11-12T22:30:15.000Z | 2021-11-12T22:30:15.000Z | # Generated by Django 2.2.4 on 2019-10-06 15:46
from django.db import migrations, models
| 34.954545 | 113 | 0.616385 | # Generated by Django 2.2.4 on 2019-10-06 15:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: sets the six product courtesy-message
    fields to blank=True; the _en/_it translation variants also null=True."""

    dependencies = [
        ('products', '0024_auto_20190813_0943'),
    ]

    operations = [
        # Long-form courtesy message (base field + en/it translations).
        migrations.AlterField(
            model_name='product',
            name='courtesy_message',
            field=models.TextField(blank=True, verbose_name='courtesy message'),
        ),
        migrations.AlterField(
            model_name='product',
            name='courtesy_message_en',
            field=models.TextField(blank=True, null=True, verbose_name='courtesy message'),
        ),
        migrations.AlterField(
            model_name='product',
            name='courtesy_message_it',
            field=models.TextField(blank=True, null=True, verbose_name='courtesy message'),
        ),
        # Short (max 50 chars) courtesy message variants.
        migrations.AlterField(
            model_name='product',
            name='courtesy_short_message',
            field=models.CharField(blank=True, max_length=50, verbose_name='short courtesy messages'),
        ),
        migrations.AlterField(
            model_name='product',
            name='courtesy_short_message_en',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='short courtesy messages'),
        ),
        migrations.AlterField(
            model_name='product',
            name='courtesy_short_message_it',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='short courtesy messages'),
        ),
    ]
| 0 | 1,424 | 23 |
fc6ed3dfe2f754e8db5400b7ba3bb37104a2a85b | 926 | py | Python | ibslib/io/extract.py | songsiwei/Ogre_r_sv | b31dc64133f082ae395196ebedf41c8cc825ebfd | [
"BSD-3-Clause"
] | null | null | null | ibslib/io/extract.py | songsiwei/Ogre_r_sv | b31dc64133f082ae395196ebedf41c8cc825ebfd | [
"BSD-3-Clause"
] | null | null | null | ibslib/io/extract.py | songsiwei/Ogre_r_sv | b31dc64133f082ae395196ebedf41c8cc825ebfd | [
"BSD-3-Clause"
] | null | null | null |
from ibslib.io import aims_extractor
__author__='Manny Bier'
def extract(struct_dir, extractor="aims", extractor_kwargs=None):
    """
    Purpose is to extract information from a specific directory format.
    For example, extract FHI-aims calculation directories to a Structure
    json file.

    Arguments
    ---------
    struct_dir: path
        Path to the directory that information will be extracted from
    extractor: str
        Extraction method to use; only "aims" is currently supported.
    extractor_kwargs: dict
        Dictionary of keyword arguments which will be passed to the extraction
        process.
    """
    # Default of None avoids the shared mutable-default pitfall of `={}`.
    if extractor_kwargs is None:
        extractor_kwargs = {}
    if extractor == "aims":
        return aims_extractor.extract(struct_dir, extractor_kwargs)
    # The original fell through to an UnboundLocalError here; fail clearly.
    raise ValueError("Unknown extractor: {}".format(extractor))
# Ad-hoc manual test: extract a local FHI-aims relaxation directory.
if __name__ == "__main__":
    struct_dir = "/Users/ibier/Research/Results/Hab_Project/FUQJIK/2_mpc/Genarris/Relaxation"
    result = extract(struct_dir, extractor="aims")
| 23.15 | 93 | 0.665227 |
from ibslib.io import aims_extractor
__author__='Manny Bier'
def extract(struct_dir, extractor="aims", extractor_kwargs=None):
    """
    Purpose is to extract information from a specific directory format.
    For example, extract FHI-aims calculation directories to a Structure
    json file.

    Arguments
    ---------
    struct_dir: path
        Path to the directory that information will be extracted from
    extractor: str
        Extraction method to use; only "aims" is currently supported.
    extractor_kwargs: dict
        Dictionary of keyword arguments which will be passed to the extraction
        process.
    """
    # Default of None avoids the shared mutable-default pitfall of `={}`.
    if extractor_kwargs is None:
        extractor_kwargs = {}
    if extractor == "aims":
        return aims_extractor.extract(struct_dir, extractor_kwargs)
    # The original fell through to an UnboundLocalError here; fail clearly.
    raise ValueError("Unknown extractor: {}".format(extractor))
# Ad-hoc manual test: extract a local FHI-aims relaxation directory.
if __name__ == "__main__":
    struct_dir = "/Users/ibier/Research/Results/Hab_Project/FUQJIK/2_mpc/Genarris/Relaxation"
    result = extract(struct_dir, extractor="aims")
| 0 | 0 | 0 |
ca55b1b738ba3f6e5e18c6b4b70c0664ddb85b58 | 340 | py | Python | TelegramEDT/EDTscoped_session.py | flifloo/TelegramEDT | 299dc340af31c4f7e6c601f133a5c10d6396225f | [
"MIT"
] | 1 | 2019-09-19T08:06:13.000Z | 2019-09-19T08:06:13.000Z | TelegramEDT/EDTscoped_session.py | flifloo/TelegramEDT | 299dc340af31c4f7e6c601f133a5c10d6396225f | [
"MIT"
] | 11 | 2019-09-20T10:27:30.000Z | 2021-09-08T01:19:58.000Z | TelegramEDT/EDTscoped_session.py | flifloo/TelegramEDT | 299dc340af31c4f7e6c601f133a5c10d6396225f | [
"MIT"
] | null | null | null | from sqlalchemy.orm import scoped_session as ss
| 26.153846 | 60 | 0.726471 | from sqlalchemy.orm import scoped_session as ss
class scoped_session:
    """Context-manager wrapper around SQLAlchemy's ``scoped_session``
    (imported as ``ss``): ``with`` yields the session registry and
    ``remove()`` is called on exit so the scoped session is disposed of."""

    def __init__(self, session_factory, scopefunc=None):
        self.scoped_session = ss(session_factory, scopefunc)

    def __enter__(self):
        return self.scoped_session

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Returns None, so exceptions raised inside the block propagate.
        self.scoped_session.remove()
| 188 | 0 | 103 |
22ea2593ed65592652a2e72b69383a67c861d53c | 356 | py | Python | lx145.py | ggznmg/spider | 04cc769f35ee14e56f05dd10d8d8ee6cacd574ce | [
"Apache-2.0"
] | null | null | null | lx145.py | ggznmg/spider | 04cc769f35ee14e56f05dd10d8d8ee6cacd574ce | [
"Apache-2.0"
] | null | null | null | lx145.py | ggznmg/spider | 04cc769f35ee14e56f05dd10d8d8ee6cacd574ce | [
"Apache-2.0"
] | null | null | null | import re
# Demo: pull three capture groups out of a fixed sample string.
# re.S lets '.' match newlines too (harmless here: the text is one line).
typ = 'Extra stings Hello 2134567 World_This is a Regex Demo Extra stings'
pattern = '(Extra) stings Hello 2134567 (.*) is a Regex Demo Extra (.*)'
result = re.search(pattern, typ, re.S)
print(result.group(1, 2, 3))
# type="submit" id="su" value="百度一下" class="bg s_btn"></span><span class="tools"><span id="mHolder"><div id="mCon"><span>输入法</span></div><ul id="mMenu"> | 59.333333 | 152 | 0.685393 | import re
# Demo: pull three capture groups out of a fixed sample string.
# re.S lets '.' match newlines too (harmless here: the text is one line).
typ = 'Extra stings Hello 2134567 World_This is a Regex Demo Extra stings'
pattern = '(Extra) stings Hello 2134567 (.*) is a Regex Demo Extra (.*)'
result = re.search(pattern, typ, re.S)
print(result.group(1, 2, 3))
# type="submit" id="su" value="百度一下" class="bg s_btn"></span><span class="tools"><span id="mHolder"><div id="mCon"><span>输入法</span></div><ul id="mMenu"> | 0 | 0 | 0 |
c79999ff35a192bcfb8a5e9dacb937e75a79e418 | 1,030 | py | Python | .idea/VirtualEnvironment/Lib/site-packages/hstest/dynamic/input/input_handler.py | Vladpetr/NewsPortal | cd4127fbc09d9c8f5e65c8ae699856c6d380a320 | [
"Apache-2.0"
] | null | null | null | .idea/VirtualEnvironment/Lib/site-packages/hstest/dynamic/input/input_handler.py | Vladpetr/NewsPortal | cd4127fbc09d9c8f5e65c8ae699856c6d380a320 | [
"Apache-2.0"
] | 5 | 2021-04-08T22:02:15.000Z | 2022-02-10T14:53:45.000Z | .idea/VirtualEnvironment/Lib/site-packages/hstest/dynamic/input/input_handler.py | Vladpetr/NewsPortal | cd4127fbc09d9c8f5e65c8ae699856c6d380a320 | [
"Apache-2.0"
] | null | null | null | import io
import sys
from typing import List
from hstest.dynamic.input.dynamic_input_func import DynamicTestFunction, DynamicInputFunction
from hstest.dynamic.input.input_mock import InputMock
| 25.121951 | 93 | 0.68835 | import io
import sys
from typing import List
from hstest.dynamic.input.dynamic_input_func import DynamicTestFunction, DynamicInputFunction
from hstest.dynamic.input.input_mock import InputMock
class InputHandler:
    """Swaps sys.stdin for an InputMock so tests can feed scripted input,
    and restores the real stream afterwards. All state is class-level, so
    there is one shared mock per process."""

    real_in: io.TextIOWrapper = sys.stdin  # genuine stdin, saved at import time
    mock_in: InputMock = InputMock()

    @staticmethod
    def replace_input():
        # Route all stdin reads to the mock.
        sys.stdin = InputHandler.mock_in

    @staticmethod
    def revert_input():
        # Restore the stdin that was captured when this module was imported.
        sys.stdin = InputHandler.real_in

    @staticmethod
    def set_dynamic_input_func(func: DynamicTestFunction):
        # Forward the scripted-input function to the shared mock.
        InputHandler.mock_in.set_dynamic_input_func(func)

    @staticmethod
    def set_input(text: str):
        """
        Deprecated
        """
        InputHandler.mock_in.provide_text(text)

    @staticmethod
    def set_input_funcs(input_funcs: List[DynamicInputFunction]):
        """
        Deprecated
        """
        # Re-wrap each function to decouple the mock from caller objects.
        InputHandler.mock_in.set_texts([
            DynamicInputFunction(func.trigger_count, func.input_function)
            for func in input_funcs
        ])
| 170 | 642 | 23 |
11fca35b31758384c9ae077df6a74e7efd6e35cd | 536 | py | Python | ex_starmap.py | xuqinghan/learn-RxPY | 92735036ead3bd8874010546a17564a94d596be1 | [
"MIT"
] | null | null | null | ex_starmap.py | xuqinghan/learn-RxPY | 92735036ead3bd8874010546a17564a94d596be1 | [
"MIT"
] | null | null | null | ex_starmap.py | xuqinghan/learn-RxPY | 92735036ead3bd8874010546a17564a94d596be1 | [
"MIT"
] | null | null | null | import rx
from rx import operators as ops
import operator
def demo_starmap():
    '''tuple unpacking'''
    # zip pairs up the elements of a and b; starmap unpacks each pair
    # into operator.mul, printing 2, 4, 12, 16 (one value per line).
    a = rx.of(1, 2, 3, 4)
    b = rx.of(2, 2, 4, 4)
    a.pipe(
        ops.zip(b),
        ops.starmap(operator.mul)
    ).subscribe(print)
if __name__ == '__main__':
#demo_zip()
demo_starmap() | 19.142857 | 63 | 0.550373 | import rx
from rx import operators as ops
import operator
def demo_zip():
    """Multiply two observables pairwise: zip then index into each tuple."""
    a = rx.of(1, 2, 3, 4)
    b = rx.of(2, 2, 4, 4)
    a.pipe(
        ops.zip(b),  # returns a tuple with the items of a and b
        ops.map(lambda z: operator.mul(z[0], z[1]))
    ).subscribe(print)
def demo_starmap():
    '''tuple unpacking'''
    # Same pairwise multiply as demo_zip, but starmap unpacks each zipped
    # tuple straight into operator.mul -- prints 2, 4, 12, 16.
    a = rx.of(1, 2, 3, 4)
    b = rx.of(2, 2, 4, 4)
    a.pipe(
        ops.zip(b),
        ops.starmap(operator.mul)
    ).subscribe(print)
if __name__ == '__main__':
#demo_zip()
demo_starmap() | 199 | 0 | 23 |
52600e1523588ffc964ef10450c34f10c615eb26 | 1,543 | py | Python | tests/integration/custom_resources/ec2/test_nat_gateway.py | CloudWanderer-io/CloudWanderer | bad89c771cebe931790347afb49aa3bd046f3467 | [
"MIT"
] | 16 | 2020-12-22T17:01:48.000Z | 2022-01-21T10:37:14.000Z | tests/integration/custom_resources/ec2/test_nat_gateway.py | CloudWanderer-io/CloudWanderer | bad89c771cebe931790347afb49aa3bd046f3467 | [
"MIT"
] | 110 | 2020-12-07T21:55:48.000Z | 2022-01-11T12:10:49.000Z | tests/integration/custom_resources/ec2/test_nat_gateway.py | CloudWanderer-io/CloudWanderer | bad89c771cebe931790347afb49aa3bd046f3467 | [
"MIT"
] | 2 | 2021-12-23T21:09:23.000Z | 2021-12-23T22:25:24.000Z | import unittest
from cloudwanderer import URN
from ..helpers import CloudWandererCalls, ExpectedCall, MultipleResourceScenario, NoMotoMock, SingleResourceScenario
| 32.829787 | 119 | 0.605962 | import unittest
from cloudwanderer import URN
from ..helpers import CloudWandererCalls, ExpectedCall, MultipleResourceScenario, NoMotoMock, SingleResourceScenario
class TestNatGateways(NoMotoMock, unittest.TestCase):
    """Scenario tests for the custom ec2 nat_gateway resource: boto3's
    describe_nat_gateways is mocked to return one fixed gateway."""

    # Canned DescribeNatGateways entry shared by both scenario types.
    nat_gateway_payload = {
        "CreateTime": "2021-04-13T09:39:49.000Z",
        "NatGatewayAddresses": [
            {
                "AllocationId": "eipalloc-11111111111111111",
                "NetworkInterfaceId": "eni-11111111111111111",
                "PrivateIp": "10.10.10.78",
            }
        ],
        "NatGatewayId": "nat-11111111111111111",
        "State": "pending",
        "SubnetId": "subnet-11111111",
        "VpcId": "vpc-11111111",
        "Tags": [{"Key": "Name", "Value": "test-gateway"}],
    }
    # Patch spec consumed by NoMotoMock: service -> boto3 client attrs.
    mock = {
        "ec2": {
            "describe_nat_gateways.return_value": {"NatGateways": [nat_gateway_payload]},
        }
    }
    # Lookup by URN must issue exactly one filtered describe call.
    single_resource_scenarios = [
        SingleResourceScenario(
            urn=URN.from_string("urn:aws:123456789012:eu-west-2:ec2:nat_gateway:nat-11111111111111111"),
            expected_results=[nat_gateway_payload],
            expected_call=ExpectedCall(
                "ec2", "describe_nat_gateways", [], {"NatGatewayIds": ["nat-11111111111111111"]}
            ),
        )
    ]
    # Region-wide discovery of the nat_gateway resource type.
    multiple_resource_scenarios = [
        MultipleResourceScenario(
            arguments=CloudWandererCalls(regions=["eu-west-2"], service_names=["ec2"], resource_types=["nat_gateway"]),
            expected_results=[nat_gateway_payload],
        )
    ]
| 0 | 1,354 | 23 |
2aa0db54d2d69fc692c1dbb0b9145923a218fff6 | 13,764 | py | Python | app.py | tmt-uet/image_captioning | 7b2d4ad565f9276e6112009bbd263378c9a94d1a | [
"MIT"
] | null | null | null | app.py | tmt-uet/image_captioning | 7b2d4ad565f9276e6112009bbd263378c9a94d1a | [
"MIT"
] | null | null | null | app.py | tmt-uet/image_captioning | 7b2d4ad565f9276e6112009bbd263378c9a94d1a | [
"MIT"
] | null | null | null |
import os
from flask import Flask, json, Response, request, render_template, send_file, jsonify, send_from_directory
from werkzeug.utils import secure_filename
import requests
from flask_cors import CORS
from datetime import datetime
import torch
import torch.nn.functional as F
import numpy as np
import json
import torchvision.transforms as transforms
# import matplotlib.pyplot as plt
# import matplotlib.cm as cm
import skimage.transform
import argparse
from scipy.misc import imread, imresize
from PIL import Image
import shutil
from PIL import Image
# Make newly created tensors default to CUDA, then pick the runtime device.
# NOTE(review): the default tensor type is forced to CUDA even when `device`
# falls back to "cpu" below -- verify behaviour on CPU-only hosts.
torch.set_default_tensor_type('torch.cuda.FloatTensor')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = 'cpu'
print('-------------', device)
def caption_image_beam_search(encoder, decoder, image_path, word_map, beam_size):
    """
    Reads an image and captions it with beam search.

    :param encoder: encoder model
    :param decoder: decoder model
    :param image_path: path to image
    :param word_map: word map (word -> index); must contain '<start>' and '<end>'
    :param beam_size: number of sequences to consider at each decode-step
    :return: caption (list of word indices), attention weights for visualization
    """
    k = beam_size
    vocab_size = len(word_map)

    # Read image and process; grayscale images are stacked to 3 channels.
    img = imread(image_path)
    if len(img.shape) == 2:
        img = img[:, :, np.newaxis]
        img = np.concatenate([img, img, img], axis=2)
    img = img.transpose(2, 0, 1)
    img = img / 255.
    img = torch.FloatTensor(img).to(device)
    # ImageNet normalization -- the encoder is a CNN pretrained on ImageNet.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([normalize])
    image = transform(img)  # (3, H, W)

    # Encode
    image = image.unsqueeze(0)  # (1, 3, H, W)
    # (1, enc_image_size, enc_image_size, encoder_dim)
    encoder_out = encoder(image)
    enc_image_size = encoder_out.size(1)
    encoder_dim = encoder_out.size(3)

    # Flatten encoding to (1, num_pixels, encoder_dim)
    encoder_out = encoder_out.view(1, -1, encoder_dim)
    num_pixels = encoder_out.size(1)

    # Treat the problem as having a batch size of k (one row per beam).
    encoder_out = encoder_out.expand(k, num_pixels, encoder_dim)

    # Top k previous words at each step; initially all <start>.  (k, 1)
    k_prev_words = torch.LongTensor(
        [[word_map['<start>']]] * k).to(device)
    # Top k sequences so far.  (k, 1)
    seqs = k_prev_words
    # Scores of the top k sequences.  (k, 1)
    top_k_scores = torch.zeros(k, 1).to(device)
    # Attention maps of the top k sequences.  (k, 1, enc, enc)
    seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(device)

    # Completed sequences, their attention maps and their scores.
    complete_seqs = list()
    complete_seqs_alpha = list()
    complete_seqs_scores = list()

    # Start decoding.  Below, s is the number of still-active beams (<= k);
    # sequences leave the beam once they generate <end>.
    step = 1
    h, c = decoder.init_hidden_state(encoder_out)
    while True:
        embeddings = decoder.embedding(
            k_prev_words).squeeze(1)  # (s, embed_dim)
        # (s, encoder_dim), (s, num_pixels)
        awe, alpha = decoder.attention(encoder_out, h)
        # (s, enc_image_size, enc_image_size)
        alpha = alpha.view(-1, enc_image_size, enc_image_size)
        # gating scalar, (s, encoder_dim)
        gate = decoder.sigmoid(decoder.f_beta(h))
        awe = gate * awe
        h, c = decoder.decode_step(
            torch.cat([embeddings, awe], dim=1), (h, c))  # (s, decoder_dim)
        scores = decoder.fc(h)  # (s, vocab_size)
        scores = F.log_softmax(scores, dim=1)
        # Accumulate log-probabilities along each beam.
        scores = top_k_scores.expand_as(scores) + scores  # (s, vocab_size)

        if step == 1:
            # All k beams are identical at the first step, so expand row 0 only.
            top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)  # (s)
        else:
            # Unroll and find top scores, and their unrolled indices.  (s)
            top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)

        # Convert unrolled indices to (beam index, word index).
        # NOTE: true division ('/') on integer tensors yields float tensors on
        # PyTorch >= 1.5, which breaks the tensor indexing below -- floor
        # division is required here.
        prev_word_inds = top_k_words // vocab_size  # (s)
        next_word_inds = top_k_words % vocab_size  # (s)

        # Add new words to sequences and attention maps.
        seqs = torch.cat(
            [seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
        seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],
                               dim=1)  # (s, step+1, enc_image_size, enc_image_size)

        # Which sequences are incomplete (didn't reach <end>)?
        incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if
                           next_word != word_map['<end>']]
        complete_inds = list(
            set(range(len(next_word_inds))) - set(incomplete_inds))

        # Set aside complete sequences.
        if len(complete_inds) > 0:
            complete_seqs.extend(seqs[complete_inds].tolist())
            complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())
            complete_seqs_scores.extend(top_k_scores[complete_inds])
        k -= len(complete_inds)  # reduce beam length accordingly

        # Proceed with incomplete sequences only.
        if k == 0:
            break
        seqs = seqs[incomplete_inds]
        seqs_alpha = seqs_alpha[incomplete_inds]
        h = h[prev_word_inds[incomplete_inds]]
        c = c[prev_word_inds[incomplete_inds]]
        encoder_out = encoder_out[prev_word_inds[incomplete_inds]]
        top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
        k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)

        # Break if decoding has been going on too long.
        if step > 50:
            break
        step += 1

    if complete_seqs_scores:
        i = complete_seqs_scores.index(max(complete_seqs_scores))
        seq = complete_seqs[i]
        alphas = complete_seqs_alpha[i]
    else:
        # No beam generated <end> within the step limit.  Previously this
        # raised ValueError on max() of an empty list; fall back to the best
        # still-active beam instead.
        i = int(torch.argmax(top_k_scores.view(-1)).item())
        seq = seqs[i].tolist()
        alphas = seqs_alpha[i].tolist()
    return seq, alphas
app = Flask(__name__, static_folder='storage')
init_app()
path_model = 'BEST_checkpoint_coco_5_cap_per_img_5_min_word_freq.pth.tar'
path_word_map = 'WORDMAP_coco_5_cap_per_img_5_min_word_freq.json'
beam_size = 5
# Load model
checkpoint = torch.load(path_model, map_location=str(device))
decoder = checkpoint['decoder']
decoder = decoder.to(device)
decoder.eval()
encoder = checkpoint['encoder']
encoder = encoder.to(device)
encoder.eval()
# Load word map (word2ix)
with open(path_word_map, 'r') as j:
word_map = json.load(j)
rev_word_map = {v: k for k, v in word_map.items()} # ix2word
VALID_IMAGE_EXTENSIONS = [
".jpg",
".jpeg",
".png",
".gif",
]
@app.route('/api', methods=['GET'])
@app.route('/api/add_image', methods=['POST'])
@app.route('/api/add_url_image', methods=['GET'])
if __name__ == '__main__':
    # Bind on all interfaces so the API is reachable from other hosts.
    app.run(
        host='0.0.0.0',
        port=5000
        #debug=False,
        #threaded=False
    )
| 34.496241 | 119 | 0.633319 |
import os
from flask import Flask, json, Response, request, render_template, send_file, jsonify, send_from_directory
from werkzeug.utils import secure_filename
import requests
from flask_cors import CORS
from datetime import datetime
import torch
import torch.nn.functional as F
import numpy as np
import json
import torchvision.transforms as transforms
# import matplotlib.pyplot as plt
# import matplotlib.cm as cm
import skimage.transform
import argparse
from scipy.misc import imread, imresize
from PIL import Image
import shutil
from PIL import Image
torch.set_default_tensor_type('torch.cuda.FloatTensor')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = 'cpu'
print('-------------', device)
def caption_image_beam_search(encoder, decoder, image_path, word_map, beam_size):
    """
    Reads an image and captions it with beam search.

    :param encoder: encoder model
    :param decoder: decoder model
    :param image_path: path to image
    :param word_map: word map (word -> index); must contain '<start>' and '<end>'
    :param beam_size: number of sequences to consider at each decode-step
    :return: caption (list of word indices), attention weights for visualization
    """
    k = beam_size
    vocab_size = len(word_map)

    # Read image and process; grayscale images are stacked to 3 channels.
    img = imread(image_path)
    if len(img.shape) == 2:
        img = img[:, :, np.newaxis]
        img = np.concatenate([img, img, img], axis=2)
    img = img.transpose(2, 0, 1)
    img = img / 255.
    img = torch.FloatTensor(img).to(device)
    # ImageNet normalization -- the encoder is a CNN pretrained on ImageNet.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([normalize])
    image = transform(img)  # (3, H, W)

    # Encode
    image = image.unsqueeze(0)  # (1, 3, H, W)
    # (1, enc_image_size, enc_image_size, encoder_dim)
    encoder_out = encoder(image)
    enc_image_size = encoder_out.size(1)
    encoder_dim = encoder_out.size(3)

    # Flatten encoding to (1, num_pixels, encoder_dim)
    encoder_out = encoder_out.view(1, -1, encoder_dim)
    num_pixels = encoder_out.size(1)

    # Treat the problem as having a batch size of k (one row per beam).
    encoder_out = encoder_out.expand(k, num_pixels, encoder_dim)

    # Top k previous words at each step; initially all <start>.  (k, 1)
    k_prev_words = torch.LongTensor(
        [[word_map['<start>']]] * k).to(device)
    # Top k sequences so far.  (k, 1)
    seqs = k_prev_words
    # Scores of the top k sequences.  (k, 1)
    top_k_scores = torch.zeros(k, 1).to(device)
    # Attention maps of the top k sequences.  (k, 1, enc, enc)
    seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(device)

    # Completed sequences, their attention maps and their scores.
    complete_seqs = list()
    complete_seqs_alpha = list()
    complete_seqs_scores = list()

    # Start decoding.  Below, s is the number of still-active beams (<= k);
    # sequences leave the beam once they generate <end>.
    step = 1
    h, c = decoder.init_hidden_state(encoder_out)
    while True:
        embeddings = decoder.embedding(
            k_prev_words).squeeze(1)  # (s, embed_dim)
        # (s, encoder_dim), (s, num_pixels)
        awe, alpha = decoder.attention(encoder_out, h)
        # (s, enc_image_size, enc_image_size)
        alpha = alpha.view(-1, enc_image_size, enc_image_size)
        # gating scalar, (s, encoder_dim)
        gate = decoder.sigmoid(decoder.f_beta(h))
        awe = gate * awe
        h, c = decoder.decode_step(
            torch.cat([embeddings, awe], dim=1), (h, c))  # (s, decoder_dim)
        scores = decoder.fc(h)  # (s, vocab_size)
        scores = F.log_softmax(scores, dim=1)
        # Accumulate log-probabilities along each beam.
        scores = top_k_scores.expand_as(scores) + scores  # (s, vocab_size)

        if step == 1:
            # All k beams are identical at the first step, so expand row 0 only.
            top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)  # (s)
        else:
            # Unroll and find top scores, and their unrolled indices.  (s)
            top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)

        # Convert unrolled indices to (beam index, word index).
        # NOTE: true division ('/') on integer tensors yields float tensors on
        # PyTorch >= 1.5, which breaks the tensor indexing below -- floor
        # division is required here.
        prev_word_inds = top_k_words // vocab_size  # (s)
        next_word_inds = top_k_words % vocab_size  # (s)

        # Add new words to sequences and attention maps.
        seqs = torch.cat(
            [seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
        seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],
                               dim=1)  # (s, step+1, enc_image_size, enc_image_size)

        # Which sequences are incomplete (didn't reach <end>)?
        incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if
                           next_word != word_map['<end>']]
        complete_inds = list(
            set(range(len(next_word_inds))) - set(incomplete_inds))

        # Set aside complete sequences.
        if len(complete_inds) > 0:
            complete_seqs.extend(seqs[complete_inds].tolist())
            complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())
            complete_seqs_scores.extend(top_k_scores[complete_inds])
        k -= len(complete_inds)  # reduce beam length accordingly

        # Proceed with incomplete sequences only.
        if k == 0:
            break
        seqs = seqs[incomplete_inds]
        seqs_alpha = seqs_alpha[incomplete_inds]
        h = h[prev_word_inds[incomplete_inds]]
        c = c[prev_word_inds[incomplete_inds]]
        encoder_out = encoder_out[prev_word_inds[incomplete_inds]]
        top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
        k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)

        # Break if decoding has been going on too long.
        if step > 50:
            break
        step += 1

    if complete_seqs_scores:
        i = complete_seqs_scores.index(max(complete_seqs_scores))
        seq = complete_seqs[i]
        alphas = complete_seqs_alpha[i]
    else:
        # No beam generated <end> within the step limit.  Previously this
        # raised ValueError on max() of an empty list; fall back to the best
        # still-active beam instead.
        i = int(torch.argmax(top_k_scores.view(-1)).item())
        seq = seqs[i].tolist()
        alphas = seqs_alpha[i].tolist()
    return seq, alphas
def visualize_att(image_path, seq, alphas, rev_word_map, smooth=True):
    """
    Builds the caption string for a decoded sequence.

    Adapted from the paper authors' repo:
    https://github.com/kelvinxu/arctic-captions/blob/master/alpha_visualization.ipynb
    The matplotlib attention-plotting code was disabled in this deployment;
    this function now only converts word indices to text.  The image is
    still opened so that unreadable files fail here rather than later.

    :param image_path: path to image that has been captioned
    :param seq: caption, as a list of word indices
    :param alphas: attention weights (unused while plotting is disabled)
    :param rev_word_map: reverse word mapping, i.e. ix2word
    :param smooth: smooth weights? (unused while plotting is disabled)
    :return: caption string; words separated (and terminated) by single spaces
    """
    image = Image.open(image_path)
    image = image.resize([14 * 24, 14 * 24], Image.LANCZOS)

    words = [rev_word_map[ind] for ind in seq]
    # Skip the first and last tokens (<start> and <end>); each remaining word
    # is followed by a space, preserving the original trailing-space format.
    return ''.join(word + ' ' for word in words[1:-1])
app = Flask(__name__, static_folder='storage')
def init_app():
    """Configure CORS and the local storage directory for uploads."""
    CORS(app)
    CORS(app, resources={r"/api/*": {"origins": "*"}})
    # Create ./storage if missing; exist_ok avoids the check-then-create race.
    storage_dir = os.path.join(os.getcwd(), 'storage')
    os.makedirs(storage_dir, exist_ok=True)
    app.config['storage'] = storage_dir
    app.config['file_allowed'] = ['image/png', 'image/jpeg']
init_app()

# Checkpoint and vocabulary files; per their names, trained on COCO with
# 5 captions per image and a minimum word frequency of 5.
path_model = 'BEST_checkpoint_coco_5_cap_per_img_5_min_word_freq.pth.tar'
path_word_map = 'WORDMAP_coco_5_cap_per_img_5_min_word_freq.json'
beam_size = 5

# Load model
checkpoint = torch.load(path_model, map_location=str(device))
decoder = checkpoint['decoder']
decoder = decoder.to(device)
decoder.eval()
encoder = checkpoint['encoder']
encoder = encoder.to(device)
encoder.eval()

# Load word map (word2ix)
with open(path_word_map, 'r') as j:
    word_map = json.load(j)
rev_word_map = {v: k for k, v in word_map.items()}  # ix2word

# Suffixes accepted by valid_url_extension().
VALID_IMAGE_EXTENSIONS = [
    ".jpg",
    ".jpeg",
    ".png",
    ".gif",
]
def valid_url_extension(url, extension_list=VALID_IMAGE_EXTENSIONS):
    """Return True if *url* ends with one of the known image extensions.

    Matching is case-sensitive, as in the original implementation.
    """
    # http://stackoverflow.com/a/10543969/396300
    # str.endswith accepts a tuple of suffixes; no need for any([...]).
    return url.endswith(tuple(extension_list))
def is_url_image(image_url):
    """Check via a HEAD request whether *image_url* points at an image.

    :param image_url: URL to probe
    :return: True if the response declares a PNG/JPEG content type
    """
    image_formats = ("image/png", "image/jpeg", "image/jpg")
    r = requests.head(image_url)
    # .get() avoids a KeyError when the server sends no Content-Type header.
    return r.headers.get("content-type") in image_formats
def check_request_containt_image_file(request_file):
    """Validate the uploaded image part of a request.

    :param request_file: the request.files mapping
    :return: 1 if there is no 'img' part, 2 if its mimetype is not allowed,
        0 if the upload looks fine (previously an implicit None; callers
        only compare against 1 and 2, so this stays compatible)
    """
    if 'img' not in request_file:
        return 1
    file = request_file['img']
    if file.mimetype not in app.config['file_allowed']:
        return 2
    return 0
def success_handle(code, error_message, status, mimetype='application/json'):
    """Build a JSON success response with code/message/status fields.

    NOTE: jsonify already emits application/json; *mimetype* is kept only
    for signature symmetry with error_handle and is not used here.
    """
    payload = {'code': code, 'message': error_message, 'status': status}
    return jsonify(**payload)
def error_handle(code, error_message, status, mimetype='application/json'):
    """Build a JSON error response with code/message/status fields."""
    body = json.dumps({"code": code, "message": error_message, "status": status})
    return Response(body, mimetype=mimetype)
def convert_png_to_jpg(path_image):
    """Convert the '.png' sibling of *path_image* to an RGB JPEG.

    :param path_image: destination path ending in '.jpg'; the source PNG is
        expected at the same path with a '.png' extension
    """
    # Swap only the extension.  The previous str.replace('jpg', 'png') also
    # rewrote any 'jpg' substring elsewhere in the path (e.g. a directory
    # name), producing a wrong source path.
    root, _ = os.path.splitext(path_image)
    path_raw = root + '.png'
    im = Image.open(path_raw)
    rgb_im = im.convert('RGB')
    rgb_im.save(path_image)
@app.route('/api', methods=['GET'])
def homepage():
    """Health-check endpoint; always returns an OK payload."""
    print('ahihihihi', flush=True)
    return success_handle(1, "OK", "OK")
@app.route('/api/add_image', methods=['POST'])
def add_image():
    """Caption an uploaded image file.

    Expects a multipart 'img' file part, stores it under the configured
    storage directory (PNG uploads are converted to JPEG first), runs the
    captioning model and returns the caption as a JSON payload.
    """
    # Millisecond-resolution timestamp; used only for logging in this route.
    created1 = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
    print(created1, flush=True)
    # user_id = str(request.form['user_id'])
    # if user_id == '':
    #     return error_handle(0, "CHƯA NHẬP TÊN USER", "INVALID")
    # print('user_id', user_id, flush=True)
    # 1 -> no 'img' part; 2 -> disallowed mimetype
    # (see check_request_containt_image_file).
    flag_check = check_request_containt_image_file(request.files)
    if(flag_check == 1):
        return error_handle(0, "KHÔNG CÓ FILE TRONG REQUEST", "INVALID")
    if(flag_check == 2):
        return error_handle(0, "CHỈ ĐƯỢC UPLOAD FILE THEO DẠNG .JPG .PNG .JPEG", "INVALID")
    file1 = request.files['img']
    try:
        if file1.filename == "":
            print("No filename")
            return error_handle(0, "KHÔNG CÓ ẢNH ĐẦU VÀO", "INVALID")
        filename = secure_filename(file1.filename)
        path_image = os.path.join(app.config['storage'], filename)
        file1.save(path_image)
        # The caption pipeline works on JPEGs; convert PNG uploads in place.
        if 'png' in path_image:
            path_image = path_image.replace('png', 'jpg')
            convert_png_to_jpg(path_image)
    except Exception as e:
        print(e)
        return error_handle(0, "LỖI THÊM ẢNH", "INVALID")
    try:
        # Encode, decode with attention and beam search
        seq, alphas = caption_image_beam_search(
            encoder, decoder, path_image, word_map, beam_size)
        alphas = torch.FloatTensor(alphas)
        # Visualize caption and attention of best sequence
        result = visualize_att(path_image, seq, alphas, rev_word_map, True)
        return success_handle(1, result, "VALID")
    except Exception as e:
        print(e)
        return error_handle(0, "LỖI SERVER", "INVALID")
@app.route('/api/add_url_image', methods=['GET'])
def add_url_image():
    """Caption an image referenced by the 'url_img' query parameter.

    Downloads the image into local storage, runs the captioning model and
    returns the caption as a JSON payload.
    """
    created1 = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
    print(created1, flush=True)
    url = str(request.args.get('url_img'))
    print('url', len(url))
    if len(url) > 200:
        return error_handle(0, "URL QUÁ DÀI", "INVALID")
    try:
        if not is_url_image(url):
            return error_handle(0, "URL KHÔNG CHỨA ẢNH", "INVALID")
        response = requests.get(url, stream=True)
        path_image = 'storage/{}.jpg'.format(created1)
        with open(path_image, 'wb') as out_file:
            shutil.copyfileobj(response.raw, out_file)
        del response
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt /
        # SystemExit; narrowed to Exception.
        return error_handle(0, "LỖI URL", "INVALID")
    try:
        print(path_image)
        # Encode, decode with attention and beam search
        seq, alphas = caption_image_beam_search(
            encoder, decoder, path_image, word_map, beam_size)
        alphas = torch.FloatTensor(alphas)
        # Visualize caption and attention of best sequence
        result = visualize_att(path_image, seq, alphas, rev_word_map, True)
        return success_handle(1, result, "VALID")
    except Exception:
        return error_handle(0, "LỖI SERVER", "INVALID")
if __name__ == '__main__':
    # Bind on all interfaces so the API is reachable from other hosts.
    app.run(
        host='0.0.0.0',
        port=5000
        #debug=False,
        #threaded=False
    )
| 6,320 | 0 | 250 |
31a03099b587aeeeb96296b79c441b86f6b41cd2 | 348 | py | Python | bot/handlers/users/start.py | YoshlikMedia/STT-uzbek-bot | fec5d6a7bec4135aeb35a86970193654637194f5 | [
"MIT"
] | null | null | null | bot/handlers/users/start.py | YoshlikMedia/STT-uzbek-bot | fec5d6a7bec4135aeb35a86970193654637194f5 | [
"MIT"
] | null | null | null | bot/handlers/users/start.py | YoshlikMedia/STT-uzbek-bot | fec5d6a7bec4135aeb35a86970193654637194f5 | [
"MIT"
] | null | null | null | from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandStart
from loader import dp
@dp.message_handler(CommandStart())
| 31.636364 | 86 | 0.735632 | from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandStart
from loader import dp
@dp.message_handler(CommandStart())
async def bot_start(message: types.Message):
    """Greet the user on /start and explain that the bot expects a voice message."""
    await message.answer(f"<b>Assalom Alaykum, {message.from_user.full_name}!\n\n</b>"
                         f"Botdan foydalanish uchun voice yuboring")
| 179 | 0 | 22 |
64884f131266d13ea970b5918aa5dab206b2d99c | 1,761 | py | Python | test/displays_test.py | theJollySin/pytextgame | 62ce5525934b90a0b3ad2a2188315220a05dd1f8 | [
"MIT"
] | 1 | 2015-09-11T19:43:29.000Z | 2015-09-11T19:43:29.000Z | test/displays_test.py | theJollySin/pytextgame | 62ce5525934b90a0b3ad2a2188315220a05dd1f8 | [
"MIT"
] | 25 | 2015-09-10T16:18:09.000Z | 2015-10-03T22:19:44.000Z | test/displays_test.py | theJollySin/pytextgame | 62ce5525934b90a0b3ad2a2188315220a05dd1f8 | [
"MIT"
] | 1 | 2019-11-22T10:00:59.000Z | 2019-11-22T10:00:59.000Z |
import unittest
from pytextgame.colors import *
from pytextgame.displays import Displays
from pytextgame.geometry import *
from pytextgame.window import Window
if __name__ == '__main__':
unittest.main()
| 33.226415 | 86 | 0.634299 |
import unittest
from pytextgame.colors import *
from pytextgame.displays import Displays
from pytextgame.geometry import *
from pytextgame.window import Window
class MockWindow(Window):
    """Minimal concrete Window used as a stand-in inside the display tests."""

    def __init__(self, stdscr, game, rect, border=WHITE):
        # The first positional parameter was named 'ui' in the original;
        # it is the instance itself, so name it 'self' per convention.
        Window.__init__(self, stdscr, game, rect, border)
class TestDisplays(unittest.TestCase):
    """Unit tests for Displays: valid entries and type validation on assignment."""

    def test_one_window_display(self):
        # A single display holding a single named window.
        displays = Displays()
        displays['mock'] = {'test': [MockWindow, Rectangle(0, 0, 60, 30)]}

        self.assertEqual(displays.keys(), ['mock'])
        self.assertEqual(displays['mock'].keys(), ['test'])

    def test_multiple_displays(self):
        # Two displays; the border color entry is optional.
        displays = Displays()
        displays['mock'] = {'test': [MockWindow, Rectangle(0, 0, 60, 30)]}
        displays['not_real'] = {'testing': [MockWindow, Rectangle(0, 0, 60, 30), RED]}

        self.assertEqual(sorted(displays.keys()), ['mock', 'not_real'])
        self.assertEqual(displays['mock'].keys(), ['test'])

    def test_add_broken_keys(self):
        # Display names must be strings.
        displays = Displays()
        with self.assertRaises(TypeError):
            displays[123] = {'test': [MockWindow, Rectangle(0, 0, 60, 30)]}

    def test_add_broken_window_name(self):
        # Window names must be strings.
        displays = Displays()
        with self.assertRaises(TypeError):
            displays['faux'] = {123: [MockWindow, Rectangle(0, 0, 60, 30)]}

    def test_add_broken_window(self):
        # The first list entry must be a window class.
        displays = Displays()
        with self.assertRaises(TypeError):
            displays['faux'] = {'test': [123, Rectangle(0, 0, 60, 30)]}

    def test_add_broken_rectangle(self):
        # The geometry entry must be a Rectangle, not another geometry type.
        displays = Displays()
        with self.assertRaises(TypeError):
            displays['faux'] = {'test': [MockWindow, Direction(0, 1)]}
if __name__ == '__main__':
unittest.main()
| 1,293 | 21 | 235 |
240d4c477fc5a10a913e0fc617380114eb2bd239 | 1,281 | py | Python | test_flying_shapes.py | brain-research/flying-shapes | e9a63576c92f3e1f7ae4933596977ded04b962ad | [
"Apache-2.0"
] | 3 | 2018-06-24T04:05:08.000Z | 2020-07-12T23:37:05.000Z | test_flying_shapes.py | brain-research/flying-shapes | e9a63576c92f3e1f7ae4933596977ded04b962ad | [
"Apache-2.0"
] | null | null | null | test_flying_shapes.py | brain-research/flying-shapes | e9a63576c92f3e1f7ae4933596977ded04b962ad | [
"Apache-2.0"
] | 3 | 2018-06-24T04:05:09.000Z | 2020-07-12T23:37:09.000Z | # Copyright 2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test create and print a batch from the flying shapes dataset.
"""
from third_party import dataset
def test_flying_shapes():
    """Wrapper for flying_shapes.py data generator."""
    image_size = 600
    config = {
        'seq_length': 10,
        'batch_size': 2,
        'image_size': image_size,
        'num_digits': 3,
        'step_length': 0.5,
        'digit_size': 180,
        # Flattened RGB frame length.
        'frame_size': (image_size ** 2) * 3,
        'file_path': 'flying_shapes.npy',
    }
    data_generator = dataset.FlyingShapesDataHandler(config)
    # Exercise both batch flavours and render each one.
    for batch_fn in (data_generator.GetUnlabelledBatch,
                     data_generator.GetLabelledBatch):
        frames, boxes = batch_fn()
        data_generator.DisplayData(frames, boxes)
| 32.846154 | 74 | 0.741608 | # Copyright 2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test create and print a batch from the flying shapes dataset.
"""
from third_party import dataset
def test_flying_shapes():
    """Wrapper for flying_shapes.py data generator."""
    image_size = 600
    config = {
        'seq_length': 10,
        'batch_size': 2,
        'image_size': image_size,
        'num_digits': 3,
        'step_length': 0.5,
        'digit_size': 180,
        # Flattened RGB frame length.
        'frame_size': (image_size ** 2) * 3,
        'file_path': 'flying_shapes.npy',
    }
    data_generator = dataset.FlyingShapesDataHandler(config)
    # Exercise both batch flavours and render each one.
    for batch_fn in (data_generator.GetUnlabelledBatch,
                     data_generator.GetLabelledBatch):
        frames, boxes = batch_fn()
        data_generator.DisplayData(frames, boxes)
| 0 | 0 | 0 |
899b10506a0dbb47e4207752eb3dc70380a4e900 | 682 | py | Python | sandbox/python/parse_esc.py | rboman/progs | c60b4e0487d01ccd007bcba79d1548ebe1685655 | [
"Apache-2.0"
] | 2 | 2021-12-12T13:26:06.000Z | 2022-03-03T16:14:53.000Z | sandbox/python/parse_esc.py | rboman/progs | c60b4e0487d01ccd007bcba79d1548ebe1685655 | [
"Apache-2.0"
] | 5 | 2019-03-01T07:08:46.000Z | 2019-04-28T07:32:42.000Z | sandbox/python/parse_esc.py | rboman/progs | c60b4e0487d01ccd007bcba79d1548ebe1685655 | [
"Apache-2.0"
] | 2 | 2017-12-13T13:13:52.000Z | 2019-03-13T20:08:15.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-

# Demo: write a string containing ANSI color escapes and non-ASCII
# characters to a file, then read it back and print it.

red = '\033[31m'
reset = '\033[0m'

s = "[€€€éä] nice " + red + "colors" + reset + '!\n'
print(s)

# write string into file (5 copies); utf-8 is forced so the result does not
# depend on the platform's locale encoding (the default could not encode '€')
with open("tmp.txt", 'w', encoding='utf-8') as f:
    for i in range(5):
        f.write(s)

# read string from file
with open("tmp.txt", 'r', encoding='utf-8') as f:
    for l in f.readlines():
        print(l, end=' ')
| 15.155556 | 54 | 0.582111 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-

# Demo: write a string containing ANSI color escapes and non-ASCII
# characters to a file, then read it back and print it.

red = '\033[31m'
reset = '\033[0m'

s = "[€€€éä] nice " + red + "colors" + reset + '!\n'
print(s)

# write string into file (5 copies); utf-8 is forced so the result does not
# depend on the platform's locale encoding (the default could not encode '€')
with open("tmp.txt", 'w', encoding='utf-8') as f:
    for i in range(5):
        f.write(s)

# read string from file
with open("tmp.txt", 'r', encoding='utf-8') as f:
    for l in f.readlines():
        print(l, end=' ')
| 0 | 0 | 0 |
5daaa6240a8ebcdb6a4a6613ecb521171d92d71a | 5,083 | py | Python | gmprocess/subcommands/compute_waveform_metrics.py | dchurchwell-usgs/groundmotion-processing | 57aecdde36d98b5f838b819dc281a47a84351256 | [
"Unlicense"
] | null | null | null | gmprocess/subcommands/compute_waveform_metrics.py | dchurchwell-usgs/groundmotion-processing | 57aecdde36d98b5f838b819dc281a47a84351256 | [
"Unlicense"
] | null | null | null | gmprocess/subcommands/compute_waveform_metrics.py | dchurchwell-usgs/groundmotion-processing | 57aecdde36d98b5f838b819dc281a47a84351256 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
from gmprocess.subcommands.lazy_loader import LazyLoader
arg_dicts = LazyLoader(
'arg_dicts', globals(), 'gmprocess.subcommands.arg_dicts')
base = LazyLoader('base', globals(), 'gmprocess.subcommands.base')
distributed = LazyLoader('distributed', globals(), 'dask.distributed')
ws = LazyLoader('ws', globals(), 'gmprocess.io.asdf.stream_workspace')
station_summary = LazyLoader(
'station_summary', globals(), 'gmprocess.metrics.station_summary')
const = LazyLoader('const', globals(), 'gmprocess.utils.constants')
class ComputeWaveformMetricsModule(base.SubcommandModule):
    """Compute waveform metrics.
    """
    # Name and short alias under which this subcommand is registered.
    command_name = 'compute_waveform_metrics'
    aliases = ('wm', )

    # Command-line argument definitions shared with the other subcommands.
    arguments = [
        arg_dicts.ARG_DICTS['eventid'],
        arg_dicts.ARG_DICTS['textfile'],
        arg_dicts.ARG_DICTS['label'],
        arg_dicts.ARG_DICTS['overwrite'],
        arg_dicts.ARG_DICTS['num_processes']
    ]

    def main(self, gmrecords):
        """Compute waveform metrics.

        Args:
            gmrecords:
                GMrecordsApp instance.
        """
        logging.info('Running subcommand \'%s\'' % self.command_name)

        self.gmrecords = gmrecords
        self._check_arguments()
        self._get_events()

        # One pass per event; the per-event helper does the heavy lifting.
        for event in self.events:
            self._compute_event_waveform_metrics(event)

        self._summarize_files_created()
| 37.10219 | 72 | 0.54397 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
from gmprocess.subcommands.lazy_loader import LazyLoader
arg_dicts = LazyLoader(
'arg_dicts', globals(), 'gmprocess.subcommands.arg_dicts')
base = LazyLoader('base', globals(), 'gmprocess.subcommands.base')
distributed = LazyLoader('distributed', globals(), 'dask.distributed')
ws = LazyLoader('ws', globals(), 'gmprocess.io.asdf.stream_workspace')
station_summary = LazyLoader(
'station_summary', globals(), 'gmprocess.metrics.station_summary')
const = LazyLoader('const', globals(), 'gmprocess.utils.constants')
class ComputeWaveformMetricsModule(base.SubcommandModule):
    """Compute waveform metrics.
    """
    # Name and short alias under which this subcommand is registered.
    command_name = 'compute_waveform_metrics'
    aliases = ('wm', )

    # Command-line argument definitions shared with the other subcommands.
    arguments = [
        arg_dicts.ARG_DICTS['eventid'],
        arg_dicts.ARG_DICTS['textfile'],
        arg_dicts.ARG_DICTS['label'],
        arg_dicts.ARG_DICTS['overwrite'],
        arg_dicts.ARG_DICTS['num_processes']
    ]

    def main(self, gmrecords):
        """Compute waveform metrics.

        Args:
            gmrecords:
                GMrecordsApp instance.
        """
        logging.info('Running subcommand \'%s\'' % self.command_name)

        self.gmrecords = gmrecords
        self._check_arguments()
        self._get_events()

        for event in self.events:
            self._compute_event_waveform_metrics(event)

        self._summarize_files_created()

    def _compute_event_waveform_metrics(self, event):
        """Compute waveform metrics for one event and store them as XML in
        the event's ASDF workspace file.

        Returns the event id, or returns early if no workspace file exists.
        """
        self.eventid = event.id
        logging.info(
            'Computing waveform metrics for event %s...' % self.eventid)
        event_dir = os.path.join(self.gmrecords.data_path, self.eventid)
        workname = os.path.normpath(
            os.path.join(event_dir, const.WORKSPACE_NAME))
        if not os.path.isfile(workname):
            logging.info(
                'No workspace file found for event %s. Please run '
                'subcommand \'assemble\' to generate workspace file.'
                % self.eventid)
            logging.info('Continuing to next event.')
            return event.id

        self.workspace = ws.StreamWorkspace.open(workname)
        ds = self.workspace.dataset
        station_list = ds.waveforms.list()
        self._get_labels()

        summaries = []
        metricpaths = []
        # When num_processes > 0, metric computation is farmed out to a dask
        # cluster; the ASDF reads/writes themselves stay serial.
        if self.gmrecords.args.num_processes > 0:
            futures = []
            client = distributed.Client(
                n_workers=self.gmrecords.args.num_processes)

        for station_id in station_list:
            # Cannot parallelize IO to ASDF file
            streams = self.workspace.getStreams(
                event.id,
                stations=[station_id],
                labels=[self.gmrecords.args.label],
                config=self.gmrecords.conf
            )
            if not len(streams):
                raise ValueError('No matching streams found.')

            for stream in streams:
                if stream.passed:
                    # Auxiliary-data path under which this stream's metrics
                    # will be stored in the workspace.
                    metricpaths.append('/'.join([
                        ws.format_netsta(stream[0].stats),
                        ws.format_nslit(
                            stream[0].stats,
                            stream.get_inst(),
                            stream.tag)
                    ]))
                    logging.info(
                        'Calculating waveform metrics for %s...'
                        % stream.get_id()
                    )
                    if self.gmrecords.args.num_processes > 0:
                        future = client.submit(
                            station_summary.StationSummary.from_config,
                            stream=stream,
                            config=self.gmrecords.conf,
                            event=event,
                            calc_waveform_metrics=True,
                            calc_station_metrics=False)
                        futures.append(future)
                    else:
                        summaries.append(
                            station_summary.StationSummary.from_config(
                                stream, event=event,
                                config=self.gmrecords.conf,
                                calc_waveform_metrics=True,
                                calc_station_metrics=False
                            )
                        )

        if self.gmrecords.args.num_processes > 0:
            # Collect the processed streams
            summaries = [future.result() for future in futures]
            client.shutdown()

        # Cannot parallelize IO to ASDF file
        logging.info('Adding waveform metrics to workspace files '
                     'with tag \'%s\'.' % self.gmrecords.args.label)
        for i, summary in enumerate(summaries):
            xmlstr = summary.get_metric_xml()
            metricpath = metricpaths[i]
            self.workspace.insert_aux(
                xmlstr, 'WaveFormMetrics', metricpath,
                overwrite=self.gmrecords.args.overwrite)
        self.workspace.close()
        return event.id
| 3,604 | 0 | 27 |
b9c6523eb62600db7ffe2af7d6ec31883b7403b8 | 25,068 | py | Python | src/main.py | mkarmann/conway-reversed | a3ae10dd5768affb9caf193a246395ee0fb2bc6f | [
"MIT"
] | null | null | null | src/main.py | mkarmann/conway-reversed | a3ae10dd5768affb9caf193a246395ee0fb2bc6f | [
"MIT"
] | null | null | null | src/main.py | mkarmann/conway-reversed | a3ae10dd5768affb9caf193a246395ee0fb2bc6f | [
"MIT"
] | null | null | null | import pandas as pd
import math
import cv2
import numpy as np
import matplotlib
import random
from bestguess.bestguess import BestGuessModule, MODEL_CHANNELS
from stochastic_optimizer import BestChangeLayer
# matplotlib.use('agg')
import matplotlib.pyplot as plt
import time
import torch
import torch.nn as nn
import os
from torch import optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
LR = 1e-4
BATCH_SIZE = 128
STEPS_PER_EPOCH = 1024
EPOCHS = 128
GOL_DELTA = 2
TEST_SAMPLES = 20
HALF_LR_AFTER_N_EPOCHS = 32
OUTLINE_SIZE = 5*2
RUN_NAME = time.strftime("%Y_%m_%d_%H_%M_%S") + '_GoL_delta_' + str(GOL_DELTA)
SNAPSHOTS_DIR = '../out/training/snapshots/{}'.format(RUN_NAME)
TENSORBOARD_LOGS_DIR = '../out/training/logs'
VIDEO_DIR = '../out/training/videos/{}'.format(RUN_NAME)
SUBMISSION_DIR = '../out/submissions'
SUBMISSION_FILE_FORMAT = SUBMISSION_DIR + '/submission_{}.csv'
SCORE_FILE_FORMAT = SUBMISSION_DIR + '/score_{}.csv'
if __name__ == "__main__":
improve_submission()
| 38.625578 | 166 | 0.593346 | import pandas as pd
import math
import cv2
import numpy as np
import matplotlib
import random
from bestguess.bestguess import BestGuessModule, MODEL_CHANNELS
from stochastic_optimizer import BestChangeLayer
# matplotlib.use('agg')
import matplotlib.pyplot as plt
import time
import torch
import torch.nn as nn
import os
from torch import optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
LR = 1e-4
BATCH_SIZE = 128
STEPS_PER_EPOCH = 1024
EPOCHS = 128
GOL_DELTA = 2
TEST_SAMPLES = 20
HALF_LR_AFTER_N_EPOCHS = 32
OUTLINE_SIZE = 5*2
RUN_NAME = time.strftime("%Y_%m_%d_%H_%M_%S") + '_GoL_delta_' + str(GOL_DELTA)
SNAPSHOTS_DIR = '../out/training/snapshots/{}'.format(RUN_NAME)
TENSORBOARD_LOGS_DIR = '../out/training/logs'
VIDEO_DIR = '../out/training/videos/{}'.format(RUN_NAME)
SUBMISSION_DIR = '../out/submissions'
SUBMISSION_FILE_FORMAT = SUBMISSION_DIR + '/submission_{}.csv'
SCORE_FILE_FORMAT = SUBMISSION_DIR + '/score_{}.csv'
def state_step(state: np.array):
    """Advance a Game of Life board by one generation.

    The board wraps around at its edges (toroidal topology), implemented via
    ``np.roll`` shifts.

    :param state: 2D array of 0/1 cells.
    :return: new 2D int array of the same shape, one generation later.
    """
    # Sum the 8 wrapped neighbours of every cell.
    neighbour_sum = sum(
        np.roll(np.roll(state, dy, axis=0), dx, axis=1)
        for dy in (-1, 0, 1)
        for dx in (-1, 0, 1)
        if (dy, dx) != (0, 0)
    )
    # BUGFIX: `np.int` was removed in NumPy 1.24; the builtin `int` is the
    # documented replacement and yields the same dtype.
    out = np.zeros(state.shape, dtype=int)
    out[neighbour_sum == 3] = 1                               # birth (or survival) with 3 neighbours
    out[np.logical_and(neighbour_sum == 2, state == 1)] = 1   # survival with 2 neighbours
    return out
def state_loss(pred: np.array, target: np.array):
    """Return the mean absolute cell-wise difference between two boards."""
    difference = np.abs(pred - target)
    return np.mean(difference)
def plot(state: np.array):
    """Show a 0/1 board as an image (blocks until the window is closed)."""
    # BUGFIX: `np.float` was removed in NumPy 1.24; builtin `float` is the
    # documented, equivalent replacement.
    plt.imshow(state.astype(float))
    plt.show()
def create_random_board(shape=(25, 25), warmup_steps=5):
    """Create a random GoL board and settle it for a few generations.

    A random fill density is drawn first so the boards vary in sparsity; the
    warm-up steps let typical GoL structures emerge from the noise.

    :param shape: (rows, cols) of the board.
    :param warmup_steps: generations to simulate before returning.
    :return: 2D int array of 0/1 cells.
    """
    factor = np.random.uniform(0.01, 0.99, (1, ))
    # BUGFIX: `np.int` was removed in NumPy 1.24; builtin `int` is equivalent.
    state = (np.random.uniform(0.0, 1.0, shape) > factor).astype(int)
    for i in range(warmup_steps):
        state = state_step(state)
    return state
def create_training_sample(shape=(25, 25), warmup_steps=5, delta=GOL_DELTA, random_warmup=False):
    """Draw a (start, end) board pair where ``end`` is ``start`` evolved ``delta`` steps.

    Boards whose end state died out completely are rejected and redrawn, so
    the returned end board always contains at least one live cell.

    :param shape: board shape.
    :param warmup_steps: base number of settle steps for the start board.
    :param delta: generations between start and end.
    :param random_warmup: add 0-4 extra random warm-up steps per sample.
    :return: dict with keys "start", "end" and "delta".
    """
    while True:
        extra_steps = np.random.randint(0, 5) if random_warmup else 0
        start = create_random_board(shape, warmup_steps + extra_steps)
        end = start.copy()
        for _ in range(delta):
            end = state_step(end)
        # Reject boards that died out -- an all-dead end carries no signal.
        if not np.any(end):
            continue
        return {
            "start": start,
            "end": end,
            "delta": delta
        }
class ResBlock(nn.Module):
    """Residual bottleneck block: BN-ReLU-Conv3x3 -> BN-ReLU-Conv1x1 + skip.

    The 3x3 convolution uses no padding, so each spatial dimension shrinks
    by two; the skip connection is cropped accordingly before the addition.
    """

    def __init__(self, features):
        super().__init__()
        bottleneck = features // 4
        layers = [
            nn.BatchNorm2d(features),
            nn.ReLU(True),
            nn.Conv2d(features, bottleneck, 3, padding=0, bias=False),
            nn.BatchNorm2d(bottleneck),
            nn.ReLU(True),
            nn.Conv2d(bottleneck, features, 1, padding=0, bias=False),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        # Crop the skip path to match the (unpadded) 3x3 conv output size.
        residual = x[:, :, 1:-1, 1:-1]
        return self.main(x) + residual
def create_net_input_array(state: np.array, predicted_mask: np.array, predictions: np.array, outline_size=0):
    """Build the 5-channel network input for one board.

    Channels (in order): dead, alive, not-yet-predicted, predicted-dead,
    predicted-alive.  With ``outline_size > 0`` the board is surrounded by
    toroidally wrapped copies of itself so the net sees context across the
    board edges.

    :param state: 2D 0/1 array -- the (end) board the net conditions on.
    :param predicted_mask: 2D 0/1 array, 1 where a cell was already predicted.
    :param predictions: 2D 0/1 array with the predictions made so far.
    :param outline_size: width of the wrapped border, in cells.
    :return: float array of shape (5, H + 2*outline, W + 2*outline).
    """
    # BUGFIX: `np.float` was removed in NumPy 1.24; builtin `float` is the
    # documented, equivalent replacement (applied throughout this function).
    input_dead = (1 - state).astype(float)
    input_alive = state.astype(float)
    input_unpredicted = (1 - predicted_mask).astype(float)
    input_predicted_dead = ((1 - predictions) * predicted_mask).astype(float)
    input_predicted_alive = (predictions * predicted_mask).astype(float)
    sample_input = np.stack([
        input_dead,
        input_alive,
        input_unpredicted,
        input_predicted_dead,
        input_predicted_alive],
        axis=2
    )
    # Outlining: tile the board enough times to cover the requested border,
    # then crop a window of size (H + 2*outline, W + 2*outline) centred on
    # one full copy -- this yields toroidal wrap-around padding.
    if outline_size > 0:
        tiles_y = ((outline_size - 1) // state.shape[0]) * 2 + 2 + 1
        tiles_x = ((outline_size - 1) // state.shape[1]) * 2 + 2 + 1
        offset_y = state.shape[0] - ((outline_size - 1) % state.shape[0]) - 1
        offset_x = state.shape[1] - ((outline_size - 1) % state.shape[1]) - 1
        sample_input = np.tile(sample_input, (tiles_y, tiles_x, 1))
        sample_input = sample_input[
                       offset_y:(offset_y + state.shape[0] + 2 * outline_size),
                       offset_x:(offset_x + state.shape[1] + 2 * outline_size)
                       ]
    # (H, W, C) -> (C, H, W), the layout torch conv layers expect.
    return sample_input.transpose((2, 0, 1)).astype(float)
class GoLDataset(Dataset):
    """Endless dataset of partially-predicted reverse-Game-of-Life samples.

    Every item pairs a 5-channel net input (built from the end board plus a
    random subset of cells already "predicted" with their true start values)
    with the true start board as the classification target.  Items are
    generated on the fly, so ``idx`` is ignored.
    """

    def __init__(self, shape=(25, 25), warmup_steps=5, delta=GOL_DELTA, size=1024, outline_size=0):
        self.shape = shape                  # (rows, cols) of each board
        self.warmup_steps = warmup_steps    # settle steps for random boards
        self.delta = delta                  # generations between start and end
        self.size = size                    # nominal epoch length
        self.outline_size = outline_size    # wrapped-border width for the net input

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        sample = create_training_sample(self.shape, self.warmup_steps, self.delta, random_warmup=True)
        start = sample["start"]
        end = sample["end"]
        # Mark a random subset of cells as already predicted (with true values).
        # BUGFIX: `np.int`/`np.float` were removed in NumPy 1.24; use builtins.
        predicted_mask = (np.random.uniform(0.0, 1.0, self.shape) > np.random.uniform(0.0, 1.0, (1, ))).astype(int)
        # there needs to be a cell left to predict
        if np.sum(predicted_mask) == self.shape[0] * self.shape[1]:
            predicted_mask[np.random.randint(0, self.shape[0], (1, )), np.random.randint(0, self.shape[1], (1, ))] = 0
        sample_input = create_net_input_array(end, predicted_mask, start, outline_size=self.outline_size)
        sample_target = start
        return {
            "input": sample_input,
            # BUGFIX: the expansion axis must be 2 for a 2D array (axis=3 is
            # out of range and raises on NumPy >= 1.24); the subsequent
            # transpose then yields the intended (1, H, W) layout.
            "mask": np.expand_dims(predicted_mask.astype(float), 2).transpose((2, 0, 1)),
            "target": sample_target
        }
class GoLModule(nn.Module):
    """Convolutional net that reconstructs the previous GoL generation cell by cell.

    The input is the 5-channel image from ``create_net_input_array`` with a
    wrapped border of OUTLINE_SIZE cells.  The ten padding-free 3x3 ResBlocks
    shrink each side by exactly 2 * OUTLINE_SIZE, so the output is a 2-class
    (dead/alive) logit map with the original board size.
    """

    def __init__(self, channels=128):
        super().__init__()
        self.main = nn.Sequential(
            nn.Conv2d(5, channels, 1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, 1, bias=False),
            ResBlock(channels),
            ResBlock(channels),
            ResBlock(channels),
            ResBlock(channels),
            ResBlock(channels),
            nn.BatchNorm2d(channels),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, 1, bias=False),
            ResBlock(channels),
            ResBlock(channels),
            ResBlock(channels),
            ResBlock(channels),
            ResBlock(channels),
            nn.BatchNorm2d(channels),
            nn.Conv2d(channels, 2, 1)
        )

    def get_num_trainable_parameters(self):
        """Return the number of trainable scalar parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)

    def get_l1_loss_of_parameters(self):
        """Mean absolute value of all parameters (L1 regularisation term)."""
        loss = None
        for param in self.parameters():
            # BUGFIX: the old code accumulated `torch.abs(param)` tensors of
            # different shapes, which cannot be summed element-wise; reduce
            # each tensor to a scalar first (mirrors get_l2_loss_of_parameters).
            if loss is None:
                loss = torch.sum(torch.abs(param))
            else:
                loss += torch.sum(torch.abs(param))
        return loss * (1. / self.get_num_trainable_parameters())

    def get_l2_loss_of_parameters(self):
        """Mean squared value of all parameters (L2 regularisation term)."""
        loss = None
        for param in self.parameters():
            if loss is None:
                loss = torch.sum(torch.mul(param, param))
            else:
                loss += torch.sum(torch.mul(param, param))
        return loss * (1. / self.get_num_trainable_parameters())

    def forward(self, x):
        return self.main(x)

    def get_probabilities(self, x):
        """Per-cell softmax over the 2 (dead/alive) logit channels."""
        return torch.softmax(self.main(x), 1)

    def get_best_guess(self, x: torch.tensor, mask: np.array):
        """Return the single most confident not-yet-predicted cell.

        :param x: net input batch of size 1.
        :param mask: 2D 0/1 array of cells already predicted (excluded).
        :return: dict with "coord_yx" (array [y, x]) and "alive" (0 or 1).
        """
        probabilities = self.get_probabilities(x)
        masked_probabilities = np.array(probabilities.tolist()) * (1 - mask)
        guess = np.unravel_index(masked_probabilities.argmax(), masked_probabilities.shape)
        return {
            "coord_yx": np.array([guess[2], guess[3]]),
            "alive": guess[1]
        }

    def get_best_guesses(self, x: torch.tensor, mask: np.array, num_guesses=2):
        # NOTE(review): `num_guesses` is unused -- this is an unfinished
        # duplicate of get_best_guess and returns only the single best cell.
        probabilities = self.get_probabilities(x)
        masked_probabilities = np.array(probabilities.tolist()) * (1 - mask)
        guess = np.unravel_index(masked_probabilities.argmax(), masked_probabilities.shape)
        return {
            "coord_yx": np.array([guess[2], guess[3]]),
            "alive": guess[1]
        }

    def get_best_by_threshold(self, x: torch.tensor, mask: np.array, threshold: float):
        """Return all not-yet-predicted cells whose confidence >= threshold."""
        probabilities = self.get_probabilities(x)
        masked_probabilities = np.array(probabilities.tolist()) * (1 - mask)
        results = np.where(masked_probabilities >= threshold)
        return {
            "coord_yx": np.array([results[2], results[3]]),
            "alive": results[1]
        }

    def get_tendencies_img(self, x):
        """Alive-probability map of the first batch element, as (H, W) array."""
        return np.array(self.get_probabilities(x).tolist()).transpose((0, 2, 3, 1))[0, :, :, 1]

    def solve_pure_gpu(self, state: np.array, device: torch.device):
        # NOTE(review): unfinished all-GPU variant of `solve` -- the loop body
        # is incomplete (the reshape of `max_val` looks shape-inconsistent)
        # and the method never returns a result.  Kept for reference only.
        predicted_mask = np.zeros(state.shape)
        predictions = np.zeros(state.shape)
        sample_input = create_net_input_array(state, predicted_mask, predictions, outline_size=OUTLINE_SIZE)
        batch_input = torch.from_numpy(np.expand_dims(sample_input, 0)).float().to(device)
        for i in range(state.shape[0] * state.shape[1]):
            probabilities = torch.softmax(self.main(batch_input), 1)
            max_val = torch.argmax(probabilities.reshape((-1, 2 * state.shape[0] * state.shape[1])), 1, keepdim=True).reshape((-1, 2, state.shape[0], state.shape[1]))
            # batch_input[:, ]

    def solve_batch(self, states: np.array, device: torch.device, best_guesses_per_sample=1):
        """Greedily reconstruct the previous generation for a batch of boards.

        One cell per board is fixed per iteration (the most confident one).
        With ``best_guesses_per_sample > 1`` the first half of the cells is
        sampled from the top-k candidates to add diversity.

        :param states: int array (batch, H, W) of end boards.
        :return: float array (batch, H, W) of predicted start boards.
        """
        predicted_masks = np.zeros(states.shape)
        predictions = np.zeros(states.shape)
        batches_indices = np.arange(0, states.shape[0])
        total_runs = states.shape[1] * states.shape[2]
        # BUGFIX: `np.float`/`np.int` were removed in NumPy 1.24; use builtins.
        sample_inputs = np.zeros((states.shape[0], 5, states.shape[1] + 2 * OUTLINE_SIZE, states.shape[2] + 2 * OUTLINE_SIZE), dtype=float)
        for i in range(total_runs):
            for b in range(states.shape[0]):
                sample_inputs[b] = create_net_input_array(states[b], predicted_masks[b], predictions[b], outline_size=OUTLINE_SIZE)
            input_tensor = torch.from_numpy(sample_inputs).float().to(device)
            if best_guesses_per_sample == 1 or i > total_runs // 2:
                # only chose the best guess
                probabilities = self.get_probabilities(input_tensor)
                masked_probabilities = np.array(probabilities.tolist()) * (1 - predicted_masks.reshape((-1, 1, states.shape[1], states.shape[2])))
                maxes = np.argmax(masked_probabilities.reshape((-1, 2 * states.shape[1] * states.shape[2])), axis=1)
                guesses = np.unravel_index(maxes, probabilities.shape)
                predicted_masks[batches_indices, guesses[2], guesses[3]] = 1
                predictions[batches_indices, guesses[2], guesses[3]] = guesses[1]
            else:
                num_best_guesses = min(best_guesses_per_sample, total_runs - i)
                # select between multiple best guesses
                probabilities = self.get_probabilities(input_tensor)
                masked_probabilities = np.array(probabilities.tolist()) * (
                        1 - predicted_masks.reshape((-1, 1, states.shape[1], states.shape[2])))
                maxes_all = np.zeros((states.shape[0], num_best_guesses), dtype=int)
                mkp = masked_probabilities.reshape((-1, 2 * states.shape[1] * states.shape[2]))
                for g in range(num_best_guesses):
                    maxes = np.argmax(mkp, axis=1)
                    maxes_all[:, g] = maxes
                    mkp[batches_indices, maxes] = 0
                maxes = maxes_all[:, np.random.randint(0, num_best_guesses, states.shape[0])]
                guesses = np.unravel_index(maxes, probabilities.shape)
                predicted_masks[batches_indices, guesses[2], guesses[3]] = 1
                predictions[batches_indices, guesses[2], guesses[3]] = guesses[1]
        return predictions

    def solve(self,
              state: np.array,
              device: torch.device,
              ground_truth=None,
              plot_each_step=False,
              plot_result=False,
              video_fname=None,
              quick_fill_threshold=1.0):
        """Greedily reconstruct the previous generation of a single board.

        Optionally renders each step with matplotlib and/or writes the frames
        to a video file.  With ``quick_fill_threshold < 1`` all cells above
        the threshold are filled in bulk first, then the solver falls back to
        one-cell-at-a-time.

        :param state: 2D int array, the end board.
        :param ground_truth: optional true start board (for plotting only).
        :return: 2D float array of the predicted start board.
        """
        predicted_mask = np.zeros(state.shape)
        predictions = np.zeros(state.shape)
        total_runs = state.shape[0] * state.shape[1]
        video_out = None
        for i in range(total_runs):
            sample_input = create_net_input_array(state, predicted_mask, predictions, outline_size=OUTLINE_SIZE)
            batch_input = torch.from_numpy(np.expand_dims(sample_input, 0)).float().to(device)
            if quick_fill_threshold < 1:
                thr_guess = self.get_best_by_threshold(batch_input, predicted_mask, threshold=quick_fill_threshold)
                if len(thr_guess['alive']) > 0:
                    predicted_mask[thr_guess['coord_yx'][0], thr_guess['coord_yx'][1]] = 1
                    predictions[thr_guess['coord_yx'][0], thr_guess['coord_yx'][1]] = thr_guess['alive']
                else:
                    # Nothing above the threshold any more -> disable bulk fill.
                    quick_fill_threshold = 1
            else:
                guess = self.get_best_guess(batch_input, predicted_mask)
                predicted_mask[guess['coord_yx'][0], guess['coord_yx'][1]] = 1
                predictions[guess['coord_yx'][0], guess['coord_yx'][1]] = guess['alive']
            if plot_each_step or (i == total_runs - 1 and plot_result) or video_fname is not None:
                # input
                fig = plt.figure(figsize=(16, 9), dpi=100)
                sub = fig.add_subplot(2, 3, 1)
                sub.set_title("start (ground truth)")
                if ground_truth is not None:
                    sub.imshow(ground_truth.astype(float))
                sub = fig.add_subplot(2, 3, 4)
                sub.set_title("end (input)")
                sub.imshow(state.astype(float))
                # net
                sub = fig.add_subplot(2, 3, 3)
                sub.set_title("Certainty heatmap")
                prob = self.get_tendencies_img(batch_input)
                overlay = np.ones((state.shape[0], state.shape[1], 4), dtype=float)
                overlay[:, :, 3] = predicted_mask
                # prob[prob < 0.5] *= -1
                # prob[prob < 0.5] += 1.0
                # prob *= (1 - prev_predicted_mask)
                sub.imshow(prob, vmin=0.0, vmax=1.0)
                sub.imshow(overlay, vmin=0.0, vmax=1.0)
                # outcome
                sub = fig.add_subplot(2, 3, 2)
                sub.set_title("net prediction")
                overlay = np.ones((state.shape[0], state.shape[1], 4), dtype=float)
                overlay[:, :, 3] = (1.0 - predicted_mask) * 0.66
                sub.imshow(predictions.astype(float))
                sub.imshow(overlay, vmin=0.0, vmax=1.0)
                sub = fig.add_subplot(2, 3, 5)
                sub.set_title("prediction after {} steps".format(GOL_DELTA))
                outc = predictions
                for d in range(GOL_DELTA):
                    outc = state_step(outc)
                sub.imshow(outc.astype(float))
                fig.canvas.draw()
                # BUGFIX: binary-mode `np.fromstring` was removed from NumPy;
                # `np.frombuffer` is the documented zero-copy replacement.
                data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
                img = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
                if video_fname is not None:
                    if video_out is None:
                        if not os.path.exists(os.path.dirname(video_fname)):
                            os.makedirs(os.path.dirname(video_fname))
                        fourcc = cv2.VideoWriter_fourcc(*'XVID')
                        video_out = cv2.VideoWriter(video_fname, fourcc, 60.0,
                                                    (img.shape[1], img.shape[0]))
                    # RGB -> BGR for OpenCV.
                    video_out.write(img[:, :, ::-1])
                    if i == total_runs - 1:
                        # Hold the final frame for ~1 second at 60 fps.
                        for n in range(59):
                            video_out.write(img[:, :, ::-1])
                plt.close(fig)
        if video_out is not None:
            video_out.release()
        return predictions
def test():
    """Evaluate a trained delta-1 snapshot on the first 100 Kaggle test rows.

    For each row the net is applied `delta` times to step the board back one
    generation at a time; the reconstruction is then simulated forward again
    and compared against the given end state.  Requires CUDA, the checkpoint
    path below and ../input/test.csv.
    """
    device = torch.device('cuda')
    net = GoLModule()
    net.load_state_dict(torch.load('../out/training/snapshots/2020_09_25_18_51_41_GoL_delta_1/epoch_103.pt'))
    net.eval()
    net.to(device)
    print(net.get_num_trainable_parameters())
    df_input = pd.read_csv("../input/test.csv")
    input_values = df_input.values
    error_sum = 0
    for i in range(100):
        # Row layout: [id, delta, 625 flattened cells].
        delta = input_values[i][1]
        end_state = input_values[i][2:].reshape((25, 25))
        gt_states = [end_state]
        for d in range(delta):
            gt_states.append(state_step(gt_states[-1]))
        # Step the board back one generation per net call.
        current_solve = end_state
        for d in range(delta):
            prev_solve = current_solve.copy()
            current_solve = net.solve(current_solve, device)  # video_fname='out/vid_out_{}.avi'.format(d + 1))
        # check error: re-simulate forward and compare with the given end state
        outc = current_solve
        for d in range(delta):
            outc = state_step(outc)
        c_error = state_loss(outc, end_state)
        print('Error d{}: {}'.format(delta, c_error))
        error_sum += c_error
        print("Moving avg: {}\n".format(error_sum / (i + 1)))
    print('\nMean Error: {}'.format(error_sum / 100))
def improve_submission():
    """Run one improvement pass over the reverse-Game-of-Life submission.

    Loads the most recent numbered submission/score CSV pair (or the Kaggle
    sample submission on the first run), re-solves every still-imperfect row
    whose delta equals ``DELTA`` in batches with a neural net followed by a
    stochastic refinement layer, keeps any strictly better start boards and
    writes a new numbered submission/score pair.  Requires CUDA and the
    checkpoint path below.
    """
    DELTA = 3
    if not os.path.isdir(SUBMISSION_DIR):
        os.makedirs(SUBMISSION_DIR)
    # Find the next free submission id (files are numbered consecutively).
    submission_id = 0
    while os.path.exists(SUBMISSION_FILE_FORMAT.format(submission_id)):
        submission_id += 1
    if submission_id == 0:
        df_submission = pd.read_csv("../input/sample_submission.csv")
        # All rows start as "all 625 cells wrong".
        # BUGFIX: `np.int` was removed in NumPy 1.24; builtin `int` is equivalent.
        df_scores = pd.DataFrame(np.ones(len(df_submission), dtype=int) * 25 * 25, columns=["num_wrong_cells"])
    else:
        df_submission = pd.read_csv(SUBMISSION_FILE_FORMAT.format(submission_id - 1))
        df_scores = pd.read_csv(SCORE_FILE_FORMAT.format(submission_id - 1))
    input_csv = "../input/test.csv"
    df_input = pd.read_csv(input_csv)
    input_values = np.array(df_input.values)
    scores_values = np.array(df_scores.values)
    submission_values = np.array(df_submission.values)
    to_check_rows = np.sum(scores_values > 0)
    print("To check rows: {}".format(to_check_rows))
    # -------------------
    # init models
    # -------------------
    device = torch.device('cuda')
    # net = GoLModule()
    # net.load_state_dict(torch.load('../out/training/snapshots/2020_09_25_18_51_41_GoL_delta_1/epoch_103.pt'))
    # net.eval()
    # net.to(device)
    net = BestGuessModule(channels=MODEL_CHANNELS)
    net.load_state_dict(torch.load('P:\\python\\convay-reversed\\best_guess\\out\\training\\snapshots\\128___2020_11_16_04_43_05_GoL_delta_1continue\\epoch_040.pt'))
    print('Num parameters: {}'.format(net.get_num_trainable_parameters()))
    net.to(device)
    net.eval()
    bcls = [
        BestChangeLayer(window=(3, 3), delta=DELTA, device=device)
    ]
    # -------------------
    # loop through examples
    start_time = time.time()
    batch_size = 16
    # BUGFIX: `np.int` was removed in NumPy 1.24; builtin `int` is equivalent.
    stop_states = np.zeros((batch_size, 25, 25), dtype=int)
    indices = np.zeros(batch_size, dtype=int)
    current_sample_idx = 0
    for i in range(len(submission_values)):
        # ignore if errors are already zero
        old_score = scores_values[i][0]
        if old_score == 0:
            continue
        delta = input_values[i][1]
        stop_state = input_values[i][2:].reshape((25, 25))
        # skipping if wanted
        if delta != DELTA:
            continue
        # Collect rows into a fixed-size batch before solving.
        indices[current_sample_idx] = i
        stop_states[current_sample_idx] = stop_state
        current_sample_idx = (current_sample_idx + 1) % batch_size
        if current_sample_idx != 0:
            continue
        print("\nRow {} with delta {} has old score: {}".format(i + 1, delta, old_score))
        # ------------------------
        # do the single prediction
        # ------------------------
        pred_start_states = stop_states.copy()
        bcl = random.choice(bcls)
        for d in range(delta):
            pred_start_states = net.solve_batch(pred_start_states, device)
        # Stochastic refinement on top of the net's proposal.
        pred_start_states = bcl.solve_batch(stop_states, device, num_steps=2000, initial_states=pred_start_states)
        # -------------------
        for index, pred_start_state, stop_state in zip(indices, pred_start_states, stop_states):
            # Score by re-simulating forward and counting wrong cells.
            pred_end_state = pred_start_state.copy()
            for d in range(delta):
                pred_end_state = state_step(pred_end_state)
            new_score = np.sum(np.abs(pred_end_state - stop_state))
            old_score = scores_values[index][0]
            if new_score < old_score:
                print("Improved from {} to {}!".format(old_score, new_score))
                submission_values[index, 1:] = pred_start_state.reshape((-1,))
                scores_values[index] = new_score
        # BUGFIX: `np.float` was removed in NumPy 1.24; builtin `float` is equivalent.
        print("Mean error is now {}".format(np.mean(scores_values, dtype=float) / (25 * 25)))
        if np.mean(scores_values, dtype=float) / (25 * 25) < 0.012346:
            break
        print("Estimated time left until finished: {} seconds".format(int((time.time() - start_time) * (len(submission_values) - i - 1) / (i + 1))))
    print("\n------------------------")
    print("Mean error is now {}".format(np.mean(scores_values) / (25 * 25)))
    print("Rows left to check: {}".format(np.sum(scores_values > 0)))
    print("Writing files...")
    df_submission = pd.DataFrame(data=submission_values, columns=list(df_submission.columns.values))
    df_scores = pd.DataFrame(data=scores_values, columns=list(df_scores.columns.values))
    df_submission.to_csv(SUBMISSION_FILE_FORMAT.format(submission_id), index=False)
    df_scores.to_csv(SCORE_FILE_FORMAT.format(submission_id), index=False)
    print("Done!")
def train():
    """Train a GoLModule on endlessly generated reverse-GoL samples.

    Runs EPOCHS epochs of STEPS_PER_EPOCH steps each, logging to TensorBoard,
    halving the learning rate every HALF_LR_AFTER_N_EPOCHS epochs, measuring
    a small end-to-end solve loss after each epoch and saving a snapshot per
    epoch.  Requires CUDA.
    """
    device = torch.device('cuda')
    dataset = GoLDataset(size=STEPS_PER_EPOCH * BATCH_SIZE, outline_size=OUTLINE_SIZE)
    loader = DataLoader(dataset,
                        batch_size=BATCH_SIZE,
                        num_workers=1,
                        pin_memory=True)
    net = GoLModule()
    print('Num parameters: {}'.format(net.get_num_trainable_parameters()))
    net.to(device)
    optimizer = optim.Adam(net.parameters(), lr=LR)
    # reduction='none' so the per-cell loss can be masked below.
    cel = nn.CrossEntropyLoss(reduction='none')
    train_writer = SummaryWriter(log_dir=TENSORBOARD_LOGS_DIR + '/' + RUN_NAME, comment=RUN_NAME)
    total_steps = 0
    for epoch in range(EPOCHS):
        batch_iter = iter(loader)
        for step in range(STEPS_PER_EPOCH):
            batch = next(batch_iter)
            net.zero_grad()
            torch_input = batch['input'].float().to(device)
            torch_target = batch['target'].long().to(device)
            torch_mask = batch['mask'].float().to(device)
            prediction = net(torch_input)
            # weight_loss = net.get_l2_loss_of_parameters()
            # Only already-"predicted" cells contribute to the loss.
            classification_loss = torch.mean(cel(prediction, torch_target) * torch_mask)
            loss = classification_loss # weight_loss + classification_loss
            loss.backward()
            optimizer.step()
            if step % 16 == 0:
                loss_item = loss.item()
                print('Epoch {} - Step {} - loss {:.5f}'.format(epoch + 1, step + 1, loss_item))
                # train_writer.add_scalar('loss/weights-l2', weight_loss.item(), total_steps)
                train_writer.add_scalar('loss/class-ce', classification_loss.item(), total_steps)
                train_writer.add_scalar('loss/train', loss_item, total_steps)
            total_steps += 1
        # print("Create test video")
        net.eval()
        # display_sample = create_training_sample()
        # net.solve(display_sample['end'],
        #           device,
        #           display_sample['start'],
        #           video_fname='{}/epoch_{:03}.avi'.format(VIDEO_DIR, epoch + 1)
        #           )
        print("Calculate epoch loss")
        # End-to-end metric: solve fresh boards, simulate them forward and
        # compare with the given end states.
        epoch_loss = 0
        for t in range(TEST_SAMPLES):
            test_sample = create_training_sample(random_warmup=True)
            res = net.solve(test_sample['end'], device)
            outc = res
            for d in range(GOL_DELTA):
                outc = state_step(outc)
            epoch_loss += np.mean(np.abs((outc - test_sample['end'])))
        epoch_loss /= TEST_SAMPLES
        print("Epoch loss {}".format(epoch_loss))
        train_writer.add_scalar('loss/test', epoch_loss, total_steps)
        net.train()
        # adjust lr: exponential decay, halving every HALF_LR_AFTER_N_EPOCHS epochs
        new_lr = LR * math.pow(0.5, (epoch / HALF_LR_AFTER_N_EPOCHS))
        for param_group in optimizer.param_groups:
            param_group['lr'] = new_lr
        train_writer.add_scalar('lr', new_lr, total_steps)
        print("Save snapshot")
        if not os.path.isdir(SNAPSHOTS_DIR):
            os.makedirs(SNAPSHOTS_DIR)
        torch.save(net.state_dict(), '{}/epoch_{:03}.pt'.format(SNAPSHOTS_DIR, epoch + 1))
    print("Done!")
# Script entry point: run a single submission-improvement pass.
if __name__ == "__main__":
    improve_submission()
| 23,236 | 16 | 759 |
e04b27f085886c6943117e834186a94704ba6132 | 1,553 | py | Python | src/compat_func.py | rangell/icff | a24b7b289219caef26d24e8cdbf37e24de49447a | [
"MIT"
] | null | null | null | src/compat_func.py | rangell/icff | a24b7b289219caef26d24e8cdbf37e24de49447a | [
"MIT"
] | null | null | null | src/compat_func.py | rangell/icff | a24b7b289219caef26d24e8cdbf37e24de49447a | [
"MIT"
] | null | null | null | import copy
import numpy as np
from sparse_dot_mkl import dot_product_mkl
from utils import MIN_FLOAT
from IPython import embed
#def raw_overlap(node, constraint, num_points):
| 34.511111 | 74 | 0.721185 | import copy
import numpy as np
from sparse_dot_mkl import dot_product_mkl
from utils import MIN_FLOAT
from IPython import embed
#def raw_overlap(node, constraint, num_points):
def raw_overlap(raw_reps, stacked_constraints, num_points):
    """Score each (raw representation, constraint) pair by positive-feature overlap.

    :param raw_reps: sparse feature matrix of node representations
                     (presumably signed feature indicators -- verify at caller).
    :param stacked_constraints: sparse matrix of constraints, one per row.
    :param num_points: score assigned to a *complete* overlap, so fully
                       satisfied constraints dominate partial ones.
    :return: dense array of scores; -inf marks incompatible pairs.
    """
    # Compatibility check via an infinity trick: scaling both operands to
    # +/-inf makes the dot product -inf when features of opposite sign meet,
    # and NaN when +inf and -inf contributions cancel -- both mark conflicts.
    extreme_raw_reps = copy.deepcopy(raw_reps).astype(float)
    extreme_raw_reps *= np.inf
    extreme_constraints = copy.deepcopy(stacked_constraints).astype(float)
    extreme_constraints.data *= np.inf
    compat_mx = dot_product_mkl(
        extreme_raw_reps, extreme_constraints.T, dense=True
    )
    incompat_mx = ((compat_mx == -np.inf) | np.isnan(compat_mx))
    # Raw overlap: inner product of representations with constraints.
    pos_overlap_scores = dot_product_mkl(
        raw_reps.astype(float), stacked_constraints.T, dense=True
    )
    pos_overlap_scores = np.array(pos_overlap_scores)
    pos_overlap_mask = (pos_overlap_scores > 0)
    # Normalise by the number of positive features in each constraint.
    pos_feat_totals = np.array(np.sum(stacked_constraints > 0, axis=1).T)
    overlap_scores = pos_overlap_scores / pos_feat_totals
    overlap_scores *= pos_overlap_mask
    # Complete overlap (fraction == 1) is boosted to num_points.
    overlap_scores[overlap_scores == 1] = num_points
    # Incompatible pairs can never be selected.
    overlap_scores[incompat_mx] = -np.inf
    return np.asarray(overlap_scores)
def transformed_overlap(node, constraint):
    """DISABLED: overlap score on a node's transformed representation.

    The author marked this as broken and guarded it with ``assert False``;
    note also that ``num_points`` below is undefined in this scope and would
    raise NameError if the final branch were ever reached.
    """
    # FIXME: this won't work
    assert False
    rep = node.transformed_rep
    pair_product = rep * constraint
    if np.array_equal(rep, np.ones_like(rep) * -np.inf) \
            or not np.all(pair_product >= 0):
        return MIN_FLOAT
    else:
        score = np.sum(pair_product > 0) / np.sum(constraint != 0)
        return score if score < 1 else num_points * score
| 1,326 | 0 | 45 |
de704cd31fbc4b18a4d1b37cb1147d6cf312e8b6 | 9,735 | py | Python | the_script.py | IntenseToxicity/Imperial-to-metric-conversion | 393d99c9ede1362f69c338c5d73a3f83df5d0f58 | [
"MIT"
] | null | null | null | the_script.py | IntenseToxicity/Imperial-to-metric-conversion | 393d99c9ede1362f69c338c5d73a3f83df5d0f58 | [
"MIT"
] | null | null | null | the_script.py | IntenseToxicity/Imperial-to-metric-conversion | 393d99c9ede1362f69c338c5d73a3f83df5d0f58 | [
"MIT"
] | null | null | null | """
Name: InsanityNet
Last Date Edited: 18-Feb-2022
Description:
Imperial and Metric conversion.
Feet and Inches to Meters OR Meters to Feet and Inches
Requirements:
fractions.Fraction function built into Python3
Known Issues:
1. Negative Feet and Positive Inches. I can't be bothered to fix it at 11 PM.
"""
# Necessary imports for this script to function.
from fractions import Fraction as Fracs
# Imperial to Metric Conversion
def imperial(x):
    """
    Convert Imperial feet (and inches) to metric meters.

    :param x: sequence of two strings: [feet, inches]; the inches entry may
              contain a fraction such as "3 1/2".
    :return: converted value in meters, rounded to 4 decimal places.
    """
    # FEET SECTION
    feet = float(x[0])
    # INCHES SECTION
    # Convert the inch (and fraction inch) string to a decimal inch value.
    inches = float(sum(Fracs(s) for s in x[1].split()))
    # BUGFIX (known issue #1): a negative feet value such as "-5 ft 3 in"
    # denotes -(5 ft 3 in), so the inches must carry the feet's sign.  The
    # string check also covers the "-0 ft" case, where float() loses the sign.
    sign = -1.0 if feet < 0 or x[0].strip().startswith('-') else 1.0
    # 1 ft = 0.3048 m and 1 in = 0.0254 m exactly (the previous 1/39.37
    # factor was only an approximation of the inch conversion).
    result = sign * (abs(feet) * 0.3048 + inches * 0.0254)
    # Format the result to 4 decimal places.
    return float("{:.4f}".format(result))
# Metric to Imperial Conversion
def metric(x):
    """
    This function will convert Metric meters to Imperial Feet (and Inches).

    :param x: length in meters (decimal notation).
    :return: tuple (whole_feet, inches) with inches rounded to 2 decimals.
    """
    # Meters -> decimal feet, rounded to 4 decimal places first.
    decimal_feet = float("{:.4f}".format(x * 3.280839895))
    # The fractional foot, expressed in inches.
    remainder_inches = decimal_feet % 1 * 12
    # Rounding guard: treat anything from ~11.992" upward as a full extra
    # foot, so the result never reads e.g. "5 ft 12 in".
    if remainder_inches >= 11.992:
        decimal_feet += 1
        remainder_inches -= 11.992
    # Whole feet only, inches limited to 2 decimal places.
    whole_feet = int(decimal_feet)
    inches = float("{:.2f}".format(remainder_inches))
    return whole_feet, inches
# Main function
# If not called as library, run the specified function automatically.
if __name__ == "__main__":
# MEME CONTROL! 'Cause it's 11 PM and I have no impulse control.
coconut = 1
# If coconut != NULL run the program
if coconut != '':
main()
# Otherwise, find the coconut so the program can run!
else:
print(f"Coconut is NULL. Find the Coconut.")
# My friend Kolock can go <explitive> himself for recommending complexity be added to this program.
# If negative feet and positive inches are input, wrong answer. I do not care enough to fix it at this time
# This is essentially python 101 so this is already overcomplicated based on your teachings. -_-
# My friend Kolock also specified I needed to add this: Coconut.jpg
# The above Something about good luck. Oh well, it's 11 PM, and I am tired of programming for the day.
# My Australian friend is a bad influence on me when I do not have impulse control this late at night.
| 32.667785 | 117 | 0.564355 | """
Name: InsanityNet
Last Date Edited: 18-Feb-2022
Description:
Imperial and Metric conversion.
Feet and Inches to Meters OR Meters to Feet and Inches
Requirements:
fractions.Fraction function built into Python3
Known Issues:
1. Negative Feet and Positive Inches. I can't be bothered to fix it at 11 PM.
"""
# Necessary imports for this script to function.
from fractions import Fraction as Fracs
# Imperial to Metric Conversion
def imperial(x):
    """
    Convert Imperial feet (and inches) to metric meters.

    :param x: sequence of two strings: [feet, inches]; the inches entry may
              contain a fraction such as "3 1/2".
    :return: converted value in meters, rounded to 4 decimal places.
    """
    # FEET SECTION
    feet = float(x[0])
    # INCHES SECTION
    # Convert the inch (and fraction inch) string to a decimal inch value.
    inches = float(sum(Fracs(s) for s in x[1].split()))
    # BUGFIX (known issue #1): a negative feet value such as "-5 ft 3 in"
    # denotes -(5 ft 3 in), so the inches must carry the feet's sign.  The
    # string check also covers the "-0 ft" case, where float() loses the sign.
    sign = -1.0 if feet < 0 or x[0].strip().startswith('-') else 1.0
    # 1 ft = 0.3048 m and 1 in = 0.0254 m exactly (the previous 1/39.37
    # factor was only an approximation of the inch conversion).
    result = sign * (abs(feet) * 0.3048 + inches * 0.0254)
    # Format the result to 4 decimal places.
    return float("{:.4f}".format(result))
# Metric to Imperial Conversion
def metric(x):
    """
    This function will convert Metric meters to Imperial Feet (and Inches).

    :param x: length in meters (decimal notation).
    :return: tuple (whole_feet, inches) with inches rounded to 2 decimals.
    """
    # Meters -> decimal feet, rounded to 4 decimal places first.
    decimal_feet = float("{:.4f}".format(x * 3.280839895))
    # The fractional foot, expressed in inches.
    remainder_inches = decimal_feet % 1 * 12
    # Rounding guard: treat anything from ~11.992" upward as a full extra
    # foot, so the result never reads e.g. "5 ft 12 in".
    if remainder_inches >= 11.992:
        decimal_feet += 1
        remainder_inches -= 11.992
    # Whole feet only, inches limited to 2 decimal places.
    whole_feet = int(decimal_feet)
    inches = float("{:.2f}".format(remainder_inches))
    return whole_feet, inches
# Main function
def main():
    """Interactive console driver: ask for a unit system and a length, then
    print the converted value with singular/plural-aware formatting.

    Calls ``quit(0)`` on completion, so it never returns to the caller.
    """
    # Initialize variables
    system_value = bool() # 0 for Metric start, 1 for Imperial start.
    initial = str() # The initial value to convert FROM.
    sysloop = True # Internal loop 1 initial starting value [Unit of measure validation loop]
    # User input and Validation
    while sysloop is True:
        try:
            # Get the system [Metric or Imperial] that the initial value will be in
            system = str(input(f"What system of measure are you converting from?\n"
                               f"[M]etric or [I]mperial?\n"
                               f">>> "))
            # If metric start, set system_value to 0
            if system.lower() == "m" or system.lower() == "metric":
                system_value = bool(0)
                break
            # If imperial start, set system_value to 1
            elif system.lower() == "i" or system.lower() == "imperial":
                system_value = bool(1)
                break
            # If not either of them, invalid, indicate, repeat input request.
            else:
                print(f"Invalid input, please select M for Metric or I for Imperial.")
                continue
        except ValueError:
            continue
    # ---------- IMPERIAL SECTION ----------
    # Send initial value to imperial function to convert to metric
    if system_value == 1:
        # Loop data validation, initial imperial input
        while True:
            initial = []
            # Initial validation and conversion from str fraction to decimal notation
            # using the fractions.Fraction imported function
            try:
                # Initial input of value to convert from
                x = str(input(f"\n"
                              f"How many Feet would you like to convert?\n"
                              f">>> "))
                y = str(input(f"\n"
                              f"How many inches would you like to convert?\n"
                              f">>> "))
                # Add to array
                initial.append(x)
                initial.append(y)
                # Break the loop
                break
            # IF Value error THEN print message and CONTINUE
            except ValueError:
                # Print error
                print("There was an error in your input. Please ensure it is in the proper notation.")
                # Clear array to retry
                initial.clear()
                # Continue loop (Retry)
                continue
        # Call imperial function and store the returned value in result variable.
        result = imperial(initial)
        # IF result is a whole number THEN print in Meters ELSE print in Centimeters
        # PLURAL VS SINGULAR CHECKS AND PRINTING
        # IF GREATER THAN 1
        if abs(float(result)) > 1:
            print(f"\n"
                  f"Your converted value is: {result} Meters")
        # OR IF EQUALS 1
        elif abs(float(result)) == 1:
            print(f"\n"
                  f"You converted value is: {result} Meter")
        # OR IF LESS THAN 1, CONVERT TO CENTIMETERS
        else:
            # Convert Decimal meters below 1 meter to centimeters
            result = result * 100
            # IF GREATER THAN 1
            if abs(result) > 1:
                print(f"\n"
                      f"Your converted value is: {result} Centimeters")
            # OR IF EQUALS 1
            if abs(result) == 1:
                print(f"\n"
                      f"Your converted value is: {result} Centimeter")
            # OR IF LESS THAN 1
            elif abs(result) < 1:
                print(f"\n"
                      f"Your converted value is: {result} Centimeters")
    # ---------- METRIC SECTION ----------
    # Send initial value to metric function to convert to imperial
    elif system_value == 0:
        # User input validation
        while True:
            try:
                initial = float(input(f"How many Meters would you like to convert?\n"
                                      f"In decimal notation please. [I.e, 1.75]\n"
                                      f">>> "))
            except ValueError:
                print("There was an error in your input. Please ensure it is in the proper notation.")
                continue
            break
        # Call metric function and store the returned value in result variable.
        # result is a (feet, inches) tuple.
        result = metric(initial)
        # This section was recommended to be added by my Australian friend Kolock. F KOLOCK!
        # PLURAL VS SINGULAR CHECKS
        # IF FEET EQUALS 1
        if abs(result[0]) == 1:
            # AND IF INCHES LESS THAN 0.01
            if abs(result[1]) < 0.01:
                print(f"Your converted value is:\n"
                      f"{result[0]} Foot")
            # OR IF INCHES EQUALS 1
            elif abs(result[1]) == 1:
                print(f"Your converted value is:\n"
                      f"{result[0]} Foot {result[1]} Inch")
            # OR IF INCHES GREATER THAN 1
            elif abs(result[1]) > 1:
                print(f"Your converted value is:\n"
                      f"{result[0]} Foot {result[1]} Inches")
        # OR IF FEET EQUALS 0
        elif abs(result[0]) == 0:
            # AND IF INCHES EQUALS 1
            if abs(result[1]) == 1:
                print(f"Your converted value is:\n"
                      f"{result[1]} Inch")
            # OR IF INCHES NOT EQUALS 1
            else:
                print(f"Your converted value is:\n"
                      f"{result[1]} Inches")
        # OTHERWISE, IF FEET BETWEEN (0 AND 1) AND FEET GREATER THAN 1
        else:
            # AND IF INCHES EQUALS 1
            if abs(result[1]) == 1:
                print(f"Your converted value is:\n"
                      f"{result[0]} Feet and {result[1]} Inch")
            # OR IF INCHES LESS THAN 1 AND GREATER THAN 0
            elif 0 < abs(result[1]) < 1.01:
                print(f"Your converted value is:\n"
                      f"{result[0]} Feet {result[1]} Inches")
            # OR IF INCHES APPROXIMATELY EQUALS 0
            elif abs(result[1]) < 0.01:
                print(f"Your converted value is:\n"
                      f"{result[0]} Feet")
    # Print end of program information
    print(f"\n"
          f"Thank you for converting!")
    # Quit the program with error code 0 [Success]
    quit(0)
# If not called as library, run the specified function automatically.
if __name__ == "__main__":
    # MEME CONTROL! 'Cause it's 11 PM and I have no impulse control.
    coconut = 1
    # If coconut != NULL run the program
    # (note: 1 != '' is always True, so main() always runs)
    if coconut != '':
        main()
    # Otherwise, find the coconut so the program can run!
    else:
        print(f"Coconut is NULL. Find the Coconut.")
# My friend Kolock can go <expletive> himself for recommending complexity be added to this program.
# If negative feet and positive inches are input, wrong answer. I do not care enough to fix it at this time
# This is essentially python 101 so this is already overcomplicated based on your teachings. -_-
# My friend Kolock also specified I needed to add this: Coconut.jpg
# The above Something about good luck. Oh well, it's 11 PM, and I am tired of programming for the day.
# My Australian friend is a bad influence on me when I do not have impulse control this late at night.
| 6,429 | 0 | 22 |
7224812f862ee6febf65f98bfc55b27264c7263e | 1,677 | py | Python | tack/commands/HelpCommand.py | mer1dian/anubis_py | cc34364d7da4591758dff0bf72db4a9b3063e843 | [
"Unlicense"
] | 3 | 2015-07-07T09:11:13.000Z | 2018-05-29T22:36:12.000Z | tack/commands/HelpCommand.py | mer1dian/anubis_py | cc34364d7da4591758dff0bf72db4a9b3063e843 | [
"Unlicense"
] | null | null | null | tack/commands/HelpCommand.py | mer1dian/anubis_py | cc34364d7da4591758dff0bf72db4a9b3063e843 | [
"Unlicense"
] | 2 | 2015-07-07T09:11:15.000Z | 2016-05-28T16:54:06.000Z | # Authors:
# Trevor Perrin
# Moxie Marlinspike
#
# See the LICENSE file for legal information regarding use of this file.
import sys
from tack.version import __version__
from tack.commands.Command import Command
from tack.commands.GenerateKeyCommand import GenerateKeyCommand
from tack.commands.SignCommand import SignCommand
from tack.commands.ViewCommand import ViewCommand
from tack.commands.PackCommand import PackCommand
from tack.commands.UnpackCommand import UnpackCommand
| 28.913793 | 85 | 0.68873 | # Authors:
# Trevor Perrin
# Moxie Marlinspike
#
# See the LICENSE file for legal information regarding use of this file.
import sys
from tack.version import __version__
from tack.commands.Command import Command
from tack.commands.GenerateKeyCommand import GenerateKeyCommand
from tack.commands.SignCommand import SignCommand
from tack.commands.ViewCommand import ViewCommand
from tack.commands.PackCommand import PackCommand
from tack.commands.UnpackCommand import UnpackCommand
class HelpCommand(Command):
    """Prints usage information for the other tack.py commands."""

    # Map of command name -> command class whose help text we can delegate to.
    COMMANDS = {
        "genkey": GenerateKeyCommand,
        "sign": SignCommand,
        "view": ViewCommand,
        "pack": PackCommand,
        "unpack": UnpackCommand,
    }

    def __init__(self, argv):
        Command.__init__(self, argv, "", "", allowArgRemainder=-1)
        # Exactly one trailing argument (the command name) is required.
        if len(self.argRemainder) != 1:
            HelpCommand.printGeneralUsage()
        self.command = self.argRemainder[0]
        if self.command not in HelpCommand.COMMANDS:
            self.printError("%s not a valid command." % self.command)

    def execute(self):
        # Delegate to the target command's own help text.
        HelpCommand.COMMANDS[self.command].printHelp()

    @staticmethod
    def printHelp():
        print(
"""Provides help for individual commands.
  help <command>
""")

    @staticmethod
    def printGeneralUsage(message=None):
        if message:
            print ("Error: %s\n" % message)
        sys.stdout.write(
"""tack.py version %s (%s)
Commands (use "help <command>" to see optional args):
  genkey
  sign   -k KEY -c CERT
  view   FILE
  help   COMMAND
("pack" and "unpack" are advanced commands for debugging)
""" % (__version__, Command.getCryptoVersion()))
        sys.exit(-1)
| 862 | 306 | 23 |
8be2cd9841d9145cd26261f7dfd950f7433f5eea | 789 | py | Python | ktane/mvc/message_controller.py | hanzikl/ktane-controller | 3116e76cd56f2c4348df99427c52db1c8ea065e9 | [
"MIT"
] | null | null | null | ktane/mvc/message_controller.py | hanzikl/ktane-controller | 3116e76cd56f2c4348df99427c52db1c8ea065e9 | [
"MIT"
] | null | null | null | ktane/mvc/message_controller.py | hanzikl/ktane-controller | 3116e76cd56f2c4348df99427c52db1c8ea065e9 | [
"MIT"
] | null | null | null | import serial
from .serial_mock.serial import Serial as SerialMock
from time import sleep
DELAY_BETWEEN_MESSAGES = 0.05
| 26.3 | 90 | 0.662864 | import serial
from .serial_mock.serial import Serial as SerialMock
from time import sleep
DELAY_BETWEEN_MESSAGES = 0.05
class MessageController:
    """Drains newline-terminated messages from a (possibly mocked) serial link."""

    def __init__(self, connection):
        """
        :param connection: serial.Serial
        """
        # Accept either a real pyserial connection or the test mock.
        assert isinstance(connection, (serial.Serial, SerialMock))
        self.connection = connection

    def process_message(self, msg):
        # Default handler just logs; callers may override with real handling.
        print("Processing message: {}".format(msg))

    def receive_messages(self):
        # The mock needs an explicit tick to produce pending data.
        if isinstance(self.connection, SerialMock):
            self.connection.update_mock()
        # Consume every buffered line, pausing briefly between messages.
        while self.connection.in_waiting > 0:
            self.process_message(self.connection.readline())
            sleep(DELAY_BETWEEN_MESSAGES)
075fdd513c640c9c351a31895ef902f02de9af5a | 8,996 | py | Python | cosilico/base/distribution.py | cosilico/cosilico | 983373139aeaf459271c559a47a6439939ec93a5 | [
"MIT"
] | null | null | null | cosilico/base/distribution.py | cosilico/cosilico | 983373139aeaf459271c559a47a6439939ec93a5 | [
"MIT"
] | null | null | null | cosilico/base/distribution.py | cosilico/cosilico | 983373139aeaf459271c559a47a6439939ec93a5 | [
"MIT"
] | null | null | null | from collections import Collection
import altair as alt
import pandas as pd
def histogram(x, data, opacity=1., maxbins=30, color=None, padding=0,):
"""Display a histogram.
Parameters
----------
x : str
value to be binned
data : pandas.DataFrame
dataframe containing x
opacity : float
opacity of the histogram layer
maxbins : int
max bins allowable in the histogram
color : str, None
Color of histogram layer
padding : int
Amount of padding on ends of x-axis
Example
-------
>>> import cosilico.base as base
>>> import seaborn as sns
>>>
>>> iris = sns.load_dataset('iris')
>>>
>>> base.histogram('sepal_length', iris)
Returns
-------
altair.Chart
"""
mark_kwargs = {
'opacity': opacity,
}
if color is not None: mark_kwargs['color'] = color
chart = alt.Chart(data).mark_bar(**mark_kwargs).encode(
x=alt.X(f'{x}:Q',
bin=alt.Bin(maxbins=maxbins),
title=x,
scale=alt.Scale(padding=padding)
),
y=alt.Y('count():Q',
title='Count',
)
)
return chart
def layered_histogram(x, hue, data, opacity=.6, maxbins=100,
stack=None, padding=0):
"""Display a layered histogram.
Parameters
----------
x : str
value to be binned
hue : str
value defining layers of the histogram
data : pandas.DataFrame
dataframe containing x and hue columns
opacity : float
opacity of the histogram layers
maxbins : int
max bins allowable in the histogram
stack : str, None, bool
argument for stack parameter in altair. If None,
then the areas of the layers that overlap will be
different colors. If 'zero', then the layers will
completly occlude one another.
padding : int
Amount of padding on ends of x-axis
Example
-------
>>> import cosilico.base as base
>>> import seaborn as sns
>>>
>>> iris = sns.load_dataset('iris')
>>>
>>> base.layered_histogram('sepal_length', 'species', iris)
Returns
-------
altair.Chart
"""
chart = alt.Chart(data).mark_area(
opacity=opacity,
interpolate='step'
).encode(
alt.X(f'{x}:Q', bin=alt.Bin(maxbins=100), title=x,
scale=alt.Scale(padding=padding)),
alt.Y('count()', stack=stack, title='Count'),
alt.Color(f'{hue}:N')
)
return chart
def distribution_plot(x, data, color=None, opacity=.6, bandwidth=.3,
filled=True, steps=200, x_pad_scaler=.2, line_only=False,
orientation='vertical'):
"""Display a simple distribution plot.
Parameters
----------
x : str
value to calculate distribution for.
data : pandas.DataFrame
dataframe containing x column
color : str, None
color of the distribution mark
opacity : float
opacity of the distribution plot layers
bandwidth : float
bandwidth used for density calculations
steps : int
number of steps used for smoothing distribution lines
x_pad_scaler : float
Used to extend x-axis range if needed. Adds
x_pad_scaler * (x_max_value - x_min_value) to each
side of the x-axis.
filled : bool
Whether the curve is filled or not.
line_only : bool
Whether to include only the distribution plot kernel line
orientation : str
Can either be 'vertical' or 'horizontal'
Example
-------
>>> import cosilico.base as base
>>> import seaborn as sns
>>>
>>> iris = sns.load_dataset('iris')
>>> base.distribution_plot('sepal_length', iris)
Returns
-------
altair.Chart
"""
chart = alt.Chart(data)
value_range = max(data[x]) - min(data[x])
chart = chart.transform_density(
density=x,
bandwidth=bandwidth,
counts=True,
extent=[min(data[x]) - float(x_pad_scaler * value_range),
max(data[x]) + float(x_pad_scaler * value_range)],
steps=steps,
)
axis_kwargs, mark_kwargs = {}, {}
if orientation == 'vertical':
mark_kwargs['orient'] = alt.Orientation('vertical')
if line_only:
chart = chart.mark_line(opacity=opacity, **mark_kwargs)
# axis_kwargs['axis'] = None
# if orientation == 'vertical':
# encode_kwargs['order'] = 'value:Q'
else:
chart = chart.mark_area(opacity=opacity, filled=filled,
**mark_kwargs)
# chart = chart.encode(
# x=alt.X(f'value:Q',
# title=x,
# **axis_kwargs
# ),
# y=alt.Y('density:Q',
# **axis_kwargs
# ),
# )
if orientation == 'horizontal':
chart = chart.encode(
x=alt.X(f'value:Q',
title=x,
**axis_kwargs
),
y=alt.Y('density:Q',
**axis_kwargs
),
)
else:
chart = chart.encode(
y=alt.X(f'value:Q',
title=x,
**axis_kwargs
),
x=alt.Y('density:Q',
**axis_kwargs
),
order='value:Q'
)
return chart
def layered_distribution_plot(x, data, hue=None, opacity=.6, bandwidth=.3,
steps=200, stack=None, x_pad_scaler=.2, filled=True):
"""Display a layered distribution plot.
Parameters
----------
x : Collection, str
value to calculate distribution for.
If x is an iterable, then x will be treated as a list values
to use for a fold transform. If x is a str, data will not be
fold transformed
data : pandas.DataFrame
dataframe containing values
hue : str, None
value defining layers of the distribution plot. If x is a
a string, then hue must be specified. Otherwise legend will
be named by the hue value.
opacity : float
opacity of the distribution plot layers
bandwidth : float
bandwidth used for density calculations
steps : int
number of steps used for smoothing distribution lines
stack : str, None, bool
argument for stack parameter in altair. If None,
then the areas of the layers that overlap will be
different colors. If 'zero', then the layers will
completly occlude one another.
x_pad_scaler : float
Used to extend x-axis range if needed. Adds
x_pad_scaler * (x_max_value - x_min_value) to each
side of the x-axis.
filled : bool
Whether the layers are filled or not.
Example
-------
>>> import cosilico.base as base
>>> import seaborn as sns
>>>
>>> iris = sns.load_dataset('iris')
>>> variables = ['sepal_length', 'sepal_width',
... 'petal_length', 'petal_width']
>>> base.layered_distribution_plot(variables, iris)
Returns
-------
altair.Chart
"""
transformed = data.copy()
if isinstance(x, Collection) and not isinstance(x, str):
transformed = data.melt(value_vars=x)
x = 'value'
if hue is not None:
transformed.columns = [hue if c == 'variable' else c
for c in transformed.columns]
else:
hue = 'variable'
value_range = max(transformed[x]) - min(transformed[x])
chart = alt.Chart(transformed).transform_density(
density=x,
bandwidth=bandwidth,
groupby=[hue],
counts=True,
extent=[min(transformed[x]) - float(x_pad_scaler * value_range),
max(transformed[x]) + float(x_pad_scaler * value_range)],
steps=steps,
).mark_area(
opacity=opacity,
filled=filled,
).encode(
x=alt.X(f'value:Q',
title=x
),
y=alt.Y('density:Q', stack=stack),
color=alt.Color(f'{hue}:N')
)
return chart
def boxplot(x, y, data, color=None):
"""Display a boxplot.
Arguments
---------
x : str
column in data holding x-axis categories
y : str
column in data holding y-axis values
data : pandas.DataFrame
dataframe holding x and y
color : str, None
If color is None, boxes will be colored by x.
Otherwise all boxes will be set to color.
Example
-------
>>> import seaborn as sns
>>> import cosilico.base as base
>>>
>>> iris = sns.load_dataset('iris')
>>>
>>> base.boxplot('species', 'sepal_width', iris)
Output
------
altair.Chart
"""
mark_kwargs, encode_kwargs = {}, {}
if color is not None:
mark_kwargs['color'] = color
else:
encode_kwargs['color'] = color=alt.Color(f'{x}:N')
chart = alt.Chart(data).mark_boxplot(**mark_kwargs).encode(
x=alt.X(f'{x}:N'),
y=alt.Y(f'{y}:Q'),
**encode_kwargs
)
return chart
| 27.178248 | 74 | 0.571699 | from collections import Collection
import altair as alt
import pandas as pd
def histogram(x, data, opacity=1., maxbins=30, color=None, padding=0,):
    """Display a histogram.

    Parameters
    ----------
    x : str
        value to be binned
    data : pandas.DataFrame
        dataframe containing x
    opacity : float
        opacity of the histogram layer
    maxbins : int
        max bins allowable in the histogram
    color : str, None
        Color of histogram layer
    padding : int
        Amount of padding on ends of x-axis

    Returns
    -------
    altair.Chart
    """
    # Only force a fixed bar color when the caller supplied one;
    # otherwise altair picks its default.
    bar_props = dict(opacity=opacity)
    if color is not None:
        bar_props['color'] = color

    x_enc = alt.X(
        f'{x}:Q',
        bin=alt.Bin(maxbins=maxbins),
        title=x,
        scale=alt.Scale(padding=padding),
    )
    y_enc = alt.Y('count():Q', title='Count')

    return alt.Chart(data).mark_bar(**bar_props).encode(x=x_enc, y=y_enc)
def layered_histogram(x, hue, data, opacity=.6, maxbins=100,
                      stack=None, padding=0):
    """Display a layered histogram.

    Parameters
    ----------
    x : str
        value to be binned
    hue : str
        value defining layers of the histogram
    data : pandas.DataFrame
        dataframe containing x and hue columns
    opacity : float
        opacity of the histogram layers
    maxbins : int
        max bins allowable in the histogram
    stack : str, None, bool
        argument for stack parameter in altair. If None,
        then the areas of the layers that overlap will be
        different colors. If 'zero', then the layers will
        completly occlude one another.
    padding : int
        Amount of padding on ends of x-axis

    Returns
    -------
    altair.Chart
    """
    chart = alt.Chart(data).mark_area(
        opacity=opacity,
        interpolate='step'
    ).encode(
        # BUG FIX: `maxbins` was accepted but ignored (hard-coded 100).
        alt.X(f'{x}:Q', bin=alt.Bin(maxbins=maxbins), title=x,
              scale=alt.Scale(padding=padding)),
        alt.Y('count()', stack=stack, title='Count'),
        alt.Color(f'{hue}:N')
    )
    return chart
def distribution_plot(x, data, color=None, opacity=.6, bandwidth=.3,
                      filled=True, steps=200, x_pad_scaler=.2, line_only=False,
                      orientation='vertical'):
    """Display a simple distribution plot.

    Parameters
    ----------
    x : str
        value to calculate distribution for.
    data : pandas.DataFrame
        dataframe containing x column
    color : str, None
        color of the distribution mark
    opacity : float
        opacity of the distribution plot layers
    bandwidth : float
        bandwidth used for density calculations
    filled : bool
        Whether the curve is filled or not (ignored when line_only).
    steps : int
        number of steps used for smoothing distribution lines
    x_pad_scaler : float
        Used to extend x-axis range if needed. Adds
        x_pad_scaler * (x_max_value - x_min_value) to each
        side of the x-axis.
    line_only : bool
        Whether to include only the distribution plot kernel line
    orientation : str
        Can either be 'vertical' or 'horizontal'

    Returns
    -------
    altair.Chart
    """
    value_range = max(data[x]) - min(data[x])
    chart = alt.Chart(data).transform_density(
        density=x,
        bandwidth=bandwidth,
        counts=True,
        # Pad the density extent so the curve is not clipped at the data range.
        extent=[min(data[x]) - float(x_pad_scaler * value_range),
                max(data[x]) + float(x_pad_scaler * value_range)],
        steps=steps,
    )

    mark_kwargs = {}
    # BUG FIX: `color` was documented and accepted but never applied.
    if color is not None:
        mark_kwargs['color'] = color
    if orientation == 'vertical':
        mark_kwargs['orient'] = alt.Orientation('vertical')

    if line_only:
        chart = chart.mark_line(opacity=opacity, **mark_kwargs)
    else:
        chart = chart.mark_area(opacity=opacity, filled=filled,
                                **mark_kwargs)

    if orientation == 'horizontal':
        chart = chart.encode(
            x=alt.X('value:Q', title=x),
            y=alt.Y('density:Q'),
        )
    else:
        # Vertical: density runs along x, values along y; `order` keeps
        # the line drawn in value order.
        chart = chart.encode(
            y=alt.X('value:Q', title=x),
            x=alt.Y('density:Q'),
            order='value:Q'
        )
    return chart
def layered_distribution_plot(x, data, hue=None, opacity=.6, bandwidth=.3,
                              steps=200, stack=None, x_pad_scaler=.2, filled=True):
    """Display a layered distribution plot.

    Parameters
    ----------
    x : Collection, str
        value to calculate distribution for.
        If x is an iterable (non-string), x is treated as a list of
        columns to fold (melt). If x is a str, data is not fold
        transformed.
    data : pandas.DataFrame
        dataframe containing values
    hue : str, None
        value defining layers of the distribution plot. If x is a
        a string, then hue must be specified. Otherwise legend will
        be named by the hue value.
    opacity : float
        opacity of the distribution plot layers
    bandwidth : float
        bandwidth used for density calculations
    steps : int
        number of steps used for smoothing distribution lines
    stack : str, None, bool
        argument for stack parameter in altair. If None,
        then the areas of the layers that overlap will be
        different colors. If 'zero', then the layers will
        completly occlude one another.
    x_pad_scaler : float
        Used to extend x-axis range if needed. Adds
        x_pad_scaler * (x_max_value - x_min_value) to each
        side of the x-axis.
    filled : bool
        Whether the layers are filled or not.

    Returns
    -------
    altair.Chart
    """
    transformed = data.copy()
    # FIX: the module-level `from collections import Collection` import is
    # removed in Python >= 3.10 (ABCs live in collections.abc); test for
    # "non-string iterable" directly instead of relying on that name.
    if not isinstance(x, str) and hasattr(x, '__iter__'):
        transformed = data.melt(value_vars=x)
        x = 'value'
        if hue is not None:
            transformed.columns = [hue if c == 'variable' else c
                                   for c in transformed.columns]
        else:
            hue = 'variable'

    value_range = max(transformed[x]) - min(transformed[x])
    chart = alt.Chart(transformed).transform_density(
        density=x,
        bandwidth=bandwidth,
        groupby=[hue],
        counts=True,
        extent=[min(transformed[x]) - float(x_pad_scaler * value_range),
                max(transformed[x]) + float(x_pad_scaler * value_range)],
        steps=steps,
    ).mark_area(
        opacity=opacity,
        filled=filled,
    ).encode(
        x=alt.X(f'value:Q',
                title=x
        ),
        y=alt.Y('density:Q', stack=stack),
        color=alt.Color(f'{hue}:N')
    )
    return chart
def boxplot(x, y, data, color=None):
    """Display a boxplot.

    Arguments
    ---------
    x : str
        column in data holding x-axis categories
    y : str
        column in data holding y-axis values
    data : pandas.DataFrame
        dataframe holding x and y
    color : str, None
        If color is None, boxes will be colored by x.
        Otherwise all boxes will be set to color.

    Returns
    -------
    altair.Chart
    """
    mark_kwargs, encode_kwargs = {}, {}
    if color is not None:
        mark_kwargs['color'] = color
    else:
        # FIX: was a chained assignment (`encode_kwargs['color'] = color=...`)
        # that needlessly rebound the `color` parameter.
        encode_kwargs['color'] = alt.Color(f'{x}:N')
    chart = alt.Chart(data).mark_boxplot(**mark_kwargs).encode(
        x=alt.X(f'{x}:N'),
        y=alt.Y(f'{y}:Q'),
        **encode_kwargs
    )
    return chart
| 0 | 0 | 0 |
7257b7a7a0223a998f334158bbdfce34ad0a62db | 897 | py | Python | indra/databases/ido_client.py | johnbachman/belpy | 1f8052c294fa05b3cd471c544b725f6f0adf9869 | [
"BSD-2-Clause"
] | 136 | 2016-02-11T22:06:37.000Z | 2022-03-31T17:26:20.000Z | indra/databases/ido_client.py | johnbachman/belpy | 1f8052c294fa05b3cd471c544b725f6f0adf9869 | [
"BSD-2-Clause"
] | 748 | 2016-02-03T16:27:56.000Z | 2022-03-09T14:27:54.000Z | indra/databases/ido_client.py | johnbachman/belpy | 1f8052c294fa05b3cd471c544b725f6f0adf9869 | [
"BSD-2-Clause"
] | 56 | 2015-08-28T14:03:44.000Z | 2022-02-04T06:15:55.000Z | """A client to OWL."""
from typing import Optional
from indra.databases.owl_client import OwlClient
_client = OwlClient('ido')
def get_ido_name_from_ido_id(ido_id: str) -> Optional[str]:
"""Return the HP name corresponding to the given HP ID.
Parameters
----------
ido_id :
The IDO identifier to be converted. Example: "0000403"
Returns
-------
:
The IDO name corresponding to the given IDO identifier.
"""
return _client.get_name_from_id(ido_id)
def get_ido_id_from_ido_name(ido_name: str) -> Optional[str]:
"""Return the HP identifier corresponding to the given IDO name.
Parameters
----------
ido_name :
The IDO name to be converted. Example: "parasite role"
Returns
-------
:
The IDO identifier corresponding to the given IDO name.
"""
return _client.get_id_from_name(ido_name)
| 22.425 | 68 | 0.651059 | """A client to OWL."""
from typing import Optional
from indra.databases.owl_client import OwlClient
_client = OwlClient('ido')
def get_ido_name_from_ido_id(ido_id: str) -> Optional[str]:
    """Return the IDO name corresponding to the given IDO ID.

    Parameters
    ----------
    ido_id :
        The IDO identifier to be converted. Example: "0000403"

    Returns
    -------
    :
        The IDO name corresponding to the given IDO identifier
        (``None`` when no match is found, per the ``Optional`` return type).
    """
    return _client.get_name_from_id(ido_id)
def get_ido_id_from_ido_name(ido_name: str) -> Optional[str]:
    """Return the IDO identifier corresponding to the given IDO name.

    Parameters
    ----------
    ido_name :
        The IDO name to be converted. Example: "parasite role"

    Returns
    -------
    :
        The IDO identifier corresponding to the given IDO name
        (``None`` when no match is found, per the ``Optional`` return type).
    """
    return _client.get_id_from_name(ido_name)
| 0 | 0 | 0 |
7d528b1a9dad3adfd057fcc208a09913c189b15a | 1,007 | py | Python | Python/seven_kyu/max_diff.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | 1 | 2019-12-20T04:09:56.000Z | 2019-12-20T04:09:56.000Z | Python/seven_kyu/max_diff.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | null | null | null | Python/seven_kyu/max_diff.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | null | null | null | # Python solution for 'max diff - easy' codewars question.
# Level: 7 kyu
# Tags: FUNDAMENTALS, MATHEMATICS, ALGORITHMS, NUMBERS, COLLECTIONS, LISTS, DATA STRUCTURES, AND ARRAYS.
# Author: Jack Brokenshire
# Date: 16/07/2020
import unittest
def max_diff(lst):
"""
Finds the difference beteween the largest and smallest items in a list.
:param lst: a list of integers.
:return: the difference between the biggest and the smallest value in a list received as parameter, otherwise, 0.
"""
if lst:
return max(lst) - min(lst)
return 0
class TestMaxDiff(unittest.TestCase):
"""Class to test 'max_diff' function"""
if __name__ == '__main__':
unittest.main()
| 29.617647 | 117 | 0.650447 | # Python solution for 'max diff - easy' codewars question.
# Level: 7 kyu
# Tags: FUNDAMENTALS, MATHEMATICS, ALGORITHMS, NUMBERS, COLLECTIONS, LISTS, DATA STRUCTURES, AND ARRAYS.
# Author: Jack Brokenshire
# Date: 16/07/2020
import unittest
def max_diff(lst):
    """
    Finds the difference between the largest and smallest items in a list.

    :param lst: a list of integers.
    :return: the difference between the biggest and smallest value in the
             list received as parameter; 0 for an empty list.
    """
    # An empty list has no spread; otherwise spread = max - min.
    return max(lst) - min(lst) if lst else 0
class TestMaxDiff(unittest.TestCase):
    """Class to test 'max_diff' function"""

    def test_max_diff(self):
        # Table-driven: (input list, expected spread).
        cases = [
            ([0, 1, 2, 3, 4, 5, 6], 6),
            ([-0, 1, 2, -3, 4, 5, -6], 11),
            ([0, 1, 2, 3, 4, 5, 16], 16),
            ([16], 0),
            ([], 0),
        ]
        for lst, expected in cases:
            self.assertEqual(max_diff(lst), expected)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| 278 | 0 | 27 |
340edcacb078a82926d5528e093ea31cb4090ca5 | 4,391 | py | Python | libs/clientQt.py | SilvioGiancola/TCPClient | aa6e5c137feeacb61900a848474730c1720d31bd | [
"MIT"
] | 1 | 2021-12-06T10:47:31.000Z | 2021-12-06T10:47:31.000Z | libs/clientQt.py | SilvioGiancola/TCPClient | aa6e5c137feeacb61900a848474730c1720d31bd | [
"MIT"
] | 1 | 2020-05-21T01:40:34.000Z | 2020-05-21T01:40:34.000Z | libs/clientQt.py | SilvioGiancola/TCPClient | aa6e5c137feeacb61900a848474730c1720d31bd | [
"MIT"
] | null | null | null | from PyQt5.QtCore import QDataStream, QIODevice, QObject, QByteArray, pyqtSignal
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QApplication, QDialog, QMainWindow, QLineEdit, QPushButton, QVBoxLayout
from PyQt5.QtWidgets import *
from PyQt5.QtNetwork import QTcpSocket, QAbstractSocket
from PyQt5.QtNetwork import *
from PyQt5.QtGui import QPixmap, QImage
from PyQt5 import QtWidgets, uic
from functools import partial
from libs.clientAbstract import ClientAbstract
# PORTS = (9998, 8000)
# class Client(QMainWindow, Ui_MainWindow):
| 32.286765 | 99 | 0.628786 | from PyQt5.QtCore import QDataStream, QIODevice, QObject, QByteArray, pyqtSignal
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QApplication, QDialog, QMainWindow, QLineEdit, QPushButton, QVBoxLayout
from PyQt5.QtWidgets import *
from PyQt5.QtNetwork import QTcpSocket, QAbstractSocket
from PyQt5.QtNetwork import *
from PyQt5.QtGui import QPixmap, QImage
from PyQt5 import QtWidgets, uic
from functools import partial
from libs.clientAbstract import ClientAbstract
# PORTS = (9998, 8000)
# class Client(QMainWindow, Ui_MainWindow):
class ClientQt(QTcpSocket, ClientAbstract):
    """Qt TCP client exchanging length-prefixed, type-tagged frames.

    Wire format per frame: quint32 payload size, quint32 header
    (0 = QString payload, 1 = PNG-encoded QImage payload), payload.
    """

    messageReceived = pyqtSignal(str)  # human-readable status / received text
    messageSent = pyqtSignal(str)      # emitted after a frame is written

    SIZEOF_UINT32 = 4  # byte length of the quint32 size prefix

    def __init__(self, parent=None, HOST="localhost", PORT=0000):
        # HOST/PORT are presumably stored by ClientAbstract.__init__
        # (this class reads self.HOST / self.PORT later) -- TODO confirm.
        super(ClientQt, self).__init__(parent, HOST, PORT)
        self.nextBlockSize = 0  # size of frame being read; 0 = awaiting size prefix
        self.request = None     # outgoing QByteArray under construction
        self.text = ""          # text (or image path) to send next

        self.stateChanged.connect(self.plotState)
        self.readyRead.connect(self.readFromServer)
        self.disconnected.connect(self.disconnectToServer)
        # NOTE(review): serverHasError is not defined in this class;
        # assumed to be provided by ClientAbstract -- confirm.
        self.error.connect(self.serverHasError)

    def isConnected(self):
        """Return True while the socket is in the connected state."""
        # BUG FIX: QAbstractSocket.state is a method in PyQt5; the original
        # compared the bound method itself to the enum, which is always False.
        return self.state() == QAbstractSocket.ConnectedState

    # Create connection to server
    def connectToServer(self):
        """Open the TCP connection and report the outcome via messageReceived."""
        self.connectToHost(self.HOST, self.PORT)
        if self.waitForConnected(2000):  # block up to 2 s for the handshake
            self.messageReceived.emit("[CONNECTED]")
        else:
            self.messageReceived.emit("[NOT CONNECTED]")

    def disconnectToServer(self):
        """Close the socket."""
        self.close()

    def sendImage(self):
        """Send the image file at path ``self.text`` as a PNG-encoded frame."""
        self.request = QByteArray()
        stream = QDataStream(self.request, QIODevice.WriteOnly)
        stream.setVersion(QDataStream.Qt_4_9)
        stream.writeUInt32(0)  # placeholder for the size prefix, patched below
        stream.writeUInt32(1)  # HEADER: this is a QImage

        ba = QByteArray()
        buffer = QBuffer(ba)
        self.img = QImage()
        self.img.load(f"{self.text}")
        self.img.save(buffer, "PNG")  # writes image into ba in PNG format
        stream.writeBytes(ba)

        # Rewind and overwrite the placeholder with the real payload size.
        stream.device().seek(0)
        stream.writeUInt32(self.request.size() - self.SIZEOF_UINT32)
        self.write(self.request)
        self.nextBlockSize = 0
        self.request = None

        print(f"sending '{self.text}' to Server")
        self.messageSent.emit("[SENT] " + self.text)

    def sendMessage(self):
        """Send ``self.text`` as a QString frame."""
        self.request = QByteArray()
        stream = QDataStream(self.request, QIODevice.WriteOnly)
        stream.setVersion(QDataStream.Qt_4_9)
        stream.writeUInt32(0)  # placeholder for the size prefix, patched below
        stream.writeUInt32(0)  # HEADER: this is a QString
        stream.writeQString(f"{self.text}")

        # Rewind and overwrite the placeholder with the real payload size.
        stream.device().seek(0)
        stream.writeUInt32(self.request.size() - self.SIZEOF_UINT32)
        self.write(self.request)
        self.nextBlockSize = 0
        self.request = None

        print(f"sending '{self.text}' to Server")
        self.messageSent.emit("[SENT] " + self.text)

    def readFromServer(self):
        """Drain complete frames from the socket, emitting text payloads."""
        stream = QDataStream(self)
        stream.setVersion(QDataStream.Qt_4_9)
        while True:
            if self.nextBlockSize == 0:
                # Need a full size prefix before reading anything else.
                if self.bytesAvailable() < self.SIZEOF_UINT32:
                    break
                self.nextBlockSize = stream.readUInt32()
            if self.bytesAvailable() < self.nextBlockSize:
                break  # frame not fully arrived yet; wait for more data
            header = stream.readUInt32()
            print("header", header)
            if header == 0:  # QString
                textFromServer = stream.readQString()
                print(f"received '{textFromServer}' from Server")
                self.messageReceived.emit("[RECEIVED] " + textFromServer)
            self.nextBlockSize = 0

    def plotState(self, state):
        """Log a human-readable name for a QAbstractSocket state change."""
        if state == 0: print("[STATE] UnconnectedState")
        elif state == 1: print("[STATE] HostLookupState")
        elif state == 2: print("[STATE] ConnectingState")
        elif state == 3: print("[STATE] ConnectedState")
        elif state == 4: print("[STATE] BoundState")
        elif state == 5: print("[STATE] ClosingState")
        elif state == 6: print("[STATE] ListeningState")
        else: print("ERROR: Undefined state")
52d1f7f89d9e7f47ec3399a3efebc70617bff62b | 1,462 | py | Python | handrecog/src/util/resize_image.py | hengxyz/hand_detection_recognition | 317545056886d7b85947f9258c4cc02e98cfd2fe | [
"MIT"
] | null | null | null | handrecog/src/util/resize_image.py | hengxyz/hand_detection_recognition | 317545056886d7b85947f9258c4cc02e98cfd2fe | [
"MIT"
] | null | null | null | handrecog/src/util/resize_image.py | hengxyz/hand_detection_recognition | 317545056886d7b85947f9258c4cc02e98cfd2fe | [
"MIT"
] | null | null | null | # from __future__ import absolute_import
# from __future__ import division
from __future__ import print_function
import os
import glob
import numpy as np
from PIL import Image
import cv2
if __name__ == '__main__':
main() | 31.782609 | 80 | 0.55814 | # from __future__ import absolute_import
# from __future__ import division
from __future__ import print_function
import os
import glob
import numpy as np
from PIL import Image
import cv2
def main():
data = '/data/zming/GH/manji/2000_labeled_sample_head'
output = '/data/zming/GH/manji/2000_labeled_sample_head_180'
size_dst = 180
folders = os.listdir(data)
folders.sort()
with open(os.path.join(data, 'bad_images_cv2.txt'), 'w') as f_badimg:
image_num = 0
for fold in folders:
if not os.path.isdir(os.path.join(data, fold)):
continue
if not os.path.isdir(os.path.join(output, fold)):
os.mkdir(os.path.join(output, fold))
images_path = glob.glob(os.path.join(data, fold, '*.png'))
images_path.sort()
for image_path in images_path:
image_num += 1
#print ('\r','%s'%image_path, end = '')
print(image_path)
img_name = str.split(image_path, '/')[-1]
try:
im = cv2.imread(image_path)
im_resize = cv2.resize(im, (size_dst, size_dst))
cv2.imwrite(os.path.join(output, fold, img_name), im_resize)
except:
print('!!!!!!!!!!!!! %s'%image_path)
f_badimg.write(image_path)
continue
if __name__ == '__main__':
    # Entry point when executed as a script.
    main()
e1aa4fd5a0a8efd8ef5ee8b595a5fc3860e6c737 | 2,686 | py | Python | Sentences from Tatoeba/SentenceFinder.py | tomPlus353/Japanese-Web-Scraping | b76cd72ddddaabf7176274032447d7f8d1326aa6 | [
"MIT"
] | null | null | null | Sentences from Tatoeba/SentenceFinder.py | tomPlus353/Japanese-Web-Scraping | b76cd72ddddaabf7176274032447d7f8d1326aa6 | [
"MIT"
] | null | null | null | Sentences from Tatoeba/SentenceFinder.py | tomPlus353/Japanese-Web-Scraping | b76cd72ddddaabf7176274032447d7f8d1326aa6 | [
"MIT"
] | null | null | null | import csv, os
os.chdir('C:\\Users\\tomas\\Desktop\\Multi-Side Flashcard Project\\Sentences from Tatoeba\\')
csv_handle = open("jpn_sentences.tsv",'r')
csv_read = csv.reader(csv_handle, delimiter="\t")
csvList = list(csv_read)
#print(len(csvList))
#print(f"Row 1, column 3: {csvList[0][2]}",f"Row 1, column 1: {csvList[0][0]}")
tatoebaDict = {}
for row in csvList:
tatoebaDict[row[0]] = row[2]
import shelve
os.chdir('C:\\Users\\tomas\\Desktop\\Multi-Side Flashcard Project\\')
d = shelve.open("N1 vocab data")
vocab = d['Vocab 1']
d.close()
#print(len(vocab))
#sentence matching.
#create a list matching sentences for each word
#first match my kanji, then match by kana, then match by kanji minus a ru at the end(or similar)
#Else append 'Sentence not found'
#NOTE -> you want to get some sort of autoconjugator for words
#refine this list to one sentence if 1) that sentence that has other words from vocab in it. (the most words)
#2) if length is 1 then no need to refine.
listOfLists = []
from progress.bar import ChargingBar
endings = {'しい':['しく','しな'],
"う":["って","った","わない",'い','え'],
"つ":["って","った","た",'ち','て'],
'いる':['いた','いて','い','いま','いない','いろ'],
'える':['えた','えて','え','えま','えない','えろ'],
"る":["って","て","った","た",'り','れ','ま','ない','ろ'],
"く":["いて","いた","か","き","け"],
"ぐ":["いで","いだ","が","ぎ","げ"],
"ぬ":["んで","んだ","な","に","ね"],
"ぶ":["んで","んだ","ば","び","べ"],
"む":["んで","んだ","ま","み","め"],
"す":["して","した","さ","し","せ"]}
with ChargingBar(max=len(vocab)) as bar:
for word in vocab:
nestedList = []
for code in tatoebaDict:
#print(f"DEBUGGING\nWord index zero = {word[0]}\nType of word index zero = {type(word[0])}")
#print(f"DEBUGGING\nContents of current key = {tatoebaDict[code]}\nType of content at current key = {type(tatoebaDict[code])}")
if word[0] in tatoebaDict[code]:
#print("DEBUGGING: IF")
nestedList.append(tatoebaDict[code])
elif word[0][-1] in list(endings.keys()):
for subend in endings[word[0][-1]]:
tempWord = word[0][:-1] + subend
if tempWord in tatoebaDict[code]:
#print("DEBUGGING: ELIF2")
if tatoebaDict[code] not in nestedList:
nestedList.append(tatoebaDict[code])
elif word[1] in tatoebaDict[code]:
#print("DEBUGGING: ELIF1")
nestedList.append(tatoebaDict[code])
if len(nestedList) == 0:
nestedList.append('Sentence not found.')
#print('Sentence not found.')
listOfLists.append(nestedList)
#print(f"LENGTH of listOfLists:{len(listOfLists)} ")
bar.next()
#print('LENGTH:\n',len(listOfLists),'CONTENTS:\n',listOfLists)
newData = shelve.open('Raw Sentences')
newData['Raw Sentences 2'] = listOfLists
newData.close() | 38.371429 | 131 | 0.629188 | import csv, os
os.chdir('C:\\Users\\tomas\\Desktop\\Multi-Side Flashcard Project\\Sentences from Tatoeba\\')
csv_handle = open("jpn_sentences.tsv",'r')
csv_read = csv.reader(csv_handle, delimiter="\t")
csvList = list(csv_read)
#print(len(csvList))
#print(f"Row 1, column 3: {csvList[0][2]}",f"Row 1, column 1: {csvList[0][0]}")
tatoebaDict = {}
for row in csvList:
tatoebaDict[row[0]] = row[2]
import shelve
os.chdir('C:\\Users\\tomas\\Desktop\\Multi-Side Flashcard Project\\')
d = shelve.open("N1 vocab data")
vocab = d['Vocab 1']
d.close()
#print(len(vocab))
#sentence matching.
#create a list matching sentences for each word
#first match my kanji, then match by kana, then match by kanji minus a ru at the end(or similar)
#Else append 'Sentence not found'
#NOTE -> you want to get some sort of autoconjugator for words
#refine this list to one sentence if 1) that sentence that has other words from vocab in it. (the most words)
#2) if length is 1 then no need to refine.
listOfLists = []
from progress.bar import ChargingBar
endings = {'しい':['しく','しな'],
"う":["って","った","わない",'い','え'],
"つ":["って","った","た",'ち','て'],
'いる':['いた','いて','い','いま','いない','いろ'],
'える':['えた','えて','え','えま','えない','えろ'],
"る":["って","て","った","た",'り','れ','ま','ない','ろ'],
"く":["いて","いた","か","き","け"],
"ぐ":["いで","いだ","が","ぎ","げ"],
"ぬ":["んで","んだ","な","に","ね"],
"ぶ":["んで","んだ","ば","び","べ"],
"む":["んで","んだ","ま","み","め"],
"す":["して","した","さ","し","せ"]}
with ChargingBar(max=len(vocab)) as bar:
for word in vocab:
nestedList = []
for code in tatoebaDict:
#print(f"DEBUGGING\nWord index zero = {word[0]}\nType of word index zero = {type(word[0])}")
#print(f"DEBUGGING\nContents of current key = {tatoebaDict[code]}\nType of content at current key = {type(tatoebaDict[code])}")
if word[0] in tatoebaDict[code]:
#print("DEBUGGING: IF")
nestedList.append(tatoebaDict[code])
elif word[0][-1] in list(endings.keys()):
for subend in endings[word[0][-1]]:
tempWord = word[0][:-1] + subend
if tempWord in tatoebaDict[code]:
#print("DEBUGGING: ELIF2")
if tatoebaDict[code] not in nestedList:
nestedList.append(tatoebaDict[code])
elif word[1] in tatoebaDict[code]:
#print("DEBUGGING: ELIF1")
nestedList.append(tatoebaDict[code])
if len(nestedList) == 0:
nestedList.append('Sentence not found.')
#print('Sentence not found.')
listOfLists.append(nestedList)
#print(f"LENGTH of listOfLists:{len(listOfLists)} ")
bar.next()
#print('LENGTH:\n',len(listOfLists),'CONTENTS:\n',listOfLists)
newData = shelve.open('Raw Sentences')
newData['Raw Sentences 2'] = listOfLists
newData.close() | 0 | 0 | 0 |
9b8939c98e297f7bb704484058d5a0d9967f3373 | 1,731 | py | Python | models/ssd/links/normalize.py | kumasento/gradient-scaling | 0ca435433b9953e33656173c4d60ebd61c5c5e87 | [
"MIT"
] | 7 | 2020-08-12T12:04:28.000Z | 2021-11-22T15:56:08.000Z | models/ssd/links/normalize.py | kumasento/gradient-scaling | 0ca435433b9953e33656173c4d60ebd61c5c5e87 | [
"MIT"
] | 1 | 2021-10-07T08:37:39.000Z | 2021-10-08T02:41:39.000Z | models/ssd/links/normalize.py | kumasento/gradient-scaling | 0ca435433b9953e33656173c4d60ebd61c5c5e87 | [
"MIT"
] | null | null | null | import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
from chainer import variable
class Normalize(chainer.Link):
"""Learnable L2 normalization [#]_.
This link normalizes input along the channel axis and scales it.
The scale factors are trained channel-wise.
.. [#] Wei Liu, Andrew Rabinovich, Alexander C. Berg.
ParseNet: Looking Wider to See Better. ICLR 2016.
Args:
n_channel (int): The number of channels.
initial: A value to initialize the scale factors. It is pased to
:meth:`chainer.initializers._get_initializer`. The default value
is 0.
eps (float): A small value to avoid zero-division. The default value
is :math:`1e-5`.
"""
def forward(self, x):
"""Normalize input and scale it.
Args:
x (chainer.Variable): A variable holding 4-dimensional array.
Its :obj:`dtype` is :obj:`numpy.float32`.
Returns:
chainer.Variable:
The shape and :obj:`dtype` are same as those of input.
"""
x = F.normalize(x, eps=self.eps, axis=1)
scale = F.broadcast_to(self.scale[:, np.newaxis, np.newaxis], x.shape)
return F.cast(x * scale, chainer.get_dtype())
| 33.288462 | 78 | 0.620451 | import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
from chainer import variable
class Normalize(chainer.Link):
"""Learnable L2 normalization [#]_.
This link normalizes input along the channel axis and scales it.
The scale factors are trained channel-wise.
.. [#] Wei Liu, Andrew Rabinovich, Alexander C. Berg.
ParseNet: Looking Wider to See Better. ICLR 2016.
Args:
n_channel (int): The number of channels.
initial: A value to initialize the scale factors. It is pased to
:meth:`chainer.initializers._get_initializer`. The default value
is 0.
eps (float): A small value to avoid zero-division. The default value
is :math:`1e-5`.
"""
def __init__(self, n_channel, initial=0, eps=1e-5):
super(Normalize, self).__init__()
self.eps = eps
with self.init_scope():
# force to use float32
with chainer.using_config("dtype", "float32"):
initializer = initializers._get_initializer(initial)
self.scale = variable.Parameter(initializer)
self.scale.initialize((n_channel),)
def forward(self, x):
"""Normalize input and scale it.
Args:
x (chainer.Variable): A variable holding 4-dimensional array.
Its :obj:`dtype` is :obj:`numpy.float32`.
Returns:
chainer.Variable:
The shape and :obj:`dtype` are same as those of input.
"""
x = F.normalize(x, eps=self.eps, axis=1)
scale = F.broadcast_to(self.scale[:, np.newaxis, np.newaxis], x.shape)
return F.cast(x * scale, chainer.get_dtype())
| 403 | 0 | 27 |
f112c3c7f3993e3a6456b203d747ff3176963dc6 | 1,366 | py | Python | webroot/python/goldXp.py | peemoRoyal/dotadude | a90faee23a6ce7755d0c7266f1dcdb46816a8d6d | [
"MIT"
] | null | null | null | webroot/python/goldXp.py | peemoRoyal/dotadude | a90faee23a6ce7755d0c7266f1dcdb46816a8d6d | [
"MIT"
] | null | null | null | webroot/python/goldXp.py | peemoRoyal/dotadude | a90faee23a6ce7755d0c7266f1dcdb46816a8d6d | [
"MIT"
] | null | null | null | #!/usr/bin/python2.4
# Small script to show PostgreSQL and Pyscopg together
#
import json
import psycopg2
import sys
match_id = sys.argv[1]
match_id = 2125002095
conn = psycopg2.connect("dbname='postgres' user='petergleixner' host='localhost' password=''")
cursor = conn.cursor()
cursor.execute('''SELECT replay_file FROM replay_data WHERE match_id = %s''' %(match_id))
db_file = cursor.fetchone()
replay_file = json.dumps(db_file)[1:-1]
meta_info = json.loads(replay_file)["meta_info"]
game_info = meta_info["game_info"]
player_info = meta_info["player_info"]
replay = json.loads(replay_file)["replay"]
#Creating dict with heroname:list
hero_dict = dict()
for g in player_info:
hero_dict[g["hero_name"]] = []
count = 0
for r in replay:
if "gold_total" in r["data"]:
count = count + 1
hero_name = r["data"]["hero_name"]
tick = r["tick"]
gold = r["data"]["gold_total"]
tmp_dict = {"x":tick, "y":gold}
hero_dict[hero_name].append(tmp_dict)
print count
print hero_name
print "------------------"
#if hero_name == "lion":
for hero_name in hero_dict:
tmp_hero_gold = json.dumps(hero_dict[hero_name])
#cursor.execute('''INSERT INTO gold_xp (match_id, hero_name, team_id, gold_data) VALUES (%s,%s,%s,%s)''', (match_id, hero_name, 4, tmp_hero_gold))
#conn.commit()
| 28.458333 | 150 | 0.662518 | #!/usr/bin/python2.4
# Small script to show PostgreSQL and Pyscopg together
#
import json
import psycopg2
import sys
match_id = sys.argv[1]
match_id = 2125002095
conn = psycopg2.connect("dbname='postgres' user='petergleixner' host='localhost' password=''")
cursor = conn.cursor()
cursor.execute('''SELECT replay_file FROM replay_data WHERE match_id = %s''' %(match_id))
db_file = cursor.fetchone()
replay_file = json.dumps(db_file)[1:-1]
meta_info = json.loads(replay_file)["meta_info"]
game_info = meta_info["game_info"]
player_info = meta_info["player_info"]
replay = json.loads(replay_file)["replay"]
#Creating dict with heroname:list
hero_dict = dict()
for g in player_info:
hero_dict[g["hero_name"]] = []
count = 0
for r in replay:
if "gold_total" in r["data"]:
count = count + 1
hero_name = r["data"]["hero_name"]
tick = r["tick"]
gold = r["data"]["gold_total"]
tmp_dict = {"x":tick, "y":gold}
hero_dict[hero_name].append(tmp_dict)
print count
print hero_name
print "------------------"
#if hero_name == "lion":
for hero_name in hero_dict:
tmp_hero_gold = json.dumps(hero_dict[hero_name])
#cursor.execute('''INSERT INTO gold_xp (match_id, hero_name, team_id, gold_data) VALUES (%s,%s,%s,%s)''', (match_id, hero_name, 4, tmp_hero_gold))
#conn.commit()
| 0 | 0 | 0 |
4da811dd32ab7ed910bbf58a7870855477f9d2c5 | 16,050 | py | Python | hermione/module_templates/__NOT_IMPLEMENTED_BASE__/src/ml/analysis/hypothesis_autopilot.py | karenstemartins/hermione | 3d411ea7a8a8e1929e3b7f0fb0a0d238d25696d0 | [
"Apache-2.0"
] | 183 | 2020-06-03T22:43:14.000Z | 2022-03-17T22:39:07.000Z | hermione/module_templates/__NOT_IMPLEMENTED_BASE__/src/ml/analysis/hypothesis_autopilot.py | karenstemartins/hermione | 3d411ea7a8a8e1929e3b7f0fb0a0d238d25696d0 | [
"Apache-2.0"
] | 31 | 2020-06-03T22:55:18.000Z | 2022-03-27T20:06:17.000Z | hermione/module_templates/__NOT_IMPLEMENTED_BASE__/src/ml/analysis/hypothesis_autopilot.py | karenstemartins/hermione | 3d411ea7a8a8e1929e3b7f0fb0a0d238d25696d0 | [
"Apache-2.0"
] | 43 | 2020-06-03T22:45:03.000Z | 2021-12-29T19:43:54.000Z | import pandas as pd
import numpy as np
import operator
import warnings
from ml.analysis.hypothesis_testing import HypothesisTester
class HTestAutoPilot:
"""
Chooses what is the most adequate hypothesis test for a given dataset,
based on its datatypes and the assumptions of each test
"""
@staticmethod
def check_binary(col):
"""
Check if data is binary
Parameters
----------
col : array_like
Array of sample data, must be quantitative data.
Returns
-------
Bool
"""
for data in col:
if data not in [0, 1]:
return False
return True
@staticmethod
def check_norm(sample1, sample2, alpha=0.05, normality_method='shapiro'):
"""
Check normality
Parameters
----------
sample1 : array_like
Array of sample data, must be quantitative data.
sample2 : array_like
Array of sample data, must be quantitative data.
alpha : float
level of significance (default = 0.05)
normality_method : string
normality test to be applied
Returns
-------
Array
"""
return [HypothesisTester.normality_test(
s,
alpha=alpha,
method=normality_method,
show_graph=False
).loc['normal'][0] for s in [sample1, sample2]]
@staticmethod
def correlation(sample1, sample2, alpha=0.05,
alternative='two-sided', normality_method='shapiro',
show_graph=True, **kwargs):
"""
Autopilot for correlation tests
Parameters
----------
sample1 : array_like
Array of sample data, must be quantitative data.
sample2 : array_like
Array of sample data, must be quantitative data.
alpha : float
level of significance (default = 0.05)
alternative : string
Specify whether the alternative hypothesis is `'two-sided'`,
`'greater'` or `'less'` to specify the direction of the
test.
normality_method : string
normality test to be applied
Returns
-------
pd.DataFrame
"""
sample1, sample2 = np.array(sample1), np.array(sample2)
np_types = [np.dtype(i) for i in [np.int32, np.int64, np.float32,
np.float64]]
if any([t not in np_types for t in [sample1.dtype, sample2.dtype]]):
raise Exception('Samples are not numerical. ',
'Try using categorical_test method instead.')
check_bin1 = HTestAutoPilot.check_binary(sample1)
check_bin2 = HTestAutoPilot.check_binary(sample2)
if check_bin1 and check_bin2:
raise Exception('Both samples are binary, ',
'unable to calculate correlation.')
elif sum([check_bin1, check_bin2]) == 1:
print('One binary sample and one real sample.',
'Point-biserial correlation is going to be applied.')
corr_method = 'pointbiserial'
binary_sample = sample2 if not check_bin1 else sample1
num_sample = sample1 if not check_bin1 else sample2
sample1, sample2 = [binary_sample, num_sample]
else:
check_norm1, check_norm2 = HTestAutoPilot.check_norm(
sample1, sample2,
alpha, normality_method
)
if check_norm1 and check_norm2:
print('Samples are normally distributed.',
'Using Pearson correlation.')
corr_method = 'pearson'
else:
print('Samples are not normally distributed.',
'Using Spearman correlation.')
corr_method = 'spearman'
df_result = HypothesisTester.correlation_test(
sample1,
sample2,
method=corr_method,
alpha=alpha,
alternative=alternative,
show_graph=show_graph,
**kwargs
)
return df_result
@staticmethod
def categorical(df, sample1, sample2, alpha=0.05,
alternative='two-sided', correction=True,
show_graph=True, **kwargs):
"""
Autopilot for tests with categorical variables
Parameters
----------
df : pandas.DataFrame
The dataframe containing the ocurrences for the test.
sample1 : string
The variable name for the test. Must be names of columns in
``data``.
sample2 : string
The variable name for the test. Must be names of columns in
``data``.
alpha : float
level of significance (default = 0.05)
alternative : string
Specify whether to return `'two-sided'`, `'greater'` or
`'less'` p-value to specify the direction of the test.
correction : bool
Whether to apply Yates' correction when the degree of freedom
of the observed contingency table is 1 (Yates 1934). In case
of Chi-squared test.
show_graph: boolean
display the graph.
Returns
-------
pd.DataFrame
"""
df_chi2 = HypothesisTester.chi2_test(
df,
sample1, sample2,
correction,
alpha,
show_graph,
**kwargs
)
table = (df.groupby([sample1, sample2]).size() > 5)
if table.sum() == len(table):
df_result = df_chi2
else:
if len(df[sample1].unique()) == 2 and len(df[sample2].unique()) == 2:
warnings.warn("The number of observations is not indicated " +
"for the chi-squared test, cannot garantee a " +
"correct inference. Also using Fisher's exact" +
" test.")
df_fisher = HypothesisTester.fisher_exact_test(
df,
sample1,
sample2,
alpha,
show_graph=False
)
df_result = pd.concat([df_chi2, df_fisher], axis=1).fillna('-')
else:
warnings.warn("The number of observations is not indicated " +
"for the chi-squared test, cannot garantee a " +
"correct inference.")
df_result = df_chi2
return df_result
@staticmethod
def independent_difference(sample1, sample2, alpha=0.05,
alternative='two-sided', correction='auto',
r=0.707, normality_method='shapiro',
show_graph=True, **kwargs):
"""
Autopilot for testing the difference in means for independent samples
Parameters
----------
sample1 : array_like
Array of sample data, must be quantitative data.
sample2 : array_like
Array of sample data, must be quantitative data.
alpha : float
level of significance (default = 0.05)
alternative : string
Specify whether the alternative hypothesis is
`'two-sided'`, `'greater'` or `'less'` to specify
the direction of the test.
correction : string or boolean
For unpaired two sample T-tests, specify whether
or not to correct for unequal variances using Welch
separate variances T-test. If 'auto', it will automatically
uses Welch T-test when the sample sizes are unequal, as
recommended by Zimmerman 2004.
r : float
Cauchy scale factor for computing the Bayes Factor.
Smaller values of r (e.g. 0.5), may be appropriate when small
effect sizes are expected a priori; larger values of r are
appropriate when large effect sizes are expected
(Rouder et al 2009). The default is 0.707 (= :math:`\sqrt{2} / 2`).
normality_method : string
normality test to be applied
show_graph: boolean
display the graph.
Returns
-------
pd.DataFrame
"""
check_norm1, check_norm2 = HTestAutoPilot.check_norm(
sample1, sample2,
alpha, normality_method
)
if check_norm1 and check_norm2:
print('Samples are normally distributed, an ideal condition',
'for the application of t-test')
df_result = HypothesisTester.t_test(
sample1,
sample2,
paired=False,
alpha=alpha,
alternative=alternative,
correction=correction,
r=r,
show_graph=show_graph,
**kwargs
)
elif (check_norm1 is False and len(sample1) < 30) or \
(check_norm2 is False and len(sample2) < 30):
print('At least one of the samples is not normally distributed.',
'However, the t-test can be applied due to central limit',
'theorem (n>30). The Mann-Whitney test is also an option',
'as it does not make any assumptions about data ditribution',
'(non-parametric alternative)')
df_result = HypothesisTester.mann_whitney_2indep(
sample1,
sample2,
alpha,
alternative,
show_graph,
**kwargs
)
else:
print('At least one of the samples is not normally distributed',
'and due to the number of observations the central limit',
'theorem does not apply. In this case, the Mann-Whitney',
'test is used as it does not make any assumptions about',
'data ditribution (non-parametric alternative)')
df_result = HypothesisTester.t_test(
sample1,
sample2,
paired=False,
alpha=alpha,
alternative=alternative,
correction=correction,
r=r,
show_graph=show_graph,
**kwargs
)
df_result_non_param = HypothesisTester.mann_whitney_2indep(
sample1,
sample2,
alpha,
alternative,
show_graph=False
)
df_result = (
pd.concat([df_result, df_result_non_param], axis=1)
.reindex(['T', 'dof', 'cohen-d', 'BF10', 'power',
'U-val', 'RBC', 'CLES', 'p-val', 'CI95%',
'H0', 'H1', 'Result'])
.fillna('-')
)
return df_result
@staticmethod
def dependent_difference(sample1, sample2, alpha=0.05, alternative='two-sided',
correction='auto', r=0.707, normality_method='shapiro',
show_graph=True, **kwargs):
"""
Autopilot for testing the difference in means for dependent samples
Parameters
----------
sample1 : array_like
Array of sample data, must be quantitative data.
sample2 : array_like
Array of sample data, must be quantitative data.
alpha : float
level of significance (default = 0.05)
alternative : string
Specify whether the alternative hypothesis is
`'two-sided'`, `'greater'` or `'less'` to
specify the direction of the test.
correction : string or boolean
For unpaired two sample T-tests, specify whether
or not to correct for unequal variances using
Welch separate variances T-test. If 'auto', it
will automatically uses Welch T-test when the
sample sizes are unequal, as recommended by Zimmerman
2004.
r : float
Cauchy scale factor for computing the Bayes Factor.
Smaller values of r (e.g. 0.5), may be appropriate
when small effect sizes are expected a priori; larger
values of r are appropriate when large effect sizes are
expected (Rouder et al 2009).
The default is 0.707 (= :math:`\sqrt{2} / 2`).
normality_method : string
normality test to be applied
show_graph: boolean
display the graph.
Returns
-------
pd.DataFrame
"""
diff_sample = sorted(list(map(operator.sub, sample1, sample2)))
check_norm_diff = (
HypothesisTester
.normality_test(
diff_sample,
alpha,
normality_method,
show_graph=False
)
.loc['normal'][0]
)
if check_norm_diff:
print('The distribution of differences is normally distributed',
'an ideal condition for the application of t-test.')
df_result = HypothesisTester.t_test(
sample1,
sample2,
paired=True,
alpha=alpha,
alternative=alternative,
correction=correction,
r=r,
show_graph=show_graph,
**kwargs
)
elif len(sample1) > 30 and len(sample2) > 30:
print('The distribution of differences is not normally',
'distributed. However, the t-test can be applied',
'due to central limit theorem (n>30). The Wilcoxon',
'test is also an option as it does not make any assumptions',
'about data ditribution (non-parametric alternative).')
df_result = HypothesisTester.t_test(
sample1,
sample2,
paired=True,
alpha=alpha,
alternative=alternative,
correction=correction,
r=r,
show_graph=show_graph,
**kwargs
)
df_result_non_param = HypothesisTester.wilcoxon_test(
sample1,
sample2,
alpha,
alternative,
show_graph=False,
**kwargs
)
df_result = (
pd.concat([df_result, df_result_non_param], axis=1)
.reindex([
'T', 'dof', 'cohen-d', 'BF10',
'power', 'W-val', 'RBC', 'CLES',
'p-val', 'CI95%', 'H0', 'H1', 'Result'
])
.fillna('-')
)
else:
print('The distribution of differences is not normally',
'distributed and due to the number of observations the',
'central limit theorem does not apply. In this case,',
'the Wilcoxon test is indicated as it does not make',
'any assumptions about data distribution',
'(non-parametric alternative).')
df_result = HypothesisTester.wilcoxon_test(
sample1,
sample2,
alpha,
alternative,
show_graph,
**kwargs
)
return df_result
| 38.214286 | 84 | 0.511402 | import pandas as pd
import numpy as np
import operator
import warnings
from ml.analysis.hypothesis_testing import HypothesisTester
class HTestAutoPilot:
"""
Chooses what is the most adequate hypothesis test for a given dataset,
based on its datatypes and the assumptions of each test
"""
@staticmethod
def check_binary(col):
"""
Check if data is binary
Parameters
----------
col : array_like
Array of sample data, must be quantitative data.
Returns
-------
Bool
"""
for data in col:
if data not in [0, 1]:
return False
return True
@staticmethod
def check_norm(sample1, sample2, alpha=0.05, normality_method='shapiro'):
"""
Check normality
Parameters
----------
sample1 : array_like
Array of sample data, must be quantitative data.
sample2 : array_like
Array of sample data, must be quantitative data.
alpha : float
level of significance (default = 0.05)
normality_method : string
normality test to be applied
Returns
-------
Array
"""
return [HypothesisTester.normality_test(
s,
alpha=alpha,
method=normality_method,
show_graph=False
).loc['normal'][0] for s in [sample1, sample2]]
@staticmethod
def correlation(sample1, sample2, alpha=0.05,
alternative='two-sided', normality_method='shapiro',
show_graph=True, **kwargs):
"""
Autopilot for correlation tests
Parameters
----------
sample1 : array_like
Array of sample data, must be quantitative data.
sample2 : array_like
Array of sample data, must be quantitative data.
alpha : float
level of significance (default = 0.05)
alternative : string
Specify whether the alternative hypothesis is `'two-sided'`,
`'greater'` or `'less'` to specify the direction of the
test.
normality_method : string
normality test to be applied
Returns
-------
pd.DataFrame
"""
sample1, sample2 = np.array(sample1), np.array(sample2)
np_types = [np.dtype(i) for i in [np.int32, np.int64, np.float32,
np.float64]]
if any([t not in np_types for t in [sample1.dtype, sample2.dtype]]):
raise Exception('Samples are not numerical. ',
'Try using categorical_test method instead.')
check_bin1 = HTestAutoPilot.check_binary(sample1)
check_bin2 = HTestAutoPilot.check_binary(sample2)
if check_bin1 and check_bin2:
raise Exception('Both samples are binary, ',
'unable to calculate correlation.')
elif sum([check_bin1, check_bin2]) == 1:
print('One binary sample and one real sample.',
'Point-biserial correlation is going to be applied.')
corr_method = 'pointbiserial'
binary_sample = sample2 if not check_bin1 else sample1
num_sample = sample1 if not check_bin1 else sample2
sample1, sample2 = [binary_sample, num_sample]
else:
check_norm1, check_norm2 = HTestAutoPilot.check_norm(
sample1, sample2,
alpha, normality_method
)
if check_norm1 and check_norm2:
print('Samples are normally distributed.',
'Using Pearson correlation.')
corr_method = 'pearson'
else:
print('Samples are not normally distributed.',
'Using Spearman correlation.')
corr_method = 'spearman'
df_result = HypothesisTester.correlation_test(
sample1,
sample2,
method=corr_method,
alpha=alpha,
alternative=alternative,
show_graph=show_graph,
**kwargs
)
return df_result
@staticmethod
def categorical(df, sample1, sample2, alpha=0.05,
alternative='two-sided', correction=True,
show_graph=True, **kwargs):
"""
Autopilot for tests with categorical variables
Parameters
----------
df : pandas.DataFrame
The dataframe containing the ocurrences for the test.
sample1 : string
The variable name for the test. Must be names of columns in
``data``.
sample2 : string
The variable name for the test. Must be names of columns in
``data``.
alpha : float
level of significance (default = 0.05)
alternative : string
Specify whether to return `'two-sided'`, `'greater'` or
`'less'` p-value to specify the direction of the test.
correction : bool
Whether to apply Yates' correction when the degree of freedom
of the observed contingency table is 1 (Yates 1934). In case
of Chi-squared test.
show_graph: boolean
display the graph.
Returns
-------
pd.DataFrame
"""
df_chi2 = HypothesisTester.chi2_test(
df,
sample1, sample2,
correction,
alpha,
show_graph,
**kwargs
)
table = (df.groupby([sample1, sample2]).size() > 5)
if table.sum() == len(table):
df_result = df_chi2
else:
if len(df[sample1].unique()) == 2 and len(df[sample2].unique()) == 2:
warnings.warn("The number of observations is not indicated " +
"for the chi-squared test, cannot garantee a " +
"correct inference. Also using Fisher's exact" +
" test.")
df_fisher = HypothesisTester.fisher_exact_test(
df,
sample1,
sample2,
alpha,
show_graph=False
)
df_result = pd.concat([df_chi2, df_fisher], axis=1).fillna('-')
else:
warnings.warn("The number of observations is not indicated " +
"for the chi-squared test, cannot garantee a " +
"correct inference.")
df_result = df_chi2
return df_result
@staticmethod
def independent_difference(sample1, sample2, alpha=0.05,
alternative='two-sided', correction='auto',
r=0.707, normality_method='shapiro',
show_graph=True, **kwargs):
"""
Autopilot for testing the difference in means for independent samples
Parameters
----------
sample1 : array_like
Array of sample data, must be quantitative data.
sample2 : array_like
Array of sample data, must be quantitative data.
alpha : float
level of significance (default = 0.05)
alternative : string
Specify whether the alternative hypothesis is
`'two-sided'`, `'greater'` or `'less'` to specify
the direction of the test.
correction : string or boolean
For unpaired two sample T-tests, specify whether
or not to correct for unequal variances using Welch
separate variances T-test. If 'auto', it will automatically
uses Welch T-test when the sample sizes are unequal, as
recommended by Zimmerman 2004.
r : float
Cauchy scale factor for computing the Bayes Factor.
Smaller values of r (e.g. 0.5), may be appropriate when small
effect sizes are expected a priori; larger values of r are
appropriate when large effect sizes are expected
(Rouder et al 2009). The default is 0.707 (= :math:`\sqrt{2} / 2`).
normality_method : string
normality test to be applied
show_graph: boolean
display the graph.
Returns
-------
pd.DataFrame
"""
check_norm1, check_norm2 = HTestAutoPilot.check_norm(
sample1, sample2,
alpha, normality_method
)
if check_norm1 and check_norm2:
print('Samples are normally distributed, an ideal condition',
'for the application of t-test')
df_result = HypothesisTester.t_test(
sample1,
sample2,
paired=False,
alpha=alpha,
alternative=alternative,
correction=correction,
r=r,
show_graph=show_graph,
**kwargs
)
elif (check_norm1 is False and len(sample1) < 30) or \
(check_norm2 is False and len(sample2) < 30):
print('At least one of the samples is not normally distributed.',
'However, the t-test can be applied due to central limit',
'theorem (n>30). The Mann-Whitney test is also an option',
'as it does not make any assumptions about data ditribution',
'(non-parametric alternative)')
df_result = HypothesisTester.mann_whitney_2indep(
sample1,
sample2,
alpha,
alternative,
show_graph,
**kwargs
)
else:
print('At least one of the samples is not normally distributed',
'and due to the number of observations the central limit',
'theorem does not apply. In this case, the Mann-Whitney',
'test is used as it does not make any assumptions about',
'data ditribution (non-parametric alternative)')
df_result = HypothesisTester.t_test(
sample1,
sample2,
paired=False,
alpha=alpha,
alternative=alternative,
correction=correction,
r=r,
show_graph=show_graph,
**kwargs
)
df_result_non_param = HypothesisTester.mann_whitney_2indep(
sample1,
sample2,
alpha,
alternative,
show_graph=False
)
df_result = (
pd.concat([df_result, df_result_non_param], axis=1)
.reindex(['T', 'dof', 'cohen-d', 'BF10', 'power',
'U-val', 'RBC', 'CLES', 'p-val', 'CI95%',
'H0', 'H1', 'Result'])
.fillna('-')
)
return df_result
@staticmethod
def dependent_difference(sample1, sample2, alpha=0.05, alternative='two-sided',
correction='auto', r=0.707, normality_method='shapiro',
show_graph=True, **kwargs):
"""
Autopilot for testing the difference in means for dependent samples
Parameters
----------
sample1 : array_like
Array of sample data, must be quantitative data.
sample2 : array_like
Array of sample data, must be quantitative data.
alpha : float
level of significance (default = 0.05)
alternative : string
Specify whether the alternative hypothesis is
`'two-sided'`, `'greater'` or `'less'` to
specify the direction of the test.
correction : string or boolean
For unpaired two sample T-tests, specify whether
or not to correct for unequal variances using
Welch separate variances T-test. If 'auto', it
will automatically uses Welch T-test when the
sample sizes are unequal, as recommended by Zimmerman
2004.
r : float
Cauchy scale factor for computing the Bayes Factor.
Smaller values of r (e.g. 0.5), may be appropriate
when small effect sizes are expected a priori; larger
values of r are appropriate when large effect sizes are
expected (Rouder et al 2009).
The default is 0.707 (= :math:`\sqrt{2} / 2`).
normality_method : string
normality test to be applied
show_graph: boolean
display the graph.
Returns
-------
pd.DataFrame
"""
diff_sample = sorted(list(map(operator.sub, sample1, sample2)))
check_norm_diff = (
HypothesisTester
.normality_test(
diff_sample,
alpha,
normality_method,
show_graph=False
)
.loc['normal'][0]
)
if check_norm_diff:
print('The distribution of differences is normally distributed',
'an ideal condition for the application of t-test.')
df_result = HypothesisTester.t_test(
sample1,
sample2,
paired=True,
alpha=alpha,
alternative=alternative,
correction=correction,
r=r,
show_graph=show_graph,
**kwargs
)
elif len(sample1) > 30 and len(sample2) > 30:
print('The distribution of differences is not normally',
'distributed. However, the t-test can be applied',
'due to central limit theorem (n>30). The Wilcoxon',
'test is also an option as it does not make any assumptions',
'about data ditribution (non-parametric alternative).')
df_result = HypothesisTester.t_test(
sample1,
sample2,
paired=True,
alpha=alpha,
alternative=alternative,
correction=correction,
r=r,
show_graph=show_graph,
**kwargs
)
df_result_non_param = HypothesisTester.wilcoxon_test(
sample1,
sample2,
alpha,
alternative,
show_graph=False,
**kwargs
)
df_result = (
pd.concat([df_result, df_result_non_param], axis=1)
.reindex([
'T', 'dof', 'cohen-d', 'BF10',
'power', 'W-val', 'RBC', 'CLES',
'p-val', 'CI95%', 'H0', 'H1', 'Result'
])
.fillna('-')
)
else:
print('The distribution of differences is not normally',
'distributed and due to the number of observations the',
'central limit theorem does not apply. In this case,',
'the Wilcoxon test is indicated as it does not make',
'any assumptions about data distribution',
'(non-parametric alternative).')
df_result = HypothesisTester.wilcoxon_test(
sample1,
sample2,
alpha,
alternative,
show_graph,
**kwargs
)
return df_result
| 0 | 0 | 0 |
6b21fb3112526e3c6d130cc689c8ac09c81f6b73 | 1,032 | py | Python | url_shortener/database/migrations/versions/0001_initial_migration.py | tiesjan/url-shortener | 395ffc7b88dfe97c88914e8c4047690185b54ea7 | [
"BSD-3-Clause"
] | null | null | null | url_shortener/database/migrations/versions/0001_initial_migration.py | tiesjan/url-shortener | 395ffc7b88dfe97c88914e8c4047690185b54ea7 | [
"BSD-3-Clause"
] | null | null | null | url_shortener/database/migrations/versions/0001_initial_migration.py | tiesjan/url-shortener | 395ffc7b88dfe97c88914e8c4047690185b54ea7 | [
"BSD-3-Clause"
] | null | null | null | """
Initial migration
Revision ID: 4defdf508e78
Revises: (none)
Create Date: 2021-10-19 20:14:59.350979
"""
from alembic import op
import sqlalchemy as sa
from url_shortener.database.functions import utc_now
# revision identifiers, used by Alembic.
revision = '4defdf508e78'
down_revision = None
branch_labels = None
depends_on = None
| 24.571429 | 92 | 0.672481 | """
Initial migration
Revision ID: 4defdf508e78
Revises: (none)
Create Date: 2021-10-19 20:14:59.350979
"""
from alembic import op
import sqlalchemy as sa
from url_shortener.database.functions import utc_now
# revision identifiers, used by Alembic.
revision = '4defdf508e78'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the initial schema: create the ``short_url`` table."""
    # Column definitions for the ShortURL model.
    columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('slug', sa.String(length=50), nullable=False),
        sa.Column('target_url', sa.String(length=500), nullable=False),
        sa.Column('public', sa.Boolean(), nullable=False),
        sa.Column('visit_count', sa.Integer(), server_default=sa.text('0'), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=utc_now(), nullable=False),
    ]
    # Table-level constraints: surrogate primary key, unique slug.
    constraints = [
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('slug'),
    ]
    op.create_table('short_url', *columns, *constraints)
def downgrade():
    """Revert the migration by dropping the ``short_url`` table."""
    op.drop_table('short_url')
| 644 | 0 | 46 |
9e6783c49f88b098a9e898f6e0d5fe94e9ffbd08 | 755 | py | Python | SplitterFile.py | abrahamalbert18/Image-Annotation-Application | bd5f0f7adffea2d749de568a41518986744d6846 | [
"MIT"
] | null | null | null | SplitterFile.py | abrahamalbert18/Image-Annotation-Application | bd5f0f7adffea2d749de568a41518986744d6846 | [
"MIT"
] | null | null | null | SplitterFile.py | abrahamalbert18/Image-Annotation-Application | bd5f0f7adffea2d749de568a41518986744d6846 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 16:25:07 2018
@author: Albert
"""
import pandas as pd
import numpy as np
df = pd.read_csv("./PreprocessedDataFiles/MergedPreprocessedDataFiles/MergedPreprocessedDrinkingDataLabels v2.csv")
# originalDF = df
df['index'] = range(0, len(df))
splitRatio = [0.6,0.2,0.2]
if __name__=="__main__":
train, val, test = indicesSplit(splitRatio)
np.savez("./Splits/indicesSplits.npz", train = train, val = val, test = test)
| 29.038462 | 115 | 0.684768 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 16:25:07 2018
@author: Albert
"""
import pandas as pd
import numpy as np
df = pd.read_csv("./PreprocessedDataFiles/MergedPreprocessedDataFiles/MergedPreprocessedDrinkingDataLabels v2.csv")
# originalDF = df
df['index'] = range(0, len(df))
splitRatio = [0.6,0.2,0.2]
def indicesSplit(splitRatio, data=None):
    """Split a dataframe into train/validation/test partitions.

    :param splitRatio: three fractions ``[train, val, test]`` summing to 1.
        The original implementation used only ``splitRatio[0]`` and
        hard-coded the validation share to half of the remainder, which is
        only correct for a ``[0.6, 0.2, 0.2]`` split; the ratio is now
        honored for any values.
    :param data: dataframe to split; defaults to the module-level ``df``
        for backward compatibility with existing callers.
    :return: ``(train, val, test)`` dataframes covering ``data`` exactly.
    """
    if data is None:
        data = df  # fall back to the module-level dataframe loaded at import
    train = data.sample(frac=splitRatio[0], random_state=42)
    remainder = data.drop(train.index)
    # Fraction of the remainder that belongs to validation.
    denom = splitRatio[1] + splitRatio[2]
    val_frac = (splitRatio[1] / denom) if denom else 0.0
    val = remainder.sample(frac=val_frac, random_state=42)
    test = remainder.drop(val.index)
    return train, val, test
if __name__=="__main__":
train, val, test = indicesSplit(splitRatio)
np.savez("./Splits/indicesSplits.npz", train = train, val = val, test = test)
| 253 | 0 | 23 |
c525cf86b9d588c851934b1a4b781869fe7ba93d | 11,363 | py | Python | doubles/allowance.py | fakeNetflix/uber-repo-doubles | 15e68dcf98f709b19a581915fa6af5ef49ebdd8a | [
"MIT"
] | 150 | 2015-01-06T11:40:39.000Z | 2022-01-10T10:29:59.000Z | doubles/allowance.py | fakeNetflix/uber-repo-doubles | 15e68dcf98f709b19a581915fa6af5ef49ebdd8a | [
"MIT"
] | 78 | 2015-01-13T21:04:13.000Z | 2022-02-11T13:22:56.000Z | doubles/allowance.py | fakeNetflix/uber-repo-doubles | 15e68dcf98f709b19a581915fa6af5ef49ebdd8a | [
"MIT"
] | 22 | 2015-03-11T18:33:33.000Z | 2022-01-13T01:54:19.000Z | import functools
import inspect
import six
from doubles.call_count_accumulator import CallCountAccumulator
from doubles.exceptions import MockExpectationError, VerifyingBuiltinDoubleArgumentError
import doubles.lifecycle
from doubles.verification import verify_arguments
_any = object()
class Allowance(object):
"""An individual method allowance (stub)."""
def __init__(self, target, method_name, caller):
"""
:param Target target: The object owning the method to stub.
:param str method_name: The name of the method to stub.
"""
self._target = target
self._method_name = method_name
self._caller = caller
self.args = _any
self.kwargs = _any
self._custom_matcher = None
self._is_satisfied = True
self._call_counter = CallCountAccumulator()
self._return_value = lambda *args, **kwargs: None
def and_raise(self, exception, *args, **kwargs):
"""Causes the double to raise the provided exception when called.
If provided, additional arguments (positional and keyword) passed to
`and_raise` are used in the exception instantiation.
:param Exception exception: The exception to raise.
"""
self._return_value = proxy_exception
return self
def and_raise_future(self, exception):
"""Similar to `and_raise` but the doubled method returns a future.
:param Exception exception: The exception to raise.
"""
future = _get_future()
future.set_exception(exception)
return self.and_return(future)
def and_return_future(self, *return_values):
"""Similar to `and_return` but the doubled method returns a future.
:param object return_values: The values the double will return when called,
"""
futures = []
for value in return_values:
future = _get_future()
future.set_result(value)
futures.append(future)
return self.and_return(*futures)
def and_return(self, *return_values):
"""Set a return value for an allowance
Causes the double to return the provided values in order. If multiple
values are provided, they are returned one at a time in sequence as the double is called.
If the double is called more times than there are return values, it should continue to
return the last value in the list.
:param object return_values: The values the double will return when called,
"""
if not return_values:
raise TypeError('and_return() expected at least 1 return value')
return_values = list(return_values)
final_value = return_values.pop()
self.and_return_result_of(
lambda: return_values.pop(0) if return_values else final_value
)
return self
def and_return_result_of(self, return_value):
""" Causes the double to return the result of calling the provided value.
:param return_value: A callable that will be invoked to determine the double's return value.
:type return_value: any callable object
"""
if not check_func_takes_args(return_value):
self._return_value = lambda *args, **kwargs: return_value()
else:
self._return_value = return_value
return self
def is_satisfied(self):
"""Returns a boolean indicating whether or not the double has been satisfied.
Stubs are always satisfied, but mocks are only satisfied if they've been
called as was declared.
:return: Whether or not the double is satisfied.
:rtype: bool
"""
return self._is_satisfied
def with_args(self, *args, **kwargs):
"""Declares that the double can only be called with the provided arguments.
:param args: Any positional arguments required for invocation.
:param kwargs: Any keyword arguments required for invocation.
"""
self.args = args
self.kwargs = kwargs
self.verify_arguments()
return self
def with_args_validator(self, matching_function):
"""Define a custom function for testing arguments
:param func matching_function: The function used to test arguments passed to the stub.
"""
self.args = None
self.kwargs = None
self._custom_matcher = matching_function
return self
def __call__(self, *args, **kwargs):
"""A short hand syntax for with_args
Allows callers to do:
allow(module).foo.with_args(1, 2)
With:
allow(module).foo(1, 2)
:param args: Any positional arguments required for invocation.
:param kwargs: Any keyword arguments required for invocation.
"""
return self.with_args(*args, **kwargs)
def with_no_args(self):
"""Declares that the double can only be called with no arguments."""
self.args = ()
self.kwargs = {}
self.verify_arguments()
return self
def satisfy_any_args_match(self):
"""Returns a boolean indicating whether or not the stub will accept arbitrary arguments.
This will be true unless the user has specified otherwise using ``with_args`` or
``with_no_args``.
:return: Whether or not the stub accepts arbitrary arguments.
:rtype: bool
"""
return self.args is _any and self.kwargs is _any
def satisfy_exact_match(self, args, kwargs):
"""Returns a boolean indicating whether or not the stub will accept the provided arguments.
:return: Whether or not the stub accepts the provided arguments.
:rtype: bool
"""
if self.args is None and self.kwargs is None:
return False
elif self.args is _any and self.kwargs is _any:
return True
elif args == self.args and kwargs == self.kwargs:
return True
elif len(args) != len(self.args) or len(kwargs) != len(self.kwargs):
return False
if not all(x == y or y == x for x, y in zip(args, self.args)):
return False
for key, value in self.kwargs.items():
if key not in kwargs:
return False
elif not (kwargs[key] == value or value == kwargs[key]):
return False
return True
def satisfy_custom_matcher(self, args, kwargs):
"""Return a boolean indicating if the args satisfy the stub
:return: Whether or not the stub accepts the provided arguments.
:rtype: bool
"""
if not self._custom_matcher:
return False
try:
return self._custom_matcher(*args, **kwargs)
except Exception:
return False
def return_value(self, *args, **kwargs):
"""Extracts the real value to be returned from the wrapping callable.
:return: The value the double should return when called.
"""
self._called()
return self._return_value(*args, **kwargs)
def verify_arguments(self, args=None, kwargs=None):
"""Ensures that the arguments specified match the signature of the real method.
:raise: ``VerifyingDoubleError`` if the arguments do not match.
"""
args = self.args if args is None else args
kwargs = self.kwargs if kwargs is None else kwargs
try:
verify_arguments(self._target, self._method_name, args, kwargs)
except VerifyingBuiltinDoubleArgumentError:
if doubles.lifecycle.ignore_builtin_verification():
raise
@verify_count_is_non_negative
def exactly(self, n):
"""Set an exact call count allowance
:param integer n:
"""
self._call_counter.set_exact(n)
return self
@verify_count_is_non_negative
def at_least(self, n):
"""Set a minimum call count allowance
:param integer n:
"""
self._call_counter.set_minimum(n)
return self
@verify_count_is_non_negative
def at_most(self, n):
"""Set a maximum call count allowance
:param integer n:
"""
self._call_counter.set_maximum(n)
return self
def never(self):
"""Set an expected call count allowance of 0"""
self.exactly(0)
return self
def once(self):
"""Set an expected call count allowance of 1"""
self.exactly(1)
return self
def twice(self):
"""Set an expected call count allowance of 2"""
self.exactly(2)
return self
@property
time = times
def _called(self):
"""Indicate that the allowance was called
:raise MockExpectationError if the allowance has been called too many times
"""
if self._call_counter.called().has_too_many_calls():
self.raise_failure_exception()
def raise_failure_exception(self, expect_or_allow='Allowed'):
"""Raises a ``MockExpectationError`` with a useful message.
:raise: ``MockExpectationError``
"""
raise MockExpectationError(
"{} '{}' to be called {}on {!r} with {}, but was not. ({}:{})".format(
expect_or_allow,
self._method_name,
self._call_counter.error_string(),
self._target.obj,
self._expected_argument_string(),
self._caller.filename,
self._caller.lineno,
)
)
def _expected_argument_string(self):
"""Generates a string describing what arguments the double expected.
:return: A string describing expected arguments.
:rtype: str
"""
if self.args is _any and self.kwargs is _any:
return 'any args'
elif self._custom_matcher:
return "custom matcher: '{}'".format(self._custom_matcher.__name__)
else:
return build_argument_repr_string(self.args, self.kwargs)
| 31.131507 | 100 | 0.627299 | import functools
import inspect
import six
from doubles.call_count_accumulator import CallCountAccumulator
from doubles.exceptions import MockExpectationError, VerifyingBuiltinDoubleArgumentError
import doubles.lifecycle
from doubles.verification import verify_arguments
_any = object()
def _get_future():
try:
from concurrent.futures import Future
except ImportError:
try:
from tornado.concurrent import Future
except ImportError:
raise ImportError(
'Error Importing Future, Could not find concurrent.futures or tornado.concurrent',
)
return Future()
def verify_count_is_non_negative(func):
    """Decorator for call-count setters: reject negative counts.

    Raises TypeError before delegating when the count argument is below zero.
    """
    @functools.wraps(func)
    def wrapper(self, count):
        if count < 0:
            raise TypeError(func.__name__ + ' requires one positive integer argument')
        return func(self, count)
    return wrapper
def check_func_takes_args(func):
    """Return a truthy value when *func* accepts any arguments at all.

    Used to decide whether a stubbed return callable should be invoked with
    the call's arguments or with none.

    :param func: the callable to inspect.
    """
    # inspect.getfullargspec replaces the old six.PY3 branch: getargspec was
    # removed in Python 3.11 and six added nothing here. Keyword-only
    # parameters are now also counted (the original ignored them, so a
    # callable taking only keyword-only arguments was wrongly wrapped in a
    # zero-argument lambda).
    arg_spec = inspect.getfullargspec(func)
    return any([arg_spec.args, arg_spec.varargs, arg_spec.varkw,
                arg_spec.defaults, arg_spec.kwonlyargs])
def build_argument_repr_string(args, kwargs):
    """Format positional and keyword arguments as a call-site style string,
    e.g. ``(1, 'a', key='value')``."""
    pieces = [repr(a) for a in args]
    pieces.extend('{}={!r}'.format(k, v) for k, v in kwargs.items())
    return '({})'.format(', '.join(pieces))
class Allowance(object):
    """An individual method allowance (stub)."""
    def __init__(self, target, method_name, caller):
        """
        :param Target target: The object owning the method to stub.
        :param str method_name: The name of the method to stub.
        """
        self._target = target
        self._method_name = method_name
        self._caller = caller
        # _any sentinels mean "no argument expectation declared yet".
        self.args = _any
        self.kwargs = _any
        self._custom_matcher = None
        self._is_satisfied = True
        self._call_counter = CallCountAccumulator()
        # Default stub behaviour: return None for any call signature.
        self._return_value = lambda *args, **kwargs: None
    def and_raise(self, exception, *args, **kwargs):
        """Causes the double to raise the provided exception when called.
        If provided, additional arguments (positional and keyword) passed to
        `and_raise` are used in the exception instantiation.
        :param Exception exception: The exception to raise.
        """
        def proxy_exception(*proxy_args, **proxy_kwargs):
            # Honor the documented contract: extra arguments given to
            # and_raise() are forwarded to the exception constructor.
            # (Previously they were captured but silently ignored.)
            if args or kwargs:
                raise exception(*args, **kwargs)
            raise exception
        self._return_value = proxy_exception
        return self
    def and_raise_future(self, exception):
        """Similar to `and_raise` but the doubled method returns a future.
        :param Exception exception: The exception to raise.
        """
        future = _get_future()
        future.set_exception(exception)
        return self.and_return(future)
    def and_return_future(self, *return_values):
        """Similar to `and_return` but the doubled method returns a future.
        :param object return_values: The values the double will return when called,
        """
        futures = []
        for value in return_values:
            future = _get_future()
            future.set_result(value)
            futures.append(future)
        return self.and_return(*futures)
    def and_return(self, *return_values):
        """Set a return value for an allowance
        Causes the double to return the provided values in order. If multiple
        values are provided, they are returned one at a time in sequence as the double is called.
        If the double is called more times than there are return values, it should continue to
        return the last value in the list.
        :param object return_values: The values the double will return when called,
        """
        if not return_values:
            raise TypeError('and_return() expected at least 1 return value')
        return_values = list(return_values)
        final_value = return_values.pop()
        # pop(0) consumes the queued values; final_value repeats forever after.
        self.and_return_result_of(
            lambda: return_values.pop(0) if return_values else final_value
        )
        return self
    def and_return_result_of(self, return_value):
        """ Causes the double to return the result of calling the provided value.
        :param return_value: A callable that will be invoked to determine the double's return value.
        :type return_value: any callable object
        """
        # Zero-argument callables are wrapped so they can be invoked with the
        # stubbed method's arguments without blowing up.
        if not check_func_takes_args(return_value):
            self._return_value = lambda *args, **kwargs: return_value()
        else:
            self._return_value = return_value
        return self
    def is_satisfied(self):
        """Returns a boolean indicating whether or not the double has been satisfied.
        Stubs are always satisfied, but mocks are only satisfied if they've been
        called as was declared.
        :return: Whether or not the double is satisfied.
        :rtype: bool
        """
        return self._is_satisfied
    def with_args(self, *args, **kwargs):
        """Declares that the double can only be called with the provided arguments.
        :param args: Any positional arguments required for invocation.
        :param kwargs: Any keyword arguments required for invocation.
        """
        self.args = args
        self.kwargs = kwargs
        self.verify_arguments()
        return self
    def with_args_validator(self, matching_function):
        """Define a custom function for testing arguments
        :param func matching_function: The function used to test arguments passed to the stub.
        """
        # args/kwargs of None signal "custom matcher mode" to
        # satisfy_exact_match below.
        self.args = None
        self.kwargs = None
        self._custom_matcher = matching_function
        return self
    def __call__(self, *args, **kwargs):
        """A short hand syntax for with_args
        Allows callers to do:
        allow(module).foo.with_args(1, 2)
        With:
        allow(module).foo(1, 2)
        :param args: Any positional arguments required for invocation.
        :param kwargs: Any keyword arguments required for invocation.
        """
        return self.with_args(*args, **kwargs)
    def with_no_args(self):
        """Declares that the double can only be called with no arguments."""
        self.args = ()
        self.kwargs = {}
        self.verify_arguments()
        return self
    def satisfy_any_args_match(self):
        """Returns a boolean indicating whether or not the stub will accept arbitrary arguments.
        This will be true unless the user has specified otherwise using ``with_args`` or
        ``with_no_args``.
        :return: Whether or not the stub accepts arbitrary arguments.
        :rtype: bool
        """
        return self.args is _any and self.kwargs is _any
    def satisfy_exact_match(self, args, kwargs):
        """Returns a boolean indicating whether or not the stub will accept the provided arguments.
        :return: Whether or not the stub accepts the provided arguments.
        :rtype: bool
        """
        # None/None means a custom matcher is in use; exact matching is off.
        if self.args is None and self.kwargs is None:
            return False
        elif self.args is _any and self.kwargs is _any:
            return True
        elif args == self.args and kwargs == self.kwargs:
            return True
        elif len(args) != len(self.args) or len(kwargs) != len(self.kwargs):
            return False
        # Compare in both directions so a custom __eq__ on either side
        # (e.g. matcher objects) can declare equality.
        if not all(x == y or y == x for x, y in zip(args, self.args)):
            return False
        for key, value in self.kwargs.items():
            if key not in kwargs:
                return False
            elif not (kwargs[key] == value or value == kwargs[key]):
                return False
        return True
    def satisfy_custom_matcher(self, args, kwargs):
        """Return a boolean indicating if the args satisfy the stub
        :return: Whether or not the stub accepts the provided arguments.
        :rtype: bool
        """
        if not self._custom_matcher:
            return False
        try:
            return self._custom_matcher(*args, **kwargs)
        except Exception:
            # A matcher that blows up simply does not match.
            return False
    def return_value(self, *args, **kwargs):
        """Extracts the real value to be returned from the wrapping callable.
        :return: The value the double should return when called.
        """
        self._called()
        return self._return_value(*args, **kwargs)
    def verify_arguments(self, args=None, kwargs=None):
        """Ensures that the arguments specified match the signature of the real method.
        :raise: ``VerifyingDoubleError`` if the arguments do not match.
        """
        args = self.args if args is None else args
        kwargs = self.kwargs if kwargs is None else kwargs
        try:
            verify_arguments(self._target, self._method_name, args, kwargs)
        except VerifyingBuiltinDoubleArgumentError:
            # NOTE(review): re-raises only when builtin verification is
            # ignored -- the polarity looks inverted; confirm against
            # doubles.lifecycle semantics before changing.
            if doubles.lifecycle.ignore_builtin_verification():
                raise
    @verify_count_is_non_negative
    def exactly(self, n):
        """Set an exact call count allowance
        :param integer n:
        """
        self._call_counter.set_exact(n)
        return self
    @verify_count_is_non_negative
    def at_least(self, n):
        """Set a minimum call count allowance
        :param integer n:
        """
        self._call_counter.set_minimum(n)
        return self
    @verify_count_is_non_negative
    def at_most(self, n):
        """Set a maximum call count allowance
        :param integer n:
        """
        self._call_counter.set_maximum(n)
        return self
    def never(self):
        """Set an expected call count allowance of 0"""
        self.exactly(0)
        return self
    def once(self):
        """Set an expected call count allowance of 1"""
        self.exactly(1)
        return self
    def twice(self):
        """Set an expected call count allowance of 2"""
        self.exactly(2)
        return self
    @property
    def times(self):
        # Fluent no-op so callers can write allow(x).foo.exactly(2).times.
        return self
    time = times
    def _called(self):
        """Indicate that the allowance was called
        :raise MockExpectationError if the allowance has been called too many times
        """
        if self._call_counter.called().has_too_many_calls():
            self.raise_failure_exception()
    def raise_failure_exception(self, expect_or_allow='Allowed'):
        """Raises a ``MockExpectationError`` with a useful message.
        :raise: ``MockExpectationError``
        """
        raise MockExpectationError(
            "{} '{}' to be called {}on {!r} with {}, but was not. ({}:{})".format(
                expect_or_allow,
                self._method_name,
                self._call_counter.error_string(),
                self._target.obj,
                self._expected_argument_string(),
                self._caller.filename,
                self._caller.lineno,
            )
        )
    def _expected_argument_string(self):
        """Generates a string describing what arguments the double expected.
        :return: A string describing expected arguments.
        :rtype: str
        """
        if self.args is _any and self.kwargs is _any:
            return 'any args'
        elif self._custom_matcher:
            return "custom matcher: '{}'".format(self._custom_matcher.__name__)
        else:
            return build_argument_repr_string(self.args, self.kwargs)
| 1,113 | 0 | 148 |
5c6aa066ee7e069fb7f65b98241148db5b7435ae | 2,857 | py | Python | network/InceptionModule.py | cersar/BasicNetwork | 119ebb745e67a9b74b72cc4635fea360db0ed43f | [
"MIT"
] | 4 | 2019-01-02T07:54:51.000Z | 2019-01-04T06:11:15.000Z | network/InceptionModule.py | cersar/BasicNetwork | 119ebb745e67a9b74b72cc4635fea360db0ed43f | [
"MIT"
] | null | null | null | network/InceptionModule.py | cersar/BasicNetwork | 119ebb745e67a9b74b72cc4635fea360db0ed43f | [
"MIT"
] | null | null | null | import tensorflow as tf
from network.ConvBlock import conv2d_bn
| 51.945455 | 104 | 0.644732 | import tensorflow as tf
from network.ConvBlock import conv2d_bn
def InceptionModule_V1(input, conv1_n, conv3_reduce, conv3_n, conv5_reduce, conv5_n, pool_proj):
    """Inception v1 block: 1x1, 1x1->3x3, 1x1->5x5 and pool->1x1 branches
    computed on *input* and concatenated along the channel axis."""
    def relu_conv(x, filters, size):
        # Shared helper: same-padded, stride-1 conv + ReLU with Xavier init.
        return tf.layers.conv2d(x, filters, size, (1, 1), padding='same',
                                activation=tf.nn.relu,
                                kernel_initializer=tf.contrib.layers.xavier_initializer())
    branch1 = relu_conv(input, conv1_n, (1, 1))
    branch3 = relu_conv(relu_conv(input, conv3_reduce, (1, 1)), conv3_n, (3, 3))
    branch5 = relu_conv(relu_conv(input, conv5_reduce, (1, 1)), conv5_n, (5, 5))
    pooled = tf.layers.max_pooling2d(input, (3, 3), (1, 1), padding='same')
    branch_pool = relu_conv(pooled, pool_proj, (1, 1))
    return tf.concat([branch1, branch3, branch5, branch_pool], axis=-1)
def InceptionModule_V2(input, conv1_n, conv3_reduce, conv3_n, conv3_3_reduce, conv3_3_n, pool_proj):
    """Inception v2 block: the 5x5 branch is factorized into two stacked
    3x3 convolutions; all branches are concatenated along channels."""
    conv1 = conv2d_bn(input, conv1_n, (1, 1), (1, 1), padding='same')
    conv3 = conv2d_bn(input, conv3_reduce, (1, 1), (1, 1), padding='same')
    conv3 = conv2d_bn(conv3, conv3_n, (3, 3), (1, 1), padding='same')
    conv3_3 = conv2d_bn(input, conv3_3_reduce, (1, 1), (1, 1), padding='same')
    # Factorized 5x5 path: the original applied bare tf.layers.conv2d here
    # (no batch norm, no activation), so the two stacked convs collapsed
    # into a single linear operation; use conv2d_bn like every other branch.
    conv3_3 = conv2d_bn(conv3_3, conv3_3_n, (3, 3), (1, 1), padding='same')
    conv3_3 = conv2d_bn(conv3_3, conv3_3_n, (3, 3), (1, 1), padding='same')
    pool = tf.layers.max_pooling2d(input, (3, 3), (1, 1), padding='same')
    pool = conv2d_bn(pool, pool_proj, (1, 1), (1, 1), padding='same')
    return tf.concat([conv1, conv3, conv3_3, pool], axis=-1)
def aux_classifier(input):
pool = tf.layers.average_pooling2d(input, (5, 5), (3, 3), padding='same')
conv = tf.layers.conv2d(pool, 128, (1, 1), (1, 1), padding='same', activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer())
flatten = tf.layers.flatten(conv)
fc = tf.layers.dense(flatten, 1024, activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer())
dp = tf.layers.dropout(fc,0.7)
logits = tf.layers.dense(dp, 10, kernel_initializer=tf.contrib.layers.xavier_initializer())
return logits | 2,722 | 0 | 69 |
167eec23d426971e1b913e6c330bb3abd675f342 | 241 | py | Python | simple_wo_blueprint_example/myapp/models.py | gtsofa/mnseriesflask | 58a84b698527ff3e790f0f7179193335bd440e3c | [
"MIT"
] | null | null | null | simple_wo_blueprint_example/myapp/models.py | gtsofa/mnseriesflask | 58a84b698527ff3e790f0f7179193335bd440e3c | [
"MIT"
] | null | null | null | simple_wo_blueprint_example/myapp/models.py | gtsofa/mnseriesflask | 58a84b698527ff3e790f0f7179193335bd440e3c | [
"MIT"
] | null | null | null | # myapp/models.py
from myapp import db
# define model here
| 24.1 | 48 | 0.684647 | # myapp/models.py
from myapp import db
# define model here
class Member(db.Model):
    """Database model for a member (Flask-SQLAlchemy style ``db.Model``)."""
    # Surrogate integer primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Display name, up to 50 characters; no constraints declared here.
    name = db.Column(db.String(50))
    # Email address, up to 50 characters; not marked unique in this file.
    email = db.Column(db.String(50))
    # Arbitrary integer column -- purpose not evident from this file.
    random = db.Column(db.Integer)
| 0 | 159 | 22 |
1b3d360d3d104a1f9e57c1cbb72e5d717d4819c7 | 2,625 | py | Python | external_libs/link/ci/run-tests.py | llloret/sp_link | b87ee622517fc68845b8f1d19c735c0e3bd05176 | [
"MIT"
] | null | null | null | external_libs/link/ci/run-tests.py | llloret/sp_link | b87ee622517fc68845b8f1d19c735c0e3bd05176 | [
"MIT"
] | null | null | null | external_libs/link/ci/run-tests.py | llloret/sp_link | b87ee622517fc68845b8f1d19c735c0e3bd05176 | [
"MIT"
] | 1 | 2021-02-22T11:37:41.000Z | 2021-02-22T11:37:41.000Z | #!/usr/bin/env python
import argparse
import logging
import os
import sys
from distutils.spawn import find_executable
from subprocess import call
if __name__ == '__main__':
logging.basicConfig(format='%(message)s', level=logging.INFO, stream=sys.stdout)
sys.exit(run_tests(parse_args()))
| 27.925532 | 88 | 0.649143 | #!/usr/bin/env python
import argparse
import logging
import os
import sys
from distutils.spawn import find_executable
from subprocess import call
def parse_args():
    """Parse the CLI options: --target (test to run) and --valgrind flag."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--target', help='Target to test')
    parser.add_argument('--valgrind', default=False, action='store_true',
                        help='Run with Valgrind')
    return parser.parse_args(sys.argv[1:])
def get_system_exe_extension():
    """Return the executable file suffix for the current platform."""
    # sys.platform reports 'win32' even on 64-bit Windows.
    return '.exe' if sys.platform == 'win32' else ''
def find_exe(name, path):
    """Recursively search *path* for a file called *name*.

    Returns the full path of the first match, or None when nothing matches.
    """
    for dirpath, _dirnames, filenames in os.walk(path):
        if name in filenames:
            return os.path.join(dirpath, name)
    return None
def build_test_exe_args(args, build_dir):
    """Build the argv list used to launch the requested test executable.

    Returns None (after logging an error) when no target was given, the
    executable cannot be located, or Valgrind was requested but is absent.
    """
    if args.target is None:
        logging.error('Target not specified, please use the --target option')
        return None
    test_exe = find_exe(args.target + get_system_exe_extension(), build_dir)
    # find_exe returns None when nothing matched; the original passed that
    # None straight to os.path.exists (TypeError) and, on the logging path,
    # still fell through and returned a bogus [None] argv.
    if test_exe is None or not os.path.exists(test_exe):
        logging.error('Could not find test executable for target {}, '
                      'did you forget to build?'.format(args.target))
        return None
    logging.debug('Test executable is: {}'.format(test_exe))
    test_exe_args = [test_exe]
    if args.valgrind is not False:
        valgrind_exe = find_executable('valgrind')
        if valgrind_exe is None:
            logging.error('Valgrind not found, cannot continue')
            return None
        test_exe_args = [
            valgrind_exe, '--leak-check=full', '--show-reachable=yes',
            '--gen-suppressions=all', '--error-exitcode=1', '--track-origins=yes',
            '--suppressions=../ci/memcheck.supp', test_exe]
    return test_exe_args
def run_tests(args):
    """Locate the build directory next to this script and run the tests.

    Returns a process-style exit code: 0 on success, 1 when the test
    command could not be assembled, 2 when the build directory is missing,
    otherwise the test process's own exit status.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    build_dir = os.path.join(here, os.pardir, 'build')
    if not os.path.exists(build_dir):
        logging.error(
            'Build directory not found, did you forget to run the configure.py script?')
        return 2
    os.chdir(build_dir)
    env = os.environ.copy()
    # Disable the libstdc++ memory pool so Valgrind sees real allocations.
    # (presumed intent of this flag -- confirm against the CI setup)
    env['GLIBCXX_FORCE_NEW'] = '1'
    cmd = build_test_exe_args(args, build_dir)
    if cmd is None:
        return 1
    logging.info(cmd)
    logging.info('Running Tests for {}'.format(args.target))
    return call(cmd, env=env)
if __name__ == '__main__':
logging.basicConfig(format='%(message)s', level=logging.INFO, stream=sys.stdout)
sys.exit(run_tests(parse_args()))
| 2,205 | 0 | 115 |
c458dae29fdfeaad53ba796122d7d999a3528470 | 241 | py | Python | vnpy/trader/gateway/okcoinGateway/__init__.py | Adam1679/vnpy_adam | 91e384c9372ee36689d9bb600fe7f45fbb68976e | [
"MIT"
] | 1 | 2019-03-28T15:45:21.000Z | 2019-03-28T15:45:21.000Z | vnpy/trader/gateway/okcoinGateway/__init__.py | Adam1679/vnpy_adam | 91e384c9372ee36689d9bb600fe7f45fbb68976e | [
"MIT"
] | null | null | null | vnpy/trader/gateway/okcoinGateway/__init__.py | Adam1679/vnpy_adam | 91e384c9372ee36689d9bb600fe7f45fbb68976e | [
"MIT"
] | null | null | null | # encoding: UTF-8
from vnpy.trader import vtConstant
from okcoinGateway import OkcoinGateway
gatewayClass = OkcoinGateway
gatewayName = 'OKEX'
gatewayDisplayName = u'OkEx'
gatewayType = vtConstant.GATEWAYTYPE_BTC
gatewayQryEnabled = True
| 20.083333 | 40 | 0.821577 | # encoding: UTF-8
from vnpy.trader import vtConstant
from okcoinGateway import OkcoinGateway
# Module-level gateway descriptors (presumably read by the vnpy gateway
# loader when registering available gateways -- verify against vnpy core).
gatewayClass = OkcoinGateway              # concrete gateway implementation
gatewayName = 'OKEX'                      # internal gateway identifier
gatewayDisplayName = u'OkEx'              # human-readable name for the UI
gatewayType = vtConstant.GATEWAYTYPE_BTC  # categorized as a BTC/crypto gateway
gatewayQryEnabled = True                  # enables periodic query -- TODO confirm semantics
| 0 | 0 | 0 |
c4e9bbaf69c142bb6022e73c6da05f216a4b4b48 | 947 | py | Python | tests/unit/_helpers.py | metabolize-forks/booby | 4e15e4e263dc387420398f808ffb345b760364d4 | [
"Apache-2.0"
] | null | null | null | tests/unit/_helpers.py | metabolize-forks/booby | 4e15e4e263dc387420398f808ffb345b760364d4 | [
"Apache-2.0"
] | null | null | null | tests/unit/_helpers.py | metabolize-forks/booby | 4e15e4e263dc387420398f808ffb345b760364d4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import collections
| 16.910714 | 42 | 0.606125 | # -*- coding: utf-8 -*-
import collections
import collections.abc
def stub_validator(value):
    """No-op validator stand-in: accepts any value and returns None."""
    pass
class Spy(object):
    """Callable test double that counts how many times it was invoked."""
    def __init__(self):
        # Number of completed invocations.
        self.times_called = 0
    def __call__(self):
        self.times_called = self.times_called + 1
class MyList(collections.abc.MutableSequence):
    """Minimal MutableSequence test double.

    Reads delegate to an internal list; every mutating operation is a
    deliberate no-op so tests can hand one to code expecting a sequence.
    Derives from ``collections.abc.MutableSequence``: the bare
    ``collections.MutableSequence`` alias was removed in Python 3.10.
    """
    def __init__(self, *args):
        self._store = list(args)
    def __getitem__(self, index):
        return self._store[index]
    def __setitem__(self, index, value):
        # Intentionally ignores writes.
        pass
    def __delitem__(self, index):
        pass
    def __len__(self):
        # NOTE: stub returns None (inherited quirk kept for compatibility);
        # calling len() on this object therefore fails.
        pass
    def insert(self, index, value):
        pass
class MyDict(collections.abc.MutableMapping):
    """Minimal MutableMapping test double.

    Reads and iteration delegate to an internal dict; mutating operations
    are deliberate no-ops. Derives from ``collections.abc.MutableMapping``:
    the bare ``collections.MutableMapping`` alias was removed in
    Python 3.10.
    """
    def __init__(self, **kwargs):
        self._store = kwargs
    def __getitem__(self, key):
        return self._store[key]
    def __setitem__(self, key, value):
        # Intentionally ignores writes.
        pass
    def __delitem__(self, key):
        pass
    def __len__(self):
        # NOTE: stub returns None (inherited quirk kept for compatibility);
        # calling len() on this object therefore fails.
        pass
    def __iter__(self):
        return iter(self._store)
| 394 | 38 | 467 |
00cfbc4ba62476b2efa5d02b11c24a2c7078eb23 | 2,250 | py | Python | notes/code/trie.py | Divya-Bhargavi/msds689 | 7bc4e2c5f06ef1889a887d7f49ec367749e473af | [
"MIT"
] | 1 | 2019-08-26T04:11:13.000Z | 2019-08-26T04:11:13.000Z | notes/code/trie.py | khanhbrandy/msds689 | 7f8ee791fda9d4fa65c4b047fef6b37948e5bb7f | [
"MIT"
] | null | null | null | notes/code/trie.py | khanhbrandy/msds689 | 7f8ee791fda9d4fa65c4b047fef6b37948e5bb7f | [
"MIT"
] | 1 | 2019-07-02T08:58:59.000Z | 2019-07-02T08:58:59.000Z | from lolviz import *
import time
import os
import psutil
def search(root:TrieNode, s:str, i=0) -> bool:
"Return true if s is prefix of word in Trie or full word in Trie"
p = root
while p is not None:
if i>=len(s): return True
e = ord(s[i]) - ord('a')
if p.edges[e] is None: return False
p = p.edges[e]
i += 1
return True
if __name__ == '__main__':
words = load()
#words = words[:12000] # reduce size of word list during development
print(f"{len(words)} words in dictionary")
process = psutil.Process(os.getpid())
print(f"{process.memory_info().rss/1024**2:,.3f} MB in use before creating TRIE")
root = create_trie(words)
process = psutil.Process(os.getpid())
print(f"{process.memory_info().rss/1024**2:,.3f} MB in use after creating TRIE")
trie_search(words)
#objviz(root).view() | 25 | 85 | 0.577333 | from lolviz import *
import time
import os
import psutil
class TrieNode:
    """A 26-way trie node: one child slot per lowercase letter a-z."""
    def __init__(self):
        # One edge per letter; None marks an absent child.
        self.edges = [None] * (ord('z') - ord('a') + 1)
def add(p:TrieNode, s:str, i=0) -> None:
    """Insert the suffix s[i:] into the trie rooted at p, creating nodes
    along the way as needed. (Iterative form of the original recursion.)"""
    node = p
    for ch in s[i:]:
        slot = ord(ch) - ord('a')
        if node.edges[slot] is None:
            node.edges[slot] = TrieNode()
        node = node.edges[slot]
def search(root:TrieNode, s:str, i=0) -> bool:
    "Return true if s is prefix of word in Trie or full word in Trie"
    # A None root matches anything, mirroring the original's fall-through.
    if root is None:
        return True
    node = root
    for ch in s[i:]:
        nxt = node.edges[ord(ch) - ord('a')]
        if nxt is None:
            return False
        node = nxt
    # Every character of s[i:] was consumed along existing edges.
    return True
def brute_force_search(words):
    """Time a naive O(n^2) scan: look every word up in the list itself,
    printing the hit count and elapsed time."""
    t0 = time.time()
    hits = sum(1 for w in words if w in words)
    print(f"{hits} found out of {len(words)}")
    t1 = time.time()
    print(f"Brute force search time {t1-t0:.2f}s")  # wow: 265.85s (4.43 minutes)
def create_trie(words):
start = time.time()
root = TrieNode()
for w in words:
w = w.strip().lower()
if w.isalpha():
add(root, w)
stop = time.time()
print(f"TRIE build time {stop-start:.2f}s") # 6s
return root
def trie_search(words):
start = time.time()
found = 0
for w in words:
w = w.strip().lower()
if w.isalpha():
if search(root, w):
found += 1
print(f"{found} found out of {len(words)}")
stop = time.time()
print(f"TRIE search time {stop-start:.2f}s")
def load():
with open("/usr/share/dict/words") as f: # linux likely /usr/dict/words
words = f.readlines()
return words
if __name__ == '__main__':
words = load()
#words = words[:12000] # reduce size of word list during development
print(f"{len(words)} words in dictionary")
process = psutil.Process(os.getpid())
print(f"{process.memory_info().rss/1024**2:,.3f} MB in use before creating TRIE")
root = create_trie(words)
process = psutil.Process(os.getpid())
print(f"{process.memory_info().rss/1024**2:,.3f} MB in use after creating TRIE")
trie_search(words)
#objviz(root).view() | 1,203 | -6 | 164 |
df67bea4c9819ec52d3d60e17f46ad9238dc3a6d | 3,166 | py | Python | third_party/blink/renderer/build/scripts/make_runtime_features_utilities.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | third_party/blink/renderer/build/scripts/make_runtime_features_utilities.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/blink/renderer/build/scripts/make_runtime_features_utilities.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
def _validate_runtime_features_graph(features):
"""
Raises AssertionError when sanity check failed.
@param features: a List[Dict]. See origin_trials().
@returns None
"""
feature_pool = {str(f['name']) for f in features}
origin_trial_pool = {
str(f['name'])
for f in features if f['origin_trial_feature_name']
}
for f in features:
assert not f['implied_by'] or not f['depends_on'], _error_message(
'Only one of implied_by and depends_on is allowed', f['name'])
for d in f['depends_on']:
assert d in feature_pool, _error_message(
'Depends on non-existent-feature', f['name'], d)
for i in f['implied_by']:
assert i in feature_pool, _error_message(
'Implied by non-existent-feature', f['name'], i)
assert f['origin_trial_feature_name'] or i not in origin_trial_pool, \
_error_message(
'A feature must be in origin trial if implied by an origin trial feature',
f['name'], i)
graph = {
str(feature['name']): feature['depends_on'] + feature['implied_by']
for feature in features
}
path = set()
for f in features:
assert not has_cycle(str(f['name'])), _error_message(
'Cycle found in depends_on/implied_by graph', f['name'])
def origin_trials(features):
"""
This function returns all features that are in origin trial.
The dependency is considered in origin trial if itself is in origin trial
or any of its dependencies are in origin trial. Propagate dependency
tag use DFS can find all features that are in origin trial.
@param features: a List[Dict]. Each Dict must have keys 'name',
'depends_on', 'implied_by' and 'origin_trial_feature_name'
(see runtime_enabled_features.json5).
@returns Set[str(runtime feature name)]
"""
_validate_runtime_features_graph(features)
origin_trials_set = set()
graph = defaultdict(list)
for feature in features:
for dependency in feature['depends_on']:
graph[dependency].append(str(feature['name']))
for feature in features:
if feature['origin_trial_feature_name']:
dfs(str(feature['name']))
return origin_trials_set
| 34.791209 | 94 | 0.639924 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
def _error_message(message, feature, other_feature=None):
message = 'runtime_enabled_features.json5: {}: {}'.format(feature, message)
if other_feature:
message += ': {}'.format(other_feature)
return message
def _validate_runtime_features_graph(features):
"""
Raises AssertionError when sanity check failed.
@param features: a List[Dict]. See origin_trials().
@returns None
"""
feature_pool = {str(f['name']) for f in features}
origin_trial_pool = {
str(f['name'])
for f in features if f['origin_trial_feature_name']
}
for f in features:
assert not f['implied_by'] or not f['depends_on'], _error_message(
'Only one of implied_by and depends_on is allowed', f['name'])
for d in f['depends_on']:
assert d in feature_pool, _error_message(
'Depends on non-existent-feature', f['name'], d)
for i in f['implied_by']:
assert i in feature_pool, _error_message(
'Implied by non-existent-feature', f['name'], i)
assert f['origin_trial_feature_name'] or i not in origin_trial_pool, \
_error_message(
'A feature must be in origin trial if implied by an origin trial feature',
f['name'], i)
graph = {
str(feature['name']): feature['depends_on'] + feature['implied_by']
for feature in features
}
path = set()
def has_cycle(vertex):
path.add(vertex)
for neighbor in graph[vertex]:
if neighbor in path or has_cycle(neighbor):
return True
path.remove(vertex)
return False
for f in features:
assert not has_cycle(str(f['name'])), _error_message(
'Cycle found in depends_on/implied_by graph', f['name'])
def origin_trials(features):
"""
This function returns all features that are in origin trial.
The dependency is considered in origin trial if itself is in origin trial
or any of its dependencies are in origin trial. Propagate dependency
tag use DFS can find all features that are in origin trial.
@param features: a List[Dict]. Each Dict must have keys 'name',
'depends_on', 'implied_by' and 'origin_trial_feature_name'
(see runtime_enabled_features.json5).
@returns Set[str(runtime feature name)]
"""
_validate_runtime_features_graph(features)
origin_trials_set = set()
graph = defaultdict(list)
for feature in features:
for dependency in feature['depends_on']:
graph[dependency].append(str(feature['name']))
def dfs(node):
origin_trials_set.add(node)
for dependent in graph[node]:
if dependent not in origin_trials_set:
dfs(dependent)
for feature in features:
if feature['origin_trial_feature_name']:
dfs(str(feature['name']))
return origin_trials_set
| 552 | 0 | 77 |
16f2f307f2a23c4ea9298468f8941e54ddc89b6d | 587 | py | Python | K_Mathematical_Modeling/Section 0/solutionExo2.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | [
"MIT"
] | 7 | 2019-07-03T07:41:55.000Z | 2022-02-06T20:25:37.000Z | K_Mathematical_Modeling/Section 0/solutionExo2.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | [
"MIT"
] | 9 | 2019-03-14T15:15:09.000Z | 2019-08-01T14:18:21.000Z | K_Mathematical_Modeling/Section 0/solutionExo2.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | [
"MIT"
] | 11 | 2019-03-12T10:43:11.000Z | 2021-10-05T12:15:00.000Z | Birds_year0 = 100
Birth_rate = 0.5
Death_rate = 0.2
# We repeat the "for" loop with an additional step: every time bird population reaches 1000, an epidemic kills half of them
Time=[0]
Birds=[100]
for year in range (1,51) :
Birds = Birds + [Birds[-1] + Birds[-1] * Birth_rate - Birds[-1] * Death_rate ]
Time = Time + [year]
if Birds[-1]>1000:
Birds[-1]=Birds[-1]/2
print(Birds[-1])
import matplotlib.pyplot as plt
plt.plot(Time,Birds)
plt.xlabel("Time (years)")
plt.ylabel("# of birds")
plt.title('Birds population growth')
plt.show()
| 27.952381 | 124 | 0.637138 | Birds_year0 = 100
Birth_rate = 0.5
Death_rate = 0.2
# We repeat the "for" loop with an additional step: every time bird population reaches 1000, an epidemic kills half of them
Time=[0]
Birds=[100]
for year in range (1,51) :
Birds = Birds + [Birds[-1] + Birds[-1] * Birth_rate - Birds[-1] * Death_rate ]
Time = Time + [year]
if Birds[-1]>1000:
Birds[-1]=Birds[-1]/2
print(Birds[-1])
import matplotlib.pyplot as plt
plt.plot(Time,Birds)
plt.xlabel("Time (years)")
plt.ylabel("# of birds")
plt.title('Birds population growth')
plt.show()
| 0 | 0 | 0 |
0cd24154039f132d5322d370540651a26ee7c32d | 3,682 | py | Python | Topic_modelling_output.py | Dushyanttara/ASR_SD | a7bfc07fc8ecb3b77dfd43cdadbfb2a491b66103 | [
"MIT"
] | null | null | null | Topic_modelling_output.py | Dushyanttara/ASR_SD | a7bfc07fc8ecb3b77dfd43cdadbfb2a491b66103 | [
"MIT"
] | null | null | null | Topic_modelling_output.py | Dushyanttara/ASR_SD | a7bfc07fc8ecb3b77dfd43cdadbfb2a491b66103 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 00:06:55 2019
@author: -
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import os
from datetime import datetime
from gensim import corpora, models, similarities
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
os.getcwd()
os.chdir('C:\\Users\\abhishekpandey\\Desktop')
articles = pd.read_excel('speech_input.xlsx', sheet_name = 'Sheet1')
articles.head()
#Concatenating the articles titles and bodies
english_articles_content = (articles['Text']).tolist()
english_stopset = set(stopwords.words('english')).union(
{"things", "that's", "something", "take", "don't", "may", "want", "you're",
"set", "might", "says", "including", "lot", "much", "said", "know",
"good", "step", "often", "going", "thing", "things", "think",
"back", "actually", "better", "look", "find", "right", "example",
"verb", "verbs"})
#Tokenizing words of articles
tokenizer = RegexpTokenizer(r"(?u)[\b\#a-zA-Z][\w&-_]+\b")
english_articles_tokens = list(map(lambda d: [token for token in tokenizer.tokenize(d.lower()) if token not in english_stopset], english_articles_content))
bigram_transformer = models.Phrases(english_articles_tokens)
english_articles_unigrams_bigrams_tokens = list(bigram_transformer[english_articles_tokens])
#Creating a dictionary and filtering out too rare and too common tokens
english_dictionary = corpora.Dictionary(english_articles_unigrams_bigrams_tokens)
english_dictionary.compactify()
print(english_dictionary)
#Processing Bag-of-Words (BoW) for each article
english_articles_bow = [english_dictionary.doc2bow(doc) for doc in english_articles_unigrams_bigrams_tokens]
#Training the LDA topic model on English articles
lda_model = models.LdaModel(english_articles_bow, id2word=english_dictionary, num_topics=30, passes=10, iterations=500)
#Processing the topics for each article
english_articles_lda = lda_model[english_articles_bow]
#Computing the main topic of each article
topics_top_words = get_topics_top_words(lda_model, 5)
#Return the discovered topics, sorted by popularity
corpus_main_topics = get_main_topics(english_articles_lda, topics_top_words)
main_topics_df = pd.DataFrame(corpus_main_topics, columns=['topic']).groupby('topic').size().sort_values(ascending=True).reset_index()
main_topics_df.columns = ['topic','count']
main_topics_df.sort_values('count', ascending=False)
main_topics_df.plot(kind='barh', x='topic', y='count', figsize=(7,20), title='Main topics on shared English articles')
articles_full = articles
articles_full['tagged_keywords'] = corpus_main_topics
articles_full.drop('tagged_keywords', axis=1, inplace =True)
| 42.813953 | 156 | 0.710483 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 00:06:55 2019
@author: -
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import os
from datetime import datetime
from gensim import corpora, models, similarities
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
os.getcwd()
os.chdir('C:\\Users\\abhishekpandey\\Desktop')
articles = pd.read_excel('speech_input.xlsx', sheet_name = 'Sheet1')
articles.head()
#Concatenating the articles titles and bodies
english_articles_content = (articles['Text']).tolist()
english_stopset = set(stopwords.words('english')).union(
{"things", "that's", "something", "take", "don't", "may", "want", "you're",
"set", "might", "says", "including", "lot", "much", "said", "know",
"good", "step", "often", "going", "thing", "things", "think",
"back", "actually", "better", "look", "find", "right", "example",
"verb", "verbs"})
#Tokenizing words of articles
tokenizer = RegexpTokenizer(r"(?u)[\b\#a-zA-Z][\w&-_]+\b")
english_articles_tokens = list(map(lambda d: [token for token in tokenizer.tokenize(d.lower()) if token not in english_stopset], english_articles_content))
bigram_transformer = models.Phrases(english_articles_tokens)
english_articles_unigrams_bigrams_tokens = list(bigram_transformer[english_articles_tokens])
#Creating a dictionary and filtering out too rare and too common tokens
english_dictionary = corpora.Dictionary(english_articles_unigrams_bigrams_tokens)
english_dictionary.compactify()
print(english_dictionary)
#Processing Bag-of-Words (BoW) for each article
english_articles_bow = [english_dictionary.doc2bow(doc) for doc in english_articles_unigrams_bigrams_tokens]
#Training the LDA topic model on English articles
lda_model = models.LdaModel(english_articles_bow, id2word=english_dictionary, num_topics=30, passes=10, iterations=500)
#Processing the topics for each article
english_articles_lda = lda_model[english_articles_bow]
def get_topics_top_words(model, max_words):
all_topics = model.show_topics(-1, max_words*2, False, False)
topics = []
for topic in all_topics:
min_score_word = float(abs(topic[1][0][1])) / 2.
top_positive_words = list(map(lambda y: y[0].replace('_',' '), filter(lambda x: x[1] > min_score_word, topic[1])))[0:max_words]
topics.append('[' + ', '.join(top_positive_words) + ']')
return topics
#Computing the main topic of each article
topics_top_words = get_topics_top_words(lda_model, 5)
def get_main_topics(corpus_lda, topics_labels):
min_strength = (1.0 / float(len(topics_labels))) + 0.01
main_topics = map(lambda ts: sorted(ts, key=lambda t: -t[1])[0][0] if sorted(ts, key=lambda t: -t[1])[0][1] > min_strength else None, corpus_lda)
main_topics_labels = map(lambda x: topics_labels[x] if x != None else '', main_topics)
return list(main_topics_labels)
#Return the discovered topics, sorted by popularity
corpus_main_topics = get_main_topics(english_articles_lda, topics_top_words)
main_topics_df = pd.DataFrame(corpus_main_topics, columns=['topic']).groupby('topic').size().sort_values(ascending=True).reset_index()
main_topics_df.columns = ['topic','count']
main_topics_df.sort_values('count', ascending=False)
main_topics_df.plot(kind='barh', x='topic', y='count', figsize=(7,20), title='Main topics on shared English articles')
articles_full = articles
articles_full['tagged_keywords'] = corpus_main_topics
articles_full.drop('tagged_keywords', axis=1, inplace =True)
| 787 | 0 | 50 |
75d3beb4b4c7a19de10f0742149b98b76a7a51f4 | 3,230 | py | Python | dakara_server/playlist/management/commands/createplayer.py | DakaraProject/dakara-server | b28fc1a8561e431d562102932f3d6ff3607e545b | [
"MIT"
] | 4 | 2018-07-24T18:22:16.000Z | 2020-01-24T16:30:54.000Z | dakara_server/playlist/management/commands/createplayer.py | DakaraProject/dakara-server | b28fc1a8561e431d562102932f3d6ff3607e545b | [
"MIT"
] | 88 | 2017-11-04T08:58:02.000Z | 2022-03-30T11:39:08.000Z | dakara_server/playlist/management/commands/createplayer.py | DakaraProject/dakara-server | b28fc1a8561e431d562102932f3d6ff3607e545b | [
"MIT"
] | 1 | 2018-05-05T15:37:20.000Z | 2018-05-05T15:37:20.000Z | from getpass import getpass
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
UserModel = get_user_model()
USERNAME_DEFAULT = "player"
class Command(BaseCommand):
"""Create the player special user."""
help = "Create player account."
def add_arguments(self, parser):
"""Add arguments for the command.
Args:
parser (argparse.ArgumentParser): Parser.
"""
parser.add_argument(
"--username",
help="Specifies the loging for the player. Default to '{}'.".format(
USERNAME_DEFAULT
),
)
parser.add_argument("--password", help="Specifies the password for the player.")
parser.add_argument(
"--noinput",
help="Tells Django to NOT prompt the user for input of any kind. "
"Use command line arguments only.",
action="store_true",
)
@staticmethod
def get_username():
"""Get username from user.
Returns:
str: Username.
"""
username = input("Username (default: '{}'): ".format(USERNAME_DEFAULT))
return username or USERNAME_DEFAULT
def get_password(self):
"""Get password from user.
Returns:
str: Password.
"""
while True:
password = getpass()
password_confirm = getpass("Password (again): ")
if not password == password_confirm:
self.stderr.write("Error: Your passwords didn't match.")
continue
return password
def create_player(self, username, password):
"""Create player from provided credentials.
Args:
username (str): Username for the player.
password (str): Password for the player.
"""
# check password
if not password:
self.stderr.write("Error: Blank passwords aren't allowed.")
return
try:
UserModel.objects.create_user(
username,
password=password,
email="{}@player".format(username),
validated_by_email=True,
validated_by_manager=True,
playlist_permission_level=UserModel.PLAYER,
)
except (IntegrityError, ValueError) as e:
self.stderr.write("Error: {}".format(e))
return
self.stdout.write("Player created successfully.")
def handle(self, *args, **options):
"""Handle the command."""
# in non interactive mode
if options["noinput"]:
self.create_player(
(options["username"] or USERNAME_DEFAULT), options["password"]
)
return
# interactive mode
# username
if options["username"]:
username = options["username"]
else:
username = self.get_username()
# password
if options["password"]:
password = options["password"]
else:
password = self.get_password()
self.create_player(username, password)
| 27.606838 | 88 | 0.5613 | from getpass import getpass
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
UserModel = get_user_model()
USERNAME_DEFAULT = "player"
class Command(BaseCommand):
"""Create the player special user."""
help = "Create player account."
def add_arguments(self, parser):
"""Add arguments for the command.
Args:
parser (argparse.ArgumentParser): Parser.
"""
parser.add_argument(
"--username",
help="Specifies the loging for the player. Default to '{}'.".format(
USERNAME_DEFAULT
),
)
parser.add_argument("--password", help="Specifies the password for the player.")
parser.add_argument(
"--noinput",
help="Tells Django to NOT prompt the user for input of any kind. "
"Use command line arguments only.",
action="store_true",
)
@staticmethod
def get_username():
"""Get username from user.
Returns:
str: Username.
"""
username = input("Username (default: '{}'): ".format(USERNAME_DEFAULT))
return username or USERNAME_DEFAULT
def get_password(self):
"""Get password from user.
Returns:
str: Password.
"""
while True:
password = getpass()
password_confirm = getpass("Password (again): ")
if not password == password_confirm:
self.stderr.write("Error: Your passwords didn't match.")
continue
return password
def create_player(self, username, password):
"""Create player from provided credentials.
Args:
username (str): Username for the player.
password (str): Password for the player.
"""
# check password
if not password:
self.stderr.write("Error: Blank passwords aren't allowed.")
return
try:
UserModel.objects.create_user(
username,
password=password,
email="{}@player".format(username),
validated_by_email=True,
validated_by_manager=True,
playlist_permission_level=UserModel.PLAYER,
)
except (IntegrityError, ValueError) as e:
self.stderr.write("Error: {}".format(e))
return
self.stdout.write("Player created successfully.")
def handle(self, *args, **options):
"""Handle the command."""
# in non interactive mode
if options["noinput"]:
self.create_player(
(options["username"] or USERNAME_DEFAULT), options["password"]
)
return
# interactive mode
# username
if options["username"]:
username = options["username"]
else:
username = self.get_username()
# password
if options["password"]:
password = options["password"]
else:
password = self.get_password()
self.create_player(username, password)
| 0 | 0 | 0 |
92bf5938cadd673401ef50232fe50e8eb090235a | 24,621 | py | Python | python/ga4gh/schemas/ga4gh/genotype_phenotype_service_pb2.py | david4096/ga4gh-schemas | 774db498cc047cc64cc070325472c7dba60e6d42 | [
"Apache-2.0"
] | 114 | 2015-01-05T22:19:34.000Z | 2017-02-18T18:51:22.000Z | python/ga4gh/schemas/ga4gh/genotype_phenotype_service_pb2.py | david4096/ga4gh-schemas | 774db498cc047cc64cc070325472c7dba60e6d42 | [
"Apache-2.0"
] | 608 | 2015-01-06T00:24:39.000Z | 2017-03-09T05:29:16.000Z | python/ga4gh/schemas/ga4gh/genotype_phenotype_service_pb2.py | david4096/ga4gh-schemas | 774db498cc047cc64cc070325472c7dba60e6d42 | [
"Apache-2.0"
] | 98 | 2015-01-12T18:09:52.000Z | 2017-02-15T15:49:17.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ga4gh/schemas/ga4gh/genotype_phenotype_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ga4gh.schemas.ga4gh import common_pb2 as ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2
from ga4gh.schemas.ga4gh import genotype_phenotype_pb2 as ga4gh_dot_schemas_dot_ga4gh_dot_genotype__phenotype__pb2
from ga4gh.schemas.google.api import annotations_pb2 as ga4gh_dot_schemas_dot_google_dot_api_dot_annotations__pb2
# File-level descriptor for genotype_phenotype_service.proto.
# The serialized_pb argument is the protoc-emitted binary FileDescriptorProto;
# it must never be edited by hand — regenerate this module with protoc instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='ga4gh/schemas/ga4gh/genotype_phenotype_service.proto',
  package='ga4gh.schemas.ga4gh',
  syntax='proto3',
  serialized_pb=_b('\n4ga4gh/schemas/ga4gh/genotype_phenotype_service.proto\x12\x13ga4gh.schemas.ga4gh\x1a ga4gh/schemas/ga4gh/common.proto\x1a,ga4gh/schemas/ga4gh/genotype_phenotype.proto\x1a*ga4gh/schemas/google/api/annotations.proto\"b\n%SearchPhenotypeAssociationSetsRequest\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x03\x12\x12\n\npage_token\x18\x03 \x01(\t\"\x93\x01\n&SearchPhenotypeAssociationSetsResponse\x12P\n\x1aphenotype_association_sets\x18\x01 \x03(\x0b\x32,.ga4gh.schemas.ga4gh.PhenotypeAssociationSet\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"E\n\x11OntologyTermQuery\x12\x30\n\x05terms\x18\x01 \x03(\x0b\x32!.ga4gh.schemas.ga4gh.OntologyTerm\"O\n\x17\x45xternalIdentifierQuery\x12\x34\n\x03ids\x18\x01 \x03(\x0b\x32\'.ga4gh.schemas.ga4gh.ExternalIdentifier\"\xa4\x01\n\rEvidenceQuery\x12\x37\n\x0c\x65videnceType\x18\x01 \x01(\x0b\x32!.ga4gh.schemas.ga4gh.OntologyTerm\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x45\n\x14\x65xternal_identifiers\x18\x03 \x03(\x0b\x32\'.ga4gh.schemas.ga4gh.ExternalIdentifier\"\xa8\x02\n\x17SearchPhenotypesRequest\x12$\n\x1cphenotype_association_set_id\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12/\n\x04type\x18\x04 \x01(\x0b\x32!.ga4gh.schemas.ga4gh.OntologyTerm\x12\x35\n\nqualifiers\x18\x05 \x03(\x0b\x32!.ga4gh.schemas.ga4gh.OntologyTerm\x12\x37\n\x0c\x61ge_of_onset\x18\x06 \x01(\x0b\x32!.ga4gh.schemas.ga4gh.OntologyTerm\x12\x11\n\tpage_size\x18\x07 \x01(\x03\x12\x12\n\npage_token\x18\x08 \x01(\t\"o\n\x18SearchPhenotypesResponse\x12:\n\nphenotypes\x18\x01 \x03(\x0b\x32&.ga4gh.schemas.ga4gh.PhenotypeInstance\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\xcf\x01\n\x1eSearchGenotypePhenotypeRequest\x12$\n\x1cphenotype_association_set_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x65\x61ture_ids\x18\x02 \x03(\t\x12\x15\n\rphenotype_ids\x18\x03 \x03(\t\x12\x34\n\x08\x65vidence\x18\x04 
\x03(\x0b\x32\".ga4gh.schemas.ga4gh.EvidenceQuery\x12\x11\n\tpage_size\x18\x05 \x01(\x03\x12\x12\n\npage_token\x18\x06 \x01(\t\"\x82\x01\n\x1fSearchGenotypePhenotypeResponse\x12\x46\n\x0c\x61ssociations\x18\x01 \x03(\x0b\x32\x30.ga4gh.schemas.ga4gh.FeaturePhenotypeAssociation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xcd\x04\n\x18GenotypePhenotypeService\x12\xd0\x01\n\x1eSearchPhenotypeAssociationSets\x12:.ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest\x1a;.ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsResponse\"5\x82\xd3\xe4\x93\x02/\"*/v0.6.0a10/phenotypeassociationsets/search:\x01*\x12\x97\x01\n\x0fSearchPhenotype\x12,.ga4gh.schemas.ga4gh.SearchPhenotypesRequest\x1a-.ga4gh.schemas.ga4gh.SearchPhenotypesResponse\"\'\x82\xd3\xe4\x93\x02!\"\x1c/v0.6.0a10/phenotypes/search:\x01*\x12\xc3\x01\n\x1bSearchPhenotypeAssociations\x12\x33.ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest\x1a\x34.ga4gh.schemas.ga4gh.SearchGenotypePhenotypeResponse\"9\x82\xd3\xe4\x93\x02\x33\"./v0.6.0a10/featurephenotypeassociations/search:\x01*b\x06proto3')
  ,
  dependencies=[ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2.DESCRIPTOR,ga4gh_dot_schemas_dot_ga4gh_dot_genotype__phenotype__pb2.DESCRIPTOR,ga4gh_dot_schemas_dot_google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
# Register the file descriptor in the default symbol database so generated
# message classes can be resolved by full name at runtime.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Message descriptor: SearchPhenotypeAssociationSetsRequest
# (string dataset_id = 1, int64 page_size = 2, string page_token = 3).
# Generated by protoc — do not edit field numbers/offsets by hand.
_SEARCHPHENOTYPEASSOCIATIONSETSREQUEST = _descriptor.Descriptor(
  name='SearchPhenotypeAssociationSetsRequest',
  full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='dataset_id', full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest.dataset_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest.page_size', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest.page_token', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=201,
  serialized_end=299,
)
# Message descriptor: SearchPhenotypeAssociationSetsResponse
# (repeated PhenotypeAssociationSet phenotype_association_sets = 1,
#  string next_page_token = 2). Generated by protoc — do not hand-edit.
_SEARCHPHENOTYPEASSOCIATIONSETSRESPONSE = _descriptor.Descriptor(
  name='SearchPhenotypeAssociationSetsResponse',
  full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='phenotype_association_sets', full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsResponse.phenotype_association_sets', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=302,
  serialized_end=449,
)
# Message descriptor: OntologyTermQuery (repeated OntologyTerm terms = 1).
# Generated by protoc — do not hand-edit.
_ONTOLOGYTERMQUERY = _descriptor.Descriptor(
  name='OntologyTermQuery',
  full_name='ga4gh.schemas.ga4gh.OntologyTermQuery',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='terms', full_name='ga4gh.schemas.ga4gh.OntologyTermQuery.terms', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=451,
  serialized_end=520,
)
# Message descriptor: ExternalIdentifierQuery
# (repeated ExternalIdentifier ids = 1). Generated by protoc — do not hand-edit.
_EXTERNALIDENTIFIERQUERY = _descriptor.Descriptor(
  name='ExternalIdentifierQuery',
  full_name='ga4gh.schemas.ga4gh.ExternalIdentifierQuery',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ids', full_name='ga4gh.schemas.ga4gh.ExternalIdentifierQuery.ids', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=522,
  serialized_end=601,
)
# Message descriptor: EvidenceQuery
# (OntologyTerm evidenceType = 1, string description = 2,
#  repeated ExternalIdentifier external_identifiers = 3).
# Generated by protoc — do not hand-edit.
_EVIDENCEQUERY = _descriptor.Descriptor(
  name='EvidenceQuery',
  full_name='ga4gh.schemas.ga4gh.EvidenceQuery',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='evidenceType', full_name='ga4gh.schemas.ga4gh.EvidenceQuery.evidenceType', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='description', full_name='ga4gh.schemas.ga4gh.EvidenceQuery.description', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='external_identifiers', full_name='ga4gh.schemas.ga4gh.EvidenceQuery.external_identifiers', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=604,
  serialized_end=768,
)
# Message descriptor: SearchPhenotypesRequest
# (string phenotype_association_set_id = 1, string id = 2,
#  string description = 3, OntologyTerm type = 4,
#  repeated OntologyTerm qualifiers = 5, OntologyTerm age_of_onset = 6,
#  int64 page_size = 7, string page_token = 8).
# Generated by protoc — do not hand-edit.
_SEARCHPHENOTYPESREQUEST = _descriptor.Descriptor(
  name='SearchPhenotypesRequest',
  full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='phenotype_association_set_id', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.phenotype_association_set_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='id', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='description', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.description', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.type', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='qualifiers', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.qualifiers', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='age_of_onset', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.age_of_onset', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.page_size', index=6,
      number=7, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.page_token', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=771,
  serialized_end=1067,
)
# Descriptor for the SearchPhenotypesResponse message: a repeated
# 'phenotypes' message field plus the standard pagination token.
# Generated by protoc -- do not edit by hand.
_SEARCHPHENOTYPESRESPONSE = _descriptor.Descriptor(
  name='SearchPhenotypesResponse',
  full_name='ga4gh.schemas.ga4gh.SearchPhenotypesResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # Repeated message field (label=3, type=11); its message_type is
    # resolved to PhenotypeInstance in the linking pass below.
    _descriptor.FieldDescriptor(
      name='phenotypes', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesResponse.phenotypes', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte span of this message definition inside DESCRIPTOR.serialized_pb.
  serialized_start=1069,
  serialized_end=1180,
)
# Descriptor for the SearchGenotypePhenotypeRequest message: filters a
# genotype/phenotype association search by association set, feature ids,
# phenotype ids and evidence queries, with pagination.
# Generated by protoc -- do not edit by hand.
_SEARCHGENOTYPEPHENOTYPEREQUEST = _descriptor.Descriptor(
  name='SearchGenotypePhenotypeRequest',
  full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='phenotype_association_set_id', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.phenotype_association_set_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # Repeated string fields (label=3, type=9).
    _descriptor.FieldDescriptor(
      name='feature_ids', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.feature_ids', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='phenotype_ids', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.phenotype_ids', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # Repeated message field; linked to _EVIDENCEQUERY in the pass below.
    _descriptor.FieldDescriptor(
      name='evidence', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.evidence', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.page_size', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.page_token', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte span of this message definition inside DESCRIPTOR.serialized_pb.
  serialized_start=1183,
  serialized_end=1390,
)
# Descriptor for the SearchGenotypePhenotypeResponse message: repeated
# 'associations' message field plus the standard pagination token.
# Generated by protoc -- do not edit by hand.
_SEARCHGENOTYPEPHENOTYPERESPONSE = _descriptor.Descriptor(
  name='SearchGenotypePhenotypeResponse',
  full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # Repeated message field; linked to FeaturePhenotypeAssociation in
    # the linking pass below.
    _descriptor.FieldDescriptor(
      name='associations', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeResponse.associations', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte span of this message definition inside DESCRIPTOR.serialized_pb.
  serialized_start=1393,
  serialized_end=1523,
)
# Linking pass: now that every Descriptor object exists, resolve each
# message-typed field (type=11) to its target message descriptor -- either
# one defined in an imported module (common_pb2, genotype_phenotype_pb2)
# or one defined above in this module (_EVIDENCEQUERY).
_SEARCHPHENOTYPEASSOCIATIONSETSRESPONSE.fields_by_name['phenotype_association_sets'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_genotype__phenotype__pb2._PHENOTYPEASSOCIATIONSET
_ONTOLOGYTERMQUERY.fields_by_name['terms'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._ONTOLOGYTERM
_EXTERNALIDENTIFIERQUERY.fields_by_name['ids'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._EXTERNALIDENTIFIER
_EVIDENCEQUERY.fields_by_name['evidenceType'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._ONTOLOGYTERM
_EVIDENCEQUERY.fields_by_name['external_identifiers'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._EXTERNALIDENTIFIER
_SEARCHPHENOTYPESREQUEST.fields_by_name['type'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._ONTOLOGYTERM
_SEARCHPHENOTYPESREQUEST.fields_by_name['qualifiers'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._ONTOLOGYTERM
_SEARCHPHENOTYPESREQUEST.fields_by_name['age_of_onset'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._ONTOLOGYTERM
_SEARCHPHENOTYPESRESPONSE.fields_by_name['phenotypes'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_genotype__phenotype__pb2._PHENOTYPEINSTANCE
_SEARCHGENOTYPEPHENOTYPEREQUEST.fields_by_name['evidence'].message_type = _EVIDENCEQUERY
_SEARCHGENOTYPEPHENOTYPERESPONSE.fields_by_name['associations'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_genotype__phenotype__pb2._FEATUREPHENOTYPEASSOCIATION
# Register each top-level message descriptor under its simple name on the
# file descriptor, so it is discoverable via DESCRIPTOR.message_types_by_name.
DESCRIPTOR.message_types_by_name['SearchPhenotypeAssociationSetsRequest'] = _SEARCHPHENOTYPEASSOCIATIONSETSREQUEST
DESCRIPTOR.message_types_by_name['SearchPhenotypeAssociationSetsResponse'] = _SEARCHPHENOTYPEASSOCIATIONSETSRESPONSE
DESCRIPTOR.message_types_by_name['OntologyTermQuery'] = _ONTOLOGYTERMQUERY
DESCRIPTOR.message_types_by_name['ExternalIdentifierQuery'] = _EXTERNALIDENTIFIERQUERY
DESCRIPTOR.message_types_by_name['EvidenceQuery'] = _EVIDENCEQUERY
DESCRIPTOR.message_types_by_name['SearchPhenotypesRequest'] = _SEARCHPHENOTYPESREQUEST
DESCRIPTOR.message_types_by_name['SearchPhenotypesResponse'] = _SEARCHPHENOTYPESRESPONSE
DESCRIPTOR.message_types_by_name['SearchGenotypePhenotypeRequest'] = _SEARCHGENOTYPEPHENOTYPEREQUEST
DESCRIPTOR.message_types_by_name['SearchGenotypePhenotypeResponse'] = _SEARCHGENOTYPEPHENOTYPERESPONSE
# Class-synthesis pass: for each descriptor, build the concrete Python
# message class via the reflection metaclass (GeneratedProtocolMessageType)
# and register it with the default symbol database so it can be looked up
# by full name at runtime. These classes are the module's public API.
SearchPhenotypeAssociationSetsRequest = _reflection.GeneratedProtocolMessageType('SearchPhenotypeAssociationSetsRequest', (_message.Message,), dict(
  DESCRIPTOR = _SEARCHPHENOTYPEASSOCIATIONSETSREQUEST,
  __module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
  # @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest)
  ))
_sym_db.RegisterMessage(SearchPhenotypeAssociationSetsRequest)
SearchPhenotypeAssociationSetsResponse = _reflection.GeneratedProtocolMessageType('SearchPhenotypeAssociationSetsResponse', (_message.Message,), dict(
  DESCRIPTOR = _SEARCHPHENOTYPEASSOCIATIONSETSRESPONSE,
  __module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
  # @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsResponse)
  ))
_sym_db.RegisterMessage(SearchPhenotypeAssociationSetsResponse)
OntologyTermQuery = _reflection.GeneratedProtocolMessageType('OntologyTermQuery', (_message.Message,), dict(
  DESCRIPTOR = _ONTOLOGYTERMQUERY,
  __module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
  # @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.OntologyTermQuery)
  ))
_sym_db.RegisterMessage(OntologyTermQuery)
ExternalIdentifierQuery = _reflection.GeneratedProtocolMessageType('ExternalIdentifierQuery', (_message.Message,), dict(
  DESCRIPTOR = _EXTERNALIDENTIFIERQUERY,
  __module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
  # @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.ExternalIdentifierQuery)
  ))
_sym_db.RegisterMessage(ExternalIdentifierQuery)
EvidenceQuery = _reflection.GeneratedProtocolMessageType('EvidenceQuery', (_message.Message,), dict(
  DESCRIPTOR = _EVIDENCEQUERY,
  __module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
  # @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.EvidenceQuery)
  ))
_sym_db.RegisterMessage(EvidenceQuery)
SearchPhenotypesRequest = _reflection.GeneratedProtocolMessageType('SearchPhenotypesRequest', (_message.Message,), dict(
  DESCRIPTOR = _SEARCHPHENOTYPESREQUEST,
  __module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
  # @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchPhenotypesRequest)
  ))
_sym_db.RegisterMessage(SearchPhenotypesRequest)
SearchPhenotypesResponse = _reflection.GeneratedProtocolMessageType('SearchPhenotypesResponse', (_message.Message,), dict(
  DESCRIPTOR = _SEARCHPHENOTYPESRESPONSE,
  __module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
  # @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchPhenotypesResponse)
  ))
_sym_db.RegisterMessage(SearchPhenotypesResponse)
SearchGenotypePhenotypeRequest = _reflection.GeneratedProtocolMessageType('SearchGenotypePhenotypeRequest', (_message.Message,), dict(
  DESCRIPTOR = _SEARCHGENOTYPEPHENOTYPEREQUEST,
  __module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
  # @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest)
  ))
_sym_db.RegisterMessage(SearchGenotypePhenotypeRequest)
SearchGenotypePhenotypeResponse = _reflection.GeneratedProtocolMessageType('SearchGenotypePhenotypeResponse', (_message.Message,), dict(
  DESCRIPTOR = _SEARCHGENOTYPEPHENOTYPERESPONSE,
  __module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
  # @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchGenotypePhenotypeResponse)
  ))
_sym_db.RegisterMessage(SearchGenotypePhenotypeResponse)
| 46.454717 | 3,000 | 0.779213 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ga4gh/schemas/ga4gh/genotype_phenotype_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ga4gh.schemas.ga4gh import common_pb2 as ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2
from ga4gh.schemas.ga4gh import genotype_phenotype_pb2 as ga4gh_dot_schemas_dot_ga4gh_dot_genotype__phenotype__pb2
from ga4gh.schemas.google.api import annotations_pb2 as ga4gh_dot_schemas_dot_google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='ga4gh/schemas/ga4gh/genotype_phenotype_service.proto',
package='ga4gh.schemas.ga4gh',
syntax='proto3',
serialized_pb=_b('\n4ga4gh/schemas/ga4gh/genotype_phenotype_service.proto\x12\x13ga4gh.schemas.ga4gh\x1a ga4gh/schemas/ga4gh/common.proto\x1a,ga4gh/schemas/ga4gh/genotype_phenotype.proto\x1a*ga4gh/schemas/google/api/annotations.proto\"b\n%SearchPhenotypeAssociationSetsRequest\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x03\x12\x12\n\npage_token\x18\x03 \x01(\t\"\x93\x01\n&SearchPhenotypeAssociationSetsResponse\x12P\n\x1aphenotype_association_sets\x18\x01 \x03(\x0b\x32,.ga4gh.schemas.ga4gh.PhenotypeAssociationSet\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"E\n\x11OntologyTermQuery\x12\x30\n\x05terms\x18\x01 \x03(\x0b\x32!.ga4gh.schemas.ga4gh.OntologyTerm\"O\n\x17\x45xternalIdentifierQuery\x12\x34\n\x03ids\x18\x01 \x03(\x0b\x32\'.ga4gh.schemas.ga4gh.ExternalIdentifier\"\xa4\x01\n\rEvidenceQuery\x12\x37\n\x0c\x65videnceType\x18\x01 \x01(\x0b\x32!.ga4gh.schemas.ga4gh.OntologyTerm\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x45\n\x14\x65xternal_identifiers\x18\x03 \x03(\x0b\x32\'.ga4gh.schemas.ga4gh.ExternalIdentifier\"\xa8\x02\n\x17SearchPhenotypesRequest\x12$\n\x1cphenotype_association_set_id\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12/\n\x04type\x18\x04 \x01(\x0b\x32!.ga4gh.schemas.ga4gh.OntologyTerm\x12\x35\n\nqualifiers\x18\x05 \x03(\x0b\x32!.ga4gh.schemas.ga4gh.OntologyTerm\x12\x37\n\x0c\x61ge_of_onset\x18\x06 \x01(\x0b\x32!.ga4gh.schemas.ga4gh.OntologyTerm\x12\x11\n\tpage_size\x18\x07 \x01(\x03\x12\x12\n\npage_token\x18\x08 \x01(\t\"o\n\x18SearchPhenotypesResponse\x12:\n\nphenotypes\x18\x01 \x03(\x0b\x32&.ga4gh.schemas.ga4gh.PhenotypeInstance\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\xcf\x01\n\x1eSearchGenotypePhenotypeRequest\x12$\n\x1cphenotype_association_set_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x65\x61ture_ids\x18\x02 \x03(\t\x12\x15\n\rphenotype_ids\x18\x03 \x03(\t\x12\x34\n\x08\x65vidence\x18\x04 
\x03(\x0b\x32\".ga4gh.schemas.ga4gh.EvidenceQuery\x12\x11\n\tpage_size\x18\x05 \x01(\x03\x12\x12\n\npage_token\x18\x06 \x01(\t\"\x82\x01\n\x1fSearchGenotypePhenotypeResponse\x12\x46\n\x0c\x61ssociations\x18\x01 \x03(\x0b\x32\x30.ga4gh.schemas.ga4gh.FeaturePhenotypeAssociation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xcd\x04\n\x18GenotypePhenotypeService\x12\xd0\x01\n\x1eSearchPhenotypeAssociationSets\x12:.ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest\x1a;.ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsResponse\"5\x82\xd3\xe4\x93\x02/\"*/v0.6.0a10/phenotypeassociationsets/search:\x01*\x12\x97\x01\n\x0fSearchPhenotype\x12,.ga4gh.schemas.ga4gh.SearchPhenotypesRequest\x1a-.ga4gh.schemas.ga4gh.SearchPhenotypesResponse\"\'\x82\xd3\xe4\x93\x02!\"\x1c/v0.6.0a10/phenotypes/search:\x01*\x12\xc3\x01\n\x1bSearchPhenotypeAssociations\x12\x33.ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest\x1a\x34.ga4gh.schemas.ga4gh.SearchGenotypePhenotypeResponse\"9\x82\xd3\xe4\x93\x02\x33\"./v0.6.0a10/featurephenotypeassociations/search:\x01*b\x06proto3')
,
dependencies=[ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2.DESCRIPTOR,ga4gh_dot_schemas_dot_ga4gh_dot_genotype__phenotype__pb2.DESCRIPTOR,ga4gh_dot_schemas_dot_google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SEARCHPHENOTYPEASSOCIATIONSETSREQUEST = _descriptor.Descriptor(
name='SearchPhenotypeAssociationSetsRequest',
full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset_id', full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest.dataset_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_size', full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest.page_size', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_token', full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest.page_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=201,
serialized_end=299,
)
_SEARCHPHENOTYPEASSOCIATIONSETSRESPONSE = _descriptor.Descriptor(
name='SearchPhenotypeAssociationSetsResponse',
full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='phenotype_association_sets', full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsResponse.phenotype_association_sets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=302,
serialized_end=449,
)
_ONTOLOGYTERMQUERY = _descriptor.Descriptor(
name='OntologyTermQuery',
full_name='ga4gh.schemas.ga4gh.OntologyTermQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='terms', full_name='ga4gh.schemas.ga4gh.OntologyTermQuery.terms', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=451,
serialized_end=520,
)
_EXTERNALIDENTIFIERQUERY = _descriptor.Descriptor(
name='ExternalIdentifierQuery',
full_name='ga4gh.schemas.ga4gh.ExternalIdentifierQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ids', full_name='ga4gh.schemas.ga4gh.ExternalIdentifierQuery.ids', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=522,
serialized_end=601,
)
_EVIDENCEQUERY = _descriptor.Descriptor(
name='EvidenceQuery',
full_name='ga4gh.schemas.ga4gh.EvidenceQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='evidenceType', full_name='ga4gh.schemas.ga4gh.EvidenceQuery.evidenceType', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='ga4gh.schemas.ga4gh.EvidenceQuery.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='external_identifiers', full_name='ga4gh.schemas.ga4gh.EvidenceQuery.external_identifiers', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=604,
serialized_end=768,
)
_SEARCHPHENOTYPESREQUEST = _descriptor.Descriptor(
name='SearchPhenotypesRequest',
full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='phenotype_association_set_id', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.phenotype_association_set_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.type', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='qualifiers', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.qualifiers', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='age_of_onset', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.age_of_onset', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_size', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.page_size', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_token', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesRequest.page_token', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=771,
serialized_end=1067,
)
_SEARCHPHENOTYPESRESPONSE = _descriptor.Descriptor(
name='SearchPhenotypesResponse',
full_name='ga4gh.schemas.ga4gh.SearchPhenotypesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='phenotypes', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesResponse.phenotypes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='ga4gh.schemas.ga4gh.SearchPhenotypesResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1069,
serialized_end=1180,
)
_SEARCHGENOTYPEPHENOTYPEREQUEST = _descriptor.Descriptor(
name='SearchGenotypePhenotypeRequest',
full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='phenotype_association_set_id', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.phenotype_association_set_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='feature_ids', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.feature_ids', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='phenotype_ids', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.phenotype_ids', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='evidence', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.evidence', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_size', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.page_size', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_token', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest.page_token', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1183,
serialized_end=1390,
)
_SEARCHGENOTYPEPHENOTYPERESPONSE = _descriptor.Descriptor(
name='SearchGenotypePhenotypeResponse',
full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='associations', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeResponse.associations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='ga4gh.schemas.ga4gh.SearchGenotypePhenotypeResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1393,
serialized_end=1523,
)
_SEARCHPHENOTYPEASSOCIATIONSETSRESPONSE.fields_by_name['phenotype_association_sets'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_genotype__phenotype__pb2._PHENOTYPEASSOCIATIONSET
_ONTOLOGYTERMQUERY.fields_by_name['terms'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._ONTOLOGYTERM
_EXTERNALIDENTIFIERQUERY.fields_by_name['ids'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._EXTERNALIDENTIFIER
_EVIDENCEQUERY.fields_by_name['evidenceType'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._ONTOLOGYTERM
_EVIDENCEQUERY.fields_by_name['external_identifiers'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._EXTERNALIDENTIFIER
_SEARCHPHENOTYPESREQUEST.fields_by_name['type'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._ONTOLOGYTERM
_SEARCHPHENOTYPESREQUEST.fields_by_name['qualifiers'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._ONTOLOGYTERM
_SEARCHPHENOTYPESREQUEST.fields_by_name['age_of_onset'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_common__pb2._ONTOLOGYTERM
_SEARCHPHENOTYPESRESPONSE.fields_by_name['phenotypes'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_genotype__phenotype__pb2._PHENOTYPEINSTANCE
_SEARCHGENOTYPEPHENOTYPEREQUEST.fields_by_name['evidence'].message_type = _EVIDENCEQUERY
_SEARCHGENOTYPEPHENOTYPERESPONSE.fields_by_name['associations'].message_type = ga4gh_dot_schemas_dot_ga4gh_dot_genotype__phenotype__pb2._FEATUREPHENOTYPEASSOCIATION
DESCRIPTOR.message_types_by_name['SearchPhenotypeAssociationSetsRequest'] = _SEARCHPHENOTYPEASSOCIATIONSETSREQUEST
DESCRIPTOR.message_types_by_name['SearchPhenotypeAssociationSetsResponse'] = _SEARCHPHENOTYPEASSOCIATIONSETSRESPONSE
DESCRIPTOR.message_types_by_name['OntologyTermQuery'] = _ONTOLOGYTERMQUERY
DESCRIPTOR.message_types_by_name['ExternalIdentifierQuery'] = _EXTERNALIDENTIFIERQUERY
DESCRIPTOR.message_types_by_name['EvidenceQuery'] = _EVIDENCEQUERY
DESCRIPTOR.message_types_by_name['SearchPhenotypesRequest'] = _SEARCHPHENOTYPESREQUEST
DESCRIPTOR.message_types_by_name['SearchPhenotypesResponse'] = _SEARCHPHENOTYPESRESPONSE
DESCRIPTOR.message_types_by_name['SearchGenotypePhenotypeRequest'] = _SEARCHGENOTYPEPHENOTYPEREQUEST
DESCRIPTOR.message_types_by_name['SearchGenotypePhenotypeResponse'] = _SEARCHGENOTYPEPHENOTYPERESPONSE
SearchPhenotypeAssociationSetsRequest = _reflection.GeneratedProtocolMessageType('SearchPhenotypeAssociationSetsRequest', (_message.Message,), dict(
DESCRIPTOR = _SEARCHPHENOTYPEASSOCIATIONSETSREQUEST,
__module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
# @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsRequest)
))
_sym_db.RegisterMessage(SearchPhenotypeAssociationSetsRequest)
SearchPhenotypeAssociationSetsResponse = _reflection.GeneratedProtocolMessageType('SearchPhenotypeAssociationSetsResponse', (_message.Message,), dict(
DESCRIPTOR = _SEARCHPHENOTYPEASSOCIATIONSETSRESPONSE,
__module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
# @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchPhenotypeAssociationSetsResponse)
))
_sym_db.RegisterMessage(SearchPhenotypeAssociationSetsResponse)
OntologyTermQuery = _reflection.GeneratedProtocolMessageType('OntologyTermQuery', (_message.Message,), dict(
DESCRIPTOR = _ONTOLOGYTERMQUERY,
__module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
# @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.OntologyTermQuery)
))
_sym_db.RegisterMessage(OntologyTermQuery)
ExternalIdentifierQuery = _reflection.GeneratedProtocolMessageType('ExternalIdentifierQuery', (_message.Message,), dict(
DESCRIPTOR = _EXTERNALIDENTIFIERQUERY,
__module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
# @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.ExternalIdentifierQuery)
))
_sym_db.RegisterMessage(ExternalIdentifierQuery)
EvidenceQuery = _reflection.GeneratedProtocolMessageType('EvidenceQuery', (_message.Message,), dict(
DESCRIPTOR = _EVIDENCEQUERY,
__module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
# @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.EvidenceQuery)
))
_sym_db.RegisterMessage(EvidenceQuery)
SearchPhenotypesRequest = _reflection.GeneratedProtocolMessageType('SearchPhenotypesRequest', (_message.Message,), dict(
DESCRIPTOR = _SEARCHPHENOTYPESREQUEST,
__module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
# @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchPhenotypesRequest)
))
_sym_db.RegisterMessage(SearchPhenotypesRequest)
SearchPhenotypesResponse = _reflection.GeneratedProtocolMessageType('SearchPhenotypesResponse', (_message.Message,), dict(
DESCRIPTOR = _SEARCHPHENOTYPESRESPONSE,
__module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
# @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchPhenotypesResponse)
))
_sym_db.RegisterMessage(SearchPhenotypesResponse)
SearchGenotypePhenotypeRequest = _reflection.GeneratedProtocolMessageType('SearchGenotypePhenotypeRequest', (_message.Message,), dict(
DESCRIPTOR = _SEARCHGENOTYPEPHENOTYPEREQUEST,
__module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
# @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchGenotypePhenotypeRequest)
))
_sym_db.RegisterMessage(SearchGenotypePhenotypeRequest)
SearchGenotypePhenotypeResponse = _reflection.GeneratedProtocolMessageType('SearchGenotypePhenotypeResponse', (_message.Message,), dict(
DESCRIPTOR = _SEARCHGENOTYPEPHENOTYPERESPONSE,
__module__ = 'ga4gh.schemas.ga4gh.genotype_phenotype_service_pb2'
# @@protoc_insertion_point(class_scope:ga4gh.schemas.ga4gh.SearchGenotypePhenotypeResponse)
))
_sym_db.RegisterMessage(SearchGenotypePhenotypeResponse)
# @@protoc_insertion_point(module_scope)
| 0 | 0 | 0 |
a3987566f81f1f390282f04c2e3ede2162a5addc | 2,041 | py | Python | headlinenews/supasql.py | bradybellini/headline-news | 048171443f10236c2d74251f16f83fd1f7a02633 | [
"MIT"
] | null | null | null | headlinenews/supasql.py | bradybellini/headline-news | 048171443f10236c2d74251f16f83fd1f7a02633 | [
"MIT"
] | null | null | null | headlinenews/supasql.py | bradybellini/headline-news | 048171443f10236c2d74251f16f83fd1f7a02633 | [
"MIT"
] | null | null | null | import os, logging
from typing import Any
from dotenv import load_dotenv
from supabase import create_client, Client
from headlinenews import RSSParser
from postgrest_py import exceptions
| 37.109091 | 85 | 0.611465 | import os, logging
from typing import Any
from dotenv import load_dotenv
from supabase import create_client, Client
from headlinenews import RSSParser
from postgrest_py import exceptions
class SupaPSQL:
    """Sync RSS feed articles into Supabase (PostgreSQL).

    Reads the Supabase connection settings from the environment
    (``SUPABASE_URL`` / ``SUPABASE_KEY``, optionally loaded from a ``.env``
    file), fetches the registered feeds from the ``feeds`` table and
    upserts the parsed articles into the ``articles_staging`` table.
    """

    def __init__(self) -> None:
        self.logger = logging.getLogger("rogger_logger.supasql.SupaPSQL")
        self.logger.info("Creating an instance of SupaPSQL")
        load_dotenv()
        # NOTE(review): these are None when the env vars are missing;
        # create_client() would then fail later — confirm that is acceptable.
        self.url: str = os.environ.get("SUPABASE_URL")
        self.key: str = os.environ.get("SUPABASE_KEY")
        self.insert_table: str = "articles_staging"
        self.select_table: str = "feeds"

    def _create_client(self) -> Client:
        """Build and return a new Supabase client from the stored credentials."""
        self.logger.info("Creating supabase client")
        supabase: Client = create_client(self.url, self.key)
        self.logger.info("Created supabase client")
        return supabase

    def _select(self) -> Any:
        """Fetch ``feed_url`` and ``id`` for every registered feed."""
        supabase: Client = self._create_client()
        self.logger.info("Fetching feeds from supabase")
        select = supabase.table(self.select_table).select("feed_url", "id").execute()
        self.logger.info("Fetched feeds from supabase")
        return select

    def _insert(self, feed: str, feed_id: str) -> None:
        """Parse ``feed`` and upsert its articles into the staging table.

        Duplicate rows (PostgreSQL error code 23505, unique violation) are
        expected and only logged at debug level; any other API error is
        logged with its traceback and swallowed so one bad article does not
        abort the whole run.
        """
        supabase: Client = self._create_client()
        parser = RSSParser()
        articles = parser.articles(feed, feed_id)
        # fixed: dropped a leftover debug print() of every article and
        # iterate the articles directly instead of via range(len(...)).
        for article in articles:
            try:
                self.logger.debug(f"Inserting articles {article}")
                supabase.table(self.insert_table).upsert(
                    article,
                ).execute()
            except exceptions.APIError as e:
                if e.code == "23505":
                    # duplicate key — already ingested, not an error
                    self.logger.debug(e)
                else:
                    self.logger.exception("Exception has occurred")

    def run(self) -> None:
        """Sync every registered feed into the staging table."""
        feeds = self._select()
        for row in feeds.data:
            self.logger.debug(f"Inserting {row['feed_url']}")
            self._insert(row["feed_url"], row["id"])
| 1,702 | -6 | 157 |
910af70fde3d4b24a60b0a876965bc7f748573ce | 4,695 | py | Python | aiounittest/helpers.py | tmaila/aiounittest | c43d3b619fd6a8fd071758996a5f42310b0293dc | [
"MIT"
] | null | null | null | aiounittest/helpers.py | tmaila/aiounittest | c43d3b619fd6a8fd071758996a5f42310b0293dc | [
"MIT"
] | null | null | null | aiounittest/helpers.py | tmaila/aiounittest | c43d3b619fd6a8fd071758996a5f42310b0293dc | [
"MIT"
] | null | null | null | import asyncio
import functools
def futurized(o):
''' Makes the given object to be awaitable.
:param any o: Object to wrap
:return: awaitable that resolves to provided object
:rtype: asyncio.Future
Anything passed to :code:`futurized` is wrapped in :code:`asyncio.Future`.
This makes it awaitable (can be run with :code:`await` or :code:`yield from`) as
a result of await it returns the original object.
If provided object is a Exception (or its sublcass) then the `Future` will raise it on await.
.. code-block:: python
fut = aiounittest.futurized('SOME TEXT')
ret = await fut
print(ret) # prints SOME TEXT
fut = aiounittest.futurized(Exception('Dummy error'))
ret = await fut # will raise the exception "dummy error"
The main goal is to use it with :code:`unittest.mock.Mock` (or :code:`MagicMock`) to
be able to mock awaitable functions (coroutines).
Consider the below code
.. code-block:: python
from asyncio import sleep
async def add(x, y):
await sleep(666)
return x + y
You rather don't want to wait 666 seconds, you've gotta mock that.
.. code-block:: python
from aiounittest import futurized, AsyncTestCase
from unittest.mock import Mock, patch
import dummy_math
class MyAddTest(AsyncTestCase):
async def test_add(self):
mock_sleep = Mock(return_value=futurized('whatever'))
patch('dummy_math.sleep', mock_sleep).start()
ret = await dummy_math.add(5, 6)
self.assertEqual(ret, 11)
mock_sleep.assert_called_once_with(666)
async def test_fail(self):
mock_sleep = Mock(return_value=futurized(Exception('whatever')))
patch('dummy_math.sleep', mock_sleep).start()
with self.assertRaises(Exception) as e:
await dummy_math.add(5, 6)
mock_sleep.assert_called_once_with(666)
'''
f = asyncio.Future()
if isinstance(o, Exception):
f.set_exception(o)
else:
f.set_result(o)
return f
def run_sync(func=None, loop=None):
''' Runs synchonously given function (coroutine)
:param callable func: function to run (mostly coroutine)
:param ioloop loop: event loop to use to run `func`
:type loop: event loop of None
By default the brand new event loop will be created (old closed). After completion, the loop will be closed and then recreated, set as default,
leaving asyncio clean.
**Note**: :code:`aiounittest.async_test` is an alias of :code:`aiounittest.helpers.run_sync`
Function can be used like a `pytest.mark.asyncio` (implemetation differs),
but it's compatible with :code:`unittest.TestCase` class.
.. code-block:: python
import asyncio
import unittest
from aiounittest import async_test
async def add(x, y):
await asyncio.sleep(0.1)
return x + y
class MyAsyncTestDecorator(unittest.TestCase):
@async_test
async def test_async_add(self):
ret = await add(5, 6)
self.assertEqual(ret, 11)
.. note::
If the loop is provided, it won't be closed. It's up to you.
This function is also used internally by :code:`aiounittest.AsyncTestCase` to run coroutines.
'''
if func is None:
return decorator
else:
return decorator(func)
async_test = run_sync
| 30.888158 | 147 | 0.594249 | import asyncio
import functools
def futurized(o):
    """Wrap *o* in an :class:`asyncio.Future` so it can be awaited.

    Awaiting the returned future yields ``o`` unchanged; if ``o`` is an
    :class:`Exception` instance, awaiting the future raises it instead.
    This makes it easy to stub awaitable functions (coroutines) with plain
    ``unittest.mock.Mock(return_value=futurized(...))`` objects::

        mock_sleep = Mock(return_value=futurized('whatever'))
        patch('dummy_math.sleep', mock_sleep).start()
        ret = await dummy_math.add(5, 6)

        mock_fail = Mock(return_value=futurized(Exception('whatever')))
        # awaiting the patched call now raises the exception

    :param any o: value (or exception) the future should resolve to
    :return: awaitable that resolves to — or raises — ``o``
    :rtype: asyncio.Future
    """
    fut = asyncio.Future()
    resolve = fut.set_exception if isinstance(o, Exception) else fut.set_result
    resolve(o)
    return fut
def run_sync(func=None, loop=None):
    """Run the given coroutine function synchronously.

    :param callable func: coroutine function to wrap; ``None`` when the
        decorator is applied with arguments (``@run_sync`` vs
        ``run_sync(loop=...)``)
    :param loop: event loop used to drive ``func``; when ``None`` (the
        default) a brand new loop is created for every call, closed
        afterwards, and a fresh default loop is installed so asyncio is
        left in a clean state
    :type loop: event loop or None

    Can be used like ``pytest.mark.asyncio`` (implementation differs) but
    is compatible with plain ``unittest.TestCase`` classes::

        class MyAsyncTestDecorator(unittest.TestCase):

            @run_sync
            async def test_async_add(self):
                ret = await add(5, 6)
                self.assertEqual(ret, 11)

    .. note::

        If a loop is provided explicitly, it is *not* closed — that is the
        caller's responsibility.

    This function is also used internally by ``aiounittest.AsyncTestCase``
    to run coroutines.
    """

    def get_brand_new_default_event_loop():
        # Close the current default loop (if still open) and install a
        # fresh one so later asyncio users never see a closed loop.
        old_loop = asyncio.get_event_loop()
        if not old_loop.is_closed():
            old_loop.close()
        _loop = asyncio.new_event_loop()
        asyncio.set_event_loop(_loop)
        return _loop

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            nonlocal loop
            use_default_event_loop = loop is None
            if use_default_event_loop:
                loop = get_brand_new_default_event_loop()
            try:
                ret = f(*args, **kwargs)
                future = asyncio.ensure_future(ret, loop=loop)
                return loop.run_until_complete(future)
            finally:
                if use_default_event_loop:
                    # clean up: close our private loop and reset the
                    # nonlocal back to None.  Fixed: this used ``del loop``
                    # before, which unbound the closure cell and made any
                    # SECOND call of the wrapped function raise NameError
                    # at the ``loop is None`` check above.
                    loop.close()
                    loop = None
                    # again set a new (unstopped) default event loop
                    get_brand_new_default_event_loop()
        return wrapper

    if func is None:
        return decorator
    else:
        return decorator(func)
async_test = run_sync
| 936 | 0 | 53 |
a01937274946e3d3dc59bb59097e6ddcb1f8f4f7 | 4,528 | py | Python | daemon/core/gui/dialogs/alerts.py | kleberango/core | 87ca431e73fec22faeaebd6b25fc76e0b165c639 | [
"BSD-2-Clause"
] | null | null | null | daemon/core/gui/dialogs/alerts.py | kleberango/core | 87ca431e73fec22faeaebd6b25fc76e0b165c639 | [
"BSD-2-Clause"
] | null | null | null | daemon/core/gui/dialogs/alerts.py | kleberango/core | 87ca431e73fec22faeaebd6b25fc76e0b165c639 | [
"BSD-2-Clause"
] | 1 | 2021-09-03T19:18:54.000Z | 2021-09-03T19:18:54.000Z | """
check engine light
"""
import tkinter as tk
from tkinter import ttk
from typing import TYPE_CHECKING, Dict, Optional
from core.gui.dialogs.dialog import Dialog
from core.gui.themes import PADX, PADY
from core.gui.widgets import CodeText
from core.gui.wrappers import ExceptionEvent, ExceptionLevel
if TYPE_CHECKING:
from core.gui.app import Application
| 39.373913 | 87 | 0.628092 | """
check engine light
"""
import tkinter as tk
from tkinter import ttk
from typing import TYPE_CHECKING, Dict, Optional
from core.gui.dialogs.dialog import Dialog
from core.gui.themes import PADX, PADY
from core.gui.widgets import CodeText
from core.gui.wrappers import ExceptionEvent, ExceptionLevel
if TYPE_CHECKING:
from core.gui.app import Application
class AlertsDialog(Dialog):
    """Dialog listing the CORE exception events ("alerts") of the session.

    Shows one table row per exception, color-coded by severity level, with
    a read-only detail pane displaying the selected exception's text.
    """

    def __init__(self, app: "Application") -> None:
        super().__init__(app, "Alerts")
        # widgets are created in draw()
        self.tree: Optional[ttk.Treeview] = None
        self.codetext: Optional[CodeText] = None
        # maps a treeview item id to the exception event shown in that row
        # (Treeview.insert returns item ids as str)
        self.alarm_map: Dict[str, ExceptionEvent] = {}
        self.draw()

    def draw(self) -> None:
        """Build the dialog widgets: alert table, detail pane and buttons."""
        self.top.columnconfigure(0, weight=1)
        self.top.rowconfigure(0, weight=1)
        self.top.rowconfigure(1, weight=1)
        # alert table with one column per exception attribute
        frame = ttk.Frame(self.top)
        frame.columnconfigure(0, weight=1)
        frame.rowconfigure(0, weight=1)
        frame.grid(sticky=tk.NSEW, pady=PADY)
        self.tree = ttk.Treeview(
            frame,
            columns=("time", "level", "session_id", "node", "source"),
            show="headings",
        )
        self.tree.grid(row=0, column=0, sticky=tk.NSEW)
        self.tree.column("time", stretch=tk.YES)
        self.tree.heading("time", text="Time")
        self.tree.column("level", stretch=tk.YES, width=100)
        self.tree.heading("level", text="Level")
        self.tree.column("session_id", stretch=tk.YES, width=100)
        self.tree.heading("session_id", text="Session ID")
        self.tree.column("node", stretch=tk.YES, width=100)
        self.tree.heading("node", text="Node")
        self.tree.column("source", stretch=tk.YES, width=100)
        self.tree.heading("source", text="Source")
        self.tree.bind("<<TreeviewSelect>>", self.click_select)
        # one row per alert currently held by the status bar; the row is
        # tagged with its severity name so it can be colored below
        for exception in self.app.statusbar.core_alarms:
            level_name = exception.level.name
            node_id = exception.node_id if exception.node_id else ""
            insert_id = self.tree.insert(
                "",
                tk.END,
                text=exception.date,
                values=(
                    exception.date,
                    level_name,
                    exception.session_id,
                    node_id,
                    exception.source,
                ),
                tags=(level_name,),
            )
            self.alarm_map[insert_id] = exception
        # per-severity row background colors
        error_name = ExceptionLevel.ERROR.name
        self.tree.tag_configure(error_name, background="#ff6666")
        fatal_name = ExceptionLevel.FATAL.name
        self.tree.tag_configure(fatal_name, background="#d9d9d9")
        warning_name = ExceptionLevel.WARNING.name
        self.tree.tag_configure(warning_name, background="#ffff99")
        notice_name = ExceptionLevel.NOTICE.name
        self.tree.tag_configure(notice_name, background="#85e085")
        # scrollbars for the alert table
        yscrollbar = ttk.Scrollbar(frame, orient="vertical", command=self.tree.yview)
        yscrollbar.grid(row=0, column=1, sticky=tk.NS)
        self.tree.configure(yscrollcommand=yscrollbar.set)
        xscrollbar = ttk.Scrollbar(frame, orient="horizontal", command=self.tree.xview)
        xscrollbar.grid(row=1, sticky=tk.EW)
        self.tree.configure(xscrollcommand=xscrollbar.set)
        # read-only detail pane; filled by click_select()
        self.codetext = CodeText(self.top)
        self.codetext.text.config(state=tk.DISABLED, height=11)
        self.codetext.grid(sticky=tk.NSEW, pady=PADY)
        # button row
        frame = ttk.Frame(self.top)
        frame.grid(sticky=tk.EW)
        frame.columnconfigure(0, weight=1)
        frame.columnconfigure(1, weight=1)
        button = ttk.Button(frame, text="Reset", command=self.reset_alerts)
        button.grid(row=0, column=0, sticky=tk.EW, padx=PADX)
        button = ttk.Button(frame, text="Close", command=self.destroy)
        button.grid(row=0, column=1, sticky=tk.EW)

    def reset_alerts(self) -> None:
        """Clear the detail pane, remove all table rows and drop the alerts."""
        # the text widget must be writable to be cleared
        self.codetext.text.config(state=tk.NORMAL)
        self.codetext.text.delete(1.0, tk.END)
        self.codetext.text.config(state=tk.DISABLED)
        for item in self.tree.get_children():
            self.tree.delete(item)
        self.app.statusbar.clear_alerts()

    def click_select(self, event: tk.Event) -> None:
        """Show the text of the newly selected alert in the detail pane."""
        # NOTE(review): assumes at least one row is selected — selection()[0]
        # would raise IndexError on an empty selection; confirm tkinter only
        # fires <<TreeviewSelect>> with a selection here.
        current = self.tree.selection()[0]
        exception = self.alarm_map[current]
        self.codetext.text.config(state=tk.NORMAL)
        self.codetext.text.delete(1.0, tk.END)
        self.codetext.text.insert(1.0, exception.text)
        self.codetext.text.config(state=tk.DISABLED)
| 4,028 | 6 | 130 |
13445cd76a57b0cfb98ffb3231749aa5d5c91508 | 195 | py | Python | gamefixes/356500.py | manueliglesiasgarcia/protonfixes | d676b6bf39f6e4268b4791d3d71c6d74e2127121 | [
"BSD-2-Clause"
] | 54 | 2019-06-21T22:03:45.000Z | 2022-03-20T19:24:36.000Z | gamefixes/356500.py | manueliglesiasgarcia/protonfixes | d676b6bf39f6e4268b4791d3d71c6d74e2127121 | [
"BSD-2-Clause"
] | 21 | 2020-06-13T22:49:18.000Z | 2022-03-20T08:28:39.000Z | gamefixes/356500.py | manueliglesiasgarcia/protonfixes | d676b6bf39f6e4268b4791d3d71c6d74e2127121 | [
"BSD-2-Clause"
] | 53 | 2019-09-11T15:23:25.000Z | 2022-03-20T08:18:49.000Z | """ Game fix for STAR WARS Galactic Battlegrounds Saga
"""
#pylint: disable=C0103
from protonfixes import util
| 17.727273 | 54 | 0.728205 | """ Game fix for STAR WARS Galactic Battlegrounds Saga
"""
#pylint: disable=C0103
from protonfixes import util
def main():
    """Run the protontricks verbs this game needs (codecs and DirectPlay)."""
    for verb in ('icodecs', 'directplay'):
        util.protontricks(verb)
| 59 | 0 | 23 |
9693d0adc1ee8de233d3015aa07f6141ff5843be | 28 | py | Python | test/login.py | huang880/test | 9c65ffd470dd52633ded4eb2b8e1cd89d3b7c5d0 | [
"MIT"
] | null | null | null | test/login.py | huang880/test | 9c65ffd470dd52633ded4eb2b8e1cd89d3b7c5d0 | [
"MIT"
] | null | null | null | test/login.py | huang880/test | 9c65ffd470dd52633ded4eb2b8e1cd89d3b7c5d0 | [
"MIT"
] | null | null | null | a=1
b=2
c=3
d=4
你是不是个沙雕
f=6
| 4 | 7 | 0.607143 | a=1
b=2
c=3
d=4
你是不是个沙雕
f=6
| 0 | 0 | 0 |
e051ca2acbd4d511635ff08104c916ea3c6f9804 | 1,841 | py | Python | tests/integration/tasks/test_azure_data_lake.py | gwieloch/viadot | 8026c4f5e48e5bec7bf62ea3e3e044e81ef3321f | [
"MIT"
] | 23 | 2021-05-04T13:28:30.000Z | 2022-02-16T18:51:24.000Z | tests/integration/tasks/test_azure_data_lake.py | gwieloch/viadot | 8026c4f5e48e5bec7bf62ea3e3e044e81ef3321f | [
"MIT"
] | 142 | 2021-05-01T10:48:12.000Z | 2022-03-28T15:00:06.000Z | tests/integration/tasks/test_azure_data_lake.py | gwieloch/viadot | 8026c4f5e48e5bec7bf62ea3e3e044e81ef3321f | [
"MIT"
] | 13 | 2021-05-14T18:28:10.000Z | 2022-03-17T10:17:17.000Z | import os
import uuid
from viadot.sources import AzureDataLake
from viadot.tasks import (
AzureDataLakeDownload,
AzureDataLakeToDF,
AzureDataLakeUpload,
AzureDataLakeCopy,
AzureDataLakeList,
)
uuid_4 = uuid.uuid4()
uuid_4_2 = uuid.uuid4()
file_name = f"test_file_{uuid_4}.csv"
file_name_2 = f"test_file_{uuid_4}.csv"
adls_path = f"raw/supermetrics/{file_name}"
adls_path_2 = f"raw/supermetrics/{file_name_2}"
file_name_parquet = f"test_file_{uuid_4}.parquet"
adls_path_parquet = f"raw/supermetrics/{file_name_parquet}"
# TODO: add pytest-depends as download tests depend on the upload
# and can't be ran separately
| 27.073529 | 80 | 0.767518 | import os
import uuid
from viadot.sources import AzureDataLake
from viadot.tasks import (
AzureDataLakeDownload,
AzureDataLakeToDF,
AzureDataLakeUpload,
AzureDataLakeCopy,
AzureDataLakeList,
)
# Unique per-run file names so concurrent test runs do not clash in the lake.
uuid_4 = uuid.uuid4()
uuid_4_2 = uuid.uuid4()
file_name = f"test_file_{uuid_4}.csv"
# fixed: this previously reused uuid_4, making file_name_2 == file_name
# (and leaving uuid_4_2 unused), so the copy test copied a file onto itself.
file_name_2 = f"test_file_{uuid_4_2}.csv"
adls_path = f"raw/supermetrics/{file_name}"
adls_path_2 = f"raw/supermetrics/{file_name_2}"
file_name_parquet = f"test_file_{uuid_4}.parquet"
adls_path_parquet = f"raw/supermetrics/{file_name_parquet}"
# TODO: add pytest-depends as download tests depend on the upload
# and can't be ran separately
def test_azure_data_lake_upload(TEST_CSV_FILE_PATH):
    """Upload the local CSV fixture to the lake and check it exists there.

    ``TEST_CSV_FILE_PATH`` is a pytest fixture — presumably the path of a
    local CSV file; confirm in conftest.
    """
    upload_task = AzureDataLakeUpload()
    upload_task.run(from_path=TEST_CSV_FILE_PATH, to_path=adls_path)
    file = AzureDataLake(adls_path)
    assert file.exists()
def test_azure_data_lake_download():
    """Download the previously uploaded file and verify it appears locally.

    Depends on test_azure_data_lake_upload having run first (see the module
    TODO about pytest-depends).  Removes the downloaded file afterwards.
    """
    download_task = AzureDataLakeDownload()
    download_task.run(from_path=adls_path)
    assert os.path.exists(file_name)
    os.remove(file_name)
def test_azure_data_lake_to_df():
    """Load the uploaded CSV from the lake into a DataFrame and check it has rows."""
    task = AzureDataLakeToDF()
    # the CSV fixture is tab-separated, hence sep="\t"
    df = task.run(path=adls_path, sep="\t")
    assert not df.empty
def test_azure_data_lake_to_df_parquet(TEST_PARQUET_FILE_PATH):
    """Upload a parquet fixture, load it back into a DataFrame and check it has rows.

    ``TEST_PARQUET_FILE_PATH`` is a pytest fixture — presumably the path of
    a local parquet file; confirm in conftest.
    """
    upload_task = AzureDataLakeUpload()
    upload_task.run(from_path=TEST_PARQUET_FILE_PATH, to_path=adls_path_parquet)
    lake_to_df_task = AzureDataLakeToDF()
    df = lake_to_df_task.run(path=adls_path_parquet)
    assert not df.empty
def test_azure_data_lake_copy():
    """Copy the uploaded file to a second path and check the copy exists."""
    copy_task = AzureDataLakeCopy()
    copy_task.run(from_path=adls_path, to_path=adls_path_2)
    file = AzureDataLake(adls_path_2)
    assert file.exists()
def test_azure_data_lake_list():
    """List the lake directory and check the uploaded file is reported."""
    list_task = AzureDataLakeList()
    files = list_task.run(path="raw/supermetrics")
    assert adls_path in files
| 1,057 | 0 | 138 |
3210d5af97fee381968e118be3cb3f5b7f008957 | 2,093 | py | Python | denoising_autoencoder.py | JiananGao/DP-NMF | be96e67bd8955e46baa4d3f477f40c32a009c1cd | [
"MIT"
] | 2 | 2019-03-16T16:59:39.000Z | 2020-12-22T02:09:42.000Z | denoising_autoencoder.py | JiananGao/DP-NMF | be96e67bd8955e46baa4d3f477f40c32a009c1cd | [
"MIT"
] | null | null | null | denoising_autoencoder.py | JiananGao/DP-NMF | be96e67bd8955e46baa4d3f477f40c32a009c1cd | [
"MIT"
] | null | null | null | #Author: Satwik Bhattamishra
import tensorflow as tf
import numpy as np
import tensorflow.examples.tutorials.mnist.input_data as input_data
if __name__ == '__main__':
denoising_autoencoder()
| 26.493671 | 116 | 0.68753 | #Author: Satwik Bhattamishra
import tensorflow as tf
import numpy as np
import tensorflow.examples.tutorials.mnist.input_data as input_data
def denoising_autoencoder(hidden_units=500, prob=0.4):
    """Train a single-hidden-layer denoising autoencoder on MNIST (TF1 graph API).

    Gradients are derived manually and applied with explicit ``assign`` ops
    instead of a TF optimizer; the decoder weights are tied to the encoder
    (``W_`` is re-assigned to ``W`` transposed each step).  Trained weights
    are checkpointed to ``./weights/da.ckpt``.

    :param hidden_units: size of the hidden (encoding) layer
    :param prob: std-dev of the Gaussian noise used to corrupt the input
    """
    learning_rate = 0.001
    batch_size = 50
    n_epochs = 10

    x = tf.placeholder(tf.float32, [None, 784], name='x')
    # corrupt the input with Gaussian noise sampled once at graph-build time
    x_ = tf.add(x, np.random.normal(loc=0.0, scale=prob, size=(batch_size, 784)))

    n_inp = 784
    n_out = hidden_units
    W = tf.Variable(tf.random_uniform([n_inp, n_out], -1.0 / np.sqrt(n_inp), 1.0 / np.sqrt(n_inp)), dtype=tf.float32)
    b = tf.Variable(tf.truncated_normal([n_out], dtype=tf.float32))
    W_ = tf.Variable(tf.random_uniform([n_out, n_inp], -1.0 / np.sqrt(n_inp), 1.0 / np.sqrt(n_inp)), dtype=tf.float32)
    b_ = tf.Variable(tf.truncated_normal([n_inp], dtype=tf.float32))

    z = tf.nn.sigmoid(tf.matmul(x_, W) + b)    # encoder activation
    y = tf.nn.sigmoid(tf.matmul(z, W_) + b_)   # reconstruction

    # cross-entropy reconstruction cost (clipped to avoid log(0))
    cost = tf.reduce_mean(-tf.reduce_sum(x * tf.log(tf.clip_by_value(y, 1e-10, 1.0))))

    # manually derived gradient terms
    lout = tf.subtract(x, y)
    lh = tf.multiply(tf.multiply(tf.matmul(lout, W), z), (tf.subtract(1.0, z)))
    lb = lh
    lb_ = lout
    grad_W = tf.add(tf.matmul(tf.transpose(x_), lh), tf.matmul(tf.transpose(lout), z))
    grad_b = tf.reduce_mean(lb, axis=0)
    grad_b_ = tf.reduce_mean(lb_, axis=0)

    # explicit update ops; W_ stays tied to W via its transpose
    new_W = W.assign(W + learning_rate * grad_W)
    new_W_ = W_.assign(tf.transpose(W))
    new_b = b.assign(b + learning_rate * grad_b)
    new_b_ = b_.assign(b_ + learning_rate * grad_b_)

    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    mean_img = np.mean(mnist.train.images, axis=0)  # NOTE(review): unused — kept for parity

    # fixed: `saver` was referenced below (and in the commented restore)
    # but never created, raising NameError after training finished.
    saver = tf.train.Saver()

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # saver.restore(sess, "./weights/da.cpkt")

    for i in range(n_epochs):
        avg_cost = 0
        batches = mnist.train.num_examples // batch_size
        for batch_i in range(batches):
            batch_xs, _ = mnist.train.next_batch(batch_size)
            _, _, _, _, ce = sess.run([new_W, new_W_, new_b, new_b_, cost], feed_dict={x: batch_xs})
            avg_cost += ce / batches
        print(i, avg_cost)
    save = saver.save(sess, "./weights/da.ckpt")
if __name__ == '__main__':
    # Train with the default hyper-parameters when run as a script.
    denoising_autoencoder()
| 1,875 | 0 | 23 |
a7c06cb92e54c73f18c82681723c99eff1f48301 | 1,097 | py | Python | Array/longestMountain/longestMountain.py | qyp1997/leetcoder | 4c01f11e5138cbb9aa12b4f6ef0c4a60d25b92c2 | [
"MIT"
] | null | null | null | Array/longestMountain/longestMountain.py | qyp1997/leetcoder | 4c01f11e5138cbb9aa12b4f6ef0c4a60d25b92c2 | [
"MIT"
] | null | null | null | Array/longestMountain/longestMountain.py | qyp1997/leetcoder | 4c01f11e5138cbb9aa12b4f6ef0c4a60d25b92c2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@Time : 2020/10/25 17:14
@Auth : Qi
@IDE : PyCharm
@Title: 845. 数组中的最长山脉
@Link : https://leetcode-cn.com/problems/longest-mountain-in-array/
"""
if __name__ == '__main__':
# 测试用例
s = Solution()
print(s.longestMountain([2, 1, 4, 7, 3, 2, 5]))
print(s.longestMountain([0, 1, 0, 0, 1, 1, 1, 1]))
print(s.longestMountain([2, 2, 2]))
| 29.648649 | 76 | 0.441203 | # -*- coding: utf-8 -*-
"""
@Time : 2020/10/25 17:14
@Auth : Qi
@IDE : PyCharm
@Title: 845. 数组中的最长山脉
@Link : https://leetcode-cn.com/problems/longest-mountain-in-array/
"""
class Solution:
    def longestMountain(self, A) -> int:
        """Return the length of the longest strict up-then-down run in A.

        A mountain needs at least one strictly increasing step followed by
        at least one strictly decreasing step (length >= 3); returns 0 when
        no such run exists.  O(n) time, O(1) space.
        """
        n = len(A)
        best = 0
        base = 0
        while base < n - 2:
            end = base
            # walk up the ascending side, if there is one
            if A[end] < A[end + 1]:
                while end + 1 < n and A[end] < A[end + 1]:
                    end += 1
                # the peak only counts when a strict descent follows
                if end + 1 < n and A[end] > A[end + 1]:
                    while end + 1 < n and A[end] > A[end + 1]:
                        end += 1
                    best = max(best, end - base + 1)
            # resume from the end of this slope (it can start the next climb)
            base = max(end, base + 1)
        return best
if __name__ == '__main__':
    # Test cases (expected output: 5, 3, 0)
    s = Solution()
    print(s.longestMountain([2, 1, 4, 7, 3, 2, 5]))
    print(s.longestMountain([0, 1, 0, 0, 1, 1, 1, 1]))
    print(s.longestMountain([2, 2, 2]))
| 673 | -6 | 49 |
004dae0bab2e65b2b02c2b31eb46c43d4c61bc03 | 3,626 | py | Python | software/inpainting/inpainting_iterative_rgbd/inpaint_rgbd_tiled.py | venkai/deploy_core3d | 6333c634f8f79b3ed9d7b13f39137c406f374a4f | [
"BSD-2-Clause"
] | 3 | 2020-05-03T10:22:46.000Z | 2021-06-26T16:17:09.000Z | software/inpainting/inpainting_iterative_rgbd/inpaint_rgbd_tiled.py | venkai/deploy_core3d | 6333c634f8f79b3ed9d7b13f39137c406f374a4f | [
"BSD-2-Clause"
] | null | null | null | software/inpainting/inpainting_iterative_rgbd/inpaint_rgbd_tiled.py | venkai/deploy_core3d | 6333c634f8f79b3ed9d7b13f39137c406f374a4f | [
"BSD-2-Clause"
] | 1 | 2021-01-19T19:49:58.000Z | 2021-01-19T19:49:58.000Z | import os
import time
import cv2
from inference_new import *
import argparse
from glob import glob
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Command for running the Inpainting Pipeline on RGBD satellite imagery")
parser.add_argument('--gpu', type=str, default='0', help='the gpu that will be used, e.g "0"')
parser.add_argument('--nrep', type=int, default=9, help='repeated depth-inpainting iterations (def: 9)')
parser.add_argument('--input-rgb', type=str, default='./example_input_RGB.png', help='path to the 3-channel RGB input file.')
parser.add_argument('--input-dsm', type=str, default='./example_input_DSM.tif', help='path to the DSM input file.')
parser.add_argument('--input-dtm', type=str, default='./example_input_DTM.tif', help='path to the DTM input file.')
parser.add_argument('--outputdir', type=str, default='./results', help='path to write output prediction')
parser.add_argument('--outputfile', type=str, default='example_output', help='Inpainted output')
parser.add_argument('--fp16', action='store_true', default=False, help='whether to use FP16 inference.')
parser.add_argument('--trn-dir', type=str, default='./models', help='directory which contains caffe model for inference')
parser.add_argument('--iter', type=int, default=0, help='which iteration model to choose (def: 0 [choose latest])')
parser.add_argument('--model-type', type=str, default='rgbd', help='Model Type')
parser.add_argument('--extra-pad', type=int, default=0, help='add extra mirror padding to input '
'[sometimes improves results at border pixels] (def: 0)')
args = parser.parse_args()
log.info(args)
main() | 54.119403 | 129 | 0.664368 | import os
import time
import cv2
from inference_new import *
import argparse
from glob import glob
def main():
st = time.time()
outputdir = args.outputdir
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
# Load network
test_prototxt=args.trn_dir + '/test_' + args.model_type + ('_fp16.prototxt' if args.fp16 else '.prototxt')
if args.iter > 0:
test_caffemodel='%s/%s/trn_iter_%d.caffemodel' % (args.trn_dir, args.model_type, args.iter)
else:
# Get most recent model
list_caffemodels=glob('%s/%s/trn_iter_*.caffemodel' % (args.trn_dir, args.model_type))
test_caffemodel = max(list_caffemodels, key=os.path.getctime)
# Load inputs
(img_in_rgb, H, W) = prepare_input(args.input_rgb, fac=8, extra_pad=args.extra_pad)
(img_in_dsm, H, W) = prepare_input(args.input_dsm, fac=8, extra_pad=args.extra_pad)
(img_in_dtm, H, W) = prepare_input(args.input_dtm, fac=8, extra_pad=args.extra_pad)
img_in_dhm = img_in_dsm - img_in_dtm
img_in_dhm[img_in_dhm < 0] = 0
img_in = np.concatenate((img_in_rgb,img_in_dhm),axis=2)
# Forward pass
for rp_ind in range(args.nrep):
log.info('Forward Pass %d/%d' % (rp_ind + 1, args.nrep))
img_out = forward_tiled(test_prototxt, test_caffemodel, img_in, H, W, out_ch = 4)
od_gt_id = (img_out[:,:,-1] > img_in[0:H,0:W,-1])
img_out[:,:,-1][od_gt_id] = img_in[0:H,0:W,-1][od_gt_id]
img_in[0:H,0:W,-1] = img_out[:,:,-1]
# Save outputs
img_rgb_out = img_out[:,:,0:3]
imsave(img_rgb_out, out_file=outputdir + '/' + args.outputfile + '_RGB.tif')
img_dhm_out = img_out[:,:,-1]
img_dsm_out = img_dhm_out + img_in_dtm[0:H, 0:W,0]
imsave(img_dsm_out, out_file=outputdir + '/' + args.outputfile + '_DSM.tif')
imsave(img_dhm_out, out_file=outputdir + '/' + args.outputfile + '_DHM.tif')
log.info('Total Time: %0.4f secs' % (time.time() - st))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Command for running the Inpainting Pipeline on RGBD satellite imagery")
parser.add_argument('--gpu', type=str, default='0', help='the gpu that will be used, e.g "0"')
parser.add_argument('--nrep', type=int, default=9, help='repeated depth-inpainting iterations (def: 9)')
parser.add_argument('--input-rgb', type=str, default='./example_input_RGB.png', help='path to the 3-channel RGB input file.')
parser.add_argument('--input-dsm', type=str, default='./example_input_DSM.tif', help='path to the DSM input file.')
parser.add_argument('--input-dtm', type=str, default='./example_input_DTM.tif', help='path to the DTM input file.')
parser.add_argument('--outputdir', type=str, default='./results', help='path to write output prediction')
parser.add_argument('--outputfile', type=str, default='example_output', help='Inpainted output')
parser.add_argument('--fp16', action='store_true', default=False, help='whether to use FP16 inference.')
parser.add_argument('--trn-dir', type=str, default='./models', help='directory which contains caffe model for inference')
parser.add_argument('--iter', type=int, default=0, help='which iteration model to choose (def: 0 [choose latest])')
parser.add_argument('--model-type', type=str, default='rgbd', help='Model Type')
parser.add_argument('--extra-pad', type=int, default=0, help='add extra mirror padding to input '
'[sometimes improves results at border pixels] (def: 0)')
args = parser.parse_args()
log.info(args)
main() | 1,839 | 0 | 23 |
6257c7f64cd12ee5ad761bbc7011d80335ead4b9 | 22,819 | py | Python | threeML/utils/time_series/time_series.py | Husky22/threeML | 2ef3401e3edf82ceffd85ad0a9ea9e8b2bba3520 | [
"BSD-3-Clause"
] | null | null | null | threeML/utils/time_series/time_series.py | Husky22/threeML | 2ef3401e3edf82ceffd85ad0a9ea9e8b2bba3520 | [
"BSD-3-Clause"
] | null | null | null | threeML/utils/time_series/time_series.py | Husky22/threeML | 2ef3401e3edf82ceffd85ad0a9ea9e8b2bba3520 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
__author__ = 'grburgess'
import collections
import os
import numpy as np
import pandas as pd
from pandas import HDFStore
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.io.file_utils import sanitize_filename
from threeML.utils.spectrum.binned_spectrum import Quality
from threeML.utils.time_interval import TimeIntervalSet
from threeML.utils.time_series.polynomial import polyfit, unbinned_polyfit, Polynomial
# find out how many splits we need to make
| 29.55829 | 139 | 0.603401 | from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
__author__ = 'grburgess'
import collections
import os
import numpy as np
import pandas as pd
from pandas import HDFStore
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.io.file_utils import sanitize_filename
from threeML.utils.spectrum.binned_spectrum import Quality
from threeML.utils.time_interval import TimeIntervalSet
from threeML.utils.time_series.polynomial import polyfit, unbinned_polyfit, Polynomial
class ReducingNumberOfThreads(Warning):
pass
class ReducingNumberOfSteps(Warning):
pass
class OverLappingIntervals(RuntimeError):
pass
# find out how many splits we need to make
def ceildiv(a, b):
return -(-a // b)
class TimeSeries(object):
def __init__(self, start_time, stop_time, n_channels, native_quality=None,
first_channel=1, ra=None, dec=None, mission=None, instrument=None, verbose=True, edges=None):
"""
The EventList is a container for event data that is tagged in time and in PHA/energy. It handles event selection,
temporal polynomial fitting, temporal binning, and exposure calculations (in subclasses). Once events are selected
and/or polynomials are fit, the selections can be extracted via a PHAContainer which is can be read by an OGIPLike
instance and translated into a PHA instance.
:param n_channels: Number of detector channels
:param start_time: start time of the event list
:param stop_time: stop time of the event list
:param first_channel: where detchans begin indexing
:param rsp_file: the response file corresponding to these events
:param arrival_times: list of event arrival times
:param energies: list of event energies or pha channels
:param native_quality: native pha quality flags
:param edges: The histogram boundaries if not specified by a response
:param mission:
:param instrument:
:param verbose:
:param ra:
:param dec:
"""
self._verbose = verbose
self._n_channels = n_channels
self._first_channel = first_channel
self._native_quality = native_quality
# we haven't made selections yet
self._time_intervals = None
self._poly_intervals = None
self._counts = None
self._exposure = None
self._poly_counts = None
self._poly_count_err = None
self._poly_selected_counts= None
self._poly_exposure = None
# ebounds for objects w/o a response
self._edges = edges
if native_quality is not None:
assert len(
native_quality) == n_channels, "the native quality has length %d but you specified there were %d channels" % (
len(native_quality), n_channels)
self._start_time = start_time
self._stop_time = stop_time
# name the instrument if there is not one
if instrument is None:
custom_warnings.warn('No instrument name is given. Setting to UNKNOWN')
self._instrument = "UNKNOWN"
else:
self._instrument = instrument
if mission is None:
custom_warnings.warn('No mission name is given. Setting to UNKNOWN')
self._mission = "UNKNOWN"
else:
self._mission = mission
self._user_poly_order = -1
self._time_selection_exists = False
self._poly_fit_exists = False
self._fit_method_info = {"bin type": None, 'fit method': None}
def set_active_time_intervals(self, *args):
    """Select the active source time intervals; concrete subclasses implement this."""
    raise RuntimeError("Must be implemented in subclass")
@property
def poly_fit_exists(self):
    """Whether a background polynomial fit has been performed."""
    return self._poly_fit_exists
@property
def n_channels(self):
    """Number of detector channels in this event list."""
    return self._n_channels
@property
def poly_intervals(self):
    """The time intervals used for the background polynomial fit."""
    return self._poly_intervals
@property
def polynomials(self):
    """Return the fitted background polynomials.

    :raises RuntimeError: if no polynomial fit has been made yet
    """
    if not self._poly_fit_exists:
        # BUG FIX: the RuntimeError was previously constructed but never
        # raised, so callers silently received None.
        raise RuntimeError('A polynomial fit has not been made.')
    return self._polynomials
def get_poly_info(self):
    """
    Return the polynomial coefficients and errors as pandas DataFrames.

    :return: dict with keys 'coefficients' and 'error', each a DataFrame
        with one row per fitted polynomial
    :raises RuntimeError: if no polynomial fit has been made yet
    """
    if not self._poly_fit_exists:
        # BUG FIX: the RuntimeError was previously constructed but never
        # raised, so callers silently received None.
        raise RuntimeError('A polynomial fit has not been made.')

    coeff = [poly.coefficients for poly in self._polynomials]
    err = [poly.error for poly in self._polynomials]

    return {'coefficients': pd.DataFrame(coeff), 'error': pd.DataFrame(err)}
def get_total_poly_count(self, start, stop, mask=None):
    """
    Get the total background (polynomial) counts integrated over [start, stop].

    :param start: start time of the integral
    :param stop: stop time of the integral
    :param mask: optional boolean mask selecting which polynomials to sum
    :return: summed background counts
    """
    if mask is None:
        # BUG FIX: np.bool was deprecated and removed in NumPy >= 1.24;
        # the builtin bool is the supported dtype spelling.
        mask = np.ones_like(self._polynomials, dtype=bool)

    total_counts = 0

    for p in np.asarray(self._polynomials)[mask]:
        total_counts += p.integral(start, stop)

    return total_counts
def get_total_poly_error(self, start, stop, mask=None):
    """
    Get the total background (polynomial) error over [start, stop], summed in
    quadrature across the selected polynomials.

    :param start: start time of the integral
    :param stop: stop time of the integral
    :param mask: optional boolean mask selecting which polynomials to sum
    :return: total error (square root of the summed squared integral errors)
    """
    if mask is None:
        # BUG FIX: np.bool was deprecated and removed in NumPy >= 1.24;
        # the builtin bool is the supported dtype spelling.
        mask = np.ones_like(self._polynomials, dtype=bool)

    total_counts = 0

    for p in np.asarray(self._polynomials)[mask]:
        total_counts += p.integral_error(start, stop) ** 2

    return np.sqrt(total_counts)
@property
def bins(self):
    """The temporal binner attached to this event list.

    :raises RuntimeError: if no binning has been specified
    """
    if self._temporal_binner is None:
        raise RuntimeError('This EventList has no binning specified')
    return self._temporal_binner
def __set_poly_order(self, value):
    """Set the polynomial order (0-4, or -1 for automatic) and redo the fit.

    :param value: integer polynomial order
    :raises RuntimeError: if a fit exists but no time selection does
        (inconsistent internal state)
    """
    assert type(value) is int, "Polynomial order must be integer"
    assert -1 <= value <= 4, "Polynomial order must be 0-4 or -1 to have it determined"
    self._user_poly_order = value
    if self._poly_fit_exists:
        print('Refitting background with new polynomial order (%d) and existing selections' % value)
        if self._time_selection_exists:
            self.set_polynomial_fit_interval(*self._poly_intervals.to_string().split(','), unbinned=self._unbinned)
        else:
            # BUG FIX: this error was constructed but never raised, silently
            # hiding an inconsistent internal state.
            raise RuntimeError("This is a bug. Should never get here")
def ___set_poly_order(self, value):
    """Forward to the private poly-order setter (indirection for property use)."""
    self.__set_poly_order(value)
def __get_poly_order(self):
    """Return the optimal polynomial grade determined by the fit."""
    return self._optimal_polynomial_grade
def ___get_poly_order(self):
    """Forward to the private poly-order getter (indirection for property use)."""
    return self.__get_poly_order()
# Public read/write property wrapping the name-mangled getter/setter pair;
# the double-indirection (___get/___set) lets subclasses override the
# mangled implementations.
poly_order = property(___get_poly_order, ___set_poly_order,
                      doc="Get or set the polynomial order")
@property
def time_intervals(self):
    """The active time intervals of the events.

    :return: the currently selected time intervals
    """
    return self._time_intervals
def exposure_over_interval(self, tmin, tmax):
    """Calculate the exposure over [tmin, tmax]; implemented by subclasses."""
    raise RuntimeError("Must be implemented in sub class")
def counts_over_interval(self, start, stop):
    """
    Return the number of counts in the selected interval.

    :param start: start of interval
    :param stop: stop of interval
    :return: the number of events falling in [start, stop]
    """
    # this will be a boolean list and the sum will be the
    # number of events
    raise RuntimeError("Must be implemented in sub class")
def count_per_channel_over_interval(self, start, stop):
    """
    Return the per-channel counts over an interval; implemented by subclasses.

    :param start: start of interval
    :param stop: stop of interval
    :return: counts per detector channel over [start, stop]
    """
    raise RuntimeError("Must be implemented in sub class")
def set_polynomial_fit_interval(self, *time_intervals, **options):
    """Set the time interval to fit the background.

    Multiple intervals can be input as separate arguments
    Specified as 'tmin-tmax'. Intervals are in seconds. Example:
    set_polynomial_fit_interval("-10.0-0.0","10.-15.")

    :param time_intervals: intervals to fit on
    :param options: only 'unbinned' (bool) is recognized; defaults to True
    """
    # Find out if we want to binned or unbinned.
    # TODO: add the option to config file
    if 'unbinned' in options:
        unbinned = options.pop('unbinned')
        assert type(unbinned) == bool, 'unbinned option must be True or False'
    else:
        # assuming unbinned
        # could use config file here
        # unbinned = threeML_config['ogip']['use-unbinned-poly-fitting']
        unbinned = True
    # we create some time intervals
    poly_intervals = TimeIntervalSet.from_strings(*time_intervals)
    # adjust the selections to the data
    new_intervals = []
    self._poly_selected_counts = []
    self._poly_exposure = 0.
    for i, time_interval in enumerate(poly_intervals):
        t1 = time_interval.start_time
        t2 = time_interval.stop_time
        if (self._stop_time <= t1) or (t2 <= self._start_time):
            # Interval lies entirely outside the data span: drop it.
            custom_warnings.warn(
                "The time interval %f-%f is out side of the arrival times and will be dropped" % (
                    t1, t2))
        else:
            # Clip intervals that partially overlap the data span.
            if t1 < self._start_time:
                custom_warnings.warn(
                    "The time interval %f-%f started before the first arrival time (%f), so we are changing the intervals to %f-%f" % (
                        t1, t2, self._start_time, self._start_time, t2))
                t1 = self._start_time  # + 1
            if t2 > self._stop_time:
                custom_warnings.warn(
                    "The time interval %f-%f ended after the last arrival time (%f), so we are changing the intervals to %f-%f" % (
                        t1, t2, self._stop_time, t1, self._stop_time))
                t2 = self._stop_time  # - 1.
            new_intervals.append('%f-%f' % (t1, t2))
            # Accumulate the counts and exposure over the clipped interval.
            self._poly_selected_counts.append(self.count_per_channel_over_interval(t1, t2))
            self._poly_exposure += self.exposure_over_interval(t1, t2)
    # make new intervals after checks
    poly_intervals = TimeIntervalSet.from_strings(*new_intervals)
    self._poly_selected_counts = np.sum(self._poly_selected_counts, axis=0)
    # set the poly intervals as an attribute
    self._poly_intervals = poly_intervals
    # Fit the events with the given intervals
    if unbinned:
        self._unbinned = True  # keep track!
        self._unbinned_fit_polynomials()
    else:
        self._unbinned = False
        self._fit_polynomials()
    # we have a fit now
    self._poly_fit_exists = True
    if self._verbose:
        print("%s %d-order polynomial fit with the %s method" % (
            self._fit_method_info['bin type'], self._optimal_polynomial_grade, self._fit_method_info['fit method']))
        print('\n')
    # recalculate the selected counts
    if self._time_selection_exists:
        self.set_active_time_intervals(*self._time_intervals.to_string().split(','))
def get_information_dict(self, use_poly=False, extract=False):
    """
    Return a PHA-style information dictionary that can be read by different builders.

    :param use_poly: (bool) build counts/rates from the polynomial (background) fits
    :param extract: (bool) build from the raw counts in the background selections
    :raises RuntimeError: if no time selection has been made yet
    :return: dict of PHA container fields
    """
    if not self._time_selection_exists:
        raise RuntimeError('No time selection exists! Cannot calculate rates')
    if extract:
        is_poisson = True
        counts_err = None
        counts = self._poly_selected_counts
        # BUG FIX: the rates previously divided the active-selection counts
        # (self._counts) by the polynomial exposure; use the same counts that
        # populate the 'counts' field so counts and rates are consistent.
        rates = old_div(counts, self._poly_exposure)
        rate_err = None
        exposure = self._poly_exposure
    elif use_poly:
        is_poisson = False
        counts_err = self._poly_count_err
        counts = self._poly_counts
        rate_err = old_div(self._poly_count_err, self._exposure)
        rates = old_div(self._poly_counts, self._exposure)
        exposure = self._exposure
        # removing negative counts
        idx = counts < 0.
        counts[idx] = 0.
        counts_err[idx] = 0.
        rates[idx] = 0.
        rate_err[idx] = 0.
    else:
        is_poisson = True
        counts_err = None
        counts = self._counts
        rates = old_div(self._counts, self._exposure)
        rate_err = None
        exposure = self._exposure
    if self._native_quality is None:
        quality = np.zeros_like(counts, dtype=int)
    else:
        quality = self._native_quality
    container_dict = {}
    container_dict['instrument'] = self._instrument
    container_dict['telescope'] = self._mission
    container_dict['tstart'] = self._time_intervals.absolute_start_time
    container_dict['telapse'] = self._time_intervals.absolute_stop_time - self._time_intervals.absolute_start_time
    container_dict['channel'] = np.arange(self._n_channels) + self._first_channel
    container_dict['counts'] = counts
    container_dict['counts error'] = counts_err
    container_dict['rates'] = rates
    container_dict['rate error'] = rate_err
    container_dict['edges'] = self._edges
    # check to see if we already have a quality object
    if isinstance(quality, Quality):
        container_dict['quality'] = quality
    else:
        container_dict['quality'] = Quality.from_ogip(quality)
    # TODO: make sure the grouping makes sense
    container_dict['backfile'] = 'NONE'
    container_dict['grouping'] = np.ones(self._n_channels)
    container_dict['exposure'] = exposure
    # container_dict['response'] = self._response
    return container_dict
def __repr__(self):
    """Render the current selection and fit summary as a string."""
    return self._output().to_string()
def _output(self):
    """Assemble a pandas Series summarizing the active selections and any background fit."""
    info_dict = collections.OrderedDict()
    for i, interval in enumerate(self.time_intervals):
        info_dict['active selection (%d)' % (i + 1)] = interval.__repr__()
    # NOTE(review): self._active_dead_time is not set in this class; it
    # appears to be provided by subclasses — confirm.
    info_dict['active deadtime'] = self._active_dead_time
    if self._poly_fit_exists:
        for i, interval in enumerate(self.poly_intervals):
            info_dict['polynomial selection (%d)' % (i + 1)] = interval.__repr__()
        info_dict['polynomial order'] = self._optimal_polynomial_grade
        info_dict['polynomial fit type'] = self._fit_method_info['bin type']
        info_dict['polynomial fit method'] = self._fit_method_info['fit method']
    return pd.Series(info_dict, index=list(info_dict.keys()))
def _fit_global_and_determine_optimum_grade(self, cnts, bins, exposure):
    """
    Find the optimum polynomial grade for *binned* counts by fitting the
    total (all channels) to 0-4 order polynomials and comparing them via a
    likelihood ratio test.

    :param cnts: counts per bin
    :param bins: the bins used
    :param exposure: exposure per bin
    :return: polynomial grade
    """
    min_grade = 0
    max_grade = 4
    # Log-likelihood of each candidate grade.
    log_likelihoods = [
        polyfit(bins, cnts, grade, exposure)[1]
        for grade in range(min_grade, max_grade + 1)
    ]
    # Twice the improvement in log-likelihood between successive grades.
    delta_loglike = np.array(
        [2 * (lo - hi) for lo, hi in zip(log_likelihoods[:-1], log_likelihoods[1:])]
    )
    # A higher grade is justified only beyond this improvement threshold.
    delta_threshold = 9.0
    significant = (delta_loglike >= delta_threshold).nonzero()[0]
    if len(significant) == 0:
        # best grade is zero!
        return 0
    return significant[-1] + 1
def _unbinned_fit_global_and_determine_optimum_grade(self, events, exposure):
    """
    Find the optimum polynomial grade for *unbinned* events by fitting the
    total (all channels) to 0-4 order polynomials and comparing them via a
    likelihood ratio test.

    :param events: an event list
    :param exposure: the exposure per event
    :return: polynomial grade
    """
    # Fit the sum of all the channels to determine the optimal polynomial grade.
    min_grade = 0
    max_grade = 4
    t_start = self._poly_intervals.start_times
    t_stop = self._poly_intervals.stop_times
    # Log-likelihood of each candidate grade.
    log_likelihoods = [
        unbinned_polyfit(events, grade, t_start, t_stop, exposure)[1]
        for grade in range(min_grade, max_grade + 1)
    ]
    # Twice the improvement in log-likelihood between successive grades.
    delta_loglike = np.array(
        [2 * (lo - hi) for lo, hi in zip(log_likelihoods[:-1], log_likelihoods[1:])]
    )
    # A higher grade is justified only beyond this improvement threshold.
    delta_threshold = 9.0
    significant = (delta_loglike >= delta_threshold).nonzero()[0]
    if len(significant) == 0:
        # best grade is zero!
        return 0
    return significant[-1] + 1
def _fit_polynomials(self):
    """Fit background polynomials to binned data; implemented by subclasses."""
    raise NotImplementedError('this must be implemented in a subclass')
def _unbinned_fit_polynomials(self):
    """Fit background polynomials to unbinned events; implemented by subclasses."""
    raise NotImplementedError('this must be implemented in a subclass')
def save_background(self, filename, overwrite=False):
    """
    Save the fitted background polynomials to an HDF5 file.

    :param filename: output file name; any extension is replaced with .h5
    :param overwrite: if True, silently replace an existing file
    :raises IOError: if the file exists and overwrite is False, or it cannot
        be removed
    :raises RuntimeError: if no polynomial fit exists yet
    """
    # make the file name proper
    filename = os.path.splitext(filename)
    filename = "%s.h5" % filename[0]
    filename_sanitized = sanitize_filename(filename)
    # Check that it does not exists
    if os.path.exists(filename_sanitized):
        if overwrite:
            try:
                os.remove(filename_sanitized)
            except OSError:
                # BUG FIX: narrowed from a bare except so that only
                # filesystem errors are translated into this IOError
                # (a bare except would also swallow KeyboardInterrupt).
                raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
                              "permissions to do so?). " % filename_sanitized)
        else:
            raise IOError("The file %s already exists!" % filename_sanitized)
    with HDFStore(filename_sanitized) as store:
        # extract the polynomial information and save it
        if self._poly_fit_exists:
            coeff = []
            err = []
            for poly in self._polynomials:
                coeff.append(poly.coefficients)
                err.append(poly.covariance_matrix)
            df_coeff = pd.Series(coeff)
            df_err = pd.Series(err)
        else:
            raise RuntimeError('the polynomials have not been fit yet')
        df_coeff.to_hdf(store, 'coefficients')
        df_err.to_hdf(store, 'covariance')
        # Stash the fit configuration so restore_fit can rebuild the state.
        store.get_storer('coefficients').attrs.metadata = {'poly_order': self._optimal_polynomial_grade,
                                                           'poly_selections': list(zip(self._poly_intervals.start_times,
                                                                                       self._poly_intervals.stop_times)),
                                                           'unbinned': self._unbinned,
                                                           'fit_method': self._fit_method_info['fit method']}
    if self._verbose:
        print("\nSaved fitted background to %s.\n" % filename)
def restore_fit(self, filename):
    """
    Restore a polynomial background fit previously written by save_background.

    :param filename: path to the HDF5 file created by save_background
    """
    filename_sanitized = sanitize_filename(filename)
    with HDFStore(filename_sanitized) as store:
        coefficients = store['coefficients']
        covariance = store['covariance']
        self._polynomials = []
        # create new polynomials
        for i in range(len(coefficients)):
            coeff = np.array(coefficients.loc[i])
            # make sure we get the right order
            # pandas stores the non-needed coeff
            # as nans.
            coeff = coeff[np.isfinite(coeff)]
            cov = covariance.loc[i]
            self._polynomials.append(Polynomial.from_previous_fit(coeff, cov))
        metadata = store.get_storer('coefficients').attrs.metadata
        self._optimal_polynomial_grade = metadata['poly_order']
        poly_selections = np.array(metadata['poly_selections'])
        self._poly_intervals = TimeIntervalSet.from_starts_and_stops(poly_selections[:, 0], poly_selections[:, 1])
        self._unbinned = metadata['unbinned']
        if self._unbinned:
            self._fit_method_info['bin type'] = 'unbinned'
        else:
            self._fit_method_info['bin type'] = 'binned'
        self._fit_method_info['fit method'] = metadata['fit_method']
    # go thru and count the counts!
    self._poly_fit_exists = True
    # we must go thru and collect the polynomial exposure and counts
    # so that they be extracted if needed
    self._poly_exposure = 0.
    self._poly_selected_counts = []
    for i, time_interval in enumerate(self._poly_intervals):
        t1 = time_interval.start_time
        t2 = time_interval.stop_time
        self._poly_selected_counts.append(self.count_per_channel_over_interval(t1, t2))
        self._poly_exposure += self.exposure_over_interval(t1, t2)
    self._poly_selected_counts = np.sum(self._poly_selected_counts, axis=0)
    if self._time_selection_exists:
        self.set_active_time_intervals(*self._time_intervals.to_string().split(','))
def view_lightcurve(self, start=-10, stop=20., dt=1., use_binner=False):
    """Plot the light curve of the event list; implemented by subclasses."""
    raise NotImplementedError('must be implemented in subclass')
| 3,555 | 18,477 | 114 |
0f7f3b70073770b5b01ea972277895a04e2450c4 | 382 | py | Python | serivce/run.py | LupusAnay/gateway-prototype | e40426e05ca7b2a83bfd8d31fec252086f586eb8 | [
"MIT"
] | null | null | null | serivce/run.py | LupusAnay/gateway-prototype | e40426e05ca7b2a83bfd8d31fec252086f586eb8 | [
"MIT"
] | null | null | null | serivce/run.py | LupusAnay/gateway-prototype | e40426e05ca7b2a83bfd8d31fec252086f586eb8 | [
"MIT"
] | null | null | null | import requests
import json
from flask.cli import FlaskGroup
from flask import jsonify
from serivce.app import create_app
app = create_app()
data = json.dumps(dict(name='service', port='8000'))
headers = {'Content-type': 'application/json'}
requests.post('http://localhost:5000/instance', headers=headers, data=data)
cli = FlaskGroup(app)
if __name__ == '__main__':
cli()
| 20.105263 | 75 | 0.73822 | import requests
import json
from flask.cli import FlaskGroup
from flask import jsonify
from serivce.app import create_app
app = create_app()
data = json.dumps(dict(name='service', port='8000'))
headers = {'Content-type': 'application/json'}
requests.post('http://localhost:5000/instance', headers=headers, data=data)
cli = FlaskGroup(app)
if __name__ == '__main__':
cli()
| 0 | 0 | 0 |
a5de7750593cb2f7bb8b789eae2fd2281336211a | 24,635 | py | Python | src/dorfromantik_helper/ui.py | mcdonnnj/dorfromantik_helper | adba24c135d54fc38d8879860972a869a0eba59e | [
"MIT"
] | null | null | null | src/dorfromantik_helper/ui.py | mcdonnnj/dorfromantik_helper | adba24c135d54fc38d8879860972a869a0eba59e | [
"MIT"
] | null | null | null | src/dorfromantik_helper/ui.py | mcdonnnj/dorfromantik_helper | adba24c135d54fc38d8879860972a869a0eba59e | [
"MIT"
] | null | null | null | """Canvas that displays the full game board."""
# Standard Python Libraries
import tkinter as tk
# Third-Party Libraries
import numpy as np
from . import constants
from .board import DorfBoard
| 38.372274 | 97 | 0.577349 | """Canvas that displays the full game board."""
# Standard Python Libraries
import tkinter as tk
# Third-Party Libraries
import numpy as np
from . import constants
from .board import DorfBoard
class DorfBoardCanvas(tk.Canvas):
    """Canvas widget that renders the full Dorfromantik game board as a hex grid."""

    def __init__(
        self, master, board, tile_canvas, pix_height, pix_width, *args, **kwargs
    ):
        """Create the board canvas.

        :param master: parent tkinter widget
        :param board: the DorfBoard holding the game state
        :param tile_canvas: companion HexTileCanvas used to edit/preview tiles
        :param pix_height: canvas height in pixels
        :param pix_width: canvas width in pixels
        """
        tk.Canvas.__init__(
            self,
            master,
            background="white",
            width=pix_width,
            height=pix_height,
            *args,
            **kwargs
        )
        self.board = board
        self.tile_canvas = tile_canvas
        self.pix_height = pix_height
        self.pix_width = pix_width
        # Hexes highlighted as placement hints and the currently selected hex.
        self.hint_hexes = []
        self.selected_hex = None
        self.set_coordinate_transform_parameters()
        self.set_hex_centers()

    def set_coordinate_transform_parameters(self):
        """Compute and store pixel offsets and scaling parameters.

        These are needed to center the hex grid on the canvas. Offsets the
        coordinates if the displayed board is wider or taller than the canvas
        allows
        """
        # Compute the (x,y) locations in pre-transformed pixel space of all non-empty board tiles
        all_loc_x = []
        all_loc_y = []
        for x in range(self.board.size):
            for y in range(self.board.size):
                if not self.board.is_empty_tile(x, y):
                    loc_x = 1 + 2 * x + y
                    loc_y = 1 + 1.5 * y
                    all_loc_x.append(loc_x)
                    all_loc_y.append(loc_y)
        # Give pixel offsets that attempt to center the board in the frame
        margin = 4
        loc_x_min = min(all_loc_x) - margin
        loc_y_min = min(all_loc_y) - margin
        loc_x_max = max(all_loc_x) + margin
        loc_y_max = max(all_loc_y) + margin
        loc_x_diff = loc_x_max - loc_x_min
        loc_y_diff = loc_y_max - loc_y_min
        # Compute the scale for a hex tile
        # Add offset if board is too wide or too tall for canvas
        if self.pix_height / loc_y_diff > self.pix_width / loc_x_diff:
            hex_edge_len = 2 / 3 ** 0.5 * self.pix_width / loc_x_diff
            loc_y_min -= (
                3 ** 0.5 / 2 * loc_x_diff / self.pix_width * self.pix_height
                - loc_y_diff
            ) / 2
        else:
            hex_edge_len = self.pix_height / loc_y_diff
            loc_x_min -= (
                2 / 3 ** 0.5 * loc_y_diff / self.pix_height * self.pix_width
                - loc_x_diff
            ) / 2
        # Store scaling and offset parameters
        self.y_scale = hex_edge_len
        self.x_scale = 3 ** 0.5 / 2 * hex_edge_len
        self.pix_x_offset = self.x_scale * loc_x_min
        self.pix_y_offset = self.y_scale * loc_y_min
        return

    def get_hex_center_pix(self, x, y):
        """Return the (x,y) coordinates in pixel space of a given hex position."""
        pix_x = self.x_scale * (1 + 2 * x + y) - self.pix_x_offset
        pix_y = self.y_scale * (1 + 1.5 * y) - self.pix_y_offset
        return pix_x, pix_y

    def set_hex_centers(self):
        """Compute and store the (x,y) coordinates in pixel space of all hex positions."""
        self.centers = np.zeros((self.board.size, self.board.size, 2))
        for x in range(self.board.size):
            for y in range(self.board.size):
                self.centers[x, y] = self.get_hex_center_pix(x, y)
        return

    def draw_hexagon(
        self,
        x,
        y,
        border_color=constants.TileOutlineColors.NORMAL,
        border_width=2,
        fill_color="blue",
    ):
        """Draw a hexagon on the canvas given a hex position."""
        pix_x, pix_y = self.get_hex_center_pix(x, y)
        # Compute the coordinates of hexagon vertices
        y_size = self.y_scale
        x_size = self.x_scale
        vertices = [
            (pix_x - x_size, pix_y + y_size / 2),
            (pix_x - x_size, pix_y - y_size / 2),
            (pix_x, pix_y - y_size),
            (pix_x + x_size, pix_y - y_size / 2),
            (pix_x + x_size, pix_y + y_size / 2),
            (pix_x, pix_y + y_size),
        ]
        # Draw hexagon
        if fill_color is not None:
            self.create_polygon(
                vertices[0],
                vertices[1],
                vertices[2],
                vertices[3],
                vertices[4],
                vertices[5],
                fill=fill_color,
            )
        # Draw outline
        if border_color is not None:
            for i in range(len(vertices)):
                self.create_line(
                    vertices[i],
                    vertices[(i + 1) % len(vertices)],
                    fill=border_color,
                    width=border_width,
                )

    def draw_board(self):
        """Draw the full game board on the canvas."""
        for x, row in enumerate(self.board.status):
            for y, status in enumerate(row):
                if status == constants.TileStatus.EMPTY:
                    # self.draw_hexagon(x, y, fill_color=None, border_color='purple')
                    continue
                elif status == constants.TileStatus.VALID and (x, y) in self.hint_hexes:
                    fill_color = constants.TileStatusColors.HINT
                else:
                    fill_color = constants.get_color_from_status(status)
                self.draw_hexagon(x, y, fill_color=fill_color)

    @staticmethod
    def euclidian_distance(x1, y1, x2, y2):
        # Euclidean distance between two points in pixel space.
        return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5

    def get_xy_from_pix(self, pix_x, pix_y):
        """Return the (x,y) position of the hex belonging to the given pixel coordinates.

        Approximates the hexagon as a circle with the inner radius of the hexagon.
        Gives a five percent margin of error to avoid confusing between neighboring
        hexes.
        """
        for x in range(self.board.size):
            for y in range(self.board.size):
                center = self.centers[x, y]
                if (
                    self.euclidian_distance(pix_x, pix_y, center[0], center[1])
                    < self.x_scale * 0.95
                ):
                    return x, y
        return None

    def on_click(self, event):
        """Control behavior of the canvas on a left mouse click.

        Used for highlighting a selected hex. Sets the location of the selected hex
        and a tile that represents the surrounding connection.
        """
        # Remove highlight from previously selected hex tile
        if self.selected_hex:
            x, y = self.selected_hex
            self.draw_hexagon(
                x, y, border_color=constants.TileOutlineColors.NORMAL, fill_color=None
            )
            self.selected_hex = None
        # Get location of newly selected hex tile (if any)
        loc = self.get_xy_from_pix(event.x, event.y)
        if loc is None:
            return
        # Highlight newly selected tile if not empty
        x, y = loc
        if self.board.status[x, y] != constants.TileStatus.EMPTY:
            self.selected_hex = loc
            self.draw_hexagon(
                x, y, border_color=constants.TileOutlineColors.SELECTED, fill_color=None
            )
            # Set the connecting edges in the slice canvas
            if self.board.status[x, y] == constants.TileStatus.VALID:
                connections = self.board.get_connecting_edges(x, y)
            else:
                connections = None
            self.tile_canvas.set_connections(connections)

    def set_hint(self, hints):
        """Set which tiles to highlight given a hint."""
        self.hint_hexes = []
        if hints is None:
            return
        for (x, y, _), _ in hints:
            self.hint_hexes.append((x, y))
class HexTileCanvas(tk.Canvas):
    """Canvas widget for composing a single hex tile, one edge feature at a time."""

    def __init__(self, master, scale, *args, **kwargs):
        """Create the tile-editor canvas.

        :param master: parent tkinter widget
        :param scale: half the hexagon height in pixels
        """
        self.x_scale = (scale ** 2 - (scale / 2.0) ** 2) ** 0.5  # hexagon width
        self.y_scale = scale  # half hexagon height
        self.pix_height = 3 * self.y_scale
        self.pix_width = 3 * self.x_scale
        tk.Canvas.__init__(
            self,
            master,
            background="white",
            width=self.pix_width,
            height=self.pix_height,
            *args,
            **kwargs
        )
        self.selected_slice = None
        self.edges = 6 * [None]
        # Start with slice 0 selected and an all-grass tile.
        self.select_slice(0)
        self.set_tile(6 * [constants.TileEdge.GRASS])

    def get_tile(self):
        """Return the list of six edge features of the tile being composed."""
        return self.edges

    def get_triangle_vertices(self, index, scale=1):
        """Return the three pixel-space vertices of triangular slice ``index``.

        :param index: slice index in [0, 5]
        :param scale: radial scale factor; values > 1 extend past the hexagon edge
        """
        y_size = self.y_scale
        x_size = self.x_scale
        x_offset = 1 / 2 * self.pix_width - x_size
        y_offset = 1 / 2 * self.pix_height - y_size
        origin = [(x_size, y_size)]  # origin
        outside_vertices = [
            ((1 - scale) * x_size, (2 + scale) / 2 * y_size),
            ((1 - scale) * x_size, (2 - scale) / 2 * y_size),
            (1 * x_size, (1 - scale) * y_size),
            ((1 + scale) * x_size, (2 - scale) / 2 * y_size),
            ((1 + scale) * x_size, (2 + scale) / 2 * y_size),
            (1 * x_size, (1 + scale) * y_size),
        ]
        if index == 5:
            # The last slice wraps around to the first outside vertex.
            vertices = origin + [outside_vertices[5], outside_vertices[0]]
        else:
            vertices = origin + outside_vertices[index : index + 2]
        vertices = [(x + x_offset, y + y_offset) for (x, y) in vertices]
        return vertices

    def draw_slice(
        self, index, border_color="black", border_width=2, fill_color="blue"
    ):
        """Draw one triangular slice, optionally filled and/or outlined."""
        vertices = self.get_triangle_vertices(index)
        if fill_color is not None:
            self.create_polygon(vertices[0], vertices[1], vertices[2], fill=fill_color)
        if border_color is not None:
            for i in range(len(vertices)):
                self.create_line(
                    vertices[i],
                    vertices[(i + 1) % len(vertices)],
                    fill=border_color,
                    width=border_width,
                )

    def draw_connection(
        self, index, border_color=None, border_width=2, fill_color="blue"
    ):
        """Draw the band outside edge ``index`` showing a neighboring connection."""
        _, a, b = self.get_triangle_vertices(index, scale=1.1)
        _, d, c = self.get_triangle_vertices(index, scale=1.2)
        if fill_color is not None:
            self.create_polygon(a, b, c, d, fill=fill_color)
        if border_color is not None:
            self.create_line(a, b, fill=border_color, width=border_width)
            self.create_line(b, c, fill=border_color, width=border_width)
            self.create_line(c, d, fill=border_color, width=border_width)
            self.create_line(d, a, fill=border_color, width=border_width)

    def set_edge(self, index, feature):
        """Assign ``feature`` to edge ``index`` and redraw that slice."""
        self.edges[index] = feature
        if self.selected_slice == index or self.selected_slice == -1:
            border_color = constants.TileOutlineColors.SELECTED
        else:
            border_color = constants.TileOutlineColors.NORMAL
        self.draw_slice(
            index,
            fill_color=constants.get_color_from_feature(feature),
            border_color=border_color,
        )

    def set_tile(self, tile):
        """Set all six edge features at once, then re-highlight the selection."""
        for index, feature in enumerate(tile):
            self.set_edge(index, feature)
        self.draw_slice(
            self.selected_slice,
            border_color=constants.TileOutlineColors.SELECTED,
            fill_color=None,
        )

    def set_connections(self, connections):
        """Draw the neighboring-edge features around the tile (or clear them if falsy)."""
        for index in range(6):
            self.draw_connection(
                index,
                fill_color=constants.TileFeatureColors.EMPTY,
                border_color=constants.TileOutlineColors.EMPTY,
            )
        if not connections:
            return
        for index, feature in enumerate(connections):
            if feature != constants.TileEdge.EMPTY:
                self.draw_connection(
                    index,
                    fill_color=constants.get_color_from_feature(feature),
                    border_color=constants.TileOutlineColors.NORMAL,
                )

    def select_slice(self, index):
        """Highlight slice ``index`` as the current editing target."""
        # remove any existing highlights
        for i in range(6):
            self.draw_slice(
                i, border_color=constants.TileOutlineColors.NORMAL, fill_color=None
            )
        # highlight newly selected slice
        self.draw_slice(
            index, border_color=constants.TileOutlineColors.SELECTED, fill_color=None
        )
        self.selected_slice = index

    def select_all(self):
        """Select all six slices (index -1) so edits apply to every edge."""
        print("Selected slice: ALL")
        self.selected_slice = -1
        for i in range(6):
            self.draw_slice(
                i, border_color=constants.TileOutlineColors.SELECTED, fill_color=None
            )

    def set_selected_edge(self, feature, auto_advance=True):
        """Apply ``feature`` to the selected slice(s), optionally advancing the selection."""
        if self.selected_slice == -1:
            for i in range(6):
                self.set_edge(i, feature)
        else:
            self.set_edge(self.selected_slice, feature)
        if auto_advance:
            self.select_slice((self.selected_slice + 1) % 6)

    def rotate(self, reverse=False):
        """Rotate the tile's edge features one step (clockwise unless ``reverse``)."""
        if reverse:
            new_edges = self.edges[1:] + self.edges[:1]
            new_selected_slice = (self.selected_slice - 1) % 6
        else:
            new_edges = self.edges[5:] + self.edges[:5]
            new_selected_slice = (self.selected_slice + 1) % 6
        for index, feature in enumerate(new_edges):
            self.set_edge(index, feature)
        if self.selected_slice != -1:
            self.select_slice(new_selected_slice)

    @staticmethod
    def half_plane_test(p1, p2, p3):
        # Sign of the cross product: which side of the line (p2, p3) point p1 is on.
        return (p1[0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[1] - p3[1])

    def is_inside_triangle(self, point, vertices):
        """Return True if ``point`` lies inside (or on the edge of) the triangle."""
        d1 = self.half_plane_test(point, vertices[0], vertices[1])
        d2 = self.half_plane_test(point, vertices[1], vertices[2])
        d3 = self.half_plane_test(point, vertices[2], vertices[0])
        has_neg = (d1 < 0) or (d2 < 0) or (d3 < 0)
        has_pos = (d1 > 0) or (d2 > 0) or (d3 > 0)
        return not (has_neg and has_pos)

    def get_index_from_pix(self, x, y):
        """Return the slice index containing pixel (x, y), or None if outside all slices."""
        all_triangle_vertices = [self.get_triangle_vertices(i) for i in range(6)]
        for i, vertices in enumerate(all_triangle_vertices):
            if self.is_inside_triangle((x, y), vertices):
                return i
        return None

    def on_click(self, event):
        """Select the slice under a left mouse click, if any."""
        # Get location of newly selected hex tile (if any)
        index = self.get_index_from_pix(event.x, event.y)
        print("Selected slice: ", index)
        if index is None:
            return
        # Highlight newly selected tile if valid
        self.select_slice(index)
class App(tk.Tk):
def __init__(self, from_npz, pix_height, pix_width, *args, **kwargs):
    """Build the main window: board view, tile editor, control buttons, and log.

    :param from_npz: path to a saved board state to load
    :param pix_height: board canvas height in pixels
    :param pix_width: board canvas width in pixels
    """
    tk.Tk.__init__(self, *args, **kwargs)
    board = DorfBoard(from_npz=from_npz)
    self.canvas_height = pix_height
    self.canvas_width = pix_width
    # Layout frames: board view on top, tile editor / controls / log below.
    self.boardview_frame = tk.Frame(
        self, background="#FFF0C1", bd=1, relief="sunken"
    )
    self.tile_frame = tk.Frame(self, background="#D2E2FB", bd=1, relief="sunken")
    self.control_frame = tk.Frame(self, background="#CCE4CA", bd=1, relief="sunken")
    self.textlog_frame = tk.Frame(self, background="#F5C2C1", bd=1, relief="sunken")
    self.boardview_frame.grid(
        row=0, column=0, columnspan=3, sticky="nsew", padx=2, pady=2
    )
    self.tile_frame.grid(row=1, column=0, sticky="nsew", padx=2, pady=2)
    self.control_frame.grid(
        row=1, column=1, rowspan=1, sticky="nsew", padx=2, pady=2
    )
    self.textlog_frame.grid(
        row=1, column=2, rowspan=1, sticky="nsew", padx=2, pady=2
    )
    self.columnconfigure(0, weight=1)
    self.columnconfigure(1, weight=1)
    self.columnconfigure(2, weight=100000)
    self.tile_canvas = HexTileCanvas(self.tile_frame, scale=100)
    self.tile_canvas.bind("<Button-1>", self.tile_canvas.on_click)
    # BUG FIX: the tile canvas was previously gridded twice (the second,
    # redundant call carried no padding options); a single call suffices.
    self.tile_canvas.grid(row=0, column=0, padx=5, pady=5)
    self.board_canvas = DorfBoardCanvas(
        self.boardview_frame,
        board=board,
        tile_canvas=self.tile_canvas,
        pix_height=self.canvas_height,
        pix_width=self.canvas_width,
    )
    self.board_canvas.bind("<Button-1>", self.board_canvas.on_click)
    self.board_canvas.grid(row=0, column=0, padx=5, pady=5)
    self.board_canvas.draw_board()
    # Board-level action buttons (first control column).
    board_controls = []
    frame = self.control_frame
    board_controls.append(tk.Button(frame, text="Place", command=self.place_tile))
    board_controls.append(tk.Button(frame, text="Hint", command=self.display_hint))
    board_controls.append(tk.Button(frame, text="Sample", command=self.sample_tile))
    board_controls.append(tk.Button(frame, text="Remove", command=self.remove_tile))
    board_controls.append(tk.Button(frame, text="Undo", command=self.undo))
    board_controls.append(
        tk.Button(frame, text="Stats", command=self.display_stats)
    )
    board_controls.append(tk.Button(frame, text="Save", command=self.manual_save))
    board_controls.append(tk.Button(frame, text="Quit", command=self.correct_quit))
    for i, button in enumerate(board_controls):
        button.grid(row=i, column=0)
    # Edge-feature buttons (second control column).
    tile_controls = []
    frame = self.control_frame
    fn = self.tile_canvas.set_selected_edge
    tile_controls.append(
        tk.Button(frame, text="ALL", command=self.tile_canvas.select_all)
    )
    tile_controls.append(
        tk.Button(frame, text="Grass", command=lambda: fn(constants.TileEdge.GRASS))
    )
    tile_controls.append(
        tk.Button(frame, text="Trees", command=lambda: fn(constants.TileEdge.TREES))
    )
    tile_controls.append(
        tk.Button(frame, text="House", command=lambda: fn(constants.TileEdge.HOUSE))
    )
    tile_controls.append(
        tk.Button(frame, text="Crops", command=lambda: fn(constants.TileEdge.CROPS))
    )
    tile_controls.append(
        tk.Button(frame, text="River", command=lambda: fn(constants.TileEdge.RIVER))
    )
    tile_controls.append(
        tk.Button(frame, text="Train", command=lambda: fn(constants.TileEdge.TRAIN))
    )
    tile_controls.append(
        tk.Button(frame, text="Water", command=lambda: fn(constants.TileEdge.WATER))
    )
    tile_controls.append(
        tk.Button(
            frame, text="Station", command=lambda: fn(constants.TileEdge.STATION)
        )
    )
    for i, button in enumerate(tile_controls):
        button.grid(row=i, column=1)
    # Rotation buttons (third control column).
    rotate_controls = []
    frame = self.control_frame
    rotate_controls.append(
        tk.Button(
            frame,
            text="Rotate CW",
            command=lambda: self.tile_canvas.rotate(reverse=False),
        )
    )
    rotate_controls.append(
        tk.Button(
            frame,
            text="Rotate CCW",
            command=lambda: self.tile_canvas.rotate(reverse=True),
        )
    )
    for i, button in enumerate(rotate_controls):
        button.grid(row=i, column=2)
    self.log = tk.Label(self.textlog_frame, text="")
    self.log.pack()
    # Single-level undo flag; set True after each auto-saved mutation.
    self.can_undo = False
def manual_save(self):
    """Write the current board state to the manual-save file and log it."""
    self.board_canvas.board.save(to_npz=constants.MANUAL_SAVE_FILEPATH)
    self.log.config(text="Saved board state")
def undo(self):
    """Restore the board from the auto-save made before the last mutating action."""
    if self.can_undo:
        board = DorfBoard(from_npz=constants.AUTO_SAVE_FILEPATH)
        # Rebuild the board canvas around the restored board state.
        self.board_canvas = DorfBoardCanvas(
            self.boardview_frame,
            board=board,
            tile_canvas=self.tile_canvas,
            pix_height=self.canvas_height,
            pix_width=self.canvas_width,
        )
        self.board_canvas.bind("<Button-1>", self.board_canvas.on_click)
        self.board_canvas.grid(row=0, column=0, padx=5, pady=5)
        self.board_canvas.draw_board()
        self.log.config(text="Removed last placed tile")
        # Only one level of undo is supported.
        self.can_undo = False
    else:
        self.log.config(text="ERROR: Unable to undo move")
def place_tile(self):
if self.board_canvas.selected_hex is None:
self.log.config(text="ERROR: no selected tile")
x, y = self.board_canvas.selected_hex
if self.board_canvas.board.status[x, y] != constants.TileStatus.VALID:
self.log.config(
text="ERROR: Illegal tile placement at ({},{})".format(x, y)
)
return
self.board_canvas.board.save(to_npz=constants.AUTO_SAVE_FILEPATH)
self.can_undo = True
tile = self.tile_canvas.get_tile()
self.board_canvas.board.place_tile(x, y, tile)
self.board_canvas.set_coordinate_transform_parameters()
self.board_canvas.set_hex_centers()
self.board_canvas.delete("all")
self.board_canvas.selected_hex = None
self.board_canvas.set_hint(None)
self.board_canvas.draw_board()
self.tile_canvas.set_tile(6 * [constants.TileEdge.GRASS])
self.tile_canvas.set_connections(None)
self.log.config(text="Placed tile at ({},{})".format(x, y))
def remove_tile(self):
if self.board_canvas.selected_hex is None:
self.log.config(text="ERROR: No selected hex to remove")
return
x, y = self.board_canvas.selected_hex
if self.board_canvas.board.status[x, y] == constants.TileStatus.VALID:
self.log.config(text="ERROR: Illegal tile removal at ({},{})".format(x, y))
return
self.board_canvas.board.save(to_npz=constants.AUTO_SAVE_FILEPATH)
self.can_undo = True
self.board_canvas.board.remove_tile(x, y)
self.board_canvas.selected_hex = None
self.board_canvas.set_hint(None)
self.board_canvas.delete("all")
self.board_canvas.draw_board()
self.log.config(text="Removed tile at ({},{})".format(x, y))
def sample_tile(self):
if self.board_canvas.selected_hex is None:
self.log.config(text="ERROR: No selected hex to sample")
return
x, y = self.board_canvas.selected_hex
if self.board_canvas.board.status[x, y] == constants.TileStatus.VALID:
self.log.config(text="ERROR: Illegal tile sample at ({},{})".format(x, y))
return
tile = self.board_canvas.board.edges[x, y]
self.tile_canvas.set_tile(tile)
self.log.config(text="Tile sampled at ({},{})".format(x, y))
def display_hint(self):
tile = self.tile_canvas.get_tile()
hint = self.board_canvas.board.get_hint(tile, threshold=2, top_k=10)
if not hint:
hint = self.board_canvas.board.get_hint(tile, top_k=5)
text_hint = [
"x={}, y={}, {} of {} good connections with {} perfects (score = {})".format(
x,
y,
evaluation["good"],
evaluation["good"] + evaluation["bad"],
evaluation["perfect"],
evaluation["score"],
)
for (x, y, _), evaluation in hint
]
text_hint = "\n".join(text_hint)
self.log.config(text=text_hint)
self.board_canvas.set_hint(hint)
self.board_canvas.draw_board()
def display_stats(self):
text = "{} tiles placed\n".format(
self.board_canvas.board.get_num_tiles_with_status(
[
constants.TileStatus.GOOD,
constants.TileStatus.PERFECT,
constants.TileStatus.IMPERFECT,
]
)
- 1
)
text += "{} perfect tiles\n".format(
self.board_canvas.board.get_num_tiles_with_status(
constants.TileStatus.PERFECT
)
)
text += "{} bad tiles\n".format(
self.board_canvas.board.get_num_tiles_with_status(
constants.TileStatus.IMPERFECT
)
)
text += "{} legal tile locations\n".format(
self.board_canvas.board.get_num_tiles_with_status(
constants.TileStatus.VALID
)
)
self.log.config(text=text)
    def correct_quit(self):
        """Tear down the window and stop the Tk mainloop so the process exits cleanly."""
        # destroy() removes the widgets; quit() then breaks out of mainloop.
        self.destroy()
        self.quit()
| 16,732 | 7,394 | 311 |
8e242ece176dcf825713ef90eb78d86b8b43772e | 283 | py | Python | recent_updates/recent_updates_lib/__init__.py | djmattyg007/archlinux | 268b6356aec59e5f285f67b556e998da1b06a8d2 | [
"Unlicense"
] | null | null | null | recent_updates/recent_updates_lib/__init__.py | djmattyg007/archlinux | 268b6356aec59e5f285f67b556e998da1b06a8d2 | [
"Unlicense"
] | null | null | null | recent_updates/recent_updates_lib/__init__.py | djmattyg007/archlinux | 268b6356aec59e5f285f67b556e998da1b06a8d2 | [
"Unlicense"
] | null | null | null | import requests
from bs4 import BeautifulSoup
from tabulate import tabulate
| 23.583333 | 42 | 0.749117 | import requests
from bs4 import BeautifulSoup
from tabulate import tabulate
def get_pkg_updates(url):
    """Fetch *url* and return the element with id ``pkg-updates``.

    Args:
        url: page to download (an Arch Linux package page, per this module's path).

    Returns:
        The ``pkg-updates`` node, or ``None`` when the page has no such element.
    """
    page = requests.get(url)
    # Name the parser explicitly: relying on bs4's auto-detection emits a
    # GuessedAtParserWarning and can produce different trees depending on
    # which parser libraries happen to be installed.
    soup = BeautifulSoup(page.text, "html.parser")
    return soup.find(id="pkg-updates")
def print_table(headers, data):
    """Render *data* as a plain-text table with *headers* on stdout."""
    rendered = tabulate(data, headers=headers)
    print(rendered)
| 161 | 0 | 46 |
16ba5d46a3f627000bdc25262e6704e3cd4ff7a4 | 13,288 | py | Python | tests/parsers/cups_ipp.py | jonathan-greig/plaso | b88a6e54c06a162295d09b016bddbfbfe7ca9070 | [
"Apache-2.0"
] | 6 | 2015-07-30T11:07:24.000Z | 2021-07-23T07:12:30.000Z | tests/parsers/cups_ipp.py | jonathan-greig/plaso | b88a6e54c06a162295d09b016bddbfbfe7ca9070 | [
"Apache-2.0"
] | null | null | null | tests/parsers/cups_ipp.py | jonathan-greig/plaso | b88a6e54c06a162295d09b016bddbfbfe7ca9070 | [
"Apache-2.0"
] | 1 | 2021-07-23T07:12:37.000Z | 2021-07-23T07:12:37.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parser test for MacOS Cups IPP Log files."""
import unittest
from dfvfs.helpers import fake_file_system_builder
from dfvfs.path import fake_path_spec
from plaso.lib import definitions
from plaso.lib import errors
from plaso.parsers import cups_ipp
from tests.parsers import test_lib
class CupsIppParserTest(test_lib.ParserTestCase):
  """Tests for MacOS Cups IPP parser."""
  # pylint: disable=protected-access
  # Minimal serialized IPP attributes group: group start tag (0x01), one
  # charset attribute (tag 0x47) named "attributes-charset" with value
  # "utf-8", then the end-of-attributes tag (0x03) -- decoded by
  # testParseAttributesGroup below.
  _ATTRIBUTES_GROUP_DATA = bytes(bytearray([
      0x01, 0x47, 0x00, 0x12, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
      0x65, 0x73, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x00, 0x05,
      0x75, 0x74, 0x66, 0x2d, 0x38, 0x03]))
  def _CreateAttributeTestData(self, parser, tag_value, name, value_data):
    """Creates attribute test data.
    Args:
      parser (CupsIppParser): CUPS IPP parser.
      tag_value (int): value of the attribute tag.
      name (str): name of the attribute.
      value_data (bytes): data of the attribute value.
    Returns:
      bytes: attribute test data.
    """
    attribute_map = parser._GetDataTypeMap('cups_ipp_attribute')
    attribute = attribute_map.CreateStructureValues(
        tag_value=tag_value, name_size=len(name), name=name,
        value_data_size=len(value_data), value_data=value_data)
    return attribute_map.FoldByteStream(attribute)
  def _CreateDateTimeValueData(self, parser):
    """Creates date time value test data.
    Args:
      parser (CupsIppParser): CUPS IPP parser.
    Returns:
      bytes: date time value test data.
    """
    datetime_map = parser._GetDataTypeMap('cups_ipp_datetime_value')
    datetime = datetime_map.CreateStructureValues(
        year=2018, month=11, day_of_month=27, hours=16, minutes=41, seconds=51,
        deciseconds=5, direction_from_utc=ord('+'), hours_from_utc=1,
        minutes_from_utc=0)
    return datetime_map.FoldByteStream(datetime)
  def _CreateHeaderData(self, parser):
    """Creates header test data.
    Args:
      parser (CupsIppParser): CUPS IPP parser.
    Returns:
      bytes: header test data.
    """
    header_map = parser._GetDataTypeMap('cups_ipp_header')
    header = header_map.CreateStructureValues(
        major_version=1, minor_version=1, operation_identifier=5,
        request_identifier=0)
    return header_map.FoldByteStream(header)
  def testGetStringValue(self):
    """Tests the _GetStringValue function."""
    parser = cups_ipp.CupsIppParser()
    string_value = parser._GetStringValue({}, 'test')
    self.assertIsNone(string_value)
    # Values containing a comma are quoted in the joined result.
    string_value = parser._GetStringValue({'test': ['1', '2,3', '4']}, 'test')
    self.assertEqual(string_value, '1, "2,3", 4')
  def testParseAttribute(self):
    """Tests the _ParseAttribute function."""
    parser = cups_ipp.CupsIppParser()
    attribute_data = self._CreateAttributeTestData(
        parser, 0x00, 'test', b'\x12')
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'test')
    self.assertEqual(value, b'\x12')
    # Test with attribute data too small.
    file_object = self._CreateFileObject('cups_ipp', attribute_data[:-1])
    with self.assertRaises(errors.ParseError):
      parser._ParseAttribute(file_object)
    # Test attribute with integer value.
    attribute_data = self._CreateAttributeTestData(
        parser, 0x21, 'int', b'\x12\x34\x56\x78')
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'int')
    self.assertEqual(value, 0x12345678)
    # Test attribute with boolean value.
    attribute_data = self._CreateAttributeTestData(
        parser, 0x22, 'bool', b'\x01')
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'bool')
    self.assertEqual(value, True)
    # Test attribute with date time value.
    datetime_data = self._CreateDateTimeValueData(parser)
    attribute_data = self._CreateAttributeTestData(
        parser, 0x31, 'datetime', datetime_data)
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'datetime')
    self.assertIsNotNone(value)
    self.assertEqual(value.year, 2018)
    # Test attribute with string without language.
    attribute_data = self._CreateAttributeTestData(
        parser, 0x42, 'string', b'NOLANG')
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'string')
    self.assertEqual(value, 'NOLANG')
    # Test attribute with ASCII string and tag value charset.
    attribute_data = self._CreateAttributeTestData(
        parser, 0x47, 'charset', b'utf8')
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'charset')
    self.assertEqual(value, 'utf8')
  def testParseAttributesGroup(self):
    """Tests the _ParseAttributesGroup function."""
    parser = cups_ipp.CupsIppParser()
    file_object = self._CreateFileObject(
        'cups_ipp', self._ATTRIBUTES_GROUP_DATA)
    name_value_pairs = list(parser._ParseAttributesGroup(file_object))
    self.assertEqual(name_value_pairs, [('attributes-charset', 'utf-8')])
    # Test with unsupported attributes groups start tag value.
    file_object = self._CreateFileObject('cups_ipp', b''.join([
        b'\xff', self._ATTRIBUTES_GROUP_DATA[1:]]))
    with self.assertRaises(errors.ParseError):
      list(parser._ParseAttributesGroup(file_object))
  def testParseBooleanValue(self):
    """Tests the _ParseBooleanValue function."""
    parser = cups_ipp.CupsIppParser()
    boolean_value = parser._ParseBooleanValue(b'\x00')
    self.assertFalse(boolean_value)
    boolean_value = parser._ParseBooleanValue(b'\x01')
    self.assertTrue(boolean_value)
    # Test with unsupported data.
    with self.assertRaises(errors.ParseError):
      parser._ParseBooleanValue(b'\x02')
  def testParseDateTimeValue(self):
    """Tests the _ParseDateTimeValue function."""
    parser = cups_ipp.CupsIppParser()
    datetime_data = self._CreateDateTimeValueData(parser)
    datetime_value = parser._ParseDateTimeValue(datetime_data, 0)
    self.assertIsNotNone(datetime_value)
    self.assertEqual(datetime_value.year, 2018)
    # Test with data too small.
    with self.assertRaises(errors.ParseError):
      parser._ParseDateTimeValue(datetime_data[:-1], 0)
  def testParseIntegerValue(self):
    """Tests the _ParseIntegerValue function."""
    parser = cups_ipp.CupsIppParser()
    integer_value = parser._ParseIntegerValue(b'\x00\x00\x00\x01', 0)
    self.assertEqual(integer_value, 1)
    # Test with data too small.
    with self.assertRaises(errors.ParseError):
      parser._ParseIntegerValue(b'\x01\x00\x00', 0)
  def testParseHeader(self):
    """Tests the _ParseHeader function."""
    file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
    file_system_builder.AddFile('/cups_ipp', b'')
    test_path_spec = fake_path_spec.FakePathSpec(location='/cups_ipp')
    test_file_entry = file_system_builder.file_system.GetFileEntryByPathSpec(
        test_path_spec)
    storage_writer = self._CreateStorageWriter()
    parser_mediator = self._CreateParserMediator(
        storage_writer, file_entry=test_file_entry)
    parser = cups_ipp.CupsIppParser()
    header_data = self._CreateHeaderData(parser)
    file_object = self._CreateFileObject('cups_ipp', header_data)
    parser._ParseHeader(parser_mediator, file_object)
    # Test with header data too small.
    file_object = self._CreateFileObject('cups_ipp', header_data[:-1])
    with self.assertRaises(errors.UnableToParseFile):
      parser._ParseHeader(parser_mediator, file_object)
    # Test with unsupported format version.
    header_map = parser._GetDataTypeMap('cups_ipp_header')
    header = header_map.CreateStructureValues(
        major_version=99, minor_version=1, operation_identifier=5,
        request_identifier=0)
    header_data = header_map.FoldByteStream(header)
    file_object = self._CreateFileObject('cups_ipp', header_data)
    with self.assertRaises(errors.UnableToParseFile):
      parser._ParseHeader(parser_mediator, file_object)
    # Test with unsupported operation identifier.
    # Unlike a bad version, an unknown operation identifier is tolerated:
    # no exception is expected here.
    header = header_map.CreateStructureValues(
        major_version=1, minor_version=1, operation_identifier=99,
        request_identifier=0)
    header_data = header_map.FoldByteStream(header)
    file_object = self._CreateFileObject('cups_ipp', header_data)
    parser._ParseHeader(parser_mediator, file_object)
  def testParseFileObject(self):
    """Tests the ParseFileObject function."""
    parser = cups_ipp.CupsIppParser()
    header_data = self._CreateHeaderData(parser)
    storage_writer = self._CreateStorageWriter()
    parser_mediator = self._CreateParserMediator(storage_writer)
    file_object = self._CreateFileObject('cups_ipp', b''.join([
        header_data, self._ATTRIBUTES_GROUP_DATA]))
    parser.ParseFileObject(parser_mediator, file_object)
    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 0)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'extraction_warning')
    self.assertEqual(number_of_warnings, 0)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'recovery_warning')
    self.assertEqual(number_of_warnings, 0)
    # Test with attribute group data too small.
    storage_writer = self._CreateStorageWriter()
    parser_mediator = self._CreateParserMediator(storage_writer)
    file_object = self._CreateFileObject('cups_ipp', b''.join([
        header_data, self._ATTRIBUTES_GROUP_DATA[:-1]]))
    parser.ParseFileObject(parser_mediator, file_object)
    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 0)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'extraction_warning')
    self.assertEqual(number_of_warnings, 1)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'recovery_warning')
    self.assertEqual(number_of_warnings, 0)
    # Test attribute with date time value.
    # The attribute is wrapped in group start (0x01) and end (0x03) tags to
    # form a complete attributes group.
    datetime_data = self._CreateDateTimeValueData(parser)
    attribute_data = self._CreateAttributeTestData(
        parser, 0x31, 'date-time-at-creation', datetime_data)
    storage_writer = self._CreateStorageWriter()
    parser_mediator = self._CreateParserMediator(storage_writer)
    file_object = self._CreateFileObject('cups_ipp', b''.join([
        header_data, b'\x01', attribute_data, b'\x03']))
    parser.ParseFileObject(parser_mediator, file_object)
    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 1)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'extraction_warning')
    self.assertEqual(number_of_warnings, 0)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'recovery_warning')
    self.assertEqual(number_of_warnings, 0)
  def testParse(self):
    """Tests the Parse function."""
    # TODO: only tested against MacOS Cups IPP (Version 2.0)
    parser = cups_ipp.CupsIppParser()
    storage_writer = self._ParseFile(['mac_cups_ipp'], parser)
    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 3)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'extraction_warning')
    self.assertEqual(number_of_warnings, 0)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'recovery_warning')
    self.assertEqual(number_of_warnings, 0)
    events = list(storage_writer.GetSortedEvents())
    expected_event_values = {
        'application': 'LibreOffice',
        'computer_name': 'localhost',
        'copies': 1,
        'data_type': 'cups:ipp:event',
        'date_time': '2013-11-03 18:07:21',
        'doc_type': 'application/pdf',
        'job_id': 'urn:uuid:d51116d9-143c-3863-62aa-6ef0202de49a',
        'job_name': 'Assignament 1',
        'owner': 'Joaquin Moreno Garijo',
        'printer_id': 'RHULBW',
        'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
        'uri': 'ipp://localhost:631/printers/RHULBW',
        'user': 'moxilo'}
    self.CheckEventValues(storage_writer, events[0], expected_event_values)
    expected_event_values = {
        'data_type': 'cups:ipp:event',
        'date_time': '2013-11-03 18:07:21',
        'timestamp_desc': definitions.TIME_DESCRIPTION_START}
    self.CheckEventValues(storage_writer, events[1], expected_event_values)
    expected_event_values = {
        'data_type': 'cups:ipp:event',
        'date_time': '2013-11-03 18:07:32',
        'timestamp_desc': definitions.TIME_DESCRIPTION_END}
    self.CheckEventValues(storage_writer, events[2], expected_event_values)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 35.060686 | 79 | 0.72637 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parser test for MacOS Cups IPP Log files."""
import unittest
from dfvfs.helpers import fake_file_system_builder
from dfvfs.path import fake_path_spec
from plaso.lib import definitions
from plaso.lib import errors
from plaso.parsers import cups_ipp
from tests.parsers import test_lib
class CupsIppParserTest(test_lib.ParserTestCase):
  """Tests for MacOS Cups IPP parser."""
  # pylint: disable=protected-access
  # Minimal serialized IPP attributes group: group start tag (0x01), one
  # charset attribute (tag 0x47) named "attributes-charset" with value
  # "utf-8", then the end-of-attributes tag (0x03) -- decoded by
  # testParseAttributesGroup below.
  _ATTRIBUTES_GROUP_DATA = bytes(bytearray([
      0x01, 0x47, 0x00, 0x12, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
      0x65, 0x73, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x00, 0x05,
      0x75, 0x74, 0x66, 0x2d, 0x38, 0x03]))
  def _CreateAttributeTestData(self, parser, tag_value, name, value_data):
    """Creates attribute test data.
    Args:
      parser (CupsIppParser): CUPS IPP parser.
      tag_value (int): value of the attribute tag.
      name (str): name of the attribute.
      value_data (bytes): data of the attribute value.
    Returns:
      bytes: attribute test data.
    """
    attribute_map = parser._GetDataTypeMap('cups_ipp_attribute')
    attribute = attribute_map.CreateStructureValues(
        tag_value=tag_value, name_size=len(name), name=name,
        value_data_size=len(value_data), value_data=value_data)
    return attribute_map.FoldByteStream(attribute)
  def _CreateDateTimeValueData(self, parser):
    """Creates date time value test data.
    Args:
      parser (CupsIppParser): CUPS IPP parser.
    Returns:
      bytes: date time value test data.
    """
    datetime_map = parser._GetDataTypeMap('cups_ipp_datetime_value')
    datetime = datetime_map.CreateStructureValues(
        year=2018, month=11, day_of_month=27, hours=16, minutes=41, seconds=51,
        deciseconds=5, direction_from_utc=ord('+'), hours_from_utc=1,
        minutes_from_utc=0)
    return datetime_map.FoldByteStream(datetime)
  def _CreateHeaderData(self, parser):
    """Creates header test data.
    Args:
      parser (CupsIppParser): CUPS IPP parser.
    Returns:
      bytes: header test data.
    """
    header_map = parser._GetDataTypeMap('cups_ipp_header')
    header = header_map.CreateStructureValues(
        major_version=1, minor_version=1, operation_identifier=5,
        request_identifier=0)
    return header_map.FoldByteStream(header)
  def testGetStringValue(self):
    """Tests the _GetStringValue function."""
    parser = cups_ipp.CupsIppParser()
    string_value = parser._GetStringValue({}, 'test')
    self.assertIsNone(string_value)
    # Values containing a comma are quoted in the joined result.
    string_value = parser._GetStringValue({'test': ['1', '2,3', '4']}, 'test')
    self.assertEqual(string_value, '1, "2,3", 4')
  def testParseAttribute(self):
    """Tests the _ParseAttribute function."""
    parser = cups_ipp.CupsIppParser()
    attribute_data = self._CreateAttributeTestData(
        parser, 0x00, 'test', b'\x12')
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'test')
    self.assertEqual(value, b'\x12')
    # Test with attribute data too small.
    file_object = self._CreateFileObject('cups_ipp', attribute_data[:-1])
    with self.assertRaises(errors.ParseError):
      parser._ParseAttribute(file_object)
    # Test attribute with integer value.
    attribute_data = self._CreateAttributeTestData(
        parser, 0x21, 'int', b'\x12\x34\x56\x78')
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'int')
    self.assertEqual(value, 0x12345678)
    # Test attribute with boolean value.
    attribute_data = self._CreateAttributeTestData(
        parser, 0x22, 'bool', b'\x01')
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'bool')
    self.assertEqual(value, True)
    # Test attribute with date time value.
    datetime_data = self._CreateDateTimeValueData(parser)
    attribute_data = self._CreateAttributeTestData(
        parser, 0x31, 'datetime', datetime_data)
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'datetime')
    self.assertIsNotNone(value)
    self.assertEqual(value.year, 2018)
    # Test attribute with string without language.
    attribute_data = self._CreateAttributeTestData(
        parser, 0x42, 'string', b'NOLANG')
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'string')
    self.assertEqual(value, 'NOLANG')
    # Test attribute with ASCII string and tag value charset.
    attribute_data = self._CreateAttributeTestData(
        parser, 0x47, 'charset', b'utf8')
    file_object = self._CreateFileObject('cups_ipp', attribute_data)
    name, value = parser._ParseAttribute(file_object)
    self.assertEqual(name, 'charset')
    self.assertEqual(value, 'utf8')
  def testParseAttributesGroup(self):
    """Tests the _ParseAttributesGroup function."""
    parser = cups_ipp.CupsIppParser()
    file_object = self._CreateFileObject(
        'cups_ipp', self._ATTRIBUTES_GROUP_DATA)
    name_value_pairs = list(parser._ParseAttributesGroup(file_object))
    self.assertEqual(name_value_pairs, [('attributes-charset', 'utf-8')])
    # Test with unsupported attributes groups start tag value.
    file_object = self._CreateFileObject('cups_ipp', b''.join([
        b'\xff', self._ATTRIBUTES_GROUP_DATA[1:]]))
    with self.assertRaises(errors.ParseError):
      list(parser._ParseAttributesGroup(file_object))
  def testParseBooleanValue(self):
    """Tests the _ParseBooleanValue function."""
    parser = cups_ipp.CupsIppParser()
    boolean_value = parser._ParseBooleanValue(b'\x00')
    self.assertFalse(boolean_value)
    boolean_value = parser._ParseBooleanValue(b'\x01')
    self.assertTrue(boolean_value)
    # Test with unsupported data.
    with self.assertRaises(errors.ParseError):
      parser._ParseBooleanValue(b'\x02')
  def testParseDateTimeValue(self):
    """Tests the _ParseDateTimeValue function."""
    parser = cups_ipp.CupsIppParser()
    datetime_data = self._CreateDateTimeValueData(parser)
    datetime_value = parser._ParseDateTimeValue(datetime_data, 0)
    self.assertIsNotNone(datetime_value)
    self.assertEqual(datetime_value.year, 2018)
    # Test with data too small.
    with self.assertRaises(errors.ParseError):
      parser._ParseDateTimeValue(datetime_data[:-1], 0)
  def testParseIntegerValue(self):
    """Tests the _ParseIntegerValue function."""
    parser = cups_ipp.CupsIppParser()
    integer_value = parser._ParseIntegerValue(b'\x00\x00\x00\x01', 0)
    self.assertEqual(integer_value, 1)
    # Test with data too small.
    with self.assertRaises(errors.ParseError):
      parser._ParseIntegerValue(b'\x01\x00\x00', 0)
  def testParseHeader(self):
    """Tests the _ParseHeader function."""
    file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
    file_system_builder.AddFile('/cups_ipp', b'')
    test_path_spec = fake_path_spec.FakePathSpec(location='/cups_ipp')
    test_file_entry = file_system_builder.file_system.GetFileEntryByPathSpec(
        test_path_spec)
    storage_writer = self._CreateStorageWriter()
    parser_mediator = self._CreateParserMediator(
        storage_writer, file_entry=test_file_entry)
    parser = cups_ipp.CupsIppParser()
    header_data = self._CreateHeaderData(parser)
    file_object = self._CreateFileObject('cups_ipp', header_data)
    parser._ParseHeader(parser_mediator, file_object)
    # Test with header data too small.
    file_object = self._CreateFileObject('cups_ipp', header_data[:-1])
    with self.assertRaises(errors.UnableToParseFile):
      parser._ParseHeader(parser_mediator, file_object)
    # Test with unsupported format version.
    header_map = parser._GetDataTypeMap('cups_ipp_header')
    header = header_map.CreateStructureValues(
        major_version=99, minor_version=1, operation_identifier=5,
        request_identifier=0)
    header_data = header_map.FoldByteStream(header)
    file_object = self._CreateFileObject('cups_ipp', header_data)
    with self.assertRaises(errors.UnableToParseFile):
      parser._ParseHeader(parser_mediator, file_object)
    # Test with unsupported operation identifier.
    # Unlike a bad version, an unknown operation identifier is tolerated:
    # no exception is expected here.
    header = header_map.CreateStructureValues(
        major_version=1, minor_version=1, operation_identifier=99,
        request_identifier=0)
    header_data = header_map.FoldByteStream(header)
    file_object = self._CreateFileObject('cups_ipp', header_data)
    parser._ParseHeader(parser_mediator, file_object)
  def testParseFileObject(self):
    """Tests the ParseFileObject function."""
    parser = cups_ipp.CupsIppParser()
    header_data = self._CreateHeaderData(parser)
    storage_writer = self._CreateStorageWriter()
    parser_mediator = self._CreateParserMediator(storage_writer)
    file_object = self._CreateFileObject('cups_ipp', b''.join([
        header_data, self._ATTRIBUTES_GROUP_DATA]))
    parser.ParseFileObject(parser_mediator, file_object)
    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 0)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'extraction_warning')
    self.assertEqual(number_of_warnings, 0)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'recovery_warning')
    self.assertEqual(number_of_warnings, 0)
    # Test with attribute group data too small.
    storage_writer = self._CreateStorageWriter()
    parser_mediator = self._CreateParserMediator(storage_writer)
    file_object = self._CreateFileObject('cups_ipp', b''.join([
        header_data, self._ATTRIBUTES_GROUP_DATA[:-1]]))
    parser.ParseFileObject(parser_mediator, file_object)
    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 0)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'extraction_warning')
    self.assertEqual(number_of_warnings, 1)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'recovery_warning')
    self.assertEqual(number_of_warnings, 0)
    # Test attribute with date time value.
    # The attribute is wrapped in group start (0x01) and end (0x03) tags to
    # form a complete attributes group.
    datetime_data = self._CreateDateTimeValueData(parser)
    attribute_data = self._CreateAttributeTestData(
        parser, 0x31, 'date-time-at-creation', datetime_data)
    storage_writer = self._CreateStorageWriter()
    parser_mediator = self._CreateParserMediator(storage_writer)
    file_object = self._CreateFileObject('cups_ipp', b''.join([
        header_data, b'\x01', attribute_data, b'\x03']))
    parser.ParseFileObject(parser_mediator, file_object)
    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 1)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'extraction_warning')
    self.assertEqual(number_of_warnings, 0)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'recovery_warning')
    self.assertEqual(number_of_warnings, 0)
  def testParse(self):
    """Tests the Parse function."""
    # TODO: only tested against MacOS Cups IPP (Version 2.0)
    parser = cups_ipp.CupsIppParser()
    storage_writer = self._ParseFile(['mac_cups_ipp'], parser)
    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 3)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'extraction_warning')
    self.assertEqual(number_of_warnings, 0)
    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'recovery_warning')
    self.assertEqual(number_of_warnings, 0)
    events = list(storage_writer.GetSortedEvents())
    expected_event_values = {
        'application': 'LibreOffice',
        'computer_name': 'localhost',
        'copies': 1,
        'data_type': 'cups:ipp:event',
        'date_time': '2013-11-03 18:07:21',
        'doc_type': 'application/pdf',
        'job_id': 'urn:uuid:d51116d9-143c-3863-62aa-6ef0202de49a',
        'job_name': 'Assignament 1',
        'owner': 'Joaquin Moreno Garijo',
        'printer_id': 'RHULBW',
        'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
        'uri': 'ipp://localhost:631/printers/RHULBW',
        'user': 'moxilo'}
    self.CheckEventValues(storage_writer, events[0], expected_event_values)
    expected_event_values = {
        'data_type': 'cups:ipp:event',
        'date_time': '2013-11-03 18:07:21',
        'timestamp_desc': definitions.TIME_DESCRIPTION_START}
    self.CheckEventValues(storage_writer, events[1], expected_event_values)
    expected_event_values = {
        'data_type': 'cups:ipp:event',
        'date_time': '2013-11-03 18:07:32',
        'timestamp_desc': definitions.TIME_DESCRIPTION_END}
    self.CheckEventValues(storage_writer, events[2], expected_event_values)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 0 | 0 | 0 |
5cc078595469499a2573ffbe88997b94b12bfd24 | 769 | py | Python | Backend/oeda/rtxlib/preprocessors/__init__.py | iliasger/OEDA | 0b5d94156cfc2d9252062f5d90dcb266466498d6 | [
"Apache-2.0"
] | 2 | 2019-10-11T10:40:11.000Z | 2021-05-17T14:56:18.000Z | Backend/oeda/rtxlib/preprocessors/__init__.py | iliasger/OEDA | 0b5d94156cfc2d9252062f5d90dcb266466498d6 | [
"Apache-2.0"
] | 3 | 2022-02-15T05:11:34.000Z | 2022-03-02T13:01:36.000Z | Backend/oeda/rtxlib/preprocessors/__init__.py | iliasger/OEDA | 0b5d94156cfc2d9252062f5d90dcb266466498d6 | [
"Apache-2.0"
] | 2 | 2019-05-27T13:01:23.000Z | 2020-10-23T14:47:31.000Z | from colorama import Fore
from oeda.log import *
from oeda.rtxlib.preprocessors.SparkPreProcessor import SparkPreProcessor
def init_pre_processors(wf):
    """Instantiate the preprocessors declared on the workflow definition.

    Only the "spark" type is recognized; each matching entry gets its
    "instance" key populated. Logs when the workflow declares none.
    """
    if not hasattr(wf, "pre_processors"):
        info("> Preprocessor | None", Fore.CYAN)
        return
    for processor in wf.pre_processors:
        if processor["type"] == "spark":
            processor["instance"] = SparkPreProcessor(wf, processor)
def kill_pre_processors(wf):
    """ after the experiment, we stop all preprocessors """
    try:
        for p in wf.pre_processors:
            p["instance"].shutdown()
        info("> Shutting down Spark preprocessor")
    except AttributeError:
        # The workflow may declare no pre_processors attribute at all; treat
        # that as "nothing to stop". NOTE(review): this also silently swallows
        # an AttributeError raised from shutdown() itself -- confirm intended.
        pass
| 29.576923 | 84 | 0.622887 | from colorama import Fore
from oeda.log import *
from oeda.rtxlib.preprocessors.SparkPreProcessor import SparkPreProcessor
def init_pre_processors(wf):
    """Instantiate the preprocessors declared on the workflow definition.

    Only the "spark" type is recognized; each matching entry gets its
    "instance" key populated. Logs when the workflow declares none.
    """
    if not hasattr(wf, "pre_processors"):
        info("> Preprocessor | None", Fore.CYAN)
        return
    for processor in wf.pre_processors:
        if processor["type"] == "spark":
            processor["instance"] = SparkPreProcessor(wf, processor)
def kill_pre_processors(wf):
    """ after the experiment, we stop all preprocessors """
    try:
        for p in wf.pre_processors:
            p["instance"].shutdown()
        info("> Shutting down Spark preprocessor")
    except AttributeError:
        # The workflow may declare no pre_processors attribute at all; treat
        # that as "nothing to stop". NOTE(review): this also silently swallows
        # an AttributeError raised from shutdown() itself -- confirm intended.
        pass
| 0 | 0 | 0 |
aab2fb48530b34760ffc1b2b1c43e3ed7ef58254 | 260 | py | Python | pythainlp/translate/__init__.py | wannaphongcom/pythai-nlp | efcbe7f1881351fadf62d28cc0415d22156a2e71 | [
"Apache-2.0"
] | 125 | 2016-06-27T06:16:38.000Z | 2017-10-14T08:02:26.000Z | pythainlp/translate/__init__.py | wannaphongcom/pythainlp | efcbe7f1881351fadf62d28cc0415d22156a2e71 | [
"Apache-2.0"
] | 48 | 2016-08-31T02:01:03.000Z | 2017-10-07T16:33:47.000Z | pythainlp/translate/__init__.py | wannaphongcom/pythai-nlp | efcbe7f1881351fadf62d28cc0415d22156a2e71 | [
"Apache-2.0"
] | 40 | 2016-06-27T00:19:12.000Z | 2017-10-16T06:32:20.000Z | # -*- coding: utf-8 -*-
"""
Language translation.
"""
__all__ = [
"ThZhTranslator",
"ZhThTranslator",
"Translate"
]
from pythainlp.translate.core import Translate
from pythainlp.translate.zh_th import (
ThZhTranslator,
ZhThTranslator,
)
| 14.444444 | 46 | 0.673077 | # -*- coding: utf-8 -*-
"""
Language translation.
"""
__all__ = [
"ThZhTranslator",
"ZhThTranslator",
"Translate"
]
from pythainlp.translate.core import Translate
from pythainlp.translate.zh_th import (
ThZhTranslator,
ZhThTranslator,
)
| 0 | 0 | 0 |
23102cfdc377911d6f7c24434ac0713380ea8134 | 1,005 | py | Python | test/unit/ggrc/models/test_custom_attribute_definition.py | ks-manish/ggrc-core | f9499236e0c6d2e29ff9d2acf403fdecd9c8a173 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/unit/ggrc/models/test_custom_attribute_definition.py | ks-manish/ggrc-core | f9499236e0c6d2e29ff9d2acf403fdecd9c8a173 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/unit/ggrc/models/test_custom_attribute_definition.py | ks-manish/ggrc-core | f9499236e0c6d2e29ff9d2acf403fdecd9c8a173 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test Custom Attribute Definition validation"""
import unittest
from mock import MagicMock
from ggrc.models import all_models
from ggrc.access_control import role as acr
class TestCustomAttributeDefinition(unittest.TestCase):
"""Test Custom Attribute Definition validation"""
def test_title_with_asterisk_throws(self):
"""Test if raises if title contains * symbol"""
with self.assertRaises(ValueError):
title = "Title with asterisk *"
self.cad.definition_type = "assessment_template"
self.cad.validate_title("title", title)
| 34.655172 | 79 | 0.753234 | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test Custom Attribute Definition validation"""
import unittest
from mock import MagicMock
from ggrc.models import all_models
from ggrc.access_control import role as acr
class TestCustomAttributeDefinition(unittest.TestCase):
"""Test Custom Attribute Definition validation"""
def setUp(self):
# pylint: disable=protected-access
self.cad = all_models.CustomAttributeDefinition()
self.cad._get_reserved_names = MagicMock(return_value=frozenset({'title'}))
self.cad._get_global_cad_names = MagicMock(return_value={'reg url': 1})
acr.get_custom_roles_for = MagicMock(return_value=dict())
def test_title_with_asterisk_throws(self):
"""Test if raises if title contains * symbol"""
with self.assertRaises(ValueError):
title = "Title with asterisk *"
self.cad.definition_type = "assessment_template"
self.cad.validate_title("title", title)
| 306 | 0 | 25 |
0b7b1e425f8017f791073b532d42d48a2786d924 | 171 | py | Python | 13.py | kwoshvick/project-euler | d27370b0f22b51ad9ccb15afa912983d8fd8be5c | [
"MIT"
] | null | null | null | 13.py | kwoshvick/project-euler | d27370b0f22b51ad9ccb15afa912983d8fd8be5c | [
"MIT"
] | null | null | null | 13.py | kwoshvick/project-euler | d27370b0f22b51ad9ccb15afa912983d8fd8be5c | [
"MIT"
] | null | null | null | file = open("13")
sum = 0
for numbers in file:
#print(numbers.rstrip())
numbers = int(numbers)
sum += numbers;
print(sum)
sum = str(sum)
print(sum[:10])
| 10.6875 | 28 | 0.596491 | file = open("13")
sum = 0
for numbers in file:
#print(numbers.rstrip())
numbers = int(numbers)
sum += numbers;
print(sum)
sum = str(sum)
print(sum[:10])
| 0 | 0 | 0 |
41a4a212bdd800f5254430c0e4838186564b6a70 | 5,422 | py | Python | autoencoder_use_mnist.py | deeplearningathome/autoencoder | 7cd4a54e1ed47bbcf0cf97e2eafc7cb0cc8ad1af | [
"MIT"
] | 3 | 2017-08-08T09:43:12.000Z | 2018-05-23T09:29:41.000Z | autoencoder_use_mnist.py | deeplearningathome/autoencoder | 7cd4a54e1ed47bbcf0cf97e2eafc7cb0cc8ad1af | [
"MIT"
] | null | null | null | autoencoder_use_mnist.py | deeplearningathome/autoencoder | 7cd4a54e1ed47bbcf0cf97e2eafc7cb0cc8ad1af | [
"MIT"
] | 2 | 2016-12-26T09:27:00.000Z | 2019-12-13T13:14:05.000Z | """
MIT License
Copyright (c) 2016 deeplearningathome. http://deeplearningathome.com/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tensorflow as tf
from autoencoder import AutoEncoder
import numpy
import time
#these are helper functions to read mnist data. They are part of Tensorflow models
from utils import maybe_download, extract_data, extract_labels, variable_summaries
from tensorflow.contrib.tensorboard.plugins import projector
flags = tf.flags
flags.DEFINE_string("encoder_network", "784,128,10", "specifies encoder network")
flags.DEFINE_float("noise_level", 0.0, "noise level for denoising autoencoder")
flags.DEFINE_integer("batch_size", 128, "batch size")
flags.DEFINE_integer("num_epochs", 60, "number of epochs")
flags.DEFINE_integer("eval_every_step", 2000, "evaluate every x steps")
flags.DEFINE_string("acitivation_kind", "sigmoid", "type of neuron activations")
flags.DEFINE_float("learning_rate", 0.1, "learning rate")
flags.DEFINE_string("optimizer_kind", "rmsprop", "type of oprtimizer")
flags.DEFINE_string("logdir", "tblogs", "tensorboard logs")
FLAGS = flags.FLAGS
if __name__ == "__main__":
tf.app.run(main=main)
| 50.672897 | 134 | 0.71007 | """
MIT License
Copyright (c) 2016 deeplearningathome. http://deeplearningathome.com/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tensorflow as tf
from autoencoder import AutoEncoder
import numpy
import time
#these are helper functions to read mnist data. They are part of Tensorflow models
from utils import maybe_download, extract_data, extract_labels, variable_summaries
from tensorflow.contrib.tensorboard.plugins import projector
flags = tf.flags
flags.DEFINE_string("encoder_network", "784,128,10", "specifies encoder network")
flags.DEFINE_float("noise_level", 0.0, "noise level for denoising autoencoder")
flags.DEFINE_integer("batch_size", 128, "batch size")
flags.DEFINE_integer("num_epochs", 60, "number of epochs")
flags.DEFINE_integer("eval_every_step", 2000, "evaluate every x steps")
flags.DEFINE_string("acitivation_kind", "sigmoid", "type of neuron activations")
flags.DEFINE_float("learning_rate", 0.1, "learning rate")
flags.DEFINE_string("optimizer_kind", "rmsprop", "type of oprtimizer")
flags.DEFINE_string("logdir", "tblogs", "tensorboard logs")
FLAGS = flags.FLAGS
def main(_):
VALIDATION_SIZE = 5000 # Size of the MNIST validation set.
"""
This is Mnist specific example
"""
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
VALIDATION_SIZE = 5000 # Size of the validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
train_size = train_labels.shape[0]
#(self, layers, noise, batch_size, is_training, activation='sigmoid')
dA = AutoEncoder(FLAGS.encoder_network, FLAGS.noise_level,
True, FLAGS.acitivation_kind, FLAGS.optimizer_kind, FLAGS.learning_rate)
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(logdir=FLAGS.logdir, graph=tf.get_default_graph())
#adding summaries for Tensorboard
tf.summary.image("input", tf.reshape(dA.x, [tf.shape(dA.x)[0], 28, 28, 1]), max_outputs=4)
tf.summary.image("reconstructed_input", tf.reshape(dA.reconstruction, [tf.shape(dA.reconstruction)[0], 28, 28, 1]), max_outputs=4)
variable_summaries("encodings", dA.encoding, 'embedding')
eval_loss = dA.loss
tf.summary.scalar("Evaluation Loss", eval_loss)
merged = tf.summary.merge_all()
start_time = time.time()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in xrange(int(FLAGS.num_epochs * train_size) // FLAGS.batch_size):
offset = (step * FLAGS.batch_size) % (train_size - FLAGS.batch_size)
batch_data = train_data[offset:(offset + FLAGS.batch_size), ...].reshape(FLAGS.batch_size, 784)
feed_dict = {dA.x: batch_data}
if step % FLAGS.eval_every_step == 0:
tloss, _ = sess.run([dA.loss, dA.train_op], feed_dict=feed_dict)
print('Train loss: %.4f' % (tloss))
else:
sess.run([dA.train_op], feed_dict=feed_dict)
#print('Training minibatch at step %d loss: %.6f' % (step, loss))
if step % FLAGS.eval_every_step == 0:
with tf.name_scope("Validation"):
eval_feed_dict = {dA.x: validation_data.reshape(VALIDATION_SIZE, 784)}
eloss, emerged = sess.run([eval_loss, merged], eval_feed_dict)
summary_writer.add_summary(emerged, step)
saver.save(sess, FLAGS.logdir + "/model")
print('Validation loss: %.4f' % (eloss))
print('Calculating test error')
tfeed_dict = {dA.x: test_data.reshape(10000, 784)}
tloss = sess.run(dA.loss, feed_dict=tfeed_dict)
print('Test loss: %.6f' %(tloss))
if __name__ == "__main__":
tf.app.run(main=main)
| 3,262 | 0 | 23 |
8dc572c5ddfe9278604ffcd87024633538f518d4 | 9,925 | py | Python | www/app/views/api.py | YuiJL/myweblog | 82f355d32b6d37cf329f1852459d40d5410de810 | [
"MIT"
] | 2 | 2016-09-18T12:41:44.000Z | 2016-10-26T04:27:25.000Z | www/app/views/api.py | YuiJL/webblog | 82f355d32b6d37cf329f1852459d40d5410de810 | [
"MIT"
] | null | null | null | www/app/views/api.py | YuiJL/webblog | 82f355d32b6d37cf329f1852459d40d5410de810 | [
"MIT"
] | 2 | 2016-09-22T05:31:18.000Z | 2016-10-05T18:39:29.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Jiayi Li'
import time, os, re
from bson.objectid import ObjectId
from werkzeug.utils import secure_filename
from flask import request, redirect, url_for, jsonify, abort, Blueprint, make_response, g, flash, current_app
from app import db
from app.models import User, Blog, Comment
from app.filters import markdown_filter
from app.utilities import allowed_file, cookie_to_user
api = Blueprint('api', __name__, url_prefix='/api')
APIS = ('blogs', 'users', 'comments')
#************************************
#----------------APIs----------------
#************************************
@api.route('/<collection>')
def api_get_all(collection):
'''
get all documents from a collection
'''
if collection not in APIS:
abort(400)
cursor = db[collection]
a = []
for document in cursor.find().sort("created", -1):
if collection == 'users':
document.update(password='******')
document.update(_id=str(document['_id']))
a.append(document)
return jsonify({collection:a})
@api.route('/<collection>/<item_id>')
def api_get_one(collection, item_id):
'''
get a single document from a collection
'''
document = db[collection].find_one({'_id': ObjectId(item_id)})
if not document:
abort(400)
if collection == 'users':
document.update(password='******')
document.update(_id=str(document['_id']))
return jsonify(document)
@api.route('/blogs/<blog_id>/comments')
def api_get_blog_comments(blog_id):
'''
get all comments from a blog
'''
comments = []
for comment in db.comments.find({'blog_id':blog_id}).sort("created", -1):
comment.update(_id=str(comment['_id']))
comment.update(content=markdown_filter(comment['content']))
if comment.get('subcomment'):
for subcomment in comment.get('subcontent'):
subcomment.update(content=markdown_filter(subcomment['content']))
comments.append(comment)
return jsonify(comments=comments)
@api.route('/blogs', methods=['POST'])
def api_post_blog():
'''
post a new blog
'''
if not g.__user__.get('admin'):
return make_response('Permission denied.', 403)
title = request.form.get('title')
tag = request.form.get('tag').lstrip(r'/\;,. ').rstrip(r'/\;,. ')
content = request.form.get('content')
# create a new Blog and save it to mongodb
blog = Blog(
user_id = g.__user__.get('_id'),
user_name = g.__user__.get('name'),
user_image = g.__user__.get('image'),
title = title.strip(),
tag = re.split(r'[\s\;\,\.\\\/]+', tag),
content = content.lstrip('\n').rstrip()
)
blog_resp = blog.__dict__
return jsonify(blog_id=str(blog_resp['_id']))
@api.route('/blogs/<blog_id>', methods=['POST'])
def api_edit_blog(blog_id):
'''
edit a blog and post it
'''
if not g.__user__.get('admin'):
return make_response('Permission denied.', 403)
title = request.form.get('title')
tag = request.form.get('tag').lstrip(r'/\;,. ').rstrip(r'/\;,. ')
content = request.form.get('content')
content = content.lstrip('\n').rstrip()
db.blogs.update_one(
{'_id': ObjectId(blog_id)},
{
'$set': {
'title': title.strip(),
'tag': re.split(r'[\s\;\,\.\\\/]+', tag),
'content': content,
'summary': '%s%s' % (content[:140], '...'),
'last_modified': True,
'modified': int(time.time())
}
})
return jsonify(blog_id=blog_id)
@api.route('/blogs/<blog_id>/comments', methods=['POST'])
def api_post_and_get_comment(blog_id):
'''
post a new comment
'''
if not g.__user__:
return make_response('Please login', 403)
content = request.form.get('content').lstrip('\n').rstrip()
if not content:
return make_response('Content cannot be empty.')
# create a new Comment and save it to mongodb
blog = db.blogs.find_one({'_id': ObjectId(blog_id)})
comment = Comment(
blog_id = blog_id,
blog_author = blog.get('user_name'),
blog_title = blog.get('title'),
user_id = g.__user__.get('_id'),
user_name = g.__user__.get('name'),
user_image = g.__user__.get('image'),
content = content
)
comments = []
for document in db.comments.find({'blog_id':blog_id}).sort("created", -1):
document.update(_id=str(document['_id']))
document.update(content=markdown_filter(document['content']))
if document.get('subcomment'):
for subcomment in document.get('subcontent'):
subcomment.update(content=markdown_filter(subcomment['content']))
comments.append(document)
return jsonify(comments=comments)
@api.route('/blogs/<blog_id>/comments/<comment_id>', methods=['POST'])
def api_pose_subcomment(blog_id, comment_id):
'''
post a subcomment
'''
if not g.__user__:
return make_response('Please login', 403)
content = request.form.get('content').lstrip('\n').rstrip()
if not content:
return make_response('Content cannot be empty', 403)
comment = db.comments.find_one({'_id': ObjectId(comment_id)})
db.comments.update_one(
{'_id': ObjectId(comment_id)},
{
'$set': {'subcomment': True},
'$push': {
'subcontent': {
'_id': str(ObjectId()),
'user_id': g.__user__.get('_id'),
'user_name': g.__user__.get('name'),
'user_image': g.__user__.get('image'),
'content': content,
'created': int(time.time())
}
}
})
comments = []
for document in db.comments.find({'blog_id': blog_id}).sort("created", -1):
document.update(_id=str(document['_id']))
document.update(content=markdown_filter(document['content']))
if document.get('subcomment'):
for subcomment in document.get('subcontent'):
subcomment.update(content=markdown_filter(subcomment['content']))
comments.append(document)
return jsonify(comments=comments)
@api.route('/<collection>/<item_id>/delete', methods=['POST'])
def api_delete_one(collection, item_id):
'''
delete one document from a certain collection
'''
if not g.__user__.get('admin'):
return make_response('Permission denied.', 403)
if collection == 'comments':
blog_id = db.comments.find_one({'_id': ObjectId(item_id)}).get('blog_id')
db[collection].delete_one({'_id': ObjectId(item_id)})
if collection == 'blogs':
db.comments.delete_many({'blog_id': ObjectId(item_id)})
if collection == 'comments':
return redirect(url_for('api.api_get_blog_comments', blog_id=blog_id))
return jsonify(item_id=item_id)
@api.route('/comments/<comment_id>/delete/<own_id>', methods=['POST'])
def api_delete_subcomment(comment_id, own_id):
'''
delete a subcomment from a certain comment
'''
if not g.__user__.get('admin'):
return make_response('Permission denied.', 403)
db.comments.update_one(
{'_id': ObjectId(comment_id)},
{
'$pull': {'subcontent': {'_id': own_id}}
})
if not db.comments.find_one({'_id': ObjectId(comment_id)}).get('subcontent'):
db.comments.update_one(
{'_id': ObjectId(comment_id)},
{
'$set': {'subcomment': False}
})
blog_id = db.comments.find_one({'_id': ObjectId(comment_id)}).get('blog_id')
return redirect(url_for('api.api_get_blog_comments', blog_id=blog_id))
@api.route('/image/<user_id>', methods=['POST'])
def api_upload(user_id):
'''
upload image files for user avatar
'''
if 'file' not in request.files:
flash('No file part')
return redirect(request.referrer)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.referrer)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))
# update users
db.users.update_one(
{'_id': ObjectId(user_id)},
{
'$set': {'image': '/static/img/' + filename}
})
# update blogs
db.blogs.update_many(
{'user_id': user_id},
{
'$set': {'user_image': '/static/img/' + filename}
})
# update comments
db.comments.update_many(
{'user_id': user_id},
{
'$set': {'user_image': '/static/img/' + filename}
})
# update subcomments in comments
for comment in db.comments.find():
if comment.get('subcomment'):
for subcomment in comment['subcontent']:
# find one match and update one
if user_id in subcomment.values():
db.comments.update_one(
{
'_id': comment['_id'],
'subcontent': {'$elemMatch': {'_id': subcomment['_id']}}
},
{
'$set': {
'subcontent.$.user_image': '/static/img/' + filename
}
})
else:
flash('File not allowed')
return redirect(request.referrer) | 32.224026 | 109 | 0.556675 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Jiayi Li'
import time, os, re
from bson.objectid import ObjectId
from werkzeug.utils import secure_filename
from flask import request, redirect, url_for, jsonify, abort, Blueprint, make_response, g, flash, current_app
from app import db
from app.models import User, Blog, Comment
from app.filters import markdown_filter
from app.utilities import allowed_file, cookie_to_user
api = Blueprint('api', __name__, url_prefix='/api')
APIS = ('blogs', 'users', 'comments')
#************************************
#----------------APIs----------------
#************************************
@api.route('/<collection>')
def api_get_all(collection):
'''
get all documents from a collection
'''
if collection not in APIS:
abort(400)
cursor = db[collection]
a = []
for document in cursor.find().sort("created", -1):
if collection == 'users':
document.update(password='******')
document.update(_id=str(document['_id']))
a.append(document)
return jsonify({collection:a})
@api.route('/<collection>/<item_id>')
def api_get_one(collection, item_id):
'''
get a single document from a collection
'''
document = db[collection].find_one({'_id': ObjectId(item_id)})
if not document:
abort(400)
if collection == 'users':
document.update(password='******')
document.update(_id=str(document['_id']))
return jsonify(document)
@api.route('/blogs/<blog_id>/comments')
def api_get_blog_comments(blog_id):
'''
get all comments from a blog
'''
comments = []
for comment in db.comments.find({'blog_id':blog_id}).sort("created", -1):
comment.update(_id=str(comment['_id']))
comment.update(content=markdown_filter(comment['content']))
if comment.get('subcomment'):
for subcomment in comment.get('subcontent'):
subcomment.update(content=markdown_filter(subcomment['content']))
comments.append(comment)
return jsonify(comments=comments)
@api.route('/blogs', methods=['POST'])
def api_post_blog():
'''
post a new blog
'''
if not g.__user__.get('admin'):
return make_response('Permission denied.', 403)
title = request.form.get('title')
tag = request.form.get('tag').lstrip(r'/\;,. ').rstrip(r'/\;,. ')
content = request.form.get('content')
# create a new Blog and save it to mongodb
blog = Blog(
user_id = g.__user__.get('_id'),
user_name = g.__user__.get('name'),
user_image = g.__user__.get('image'),
title = title.strip(),
tag = re.split(r'[\s\;\,\.\\\/]+', tag),
content = content.lstrip('\n').rstrip()
)
blog_resp = blog.__dict__
return jsonify(blog_id=str(blog_resp['_id']))
@api.route('/blogs/<blog_id>', methods=['POST'])
def api_edit_blog(blog_id):
'''
edit a blog and post it
'''
if not g.__user__.get('admin'):
return make_response('Permission denied.', 403)
title = request.form.get('title')
tag = request.form.get('tag').lstrip(r'/\;,. ').rstrip(r'/\;,. ')
content = request.form.get('content')
content = content.lstrip('\n').rstrip()
db.blogs.update_one(
{'_id': ObjectId(blog_id)},
{
'$set': {
'title': title.strip(),
'tag': re.split(r'[\s\;\,\.\\\/]+', tag),
'content': content,
'summary': '%s%s' % (content[:140], '...'),
'last_modified': True,
'modified': int(time.time())
}
})
return jsonify(blog_id=blog_id)
@api.route('/blogs/<blog_id>/comments', methods=['POST'])
def api_post_and_get_comment(blog_id):
'''
post a new comment
'''
if not g.__user__:
return make_response('Please login', 403)
content = request.form.get('content').lstrip('\n').rstrip()
if not content:
return make_response('Content cannot be empty.')
# create a new Comment and save it to mongodb
blog = db.blogs.find_one({'_id': ObjectId(blog_id)})
comment = Comment(
blog_id = blog_id,
blog_author = blog.get('user_name'),
blog_title = blog.get('title'),
user_id = g.__user__.get('_id'),
user_name = g.__user__.get('name'),
user_image = g.__user__.get('image'),
content = content
)
comments = []
for document in db.comments.find({'blog_id':blog_id}).sort("created", -1):
document.update(_id=str(document['_id']))
document.update(content=markdown_filter(document['content']))
if document.get('subcomment'):
for subcomment in document.get('subcontent'):
subcomment.update(content=markdown_filter(subcomment['content']))
comments.append(document)
return jsonify(comments=comments)
@api.route('/blogs/<blog_id>/comments/<comment_id>', methods=['POST'])
def api_pose_subcomment(blog_id, comment_id):
'''
post a subcomment
'''
if not g.__user__:
return make_response('Please login', 403)
content = request.form.get('content').lstrip('\n').rstrip()
if not content:
return make_response('Content cannot be empty', 403)
comment = db.comments.find_one({'_id': ObjectId(comment_id)})
db.comments.update_one(
{'_id': ObjectId(comment_id)},
{
'$set': {'subcomment': True},
'$push': {
'subcontent': {
'_id': str(ObjectId()),
'user_id': g.__user__.get('_id'),
'user_name': g.__user__.get('name'),
'user_image': g.__user__.get('image'),
'content': content,
'created': int(time.time())
}
}
})
comments = []
for document in db.comments.find({'blog_id': blog_id}).sort("created", -1):
document.update(_id=str(document['_id']))
document.update(content=markdown_filter(document['content']))
if document.get('subcomment'):
for subcomment in document.get('subcontent'):
subcomment.update(content=markdown_filter(subcomment['content']))
comments.append(document)
return jsonify(comments=comments)
@api.route('/<collection>/<item_id>/delete', methods=['POST'])
def api_delete_one(collection, item_id):
'''
delete one document from a certain collection
'''
if not g.__user__.get('admin'):
return make_response('Permission denied.', 403)
if collection == 'comments':
blog_id = db.comments.find_one({'_id': ObjectId(item_id)}).get('blog_id')
db[collection].delete_one({'_id': ObjectId(item_id)})
if collection == 'blogs':
db.comments.delete_many({'blog_id': ObjectId(item_id)})
if collection == 'comments':
return redirect(url_for('api.api_get_blog_comments', blog_id=blog_id))
return jsonify(item_id=item_id)
@api.route('/comments/<comment_id>/delete/<own_id>', methods=['POST'])
def api_delete_subcomment(comment_id, own_id):
'''
delete a subcomment from a certain comment
'''
if not g.__user__.get('admin'):
return make_response('Permission denied.', 403)
db.comments.update_one(
{'_id': ObjectId(comment_id)},
{
'$pull': {'subcontent': {'_id': own_id}}
})
if not db.comments.find_one({'_id': ObjectId(comment_id)}).get('subcontent'):
db.comments.update_one(
{'_id': ObjectId(comment_id)},
{
'$set': {'subcomment': False}
})
blog_id = db.comments.find_one({'_id': ObjectId(comment_id)}).get('blog_id')
return redirect(url_for('api.api_get_blog_comments', blog_id=blog_id))
@api.route('/image/<user_id>', methods=['POST'])
def api_upload(user_id):
'''
upload image files for user avatar
'''
if 'file' not in request.files:
flash('No file part')
return redirect(request.referrer)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.referrer)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))
# update users
db.users.update_one(
{'_id': ObjectId(user_id)},
{
'$set': {'image': '/static/img/' + filename}
})
# update blogs
db.blogs.update_many(
{'user_id': user_id},
{
'$set': {'user_image': '/static/img/' + filename}
})
# update comments
db.comments.update_many(
{'user_id': user_id},
{
'$set': {'user_image': '/static/img/' + filename}
})
# update subcomments in comments
for comment in db.comments.find():
if comment.get('subcomment'):
for subcomment in comment['subcontent']:
# find one match and update one
if user_id in subcomment.values():
db.comments.update_one(
{
'_id': comment['_id'],
'subcontent': {'$elemMatch': {'_id': subcomment['_id']}}
},
{
'$set': {
'subcontent.$.user_image': '/static/img/' + filename
}
})
else:
flash('File not allowed')
return redirect(request.referrer) | 0 | 0 | 0 |
b37a81a05ea7a387843e0c8e55bac504975a6e81 | 1,091 | py | Python | Python/Misc.py | Spectavi/video-diff | 4ad28aea48877937f6b5b25f374f9c14eaf79212 | [
"BSD-3-Clause"
] | 1 | 2021-04-18T21:24:42.000Z | 2021-04-18T21:24:42.000Z | Python/Misc.py | Spectavi/video-diff | 4ad28aea48877937f6b5b25f374f9c14eaf79212 | [
"BSD-3-Clause"
] | null | null | null | Python/Misc.py | Spectavi/video-diff | 4ad28aea48877937f6b5b25f374f9c14eaf79212 | [
"BSD-3-Clause"
] | null | null | null | import cv
# TODO: This class doesn't seem to be used and is based on old OpenCV bindings.
# Either finish the class or remove it.
def convert_np_to_cvmat(img_np):
"""
This gives a: AttributeError: 'numpy.ndarray' object has no attribute
'from_array'
ImageAlignment.template_image = ImageAlignment.template_image.from_array()
"""
# Inspired from https://stackoverflow.com/questions/5575108/how-to-convert-a-numpy-array-view-to-opencv-matrix :
h_np, w_np = img_np.shape[:2]
tmp_cv = cv.CreateMat(h_np, w_np, cv.CV_8UC3)
cv.SetData(tmp_cv, img_np.data, img_np.strides[0])
return tmp_cv
| 36.366667 | 116 | 0.698442 | import cv
# TODO: This class doesn't seem to be used and is based on old OpenCV bindings.
# Either finish the class or remove it.
def convert_np_to_cvmat(img_np):
"""
This gives a: AttributeError: 'numpy.ndarray' object has no attribute
'from_array'
ImageAlignment.template_image = ImageAlignment.template_image.from_array()
"""
# Inspired from https://stackoverflow.com/questions/5575108/how-to-convert-a-numpy-array-view-to-opencv-matrix :
h_np, w_np = img_np.shape[:2]
tmp_cv = cv.CreateMat(h_np, w_np, cv.CV_8UC3)
cv.SetData(tmp_cv, img_np.data, img_np.strides[0])
return tmp_cv
def convert_np_to_ipl_image(img_np):
# Inspired from https://stackoverflow.com/questions/11528009/opencv-converting-from-numpy-to-iplimage-in-python
# img_np is numpy array
num_colors = 1
bitmap = cv.CreateImageHeader((img_np.shape[1], img_np.shape[0]),
cv.IPL_DEPTH_8U, num_colors)
cv.SetData(bitmap, img_np.tostring(),
img_np.dtype.itemsize * num_colors * img_np.shape[1])
return bitmap
| 441 | 0 | 23 |
148a619326f816979634a70896847110174d2691 | 6,456 | py | Python | python/extract_small_fragments.py | maubarsom/biotico-tools | 3e34e1fdde083e49da10f5afdae6a951246b8a94 | [
"Apache-2.0"
] | 1 | 2021-01-16T20:47:29.000Z | 2021-01-16T20:47:29.000Z | python/extract_small_fragments.py | maubarsom/biotico-tools | 3e34e1fdde083e49da10f5afdae6a951246b8a94 | [
"Apache-2.0"
] | null | null | null | python/extract_small_fragments.py | maubarsom/biotico-tools | 3e34e1fdde083e49da10f5afdae6a951246b8a94 | [
"Apache-2.0"
] | 1 | 2015-06-26T18:22:11.000Z | 2015-06-26T18:22:11.000Z | #!/usr/bin/env python
"""
Script that uses output from cutadapt to quickly detect fully overlapping pairs.
It is based on the fact that if sequencing adapters are trimmed from both paired-ends, the resulting
fragment needs to be shorter than the pair-end length
Depends of cutadapt seqio and xopen modules from version 1.6
Author: Mauricio Barrientos-Somarribas
Email: mauricio.barrientos@ki.se
Copyright 2014 Mauricio Barrientos-Somarribas
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import argparse
import os.path
#Time of script execution and logging module
import time
import logging
import re
import math
import itertools
from collections import *
from cutadapt import seqio,xopen
from distance import hamming
import ctypes
#Data Analysis libs
import numpy as np
#****************Begin of Main ***************
#*****************End of Main**********************
#Assumes sequences are the same length, with a hamming distance of less than 0.05% of the length
#Assumes sequences are aligned and the same length
if __name__ == '__main__':
#Process command line arguments
parser = argparse.ArgumentParser(description="Script to process fastq files after adapter removal and extracts sequences fragments smaller than read length")
parser.add_argument("R1",help="Fastq with forward paired-end")
parser.add_argument("R2",help="Fastq with reverse paired-end")
parser.add_argument("-o","--output-prefix", default=None, help="Prefix of the output files" )
parser.add_argument("--raw_read_length", default=301,type=int, help="Length of raw reads (before adapter trimming). Default: 301" )
parser.add_argument("--min-trim", default=10,type=int, help="Minimum number of bases trimmed to consider the adapter removed was not spurious. Default: 10" )
parser.add_argument("-l","--log-file", default=None, help="Name of the log file")
args = parser.parse_args()
if validate_args(args):
#Initialize log
log_level = logging.INFO
if args.log_file:
logging.basicConfig(filename=args.log_file,level=log_level)
else:
logging.basicConfig(stream=sys.stderr,level=log_level)
time_start = time.time()
main( args )
logging.info("Time elapsed: "+str(time.time() - time_start)+"\n")
else:
logging.error("Invalid arguments. Exiting script\n")
sys.exit(1)
| 33.978947 | 158 | 0.74272 | #!/usr/bin/env python
"""
Script that uses output from cutadapt to quickly detect fully overlapping pairs.
It is based on the fact that if sequencing adapters are trimmed from both paired-ends, the resulting
fragment needs to be shorter than the pair-end length
Depends of cutadapt seqio and xopen modules from version 1.6
Author: Mauricio Barrientos-Somarribas
Email: mauricio.barrientos@ki.se
Copyright 2014 Mauricio Barrientos-Somarribas
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import argparse
import os.path
#Time of script execution and logging module
import time
import logging
import re
import math
import itertools
from collections import *
from cutadapt import seqio,xopen
from distance import hamming
import ctypes
#Data Analysis libs
import numpy as np
#****************Begin of Main ***************
def main(args):
	"""Split read pairs into fully-overlapping short fragments and regular pairs.

	Reads the paired fastq files args.R1/args.R2.  Pairs whose insert is
	shorter than the read length (adapter trimmed on both mates) are merged
	into a single consensus record written to <prefix>_single.fq.gz; every
	other pair is written unchanged to <prefix>_R1.fq.gz / <prefix>_R2.fq.gz.
	A summary (FragmentStats) is logged at the end.
	"""
	# Publish the settings at module scope (used below and when sizing
	# FragmentStats).
	global raw_read_len
	global min_trim
	raw_read_len = args.raw_read_length
	min_trim = args.min_trim
	paired_reader = seqio.PairedSequenceReader(args.R1, args.R2, fileformat="fastq")
	stats = FragmentStats(raw_read_len)
	try:
		out_r1 = xopen.xopen(args.output_prefix + "_R1.fq.gz", "w")
		out_r2 = xopen.xopen(args.output_prefix + "_R2.fq.gz", "w")
		small_fragments = xopen.xopen(args.output_prefix + "_single.fq.gz", "w")
	except Exception:
		logging.error("An error has occured creating one of the output files")
		sys.exit(1)
	# Bug fix: close the output files even if processing raises, so the gzip
	# streams are flushed (previously they leaked on any mid-loop error).
	try:
		for read1, read2 in paired_reader:
			is_short_fragment = False
			# If both paired-ends were trimmed "confidently"
			r1_len, r2_len = len(read1.sequence), len(read2.sequence)
			if max(r1_len, r2_len) < (raw_read_len - min_trim):
				aligned_r1, aligned_r2 = align_sequences(read1, read2)
				is_short_fragment = is_fragment(aligned_r1, aligned_r2)
			if is_short_fragment:
				stats.add_small_fragment(len(aligned_r1.sequence))
				consensus_fragment = get_consensus(aligned_r1, aligned_r2)
				consensus_fragment.write(small_fragments)
			else:
				stats.add_non_fragment()
				read1.write(out_r1)
				read2.write(out_r2)
	finally:
		out_r1.close()
		out_r2.close()
		small_fragments.close()
	logging.info(str(stats) + "\n")
#*****************End of Main**********************
class FragmentStats:
	"""Counters describing how many read pairs collapsed into short fragments.

	Tracks the total number of pairs seen, how many were short fragments and
	a histogram of fragment lengths (indexed by length, up to read_len - 1).
	"""
	def __init__(self, read_len):
		self.total_reads = 0
		self.fragment_count = 0
		self.fragment_size_dist = np.zeros(read_len, dtype=np.uint32)
	def add_small_fragment(self, fragment_len):
		"""Record one pair that collapsed into a fragment of fragment_len bases."""
		self.total_reads += 1
		self.fragment_count += 1
		self.fragment_size_dist[fragment_len] += 1
	def add_non_fragment(self):
		"""Record one ordinary (non-overlapping) pair."""
		self.total_reads += 1
	def __str__(self):
		# Bug fix: guard the percentage against division by zero when no
		# reads were processed at all.
		if self.total_reads > 0:
			fraction = float(self.fragment_count) / self.total_reads
		else:
			fraction = 0.0
		out_str = "Extract fragment stats\n"
		out_str += "Reads processed: " + str(self.total_reads) + "\n"
		out_str += "Small fragments detected: {0}({1:.2%})\n".format(self.fragment_count, fraction)
		out_str += "Small fragment size distribution\n"
		out_str += "Fragment_len\tcount\n"
		out_str += "\n".join(["{0}\t{1}".format(frag_size, count)
			for frag_size, count in enumerate(self.fragment_size_dist)])
		return out_str
def rev_complement(seq):
	"""Return the reverse complement of a DNA string, upper-cased.

	Characters other than A/C/G/T are upper-cased and passed through.
	"""
	return seq[::-1].upper().translate(str.maketrans("ATGC", "TACG"))
def is_fragment(aligned_r1, aligned_r2):
	"""True when the two aligned reads look like the same short fragment.

	Both reads must have equal length and differ (Hamming distance) in
	fewer than 5% of their positions.
	"""
	length = len(aligned_r1.sequence)
	if length != len(aligned_r2.sequence):
		return False
	return hamming(aligned_r1.sequence, aligned_r2.sequence) < (length * 0.05)
# Puts both mates on the same strand so they can be compared position-wise.
def align_sequences(r1, r2):
	"""Return (r1, r2') where r2' is r2 reverse-complemented with its
	quality string reversed to match; r1 is returned unchanged."""
	flipped = seqio.Sequence(r2.name, rev_complement(r2.sequence), r2.qualities[::-1])
	return r1, flipped
# Assumes the two reads are aligned and of equal length.
def get_consensus(aligned_r1, aligned_r2):
	"""Merge two aligned, equal-length reads into one consensus record.

	At every position the base with the higher quality character wins (r1
	wins ties).  The returned record keeps only the part of r1's name before
	the first space (drops Illumina's pair annotation).

	Bug fix: the original built the buffers with ctypes.create_string_buffer,
	which rejects ``str`` input on Python 3; plain lists are used instead.
	"""
	consensus_seq = list(aligned_r1.sequence)
	consensus_qual = list(aligned_r1.qualities)
	for pos in range(len(consensus_seq)):
		if aligned_r2.qualities[pos] > consensus_qual[pos]:
			consensus_seq[pos] = aligned_r2.sequence[pos]
			consensus_qual[pos] = aligned_r2.qualities[pos]
	# Removes illumina's pair info from the consensus sequence name
	return seqio.Sequence(aligned_r1.name.split(" ")[0],
		"".join(consensus_seq),
		"".join(consensus_qual))
def validate_args(args):
	"""Validate the parsed command line arguments.

	Checks that args.R1 and args.R2 exist and, when no output prefix was
	given, derives one from the R1 filename by stripping a trailing
	``_R1.fastq(.gz)``-style suffix.

	Returns True when the arguments are usable, False otherwise; one error
	is logged per problem found.
	"""
	valid_args = True
	if not os.path.isfile(args.R1):
		logging.error(args.R1 + " is not a valid file")
		valid_args = False
	if not os.path.isfile(args.R2):
		logging.error(args.R2 + " is not a valid file")
		valid_args = False
	if valid_args and not args.output_prefix:
		stem_re = re.match(r"(.+)_R?[12]\.f(ast)?q(\.gz)?$", args.R1)
		# Bug fix: a filename that does not match the pattern previously
		# crashed with AttributeError on None; report it instead.
		if stem_re:
			args.output_prefix = stem_re.group(1)
		else:
			logging.error("Cannot derive an output prefix from " + args.R1 +
				"; use -o/--output-prefix")
			valid_args = False
	return valid_args
if __name__ == '__main__':
    # Parse the command line, configure logging, then run and time main().
    parser = argparse.ArgumentParser(description="Script to process fastq files after adapter removal and extracts sequences fragments smaller than read length")
    parser.add_argument("R1", help="Fastq with forward paired-end")
    parser.add_argument("R2", help="Fastq with reverse paired-end")
    parser.add_argument("-o", "--output-prefix", default=None, help="Prefix of the output files")
    parser.add_argument("--raw_read_length", default=301, type=int, help="Length of raw reads (before adapter trimming). Default: 301")
    parser.add_argument("--min-trim", default=10, type=int, help="Minimum number of bases trimmed to consider the adapter removed was not spurious. Default: 10")
    parser.add_argument("-l", "--log-file", default=None, help="Name of the log file")
    args = parser.parse_args()
    if not validate_args(args):
        logging.error("Invalid arguments. Exiting script\n")
        sys.exit(1)
    # Send the log to a file when requested, otherwise to stderr.
    log_level = logging.INFO
    if args.log_file:
        logging.basicConfig(filename=args.log_file, level=log_level)
    else:
        logging.basicConfig(stream=sys.stderr, level=log_level)
    time_start = time.time()
    main(args)
    logging.info("Time elapsed: " + str(time.time() - time_start) + "\n")
| 3,423 | -1 | 252 |
1420efe3a125985e64e2524aecee773b66a7a539 | 5,956 | py | Python | models/audio_net.py | CFM-MSG/MTSC_VSS | 9536f8dc8d0282c3d0e6e2beee6e2cac490d6cb1 | [
"MIT"
] | 1 | 2021-11-20T12:31:24.000Z | 2021-11-20T12:31:24.000Z | models/audio_net.py | CFM-MSG/MTSC_VSS | 9536f8dc8d0282c3d0e6e2beee6e2cac490d6cb1 | [
"MIT"
] | null | null | null | models/audio_net.py | CFM-MSG/MTSC_VSS | 9536f8dc8d0282c3d0e6e2beee6e2cac490d6cb1 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
| 48.819672 | 118 | 0.662525 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Unet(nn.Module):
    """U-Net whose innermost stage is a TwoStreamBlock that injects motion
    and appearance features into the bottleneck."""
    def __init__(self, ngf=64):
        # ngf: base channel width; the encoder widens up to ngf * 8.
        super(Unet, self).__init__()
        # Build the net inside-out: bottleneck first, outermost block last.
        two_stream = TwoStreamBlock(ngf * 8, ngf * 8)
        unet_block = UnetBlock(ngf * 8, ngf * 8, upconv_in_dim=ngf * 8 * 2, submodule=two_stream)
        unet_block = UnetBlock(ngf * 8, ngf * 4, downconv_in_dim=None, submodule=unet_block)
        unet_block = UnetBlock(ngf * 4, ngf * 2, downconv_in_dim=None, submodule=unet_block)
        unet_block = UnetBlock(ngf * 2, ngf, downconv_in_dim=None, submodule=unet_block)
        unet_block = UnetBlock(ngf, 1, downconv_in_dim=1, submodule=unet_block, outermost=True)
        self.bn0 = nn.BatchNorm2d(1)
        self.unet_block = unet_block
    def forward(self, x, feat_motion, feat_appear):
        # A channel dim is inserted at position 1 so BatchNorm2d(1) applies
        # (assumes x arrives without a channel dim — TODO confirm caller).
        x = self.bn0(x.unsqueeze(1))
        x, feat_motion, feat_appear = self.unet_block(x, feat_motion, feat_appear)
        # squeeze() drops the singleton channel dim of the 1-channel output.
        return x.squeeze(), feat_motion, feat_appear
class UnetBlock(nn.Module):
    """One recursive U-Net stage: downsample -> submodule -> upsample.

    Unless this is the outermost block (or noskip is set) the stage returns
    its input concatenated with its output along the channel dim, forming
    the skip connection consumed by the enclosing stage.
    """
    def __init__(self, downconv_out_dim, upconv_out_dim, downconv_in_dim=None, upconv_in_dim=None,
                 submodule=None, outermost=False, innermost=False, noskip=False):
        super(UnetBlock, self).__init__()
        self.outermost = outermost
        self.noskip = noskip
        use_bias = False
        # Default the conv input widths from the block's own output widths.
        if downconv_in_dim is None:
            downconv_in_dim = upconv_out_dim
        if innermost:
            upconv_in_dim = downconv_out_dim
        elif upconv_in_dim is None:
            # Inner submodules return [skip, output] concatenated -> doubled width.
            upconv_in_dim = downconv_out_dim * 2
        downrelu = nn.LeakyReLU(0.2, False)
        downnorm = nn.BatchNorm2d(downconv_out_dim)
        uprelu = nn.ReLU(True)
        upnorm = nn.BatchNorm2d(upconv_out_dim)
        upsample = nn.Upsample(
            scale_factor=2, mode='bilinear', align_corners=True)
        # Assemble the down/up pipelines; layer mix depends on the position.
        if outermost:
            downconv = nn.Conv2d(downconv_in_dim, downconv_out_dim, kernel_size=4, stride=2, padding=1, bias=use_bias)
            upconv = nn.Conv2d(upconv_in_dim, upconv_out_dim, kernel_size=3, padding=1)
            down = [downconv]
            up = [uprelu, upsample, upconv]
        elif innermost:
            downconv = nn.Conv2d(downconv_in_dim, downconv_out_dim, kernel_size=4, stride=2, padding=1,
                                 bias=use_bias)  # innermost: no norm after downconv
            upconv = nn.Conv2d(upconv_in_dim, upconv_out_dim, kernel_size=3, padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upsample, upconv, upnorm]
        else:
            downconv = nn.Conv2d(downconv_in_dim, downconv_out_dim, kernel_size=4, stride=2, padding=1, bias=use_bias)
            upconv = nn.Conv2d(upconv_in_dim, upconv_out_dim, kernel_size=3, padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upsample, upconv, upnorm]
        self.down_net = nn.Sequential(*down)
        self.submodule = submodule
        self.up_net = nn.Sequential(*up)
    def forward(self, x, feat_motion, feat_appear):
        x_out = self.down_net(x)
        # The submodule threads the motion/appearance features through.
        x_out, feat_motion, feat_appear = self.submodule(x_out, feat_motion, feat_appear)
        x_out = self.up_net(x_out)
        if self.outermost or self.noskip:
            return x_out, feat_motion, feat_appear
        else:
            # Skip connection: concatenate input with output on channel dim.
            return torch.cat([x, x_out], 1), feat_motion, feat_appear
class TwoStreamBlock(nn.Module):
    """Bottleneck stage with two parallel branches.

    A motion branch conditioned on an fc-projected motion vector, and an
    appearance branch conditioned on appearance features that are gated by
    a sigmoid attention map and max-pooled.  The two upsampled branch
    outputs are concatenated along the channel dim.
    """
    def __init__(self, downconv_out_dim, upconv_out_dim, feat_motion_dim=2048, feat_appear_dim=512):
        super(TwoStreamBlock, self).__init__()
        upconv_in_dim = downconv_out_dim
        downconv_in_dim = upconv_out_dim
        # --- motion branch: LeakyReLU+conv down; ReLU+upsample+conv+BN up ---
        downconv = nn.Conv2d(downconv_in_dim, downconv_out_dim, kernel_size=4, stride=2, padding=1, bias=False)
        downrelu = nn.LeakyReLU(0.2, False)
        uprelu = nn.ReLU(True)
        upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        upconv = nn.Conv2d(upconv_in_dim, upconv_out_dim, kernel_size=3, padding=1, bias=False)
        upnorm = nn.BatchNorm2d(upconv_out_dim)
        self.down_motion = nn.Sequential(*[downrelu, downconv])
        self.up_motion = nn.Sequential(*[uprelu, upsample, upconv, upnorm])
        # --- appearance branch: same structure, independent parameters ---
        downconv2 = nn.Conv2d(downconv_in_dim, downconv_out_dim, kernel_size=4, stride=2, padding=1, bias=False)
        downrelu2 = nn.LeakyReLU(0.2, False)
        uprelu2 = nn.ReLU(True)
        upsample2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        upconv2 = nn.Conv2d(upconv_in_dim, upconv_out_dim, kernel_size=3, padding=1, bias=False)
        upnorm2 = nn.BatchNorm2d(upconv_out_dim)
        self.down_appear = nn.Sequential(*[downrelu2, downconv2])
        self.up_appear = nn.Sequential(*[uprelu2, upsample2, upconv2, upnorm2])
        # Projections of the conditioning features into the branch width.
        self.fc_motion = nn.Linear(feat_motion_dim, downconv_in_dim)
        self.fc_appear = nn.Linear(feat_appear_dim, downconv_in_dim)
        self.sig_conv = nn.Conv2d(1, 1, kernel_size=1)
        self.max_pool = nn.AdaptiveMaxPool2d((1, 1))
        self.sigmoid = nn.Sigmoid()
    def forward(self, x, feat_motion, feat_appear):
        feat_motion = self.fc_motion(feat_motion)
        feat_motion = feat_motion.unsqueeze(-1).unsqueeze(-1)
        # feat_appear is permuted channel-last before the Linear, so it is
        # expected to be 5-D here (batch + 3 spatial/temporal dims + channel
        # — TODO confirm exact layout against the caller).
        feat_appear = feat_appear.permute(0, 2, 3, 4, 1).contiguous()
        feat_appear = self.fc_appear(feat_appear).mean(1)
        feat_appear = feat_appear.permute(0, 3, 1, 2).contiguous()
        # NOTE: `map` shadows the builtin; it is a 1-channel attention map
        # from the motion/appearance channel-wise dot product.
        map = torch.sum(torch.mul(feat_motion, feat_appear), 1, keepdim=True)
        map = self.sig_conv(map)
        map = self.sigmoid(map)
        # Gate appearance features by the map, then global max-pool.
        feat_appear = self.max_pool(map * feat_appear).squeeze()
        x_motion = self.down_motion(x)
        x_motion = feat_motion + x_motion
        x_motion = self.up_motion(x_motion)
        x_appear = self.down_appear(x)
        x_appear = feat_appear.unsqueeze(-1).unsqueeze(-1) + x_appear
        x_appear = self.up_appear(x_appear)
        return torch.cat([x_appear, x_motion], 1), feat_motion, feat_appear
| 5,641 | 18 | 228 |
180dcf48c8c5be31604981911aac4f3dde2a1d12 | 1,864 | py | Python | tests/test_grid.py | agonopol/battleship-ai | eef51e972b26e5454dfc0d3685d417cc429969b1 | [
"MIT"
] | null | null | null | tests/test_grid.py | agonopol/battleship-ai | eef51e972b26e5454dfc0d3685d417cc429969b1 | [
"MIT"
] | null | null | null | tests/test_grid.py | agonopol/battleship-ai | eef51e972b26e5454dfc0d3685d417cc429969b1 | [
"MIT"
] | null | null | null | from battleship.grid import Grid, Outcome
from battleship.ship import Ship
| 23.3 | 43 | 0.607833 | from battleship.grid import Grid, Outcome
from battleship.ship import Ship
def test_grid_creates_ok():
    """A freshly created grid has no ships, so it already counts as lost."""
    board = Grid(10)
    assert board.lost()
def test_grid_places_ship_ok():
    """Placing a length-3 ship adds 3 cells; the grid is no longer lost."""
    board = Grid(10)
    assert board.lost()
    vessel = Ship((0, 0), (0, 2))
    board.place(vessel)
    assert board.ships == 3
    assert not board.lost()
def test_grid_places_ship_overlap_not_ok():
    """A ship crossing an occupied cell is rejected; counts are unchanged."""
    board = Grid(10)
    assert board.lost()
    board.place(Ship((0, 0), (0, 2)))
    crossing = Ship((0, 1), (2, 1))
    assert not board.place(crossing)
    assert board.ships == 3
    assert not board.lost()
def test_grid_shot_miss():
    """A shot on open water is a MISS and leaves the fleet untouched."""
    board = Grid(10)
    assert board.lost()
    board.place(Ship((0, 0), (0, 2)))
    assert board.ships == 3
    outcome = board.hit(1, 1)
    assert outcome == Outcome.MISS
    assert board.ships == 3
    assert not board.lost()
def test_grid_shot_hit():
    """A shot on a ship cell is a HIT and removes one cell from the count."""
    board = Grid(10)
    assert board.lost()
    board.place(Ship((0, 0), (0, 2)))
    assert board.ships == 3
    outcome = board.hit(0, 1)
    assert outcome == Outcome.HIT
    assert board.ships == 2
    assert not board.lost()
def test_grid_shot_invalid_same_spot():
    """Shooting the same square twice: first a MISS, then INVALID."""
    board = Grid(10)
    assert board.lost()
    board.place(Ship((0, 0), (0, 2)))
    assert board.ships == 3
    first = board.hit(1, 1)
    assert first == Outcome.MISS
    assert board.ships == 3
    assert not board.lost()
    second = board.hit(1, 1)
    assert second == Outcome.INVALID
    assert board.ships == 3
    assert not board.lost()
def test_grid_shot_and_win():
    """Sinking the only ship returns the grid to the lost state."""
    board = Grid(10)
    assert board.lost()
    board.place(Ship((0, 0), (0, 1)))
    assert board.ships == 2
    assert board.hit(0, 0) == Outcome.HIT
    assert board.ships == 1
    assert board.hit(0, 1) == Outcome.HIT
    assert board.lost()
| 1,623 | 0 | 161 |
0d426cc28bb3a102172b4a5193fac6badf8ee34e | 7,741 | py | Python | test/nn/conv/test_message_passing.py | zheng-da/pytorch_geometric | 3d23afdf5f5c6f386b17a339790181313c60aae6 | [
"MIT"
] | 2 | 2020-12-06T13:10:52.000Z | 2021-07-06T06:50:10.000Z | test/nn/conv/test_message_passing.py | zheng-da/pytorch_geometric | 3d23afdf5f5c6f386b17a339790181313c60aae6 | [
"MIT"
] | null | null | null | test/nn/conv/test_message_passing.py | zheng-da/pytorch_geometric | 3d23afdf5f5c6f386b17a339790181313c60aae6 | [
"MIT"
] | 1 | 2021-07-06T06:50:21.000Z | 2021-07-06T06:50:21.000Z | import copy
import pytest
from typing import Tuple, Optional
import torch
from torch_sparse import SparseTensor
from torch_sparse.matmul import spmm
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import softmax
edge_index = torch.tensor([
[0, 0, 0, 1, 1],
[0, 1, 2, 0, 2],
])
adj_t = SparseTensor(row=edge_index[1], col=edge_index[0])
x = (
torch.arange(1, 3, dtype=torch.float),
torch.arange(1, 4, dtype=torch.float),
)
| 31.72541 | 71 | 0.6556 | import copy
import pytest
from typing import Tuple, Optional
import torch
from torch_sparse import SparseTensor
from torch_sparse.matmul import spmm
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import softmax
# Toy graph with 5 directed edges (PyG convention: row 0 = source nodes,
# row 1 = target nodes).
edge_index = torch.tensor([
    [0, 0, 0, 1, 1],
    [0, 1, 2, 0, 2],
])
# The same adjacency as a SparseTensor built with row=targets, col=sources.
adj_t = SparseTensor(row=edge_index[1], col=edge_index[0])
# Feature pair used by the bipartite tests: x[0] holds 2 node values,
# x[1] holds 3 node values.
x = (
    torch.arange(1, 3, dtype=torch.float),
    torch.arange(1, 4, dtype=torch.float),
)
class MyBasicConv(MessagePassing):
    """Minimal MessagePassing layer: no explicit aggr, default message/update.

    NOTE: MessagePassing routes propagate() kwargs to message()/update() by
    parameter name, so signatures here must not be renamed.
    """
    def __init__(self):
        super(MyBasicConv, self).__init__()
    def forward(self, x, edge_index):
        return self.propagate(edge_index, x=x)
def test_my_basic_conv():
    """All input/adjacency combinations agree; only the tuple-x +
    edge_index variant refuses to script after jittable()."""
    layer = MyBasicConv()
    expected = layer(x[1], edge_index)
    assert expected.tolist() == [3.0, 1.0, 3.0]
    assert layer(x, edge_index).tolist() == expected.tolist()
    assert layer(x[0], adj_t).tolist() == expected.tolist()
    assert layer(x, adj_t).tolist() == expected.tolist()
    scripted = torch.jit.script(layer.jittable(x=x[1], edge_index=edge_index))
    assert scripted(x[1], edge_index).tolist() == expected.tolist()
    with pytest.raises(RuntimeError):
        torch.jit.script(layer.jittable(x=x, edge_index=edge_index))
    torch.jit.script(layer.jittable(x=x[0], edge_index=adj_t))
    torch.jit.script(layer.jittable(x=x, edge_index=adj_t))
class MyAdvancedConv(MessagePassing):
    """Sum-aggregating layer whose messages are softmax-weighted source
    features and whose update adds a constant of 2."""
    def __init__(self):
        super(MyAdvancedConv, self).__init__(aggr='add')
    def forward(self, x, edge_index):
        return self.propagate(edge_index, x=x)
    def message(self, x_j, edge_index_i, size_i: Optional[int]):
        # size_i is filled in by MessagePassing; must be known for softmax.
        assert size_i is not None
        alpha = softmax(x_j, edge_index_i, num_nodes=size_i)
        return alpha * x_j
    def update(self, inputs):
        return inputs + 2
def test_my_advanced_conv():
    """Softmax messages + constant update behave identically across the
    supported input/adjacency combinations; the tuple-x + edge_index form
    fails to script."""
    layer = MyAdvancedConv()
    expected = layer(x[1], edge_index)
    assert layer(x, edge_index).tolist() == expected.tolist()
    assert layer(x[0], adj_t).tolist() == expected.tolist()
    assert layer(x, adj_t).tolist() == expected.tolist()
    scripted = torch.jit.script(layer.jittable(x=x[1], edge_index=edge_index))
    assert scripted(x[1], edge_index).tolist() == expected.tolist()
    with pytest.raises(RuntimeError):
        torch.jit.script(layer.jittable(x=x, edge_index=edge_index))
    torch.jit.script(layer.jittable(x=x[0], edge_index=adj_t))
    torch.jit.script(layer.jittable(x=x, edge_index=adj_t))
class MyDefaultArgumentsConv(MessagePassing):
    """Mean-aggregating layer whose message() takes a default argument:
    with zeros=True (the default) every message is zeroed out."""
    def __init__(self):
        super(MyDefaultArgumentsConv, self).__init__(aggr='mean')
    def forward(self, x, edge_index, **kwargs):
        # Extra kwargs (e.g. zeros=...) are forwarded to message() by name.
        return self.propagate(edge_index, x=x, **kwargs)
    def message(self, x_j, zeros: bool = True):
        return x_j * 0 if zeros else x_j
def test_my_default_arguments_conv():
    """With zeros=True every aggregated value is 0; scripting the first
    jittable variant raises NotSupportedError."""
    layer = MyDefaultArgumentsConv()
    expected = layer(x[1], edge_index)
    assert expected.tolist() == [0, 0, 0]
    assert layer(x, edge_index).tolist() == expected.tolist()
    assert layer(x[0], adj_t).tolist() == expected.tolist()
    assert layer(x, adj_t).tolist() == expected.tolist()
    with pytest.raises(torch.jit.frontend.NotSupportedError):
        torch.jit.script(layer.jittable(x=x[1], edge_index=edge_index))
    torch.jit.script(layer.jittable(x=x, edge_index=edge_index))
    torch.jit.script(layer.jittable(x=x[0], edge_index=adj_t))
    torch.jit.script(layer.jittable(x=x, edge_index=adj_t))
class MyBipartiteConv(MessagePassing):
    """Mean-aggregating layer over a bipartite graph: x is an
    (x_source, x_target) tuple and update() adds the target features."""
    def __init__(self):
        super(MyBipartiteConv, self).__init__(aggr='mean')
    def forward(self, x: Tuple[torch.Tensor, torch.Tensor], edge_index,
                size: Optional[Tuple[int, int]] = None):
        return self.propagate(edge_index, size=size, x=x)
    def update(self, inputs, x: Tuple[torch.Tensor, torch.Tensor]):
        # x may arrive as a plain tensor when a single feature matrix was
        # passed in; in the tuple case keep the target-side features.
        if isinstance(x, tuple):
            x = x[-1]
        return inputs + x
def test_my_bipartite_conv():
    """Bipartite propagation accepts a single tensor or an (x_src, x_dst)
    tuple; the single-source-tensor + SparseTensor combination fails."""
    layer = MyBipartiteConv()
    expected = layer(x[1], edge_index)
    assert expected.tolist() == [2.5, 3.0, 4.5]
    assert layer(x, edge_index).tolist() == expected.tolist()
    assert layer(x, adj_t).tolist() == expected.tolist()
    with pytest.raises(RuntimeError):
        layer(x[0], adj_t)
    scripted = torch.jit.script(layer.jittable(x=x, edge_index=edge_index))
    assert scripted(x, edge_index).tolist() == expected.tolist()
    with pytest.raises(RuntimeError):
        torch.jit.script(layer.jittable(x=x[1], edge_index=edge_index))
    torch.jit.script(layer.jittable(x=x[0], edge_index=adj_t))
    torch.jit.script(layer.jittable(x=x, edge_index=adj_t))
class MyDoublePropagateConv(MessagePassing):
    """Max-aggregating layer that propagates twice per forward pass, once in
    each flow direction, and sums the two results.

    NOTE: it mutates self.flow between the two propagate() calls.
    """
    def __init__(self):
        super(MyDoublePropagateConv, self).__init__(aggr='max')
    def forward(self, x, edge_index):
        self.flow = 'source_to_target'
        out1 = self.propagate(edge_index, x=x)
        self.flow = 'target_to_source'
        out2 = self.propagate(edge_index, x=x)
        return out1 + out2
def test_my_double_propagate_conv():
    """Propagating in both directions and summing gives [5, 4, 2]; a
    SparseTensor adjacency raises ValueError for this layer."""
    layer = MyDoublePropagateConv()
    expected = layer(x[1], edge_index)
    assert expected.tolist() == [5.0, 4.0, 2.0]
    with pytest.raises(ValueError):
        assert layer(x[0], adj_t).tolist() == expected.tolist()
    scripted = torch.jit.script(layer.jittable(x=x[1], edge_index=edge_index))
    assert scripted(x[1], edge_index).tolist() == expected.tolist()
class MyMessageAndAggregateConv1(MessagePassing):
    """Layer fusing message+aggregate into a single sparse matmul (spmm)
    over the SparseTensor adjacency."""
    def __init__(self):
        super(MyMessageAndAggregateConv1, self).__init__()
    def forward(self, x, edge_index):
        return self.propagate(edge_index, x=x)
    def message_and_aggregate(self, adj_t: SparseTensor, x):
        assert adj_t is not None
        # x is flattened to a column vector for spmm, then back to 1-D.
        return spmm(adj_t, x.view(-1, 1)).view(-1)
class MyMessageAndAggregateConv2(MessagePassing):
    """Same as MyMessageAndAggregateConv1, but forward() annotates
    edge_index as a SparseTensor."""
    def __init__(self):
        super(MyMessageAndAggregateConv2, self).__init__()
    def forward(self, x, edge_index: SparseTensor):
        return self.propagate(edge_index, x=x)
    def message_and_aggregate(self, adj_t: SparseTensor, x):
        assert adj_t is not None
        return spmm(adj_t, x.view(-1, 1)).view(-1)
def test_my_message_and_aggregate_conv():
    """Both fused-aggregation layers produce [3, 1, 3] and survive
    TorchScript with their respective adjacency types."""
    layer = MyMessageAndAggregateConv1()
    expected = layer(x[1], edge_index)
    assert expected.tolist() == [3.0, 1.0, 3.0]
    assert layer(x[0], adj_t).tolist() == expected.tolist()
    scripted = torch.jit.script(layer.jittable(x=x[1], edge_index=edge_index))
    assert scripted(x[1], edge_index).tolist() == [3.0, 1.0, 3.0]
    layer = MyMessageAndAggregateConv2()
    expected = layer(x[1], edge_index)
    assert expected.tolist() == [3.0, 1.0, 3.0]
    assert layer(x[0], adj_t).tolist() == expected.tolist()
    scripted = torch.jit.script(layer.jittable(x=x[0], edge_index=adj_t))
    assert scripted(x[0], adj_t).tolist() == [3.0, 1.0, 3.0]
class MyCopyConv(MessagePassing):
    """Layer holding a single (uninitialised) weight Parameter, used to
    exercise copy/deepcopy semantics in test_copy()."""
    def __init__(self):
        super(MyCopyConv, self).__init__()
        self.weight = torch.nn.Parameter(torch.Tensor(10))
    def forward(self, x, edge_index):
        return self.propagate(edge_index, x=x)
def test_copy():
    """copy.copy shares parameter storage; copy.deepcopy clones it.

    Bug fix: the original compared the *bound methods* ``weight.data_ptr``
    instead of calling them, so it compared method objects rather than the
    underlying storage addresses.
    """
    conv = MyCopyConv()
    conv2 = copy.copy(conv)
    assert conv != conv2
    assert conv.weight.data_ptr() == conv2.weight.data_ptr()
    conv = copy.deepcopy(conv)
    assert conv != conv2
    assert conv.weight.data_ptr() != conv2.weight.data_ptr()
| 6,169 | 161 | 931 |
37d9b671438cbab3ced21ae57cea76a44b2892f3 | 4,282 | py | Python | src/plot_jhu_data.py | astrophys/covid19 | a1978a87a4d090c1de29d38db1c04164c8b0ba8e | [
"MIT"
] | 1 | 2020-03-19T17:39:17.000Z | 2020-03-19T17:39:17.000Z | src/plot_jhu_data.py | astrophys/covid19 | a1978a87a4d090c1de29d38db1c04164c8b0ba8e | [
"MIT"
] | null | null | null | src/plot_jhu_data.py | astrophys/covid19 | a1978a87a4d090c1de29d38db1c04164c8b0ba8e | [
"MIT"
] | null | null | null | # Author : Ali Snedden
# Date : 3/21/20
# License: MIT
# Purpose:
# This code plots the Johns Hoptins Covid-19 Data
#
#
#
# Notes :
#
# References :
# 1. https://github.com/CSSEGISandData/COVID-19
#
#
# Future:
#
#
#
import sys
import numpy as np
import time
import pandas as pd
from matplotlib import pyplot as plt
from error import exit_with_error
from classes import ITALY_DATA
from scipy import optimize
def print_help(ExitCode):
    """Write the command line usage text to stderr and terminate.

    ARGS:
        ExitCode : int handed straight to sys.exit() (0 = help requested,
                   non-zero = bad invocation)
    RETURN:
        Never returns; exits the process.
    DESCRIPTION:
        Documents the positional arguments consumed by main().
    DEBUG:
    FUTURE:
    """
    sys.stderr.write(
        "python3 ./src/plot_jhu_data.py country log-lin slice_index\n"
        "      country  : See time_series_covid19_confirmed_global.csv\n"
        "                 for coutries to plot options\n"
        "      log-lin  : required, plot y axis in natural log, if fit is \n"
        "                 straight line then experiencing exponential growth.\n"
        "                 My hope is to someday implement other to be fit types \n"
        "                 (e.g. lin-lin)\n"
        "      slice_index : required, for fitting, e.g. \n"
        "                 if = -10, it will fit the last 10 points\n"
        "                 if = 10, it will fit the first 10 points\n"
        "      \n"
        "      To Run: \n"
        "      source ~/.local/virtualenvs/python3.7/bin/activate\n")
    sys.exit(ExitCode)
def main():
    """Plot the Johns Hopkins COVID-19 confirmed-case curve for one country.

    Command line (see print_help()):
        argv[1] : country, matched case-insensitively against the
                  Country/Region column of the JHU csv
        argv[2] : plot type; only "log-lin" is implemented
        argv[3] : optional non-zero slice index selecting the points to fit

    Never returns normally; finishes with sys.exit().

    FUTURE:
        1. Add option to fit only a specific section of data.
    """
    nArg = len(sys.argv)
    # Use python 3
    if(sys.version_info[0] != 3):
        exit_with_error("ERROR!!! Use Python 3\n")
    # Get options.  Check nArg before touching sys.argv[1] so that running
    # with no arguments prints the help text instead of raising IndexError.
    if(nArg < 2):
        print_help(1)
    if("-h" in sys.argv[1]):
        print_help(0)
    elif(nArg != 4 and nArg != 3):
        print_help(1)
    if(nArg == 4):
        slcIdx = int(sys.argv[3])
        # Bug fix: slice_index == 0 previously fell through both slicing
        # branches and crashed later with a NameError on xfit.
        if(slcIdx == 0):
            exit_with_error("ERROR!!! slice_index must be non-zero\n")
    startTime = time.time()
    print("{} \n".format(sys.argv),flush=True)
    print("   Start Time : {}".format(time.strftime("%a, %d %b %Y %H:%M:%S ",
           time.localtime())),flush=True)
    # Get args
    country  = sys.argv[1]
    plotType = sys.argv[2]        # Straight line equals linear growth
    dataPath = "data/jhu/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
    countryFound = False
    df = pd.read_csv(dataPath)
    lastDate = df.columns[-1]
    for index, row in df.iterrows():
        # Select country specified
        if(row.values[1].lower() == country.lower()):
            if(countryFound == True):
                exit_with_error("ERROR!! {} should only occur "
                                "once".format(country.lower()))
            yV = np.asarray(row.values[4:],dtype=np.float32) # y vector -cases
            xV = np.asarray(range(len(yV)))     # x vector - days
            n  = len(xV)                        # Number of days
            countryFound = True
    # Bug fix: an unknown country previously fell through and died later
    # with a NameError on yV; fail with an explicit message instead.
    if(not countryFound):
        exit_with_error("ERROR!!! {} not found in {}\n".format(country, dataPath))
    fig, ax = plt.subplots(1,1)
    # Generate Plot
    if(plotType == "log-lin"):
        ylabel = "ln(cases + 1)"
        print(yV)
        yV = yV + 1                 # +1 avoids ln(0)
        yV = np.log(yV)
        # Fit a line to the requested slice of the log data
        if(nArg == 4):
            if(slcIdx < 0):
                xfit = xV[slcIdx:]
                yfit = yV[slcIdx:]
            elif(slcIdx > 0):
                xfit = xV[:slcIdx]
                yfit = yV[:slcIdx]
            fit = np.polyfit(xfit,yfit,deg=1)
            # Reuse xfit, and yfit for a smooth line over the full range
            xfit= np.asarray([x for x in np.arange(0,n,n/100.0)])
            yfit= fit[0]*xfit + fit[1]
            ax.plot(xfit, yfit, label="Fit - y={:.3f}x+{:.3f}".format(fit[0],fit[1]))
        ax.set_title("Covid-19 in {} (ending {})".format(country, lastDate))
    elif(plotType == "lin-lin"):
        ylabel = "Covid-19_Cases"
        exit_with_error("ERROR!! I haven't handled this option yet\n")
    else:
        exit_with_error("ERROR!! Invalid plotType option\n")
    ax.plot(xV, yV, label=ylabel)
    ax.set_xlabel("Time spanning 0-{} days".format(n-1))
    ax.set_ylabel("{}".format(ylabel))
    ax.legend()
    plt.show()
    print("Ended    : %s"%(time.strftime("%D:%H:%M:%S")))
    print("Run Time : {:.4f} h".format((time.time() - startTime)/3600.0))
    sys.exit(0)
if __name__ == "__main__":
    # Script entry point: run only when executed directly (not on import).
    main()
| 30.15493 | 111 | 0.541336 | # Author : Ali Snedden
# Date : 3/21/20
# License: MIT
# Purpose:
# This code plots the Johns Hoptins Covid-19 Data
#
#
#
# Notes :
#
# References :
# 1. https://github.com/CSSEGISandData/COVID-19
#
#
# Future:
#
#
#
import sys
import numpy as np
import time
import pandas as pd
from matplotlib import pyplot as plt
from error import exit_with_error
from classes import ITALY_DATA
from scipy import optimize
def print_help(ExitCode):
    """Write the command line usage text to stderr and terminate.

    ARGS:
        ExitCode : int handed straight to sys.exit() (0 = help requested,
                   non-zero = bad invocation)
    RETURN:
        Never returns; exits the process.
    DESCRIPTION:
        Documents the positional arguments consumed by main().
    DEBUG:
    FUTURE:
    """
    sys.stderr.write(
        "python3 ./src/plot_jhu_data.py country log-lin slice_index\n"
        "      country  : See time_series_covid19_confirmed_global.csv\n"
        "                 for coutries to plot options\n"
        "      log-lin  : required, plot y axis in natural log, if fit is \n"
        "                 straight line then experiencing exponential growth.\n"
        "                 My hope is to someday implement other to be fit types \n"
        "                 (e.g. lin-lin)\n"
        "      slice_index : required, for fitting, e.g. \n"
        "                 if = -10, it will fit the last 10 points\n"
        "                 if = 10, it will fit the first 10 points\n"
        "      \n"
        "      To Run: \n"
        "      source ~/.local/virtualenvs/python3.7/bin/activate\n")
    sys.exit(ExitCode)
def main():
    """Plot the Johns Hopkins COVID-19 confirmed-case curve for one country.

    Command line (see print_help()):
        argv[1] : country, matched case-insensitively against the
                  Country/Region column of the JHU csv
        argv[2] : plot type; only "log-lin" is implemented
        argv[3] : optional non-zero slice index selecting the points to fit

    Never returns normally; finishes with sys.exit().

    FUTURE:
        1. Add option to fit only a specific section of data.
    """
    nArg = len(sys.argv)
    # Use python 3
    if(sys.version_info[0] != 3):
        exit_with_error("ERROR!!! Use Python 3\n")
    # Get options.  Check nArg before touching sys.argv[1] so that running
    # with no arguments prints the help text instead of raising IndexError.
    if(nArg < 2):
        print_help(1)
    if("-h" in sys.argv[1]):
        print_help(0)
    elif(nArg != 4 and nArg != 3):
        print_help(1)
    if(nArg == 4):
        slcIdx = int(sys.argv[3])
        # Bug fix: slice_index == 0 previously fell through both slicing
        # branches and crashed later with a NameError on xfit.
        if(slcIdx == 0):
            exit_with_error("ERROR!!! slice_index must be non-zero\n")
    startTime = time.time()
    print("{} \n".format(sys.argv),flush=True)
    print("   Start Time : {}".format(time.strftime("%a, %d %b %Y %H:%M:%S ",
           time.localtime())),flush=True)
    # Get args
    country  = sys.argv[1]
    plotType = sys.argv[2]        # Straight line equals linear growth
    dataPath = "data/jhu/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
    countryFound = False
    df = pd.read_csv(dataPath)
    lastDate = df.columns[-1]
    for index, row in df.iterrows():
        # Select country specified
        if(row.values[1].lower() == country.lower()):
            if(countryFound == True):
                exit_with_error("ERROR!! {} should only occur "
                                "once".format(country.lower()))
            yV = np.asarray(row.values[4:],dtype=np.float32) # y vector -cases
            xV = np.asarray(range(len(yV)))     # x vector - days
            n  = len(xV)                        # Number of days
            countryFound = True
    # Bug fix: an unknown country previously fell through and died later
    # with a NameError on yV; fail with an explicit message instead.
    if(not countryFound):
        exit_with_error("ERROR!!! {} not found in {}\n".format(country, dataPath))
    fig, ax = plt.subplots(1,1)
    # Generate Plot
    if(plotType == "log-lin"):
        ylabel = "ln(cases + 1)"
        print(yV)
        yV = yV + 1                 # +1 avoids ln(0)
        yV = np.log(yV)
        # Fit a line to the requested slice of the log data
        if(nArg == 4):
            if(slcIdx < 0):
                xfit = xV[slcIdx:]
                yfit = yV[slcIdx:]
            elif(slcIdx > 0):
                xfit = xV[:slcIdx]
                yfit = yV[:slcIdx]
            fit = np.polyfit(xfit,yfit,deg=1)
            # Reuse xfit, and yfit for a smooth line over the full range
            xfit= np.asarray([x for x in np.arange(0,n,n/100.0)])
            yfit= fit[0]*xfit + fit[1]
            ax.plot(xfit, yfit, label="Fit - y={:.3f}x+{:.3f}".format(fit[0],fit[1]))
        ax.set_title("Covid-19 in {} (ending {})".format(country, lastDate))
    elif(plotType == "lin-lin"):
        ylabel = "Covid-19_Cases"
        exit_with_error("ERROR!! I haven't handled this option yet\n")
    else:
        exit_with_error("ERROR!! Invalid plotType option\n")
    ax.plot(xV, yV, label=ylabel)
    ax.set_xlabel("Time spanning 0-{} days".format(n-1))
    ax.set_ylabel("{}".format(ylabel))
    ax.legend()
    plt.show()
    print("Ended    : %s"%(time.strftime("%D:%H:%M:%S")))
    print("Run Time : {:.4f} h".format((time.time() - startTime)/3600.0))
    sys.exit(0)
if __name__ == "__main__":
    # Script entry point: run only when executed directly (not on import).
    main()
| 0 | 0 | 0 |
26bf7610cfe93f9a459dcd52bd651b53365563d4 | 1,789 | py | Python | tkinter_module/tkinter_image.py | kenwaldek/python | e6aaf5616a456a4fb91889c0617bd6511f1a223e | [
"MIT"
] | 1 | 2019-02-24T09:57:16.000Z | 2019-02-24T09:57:16.000Z | tkinter_module/tkinter_image.py | kenwaldek/python | e6aaf5616a456a4fb91889c0617bd6511f1a223e | [
"MIT"
] | null | null | null | tkinter_module/tkinter_image.py | kenwaldek/python | e6aaf5616a456a4fb91889c0617bd6511f1a223e | [
"MIT"
] | 4 | 2017-05-21T15:34:53.000Z | 2018-09-25T06:56:15.000Z | #! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# © kenwaldek MIT-license
#
# Title: tkinter_image Version: 1.0
# Date: 26-12-16 Language: python3
# Description: tkinter inladen van image en text via menubar
#
###############################################################
from PIL import Image, ImageTk
from tkinter import *
# Script entry: build a 400x300 Tk window hosting the Window frame and start
# the event loop.  (This truncated dataset cell duplicates the full script
# that follows in the file.)
root = Tk()
root.geometry('400x300')
app = Window(root)
root.mainloop()
| 27.953125 | 73 | 0.561766 | #! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# © kenwaldek MIT-license
#
# Title: tkinter_image Version: 1.0
# Date: 26-12-16 Language: python3
# Description: tkinter inladen van image en text via menubar
#
###############################################################
from PIL import Image, ImageTk
from tkinter import *
class Window(Frame):
    """Main application frame: a menu bar with File/Exit plus Edit entries
    for showing an image and a text label."""

    def __init__(self, master=None):
        Frame.__init__(self, master)
        Frame.tkraise(self)
        self.master = master
        self.init_window()

    def init_window(self):
        """Build the widgets: window title, packed frame and the menu bar."""
        self.master.title('gui')
        self.pack(fill=BOTH, expand=1)
        menu_bar = Menu(self.master)
        self.master.config(menu=menu_bar)
        file_menu = Menu(menu_bar)
        file_menu.add_command(label='Exit', command=self.client_exit)
        menu_bar.add_cascade(label='File', menu=file_menu)
        edit_menu = Menu(menu_bar)
        edit_menu.add_command(label='Show image', command=self.show_img)
        edit_menu.add_command(label='Show txt', command=self.show_txt)
        menu_bar.add_cascade(label='Edit', menu=edit_menu)

    def client_exit(self):
        """Terminate the application."""
        exit()

    def show_img(self):
        """Load 'pic.png' and place it in the top-left corner."""
        picture = Image.open('pic.png')
        photo = ImageTk.PhotoImage(picture)
        image_label = Label(self, image=photo)
        image_label.image = photo  # keep a reference so Tk does not GC the photo
        image_label.place(x=0, y=0)

    def show_txt(self):
        """Show a greeting label (pack() is required to make it visible)."""
        greeting = Label(self, text='hello world from kenny')
        greeting.pack()
# Script entry: 400x300 top-level window, then hand control to Tk's loop.
root = Tk()
root.geometry('400x300')
app = Window(root)
root.mainloop()
| 1,105 | -1 | 157 |
eb24c3cd1f6adf2f321fd6a4d6312c8759cb8014 | 117 | py | Python | app_bootstrap_examples/apps.py | bogdandrienko/kostanay-minerals | d266b3899f8403b5182e1dadf74b1f8bb580d17c | [
"MIT"
] | 1 | 2021-02-13T08:40:51.000Z | 2021-02-13T08:40:51.000Z | app_bootstrap_examples/apps.py | bogdandrienko/chrysotile-minerals | 47a4097e29ee40f2606807e28b2da466dfd7f3f4 | [
"MIT"
] | null | null | null | app_bootstrap_examples/apps.py | bogdandrienko/chrysotile-minerals | 47a4097e29ee40f2606807e28b2da466dfd7f3f4 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 19.5 | 44 | 0.811966 | from django.apps import AppConfig
class AppBootstrapExamplesConfig(AppConfig):
name = 'app_bootstrap_examples'
| 0 | 59 | 23 |
1e000f54ec66535b78129eef7b6df9704797f75b | 1,098 | py | Python | steps/run-wait/step.py | relay-integrations/relay-puppet | 1e0c2b67e778a26fcd91ed6bf2680f120e9a67d8 | [
"Apache-2.0"
] | 1 | 2020-11-30T07:20:17.000Z | 2020-11-30T07:20:17.000Z | steps/run-wait/step.py | relay-integrations/relay-puppet | 1e0c2b67e778a26fcd91ed6bf2680f120e9a67d8 | [
"Apache-2.0"
] | null | null | null | steps/run-wait/step.py | relay-integrations/relay-puppet | 1e0c2b67e778a26fcd91ed6bf2680f120e9a67d8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import logging
import time
from urllib.parse import urljoin
import requests
from relay_sdk import Interface, Dynamic as D
# Step inputs: Relay API endpoint/credentials and the id of the Puppet run to watch.
relay = Interface()
relay_api_url = relay.get(D.connection.relayAPIURL)
relay_api_token = relay.get(D.connection.token)
run_id = relay.get(D.id)
headers = {'Authorization': f'Bearer {relay_api_token}'}
# Poll the run's status until it reports "complete", then publish its
# job id / outcome as step outputs.
while True:
    r = requests.get(urljoin(relay_api_url, f'_puppet/runs/{run_id}'), headers=headers)
    r.raise_for_status()
    run = r.json()
    if run['state']['status'] != 'complete':
        # XXX: FIXME: We need to take into account next_update_before to handle
        # this properly.
        logging.info('Run is not yet complete (currently {}), waiting...'.format(run['state']['status']))
        time.sleep(5)  # fixed 5-second poll interval
        continue
    # Outputs are only published when present in the run state.
    if run['state'].get('job_id'):
        relay.outputs.set('jobID', run['state']['job_id'])
    if run['state'].get('outcome'):
        relay.outputs.set('outcome', run['state']['outcome'])
    logging.info('Run complete with outcome {}'.format(run['state'].get('outcome', '(unknown)')))
    break
| 26.780488 | 105 | 0.662113 | #!/usr/bin/env python
import logging
import time
from urllib.parse import urljoin
import requests
from relay_sdk import Interface, Dynamic as D
# Step inputs: Relay API endpoint/credentials and the id of the Puppet run to watch.
relay = Interface()
relay_api_url = relay.get(D.connection.relayAPIURL)
relay_api_token = relay.get(D.connection.token)
run_id = relay.get(D.id)
headers = {'Authorization': f'Bearer {relay_api_token}'}
# Poll the run's status until it reports "complete", then publish its
# job id / outcome as step outputs.
while True:
    r = requests.get(urljoin(relay_api_url, f'_puppet/runs/{run_id}'), headers=headers)
    r.raise_for_status()
    run = r.json()
    if run['state']['status'] != 'complete':
        # XXX: FIXME: We need to take into account next_update_before to handle
        # this properly.
        logging.info('Run is not yet complete (currently {}), waiting...'.format(run['state']['status']))
        time.sleep(5)  # fixed 5-second poll interval
        continue
    # Outputs are only published when present in the run state.
    if run['state'].get('job_id'):
        relay.outputs.set('jobID', run['state']['job_id'])
    if run['state'].get('outcome'):
        relay.outputs.set('outcome', run['state']['outcome'])
    logging.info('Run complete with outcome {}'.format(run['state'].get('outcome', '(unknown)')))
    break
| 0 | 0 | 0 |
6daf6cc726a7f05dfdd45c0c735937d37d9d3b2e | 1,671 | py | Python | src/config.py | gwohlgen/w2v_ol | bb67a3a1b54ca756cff1f28ef5ca5842e8a9c91d | [
"Apache-2.0"
] | 20 | 2017-04-05T10:37:26.000Z | 2022-03-02T17:24:16.000Z | src/config.py | gwohlgen/w2v_ol | bb67a3a1b54ca756cff1f28ef5ca5842e8a9c91d | [
"Apache-2.0"
] | 1 | 2019-04-02T07:33:53.000Z | 2019-04-02T07:33:53.000Z | src/config.py | gwohlgen/w2v_ol | bb67a3a1b54ca756cff1f28ef5ca5842e8a9c91d | [
"Apache-2.0"
] | 10 | 2018-02-06T12:10:37.000Z | 2021-02-13T05:54:57.000Z | #!/usr/bin/python
# you need to config this!
# set the model file, and if the model supports big-grams: set seed with bigrams..
## the conf dict stores all relevant config parameters
conf={}
conf['model'] = "climate2_2015_7.txt.2gram.small.model" # default dummy model
#conf['model'] = "climate2_2015_7.txt.2gram.model"
# if using a bigram model
conf['seedfn'] = "../data/climate.seed" # bigram seed for climate change models
# config for hypernym extraction
conf['num_taxomy_best'] = 1 # number of most similar terms to consider when building a taxonomy
conf['sim_threshold'] = 0.40
# if using a unigram model
#conf['seedfn'] = "../data/climate-single-word.seed"
# config for hypernym extraction
#conf['num_taxomy_best'] = 3 # number of most similar terms to consider when building a taxonomy
#conf['sim_threshold'] = 0.23
conf['binary_model'] = True # default: using a binary word2vec model (like created by Mikolov's C implementation)
conf['domain'] = "climate change" # your domain of knowledge -- not important for the algorithms ..
########################################################################################################################
# no need to change below this
DB_PATH= "../data/our.db"
#DB_PATH= "/home/wohlg/workspace/dl4j-0.4-examples/src/main/java/MinicBac/python/data/our.db"
print "db-path", DB_PATH
import sqlite3
def get_db():
    """ just connect to the sqlite3 database """
    # open the configured database file and cache the connection in conf
    db_connection = sqlite3.connect(DB_PATH)
    conf['db'] = db_connection
# model file name
conf['MFN'] = "../data/models/" + conf['model']
# setup logging
import logging, os
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
| 30.944444 | 120 | 0.666667 | #!/usr/bin/python
# you need to config this!
# set the model file, and if the model supports big-grams: set seed with bigrams..
## the conf dict stores all relevant config parameters
conf={}
conf['model'] = "climate2_2015_7.txt.2gram.small.model" # default dummy model
#conf['model'] = "climate2_2015_7.txt.2gram.model"
# if using a bigram model
conf['seedfn'] = "../data/climate.seed" # bigram seed for climate change models
# config for hypernym extraction
conf['num_taxomy_best'] = 1 # number of most similar terms to consider when building a taxonomy
conf['sim_threshold'] = 0.40
# if using a unigram model
#conf['seedfn'] = "../data/climate-single-word.seed"
# config for hypernym extraction
#conf['num_taxomy_best'] = 3 # number of most similar terms to consider when building a taxonomy
#conf['sim_threshold'] = 0.23
conf['binary_model'] = True # default: using a binary word2vec model (like created by Mikolov's C implementation)
conf['domain'] = "climate change" # your domain of knowledge -- not important for the algorithms ..
########################################################################################################################
# no need to change below this
DB_PATH= "../data/our.db"
#DB_PATH= "/home/wohlg/workspace/dl4j-0.4-examples/src/main/java/MinicBac/python/data/our.db"
print "db-path", DB_PATH
import sqlite3
def get_db():
    """ just connect to the sqlite3 database """
    # open the configured database file and cache the connection in conf
    db_connection = sqlite3.connect(DB_PATH)
    conf['db'] = db_connection
# model file name
conf['MFN'] = "../data/models/" + conf['model']
# setup logging
import logging, os
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
| 0 | 0 | 0 |
707397c9f2c573d36024259a0ef46852b223c300 | 71 | py | Python | neumiss/__init__.py | dirty-cat/NeuMiss | dd4c7b4c55d6cf9ccad194bac0caf3cbac320723 | [
"BSD-2-Clause"
] | 2 | 2021-08-10T10:04:05.000Z | 2021-08-18T16:09:34.000Z | neumiss/__init__.py | dirty-cat/NeuMiss | dd4c7b4c55d6cf9ccad194bac0caf3cbac320723 | [
"BSD-2-Clause"
] | null | null | null | neumiss/__init__.py | dirty-cat/NeuMiss | dd4c7b4c55d6cf9ccad194bac0caf3cbac320723 | [
"BSD-2-Clause"
] | null | null | null | from .neumiss_layer import NeuMiss
from .neumiss_mlp import NeuMissMLP
| 23.666667 | 35 | 0.859155 | from .neumiss_layer import NeuMiss
from .neumiss_mlp import NeuMissMLP
| 0 | 0 | 0 |
cb474b9a1b131517c08f3fdabe8b41a8937618ae | 1,726 | py | Python | app/api/resources.py | XIN-TU/easySpike | 727a37e4c88f640427a1c30f80bd0fead8427566 | [
"MIT"
] | null | null | null | app/api/resources.py | XIN-TU/easySpike | 727a37e4c88f640427a1c30f80bd0fead8427566 | [
"MIT"
] | null | null | null | app/api/resources.py | XIN-TU/easySpike | 727a37e4c88f640427a1c30f80bd0fead8427566 | [
"MIT"
] | null | null | null | """
REST API Resource Routing
http://flask-restplus.readthedocs.io
"""
from datetime import datetime
from flask import request
from flask_restplus import Resource
from .security import require_auth
from . import api_rest
from .func import *
from .engine import * # wildcard import the TTDS lib
class SecureResource(Resource):
    """ Calls require_auth decorator on all requests """
    # flask_restplus wraps every HTTP method handler of subclasses with these
    method_decorators = [require_auth]
@api_rest.route('/resource/<string:resource_id>')
class ResourceOne(Resource):
""" Unsecure Resource Class: Inherit from Resource """
@api_rest.route('/secure-resource/<string:resource_id>')
class SecureResourceOne(SecureResource):
""" Unsecure Resource Class: Inherit from Resource """
# this is the example hardcode test
@api_rest.route('/hello')
@api_rest.route('/demo10')
@api_rest.route('/search')
| 24.657143 | 61 | 0.673233 | """
REST API Resource Routing
http://flask-restplus.readthedocs.io
"""
from datetime import datetime
from flask import request
from flask_restplus import Resource
from .security import require_auth
from . import api_rest
from .func import *
from .engine import * # wildcard import the TTDS lib
class SecureResource(Resource):
    """ Calls require_auth decorator on all requests """
    # flask_restplus wraps every HTTP method handler of subclasses with these
    method_decorators = [require_auth]
@api_rest.route('/resource/<string:resource_id>')
class ResourceOne(Resource):
    """ Unsecure Resource Class: Inherit from Resource """
    def get(self, resource_id):
        # NOTE(review): timestamp is computed but unused -- the original
        # return was commented out; kept as-is to preserve behaviour.
        timestamp = datetime.utcnow().isoformat()
        # return {'timestamp': timestamp}
        return {'hello': 'world'}
    def post(self, resource_id):
        # Echo the request's JSON body back to the caller with HTTP 201.
        json_payload = request.json
        return {'timestamp': json_payload}, 201
@api_rest.route('/secure-resource/<string:resource_id>')
class SecureResourceOne(SecureResource):
    """ Secure resource: requests are wrapped by require_auth via SecureResource """
    def get(self, resource_id):
        # Return the current UTC time in ISO-8601 format.
        timestamp = datetime.utcnow().isoformat()
        return {'timestamp': timestamp}
# this is the example hardcode test
@api_rest.route('/hello')
class HelloWorld(Resource):
    """Hard-coded smoke-test endpoint."""
    def get(self):
        # fixed greeting for GET
        greeting = {'Hello': 'World'}
        return greeting
    def post(self):
        # fixed greeting for POST
        greeting = {'Hello' : 'Post'}
        return greeting
@api_rest.route('/demo10')
class demo10Recipes(Resource):
    # Both verbs return the first 10 records of sample.csv as JSON.
    def get(self):
        return csv2json('sample.csv', 10)
    def post(self):
        return csv2json('sample.csv', 10)
@api_rest.route('/search')
class SearchEntry(Resource):
    def post(self):
        # Payload shape implied by the indexing below:
        # {"data": {"key": ..., "para": ...}}
        data = request.get_json()
        received = data['data']
        # delegate the actual search to the engine module
        return engine_init(received['key'], received['para'])
| 567 | 22 | 284 |
ed0ad42fc1a7ca3834303f1c38e3ba12b01a27a1 | 3,470 | py | Python | Simple Calc-Tkinter.py | sachinkatageri/Tkinter-Simple-calculator | a5ba889da12f34d2575ce5375cd9829ce2c538eb | [
"Apache-2.0"
] | null | null | null | Simple Calc-Tkinter.py | sachinkatageri/Tkinter-Simple-calculator | a5ba889da12f34d2575ce5375cd9829ce2c538eb | [
"Apache-2.0"
] | null | null | null | Simple Calc-Tkinter.py | sachinkatageri/Tkinter-Simple-calculator | a5ba889da12f34d2575ce5375cd9829ce2c538eb | [
"Apache-2.0"
] | null | null | null | #sachin_katageri
#SKATCODE
from tkinter import*
me=Tk()
me.geometry("354x460")
me.title("CALCULATOR")
melabel = Label(me,text="CALCULATOR",bg='White',font=("Times",30,'bold'))
melabel.pack(side=TOP)
me.config(background='Dark gray')
textin=StringVar()
operator=""
metext=Entry(me,font=("Courier New",12,'bold'),textvar=textin,width=25,bd=5,bg='powder blue')
metext.pack()
but1=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(1),text="1",font=("Courier New",16,'bold'))
but1.place(x=10,y=100)
but2=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(2),text="2",font=("Courier New",16,'bold'))
but2.place(x=10,y=170)
but3=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(3),text="3",font=("Courier New",16,'bold'))
but3.place(x=10,y=240)
but4=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(4),text="4",font=("Courier New",16,'bold'))
but4.place(x=75,y=100)
but5=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(5),text="5",font=("Courier New",16,'bold'))
but5.place(x=75,y=170)
but6=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(6),text="6",font=("Courier New",16,'bold'))
but6.place(x=75,y=240)
but7=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(7),text="7",font=("Courier New",16,'bold'))
but7.place(x=140,y=100)
but8=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(8),text="8",font=("Courier New",16,'bold'))
but8.place(x=140,y=170)
but9=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(9),text="9",font=("Courier New",16,'bold'))
but9.place(x=140,y=240)
but0=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(0),text="0",font=("Courier New",16,'bold'))
but0.place(x=10,y=310)
butdot=Button(me,padx=47,pady=14,bd=4,bg='white',command=lambda:clickbut("."),text=".",font=("Courier New",16,'bold'))
butdot.place(x=75,y=310)
butpl=Button(me,padx=14,pady=14,bd=4,bg='white',text="+",command=lambda:clickbut("+"),font=("Courier New",16,'bold'))
butpl.place(x=205,y=100)
butsub=Button(me,padx=14,pady=14,bd=4,bg='white',text="-",command=lambda:clickbut("-"),font=("Courier New",16,'bold'))
butsub.place(x=205,y=170)
butml=Button(me,padx=14,pady=14,bd=4,bg='white',text="*",command=lambda:clickbut("*"),font=("Courier New",16,'bold'))
butml.place(x=205,y=240)
butdiv=Button(me,padx=14,pady=14,bd=4,bg='white',text="/",command=lambda:clickbut("/"),font=("Courier New",16,'bold'))
butdiv.place(x=205,y=310)
butclear=Button(me,padx=14,pady=119,bd=4,bg='white',text="CE",command=clrbut,font=("Courier New",16,'bold'))
butclear.place(x=270,y=100)
butequal=Button(me,padx=151,pady=14,bd=4,bg='white',command=equlbut,text="=",font=("Courier New",16,'bold'))
butequal.place(x=10,y=380)
me.mainloop()
| 35.050505 | 119 | 0.663112 | #sachin_katageri
#SKATCODE
from tkinter import*
me=Tk()
me.geometry("354x460")
me.title("CALCULATOR")
melabel = Label(me,text="CALCULATOR",bg='White',font=("Times",30,'bold'))
melabel.pack(side=TOP)
me.config(background='Dark gray')
textin=StringVar()
operator=""
def clickbut(number): #lambda:clickbut(1)
    """Append the pressed key to the expression string and refresh the display."""
    global operator
    operator=operator+str(number)
    textin.set(operator)
def equlbut():
    """Evaluate the accumulated expression and display the result.

    NOTE(review): equlbut is defined four times in this file with identical
    behaviour; only the last definition is bound when the module runs.
    """
    global operator
    add=str(eval(operator))  # WARNING: eval on raw user-entered text
    textin.set(add)
    operator=''
def equlbut():
    """Evaluate the accumulated expression and display the result.

    NOTE(review): shadowed by a later identical definition of equlbut.
    """
    global operator
    sub=str(eval(operator))  # WARNING: eval on raw user-entered text
    textin.set(sub)
    operator=''
def equlbut():
    """Evaluate the accumulated expression and display the result.

    NOTE(review): shadowed by a later identical definition of equlbut.
    """
    global operator
    mul=str(eval(operator))  # WARNING: eval on raw user-entered text
    textin.set(mul)
    operator=''
def equlbut():
    """Evaluate the accumulated expression and display the result.

    NOTE(review): this is the definition of equlbut that remains bound
    (the three earlier identical definitions are shadowed).
    """
    global operator
    div=str(eval(operator))  # WARNING: eval on raw user-entered text
    textin.set(div)
    operator=''
def clrbut():
    """Reset the calculator: blank the display and drop the pending expression.

    Bug fix: the original only cleared the display StringVar and left the
    module-level ``operator`` string intact, so digits typed after pressing
    CE were appended to the stale expression.
    """
    global operator
    operator = ''
    textin.set('')
metext=Entry(me,font=("Courier New",12,'bold'),textvar=textin,width=25,bd=5,bg='powder blue')
metext.pack()
but1=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(1),text="1",font=("Courier New",16,'bold'))
but1.place(x=10,y=100)
but2=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(2),text="2",font=("Courier New",16,'bold'))
but2.place(x=10,y=170)
but3=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(3),text="3",font=("Courier New",16,'bold'))
but3.place(x=10,y=240)
but4=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(4),text="4",font=("Courier New",16,'bold'))
but4.place(x=75,y=100)
but5=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(5),text="5",font=("Courier New",16,'bold'))
but5.place(x=75,y=170)
but6=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(6),text="6",font=("Courier New",16,'bold'))
but6.place(x=75,y=240)
but7=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(7),text="7",font=("Courier New",16,'bold'))
but7.place(x=140,y=100)
but8=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(8),text="8",font=("Courier New",16,'bold'))
but8.place(x=140,y=170)
but9=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(9),text="9",font=("Courier New",16,'bold'))
but9.place(x=140,y=240)
but0=Button(me,padx=14,pady=14,bd=4,bg='white',command=lambda:clickbut(0),text="0",font=("Courier New",16,'bold'))
but0.place(x=10,y=310)
butdot=Button(me,padx=47,pady=14,bd=4,bg='white',command=lambda:clickbut("."),text=".",font=("Courier New",16,'bold'))
butdot.place(x=75,y=310)
butpl=Button(me,padx=14,pady=14,bd=4,bg='white',text="+",command=lambda:clickbut("+"),font=("Courier New",16,'bold'))
butpl.place(x=205,y=100)
butsub=Button(me,padx=14,pady=14,bd=4,bg='white',text="-",command=lambda:clickbut("-"),font=("Courier New",16,'bold'))
butsub.place(x=205,y=170)
butml=Button(me,padx=14,pady=14,bd=4,bg='white',text="*",command=lambda:clickbut("*"),font=("Courier New",16,'bold'))
butml.place(x=205,y=240)
butdiv=Button(me,padx=14,pady=14,bd=4,bg='white',text="/",command=lambda:clickbut("/"),font=("Courier New",16,'bold'))
butdiv.place(x=205,y=310)
butclear=Button(me,padx=14,pady=119,bd=4,bg='white',text="CE",command=clrbut,font=("Courier New",16,'bold'))
butclear.place(x=270,y=100)
butequal=Button(me,padx=151,pady=14,bd=4,bg='white',command=equlbut,text="=",font=("Courier New",16,'bold'))
butequal.place(x=10,y=380)
me.mainloop()
| 460 | 0 | 153 |
41248fa6c1e374694f0ade81b43b48b465184936 | 640 | py | Python | Solver/Sorting.py | bchaiks/3D_Bin_Packing_Heuristics | f8c4ac1ec73f1b94287eb18fdcdff7bea685ec27 | [
"MIT"
] | 1 | 2021-03-17T03:36:58.000Z | 2021-03-17T03:36:58.000Z | Solver/Sorting.py | bchaiks/3D_Bin_Packing_Heuristics | f8c4ac1ec73f1b94287eb18fdcdff7bea685ec27 | [
"MIT"
] | 1 | 2021-03-13T11:36:54.000Z | 2021-03-16T03:42:11.000Z | Solver/Sorting.py | bchaiks/3D_Bin_Packing_Heuristics | f8c4ac1ec73f1b94287eb18fdcdff7bea685ec27 | [
"MIT"
] | 2 | 2020-11-10T15:24:16.000Z | 2021-03-13T05:38:59.000Z |
def SortArrayByArgMinIndex(array,index):
    ''' MAKE SURE TO SORT BY MOST IMPORTANT INDEX LAST!!! '''
    # Stable, in-place ascending sort of *array* by element *index*;
    # returns the same list object, as before.
    #
    # Bug fix: the previous hand-rolled insertion sort stopped scanning as
    # soon as the new element was >= the first element, and its left-to-right
    # "shift" duplicated/lost elements whenever an item had to move more than
    # one position (e.g. [[3],[1],[2]] came back as [[1],[3],[2]]).
    # list.sort is stable, so repeatedly sorting by keys of increasing
    # importance (least important first) still works as the docstring asks.
    array.sort(key=lambda item: item[index])
    return array
| 19.393939 | 58 | 0.61875 | def Randomize(partObjects):
# eventually want to randomize the order
# and re-run the whole thing to get a broader
# picture of the solution space
return(partObjects)
def SortArrayByArgMinIndex(array,index):
    ''' MAKE SURE TO SORT BY MOST IMPORTANT INDEX LAST!!! '''
    # Stable, in-place ascending sort of *array* by element *index*;
    # returns the same list object, as before.
    #
    # Bug fix: the previous hand-rolled insertion sort stopped scanning as
    # soon as the new element was >= the first element, and its left-to-right
    # "shift" duplicated/lost elements whenever an item had to move more than
    # one position (e.g. [[3],[1],[2]] came back as [[1],[3],[2]]).
    # list.sort is stable, so repeatedly sorting by keys of increasing
    # importance (least important first) still works as the docstring asks.
    array.sort(key=lambda item: item[index])
    return array
def UniqueValues(array):
    """Return the distinct values of *array*, preserving first-seen order."""
    unique = []
    for value in array:
        # list membership (not a set) so unhashable items keep working
        if value not in unique:
            unique.append(value)
    return unique
| 278 | 0 | 45 |
9f8cd2ddde865984af8946621f65626e1d20b95b | 132 | py | Python | conftest.py | mclellac/connery | d7444a46e023c8f5261d5b462ae970096cfecf0f | [
"EFL-2.0"
] | null | null | null | conftest.py | mclellac/connery | d7444a46e023c8f5261d5b462ae970096cfecf0f | [
"EFL-2.0"
] | null | null | null | conftest.py | mclellac/connery | d7444a46e023c8f5261d5b462ae970096cfecf0f | [
"EFL-2.0"
] | null | null | null | # This file lists files which should be ignored by pytest
collect_ignore = ["setup.py", "connery.py", "connery/modules/ipython.py"]
| 44 | 73 | 0.75 | # This file lists files which should be ignored by pytest
collect_ignore = ["setup.py", "connery.py", "connery/modules/ipython.py"]
| 0 | 0 | 0 |
858037440c5f062f7910b0e62eaa39b16bfb5efd | 252 | py | Python | test/test_nas.py | yngtodd/nas | d2642f8b6910f40daf458504be04ff13869d3a75 | [
"MIT"
] | 6 | 2019-07-05T09:31:12.000Z | 2021-04-28T06:41:09.000Z | test/test_nas.py | yngtodd/nas | d2642f8b6910f40daf458504be04ff13869d3a75 | [
"MIT"
] | 1 | 2020-02-28T01:21:41.000Z | 2020-02-28T01:21:41.000Z | test/test_nas.py | yngtodd/nas | d2642f8b6910f40daf458504be04ff13869d3a75 | [
"MIT"
] | 1 | 2020-08-26T04:31:11.000Z | 2020-08-26T04:31:11.000Z | """
Tests for `nas` module.
"""
import pytest
from nas import nas
| 12.6 | 29 | 0.615079 | """
Tests for `nas` module.
"""
import pytest
from nas import nas
class TestNas(object):
    """Placeholder pytest test class for the nas module."""
    @classmethod
    def setup_class(cls):
        # pytest hook: runs once before any test in this class
        pass
    def test_something(self):
        # TODO: add real assertions for nas
        pass
    @classmethod
    def teardown_class(cls):
        # pytest hook: runs once after all tests in this class
        pass
| 46 | 116 | 23 |
f8cda25e292e2fc1ec31f3eab7e56ffb77666b19 | 927 | py | Python | za/fileToC/test.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | za/fileToC/test.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | za/fileToC/test.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | #%%
import os
# IN_FILE = 'test.ncm'
# OUT_FILE = IN_FILE.split('.')[0]
fileToC('up5.html','htmlData')
# %%
| 22.071429 | 80 | 0.536138 | #%%
import os
# IN_FILE = 'test.ncm'
# OUT_FILE = IN_FILE.split('.')[0]
def fileToC(in_file, out_file):
    """Embed a binary file in a C source file as a const byte array.

    Reads *in_file* and writes ``<out_file>.c`` containing
    ``const unsigned char <basename(out_file)>[<size>]`` initialised with
    the input's bytes, 16 bytes per line.  If the input cannot be opened,
    the error is printed and nothing is written.
    """
    # 1 open the input (narrowed from bare Exception: only I/O errors expected)
    try:
        src = open(in_file, 'rb')
    except OSError as e:
        print(e)
        return
    in_size = os.path.getsize(in_file)
    array_name = os.path.basename(out_file)
    # 2 content -- context managers guarantee both files are closed even if a
    # write fails (the original leaked both handles on any error here)
    with src, open(out_file + '.c', 'w') as dst:
        dst.write('const unsigned char %s[%d] = {\n  ' % (array_name, in_size))
        while True:
            block = src.read(1024)
            if not block:
                break
            for i, byte in enumerate(block):
                dst.write('0x%02x' % byte + ', ')
                if not (i + 1) % 16:  # newline after every 16 bytes
                    dst.write('\n  ')
        # 3 closing brace
        dst.write('\n};')
    print('complete')
fileToC('up5.html','htmlData')
# %%
| 793 | 0 | 23 |
010fc7a46411fd1fb19a7627a5c1ef23cb23efba | 38,007 | py | Python | model.py | Yan98/S2FGAN | 14405875c0182735afa053ae4d8c67cebd1a6ab2 | [
"MIT"
] | 6 | 2020-12-08T13:08:08.000Z | 2021-12-06T05:04:48.000Z | model.py | Yan98/S2FGAN | 14405875c0182735afa053ae4d8c67cebd1a6ab2 | [
"MIT"
] | 1 | 2021-04-12T11:43:28.000Z | 2021-10-31T23:59:40.000Z | model.py | Yan98/S2FGAN | 14405875c0182735afa053ae4d8c67cebd1a6ab2 | [
"MIT"
] | 1 | 2021-11-04T00:28:47.000Z | 2021-11-04T00:28:47.000Z | """
This module is the concrete implementation of S2FGAN.
The structure of this module is as follows:
    make_kernel is used to initialise the kernel for blurring images
    Blur, a layer used to apply the blur kernel to its input
    PixelNorm, a layer used to apply pixel normalization
    EqualConv1d, convolution 1d with the equalized learning trick
    EqualConv2d, convolution 2d with the equalized learning trick
    EqualLinear, linear layer with the equalized learning trick
Embedding, attribute mapping networks.
Encoder, the encoder of S2FGAN.
StyledConv, the upblock for the decoder of S2FGAN.
    Discriminator, the discriminator of S2FGAN.
VGGPerceptualLoss, the perceptual loss based on VGG19.
"""
import math
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function
from torch.nn.init import normal_
from torch import autograd, optim
from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d, conv2d_gradfix
#Pixel Normalization
#create blur kernel
#Blur Layer
#Equlized convlution 2d
#trainable input layer for decoder
#Block for Attribute Mapping Network
#Attribute Mapping Network
#encoder
#decoder
#convolution layer with dowmsample and activation function
#residual block
#domain discriminator
#model discriminator
def requires_grad(model, flag=True):
    """
    Toggle gradient computation for every parameter of *model*.

    Parameters
    ----------
    model : pytorch model
    flag : bool, default True
        Value assigned to each parameter's ``requires_grad`` attribute.

    Returns
    -------
    None
    """
    for parameter in model.parameters():
        parameter.requires_grad = flag
#calculate generator loss
#VGG Perceptual loss
#The function is used downsample and binarize the input
#calculte r1 loss
| 34.086996 | 269 | 0.527876 | """
This module is the concrete implementation of S2FGAN.
The structure of this module is as follows:
    make_kernel is used to initialise the kernel for blurring images
    Blur, a layer used to apply the blur kernel to its input
    PixelNorm, a layer used to apply pixel normalization
    EqualConv1d, convolution 1d with the equalized learning trick
    EqualConv2d, convolution 2d with the equalized learning trick
    EqualLinear, linear layer with the equalized learning trick
Embedding, attribute mapping networks.
Encoder, the encoder of S2FGAN.
StyledConv, the upblock for the decoder of S2FGAN.
    Discriminator, the discriminator of S2FGAN.
VGGPerceptualLoss, the perceptual loss based on VGG19.
"""
import math
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function
from torch.nn.init import normal_
from torch import autograd, optim
from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d, conv2d_gradfix
#Pixel Normalization
class PixelNorm(nn.Module):
    """Normalize each pixel's feature vector to unit average magnitude."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        # divide by the RMS over the channel dimension (epsilon for stability)
        mean_sq = input.pow(2).mean(dim=1, keepdim=True)
        return input * torch.rsqrt(mean_sq + 1e-8)
#create blur kernel
def make_kernel(k):
    """Build a normalized 2D FIR kernel from a 1D or 2D tap list."""
    kernel = torch.tensor(k, dtype=torch.float32)
    if kernel.ndim == 1:
        # outer product turns 1D taps into a separable 2D kernel
        kernel = kernel.unsqueeze(0) * kernel.unsqueeze(1)
    # normalize so the taps sum to one
    return kernel / kernel.sum()
#Blur Layer
class Blur(nn.Module):
    """Filter the input with a fixed FIR blur kernel via upfirdn2d."""

    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()
        blur_kernel = make_kernel(kernel)
        if upsample_factor > 1:
            # rescale so overall magnitude is preserved after upsampling
            blur_kernel = blur_kernel * (upsample_factor ** 2)
        self.register_buffer("kernel", blur_kernel)
        self.pad = pad

    def forward(self, input):
        return upfirdn2d(input, self.kernel, pad=self.pad)
class Upsample(nn.Module):
    """Upsample a feature map by *factor* via upfirdn2d with a fixed FIR kernel."""

    def __init__(self, kernel, factor=2):
        super().__init__()
        self.factor = factor
        blur_kernel = make_kernel(kernel) * (factor ** 2)
        self.register_buffer("kernel", blur_kernel)
        # asymmetric padding keeps the output aligned after upsampling
        p = blur_kernel.shape[0] - factor
        self.pad = ((p + 1) // 2 + factor - 1, p // 2)

    def forward(self, input):
        return upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
#Equlized convlution 2d
class EqualConv2d(nn.Module):
    """2D convolution with run-time weight scaling (equalized learning rate)."""

    def __init__(
        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
    ):
        """
        Parameters
        ----------
        in_channel : int, channels of the input feature map
        out_channel : int, channels produced by the convolution
        kernel_size : int, spatial size of the square kernel
        stride : int, convolution stride
        padding : int, zero padding applied to both spatial dims
        bias : bool, add a learnable per-channel bias
        """
        super().__init__()
        weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        self.weight = nn.Parameter(weight)
        # weights are N(0, 1); rescaled by 1/sqrt(fan_in) on every forward
        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)
        self.stride = stride
        self.padding = padding
        self.bias = nn.Parameter(torch.zeros(out_channel)) if bias else None

    def forward(self, input):
        """Convolve *input* with the runtime-scaled kernel."""
        return conv2d_gradfix.conv2d(
            input,
            self.weight * self.scale,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
        )

    def __repr__(self):
        shape = self.weight.shape
        return (
            f"{self.__class__.__name__}({shape[1]}, {shape[0]},"
            f" {shape[2]}, stride={self.stride}, padding={self.padding})"
        )
class EqualLinear(nn.Module):
    """Linear layer with the equalized learning-rate trick."""

    def __init__(
        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
    ):
        """
        Parameters
        ----------
        in_dim : int, input feature count
        out_dim : int, output feature count
        bias : bool, add a learnable bias
        bias_init : float, initial value of the bias entries
        lr_mul : float, learning-rate multiplier applied to weights and bias
        activation : truthy to apply fused leaky ReLU after the projection
        """
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) if bias else None
        self.activation = activation
        # runtime weight scale: 1/sqrt(fan_in), times the lr multiplier
        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        """Project *input*; optionally apply fused leaky ReLU with the bias."""
        scaled_weight = self.weight * self.scale
        if self.activation:
            hidden = F.linear(input, scaled_weight)
            return fused_leaky_relu(hidden, self.bias * self.lr_mul)
        return F.linear(input, scaled_weight, bias=self.bias * self.lr_mul)

    def __repr__(self):
        return (
            f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})"
        )
class ModulatedConv2d(nn.Module):
    """Convolution whose kernel is modulated per sample by a style vector,
    with optional demodulation and optional 2x up/down-sampling."""
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        demodulate=True,
        upsample=False,
        downsample=False,
        blur_kernel=[1, 3, 3, 1]
    ):
        """
        Parameters
        ----------
        in_channel : int, channels of the input feature map
        out_channel : int, channels produced by the convolution
        kernel_size : int, spatial size of the square kernel
        style_dim : int, dimensionality of the attribute/style latent code
        demodulate : bool, rescale each modulated kernel to unit norm
        upsample : bool, stride-2 transposed convolution followed by a blur
        downsample : bool, blur followed by a stride-2 convolution
        blur_kernel : list of int, FIR taps for the anti-aliasing blur
        """
        super().__init__()
        self.eps = 1e-8
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample
        if upsample:
            # padding for the post-upsample blur
            factor = 2
            p = (len(blur_kernel) - factor) - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
        if downsample:
            # padding for the pre-downsample blur
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
        # equalized learning rate: N(0,1) weights scaled by 1/sqrt(fan_in)
        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)
        self.padding = kernel_size // 2
        # leading dim of 1 broadcasts against the per-sample style factors
        self.weight = nn.Parameter(
            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
        )
        # maps the style code to one multiplier per input channel
        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
        self.demodulate = demodulate
    def forward(self, input, style):
        """
        Apply the style-modulated convolution.

        Parameters
        ----------
        input : tensor of shape (batch, in_channel, height, width)
        style : tensor, per-sample style codes of size style_dim

        Returns
        -------
        tensor of shape (batch, out_channel, H', W') where H'/W' depend on
        the upsample/downsample configuration.
        """
        batch, in_channel, height, width = input.shape
        # per-sample, per-input-channel scaling of the shared kernel
        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        weight = self.scale * self.weight * style
        if self.demodulate:
            # renormalize each output filter to unit L2 norm (per sample)
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
        # grouped-conv trick: fold the batch into the filter dimension so a
        # single convolution applies a different kernel to every sample
        weight = weight.view(
            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
        )
        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            # transposed conv expects (in, out, kH, kW): swap the channel axes
            weight = weight.view(
                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
            )
            weight = weight.transpose(1, 2).reshape(
                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
            )
            out = conv2d_gradfix.conv_transpose2d(
                input, weight, padding=0, stride=2, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            # anti-alias after the stride-2 upsampling
            out = self.blur(out)
        elif self.downsample:
            # anti-alias before the stride-2 downsampling
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = conv2d_gradfix.conv2d(
                input, weight, padding=0, stride=2, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        else:
            # same-resolution path
            input = input.view(1, batch * in_channel, height, width)
            out = conv2d_gradfix.conv2d(
                input, weight, padding=self.padding, groups=batch
            )
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        return out
#trainable input layer for decoder
class ConstantInput(nn.Module):
    """Learned constant feature map that seeds the decoder; it is repeated
    across the batch of whatever tensor is passed in."""

    def __init__(self, channel, size=4):
        super().__init__()
        self.input = nn.Parameter(torch.randn(1, channel, size, size))

    def forward(self, input):
        # only the batch size of *input* is used
        batch_size = input.shape[0]
        return self.input.repeat(batch_size, 1, 1, 1)
class StyledConv(nn.Module):
    """Decoder up-block: two style-modulated convolutions with fused
    leaky-ReLU activations; the first convolution upsamples by 2."""

    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
    ):
        """
        Parameters
        ----------
        in_channel : int, channels of the incoming feature map
        out_channel : int, channels produced by both convolutions
        kernel_size : int, spatial kernel size
        style_dim : int, dimensionality of the attribute latent code
        blur_kernel : list of int, FIR taps used when upsampling
        demodulate : bool, apply weight demodulation in both convolutions
        """
        super().__init__()
        self.conv1 = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=True,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )
        self.activate1 = FusedLeakyReLU(out_channel)
        self.conv2 = ModulatedConv2d(
            out_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=False,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )
        self.activate2 = FusedLeakyReLU(out_channel)

    def forward(self, input, style):
        """Run both modulated convolutions, conditioning each on *style*."""
        hidden = self.activate1(self.conv1(input, style))
        return self.activate2(self.conv2(hidden, style))
class EqualConv1d(nn.Module):
    """1D convolution with run-time weight scaling (equalized learning rate)."""

    def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True):
        """
        Parameters
        ----------
        in_channel : int, channels of the input sequence
        out_channel : int, channels produced by the convolution
        kernel_size : int, temporal kernel size
        stride : int, convolution stride
        padding : int, zero padding applied to both ends
        bias : bool, add a learnable per-channel bias
        """
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_channel, in_channel, kernel_size))
        # constant factor applied to the N(0, 1) weights on every forward pass
        self.scale = 2 / math.sqrt(in_channel * out_channel * kernel_size)
        self.stride = stride
        self.padding = padding
        self.bias = nn.Parameter(torch.zeros(out_channel)) if bias else None

    def forward(self, x):
        """Convolve *x* with the scaled kernel."""
        scaled_weight = self.weight * self.scale
        return F.conv1d(x, scaled_weight, bias=self.bias, stride=self.stride, padding=self.padding)
class ToRGB(nn.Module):
    """Project a feature map to 3-channel RGB and optionally accumulate it
    onto an upsampled skip image."""

    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        if upsample:
            self.upsample = Upsample(blur_kernel)
        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))

    def forward(self, input, style, skip=None):
        """Return the RGB projection; add the upsampled `skip` when given."""
        rgb = self.conv(input, style) + self.bias
        if skip is None:
            return rgb
        return rgb + self.upsample(skip)
#Block for Attribute Mapping Network
class Modify(nn.Module):
    """Residual attention-style block of the attribute mapping network.

    A small 1-D conv stack produces features; a channel-normalized
    similarity matrix (f @ f^T) re-weights them before the residual skip.
    """

    def __init__(self, in_channel):
        super().__init__()
        self.model = nn.Sequential(
            EqualConv1d(in_channel, 64, 3, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            EqualConv1d(64, 64, 3, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.w = EqualConv1d(64, 64, 3, padding=1, bias=False)
        self.h = EqualConv1d(64, 64, 3, padding=1, bias=False)
        self.n = EqualConv1d(64, 64, 3, padding=1, bias=False)
        self.skip = EqualConv1d(in_channel, 64, 1, bias=False)

    def forward(self, input):
        feats = self.model(input)
        key = self.w(feats)
        # L2-normalize along the channel dimension (eps avoids div by zero).
        key = key / (torch.norm(key, p=2, dim=1, keepdim=True) + 1e-8)
        attended = self.n(key.bmm(key.permute(0, 2, 1)).bmm(self.h(feats)))
        return attended + self.skip(input)
#Attribute Mapping Network
class Embeding(nn.Module):
    """Attribute mapping network: turns an attribute vector `a` into a
    displacement of the 512-d latent code `x`.

    With reg=True only the displacement is returned (used as a
    regularization target); otherwise the displaced latent `x + d`.
    """

    def __init__(self, c_dim):
        super().__init__()
        self.directions = nn.Parameter(torch.zeros(1, c_dim, 512))
        self.b1 = Modify(c_dim + 1)
        self.b2 = Modify(64)
        self.b3 = Modify(64)
        self.b4 = Modify(64)
        self.b5 = EqualConv1d(64, 1, 1, bias=False)

    def forward(self, x, a, reg=False):
        batch = a.size(0)
        d = self.directions.repeat(batch, 1, 1)
        # 1 when the attribute vector is non-zero, else 0 — gates whether x
        # itself is appended as an extra row. NOTE(review): the name
        # `is_reconstruct` suggests the opposite polarity; confirm intent.
        is_reconstruct = ((a.sum(1, keepdim=True) != 0.0).float()).view(batch, 1, 1)
        d = torch.cat(
            (d * a.view(-1, a.size(1), 1), x.view(x.size(0), 1, 512) * is_reconstruct),
            1,
        )
        d = self.b1(d)
        d = self.b2(d)
        d = self.b3(d)
        d = self.b4(d)
        d = self.b5(d).view(-1, 512)
        return d if reg else x + d
#encoder
class Encoder(nn.Module):
    """Multi-scale convolutional encoder producing a 512-d style latent.

    Each downsampling stage feeds an MLP head; the per-stage style vectors
    are averaged, pixel-normalized, optionally concatenated with a 100-d
    noise vector, and passed through a final MLP.

    Parameters
    ----------
    in_channels : int, channels of the input image (1 sketch / 3 RGB)
    dim : int, base channel width
    n_downsample : int, number of stride-2 stages
    max_dim : int, channel width cap
    noise : bool, reserve 100 extra input dims for a noise vector
    """

    def __init__(self, in_channels=1, dim=64, n_downsample=5, max_dim=512, noise=False):
        super().__init__()
        # Adaptive-pool output size chosen per channel width so that the
        # flattened feature fed to each MLP head stays moderate.
        pool_size = {
            32: 4,
            64: 3,
            128: 2,
            256: 2,
            512: 1,
        }
        self.vision = ConvLayer(in_channels, dim, 1)
        conv_layers = []
        linear_layers = []
        # Downsampling
        dim_cur = dim
        dim_next = dim * 2
        for _ in range(n_downsample):
            conv_layers += [
                nn.Sequential(
                    ResBlock(dim_cur, dim_next),
                    ResBlock(dim_next, dim_next, downsample=False)
                )
            ]
            linear_layers += [nn.Sequential(
                nn.AdaptiveAvgPool2d(pool_size[dim_next]),
                nn.Flatten(),
                EqualLinear(dim_next * pool_size[dim_next] ** 2, 512, lr_mul=0.01, activation="fused_lrelu"),
                *[EqualLinear(512, 512, lr_mul=0.01, activation="fused_lrelu") for _ in range(3)]
            )]
            dim_cur = dim_next
            dim_next = min(max_dim, dim_next * 2)
        self.model = nn.ModuleList(conv_layers)
        self.linear = nn.ModuleList(linear_layers)
        self.norm = PixelNorm()
        extra_dimension = 100 if noise else 0
        self.final = nn.Sequential(
            EqualLinear(512 + extra_dimension, 512, lr_mul=0.01, activation="fused_lrelu"),
            *[EqualLinear(512, 512, lr_mul=0.01, activation="fused_lrelu") for _ in range(4)]
        )

    def forward(self, x, noise=None):
        """Encode `x` to a 512-d style vector; `noise` is an optional
        pre-normalized extra input (requires noise=True at construction)."""
        x = self.vision(x)
        style = 0
        for stage, head in zip(self.model, self.linear):
            x = stage(x)
            style = style + head(x)
        # Average the per-stage style vectors. (Fixed: previously divided
        # by the leaked loop index `index + 1`, which is the same value but
        # relied on the loop variable escaping the loop.)
        style = style / len(self.model)
        style = self.norm(style)
        # Fixed: was `noise != None`; identity comparison is correct here.
        if noise is not None:
            noise = self.norm(noise)
            style = torch.cat((style, noise), 1)
        style = self.final(style)
        return style
#decoder
class Generator(nn.Module):
    """StyleGAN2-style decoder mapping a 512-d style vector to an RGB image.

    A learned constant 4x4 input is refined by a stack of styled
    up-convolutions (4 -> 256 resolution per the stage comments below);
    every stage contributes to the output through a ToRGB skip connection.
    """

    def __init__(
        self,
        c_dim,
        style_dim=512,
        n_mlp=8,
        channel_multiplier=1,
        blur_kernel=[1, 3, 3, 1],
        lr_mlp=0.01,
    ):
        super().__init__()
        # Channel width for each spatial resolution.
        self.channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }
        self.input = ConstantInput(self.channels[4])
        # First conv keeps the 4x4 resolution.
        self.conv1 = ModulatedConv2d(
            512,
            512,
            3,
            style_dim,
            upsample=False,
            blur_kernel=blur_kernel,
            demodulate=True,
        )
        self.activate1 = FusedLeakyReLU(512)
        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
        # Each StyledConv doubles the resolution.
        self.convs = nn.ModuleList([
            StyledConv(512, 512, 3, style_dim, blur_kernel),  # 4 - 8
            StyledConv(512, 512, 3, style_dim, blur_kernel),  # 8 - 16
            StyledConv(512, 512, 3, style_dim, blur_kernel),  # 16 - 32
            StyledConv(512, 256 * channel_multiplier, 3, style_dim, blur_kernel),  # 32 - 64
            StyledConv(256 * channel_multiplier, 128 * channel_multiplier, 3, style_dim, blur_kernel),  # 64 - 128
            StyledConv(128 * channel_multiplier, 64 * channel_multiplier, 3, style_dim, blur_kernel),  # 128 - 256
        ])
        self.to_rgbs = nn.ModuleList([
            ToRGB(512, style_dim),  # 8
            ToRGB(512, style_dim),  # 16
            ToRGB(512, style_dim),  # 32
            ToRGB(256 * channel_multiplier, style_dim),  # 64
            ToRGB(128 * channel_multiplier, style_dim),  # 128
            ToRGB(64 * channel_multiplier, style_dim),  # 256
        ])

    def forward(self, style):
        """Generate an image from a single style vector (the same style is
        fed to every layer)."""
        x = self.input(style)
        x = self.conv1(x, style)
        x = self.activate1(x)
        skip = self.to_rgb1(x, style)
        for index in range(len(self.convs)):
            x = self.convs[index](x, style)
            skip = self.to_rgbs[index](x, style, skip)
        return skip
#convolution layer with dowmsample and activation function
class ConvLayer(nn.Sequential):
    """Convolution block with optional blur-downsampling and activation.

    When `activate` is set the bias is folded into the fused leaky-ReLU
    instead of the convolution itself.
    """

    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
    ):
        layers = []
        if downsample:
            # Blur before the stride-2 convolution to reduce aliasing.
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            layers.append(Blur(blur_kernel, pad=((p + 1) // 2, p // 2)))
            stride = 2
            self.padding = 0
        else:
            stride = 1
            self.padding = kernel_size // 2
        layers.append(
            EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias and not activate,
            )
        )
        if activate:
            layers.append(FusedLeakyReLU(out_channel, bias=bias))
        super().__init__(*layers)
#residual block
class ResBlock(nn.Module):
    """Residual block: two conv layers plus a 1x1 skip path, with the sum
    rescaled by 1/sqrt(2) to keep roughly unit variance."""

    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1], downsample=True):
        super().__init__()
        self.conv1 = ConvLayer(in_channel, in_channel, 3)
        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=downsample)
        self.skip = ConvLayer(
            in_channel, out_channel, 1, downsample=downsample, activate=False, bias=False
        )

    def forward(self, input):
        residual = self.conv2(self.conv1(input))
        return (residual + self.skip(input)) / math.sqrt(2)
#domain discriminator
class GradReverse(Function):
    """Gradient reversal layer: identity in the forward pass, gradients
    scaled by -beta on the way back (adversarial domain adaptation)."""

    @staticmethod
    def forward(ctx, x, beta=1.0):
        ctx.beta = beta
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reverse (and scale) the gradient; beta itself gets no gradient.
        return grad_output.neg() * ctx.beta, None
class Linear(nn.Module):
    """Fully connected layer with 1/sqrt(in_dim) weight scaling applied at
    forward time and a small gaussian weight init."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
        normal_(self.weight, 0, 0.001)
        self.bias = nn.Parameter(torch.zeros(out_dim))
        self.scale = 1 / math.sqrt(in_dim)

    def forward(self, input):
        return F.linear(input, self.weight * self.scale, bias=self.bias)
class Domain_Discriminator(nn.Module):
    """Predicts the domain of a 512-d latent code through a gradient
    reversal layer, pushing the encoders toward domain-invariant latents."""

    def __init__(self):
        super().__init__()
        self.feature = Linear(512, 512)
        self.relu = nn.ReLU(inplace=True)
        self.fc = Linear(512, 1)

    def forward(self, x):
        hidden = GradReverse.apply(x)
        hidden = self.relu(self.feature(hidden))
        return self.fc(hidden)
class Classifier(nn.Module):
    """Linear attribute classifier whose normalized weight columns double
    as attribute-editing directions in latent space."""

    def __init__(self, c_dim):
        super().__init__()
        self.W = nn.Parameter(torch.randn(512, c_dim))
        self.c_dim = c_dim
        nn.init.xavier_uniform_(self.W.data, gain=1)

    def forward(self, x, ortho=False):
        # Normalize each column so every direction has unit L2 norm.
        self.W_norm = self.W / self.W.norm(dim=0)
        scores = torch.matmul(x, self.W_norm)
        if not ortho:
            return scores
        # L1 penalty pushing W_norm^T W_norm toward the identity matrix.
        identity = torch.diag(torch.ones(self.c_dim, device=x.device))
        gram = self.W_norm.transpose(1, 0).matmul(self.W_norm)
        return scores, nn.L1Loss()(gram, identity)

    def edit(self, x, a):
        """Shift latent `x` along each direction by the amounts in `a`."""
        self.W_norm = self.W / self.W.norm(dim=0)
        directions = self.W_norm.view(1, 512, -1)
        amounts = a.view(a.size(0), 1, -1)
        return x + (directions * amounts).sum(-1)
#model discriminator
class Discriminator(nn.Module):
    """Image discriminator; for model_type 1 it additionally predicts
    attribute logits from the shared convolutional features."""

    def __init__(self, in_channels, c_dim, model_type, channel_multiplier=1, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        self.convs = nn.Sequential(
            ConvLayer(in_channels, 64 * channel_multiplier, 1),  # 256
            ResBlock(64 * channel_multiplier, 128 * channel_multiplier),  # 256 - 128
            ResBlock(128 * channel_multiplier, 256 * channel_multiplier),  # 128 - 64
            ResBlock(256 * channel_multiplier, 512),  # 64 - 32
            ResBlock(512, 512),  # 32 - 16
            ResBlock(512, 512),  # 16 - 8
            ResBlock(512, 512),  # 8 - 4
        )
        self.final_linear = nn.Sequential(
            EqualLinear(512 * 4 * 4, 512, activation="fused_lrelu"),
            EqualLinear(512, 1),
        )
        if model_type == 1:
            # Attribute head, only present for model_type 1.
            self.W = nn.Sequential(
                EqualLinear(512 * 4 * 4, 512, activation="fused_lrelu"),
                EqualLinear(512, c_dim),
            )
        self.model_type = model_type

    def forward(self, input):
        feats = self.convs(input).flatten(1)
        realness = self.final_linear(feats)
        if self.model_type == 0:
            # No attribute head: return a detached zero placeholder.
            return realness, (feats * 0).detach()
        return realness, self.W(feats)
def requires_grad(model, flag=True):
    """
    Enable or disable gradient computation for every parameter of `model`.

    Parameters
    ----------
    model : pytorch model
    flag : bool, default True

    Returns
    -------
    None
    """
    for param in model.parameters():
        param.requires_grad = flag
#calculate generator loss
def g_nonsaturating_loss(fake_pred):
    """Non-saturating GAN generator loss: mean of softplus(-D(G(z)))."""
    return F.softplus(-fake_pred).mean()
#VGG Perceptual loss
class VGGPerceptualLoss(torch.nn.Module):
    """Perceptual loss: weighted sum of L1 distances between VGG-19
    feature maps of the input and the target.

    The VGG network is frozen (requires_grad=False) and split into five
    consecutive slices of `vgg19.features`; deeper slices contribute with
    larger weights.
    """

    def __init__(self):
        super().__init__()
        model = torchvision.models.vgg19(pretrained=True)
        slices = [
            model.features[:2].eval(),
            model.features[2:7].eval(),
            model.features[7:12].eval(),
            model.features[12:21].eval(),
            model.features[21:30].eval(),
        ]
        # Fixed: the original wrapped the list in nn.ModuleList twice,
        # which was redundant; a single ModuleList registers the slices.
        self.blocks = torch.nn.ModuleList(slices)
        self.weights = [1 / 32.0, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
        for p in self.parameters():
            p.requires_grad = False

    def forward(self, input, target):
        """Return the weighted perceptual distance between input and target.

        Non-3-channel inputs (e.g. grayscale sketches) are repeated to
        3 channels to match VGG's expected input.
        """
        if input.shape[1] != 3:
            input = input.repeat(1, 3, 1, 1)
            target = target.repeat(1, 3, 1, 1)
        loss = 0.0
        x, y = input, target
        for i, block in enumerate(self.blocks):
            x = block(x)
            y = block(y)
            loss += torch.nn.functional.l1_loss(x, y) * self.weights[i]
        return loss
#The function is used downsample and binarize the input
def downsample(masks):
    """Halve the spatial resolution of `masks` and binarize the result.

    NOTE(review): the binarization threshold is 0 (a `0.5` is commented
    out in the original), so every non-negative value maps to 1 — confirm
    this is intended.
    """
    masks = F.interpolate(masks, scale_factor=1 / 2, mode="bilinear",
                          align_corners=True, recompute_scale_factor=True)
    positive = masks >= 0  # .5
    masks[positive] = 1
    masks[~positive] = 0
    return masks
#calculte r1 loss
def d_r1_loss(real_pred, real_img):
    """R1 gradient penalty: mean per-sample squared norm of
    d(real_pred)/d(real_img)."""
    (grad_real,) = autograd.grad(
        outputs=real_pred.sum(), inputs=real_img, create_graph=True
    )
    return grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()
class Model(nn.Module):
    """Top-level training wrapper.

    Bundles the sketch/image encoders, the decoder (generator), the
    attribute classifier, the image and domain discriminators, the VGG
    perceptual loss and their two Adam optimizers (g_optim / d_optim).
    `forward` dispatches on boolean flags to compute the discriminator
    loss, the R1 regularizer, the generator loss, or generated images.

    model_type 0 edits attributes via the classifier's linear directions;
    model_type 1 via the `Embeding` mapping network (inferred from the
    branches below — confirm against the training script).
    """

    def __init__(self, args, c_dim, augment):
        super().__init__()
        self.args = args
        self.encoder_sketch = Encoder(1, 128, 5)
        self.encoder_img = Encoder(3, 64, 6)
        self.generator = Generator(c_dim)
        self.classifier = Classifier(c_dim)
        if args.model_type == 1:
            # Attribute mapping network exists only for model_type 1.
            self.edit = Embeding(c_dim)
        self.img_discriminator = Discriminator(3, c_dim, args.model_type)
        self.domain_discriminator = Domain_Discriminator()
        self.vgg = VGGPerceptualLoss()
        self.augment = augment
        # Rescales lr/betas of the discriminator optimizer to compensate
        # for running R1 only every `d_reg_every` steps (lazy regularization).
        d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)
        if args.model_type == 0:
            # Classifier and domain discriminator get their own betas and
            # weight decay inside the generator optimizer.
            self.g_optim = optim.Adam(
                [{'params': list(self.encoder_sketch.parameters()) + list(self.encoder_img.parameters()) + list(self.generator.parameters())},
                 {'params': self.classifier.parameters(), "betas": (0.9, 0.999), "weight_decay": 0.0005},
                 {'params': list(self.domain_discriminator.parameters()), "betas": (0.9, 0.999), "weight_decay": 0.0005}
                 ],
                lr=args.lr,
                betas=(0, 0.99)
            )
            self.d_optim = optim.Adam(
                self.img_discriminator.parameters(),
                lr=args.lr * d_reg_ratio,
                betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
            )
        else:
            # model_type 1: the mapping network trains with the generator,
            # the classifier with the discriminator.
            self.g_optim = optim.Adam(
                [{'params': list(self.encoder_sketch.parameters()) + list(self.encoder_img.parameters()) + list(self.edit.parameters()) + list(self.generator.parameters())},
                 {'params': list(self.domain_discriminator.parameters()), "betas": (0.9, 0.999), "weight_decay": 0.0005}
                 ],
                lr=args.lr,
                betas=(0, 0.99),
            )
            self.d_optim = optim.Adam(
                [{'params': self.img_discriminator.parameters()},
                 {'params': self.classifier.parameters(), "betas": (0.9, 0.999), "weight_decay": 0.0005}],
                lr=args.lr * d_reg_ratio,
                betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio)
            )

    def forward(self, img=None, sketch=None, sampled_ratio=None, label=None, target_mask=None, domain_img=None, domain_sketch=None, ada_aug_p=None, noise=None, train_discriminator=False, d_regularize=False, train_generator=False, generate=False):
        """Dispatch on the mode flags; exactly one of train_discriminator /
        d_regularize / train_generator / generate is expected to be set.
        NOTE(review): the `noise` argument is accepted but never used here.
        """
        augment = self.augment
        # Freeze/unfreeze sub-networks depending on which side is trained.
        if train_discriminator or d_regularize:
            requires_grad(self.encoder_sketch, False)
            requires_grad(self.encoder_img, False)
            requires_grad(self.generator, False)
            requires_grad(self.domain_discriminator, False)
            requires_grad(self.img_discriminator, True)
            if self.args.model_type == 1:
                requires_grad(self.edit, False)
                requires_grad(self.classifier, False)
            else:
                requires_grad(self.classifier, True)
        else:
            requires_grad(self.encoder_sketch, True)
            requires_grad(self.encoder_img, True)
            requires_grad(self.generator, True)
            requires_grad(self.domain_discriminator, True)
            requires_grad(self.img_discriminator, False)
            if self.args.model_type == 1:
                requires_grad(self.edit, True)
                requires_grad(self.classifier, True)
            else:
                requires_grad(self.classifier, False)
        if train_discriminator:
            if self.args.model_type == 0:
                img_latent = self.encoder_img(img)
                fake_img = self.generator(img_latent)
                if self.args.augment:
                    # ADA-style augmentation of both real and fake batches.
                    real_img_aug, _ = augment(img, ada_aug_p)
                    fake_img, _ = augment(fake_img, ada_aug_p)
                else:
                    real_img_aug = img
                fake_img_pred, _ = self.img_discriminator(fake_img)
                real_img_pred, bce = self.img_discriminator(real_img_aug)
                return fake_img_pred, real_img_pred, bce
            else:
                img_latent = self.encoder_img(img)
                img_latent_1 = self.edit(img_latent, sampled_ratio)
                fake_img = self.generator(img_latent_1)
                # Classifier regression target: labels mapped to {-1, 1}.
                bce = nn.MSELoss()(self.classifier(img_latent), label * 2 - 1)
                if self.args.augment:
                    real_img_aug, _ = augment(img, ada_aug_p)
                    fake_img, _ = augment(fake_img, ada_aug_p)
                else:
                    real_img_aug = img
                fake_img_pred, _ = self.img_discriminator(fake_img)
                real_img_pred, real_class = self.img_discriminator(real_img_aug)
                # NOTE(review): weighted by 0.0 below, so this term is
                # effectively disabled.
                outer_bce = nn.BCEWithLogitsLoss()(real_class, label)
                return fake_img_pred, real_img_pred, bce + outer_bce * 0.0
        if d_regularize:
            # R1 gradient penalty on real images only.
            real_pred_img, _ = self.img_discriminator(img)
            r1_loss = d_r1_loss(real_pred_img, img)
            return r1_loss
        if train_generator:
            img_latent = self.encoder_img(img)
            sketch_latent = self.encoder_sketch(downsample(sketch))
            # Pull the sketch latent toward the (detached) image latent.
            sketch_loss = nn.L1Loss()(sketch_latent, img_latent.detach())
            reconstruct_img = self.generator(img_latent)
            vgg_loss = self.vgg(reconstruct_img, img)
            reconstruct_loss = nn.L1Loss()(reconstruct_img, img)
            # Adversarial domain confusion through the reversal layer.
            domain_loss = nn.BCEWithLogitsLoss()(self.domain_discriminator(img_latent.detach()), domain_img) + \
                nn.BCEWithLogitsLoss()(self.domain_discriminator(sketch_latent), domain_sketch)
            if self.args.model_type == 0:
                bce, orthologoy = self.classifier(img_latent, True)
                bce = nn.MSELoss()(bce, label * 2 - 1)
                if self.args.augment:
                    reconstruct_img, GC = augment(reconstruct_img, ada_aug_p)
                fake_pred_img, _ = self.img_discriminator(reconstruct_img)
                g_loss_img = g_nonsaturating_loss(fake_pred_img)
                g_total = sketch_loss * 2.5 +\
                    domain_loss * 0.1 +\
                    vgg_loss * 2.5 +\
                    reconstruct_loss * 2.5 +\
                    g_loss_img +\
                    bce * 0.5 +\
                    orthologoy
                return g_total
            else:
                img_latent_1 = self.edit(img_latent, sampled_ratio)
                sketch_latent_1 = self.edit(sketch_latent, sampled_ratio)
                fake_img = self.generator(img_latent_1)
                # Regularizer: a zero attribute change should give zero offset.
                reg = self.edit(img_latent, sampled_ratio * 0.0, reg=True).abs().mean()
                # Latent cycle consistency: edit then un-edit should recover x.
                latent_reconstruct = (self.edit(self.edit(img_latent.detach(), sampled_ratio), -sampled_ratio) - img_latent.detach()).abs().mean()
                # Edited latents should move classifier scores by sampled_ratio.
                base_score = self.classifier(img_latent).detach() + sampled_ratio
                edit_loss = nn.MSELoss()(self.classifier(img_latent_1), base_score)
                domain_loss = domain_loss + \
                    nn.BCEWithLogitsLoss()(self.domain_discriminator(img_latent_1.detach()), domain_img) + \
                    nn.BCEWithLogitsLoss()(self.domain_discriminator(sketch_latent_1), domain_sketch)
                if self.args.augment:
                    fake_img, _ = augment(fake_img, ada_aug_p)
                fake_pred_img, fake_class = self.img_discriminator(fake_img)
                g_loss_img = g_nonsaturating_loss(fake_pred_img)
                # NOTE(review): weighted by 0.0 below — disabled.
                outer_edit = nn.BCEWithLogitsLoss()(fake_class, target_mask * 1.0)
                g_total = vgg_loss * 2.5 +\
                    reg * 0.1 +\
                    (edit_loss + outer_edit * 0.0) * 1.0 +\
                    reconstruct_loss * 1.0 +\
                    latent_reconstruct * 1.0 +\
                    sketch_loss * 2.5 +\
                    domain_loss * 0.05 +\
                    g_loss_img
                return g_total
        if generate:
            # Inference: decode both the image latent and the edited sketch latent.
            img = self.encoder_img(img)
            sketch = self.encoder_sketch(downsample(sketch))
            if self.args.model_type == 0:
                sketch = self.classifier.edit(sketch, sampled_ratio)
            else:
                sketch = self.edit(sketch, sampled_ratio)
            img = self.generator(img)
            sketch = self.generator(sketch)
            return img, sketch
| 24,635 | 9,883 | 1,590 |
5a1d2a0240e421bf1d94e403e1e15b300f595f14 | 21,266 | py | Python | site-packages/jedi/evaluate/finder.py | oz90210/Pyto | 59f185149b71e57e5debeb1c9a61a28739e81720 | [
"MIT"
] | null | null | null | site-packages/jedi/evaluate/finder.py | oz90210/Pyto | 59f185149b71e57e5debeb1c9a61a28739e81720 | [
"MIT"
] | 1 | 2020-04-25T20:36:07.000Z | 2020-04-25T20:36:07.000Z | site-packages/jedi/evaluate/finder.py | Wristlebane/Pyto | 901ac307b68486d8289105c159ca702318bea5b0 | [
"MIT"
] | null | null | null | #\input texinfo
"""
Searching for names with given scope and name. This is very central in Jedi and
Python. The name resolution is quite complicated with descripter,
``__getattribute__``, ``__getattr__``, ``global``, etc.
Flow checks
+++++++++++
Flow checks are not really mature. There's only a check for ``isinstance``. It
would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
check for -> a is a string). There's big potential in these checks.
"""
from itertools import chain
from jedi._compatibility import unicode, u
from jedi.parser import tree as pr
from jedi import debug
from jedi import common
from jedi import settings
from jedi.evaluate import representation as er
from jedi.evaluate import dynamic
from jedi.evaluate import compiled
from jedi.evaluate import docstrings
from jedi.evaluate import iterable
from jedi.evaluate import imports
from jedi.evaluate import analysis
from jedi.evaluate import flow_analysis
from jedi.evaluate import param
from jedi.evaluate import helpers
from jedi.evaluate.cache import memoize_default
def filter_after_position(names, position):
    """
    Removes all names after a certain position. If position is None, just
    returns the names list.
    """
    if position is None:
        return names

    def _keep(n):
        # Names defined before `position` are kept; list comprehensions and
        # lambdas are kept regardless of position.
        return (n.start_pos[0] is not None and n.start_pos < position) \
            or isinstance(n.get_definition(), (pr.CompFor, pr.Lambda))

    return [n for n in names if _keep(n)]
def filter_definition_names(names, origin, position=None):
    """
    Filter names that are actual definitions in a scope. Names that are just
    used will be ignored.
    """
    # Just calculate the scope from the first
    # NOTE(review): assumes `names` is non-empty (indexes names[0]).
    stmt = names[0].get_definition()
    scope = stmt.get_parent_scope()

    # Executions of lambda wrappers skip the position filter; everything
    # else is restricted to names defined before `position`.
    if not (isinstance(scope, er.FunctionExecution)
            and isinstance(scope.base, er.LambdaWrapper)):
        names = filter_after_position(names, position)
    names = [name for name in names if name.is_definition()]

    # Private name mangling (compile.c) disallows access on names
    # preceeded by two underscores `__` if used outside of the class. Names
    # that also end with two underscores (e.g. __id__) are not affected.
    for name in list(names):
        if name.value.startswith('__') and not name.value.endswith('__'):
            if filter_private_variable(scope, origin):
                names.remove(name)
    return names
@memoize_default([], evaluator_is_first_arg=True)
def _remove_statements(evaluator, stmt, name):
    """
    This is the part where statements are being stripped.

    Due to lazy evaluation, statements like a = func; b = a; b() have to be
    evaluated.
    """
    types = []
    # Remove the statement docstr stuff for now, that has to be
    # implemented with the evaluator class.
    #if stmt.docstr:
        #res_new.append(stmt)

    check_instance = None
    # Class variables accessed through an instance are unwrapped here and
    # re-wrapped as instance elements after evaluation.
    if isinstance(stmt, er.InstanceElement) and stmt.is_class_var:
        check_instance = stmt.instance
        stmt = stmt.var

    types += evaluator.eval_statement(stmt, seek_name=name)

    if check_instance is not None:
        # class renames
        types = [er.get_instance_el(evaluator, check_instance, a, True)
                 if isinstance(a, (er.Function, pr.Function))
                 else a for a in types]
    return types
def check_flow_information(evaluator, flow, search_name, pos):
    """ Try to find out the type of a variable just with the information that
    is given by the flows: e.g. It is also responsible for assert checks.::

        if isinstance(k, str):
            k.  # <- completion here

    ensures that `k` is a string.
    """
    # Flow analysis is optional and can be disabled via settings.
    if not settings.dynamic_flow_information:
        return None

    result = []
    if flow.is_scope():
        # Check for asserts.
        try:
            names = reversed(flow.names_dict[search_name.value])
        except (KeyError, AttributeError):
            names = []

        # Only asserts that appear before `pos` narrow the type.
        for name in names:
            ass = name.get_parent_until(pr.AssertStmt)
            if isinstance(ass, pr.AssertStmt) and pos is not None and ass.start_pos < pos:
                result = _check_isinstance_type(evaluator, ass.assertion(), search_name)
                if result:
                    break

    # `if isinstance(...)` / `while isinstance(...)` conditions.
    if isinstance(flow, (pr.IfStmt, pr.WhileStmt)):
        element = flow.children[1]
        result = _check_isinstance_type(evaluator, element, search_name)
    return result
def global_names_dict_generator(evaluator, scope, position):
    """
    For global name lookups. Yields tuples of (names_dict, position). If the
    position is None, the position does not matter anymore in that scope.

    This function is used to include names from outer scopes. For example, when
    the current scope is function:

    >>> from jedi._compatibility import u, no_unicode_pprint
    >>> from jedi.parser import Parser, load_grammar
    >>> parser = Parser(load_grammar(), u('''
    ... x = ['a', 'b', 'c']
    ... def func():
    ...     y = None
    ... '''))
    >>> scope = parser.module.subscopes[0]
    >>> scope
    <Function: func@3-5>

    `global_names_dict_generator` is a generator. First it yields names from
    most inner scope.

    >>> from jedi.evaluate import Evaluator
    >>> evaluator = Evaluator(load_grammar())
    >>> scope = er.wrap(evaluator, scope)
    >>> pairs = list(global_names_dict_generator(evaluator, scope, (4, 0)))
    >>> no_unicode_pprint(pairs[0])
    ({'func': [], 'y': [<Name: y@4,4>]}, (4, 0))

    Then it yields the names from one level "lower". In this example, this
    is the most outer scope. As you can see, the position in the tuple is now
    None, because typically the whole module is loaded before the function is
    called.

    >>> no_unicode_pprint(pairs[1])
    ({'func': [<Name: func@3,4>], 'x': [<Name: x@2,0>]}, None)

    After that we have a few underscore names that are part of the module.

    >>> sorted(pairs[2][0].keys())
    ['__doc__', '__file__', '__name__', '__package__']
    >>> pairs[3]                                        # global names -> there are none in our example.
    ({}, None)
    >>> pairs[4]                                        # package modules -> Also none.
    ({}, None)

    Finally, it yields names from builtin, if `include_builtin` is
    true (default).

    >>> pairs[5][0].values()                            #doctest: +ELLIPSIS
    [[<CompiledName: ...>], ...]
    """
    in_func = False
    while scope is not None:
        if not (scope.type == 'classdef' and in_func):
            # Names in methods cannot be resolved within the class.
            for names_dict in scope.names_dicts(True):
                yield names_dict, position
        if scope.type == 'funcdef':
            # The position should be reset if the current scope is a function.
            in_func = True
            position = None
        # Walk outward to the next enclosing scope.
        scope = er.wrap(evaluator, scope.get_parent_scope())

    # Add builtins to the global scope.
    for names_dict in compiled.builtin.names_dicts(True):
        yield names_dict, None
def check_tuple_assignments(types, name):
    """
    Checks if tuples are assigned.
    """
    for index in name.assignment_indexes():
        new_types = []
        for candidate in types:
            try:
                lookup = candidate.get_exact_index_types
            except AttributeError:
                debug.warning("Invalid tuple lookup #%s of result %s in %s",
                              index, types, name)
                continue
            try:
                new_types += lookup(index)
            except IndexError:
                pass
        types = new_types
    return types
def filter_private_variable(scope, origin_node):
    """Check if a variable is defined inside the same class or outside."""
    instance = scope.get_parent_scope()
    node = origin_node
    # Climb up until we reach the class (or compiled object) that contains
    # the access.
    while node is not None \
            and not isinstance(node, (pr.Class, compiled.CompiledObject)):
        node = node.get_parent_scope()

    # CompiledObjects don't have double underscore attributes, but Jedi abuses
    # those for fakes (builtins.pym -> list).
    if isinstance(instance, compiled.CompiledObject):
        return instance != node
    return isinstance(instance, er.Instance) and instance.base.base != node
| 38.665455 | 114 | 0.622402 | #\input texinfo
"""
Searching for names with given scope and name. This is very central in Jedi and
Python. The name resolution is quite complicated with descripter,
``__getattribute__``, ``__getattr__``, ``global``, etc.
Flow checks
+++++++++++
Flow checks are not really mature. There's only a check for ``isinstance``. It
would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
check for -> a is a string). There's big potential in these checks.
"""
from itertools import chain
from jedi._compatibility import unicode, u
from jedi.parser import tree as pr
from jedi import debug
from jedi import common
from jedi import settings
from jedi.evaluate import representation as er
from jedi.evaluate import dynamic
from jedi.evaluate import compiled
from jedi.evaluate import docstrings
from jedi.evaluate import iterable
from jedi.evaluate import imports
from jedi.evaluate import analysis
from jedi.evaluate import flow_analysis
from jedi.evaluate import param
from jedi.evaluate import helpers
from jedi.evaluate.cache import memoize_default
def filter_after_position(names, position):
    """
    Removes all names after a certain position. If position is None, just
    returns the names list.
    """
    if position is None:
        return names

    kept = []
    for name in names:
        defined_before = name.start_pos[0] is not None and name.start_pos < position
        # Comprehension/lambda names are position-independent and always kept.
        if defined_before or isinstance(name.get_definition(), (pr.CompFor, pr.Lambda)):
            kept.append(name)
    return kept
def filter_definition_names(names, origin, position=None):
    """
    Filter names that are actual definitions in a scope. Names that are just
    used will be ignored.
    """
    # Just calculate the scope from the first
    # NOTE(review): assumes `names` is non-empty (indexes names[0]).
    stmt = names[0].get_definition()
    scope = stmt.get_parent_scope()

    # Executions of lambda wrappers skip the position filter; everything
    # else is restricted to names defined before `position`.
    if not (isinstance(scope, er.FunctionExecution)
            and isinstance(scope.base, er.LambdaWrapper)):
        names = filter_after_position(names, position)
    names = [name for name in names if name.is_definition()]

    # Private name mangling (compile.c) disallows access on names
    # preceeded by two underscores `__` if used outside of the class. Names
    # that also end with two underscores (e.g. __id__) are not affected.
    for name in list(names):
        if name.value.startswith('__') and not name.value.endswith('__'):
            if filter_private_variable(scope, origin):
                names.remove(name)
    return names
class NameFinder(object):
def __init__(self, evaluator, scope, name_str, position=None):
self._evaluator = evaluator
# Make sure that it's not just a syntax tree node.
self.scope = er.wrap(evaluator, scope)
self.name_str = name_str
self.position = position
@debug.increase_indent
def find(self, scopes, search_global=False):
# TODO rename scopes to names_dicts
names = self.filter_name(scopes)
types = self._names_to_types(names, search_global)
if not names and not types \
and not (isinstance(self.name_str, pr.Name)
and isinstance(self.name_str.parent.parent, pr.Param)):
if not isinstance(self.name_str, (str, unicode)): # TODO Remove?
if search_global:
message = ("NameError: name '%s' is not defined."
% self.name_str)
analysis.add(self._evaluator, 'name-error', self.name_str,
message)
else:
analysis.add_attribute_error(self._evaluator,
self.scope, self.name_str)
debug.dbg('finder._names_to_types: %s -> %s', names, types)
return types
def scopes(self, search_global=False):
if search_global:
return global_names_dict_generator(self._evaluator, self.scope, self.position)
else:
return ((n, None) for n in self.scope.names_dicts(search_global))
def names_dict_lookup(self, names_dict, position):
def get_param(scope, el):
if isinstance(el.get_parent_until(pr.Param), pr.Param):
return scope.param_by_name(str(el))
return el
search_str = str(self.name_str)
try:
names = names_dict[search_str]
if not names: # We want names, otherwise stop.
return []
except KeyError:
return []
names = filter_definition_names(names, self.name_str, position)
name_scope = None
# Only the names defined in the last position are valid definitions.
last_names = []
for name in reversed(sorted(names, key=lambda name: name.start_pos)):
stmt = name.get_definition()
name_scope = er.wrap(self._evaluator, stmt.get_parent_scope())
if isinstance(self.scope, er.Instance) and not isinstance(name_scope, er.Instance):
# Instances should not be checked for positioning, because we
# don't know in which order the functions are called.
last_names.append(name)
continue
if isinstance(name_scope, compiled.CompiledObject):
# Let's test this. TODO need comment. shouldn't this be
# filtered before?
last_names.append(name)
continue
if isinstance(name, compiled.CompiledName) \
or isinstance(name, er.InstanceName) and isinstance(name._origin_name, compiled.CompiledName):
last_names.append(name)
continue
if isinstance(self.name_str, pr.Name):
origin_scope = self.name_str.get_parent_until(pr.Scope, reverse=True)
else:
origin_scope = None
if isinstance(stmt.parent, compiled.CompiledObject):
# TODO seriously? this is stupid.
continue
check = flow_analysis.break_check(self._evaluator, name_scope,
stmt, origin_scope)
if check is not flow_analysis.UNREACHABLE:
last_names.append(name)
if check is flow_analysis.REACHABLE:
break
if isinstance(name_scope, er.FunctionExecution):
# Replace params
return [get_param(name_scope, n) for n in last_names]
return last_names
def filter_name(self, names_dicts):
"""
Searches names that are defined in a scope (the different
`names_dicts`), until a name fits.
"""
names = []
for names_dict, position in names_dicts:
names = self.names_dict_lookup(names_dict, position)
if names:
break
debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self.name_str,
self.scope, u(names), self.position)
return list(self._clean_names(names))
def _clean_names(self, names):
"""
``NameFinder.filter_name`` should only output names with correct
wrapper parents. We don't want to see AST classes out in the
evaluation, so remove them already here!
"""
for n in names:
definition = n.parent
if isinstance(definition, (pr.Function, pr.Class, pr.Module)):
yield er.wrap(self._evaluator, definition).name
else:
yield n
def _check_getattr(self, inst):
"""Checks for both __getattr__ and __getattribute__ methods"""
result = []
# str is important, because it shouldn't be `Name`!
name = compiled.create(self._evaluator, str(self.name_str))
with common.ignored(KeyError):
result = inst.execute_subscope_by_name('__getattr__', name)
if not result:
# this is a little bit special. `__getattribute__` is executed
# before anything else. But: I know no use case, where this
# could be practical and the jedi would return wrong types. If
# you ever have something, let me know!
with common.ignored(KeyError):
result = inst.execute_subscope_by_name('__getattribute__', name)
return result
def _names_to_types(self, names, search_global):
    """Convert found ``names`` into the types they stand for.

    Flow information (``isinstance`` checks inside if/while/assert) may
    short-circuit the lookup entirely; for class/instance attribute
    access, descriptors are resolved; when nothing was found on an
    instance, ``__getattr__``/``__getattribute__`` are consulted.
    """
    types = []
    # Add isinstance and other if/assert knowledge.
    if isinstance(self.name_str, pr.Name):
        # Ignore FunctionExecution parents for now.
        flow_scope = self.name_str
        until = flow_scope.get_parent_until(er.FunctionExecution)
        # NOTE(review): `until` is computed once, so the while-condition is
        # constant; the loop only terminates via the break/return inside.
        while not isinstance(until, er.FunctionExecution):
            flow_scope = flow_scope.get_parent_scope(include_flows=True)
            if flow_scope is None:
                break
            # TODO check if result is in scope -> no evaluation necessary
            n = check_flow_information(self._evaluator, flow_scope,
                                       self.name_str, self.position)
            if n:
                return n
    for name in names:
        new_types = _name_to_types(self._evaluator, name, self.scope)
        if isinstance(self.scope, (er.Class, er.Instance)) and not search_global:
            # Attribute access on a class/instance may go through descriptors.
            types += self._resolve_descriptors(name, new_types)
        else:
            types += new_types
    if not names and isinstance(self.scope, er.Instance):
        # handling __getattr__ / __getattribute__
        types = self._check_getattr(self.scope)
    return types
def _resolve_descriptors(self, name, types):
    """Apply the descriptor protocol (``__get__``) to ``types`` where
    possible.

    Descriptors only fire when the name is defined in a class (or
    instance) scope — the class dictionary — so for any other defining
    scope ``types`` is returned untouched.
    """
    defining_scope = name.get_definition().get_parent_scope()
    if not isinstance(defining_scope, (er.Instance, pr.Class)):
        return types
    resolved = []
    for typ in types:
        getter = getattr(typ, 'get_descriptor_returns', None)
        if getter is None:
            # Not a descriptor; keep the type as-is.
            resolved.append(typ)
        else:
            resolved += getter(self.scope)
    return resolved
@memoize_default([], evaluator_is_first_arg=True)
def _name_to_types(evaluator, name, scope):
    """Map a single found ``name`` to the list of types its definition
    produces, dispatching on the kind of parser node that defines it
    (for-loop target, comprehension, parameter, assignment, with-item,
    import, global, except-clause, or a plain function/class definition).
    """
    types = []
    typ = name.get_definition()
    if typ.isinstance(pr.ForStmt):
        # `for name in <iterable>`: children[3] is the iterated expression.
        for_types = evaluator.eval_element(typ.children[3])
        for_types = iterable.get_iterator_types(for_types)
        types += check_tuple_assignments(for_types, name)
    elif typ.isinstance(pr.CompFor):
        # Comprehension `for` clause — same handling as a for statement.
        for_types = evaluator.eval_element(typ.children[3])
        for_types = iterable.get_iterator_types(for_types)
        types += check_tuple_assignments(for_types, name)
    elif isinstance(typ, pr.Param):
        types += _eval_param(evaluator, typ, scope)
    elif typ.isinstance(pr.ExprStmt):
        types += _remove_statements(evaluator, typ, name)
    elif typ.isinstance(pr.WithStmt):
        # `with <expr> as name`: evaluate the context expression.
        types += evaluator.eval_element(typ.node_from_name(name))
    elif isinstance(typ, pr.Import):
        types += imports.ImportWrapper(evaluator, name).follow()
    elif isinstance(typ, pr.GlobalStmt):
        # TODO theoretically we shouldn't be using search_global here, it
        # doesn't make sense, because it's a local search (for that name)!
        # However, globals are not that important and resolving them doesn't
        # guarantee correctness in any way, because we don't check for when
        # something is executed.
        types += evaluator.find_types(typ.get_parent_scope(), str(name),
                                      search_global=True)
    elif isinstance(typ, pr.TryStmt):
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = evaluator.eval_element(name.prev_sibling().prev_sibling())
        types = list(chain.from_iterable(
            evaluator.execute(t) for t in exceptions))
    else:
        if typ.isinstance(er.Function):
            typ = typ.get_decorated_func()
        types.append(typ)
    return types
def _remove_statements(evaluator, stmt, name):
    """
    This is the part where statements are being stripped.

    Due to lazy evaluation, statements like a = func; b = a; b() have to be
    evaluated.
    """
    types = []
    # Remove the statement docstr stuff for now, that has to be
    # implemented with the evaluator class.
    #if stmt.docstr:
        #res_new.append(stmt)
    check_instance = None
    if isinstance(stmt, er.InstanceElement) and stmt.is_class_var:
        # Unwrap a class variable accessed through an instance; remember the
        # instance so the results can be re-wrapped afterwards.
        check_instance = stmt.instance
        stmt = stmt.var
    types += evaluator.eval_statement(stmt, seek_name=name)
    if check_instance is not None:
        # class renames
        types = [er.get_instance_el(evaluator, check_instance, a, True)
                 if isinstance(a, (er.Function, pr.Function))
                 else a for a in types]
    return types
def _eval_param(evaluator, param, scope):
    """Infer the types a function parameter can take.

    Sources, in order: an implicit ``self``/first method parameter (a
    generated instance), docstring annotations, actually executed call
    arguments, dynamic call-site search, ``*args``/``**kwargs`` builtin
    containers, and finally the default value.
    """
    res_new = []
    func = param.get_parent_scope()
    cls = func.parent.get_parent_until((pr.Class, pr.Function))
    # Imported lazily; presumably to avoid a circular import — TODO confirm.
    from jedi.evaluate.param import ExecutedParam, Arguments
    if isinstance(cls, pr.Class) and param.position_nr == 0 \
            and not isinstance(param, ExecutedParam):
        # This is where we add self - if it has never been
        # instantiated.
        if isinstance(scope, er.InstanceElement):
            res_new.append(scope.instance)
        else:
            inst = er.Instance(evaluator, er.wrap(evaluator, cls),
                               Arguments(evaluator, ()), is_generated=True)
            res_new.append(inst)
        return res_new
    # Instances are typically faked, if the instance is not called from
    # outside. Here we check it for __init__ functions and return.
    if isinstance(func, er.InstanceElement) \
            and func.instance.is_generated and str(func.name) == '__init__':
        param = func.var.params[param.position_nr]
    # Add docstring knowledge.
    doc_params = docstrings.follow_param(evaluator, param)
    if doc_params:
        return doc_params
    if isinstance(param, ExecutedParam):
        # A real call supplied this param — evaluate the passed arguments.
        return res_new + param.eval(evaluator)
    else:
        # Param owns no information itself.
        res_new += dynamic.search_params(evaluator, param)
        if not res_new:
            if param.stars:
                # *args -> tuple, **kwargs -> dict.
                t = 'tuple' if param.stars == 1 else 'dict'
                typ = evaluator.find_types(compiled.builtin, t)[0]
                res_new = evaluator.execute(typ)
        if param.default:
            res_new += evaluator.eval_element(param.default)
        return res_new
def check_flow_information(evaluator, flow, search_name, pos):
    """ Try to find out the type of a variable just with the information that
    is given by the flows: e.g. It is also responsible for assert checks.::

        if isinstance(k, str):
            k.  # <- completion here

    ensures that `k` is a string.
    """
    if not settings.dynamic_flow_information:
        return None
    result = []
    if flow.is_scope():
        # Check for asserts.
        try:
            # Iterate most recent definitions first.
            names = reversed(flow.names_dict[search_name.value])
        except (KeyError, AttributeError):
            names = []
        for name in names:
            ass = name.get_parent_until(pr.AssertStmt)
            # Only asserts that appear before the queried position count.
            if isinstance(ass, pr.AssertStmt) and pos is not None and ass.start_pos < pos:
                result = _check_isinstance_type(evaluator, ass.assertion(), search_name)
                if result:
                    break
    if isinstance(flow, (pr.IfStmt, pr.WhileStmt)):
        # children[1] is the test/condition expression of the if/while.
        element = flow.children[1]
        result = _check_isinstance_type(evaluator, element, search_name)
    return result
def _check_isinstance_type(evaluator, element, search_name):
    """Return the instance types implied by an ``isinstance(name, classes)``
    call on ``search_name``, or ``[]`` when ``element`` is not such a call.

    The structural requirements are enforced with asserts inside the
    ``try``; any mismatch falls through to the empty result.
    """
    try:
        assert element.type == 'power'
        # this might be removed if we analyze and, etc
        assert len(element.children) == 2
        first, trailer = element.children
        assert isinstance(first, pr.Name) and first.value == 'isinstance'
        assert trailer.type == 'trailer' and trailer.children[0] == '('
        assert len(trailer.children) == 3
        # arglist stuff
        arglist = trailer.children[1]
        args = param.Arguments(evaluator, arglist, trailer)
        lst = list(args.unpack())
        # Disallow keyword arguments
        assert len(lst) == 2 and lst[0][0] is None and lst[1][0] is None
        name = lst[0][1][0]  # first argument, values, first value
        # Do a simple get_code comparison. They should just have the same code,
        # and everything will be all right.
        classes = lst[1][1][0]
        call = helpers.call_of_name(search_name)
        assert name.get_code() == call.get_code()
    except AssertionError:
        return []
    result = []
    for typ in evaluator.eval_element(classes):
        # isinstance also accepts a tuple of classes; expand array values.
        for typ in (typ.values() if isinstance(typ, iterable.Array) else [typ]):
            result += evaluator.execute(typ)
    return result
def global_names_dict_generator(evaluator, scope, position):
    """
    For global name lookups. Yields tuples of (names_dict, position). If the
    position is None, the position does not matter anymore in that scope.

    This function is used to include names from outer scopes. For example, when
    the current scope is function:

    >>> from jedi._compatibility import u, no_unicode_pprint
    >>> from jedi.parser import Parser, load_grammar
    >>> parser = Parser(load_grammar(), u('''
    ... x = ['a', 'b', 'c']
    ... def func():
    ...     y = None
    ... '''))
    >>> scope = parser.module.subscopes[0]
    >>> scope
    <Function: func@3-5>

    `global_names_dict_generator` is a generator. First it yields names from
    most inner scope.

    >>> from jedi.evaluate import Evaluator
    >>> evaluator = Evaluator(load_grammar())
    >>> scope = er.wrap(evaluator, scope)
    >>> pairs = list(global_names_dict_generator(evaluator, scope, (4, 0)))
    >>> no_unicode_pprint(pairs[0])
    ({'func': [], 'y': [<Name: y@4,4>]}, (4, 0))

    Then it yields the names from one level "lower". In this example, this
    is the most outer scope. As you can see, the position in the tuple is now
    None, because typically the whole module is loaded before the function is
    called.

    >>> no_unicode_pprint(pairs[1])
    ({'func': [<Name: func@3,4>], 'x': [<Name: x@2,0>]}, None)

    After that we have a few underscore names that are part of the module.

    >>> sorted(pairs[2][0].keys())
    ['__doc__', '__file__', '__name__', '__package__']
    >>> pairs[3]                                        # global names -> there are none in our example.
    ({}, None)
    >>> pairs[4]                                        # package modules -> Also none.
    ({}, None)

    Finally, it yields names from builtin, if `include_builtin` is
    true (default).

    >>> pairs[5][0].values()                            #doctest: +ELLIPSIS
    [[<CompiledName: ...>], ...]
    """
    in_func = False
    # Walk outward from the innermost scope to the module.
    while scope is not None:
        if not (scope.type == 'classdef' and in_func):
            # Names in methods cannot be resolved within the class.
            for names_dict in scope.names_dicts(True):
                yield names_dict, position
        if scope.type == 'funcdef':
            # The position should be reset if the current scope is a function.
            in_func = True
            position = None
        scope = er.wrap(evaluator, scope.get_parent_scope())

    # Add builtins to the global scope.
    for names_dict in compiled.builtin.names_dicts(True):
        yield names_dict, None
def check_tuple_assignments(types, name):
    """Follow tuple assignments (e.g. ``a, (b, c) = ...``) and narrow
    ``types`` down to the entries stored at the name's index path.
    """
    for index in name.assignment_indexes():
        narrowed = []
        for candidate in types:
            try:
                index_lookup = candidate.get_exact_index_types
            except AttributeError:
                debug.warning("Invalid tuple lookup #%s of result %s in %s",
                              index, types, name)
                continue
            try:
                narrowed += index_lookup(index)
            except IndexError:
                # Fewer values than targets at this nesting level.
                pass
        types = narrowed
    return types
def filter_private_variable(scope, origin_node):
    """Check if a variable is defined inside the same class or outside."""
    owner = scope.get_parent_scope()
    accessor = origin_node
    # Walk up from the access site to the nearest class (or compiled object).
    while accessor is not None:
        if isinstance(accessor, (pr.Class, compiled.CompiledObject)):
            break
        accessor = accessor.get_parent_scope()
    if isinstance(owner, compiled.CompiledObject):
        # CompiledObjects don't have double underscore attributes, but Jedi
        # abuses those for fakes (builtins.pym -> list).
        return owner != accessor
    return isinstance(owner, er.Instance) and owner.base.base != accessor
| 10,655 | 2,020 | 91 |
c7a0017098f53e1aca17da21cef0ed3d90134016 | 4,135 | py | Python | tools/generate-polls/generator/ansi_x931_aes128.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 473 | 2016-08-01T12:48:16.000Z | 2022-03-09T18:13:14.000Z | tools/generate-polls/generator/ansi_x931_aes128.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 71 | 2016-08-01T03:33:44.000Z | 2022-03-09T18:37:04.000Z | tools/generate-polls/generator/ansi_x931_aes128.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 121 | 2016-08-01T04:07:53.000Z | 2022-03-07T11:08:09.000Z | #!/usr/bin/env python
"""
A Python implementation of ANSI X9.31 using AES 128, following:
http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
Copyright (C) 2015 - Brian Caswell <bmc@lungetech.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
#import random
#import unittest
from Crypto.Cipher import AES
class PRNG(object):
    """
    A python implementation of ANSI X9.31 using AES 128

    Attributes:
        random_data: Currently available block of generated random data
        V: "seed value which is also kept secret"
        DT: "date/time vector updated upon each iteration"
        I: Intermediate value
        aes_ctx: AES state machine context

    NOTE(review): byte strings are handled as Python 2 ``str`` (the
    chr/ord-based XOR below); this class is not Python 3 safe.
    """
    # AES-128 block size in bytes; V, key and DT are each one block.
    BLOCK_SIZE = 16

    def __init__(self, seed=None):
        """
        Seed is V + Key + DT as a string
        """
        if seed is not None:
            assert len(seed) == 48
        else:
            # Fixed default seed: output is deterministic — fine for
            # reproducible test generation, not for real cryptography.
            seed = "zaybxcwdveuftgsh" + "0123456789abcdef" + "\x00" * 16
        # Split the 48-byte seed into three 16-byte blocks: V, key, DT.
        self.V, key, self.DT = [seed[i:i+PRNG.BLOCK_SIZE] for i in range(0, len(seed), PRNG.BLOCK_SIZE)]
        self.random_data = ''
        self.I = "\x00" * PRNG.BLOCK_SIZE
        # ECB is correct here: X9.31 encrypts single independent blocks.
        self.aes_ctx = AES.new(key, mode=AES.MODE_ECB)

    @staticmethod
    def _xor_string(value_1, value_2):
        """
        value_1 ^ value_2

        Exceptions:
            AssertionError if value_1 and value_2 are not the same length
        """
        assert len(value_1) == len(value_2)
        return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(value_1, value_2))

    def _get_block(self):
        """
        Get the next block from the PRNG, saving it to self.random_data

        Arguments:
            None

        Returns:
            None

        Exceptions:
            None
        """
        # encrypt the counter value, giving intermediate value I
        self.I = self.aes_ctx.encrypt(self.DT)

        # XOR I with secret vector V, encrypt the result to obtain pseudo
        # random data
        tmp = self._xor_string(self.I, self.V)
        self.random_data = self.aes_ctx.encrypt(tmp)

        # XOR random data with I, and encrypt to get new secret vector V
        tmp = self._xor_string(self.random_data, self.I)
        self.V = self.aes_ctx.encrypt(tmp)

        # update DT value: treat DT as a 128-bit big-endian counter and
        # increment with carry, starting from the least-significant byte.
        i = PRNG.BLOCK_SIZE - 1
        while i >= 0:
            out = (ord(self.DT[i]) + 1) % 256
            self.DT = self.DT[:i] + chr(out) + self.DT[i+1:]
            if out != 0:
                break
            i -= 1

    def get(self, size):
        """
        Get 'size' bytes of random data

        Arguments:
            size: Amount of random data to return

        Returns:
            str of length 'size' of random data

        Exceptions:
            AssertionError if size is not a positive integer
        """
        assert isinstance(size, int)
        assert size > 0

        result = ''
        while len(result) < size:
            need = size - len(result)
            if not len(self.random_data):
                # Buffer exhausted: generate the next 16-byte block.
                self._get_block()
            result += self.random_data[:need]
            self.random_data = self.random_data[need:]
        return result
| 30.858209 | 104 | 0.62636 | #!/usr/bin/env python
"""
A Python implementation of ANSI X9.31 using AES 128, following:
http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
Copyright (C) 2015 - Brian Caswell <bmc@lungetech.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
#import random
#import unittest
from Crypto.Cipher import AES
class PRNG(object):
    """
    A python implementation of ANSI X9.31 using AES 128

    Attributes:
        random_data: Currently available block of generated random data
        V: "seed value which is also kept secret"
        DT: "date/time vector updated upon each iteration"
        I: Intermediate value
        aes_ctx: AES state machine context

    NOTE(review): byte strings are handled as Python 2 ``str`` (the
    chr/ord-based XOR below); this class is not Python 3 safe.
    """
    # AES-128 block size in bytes; V, key and DT are each one block.
    BLOCK_SIZE = 16

    def __init__(self, seed=None):
        """
        Seed is V + Key + DT as a string
        """
        if seed is not None:
            assert len(seed) == 48
        else:
            # Fixed default seed: output is deterministic — fine for
            # reproducible test generation, not for real cryptography.
            seed = "zaybxcwdveuftgsh" + "0123456789abcdef" + "\x00" * 16
        # Split the 48-byte seed into three 16-byte blocks: V, key, DT.
        self.V, key, self.DT = [seed[i:i+PRNG.BLOCK_SIZE] for i in range(0, len(seed), PRNG.BLOCK_SIZE)]
        self.random_data = ''
        self.I = "\x00" * PRNG.BLOCK_SIZE
        # ECB is correct here: X9.31 encrypts single independent blocks.
        self.aes_ctx = AES.new(key, mode=AES.MODE_ECB)

    @staticmethod
    def _xor_string(value_1, value_2):
        """
        value_1 ^ value_2

        Exceptions:
            AssertionError if value_1 and value_2 are not the same length
        """
        assert len(value_1) == len(value_2)
        return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(value_1, value_2))

    def _get_block(self):
        """
        Get the next block from the PRNG, saving it to self.random_data

        Arguments:
            None

        Returns:
            None

        Exceptions:
            None
        """
        # encrypt the counter value, giving intermediate value I
        self.I = self.aes_ctx.encrypt(self.DT)

        # XOR I with secret vector V, encrypt the result to obtain pseudo
        # random data
        tmp = self._xor_string(self.I, self.V)
        self.random_data = self.aes_ctx.encrypt(tmp)

        # XOR random data with I, and encrypt to get new secret vector V
        tmp = self._xor_string(self.random_data, self.I)
        self.V = self.aes_ctx.encrypt(tmp)

        # update DT value: treat DT as a 128-bit big-endian counter and
        # increment with carry, starting from the least-significant byte.
        i = PRNG.BLOCK_SIZE - 1
        while i >= 0:
            out = (ord(self.DT[i]) + 1) % 256
            self.DT = self.DT[:i] + chr(out) + self.DT[i+1:]
            if out != 0:
                break
            i -= 1

    def get(self, size):
        """
        Get 'size' bytes of random data

        Arguments:
            size: Amount of random data to return

        Returns:
            str of length 'size' of random data

        Exceptions:
            AssertionError if size is not a positive integer
        """
        assert isinstance(size, int)
        assert size > 0

        result = ''
        while len(result) < size:
            need = size - len(result)
            if not len(self.random_data):
                # Buffer exhausted: generate the next 16-byte block.
                self._get_block()
            result += self.random_data[:need]
            self.random_data = self.random_data[need:]
        return result
| 0 | 0 | 0 |
be16679000ce269e3f1016e7c63e3ecfdfb22cef | 2,061 | py | Python | Python/data_structure/segment_tree.py | NatsubiSogan/comp_library | 9f06d947951db40e051bd506fd8722fb75c3688b | [
"Apache-2.0"
] | 2 | 2021-09-05T13:17:01.000Z | 2021-09-05T13:17:06.000Z | Python/data_structure/segment_tree.py | NatsubiSogan/comp_library | 9f06d947951db40e051bd506fd8722fb75c3688b | [
"Apache-2.0"
] | null | null | null | Python/data_structure/segment_tree.py | NatsubiSogan/comp_library | 9f06d947951db40e051bd506fd8722fb75c3688b | [
"Apache-2.0"
] | null | null | null | import typing
# Segment Tree | 22.9 | 84 | 0.534692 | import typing
# Segment Tree
class SegmentTree:
    """Iterative segment tree over a monoid (ele, op).

    Args:
        lis: initial sequence of values.
        ele: identity element of the monoid (op(ele, x) == x == op(x, ele)).
        op: associative binary operation.
    """
    def __init__(
            self,
            lis: list,
            ele: typing.Any,
            op: typing.Callable[[typing.Any, typing.Any], typing.Any]) -> None:
        self.n = len(lis)
        self.log = (self.n - 1).bit_length()
        self.size = 1 << self.log  # number of leaves: smallest power of two >= n
        self.op = op
        self.ele = ele
        self.tree = self._build(lis)
    def _build(self, lis: list) -> list:
        # 1-indexed flat array: leaves live at [size, 2*size), root at 1;
        # unused leaves stay at the identity element.
        res_tree = [self.ele] * (2 * self.size)
        for i, a in enumerate(lis):
            res_tree[self.size + i] = a
        for i in range(1, self.size)[::-1]:
            res_tree[i] = self.op(res_tree[2 * i], res_tree[2 * i + 1])
        return res_tree
    def __getitem__(self, i: int) -> typing.Any:
        # BUGFIX: the return annotation said None although a value is returned.
        return self.tree[self.size + i]
    def __setitem__(self, p: int, x: typing.Any) -> None:
        """Assign position p to x and recompute all ancestors."""
        p += self.size
        self.tree[p] = x
        for i in range(1, self.log + 1):
            self.tree[p >> i] = self.op(self.tree[2 * (p >> i)], self.tree[2 * (p >> i) + 1])
    def prod(self, l: int, r: int) -> typing.Any:
        """Fold op over the half-open interval [l, r)."""
        l += self.size
        r += self.size
        L = R = self.ele
        while l < r:
            if l & 1:
                L = self.op(L, self.tree[l])
                l += 1
            if r & 1:
                r -= 1
                R = self.op(self.tree[r], R)
            l >>= 1
            r >>= 1
        return self.op(L, R)
    def all_prod(self) -> typing.Any:
        """Fold op over the whole sequence (the root of the tree)."""
        return self.tree[1]
    def max_right(self, l: int, f: typing.Callable[[typing.Any], bool]) -> int:
        """Return the largest r with f(prod(l, r)) true; f(ele) must be true."""
        if l == self.n:
            return self.n
        l += self.size
        sm = self.ele
        while True:
            while l % 2 == 0:
                l >>= 1
            if not f(self.op(sm, self.tree[l])):
                # Descend into the failing subtree to find the exact boundary.
                while l < self.size:
                    l *= 2
                    if f(self.op(sm, self.tree[l])):
                        sm = self.op(sm, self.tree[l])
                        l += 1
                return l - self.size
            sm = self.op(sm, self.tree[l])
            l += 1
            if (l & -l) == l:
                return self.n
    def min_left(self, r: int, f: typing.Callable[[typing.Any], bool]) -> int:
        """Return the smallest l with f(prod(l, r)) true; f(ele) must be true."""
        if r == 0:
            return 0
        r += self.size
        sm = self.ele
        while True:
            r -= 1
            while r > 1 and (r % 2):
                r >>= 1
            if not f(self.op(self.tree[r], sm)):
                # Descend into the failing subtree to find the exact boundary.
                while r < self.size:
                    r = 2 * r + 1
                    if f(self.op(self.tree[r], sm)):
                        sm = self.op(self.tree[r], sm)
                        r -= 1
                return r + 1 - self.size
            # BUGFIX: was `self.op(self.d[r], sm)` — `self.d` does not exist,
            # raising AttributeError whenever a whole left subtree satisfies f.
            sm = self.op(self.tree[r], sm)
            if (r & -r) == r:
                return 0
ac1ea41500b4841349b0cc2485d44adb0c2d5525 | 1,292 | py | Python | category_encoders/tests/test_cat_boost.py | hell00lleh/categorical-encoding | 6a6cf0e9944eab298243635cd074c3f3d14b1b85 | [
"BSD-3-Clause"
] | 3 | 2019-09-29T15:10:26.000Z | 2019-10-03T08:39:04.000Z | category_encoders/tests/test_cat_boost.py | hell00lleh/categorical-encoding | 6a6cf0e9944eab298243635cd074c3f3d14b1b85 | [
"BSD-3-Clause"
] | null | null | null | category_encoders/tests/test_cat_boost.py | hell00lleh/categorical-encoding | 6a6cf0e9944eab298243635cd074c3f3d14b1b85 | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
from unittest2 import TestCase # or `from unittest import ...` if on Python 3.4+
import category_encoders as encoders
| 43.066667 | 147 | 0.575077 | import pandas as pd
from unittest2 import TestCase # or `from unittest import ...` if on Python 3.4+
import category_encoders as encoders
class TestBinaryEncoder(TestCase):
    """Unit tests for CatBoostEncoder (ordered target statistics)."""

    def test_catBoost(self):
        """Fit/transform uses running target means with the prior blended in."""
        X = pd.DataFrame({'col1': ['A', 'B', 'B', 'C', 'A']})
        y = pd.Series([1, 0, 1, 0, 1])
        enc = encoders.CatBoostEncoder()
        obtained = enc.fit_transform(X, y)
        # Expected values: (running target sum + prior) / (count + 1),
        # where the prior is the global target mean 3/5 = 0.6.
        self.assertEqual(list(obtained['col1']), [0.6, 0.6, 0.6/2, 0.6, 1.6/2], 'The nominator is incremented by the prior. The denominator by 1.')

        X_t = pd.DataFrame({'col1': ['B', 'B', 'A']})
        obtained = enc.transform(X_t)
        # transform() uses the full statistics accumulated during fit.
        self.assertEqual(list(obtained['col1']), [1.6/3, 1.6/3, 2.6/3])

    def test_catBoost_missing(self):
        """Missing values are encoded as their own category with handle_missing='value'."""
        X = pd.DataFrame({'col1': ['A', 'B', 'B', 'C', 'A', None, None, None]})
        y = pd.Series([1, 0, 1, 0, 1, 0, 1, 0])
        enc = encoders.CatBoostEncoder(handle_missing='value')
        obtained = enc.fit_transform(X, y)
        # Prior here is the global target mean 4/8 = 0.5.
        self.assertEqual(list(obtained['col1']), [0.5, 0.5, 0.5/2, 0.5, 1.5/2, 0.5, 0.5/2, 1.5/3], 'We treat None as another category.')

        X_t = pd.DataFrame({'col1': ['B', 'B', 'A', None]})
        obtained = enc.transform(X_t)
        self.assertEqual(list(obtained['col1']), [1.5/3, 1.5/3, 2.5/3, 1.5/4])
ec2d3ee64d8ab2ff705f751b7a693c0dcc9904a3 | 828 | py | Python | pythonioc/test/test_depcycle.py | leitelyaya/python-ioc | 65e64272ce645a9ded71d30eb58c21832d917702 | [
"MIT"
] | 1 | 2017-06-30T13:49:47.000Z | 2017-06-30T13:49:47.000Z | pythonioc/test/test_depcycle.py | leitelyaya/python-ioc | 65e64272ce645a9ded71d30eb58c21832d917702 | [
"MIT"
] | null | null | null | pythonioc/test/test_depcycle.py | leitelyaya/python-ioc | 65e64272ce645a9ded71d30eb58c21832d917702 | [
"MIT"
] | null | null | null | import unittest
import pythonioc
class TestDepCycle(unittest.TestCase):
"""
Regression test for issue #3 dependency cycle on error.
"""
| 25.875 | 83 | 0.661836 | import unittest
import pythonioc
class SpecificException(Exception):
    """Marker exception raised by ErrorInitService.postInit()."""
    pass
class ErrorInitService(object):
    """Test service whose post-init hook always fails with SpecificException."""

    def postInit(self):
        # Fails on every call so service initialization never succeeds.
        raise SpecificException("not working")

    def doSomething(self):
        # No-op; only called to trigger service resolution/initialization.
        pass
class TestDepCycle(unittest.TestCase):
    """
    Regression test for issue #3 dependency cycle on error.
    """

    def test_depcycle(self):
        """Repeated use of a service that fails to initialize must keep
        raising its own exception, not a dependency-cycle error."""
        # register a service that will fail on initialization
        pythonioc.registerService(ErrorInitService, overwrite=True)

        # repeatedly try to use it, accept its specific exception but
        # fail on the cycle-exception (which is a normal Exception and not catched)
        for _ in range(10):
            try:
                pythonioc.Inject('errorInitService').doSomething()
            except SpecificException:
                pass
a897296890225c2cce7c01ab3c21865c3ffad118 | 1,627 | py | Python | dploy/tasks/git.py | anton-shestakov/python-dploy | 61ddd906c75bc41afaeeee8bce9d2ad5c7c68002 | [
"MIT"
] | 2 | 2018-03-21T17:45:08.000Z | 2022-03-07T20:55:39.000Z | dploy/tasks/git.py | anton-shestakov/python-dploy | 61ddd906c75bc41afaeeee8bce9d2ad5c7c68002 | [
"MIT"
] | null | null | null | dploy/tasks/git.py | anton-shestakov/python-dploy | 61ddd906c75bc41afaeeee8bce9d2ad5c7c68002 | [
"MIT"
] | 2 | 2018-03-09T09:11:43.000Z | 2018-06-20T09:31:34.000Z | import os
import fabtools
from fabtools import require
from fabric.api import task, sudo, cd
from fabric.colors import cyan
from dploy.context import ctx
from dploy.utils import git_dirname
@task
def checkout():
    """
    Checkouts the code on the remote location using git

    Reads the repository URL, branch and target directory from the
    deployment context (``ctx``), installs git on the remote host if
    needed, then syncs the working copy and its submodules via sudo.
    """
    branch = ctx('git.branch')
    git_root = ctx('git.dirs.root')
    git_dir = git_dirname(ctx('git.repository'))
    git_path = os.path.join(git_root, git_dir)
    if not fabtools.deb.is_installed('git'):
        fabtools.deb.install('git')
    print(cyan('Checking out {} @ {} -> {}'.format(
        branch, ctx('git.repository'), git_path)))
    # Experimental
    require.git.working_copy(ctx('git.repository'),
                             path=git_path, branch=branch, update=True,
                             use_sudo=True)
    with cd(git_path):
        sudo('git submodule update --init --recursive')
        # Drop stale bytecode left over from previous deployments.
        sudo("find . -iname '*.pyc' | xargs rm -f")
    # /Experimental

    # Previous manual clone/pull implementation, superseded by
    # require.git.working_copy above; kept for reference.
    # if files.exists(os.path.join(git_path, '.git'), use_sudo=True):
    #     print(cyan('Updating {} on {}'.format(branch, env.stage)))
    #     with cd(git_path):
    #         sudo('git reset --hard')
    #         sudo('git pull')
    #         sudo('git submodule update --init --recursive')
    #         sudo('git checkout {}'.format(branch))
    #         sudo("find . -iname '*.pyc' | xargs rm -f")
    # else:
    #     print(cyan('Cloning {} on {}'.format(branch, env.stage)))
    #     with cd(git_root):
    #         sudo('git clone --recursive -b {} {} {}'.format(
    #             ctx('git.branch'), ctx('git.repository'), git_dir))
| 34.617021 | 71 | 0.579594 | import os
import fabtools
from fabtools import require
from fabric.api import task, sudo, cd
from fabric.colors import cyan
from dploy.context import ctx
from dploy.utils import git_dirname
@task
def checkout():
    """
    Checkouts the code on the remote location using git

    Reads the repository URL, branch and target directory from the
    deployment context (``ctx``), installs git on the remote host if
    needed, then syncs the working copy and its submodules via sudo.
    """
    branch = ctx('git.branch')
    git_root = ctx('git.dirs.root')
    git_dir = git_dirname(ctx('git.repository'))
    git_path = os.path.join(git_root, git_dir)
    if not fabtools.deb.is_installed('git'):
        fabtools.deb.install('git')
    print(cyan('Checking out {} @ {} -> {}'.format(
        branch, ctx('git.repository'), git_path)))
    # Experimental
    require.git.working_copy(ctx('git.repository'),
                             path=git_path, branch=branch, update=True,
                             use_sudo=True)
    with cd(git_path):
        sudo('git submodule update --init --recursive')
        # Drop stale bytecode left over from previous deployments.
        sudo("find . -iname '*.pyc' | xargs rm -f")
    # /Experimental

    # Previous manual clone/pull implementation, superseded by
    # require.git.working_copy above; kept for reference.
    # if files.exists(os.path.join(git_path, '.git'), use_sudo=True):
    #     print(cyan('Updating {} on {}'.format(branch, env.stage)))
    #     with cd(git_path):
    #         sudo('git reset --hard')
    #         sudo('git pull')
    #         sudo('git submodule update --init --recursive')
    #         sudo('git checkout {}'.format(branch))
    #         sudo("find . -iname '*.pyc' | xargs rm -f")
    # else:
    #     print(cyan('Cloning {} on {}'.format(branch, env.stage)))
    #     with cd(git_root):
    #         sudo('git clone --recursive -b {} {} {}'.format(
    #             ctx('git.branch'), ctx('git.repository'), git_dir))
64c47b1f89956e061ac9631658410771a007f9cf | 495 | py | Python | S1/Q1.py | grharon/HFD | a6abca134f7a3d6766962eb89ab9e33125dab662 | [
"CC0-1.0"
] | null | null | null | S1/Q1.py | grharon/HFD | a6abca134f7a3d6766962eb89ab9e33125dab662 | [
"CC0-1.0"
] | null | null | null | S1/Q1.py | grharon/HFD | a6abca134f7a3d6766962eb89ab9e33125dab662 | [
"CC0-1.0"
] | null | null | null |
number = 100
factorialnumber = factorial(number)
print(f"Factorial({number}) = {factorialnumber}")
sumfactorialnumber = sum_of_digits(factorialnumber)
print(
f"Sum of digits in the factorial number({number}) = {sumfactorialnumber}")
def factorial(n):
    """Return n! for a non-negative integer n.

    Implemented iteratively so large inputs (e.g. 100) cannot approach the
    recursion limit, unlike the original recursive version.

    Raises:
        ValueError: if n is negative (the recursive original would recurse
            without bound in that case).
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
def sum_of_digits(n):
sum_of_digits = 0
for digit in str(n):
sum_of_digits += int(digit)
return sum_of_digits
number = 100
factorialnumber = factorial(number)
print(f"Factorial({number}) = {factorialnumber}")
sumfactorialnumber = sum_of_digits(factorialnumber)
print(
f"Sum of digits in the factorial number({number}) = {sumfactorialnumber}")
| 198 | 0 | 48 |
9276cadc1fb3cc5a0610a89dfa0a0b7421b2175c | 20,617 | py | Python | pysnmp-with-texts/HPN-ICF-STACK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/HPN-ICF-STACK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/HPN-ICF-STACK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
#
# PySNMP MIB module HPN-ICF-STACK-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-STACK-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:41:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): machine-generated by pysmi -- do not edit by hand.  The
# `mibBuilder` object is injected by the pysnmp MIB loader when this module
# is executed, so the file is not importable as a standalone Python module.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
hpnicfCommon, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfCommon")
ifDescr, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifDescr", "ifIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, Gauge32, Unsigned32, iso, Bits, Integer32, Counter64, ObjectIdentity, Counter32, TimeTicks, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Gauge32", "Unsigned32", "iso", "Bits", "Integer32", "Counter64", "ObjectIdentity", "Counter32", "TimeTicks", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity for HPN-ICF-STACK-MIB: IRF stack topology management,
# rooted at enterprise OID 1.3.6.1.4.1.11.2.14.11.15.2.91.
hpnicfStack = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91))
hpnicfStack.setRevisions(('2008-04-30 16:50',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: hpnicfStack.setRevisionsDescriptions(('The initial revision of this MIB module.',))
if mibBuilder.loadTexts: hpnicfStack.setLastUpdated('200804301650Z')
if mibBuilder.loadTexts: hpnicfStack.setOrganization('')
if mibBuilder.loadTexts: hpnicfStack.setContactInfo('')
if mibBuilder.loadTexts: hpnicfStack.setDescription('This MIB is used to manage STM (Stack Topology Management) information for IRF (Intelligent Resilient Framework) device. This MIB is applicable to IRF-capable products. Some objects in this MIB may be used only for some specific products, so users should refer to the related documents to acquire more detailed information.')
# ---- Global configuration scalars (subtree .1) ----------------------------
hpnicfStackGlobalConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1))
hpnicfStackMaxMember = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackMaxMember.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMaxMember.setDescription('The maximum number of members in a stack.')
hpnicfStackMemberNum = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackMemberNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMemberNum.setDescription('The number of members currently in a stack.')
hpnicfStackMaxConfigPriority = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackMaxConfigPriority.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMaxConfigPriority.setDescription('The highest priority that can be configured for a member in a stack.')
hpnicfStackAutoUpdate = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackAutoUpdate.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackAutoUpdate.setDescription('The function for automatically updating the image from the master to a device that is attempting to join the stack. When a new device tries to join a stack, STM verifies the image consistency between the joining device and the master. If the joining device uses a different image version than the master, the function updates the joining device with the image of the master. When this function is disabled, the new device can not join the stack if it uses a different software version than the master. disabled: disable auto update function enabled: enable auto update function')
hpnicfStackMacPersistence = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notPersist", 1), ("persistForSixMin", 2), ("persistForever", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackMacPersistence.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMacPersistence.setDescription('The mode of bridge MAC address persistence. When a stack starts, the bridge MAC address of the master is used as that of the stack. When the master leaves the stack, the bridge MAC address of the stack changes depending on the mode of bridge MAC address persistence. notPersist: The bridge MAC address of the new master is used as that of the stack immediately. persistForSixMin: The original bridge MAC address will be reserved for six minutes. In this period, if the master that has left rejoins the stack, the bridge MAC address of the stack will not change. If the old master does not rejoin the stack within this period, the bridge MAC address of the new master will be used as that of the stack. persistForever: Whether the master leaves or not, the bridge MAC address of the stack will never change.')
hpnicfStackLinkDelayInterval = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 30000))).setUnits('millisecond').setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackLinkDelayInterval.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackLinkDelayInterval.setDescription('Delay for stack ports to report a link down event. If the link comes up before the delay timer expires, the stack port will not report the link down event. If the link is not recovered before the delay timer expires, the stack port will report the change. If the delay is set to 0, the stack ports will report a link down event without delay. 0: no delay 1-30000(ms): delay time')
hpnicfStackTopology = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("chainConn", 1), ("ringConn", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackTopology.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackTopology.setDescription('Stack topology. chainConn: daisy-chain connection ringConn: ring connection')
# ---- Per-device configuration table (subtree .2), indexed by
# ---- entPhysicalIndex -----------------------------------------------------
hpnicfStackDeviceConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2), )
if mibBuilder.loadTexts: hpnicfStackDeviceConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackDeviceConfigTable.setDescription('This table contains objects to manage device information in a stack.')
hpnicfStackDeviceConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: hpnicfStackDeviceConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackDeviceConfigEntry.setDescription('This table contains objects to manage device information in a stack.')
hpnicfStackMemberID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackMemberID.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMemberID.setDescription('The member ID of the device in a stack.')
hpnicfStackConfigMemberID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackConfigMemberID.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackConfigMemberID.setDescription('The configured member ID of the device. The valid value ranges from 1 to the value in hpnicfStackMaxMember. The configured member ID will take effect at a reboot if it is unique within the stack.')
hpnicfStackPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackPriority.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPriority.setDescription('The priority of a device in the stack. The valid value ranges from 1 to the value in hpnicfStackMaxConfigPriority.')
hpnicfStackPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackPortNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortNum.setDescription('The number of stack ports enabled in a device.')
hpnicfStackPortMaxNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackPortMaxNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortMaxNum.setDescription('The maximum number of stack ports in a device.')
# ---- MPU (board) table (subtree .3), indexed by entPhysicalIndex ----------
hpnicfStackBoardConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 3), )
if mibBuilder.loadTexts: hpnicfStackBoardConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackBoardConfigTable.setDescription('This table contains objects to manage MPU information for a stack.')
hpnicfStackBoardConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 3, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: hpnicfStackBoardConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackBoardConfigEntry.setDescription('This table contains objects to manage MPU information for a stack.')
hpnicfStackBoardRole = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("slave", 1), ("master", 2), ("loading", 3), ("other", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackBoardRole.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackBoardRole.setDescription('The role of the MPU in a stack. slave: Standby MPU master: Master MPU loading: Standby MPU is loading the software image from the master. other: other')
hpnicfStackBoardBelongtoMember = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackBoardBelongtoMember.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackBoardBelongtoMember.setDescription('Member ID of the device that holds the current board.')
# ---- Stack port table (subtree .4), indexed by member ID + port index -----
hpnicfStackPortInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4), )
if mibBuilder.loadTexts: hpnicfStackPortInfoTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortInfoTable.setDescription('This table contains objects to manage stack port information for IRF stacked devices.')
hpnicfStackPortInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1), ).setIndexNames((0, "HPN-ICF-STACK-MIB", "hpnicfStackMemberID"), (0, "HPN-ICF-STACK-MIB", "hpnicfStackPortIndex"))
if mibBuilder.loadTexts: hpnicfStackPortInfoEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortInfoEntry.setDescription('This table contains objects to manage stack port information for IRF stacked devices.')
hpnicfStackPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1, 1), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpnicfStackPortIndex.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortIndex.setDescription('The index of a stack port of the device.')
hpnicfStackPortEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackPortEnable.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortEnable.setDescription("The status of a stack port of the device. If no physical ports are added to the stack port, its status is 'disabled'. If the stack port has physical ports, its status is 'enabled'. disabled: The stack port is disabled. enabled: The stack port is enabled.")
hpnicfStackPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("silent", 3), ("disabled", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackPortStatus.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortStatus.setDescription('The link status of a stack port on the device. up: A physical link is present on the stack port. down: No physical link is present on the stack port. silent: The link state of the stack port is up, but the port cannot transmit or receive traffic. disabled: The stack port does not contain physical links.')
hpnicfStackNeighbor = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackNeighbor.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackNeighbor.setDescription("The member ID of the stack port's neighbor. 0 means no neighbor exists.")
hpnicfStackPortForwardingPath = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 511))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackPortForwardingPath.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortForwardingPath.setDescription("List of egress member IDs on a stack port. Each member device uses the egress member ID lists to choose the outgoing stack port for known unicast frames to be sent out of other member devices. The egress member ID lists are comma separated. A zero-length string means no egress members exist. For example: In a ring stack of 1-2-3-4-5-7-1, if hpnicfStackPortForwardingPath.1.1 returns '7,5,4', IRF-port 1/1 will be the outgoing port for frames to reach members 7, 5, and 4 from member 1.")
# ---- Physical port table (subtree .5), indexed by entPhysicalIndex --------
hpnicfStackPhyPortInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 5), )
if mibBuilder.loadTexts: hpnicfStackPhyPortInfoTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPhyPortInfoTable.setDescription('This table contains objects to manage information about physical ports that can be used for IRF stacking.')
hpnicfStackPhyPortInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 5, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: hpnicfStackPhyPortInfoEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPhyPortInfoEntry.setDescription('This table contains objects to manage information about physical ports that can be used for IRF stacking.')
hpnicfStackBelongtoPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 5, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackBelongtoPort.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackBelongtoPort.setDescription('The index of the stack port to which the physical port is added. 0 means the physical port is not added to any stack port. The value will take effect when IRF is enabled on the device.')
# ---- Trap (notification) definitions (subtree .6) -------------------------
# NOTE(review): "TrapOjbects" is a typo in the source MIB; it is part of the
# exported symbol set, so it must be preserved as-is.
hpnicfStackTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6))
hpnicfStackTrapOjbects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0))
hpnicfStackPortLinkStatusChange = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 1)).setObjects(("HPN-ICF-STACK-MIB", "hpnicfStackMemberID"), ("HPN-ICF-STACK-MIB", "hpnicfStackPortIndex"), ("HPN-ICF-STACK-MIB", "hpnicfStackPortStatus"))
if mibBuilder.loadTexts: hpnicfStackPortLinkStatusChange.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortLinkStatusChange.setDescription('The hpnicfStackPortLinkStatusChange trap indicates that the link status of the stack port has changed.')
hpnicfStackTopologyChange = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 2)).setObjects(("HPN-ICF-STACK-MIB", "hpnicfStackTopology"))
if mibBuilder.loadTexts: hpnicfStackTopologyChange.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackTopologyChange.setDescription('The hpnicfStackTopologyChange trap indicates that the topology type of the IRF stack has changed.')
hpnicfStackMadBfdChangeNormal = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 3)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: hpnicfStackMadBfdChangeNormal.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMadBfdChangeNormal.setDescription('The hpnicfStackMadBfdChangeNormal trap indicates that the BFD MAD function changed to the normal state.')
hpnicfStackMadBfdChangeFailure = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 4)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: hpnicfStackMadBfdChangeFailure.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMadBfdChangeFailure.setDescription('The hpnicfStackMadBfdChangeFailure trap indicates that the BFD MAD function changed to the failure state.')
hpnicfStackMadLacpChangeNormal = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 5)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: hpnicfStackMadLacpChangeNormal.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMadLacpChangeNormal.setDescription('The hpnicfStackMadLacpChangeNormal trap indicates that the LACP MAD function changed to the normal state.')
hpnicfStackMadLacpChangeFailure = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 6)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: hpnicfStackMadLacpChangeFailure.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMadLacpChangeFailure.setDescription('The hpnicfStackMadLacpChangeFailure trap indicates that the LACP MAD function changed to the failure state.')
# Register every defined object with the MIB builder so other modules (and
# pysnmp itself) can resolve them by name.
mibBuilder.exportSymbols("HPN-ICF-STACK-MIB", hpnicfStackPortInfoEntry=hpnicfStackPortInfoEntry, hpnicfStackBelongtoPort=hpnicfStackBelongtoPort, PYSNMP_MODULE_ID=hpnicfStack, hpnicfStackMadBfdChangeNormal=hpnicfStackMadBfdChangeNormal, hpnicfStackTrapOjbects=hpnicfStackTrapOjbects, hpnicfStackMaxMember=hpnicfStackMaxMember, hpnicfStackPortForwardingPath=hpnicfStackPortForwardingPath, hpnicfStackTopologyChange=hpnicfStackTopologyChange, hpnicfStackPortLinkStatusChange=hpnicfStackPortLinkStatusChange, hpnicfStackPortMaxNum=hpnicfStackPortMaxNum, hpnicfStackNeighbor=hpnicfStackNeighbor, hpnicfStack=hpnicfStack, hpnicfStackPortStatus=hpnicfStackPortStatus, hpnicfStackTrap=hpnicfStackTrap, hpnicfStackMaxConfigPriority=hpnicfStackMaxConfigPriority, hpnicfStackMadLacpChangeNormal=hpnicfStackMadLacpChangeNormal, hpnicfStackTopology=hpnicfStackTopology, hpnicfStackBoardBelongtoMember=hpnicfStackBoardBelongtoMember, hpnicfStackConfigMemberID=hpnicfStackConfigMemberID, hpnicfStackMacPersistence=hpnicfStackMacPersistence, hpnicfStackPhyPortInfoEntry=hpnicfStackPhyPortInfoEntry, hpnicfStackMadBfdChangeFailure=hpnicfStackMadBfdChangeFailure, hpnicfStackPriority=hpnicfStackPriority, hpnicfStackPortNum=hpnicfStackPortNum, hpnicfStackPortIndex=hpnicfStackPortIndex, hpnicfStackMadLacpChangeFailure=hpnicfStackMadLacpChangeFailure, hpnicfStackPortEnable=hpnicfStackPortEnable, hpnicfStackMemberNum=hpnicfStackMemberNum, hpnicfStackBoardConfigTable=hpnicfStackBoardConfigTable, hpnicfStackBoardConfigEntry=hpnicfStackBoardConfigEntry, hpnicfStackDeviceConfigTable=hpnicfStackDeviceConfigTable, hpnicfStackLinkDelayInterval=hpnicfStackLinkDelayInterval, hpnicfStackMemberID=hpnicfStackMemberID, hpnicfStackAutoUpdate=hpnicfStackAutoUpdate, hpnicfStackBoardRole=hpnicfStackBoardRole, hpnicfStackPhyPortInfoTable=hpnicfStackPhyPortInfoTable, hpnicfStackDeviceConfigEntry=hpnicfStackDeviceConfigEntry, hpnicfStackGlobalConfig=hpnicfStackGlobalConfig, hpnicfStackPortInfoTable=hpnicfStackPortInfoTable)
| 156.189394 | 1,999 | 0.786826 | #
# PySNMP MIB module HPN-ICF-STACK-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-STACK-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:41:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
hpnicfCommon, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfCommon")
ifDescr, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifDescr", "ifIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, Gauge32, Unsigned32, iso, Bits, Integer32, Counter64, ObjectIdentity, Counter32, TimeTicks, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Gauge32", "Unsigned32", "iso", "Bits", "Integer32", "Counter64", "ObjectIdentity", "Counter32", "TimeTicks", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hpnicfStack = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91))
hpnicfStack.setRevisions(('2008-04-30 16:50',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hpnicfStack.setRevisionsDescriptions(('The initial revision of this MIB module.',))
if mibBuilder.loadTexts: hpnicfStack.setLastUpdated('200804301650Z')
if mibBuilder.loadTexts: hpnicfStack.setOrganization('')
if mibBuilder.loadTexts: hpnicfStack.setContactInfo('')
if mibBuilder.loadTexts: hpnicfStack.setDescription('This MIB is used to manage STM (Stack Topology Management) information for IRF (Intelligent Resilient Framework) device. This MIB is applicable to IRF-capable products. Some objects in this MIB may be used only for some specific products, so users should refer to the related documents to acquire more detailed information.')
hpnicfStackGlobalConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1))
hpnicfStackMaxMember = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackMaxMember.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMaxMember.setDescription('The maximum number of members in a stack.')
hpnicfStackMemberNum = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackMemberNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMemberNum.setDescription('The number of members currently in a stack.')
hpnicfStackMaxConfigPriority = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackMaxConfigPriority.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMaxConfigPriority.setDescription('The highest priority that can be configured for a member in a stack.')
hpnicfStackAutoUpdate = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackAutoUpdate.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackAutoUpdate.setDescription('The function for automatically updating the image from the master to a device that is attempting to join the stack. When a new device tries to join a stack, STM verifies the image consistency between the joining device and the master. If the joining device uses a different image version than the master, the function updates the joining device with the image of the master. When this function is disabled, the new device can not join the stack if it uses a different software version than the master. disabled: disable auto update function enabled: enable auto update function')
hpnicfStackMacPersistence = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notPersist", 1), ("persistForSixMin", 2), ("persistForever", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackMacPersistence.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMacPersistence.setDescription('The mode of bridge MAC address persistence. When a stack starts, the bridge MAC address of the master is used as that of the stack. When the master leaves the stack, the bridge MAC address of the stack changes depending on the mode of bridge MAC address persistence. notPersist: The bridge MAC address of the new master is used as that of the stack immediately. persistForSixMin: The original bridge MAC address will be reserved for six minutes. In this period, if the master that has left rejoins the stack, the bridge MAC address of the stack will not change. If the old master does not rejoin the stack within this period, the bridge MAC address of the new master will be used as that of the stack. persistForever: Whether the master leaves or not, the bridge MAC address of the stack will never change.')
hpnicfStackLinkDelayInterval = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 30000))).setUnits('millisecond').setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackLinkDelayInterval.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackLinkDelayInterval.setDescription('Delay for stack ports to report a link down event. If the link comes up before the delay timer expires, the stack port will not report the link down event. If the link is not recovered before the delay timer expires, the stack port will report the change. If the delay is set to 0, the stack ports will report a link down event without delay. 0: no delay 1-30000(ms): delay time')
hpnicfStackTopology = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("chainConn", 1), ("ringConn", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackTopology.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackTopology.setDescription('Stack topology. chainConn: daisy-chain connection ringConn: ring connection')
hpnicfStackDeviceConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2), )
if mibBuilder.loadTexts: hpnicfStackDeviceConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackDeviceConfigTable.setDescription('This table contains objects to manage device information in a stack.')
hpnicfStackDeviceConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: hpnicfStackDeviceConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackDeviceConfigEntry.setDescription('This table contains objects to manage device information in a stack.')
hpnicfStackMemberID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackMemberID.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMemberID.setDescription('The member ID of the device in a stack.')
hpnicfStackConfigMemberID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackConfigMemberID.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackConfigMemberID.setDescription('The configured member ID of the device. The valid value ranges from 1 to the value in hpnicfStackMaxMember. The configured member ID will take effect at a reboot if it is unique within the stack.')
hpnicfStackPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackPriority.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPriority.setDescription('The priority of a device in the stack. The valid value ranges from 1 to the value in hpnicfStackMaxConfigPriority.')
hpnicfStackPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackPortNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortNum.setDescription('The number of stack ports enabled in a device.')
hpnicfStackPortMaxNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackPortMaxNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortMaxNum.setDescription('The maximum number of stack ports in a device.')
hpnicfStackBoardConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 3), )
if mibBuilder.loadTexts: hpnicfStackBoardConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackBoardConfigTable.setDescription('This table contains objects to manage MPU information for a stack.')
hpnicfStackBoardConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 3, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: hpnicfStackBoardConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackBoardConfigEntry.setDescription('This table contains objects to manage MPU information for a stack.')
hpnicfStackBoardRole = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("slave", 1), ("master", 2), ("loading", 3), ("other", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackBoardRole.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackBoardRole.setDescription('The role of the MPU in a stack. slave: Standby MPU master: Master MPU loading: Standby MPU is loading the software image from the master. other: other')
hpnicfStackBoardBelongtoMember = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackBoardBelongtoMember.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackBoardBelongtoMember.setDescription('Member ID of the device that holds the current board.')
hpnicfStackPortInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4), )
if mibBuilder.loadTexts: hpnicfStackPortInfoTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortInfoTable.setDescription('This table contains objects to manage stack port information for IRF stacked devices.')
hpnicfStackPortInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1), ).setIndexNames((0, "HPN-ICF-STACK-MIB", "hpnicfStackMemberID"), (0, "HPN-ICF-STACK-MIB", "hpnicfStackPortIndex"))
if mibBuilder.loadTexts: hpnicfStackPortInfoEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortInfoEntry.setDescription('This table contains objects to manage stack port information for IRF stacked devices.')
hpnicfStackPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1, 1), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpnicfStackPortIndex.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortIndex.setDescription('The index of a stack port of the device.')
hpnicfStackPortEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackPortEnable.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortEnable.setDescription("The status of a stack port of the device. If no physical ports are added to the stack port, its status is 'disabled'. If the stack port has physical ports, its status is 'enabled'. disabled: The stack port is disabled. enabled: The stack port is enabled.")
hpnicfStackPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("silent", 3), ("disabled", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackPortStatus.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortStatus.setDescription('The link status of a stack port on the device. up: A physical link is present on the stack port. down: No physical link is present on the stack port. silent: The link state of the stack port is up, but the port cannot transmit or receive traffic. disabled: The stack port does not contain physical links.')
hpnicfStackNeighbor = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackNeighbor.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackNeighbor.setDescription("The member ID of the stack port's neighbor. 0 means no neighbor exists.")
hpnicfStackPortForwardingPath = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 4, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 511))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfStackPortForwardingPath.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortForwardingPath.setDescription("List of egress member IDs on a stack port. Each member device uses the egress member ID lists to choose the outgoing stack port for known unicast frames to be sent out of other member devices. The egress member ID lists are comma separated. A zero-length string means no egress members exist. For example: In a ring stack of 1-2-3-4-5-7-1, if hpnicfStackPortForwardingPath.1.1 returns '7,5,4', IRF-port 1/1 will be the outgoing port for frames to reach members 7, 5, and 4 from member 1.")
hpnicfStackPhyPortInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 5), )
if mibBuilder.loadTexts: hpnicfStackPhyPortInfoTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPhyPortInfoTable.setDescription('This table contains objects to manage information about physical ports that can be used for IRF stacking.')
hpnicfStackPhyPortInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 5, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: hpnicfStackPhyPortInfoEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPhyPortInfoEntry.setDescription('This table contains objects to manage information about physical ports that can be used for IRF stacking.')
hpnicfStackBelongtoPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 5, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfStackBelongtoPort.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackBelongtoPort.setDescription('The index of the stack port to which the physical port is added. 0 means the physical port is not added to any stack port. The value will take effect when IRF is enabled on the device.')
hpnicfStackTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6))
hpnicfStackTrapOjbects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0))
hpnicfStackPortLinkStatusChange = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 1)).setObjects(("HPN-ICF-STACK-MIB", "hpnicfStackMemberID"), ("HPN-ICF-STACK-MIB", "hpnicfStackPortIndex"), ("HPN-ICF-STACK-MIB", "hpnicfStackPortStatus"))
if mibBuilder.loadTexts: hpnicfStackPortLinkStatusChange.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackPortLinkStatusChange.setDescription('The hpnicfStackPortLinkStatusChange trap indicates that the link status of the stack port has changed.')
hpnicfStackTopologyChange = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 2)).setObjects(("HPN-ICF-STACK-MIB", "hpnicfStackTopology"))
if mibBuilder.loadTexts: hpnicfStackTopologyChange.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackTopologyChange.setDescription('The hpnicfStackTopologyChange trap indicates that the topology type of the IRF stack has changed.')
hpnicfStackMadBfdChangeNormal = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 3)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: hpnicfStackMadBfdChangeNormal.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMadBfdChangeNormal.setDescription('The hpnicfStackMadBfdChangeNormal trap indicates that the BFD MAD function changed to the normal state.')
hpnicfStackMadBfdChangeFailure = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 4)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: hpnicfStackMadBfdChangeFailure.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMadBfdChangeFailure.setDescription('The hpnicfStackMadBfdChangeFailure trap indicates that the BFD MAD function changed to the failure state.')
hpnicfStackMadLacpChangeNormal = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 5)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: hpnicfStackMadLacpChangeNormal.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMadLacpChangeNormal.setDescription('The hpnicfStackMadLacpChangeNormal trap indicates that the LACP MAD function changed to the normal state.')
hpnicfStackMadLacpChangeFailure = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 91, 6, 0, 6)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: hpnicfStackMadLacpChangeFailure.setStatus('current')
if mibBuilder.loadTexts: hpnicfStackMadLacpChangeFailure.setDescription('The hpnicfStackMadLacpChangeFailure trap indicates that the LACP MAD function changed to the failure state.')
mibBuilder.exportSymbols("HPN-ICF-STACK-MIB", hpnicfStackPortInfoEntry=hpnicfStackPortInfoEntry, hpnicfStackBelongtoPort=hpnicfStackBelongtoPort, PYSNMP_MODULE_ID=hpnicfStack, hpnicfStackMadBfdChangeNormal=hpnicfStackMadBfdChangeNormal, hpnicfStackTrapOjbects=hpnicfStackTrapOjbects, hpnicfStackMaxMember=hpnicfStackMaxMember, hpnicfStackPortForwardingPath=hpnicfStackPortForwardingPath, hpnicfStackTopologyChange=hpnicfStackTopologyChange, hpnicfStackPortLinkStatusChange=hpnicfStackPortLinkStatusChange, hpnicfStackPortMaxNum=hpnicfStackPortMaxNum, hpnicfStackNeighbor=hpnicfStackNeighbor, hpnicfStack=hpnicfStack, hpnicfStackPortStatus=hpnicfStackPortStatus, hpnicfStackTrap=hpnicfStackTrap, hpnicfStackMaxConfigPriority=hpnicfStackMaxConfigPriority, hpnicfStackMadLacpChangeNormal=hpnicfStackMadLacpChangeNormal, hpnicfStackTopology=hpnicfStackTopology, hpnicfStackBoardBelongtoMember=hpnicfStackBoardBelongtoMember, hpnicfStackConfigMemberID=hpnicfStackConfigMemberID, hpnicfStackMacPersistence=hpnicfStackMacPersistence, hpnicfStackPhyPortInfoEntry=hpnicfStackPhyPortInfoEntry, hpnicfStackMadBfdChangeFailure=hpnicfStackMadBfdChangeFailure, hpnicfStackPriority=hpnicfStackPriority, hpnicfStackPortNum=hpnicfStackPortNum, hpnicfStackPortIndex=hpnicfStackPortIndex, hpnicfStackMadLacpChangeFailure=hpnicfStackMadLacpChangeFailure, hpnicfStackPortEnable=hpnicfStackPortEnable, hpnicfStackMemberNum=hpnicfStackMemberNum, hpnicfStackBoardConfigTable=hpnicfStackBoardConfigTable, hpnicfStackBoardConfigEntry=hpnicfStackBoardConfigEntry, hpnicfStackDeviceConfigTable=hpnicfStackDeviceConfigTable, hpnicfStackLinkDelayInterval=hpnicfStackLinkDelayInterval, hpnicfStackMemberID=hpnicfStackMemberID, hpnicfStackAutoUpdate=hpnicfStackAutoUpdate, hpnicfStackBoardRole=hpnicfStackBoardRole, hpnicfStackPhyPortInfoTable=hpnicfStackPhyPortInfoTable, hpnicfStackDeviceConfigEntry=hpnicfStackDeviceConfigEntry, hpnicfStackGlobalConfig=hpnicfStackGlobalConfig, hpnicfStackPortInfoTable=hpnicfStackPortInfoTable)
| 0 | 0 | 0 |
ddc6fba39e2884441d6015cadffdc8b157512ecc | 25,625 | py | Python | hwrt/features.py | MartinThoma/hwrt | 7b274fa3022292bb1215eaec99f1826f64f98a07 | [
"MIT"
] | 65 | 2015-04-08T12:11:22.000Z | 2022-02-28T23:46:53.000Z | hwrt/features.py | MartinThoma/hwrt | 7b274fa3022292bb1215eaec99f1826f64f98a07 | [
"MIT"
] | 35 | 2015-01-05T11:56:30.000Z | 2022-03-12T00:55:38.000Z | hwrt/features.py | MartinThoma/hwrt | 7b274fa3022292bb1215eaec99f1826f64f98a07 | [
"MIT"
] | 18 | 2015-01-19T15:57:25.000Z | 2021-02-15T20:38:32.000Z | """
Feature extraction algorithms.
Each algorithm works on the HandwrittenData class. They have to be applied like
this:
>>> import hwrt.features
>>> from hwrt.handwritten_data import HandwrittenData
>>> data_json = '[[{"time": 123, "x": 45, "y": 67}]]'
>>> a = HandwrittenData(raw_data_id=2953, raw_data_json=data_json)
>>> feature_list = [StrokeCount(),
... ConstantPointCoordinates(strokes=4,
... points_per_stroke=20,
... fill_empty_with=0)]
>>> x = a.feature_extraction(feature_list)
"""
# Core Library modules
import abc
import logging
import sys
from itertools import combinations_with_replacement as combinations_wr
from typing import Any, Dict, List
# Third party modules
import numpy
from PIL import Image, ImageDraw
# Local modules
from . import geometry, handwritten_data, preprocessing, utils
logger = logging.getLogger(__name__)
def get_features(model_description_features: List[Dict[str, Any]]):
    """Get features from a list of dictionaries

    Parameters
    ----------
    model_description_features : List[Dict[str, Any]]

    Examples
    --------
    >>> l = [{'StrokeCount': None}, \
             {'ConstantPointCoordinates': \
                  [{'strokes': 4}, \
                   {'points_per_stroke': 81}, \
                   {'fill_empty_with': 0}, \
                   {'pen_down': False}] \
             } \
            ]
    >>> get_features(l)
    [StrokeCount, ConstantPointCoordinates
     - strokes: 4
     - points per stroke: 81
     - fill empty with: 0
     - pen down feature: False
     - pixel_env: 0
    ]
    """
    # Feature classes live in this very module; hand the module object to the
    # generic object-list factory.
    current_module = sys.modules[__name__]
    return utils.get_objectlist(
        model_description_features, config_key="features", module=current_module
    )
def print_featurelist(feature_list: List):
    """
    Print the feature_list in a human-readable form.

    Parameters
    ----------
    feature_list : List
        feature objects
    """
    # Total dimension is the sum over all individual feature dimensions.
    total_dimension = 0
    for feature in feature_list:
        total_dimension += feature.get_dimension()
    print("## Features (%i)" % total_dimension)
    print("```")
    for feature in feature_list:
        print("* %s" % str(feature))
    print("```")
class Feature(metaclass=abc.ABCMeta):
    """Abstract class which defines which methods to implement for features."""

    @abc.abstractmethod
    def __call__(self, hwr_obj):
        """Get the features value for a given recording ``hwr_obj``.

        Concrete subclasses invoke ``super().__call__(hwr_obj)`` first so that
        this type check runs for every feature.
        """
        assert isinstance(
            hwr_obj, handwritten_data.HandwrittenData
        ), "handwritten data is not of type HandwrittenData, but of %r" % type(hwr_obj)

    @abc.abstractmethod
    def get_dimension(self):
        """Return the length of the list which __call__ will return."""
# Only feature calculation classes follow
# Every feature class must have a __str__, __repr__ function so that error
# messages can help you to find and fix bugs in features.
# Every feature class must have a __call__ function which is used to get the
# features value(s) for a given recording.
# Every feature class must have a get_dimension function so that the total
# number of features can be calculated and checked for consistency.
#
# * __call__ must take exactly one argument of type HandwrittenData
# * __call__ must return a list of length get_dimension()
# * get_dimension must return a positive number
# * have a 'normalize' attribute that is either True or False
# Local features
class ConstantPointCoordinates(Feature):
    """Take the first ``points_per_stroke=20`` points coordinates of the first
    ``strokes=4`` strokes as features. This leads to
    :math:`2 \\cdot \\text{points_per_stroke} \\cdot \\text{strokes}`
    features.

    If ``points`` is set to 0, the first ``points_per_stroke`` point
    coordinates and the ``pen_down`` feature is used. This leads to
    :math:`3 \\cdot \\text{points_per_stroke}` features.

    Parameters
    ----------
    strokes : int
    points_per_stroke : int
    fill_empty_with : float
    pen_down : boolean
    pixel_env : int
        How big should the pixel map around the given point be?
    """

    normalize = False

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        if self.strokes > 0:
            if self.pixel_env > 0:
                # Per point: x, y plus a (2*pixel_env+1)^2 grayscale patch.
                return (
                    (2 + (1 + 2 * self.pixel_env) ** 2)
                    * self.strokes
                    * self.points_per_stroke
                )
            else:
                return 2 * self.strokes * self.points_per_stroke
        else:
            if self.pen_down:
                # x, y and the pen_down flag per point.
                return 3 * self.points_per_stroke
            else:
                return 2 * self.points_per_stroke

    def _features_with_strokes(self, hwr_obj):
        """Calculate the ConstantPointCoordinates features for the case of
        a fixed number of strokes."""
        x = []
        # Render the recording into a grayscale image so that pixel
        # environments around each point can be sampled.
        img = Image.new(
            "L",
            (
                (int(hwr_obj.get_width() * self.scaling_factor) + 2),
                (int(hwr_obj.get_height() * self.scaling_factor) + 2),
            ),
            "black",
        )
        draw = ImageDraw.Draw(img, "L")
        pointlist = hwr_obj.get_pointlist()
        bb = hwr_obj.get_bounding_box()
        for stroke_nr in range(self.strokes):
            last_point = None
            # make sure that the current symbol actually has that many
            # strokes
            if stroke_nr < len(pointlist):
                for point_nr in range(self.points_per_stroke):
                    if point_nr < len(pointlist[stroke_nr]):
                        point = pointlist[stroke_nr][point_nr]
                        x.append(pointlist[stroke_nr][point_nr]["x"])
                        x.append(pointlist[stroke_nr][point_nr]["y"])
                        if last_point is None:
                            last_point = point
                        # Draw the segment from the previous point to this one
                        # (translated by the bounding box, scaled to pixels).
                        y_from = int(
                            (-bb["miny"] + last_point["y"]) * self.scaling_factor
                        )
                        x_from = int(
                            (-bb["minx"] + last_point["x"]) * self.scaling_factor
                        )
                        y_to = int((-bb["miny"] + point["y"]) * self.scaling_factor)
                        x_to = int((-bb["minx"] + point["x"]) * self.scaling_factor)
                        draw.line([x_from, y_from, x_to, y_to], fill="#ffffff", width=1)
                        if self.pixel_env > 0:
                            # Sample the square pixel neighborhood around the
                            # current point from the image drawn so far.
                            pix = img.load()
                            for x_offset in range(-self.pixel_env, self.pixel_env + 1):
                                for y_offset in range(
                                    -self.pixel_env, self.pixel_env + 1
                                ):
                                    xp = (
                                        int(
                                            (-bb["minx"] + point["x"])
                                            * self.scaling_factor
                                        )
                                        + x_offset
                                    )
                                    yp = (
                                        int(
                                            (-bb["miny"] + point["y"])
                                            * self.scaling_factor
                                        )
                                        + y_offset
                                    )
                                    # Clamp at the lower image border only;
                                    # the +2 image margin covers the upper one.
                                    xp = max(0, xp)
                                    yp = max(0, yp)
                                    x.append(pix[xp, yp])
                        last_point = point
                    else:
                        # Stroke has fewer points than requested: pad.
                        x.append(self.fill_empty_with)
                        x.append(self.fill_empty_with)
                        if self.pixel_env > 0:
                            for _ in range((1 + 2 * self.pixel_env) ** 2):
                                x.append(self.fill_empty_with)
            else:
                # Recording has fewer strokes than requested: pad a full
                # stroke's worth of values.
                for _ in range(self.points_per_stroke):
                    x.append(self.fill_empty_with)
                    x.append(self.fill_empty_with)
                    if self.pixel_env > 0:
                        for _ in range((1 + 2 * self.pixel_env) ** 2):
                            x.append(self.fill_empty_with)
        del draw
        return x

    def _features_without_strokes(self, hwr_obj):
        """Calculate the ConstantPointCoordinates features for the case of
        a single (collapsed) stroke with pen_down features."""
        x = []
        for point in hwr_obj.get_pointlist()[0]:
            # Stop once the fixed feature length has been reached.
            if len(x) >= 3 * self.points_per_stroke or (
                len(x) >= 2 * self.points_per_stroke and not self.pen_down
            ):
                break
            x.append(point["x"])
            x.append(point["y"])
            if self.pen_down:
                if "pen_down" not in point:
                    logger.error(
                        "The "
                        "ConstantPointCoordinates(strokes=0) "
                        "feature should only be used after "
                        "SpaceEvenly preprocessing step."
                    )
                else:
                    x.append(int(point["pen_down"]))
        # Pad to the fixed dimension.
        if self.pen_down:
            while len(x) != 3 * self.points_per_stroke:
                x.append(self.fill_empty_with)
        else:
            while len(x) != 2 * self.points_per_stroke:
                x.append(self.fill_empty_with)
        return x
class FirstNPoints(Feature):
    """Similar to the ``ConstantPointCoordinates`` feature, this feature takes
    the first ``n=81`` point coordinates. It also has the
    ``fill_empty_with=0`` to make sure that the dimension of this feature is
    always the same."""

    # NOTE(review): ``fill_empty_with`` is mentioned above, but ``__init__``
    # only takes ``n`` — confirm that padding is actually applied in
    # ``__call__`` for recordings with fewer than n points.
    normalize = False

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Two values (x, y) per point; ``self.n`` is set in ``__init__``.
        return 2 * self.n
# Global features
class Bitmap(Feature):
    """Get a fixed-size bitmap of the recording."""

    normalize = True

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # One value per pixel of the size x size bitmap; ``self.size`` is set
        # in ``__init__`` (not visible in this chunk).
        return self.size ** 2
class StrokeCount(Feature):
    """Stroke count as a 1 dimensional recording."""

    normalize = True

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Single scalar: the number of strokes.
        return 1
class Ink(Feature):
    """Ink as a 1 dimensional feature. It gives a numeric value for the amount
    of ink this would eventually have consumed.
    """

    normalize = True

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Single scalar: the amount of ink.
        return 1
class AspectRatio(Feature):
    """Aspect ratio of a recording as a 1 dimensional feature."""

    normalize = True

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Single scalar: the aspect ratio.
        return 1
class Width(Feature):
    """Width of a recording as a 1 dimensional feature.

    .. note::
        This is the current width. So if the recording was scaled, this will
        not be the original width.
    """

    normalize = True

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Single scalar: the (possibly rescaled) width.
        return 1
class Height(Feature):
    """Height of a recording as a a 1 dimensional feature.

    .. note::
        This is the current height. So if the recording was scaled, this will
        not be the original height.
    """

    normalize = True

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Single scalar: the (possibly rescaled) height.
        return 1
class Time(Feature):
    """The time in milliseconds it took to create the recording. This is a 1
    dimensional feature."""

    normalize = True

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Single scalar: the duration in milliseconds.
        return 1
class CenterOfMass(Feature):
    """Center of mass of a recording as a 2 dimensional feature."""

    normalize = True

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Two values: the (x, y) coordinates of the center of mass.
        return 2
class StrokeCenter(Feature):
    """Get the stroke center of mass coordinates as a 2 dimensional feature."""

    normalize = True

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Two values (x, y) per stroke; ``self.strokes`` is set in
        # ``__init__`` (not visible in this chunk).
        return self.strokes * 2
class DouglasPeuckerPoints(Feature):
    """Get the number of points which are left after applying the Douglas
    Peucker line simplification algorithm.
    """

    normalize = True

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Single scalar: the number of remaining points.
        return 1

    def _stroke_simplification(self, pointlist):
        """The Douglas-Peucker line simplification takes a list of points as an
        argument. It tries to simplify this list by removing as many points
        as possible while still maintaining the overall shape of the stroke.
        It does so by taking the first and the last point, connecting them
        by a straight line and searching for the point with the highest
        distance. If that distance is bigger than 'epsilon', the point is
        important and the algorithm continues recursively."""
        # Find the point with the biggest perpendicular distance to the line
        # between the first and the last point.
        dmax = 0
        index = 0
        for i in range(1, len(pointlist)):
            d = geometry.perpendicular_distance(
                pointlist[i], pointlist[0], pointlist[-1]
            )
            if d > dmax:
                index = i
                dmax = d
        # If the maximum distance is bigger than the threshold 'epsilon'
        # (set in ``__init__``, not visible in this chunk), then simplify the
        # pointlist recursively.
        if dmax >= self.epsilon:
            # Bugfix: the first half must *include* the split point
            # ``pointlist[index]``. The previous slice ``pointlist[0:index]``
            # recursed on the wrong baseline and permanently dropped
            # ``pointlist[index - 1]`` via the ``[:-1]`` below. With the
            # inclusive slice, ``rec_results1[-1] == rec_results2[0]``, so
            # ``[:-1]`` merely removes the duplicated split point.
            rec_results1 = self._stroke_simplification(pointlist[0 : index + 1])
            rec_results2 = self._stroke_simplification(pointlist[index:])
            result_list = rec_results1[:-1] + rec_results2
        else:
            # No point deviates enough: the segment collapses to its
            # endpoints.
            result_list = [pointlist[0], pointlist[-1]]
        return result_list
class StrokeIntersections(Feature):
    """Count the number of intersections which strokes in the recording have
    with each other in form of a symmetrical matrix for the first
    ``stroke=4`` strokes. The feature dimension is
    :math:`round(\\frac{\\text{strokes}^2}{2} + \\frac{\\text{strokes}}{2})`
    because the symmetrical part is discarded.

    ======= ======= ======= ======= ===
    -       stroke1 stroke2 stroke3
    ------- ------- ------- ------- ---
    stroke1 0       1       0       ...
    stroke2 1       2       0       ...
    stroke3 0       0       0       ...
    ...     ...     ...     ...     ...
    ======= ======= ======= ======= ===

    Returns values of upper triangular matrix (including diagonal)
    from left to right, top to bottom.

    ..warning
        This method has an error. It should probably not be used.
    """

    normalize = True

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Entries of the upper triangular part (including the diagonal) of a
        # strokes x strokes matrix: s*(s+1)/2.
        return int(round(float(self.strokes ** 2) / 2 + float(self.strokes) / 2))
class ReCurvature(Feature):
    """Re-curvature is a 1 dimensional, stroke-global feature for a recording.
    It is the ratio
    :math:`\\frac{\\text{height}(s)}{\\text{length}(s)}`.
    If ``length(s) == 0``, then the re-curvature is defined to be 1.
    """

    normalize = True

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # One re-curvature value per stroke; ``self.strokes`` is set in
        # ``__init__`` (not visible in this chunk).
        return self.strokes
| 31.557882 | 88 | 0.554302 | """
Feature extraction algorithms.
Each algorithm works on the HandwrittenData class. They have to be applied like
this:
>>> import hwrt.features
>>> from hwrt.handwritten_data import HandwrittenData
>>> data_json = '[[{"time": 123, "x": 45, "y": 67}]]'
>>> a = HandwrittenData(raw_data_id=2953, raw_data_json=data_json)
>>> feature_list = [StrokeCount(),
... ConstantPointCoordinates(strokes=4,
... points_per_stroke=20,
... fill_empty_with=0)]
>>> x = a.feature_extraction(feature_list)
"""
# Core Library modules
import abc
import logging
import sys
from itertools import combinations_with_replacement as combinations_wr
from typing import Any, Dict, List
# Third party modules
import numpy
from PIL import Image, ImageDraw
# Local modules
from . import geometry, handwritten_data, preprocessing, utils
logger = logging.getLogger(__name__)
def get_features(model_description_features: List[Dict[str, Any]]):
    """Get features from a list of dictionaries

    Parameters
    ----------
    model_description_features : List[Dict[str, Any]]

    Examples
    --------
    >>> l = [{'StrokeCount': None}, \
             {'ConstantPointCoordinates': \
                  [{'strokes': 4}, \
                   {'points_per_stroke': 81}, \
                   {'fill_empty_with': 0}, \
                   {'pen_down': False}] \
             } \
            ]
    >>> get_features(l)
    [StrokeCount, ConstantPointCoordinates
     - strokes: 4
     - points per stroke: 81
     - fill empty with: 0
     - pen down feature: False
     - pixel_env: 0
    ]
    """
    # Feature classes live in this very module; hand the module object to the
    # generic object-list factory.
    current_module = sys.modules[__name__]
    return utils.get_objectlist(
        model_description_features, config_key="features", module=current_module
    )
def print_featurelist(feature_list: List):
    """
    Print the feature_list in a human-readable form.

    Parameters
    ----------
    feature_list : List
        feature objects
    """
    # Total dimension is the sum over all individual feature dimensions.
    total_dimension = 0
    for feature in feature_list:
        total_dimension += feature.get_dimension()
    print("## Features (%i)" % total_dimension)
    print("```")
    for feature in feature_list:
        print("* %s" % str(feature))
    print("```")
class Feature(metaclass=abc.ABCMeta):
    """Abstract class which defines which methods to implement for features."""

    @abc.abstractmethod
    def __call__(self, hwr_obj):
        """Get the features value for a given recording ``hwr_obj``.

        Concrete subclasses invoke ``super().__call__(hwr_obj)`` first so that
        this type check runs for every feature.
        """
        assert isinstance(
            hwr_obj, handwritten_data.HandwrittenData
        ), "handwritten data is not of type HandwrittenData, but of %r" % type(hwr_obj)

    @abc.abstractmethod
    def get_dimension(self):
        """Return the length of the list which __call__ will return."""
# Only feature calculation classes follow
# Every feature class must have a __str__, __repr__ function so that error
# messages can help you to find and fix bugs in features.
# Every feature class must have a __call__ function which is used to get the
# features value(s) for a given recording.
# Every feature class must have a get_dimension function so that the total
# number of features can be calculated and checked for consistency.
#
# * __call__ must take exactly one argument of type HandwrittenData
# * __call__ must return a list of length get_dimension()
# * get_dimension must return a positive number
# * have a 'normalize' attribute that is either True or False
# Local features
class ConstantPointCoordinates(Feature):
    """Take the first ``points_per_stroke=20`` points coordinates of the first
    ``strokes=4`` strokes as features. This leads to
    :math:`2 \\cdot \\text{points_per_stroke} \\cdot \\text{strokes}`
    features.

    If ``points`` is set to 0, the first ``points_per_stroke`` point
    coordinates and the ``pen_down`` feature is used. This leads to
    :math:`3 \\cdot \\text{points_per_stroke}` features.

    Parameters
    ----------
    strokes : int
    points_per_stroke : int
    fill_empty_with : float
    pen_down : boolean
    pixel_env : int
        How big should the pixel map around the given point be?
    """

    normalize = False

    def __init__(
        self,
        strokes=4,
        points_per_stroke=20,
        fill_empty_with=0,
        pen_down=True,
        pixel_env=0,
        scaling_factor=32,
    ):
        # strokes == 0 switches to the single-collapsed-stroke mode
        # (see _features_without_strokes).
        self.strokes = strokes
        self.points_per_stroke = points_per_stroke
        # Padding value for missing points / strokes.
        self.fill_empty_with = fill_empty_with
        self.pen_down = pen_down
        # Side length offset of the sampled pixel neighborhood (0 = none).
        self.pixel_env = pixel_env
        # Recording-units-to-pixels factor for the rendered image.
        self.scaling_factor = scaling_factor

    def __repr__(self):
        return (
            "ConstantPointCoordinates\n"
            " - strokes: %i\n"
            " - points per stroke: %i\n"
            " - fill empty with: %i\n"
            " - pen down feature: %r\n"
            " - pixel_env: %i\n"
        ) % (
            self.strokes,
            self.points_per_stroke,
            self.fill_empty_with,
            self.pen_down,
            self.pixel_env,
        )

    def __str__(self):
        return (
            "constant point coordinates\n"
            " - strokes: %i\n"
            " - points per stroke: %i\n"
            " - fill empty with: %i\n"
            " - pen down feature: %r\n"
            " - pixel_env: %i\n"
        ) % (
            self.strokes,
            self.points_per_stroke,
            self.fill_empty_with,
            self.pen_down,
            self.pixel_env,
        )

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        if self.strokes > 0:
            if self.pixel_env > 0:
                # Per point: x, y plus a (2*pixel_env+1)^2 grayscale patch.
                return (
                    (2 + (1 + 2 * self.pixel_env) ** 2)
                    * self.strokes
                    * self.points_per_stroke
                )
            else:
                return 2 * self.strokes * self.points_per_stroke
        else:
            if self.pen_down:
                # x, y and the pen_down flag per point.
                return 3 * self.points_per_stroke
            else:
                return 2 * self.points_per_stroke

    def _features_with_strokes(self, hwr_obj):
        """Calculate the ConstantPointCoordinates features for the case of
        a fixed number of strokes."""
        x = []
        # Render the recording into a grayscale image so that pixel
        # environments around each point can be sampled.
        img = Image.new(
            "L",
            (
                (int(hwr_obj.get_width() * self.scaling_factor) + 2),
                (int(hwr_obj.get_height() * self.scaling_factor) + 2),
            ),
            "black",
        )
        draw = ImageDraw.Draw(img, "L")
        pointlist = hwr_obj.get_pointlist()
        bb = hwr_obj.get_bounding_box()
        for stroke_nr in range(self.strokes):
            last_point = None
            # make sure that the current symbol actually has that many
            # strokes
            if stroke_nr < len(pointlist):
                for point_nr in range(self.points_per_stroke):
                    if point_nr < len(pointlist[stroke_nr]):
                        point = pointlist[stroke_nr][point_nr]
                        x.append(pointlist[stroke_nr][point_nr]["x"])
                        x.append(pointlist[stroke_nr][point_nr]["y"])
                        if last_point is None:
                            last_point = point
                        # Draw the segment from the previous point to this one
                        # (translated by the bounding box, scaled to pixels).
                        y_from = int(
                            (-bb["miny"] + last_point["y"]) * self.scaling_factor
                        )
                        x_from = int(
                            (-bb["minx"] + last_point["x"]) * self.scaling_factor
                        )
                        y_to = int((-bb["miny"] + point["y"]) * self.scaling_factor)
                        x_to = int((-bb["minx"] + point["x"]) * self.scaling_factor)
                        draw.line([x_from, y_from, x_to, y_to], fill="#ffffff", width=1)
                        if self.pixel_env > 0:
                            # Sample the square pixel neighborhood around the
                            # current point from the image drawn so far.
                            pix = img.load()
                            for x_offset in range(-self.pixel_env, self.pixel_env + 1):
                                for y_offset in range(
                                    -self.pixel_env, self.pixel_env + 1
                                ):
                                    xp = (
                                        int(
                                            (-bb["minx"] + point["x"])
                                            * self.scaling_factor
                                        )
                                        + x_offset
                                    )
                                    yp = (
                                        int(
                                            (-bb["miny"] + point["y"])
                                            * self.scaling_factor
                                        )
                                        + y_offset
                                    )
                                    # Clamp at the lower image border only;
                                    # the +2 image margin covers the upper one.
                                    xp = max(0, xp)
                                    yp = max(0, yp)
                                    x.append(pix[xp, yp])
                        last_point = point
                    else:
                        # Stroke has fewer points than requested: pad.
                        x.append(self.fill_empty_with)
                        x.append(self.fill_empty_with)
                        if self.pixel_env > 0:
                            for _ in range((1 + 2 * self.pixel_env) ** 2):
                                x.append(self.fill_empty_with)
            else:
                # Recording has fewer strokes than requested: pad a full
                # stroke's worth of values.
                for _ in range(self.points_per_stroke):
                    x.append(self.fill_empty_with)
                    x.append(self.fill_empty_with)
                    if self.pixel_env > 0:
                        for _ in range((1 + 2 * self.pixel_env) ** 2):
                            x.append(self.fill_empty_with)
        del draw
        return x

    def _features_without_strokes(self, hwr_obj):
        """Calculate the ConstantPointCoordinates features for the case of
        a single (collapsed) stroke with pen_down features."""
        x = []
        for point in hwr_obj.get_pointlist()[0]:
            # Stop once the fixed feature length has been reached.
            if len(x) >= 3 * self.points_per_stroke or (
                len(x) >= 2 * self.points_per_stroke and not self.pen_down
            ):
                break
            x.append(point["x"])
            x.append(point["y"])
            if self.pen_down:
                if "pen_down" not in point:
                    logger.error(
                        "The "
                        "ConstantPointCoordinates(strokes=0) "
                        "feature should only be used after "
                        "SpaceEvenly preprocessing step."
                    )
                else:
                    x.append(int(point["pen_down"]))
        # Pad to the fixed dimension.
        if self.pen_down:
            while len(x) != 3 * self.points_per_stroke:
                x.append(self.fill_empty_with)
        else:
            while len(x) != 2 * self.points_per_stroke:
                x.append(self.fill_empty_with)
        return x

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        if self.strokes > 0:
            x = self._features_with_strokes(hwr_obj)
        else:
            x = self._features_without_strokes(hwr_obj)
        # Sanity check: the produced vector must match get_dimension().
        assert self.get_dimension() == len(
            x
        ), "Dimension of %s should be %i, but was %i" % (
            str(self),
            self.get_dimension(),
            len(x),
        )
        return x
class FirstNPoints(Feature):
    """Similar to the ``ConstantPointCoordinates`` feature, this feature takes
    the first ``n=81`` point coordinates. It also has the
    ``fill_empty_with=0`` to make sure that the dimension of this feature is
    always the same."""

    normalize = False

    def __init__(self, n=81):
        self.n = n

    def __repr__(self):
        return f"FirstNPoints\n - n: {self.n}\n"

    def __str__(self):
        return f"first n points\n - n: {self.n}\n"

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Two values (x, y) per point.
        return 2 * self.n

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        x = []
        pointlist = hwr_obj.get_pointlist()
        left = self.n
        for stroke in pointlist:
            if left == 0:
                # All n points were taken; no need to scan further strokes.
                break
            for point in stroke:
                if left == 0:
                    break
                left -= 1
                x.append(point["x"])
                x.append(point["y"])
        # Bugfix: pad with zeros so the dimension is constant, as promised by
        # the class docstring. The original never padded, so recordings with
        # fewer than n points raised the AssertionError below.
        while len(x) < 2 * self.n:
            x.append(0)
        assert self.get_dimension() == len(
            x
        ), "Dimension of %s should be %i, but was %i" % (
            str(self),
            self.get_dimension(),
            len(x),
        )
        return x
# Global features
class Bitmap(Feature):
    """Fixed-size bitmap rendering of the recording."""

    normalize = True

    def __init__(self, size=16):
        # Edge length of the (square) bitmap.
        self.size = size

    def __repr__(self):
        return f"Bitmap({self.size} x {self.size})"

    def __str__(self):
        return f"Bitmap({self.size} x {self.size})"

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return self.size * self.size

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        bitmap = hwr_obj.get_bitmap(size=self.size)
        return list(bitmap.flatten())
class StrokeCount(Feature):
    """Stroke count as a 1 dimensional recording."""

    normalize = True

    def __repr__(self):
        return "StrokeCount"

    def __str__(self):
        return "stroke count"

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return 1

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        strokes = hwr_obj.get_pointlist()
        return [len(strokes)]
class Ink(Feature):
    """Ink as a 1 dimensional feature. It gives a numeric value for the amount
    of ink this would eventually have consumed.
    """

    normalize = True

    def __repr__(self):
        return "Ink"

    def __str__(self):
        return "ink"

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return 1

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        # Total polyline length over all strokes.
        # TODO: What about dots? What about speed?
        total = 0.0
        for stroke in hwr_obj.get_pointlist():
            for previous, current in zip(stroke, stroke[1:]):
                total += preprocessing.euclidean_distance(current, previous)
        return [total]
class AspectRatio(Feature):
    """Aspect ratio (width / height) of a recording as a 1 dimensional
    feature."""

    normalize = True

    def __repr__(self):
        return "Aspect Ratio"

    def __str__(self):
        return "Aspect Ratio"

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return 1

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        # The 0.01 epsilon keeps zero-extent (dot-like) recordings from
        # dividing by zero.
        width = float(hwr_obj.get_width() + 0.01)
        height = float(hwr_obj.get_height() + 0.01)
        ratio = width / height
        return [ratio]
class Width(Feature):
    """Width of a recording as a 1 dimensional feature.

    .. note::
        This is the current width. So if the recording was scaled, this will
        not be the original width.
    """

    normalize = True

    def __repr__(self):
        return "Width"

    def __str__(self):
        return "Width"

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return 1

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        width = hwr_obj.get_width()
        return [float(width)]
class Height(Feature):
    """Height of a recording as a 1 dimensional feature.

    .. note::
        This is the current height. So if the recording was scaled, this will
        not be the original height.
    """

    normalize = True

    def __repr__(self):
        return "Height"

    def __str__(self):
        return "Height"

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return 1

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        height = hwr_obj.get_height()
        return [float(height)]
class Time(Feature):
    """The time in milliseconds it took to create the recording. This is a 1
    dimensional feature."""

    normalize = True

    def __repr__(self):
        return "Time"

    def __str__(self):
        return "Time"

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return 1

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        elapsed = hwr_obj.get_time()
        return [float(elapsed)]
class CenterOfMass(Feature):
    """Center of mass of a recording as a 2 dimensional feature."""

    normalize = True

    def __repr__(self):
        return "CenterOfMass"

    def __str__(self):
        return "Center of mass"

    def get_dimension(self):  # pylint: disable=R0201
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return 2

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        xs = []
        ys = []
        for stroke in hwr_obj.get_pointlist():
            for point in stroke:
                xs.append(point["x"])
                ys.append(point["y"])
        # Bug fix: an empty recording used to raise ZeroDivisionError.
        if not xs:
            return [0.0, 0.0]
        return [float(sum(xs)) / len(xs), float(sum(ys)) / len(ys)]
class StrokeCenter(Feature):
    """Get the stroke center of mass coordinates as a 2 dimensional feature
    per stroke (for the first ``strokes`` strokes)."""

    normalize = True

    def __init__(self, strokes=4):
        # Number of strokes whose centers are emitted.
        self.strokes = strokes

    def __repr__(self):
        return "StrokeCenter"

    def __str__(self):
        return "Stroke center"

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return self.strokes * 2

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        feature_vector = []
        # Only the first ``self.strokes`` strokes contribute.
        for stroke in hwr_obj.get_pointlist()[: self.strokes]:
            xs = [point["x"] for point in stroke]
            ys = [point["y"] for point in stroke]
            feature_vector.append(numpy.mean(xs))
            feature_vector.append(numpy.mean(ys))
        # Pad missing strokes with zeros so the dimension is constant.
        while len(feature_vector) < self.get_dimension():
            feature_vector.append(0)
        return feature_vector
class DouglasPeuckerPoints(Feature):
    """Get the number of points which are left after applying the Douglas
    Peucker line simplification algorithm.
    """

    normalize = True

    def __init__(self, epsilon=0.2):
        # Distance threshold: points closer than this to the chord are
        # considered redundant.
        self.epsilon = epsilon

    def __repr__(self):
        return "DouglasPeuckerPoints"

    def __str__(self):
        return "DouglasPeucker Points"

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return 1

    def _stroke_simplification(self, pointlist):
        """The Douglas-Peucker line simplification takes a list of points as an
        argument. It tries to simplify this list by removing as many points
        as possible while still maintaining the overall shape of the stroke.
        It does so by taking the first and the last point, connecting them
        by a straight line and searching for the point with the highest
        distance. If that distance is bigger than 'epsilon', the point is
        important and the algorithm continues recursively."""
        # Find the point with the biggest perpendicular distance to the
        # chord through the first and the last point.
        dmax = 0
        index = 0
        for i in range(1, len(pointlist)):
            d = geometry.perpendicular_distance(
                pointlist[i], pointlist[0], pointlist[-1]
            )
            if d > dmax:
                index = i
                dmax = d
        # If the maximum distance is bigger than the threshold 'epsilon', then
        # simplify the pointlist recursively
        if dmax >= self.epsilon:
            # Bug fix: the first recursive call must INCLUDE the split point
            # (``pointlist[:index + 1]``). The old slice ``pointlist[:index]``
            # silently dropped the point before the split from the result.
            rec_results1 = self._stroke_simplification(pointlist[: index + 1])
            rec_results2 = self._stroke_simplification(pointlist[index:])
            # The split point ends the first half and starts the second;
            # keep it only once.
            result_list = rec_results1[:-1] + rec_results2
        else:
            result_list = [pointlist[0], pointlist[-1]]
        return result_list

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        dp_points = 0
        for stroke in hwr_obj.get_pointlist():
            points = self._stroke_simplification(stroke)
            dp_points += len(points)
        return [dp_points]
class StrokeIntersections(Feature):
    """Count the number of intersections which strokes in the recording have
    with each other in form of a symmetrical matrix for the first
    ``stroke=4`` strokes. The feature dimension is
    :math:`round(\\frac{\\text{strokes}^2}{2} + \\frac{\\text{strokes}}{2})`
    because the symmetrical part is discarded.

    ======= ======= ======= ======= ===
    -       stroke1 stroke2 stroke3
    ------- ------- ------- ------- ---
    stroke1 0       1       0       ...
    stroke2 1       2       0       ...
    stroke3 0       0       0       ...
    ...     ...     ...     ...     ...
    ======= ======= ======= ======= ===

    Returns values of upper triangular matrix (including diagonal)
    from left to right, top to bottom.

    .. warning::
        This method has an error. It should probably not be used.
    """

    normalize = True

    def __init__(self, strokes=4):
        # Number of strokes considered; missing strokes count as empty.
        self.strokes = strokes

    def __repr__(self):
        return "StrokeIntersections"

    def __str__(self):
        return "StrokeIntersections"

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        # Size of the upper triangle (incl. diagonal) of a strokes×strokes
        # matrix.
        return int(round(float(self.strokes ** 2) / 2 + float(self.strokes) / 2))

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        pointlist = hwr_obj.get_pointlist()
        polygonal_chains = []
        # Make sure the dimension is correct: recordings with fewer strokes
        # than ``self.strokes`` are padded with empty chains.
        for i in range(self.strokes):
            if i < len(pointlist):
                polygonal_chains.append(geometry.PolygonalChain(pointlist[i]))
            else:
                polygonal_chains.append(geometry.PolygonalChain([]))
        x = []
        # combinations_wr = combinations WITH replacement, so every chain is
        # also paired with itself (diagonal => self-intersections).
        for chainA, chainB in combinations_wr(polygonal_chains, 2):
            if chainA == chainB:
                x.append(chainA.count_selfintersections())
            else:
                x.append(chainA.count_intersections(chainB))
        assert self.get_dimension() == len(
            x
        ), "Dimension of %s should be %i, but was %i" % (
            str(self),
            self.get_dimension(),
            len(x),
        )
        return x
class ReCurvature(Feature):
    """Re-curvature is a 1 dimensional, stroke-global feature for a recording.
    It is the ratio
    :math:`\\frac{\\text{height}(s)}{\\text{length}(s)}`.
    If ``length(s) == 0``, then the re-curvature is defined to be 1.
    """

    normalize = True

    def __init__(self, strokes=4):
        assert strokes > 0, "This attribute has to be positive, but was %s" % str(
            strokes
        )
        self.strokes = strokes

    def __repr__(self):
        return "ReCurvature"

    def __str__(self):
        return "Re-curvature"

    def get_dimension(self):
        """Get the dimension of the returned feature. This equals the number
        of elements in the returned list of numbers."""
        return self.strokes

    def __call__(self, hwr_obj):
        super(self.__class__, self).__call__(hwr_obj)
        x = []
        for stroke in hwr_obj.get_pointlist():
            # Vertical extent of the stroke.
            ys = [point["y"] for point in stroke]
            height = max(ys) - min(ys)
            # Polyline length of the stroke.
            length = 0.0
            for previous, current in zip(stroke, stroke[1:]):
                length += preprocessing.euclidean_distance(current, previous)
            x.append(1 if length == 0 else height / length)
            if len(x) == self.strokes:
                break
        # Pad so the dimension stays constant for short recordings.
        while len(x) < self.strokes:
            x.append(0)
        assert self.get_dimension() == len(
            x
        ), "Dimension of %s should be %i, but was %i" % (
            str(self),
            self.get_dimension(),
            len(x),
        )
        return x
| 7,523 | 0 | 1,323 |
8a62813cebb672de3944eea8a62b8afb8631e71b | 1,553 | py | Python | pinguin/views.py | OpenHackC4H/2017-Gothenburg-Pinguin | aac60014973e130977f3ef9a78b41b67a13cc527 | [
"MIT"
] | 1 | 2018-05-05T18:25:47.000Z | 2018-05-05T18:25:47.000Z | pinguin/views.py | OpenHackC4H/2017-Gothenburg-Pinguin | aac60014973e130977f3ef9a78b41b67a13cc527 | [
"MIT"
] | null | null | null | pinguin/views.py | OpenHackC4H/2017-Gothenburg-Pinguin | aac60014973e130977f3ef9a78b41b67a13cc527 | [
"MIT"
] | null | null | null | from rest_framework import permissions, viewsets, generics, filters
from .serializers import JobsSerializer, HousingSerializer, ApplicantSerializer, HeatmapSerializer
from .models import Jobs, Housing, Applicant, Heatmap
from .data_collection.collect_data import CollectData
from django.shortcuts import render
# Debug switch: set to True to run data collection at import time.
debug = False
if debug:  # idiom fix: no parentheses around the condition
    apa = CollectData()
from .serializers import JobsSerializer, HousingSerializer, ApplicantSerializer, HeatmapSerializer
from .models import Jobs, Housing, Applicant, Heatmap
from .data_collection.collect_data import CollectData
from django.shortcuts import render
# Debug switch: set to True to run data collection at import time.
debug = False
if debug:  # idiom fix: no parentheses around the condition
    apa = CollectData()
class JobsViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for job offers, ordered by company and filterable
    by ``city``."""

    queryset = Jobs.objects.all().order_by('company')
    serializer_class = JobsSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    filter_backends = (filters.DjangoFilterBackend,)
    # Bug fix: the field was listed twice ('city', 'city').
    filter_fields = ('city',)
class HousingViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for housing offers, ordered by address and
    filterable by ``city``."""

    queryset = Housing.objects.all().order_by('address')
    serializer_class = HousingSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    filter_backends = (filters.DjangoFilterBackend,)
    # Bug fix: the field was listed twice ('city', 'city').
    filter_fields = ('city',)
class ApplicantViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for applicants, ordered by name."""

    queryset = Applicant.objects.all().order_by('name')
    serializer_class = ApplicantSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class HeatmapViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for heatmap entries, ordered and filterable by
    ``occupation``."""

    queryset = Heatmap.objects.all().order_by('occupation')
    serializer_class = HeatmapSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    filter_backends = (filters.DjangoFilterBackend,)
    # Bug fix: the field was listed twice ('occupation', 'occupation').
    filter_fields = ('occupation',)
def index(request):
    """Render the front-end entry page."""
    return render(request, 'pinguin/index.html')
130e54d3a59f85155c18c48d82dfb5f339ff8926 | 5,222 | py | Python | app/recipe/views.py | mydjangoprojects/recipe-mpa | bb36b3f488fcce3292be2f23e4201c1286fe113c | [
"MIT"
] | null | null | null | app/recipe/views.py | mydjangoprojects/recipe-mpa | bb36b3f488fcce3292be2f23e4201c1286fe113c | [
"MIT"
] | null | null | null | app/recipe/views.py | mydjangoprojects/recipe-mpa | bb36b3f488fcce3292be2f23e4201c1286fe113c | [
"MIT"
] | null | null | null | from django.views.generic import DetailView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from core.views import PaginatedListView
from .models import Tag, Ingredient, Recipe
from .forms.tag_forms import TagModelForm
from .forms.ingredient_forms import IngredientModelForm
from .forms.recipe_forms import RecipeModelForm
##############
# Tag Mixins #
##############
#############
# Tag Views #
#############
#####################
# Ingredient Mixins #
#####################
####################
# Ingredient Views #
####################
#################
# Recipe Mixins #
#################
################
# Recipe Views #
################
| 27.484211 | 78 | 0.684795 | from django.views.generic import DetailView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from core.views import PaginatedListView
from .models import Tag, Ingredient, Recipe
from .forms.tag_forms import TagModelForm
from .forms.ingredient_forms import IngredientModelForm
from .forms.recipe_forms import RecipeModelForm
##############
# Tag Mixins #
##############
class TagUserPassesTestMixin(UserPassesTestMixin):
    """Grant access when the requester is staff or superuser, or owns the
    tag addressed by the URL's ``pk``."""

    def test_func(self):
        tag = Tag.objects.get(pk=self.kwargs['pk'])
        user = self.request.user
        if user.is_staff or user.is_superuser:
            return True
        return tag.user.id == user.id
#############
# Tag Views #
#############
class TagList(LoginRequiredMixin, PaginatedListView):
    """Paginated list of all tags (login required)."""

    model = Tag
    template_name = 'tag/tag_list.html'
    queryset = Tag.objects.all()
class TagDetail(LoginRequiredMixin, DetailView):
    """Detail page for a single tag (login required)."""

    model = Tag
    template_name = 'tag/tag_detail.html'
class TagUpdate(LoginRequiredMixin, TagUserPassesTestMixin, UpdateView):
    """Edit a tag; restricted to its owner (or staff/superuser)."""

    model = Tag
    form_class = TagModelForm
    template_name = 'tag/tag_update.html'
class TagDelete(LoginRequiredMixin, TagUserPassesTestMixin, DeleteView):
    """Delete a tag; restricted to its owner (or staff/superuser)."""

    model = Tag
    template_name = 'tag/tag_delete.html'
    success_url = reverse_lazy('recipe:tag_list')
class TagCreate(LoginRequiredMixin, CreateView):
    """Create a tag owned by the requesting user."""

    model = Tag
    form_class = TagModelForm
    template_name = 'tag/tag_create.html'
    success_url = reverse_lazy('recipe:tag_list')

    def form_valid(self, form):
        # Attach the owner before saving. Bug fix: the old code called
        # form.save() itself and then super().form_valid(form), which saves
        # again — the object was written twice.
        form.instance.user = self.request.user
        return super().form_valid(form)
#####################
# Ingredient Mixins #
#####################
class IngredientUserPassesTestMixin(UserPassesTestMixin):
    """Grant access when the requester is staff or superuser, or owns the
    ingredient addressed by the URL's ``pk``."""

    def test_func(self):
        ingredient = Ingredient.objects.get(pk=self.kwargs['pk'])
        user = self.request.user
        if user.is_staff or user.is_superuser:
            return True
        return ingredient.user.id == user.id
####################
# Ingredient Views #
####################
class IngredientList(LoginRequiredMixin, PaginatedListView):
    """Paginated list of all ingredients (login required)."""

    model = Ingredient
    template_name = 'ingredient/ingredient_list.html'
    queryset = Ingredient.objects.all()
class IngredientDetail(LoginRequiredMixin, DetailView):
    """Detail page for a single ingredient (login required)."""

    model = Ingredient
    template_name = 'ingredient/ingredient_detail.html'
class IngredientUpdate(LoginRequiredMixin,
                       IngredientUserPassesTestMixin,
                       UpdateView):
    """Edit an ingredient; restricted to its owner (or staff/superuser)."""

    model = Ingredient
    form_class = IngredientModelForm
    template_name = 'ingredient/ingredient_update.html'
class IngredientDelete(LoginRequiredMixin,
                       IngredientUserPassesTestMixin,
                       DeleteView):
    """Delete an ingredient; restricted to its owner (or staff/superuser)."""

    model = Ingredient
    template_name = 'ingredient/ingredient_delete.html'
    success_url = reverse_lazy('recipe:ingredient_list')
class IngredientCreate(LoginRequiredMixin, CreateView):
    """Create an ingredient owned by the requesting user."""

    model = Ingredient
    form_class = IngredientModelForm
    template_name = 'ingredient/ingredient_create.html'
    success_url = reverse_lazy('recipe:ingredient_list')

    def form_valid(self, form):
        # Attach the owner before saving. Bug fix: the old code called
        # form.save() itself and then super().form_valid(form), which saves
        # again — the object was written twice.
        form.instance.user = self.request.user
        return super().form_valid(form)
#################
# Recipe Mixins #
#################
class RecipeUserPassesTestMixin(UserPassesTestMixin):
    """Grant access when the requester is staff or superuser, or owns the
    recipe addressed by the URL's ``pk``."""

    def test_func(self):
        recipe = Recipe.objects.get(pk=self.kwargs['pk'])
        req_user = self.request.user
        # Bug fix: the condition used ``or``, so staff members who are not
        # also superusers were owner-checked — inconsistent with the Tag
        # and Ingredient mixins above, which use ``and``.
        if not req_user.is_staff and not req_user.is_superuser:
            if recipe.user.id != req_user.id:
                return False
        return True
################
# Recipe Views #
################
class RecipeList(LoginRequiredMixin, PaginatedListView):
    """Paginated list of all recipes (login required)."""

    model = Recipe
    queryset = Recipe.objects.all()
    template_name = 'recipe/recipe_list.html'
class RecipeDetail(LoginRequiredMixin, DetailView):
    """Detail page for a single recipe (login required)."""

    model = Recipe
    template_name = 'recipe/recipe_detail.html'
class RecipeUpdate(LoginRequiredMixin, RecipeUserPassesTestMixin, UpdateView):
    """Edit a recipe; restricted to its owner (or staff/superuser)."""

    model = Recipe
    form_class = RecipeModelForm
    template_name = 'recipe/recipe_update.html'
    success_url = reverse_lazy('recipe:recipe_list')
class RecipeDelete(LoginRequiredMixin, RecipeUserPassesTestMixin, DeleteView):
    """Delete a recipe; restricted to its owner (or staff/superuser)."""

    model = Recipe
    template_name = 'recipe/recipe_delete.html'
    success_url = reverse_lazy('recipe:recipe_list')
class RecipeCreate(LoginRequiredMixin, CreateView):
    """Create a recipe owned by the requesting user."""

    model = Recipe
    form_class = RecipeModelForm
    template_name = 'recipe/recipe_create.html'
    success_url = reverse_lazy('recipe:recipe_list')

    def form_valid(self, form):
        # Attach the owner before saving. Bug fix: the old code called
        # form.save() itself and then super().form_valid(form), which saves
        # again — the object was written twice.
        form.instance.user = self.request.user
        return super().form_valid(form)
| 1,317 | 2,600 | 494 |
d58f899d295336781afad98b29f2d573fa14549f | 510 | py | Python | tests/test_encrypt.py | realityone/CetTicket | 1a1cb7eec89061bda27435ee618802e30b2bf6f9 | [
"MIT"
] | 3 | 2015-05-13T16:10:52.000Z | 2015-07-16T10:24:53.000Z | tests/test_encrypt.py | realityone/CetTicket | 1a1cb7eec89061bda27435ee618802e30b2bf6f9 | [
"MIT"
] | null | null | null | tests/test_encrypt.py | realityone/CetTicket | 1a1cb7eec89061bda27435ee618802e30b2bf6f9 | [
"MIT"
] | null | null | null | import unittest
from libcet import cet
| 28.333333 | 94 | 0.672549 | import unittest
from libcet import cet
class TestEncrypt(unittest.TestCase):
    """Round-trip tests for the CET crypter."""

    def setUp(self):
        # Two 8-character key strings — presumably the client's fixed
        # keys; confirm their meaning in libcet.
        self.crypter = cet.CETCrypter('021yO6d<', 'QghdW;O;')

    def test_encrypt(self):
        """Encrypt-then-decrypt must reproduce the plaintext for both the
        ticket-number and the request-data channels."""
        plaintext = '123456789'
        self.assertEqual(
            plaintext,
            self.crypter.decrypt_ticket_number(self.crypter.encrypt_ticket_number(plaintext)))
        self.assertEqual(
            plaintext,
            self.crypter.decrypt_request_data(self.crypter.encrypt_request_data(plaintext)))
| 377 | 16 | 76 |
2143068b1ccbc1e5fe085bbcd5a31d3a3cfd047d | 199 | py | Python | paranuara/companies/apps.py | SPLAYER-HD/Paranuara | 5a42f23d761e16e3b486ba04d9185551614f06a5 | [
"MIT"
] | null | null | null | paranuara/companies/apps.py | SPLAYER-HD/Paranuara | 5a42f23d761e16e3b486ba04d9185551614f06a5 | [
"MIT"
] | 4 | 2021-06-08T20:53:43.000Z | 2022-03-12T00:13:51.000Z | paranuara/companies/apps.py | SPLAYER-HD/RestServiceDjango | 5a42f23d761e16e3b486ba04d9185551614f06a5 | [
"MIT"
] | null | null | null | """Companies app"""
# Django
from django.apps import AppConfig
class CompaniesAppConfig(AppConfig):
    """Companies app config"""

    # Full Python path of the application package.
    name = "paranuara.companies"
    # Human-readable name (shown e.g. in the admin).
    verbose_name = 'Companies'
| 16.583333 | 36 | 0.703518 | """Companies app"""
# Django
from django.apps import AppConfig
class CompaniesAppConfig(AppConfig):
    """Companies app config"""

    # Full Python path of the application package.
    name = "paranuara.companies"
    # Human-readable name (shown e.g. in the admin).
    verbose_name = 'Companies'
| 0 | 0 | 0 |
80f9effd20d0ca021438532f79cd34779410673a | 210 | py | Python | examples/slots_simple.py | BarnabasSzabolcs/pyquasargui | 0ac684094c5a87dbb1a3ed1ff83f33c603d97e5a | [
"MIT"
] | 2 | 2021-09-06T20:23:43.000Z | 2022-02-02T18:24:35.000Z | examples/slots_simple.py | BarnabasSzabolcs/pyquasargui | 0ac684094c5a87dbb1a3ed1ff83f33c603d97e5a | [
"MIT"
] | 1 | 2022-02-27T01:19:14.000Z | 2022-02-27T01:19:14.000Z | examples/slots_simple.py | BarnabasSzabolcs/pyquasargui | 0ac684094c5a87dbb1a3ed1ff83f33c603d97e5a | [
"MIT"
] | null | null | null | from quasargui import *
# A labelled text input with a 'place' icon put into its 'prepend' slot —
# demonstrates Quasar slot usage from Python.
layout = QInput(
    classes='q-ma-lg',
    label='Your city',
    children=[
        Slot('prepend', [
            QIcon('place')
        ])
    ])
# Show the component in a window titled 'slots example' (presumably blocks
# until the window is closed — see quasargui.run).
run(layout, title='slots example')
| 16.153846 | 34 | 0.528571 | from quasargui import *
# A labelled text input with a 'place' icon put into its 'prepend' slot —
# demonstrates Quasar slot usage from Python.
layout = QInput(
    classes='q-ma-lg',
    label='Your city',
    children=[
        Slot('prepend', [
            QIcon('place')
        ])
    ])
# Show the component in a window titled 'slots example' (presumably blocks
# until the window is closed — see quasargui.run).
run(layout, title='slots example')
| 0 | 0 | 0 |
7cd31469139c7761ef44fc5874f8112465fade84 | 4,716 | py | Python | torchvision/prototype/datasets/_builtin/pcam.py | yassineAlouini/vision-1 | ee26e9c260a255e2afb5e691e713349529170c8b | [
"BSD-3-Clause"
] | 1 | 2022-02-14T09:16:02.000Z | 2022-02-14T09:16:02.000Z | torchvision/prototype/datasets/_builtin/pcam.py | yassineAlouini/vision-1 | ee26e9c260a255e2afb5e691e713349529170c8b | [
"BSD-3-Clause"
] | null | null | null | torchvision/prototype/datasets/_builtin/pcam.py | yassineAlouini/vision-1 | ee26e9c260a255e2afb5e691e713349529170c8b | [
"BSD-3-Clause"
] | null | null | null | import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple, Iterator, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype import features
from torchvision.prototype.datasets.utils import (
Dataset,
OnlineResource,
GDriveResource,
)
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
)
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "pcam"
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
@register_dataset(NAME)
class PCAM(Dataset):
# TODO write proper docstring
"""PCAM Dataset
homepage="https://github.com/basveeling/pcam"
"""
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
| 35.19403 | 105 | 0.638465 | import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple, Iterator, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype import features
from torchvision.prototype.datasets.utils import (
Dataset,
OnlineResource,
GDriveResource,
)
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
)
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "pcam"
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
    """Datapipe that opens each incoming ``(path, stream)`` pair as an HDF5
    file and yields its rows.

    If ``key`` is given, iteration is over that dataset inside the file
    (PCAM uses ``"x"`` for images and ``"y"`` for targets); otherwise the
    file object itself is iterated.

    NOTE(review): the return annotation says ``Tuple[str, io.IOBase]`` but
    the loop yields h5py rows — confirm and fix the annotation upstream.
    """

    def __init__(
        self,
        datapipe: IterDataPipe[Tuple[str, io.IOBase]],
        key: Optional[str] = None,  # Note: this key thing might be very specific to the PCAM dataset
    ) -> None:
        self.datapipe = datapipe
        self.key = key

    def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
        # Imported lazily so the module can be imported without h5py
        # installed (it is declared as a dataset dependency below).
        import h5py

        for _, handle in self.datapipe:
            with h5py.File(handle) as data:
                if self.key is not None:
                    data = data[self.key]
                yield from data
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
def _info() -> Dict[str, Any]:
    """Static dataset info: PCAM is a binary classification task."""
    categories = ["0", "1"]
    return {"categories": categories}
@register_dataset(NAME)
class PCAM(Dataset):
    """PCAM (PatchCamelyon) dataset.

    Histopathology image patches with binary labels, distributed as
    gzip-compressed HDF5 files hosted on Google Drive (one images file and
    one targets file per split).

    homepage="https://github.com/basveeling/pcam"
    """

    def __init__(
        self, root: Union[str, pathlib.Path], split: str = "train", *, skip_integrity_check: bool = False
    ) -> None:
        self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
        self._categories = _info()["categories"]
        # h5py is only needed at iteration time; declaring it lets the base
        # class report a missing optional dependency early.
        super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("h5py",))

    # Per split: (images resource, targets resource).
    _RESOURCES = {
        "train": (
            _Resource(  # Images
                file_name="camelyonpatch_level_2_split_train_x.h5.gz",
                gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
                sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
            ),
            _Resource(  # Targets
                file_name="camelyonpatch_level_2_split_train_y.h5.gz",
                gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
                sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
            ),
        ),
        "test": (
            _Resource(  # Images
                file_name="camelyonpatch_level_2_split_test_x.h5.gz",
                gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
                sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
            ),
            _Resource(  # Targets
                file_name="camelyonpatch_level_2_split_test_y.h5.gz",
                gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
                sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
            ),
        ),
        "val": (
            _Resource(  # Images
                file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
                gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
                sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
            ),
            _Resource(  # Targets
                file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
                gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
                sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
            ),
        ),
    }

    def _resources(self) -> List[OnlineResource]:
        """Return the download resources for the selected split."""
        return [  # = [images resource, targets resource]
            GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
            for file_name, gdrive_id, sha256 in self._RESOURCES[self._split]
        ]

    def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
        """Convert one (image, target) pair into the sample dict."""
        image, target = data  # They're both numpy arrays at this point
        return {
            # HWC -> CHW for the Image feature.
            "image": features.Image(image.transpose(2, 0, 1)),
            "label": Label(target.item(), categories=self._categories),
        }

    def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
        """Build the sample pipeline: read both HDF5 files row-wise, zip
        images with targets, apply sharding/shuffling hints, and map each
        pair to a sample dict."""
        images_dp, targets_dp = resource_dps
        images_dp = PCAMH5Reader(images_dp, key="x")
        targets_dp = PCAMH5Reader(targets_dp, key="y")
        dp = Zipper(images_dp, targets_dp)
        dp = hint_shuffling(dp)
        dp = hint_sharding(dp)
        return Mapper(dp, self._prepare_sample)

    def __len__(self) -> int:
        # Hard-coded, known split sizes.
        return 262_144 if self._split == "train" else 32_768
| 1,872 | 35 | 233 |