max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
settings.py | ONSdigital/sdx-rabbit-monitor | 1 | 12769151 | <filename>settings.py
import logging
import os

# Log line layout for the monitor's logger.
LOGGING_FORMAT = "%(asctime)s|%(levelname)s: sdx-rabbit-monitor: %(message)s"
# getLevelName maps a level *name* (e.g. 'DEBUG') to its numeric value.
LOGGING_LEVEL = logging.getLevelName(os.getenv('LOGGING_LEVEL', 'DEBUG'))
# Port this service listens on (None when the variable is unset).
PORT = os.getenv('SDX_RABBIT_MONITOR_PORT')
# Credentials for the RabbitMQ management API.
RABBITMQ_DEFAULT_PASS = os.getenv('SDX_RABBIT_MONITOR_PASS')
RABBITMQ_DEFAULT_USER = os.getenv('SDX_RABBIT_MONITOR_USER')
# URL-encoded default vhost name ('/').
RABBITMQ_DEFAULT_VHOST = '%2f'
# Base URL of the RabbitMQ management HTTP API.
RABBIT_URL = 'http://{hostname}:{port}/api/'.format(
    hostname=os.getenv('SDX_RABBIT_MONITOR_RABBIT_HOST'),
    port=os.getenv('SDX_RABBIT_MONITOR_MGT_PORT')
)
# Delay between monitor runs -- presumably seconds; TODO confirm at call site.
WAIT_TIME = 120
# Number of seconds to look back and gather stats from
RABBIT_MONITOR_STATS_WINDOW = 120
# Sample frequency in stats window
RABBIT_MONITOR_STATS_INCREMENT = '30'
| 2.171875 | 2 |
orchestra/migrations/0002_auto_20141229_1543.py | ksbek/orchestra | 0 | 12769152 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
    """Schema migration: link Task to Project and allow several text,
    M2M and JSON fields on Process/Step to be blank."""

    dependencies = [
        ('orchestra', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='project',
            # default=0 only back-fills existing rows while migrating;
            # preserve_default=False removes it from the field afterwards.
            field=models.ForeignKey(
                on_delete=models.CASCADE, default=0, to='orchestra.Project'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='process',
            name='description',
            field=models.TextField(blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='step',
            name='depends_on',
            # related_name ending in '+' disables the reverse accessor.
            field=models.ManyToManyField(
                related_name='depends_on_rel_+', to='orchestra.Step', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='step',
            name='description',
            field=models.TextField(blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='step',
            name='required_certifications',
            field=models.ManyToManyField(
                to='orchestra.Certification', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='step',
            name='review_policy',
            field=jsonfield.fields.JSONField(blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='step',
            name='user_interface',
            field=jsonfield.fields.JSONField(blank=True),
            preserve_default=True,
        ),
    ]
| 1.617188 | 2 |
fuelclient/fuelclient/tests/test_client.py | Zipfer/fuel-web | 0 | 12769153 | # -*- coding: utf-8 -*-
#
# Copyright 2013-2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from mock import Mock
from mock import patch
from fuelclient.tests import base
class TestHandlers(base.BaseTestCase):
    """Functional tests of the core 'fuel' CLI sub-commands, run against
    the test nailgun server provided by BaseTestCase."""

    def test_env_action(self):
        """'env' help text, required-argument checks and create/set output."""
        # check env help
        help_msgs = ["usage: fuel environment [-h]",
                     "[--list | --set | --delete | --create | --update]",
                     "optional arguments:", "--help", "--list", "--set",
                     "--delete", "--rel", "--env-create",
                     "--create", "--name", "--env-name", "--mode", "--net",
                     "--network-mode", "--nst", "--net-segment-type",
                     "--deployment-mode", "--update", "--env-update"]
        self.check_all_in_msg("env --help", help_msgs)
        # no clusters exist yet
        self.check_for_rows_in_table("env")
        for action in ("set", "create", "delete"):
            self.check_if_required("env {0}".format(action))
        # list of tuples (<fuel CLI command>, <expected output of a command>)
        expected_stdout = \
            [(
                "env --create --name=TestEnv --release=1",
                "Environment 'TestEnv' with id=1, mode=ha_compact and "
                "network-mode=nova_network was created!\n"
            ), (
                "--env-id=1 env set --name=NewEnv",
                ("Following attributes are changed for "
                 "the environment: name=NewEnv\n")
            ), (
                "--env-id=1 env set --mode=multinode",
                ("Following attributes are changed for "
                 "the environment: mode=multinode\n")
            )]
        for cmd, msg in expected_stdout:
            self.check_for_stdout(cmd, msg)

    def test_node_action(self):
        """'node' help text, required arguments and multi-node selection."""
        help_msg = ["fuel node [-h] [--env ENV]",
                    "[--list | --set | --delete | --network | --disk |"
                    " --deploy | --delete-from-db | --provision]", "-h",
                    "--help", " -s", "--default", " -d", "--download", " -u",
                    "--upload", "--dir", "--node", "--node-id", " -r",
                    "--role", "--net"]
        self.check_all_in_msg("node --help", help_msg)
        self.check_for_rows_in_table("node")
        for action in ("set", "remove", "--network", "--disk"):
            self.check_if_required("node {0}".format(action))
        self.load_data_to_nailgun_server()
        # exactly three nodes must match the --node selector
        self.check_number_of_rows_in_table("node --node 9f:b7,9d:24,ab:aa", 3)

    def test_selected_node_deploy_or_provision(self):
        """--provision/--deploy require a node and report the started task."""
        self.load_data_to_nailgun_server()
        self.run_cli_commands((
            "env create --name=NewEnv --release=1",
            "--env-id=1 node set --node 1 --role=controller"
        ))
        commands = ("--provision", "--deploy")
        for action in commands:
            self.check_if_required("--env-id=1 node {0}".format(action))
        messages = (
            "Started provisioning nodes [1].\n",
            "Started deploying nodes [1].\n"
        )
        for cmd, msg in zip(commands, messages):
            self.check_for_stdout(
                "--env-id=1 node {0} --node=1".format(cmd),
                msg
            )

    def test_check_wrong_server(self):
        """'-h' must not contact the server: stderr stays empty even with a
        bogus SERVER_ADDRESS."""
        os.environ["SERVER_ADDRESS"] = "0"
        # NOTE(review): no try/finally here -- the variable leaks into
        # later tests if run_cli_command raises.
        result = self.run_cli_command("-h", check_errors=True)
        self.assertEqual(result.stderr, '')
        del os.environ["SERVER_ADDRESS"]

    def test_destroy_node(self):
        """--delete-from-db removes the node and warns about cobbler."""
        self.load_data_to_nailgun_server()
        self.run_cli_commands((
            "env create --name=NewEnv --release=1",
            "--env-id=1 node set --node 1 --role=controller"
        ))
        msg = ("Nodes with id [1] has been deleted from fuel db.\n"
               "You should still delete node from cobbler\n")
        self.check_for_stdout(
            "node --node 1 --delete-from-db",
            msg
        )

    def test_for_examples_in_action_help(self):
        """Every action's help output must contain an 'Examples' section."""
        actions = (
            "node", "stop", "deployment", "reset", "task", "network",
            "settings", "provisioning", "environment", "deploy-changes",
            "role", "release", "snapshot", "health"
        )
        for action in actions:
            self.check_all_in_msg("{0} -h".format(action), ("Examples",))

    def test_task_action_urls(self):
        """'task' hits the expected REST URLs; ?force= mirrors --force."""
        self.check_all_in_msg(
            "task --task-id 1 --debug",
            [
                "GET http://127.0.0.1",
                "/api/v1/tasks/1/"
            ],
            check_errors=True
        )
        self.check_all_in_msg(
            "task --task-id 1 --delete --debug",
            [
                "DELETE http://127.0.0.1",
                "/api/v1/tasks/1/?force=0"
            ],
            check_errors=True
        )
        self.check_all_in_msg(
            "task --task-id 1 --delete --force --debug",
            [
                "DELETE http://127.0.0.1",
                "/api/v1/tasks/1/?force=1"
            ],
            check_errors=True
        )
        # --tid is the short alias for --task-id
        self.check_all_in_msg(
            "task --tid 1 --delete --debug",
            [
                "DELETE http://127.0.0.1",
                "/api/v1/tasks/1/?force=0"
            ],
            check_errors=True
        )

    def test_get_release_list_without_errors(self):
        """'release --list' completes without raising."""
        cmd = 'release --list'
        self.run_cli_command(cmd)
class TestUserActions(base.BaseTestCase):
    """CLI 'user' sub-command behaviour."""

    def test_change_password_params(self):
        """'user change-password' should mention the --newpass parameter."""
        cmd = "user change-password"
        msg = "Expect password [--newpass NEWPASS]"
        result = self.run_cli_command(cmd, check_errors=True)
        # NOTE(review): assertTrue(msg, result) is a no-op -- `msg` is a
        # non-empty string so the assertion always passes, and `result` is
        # only used as the failure message.  It presumably should assert
        # that `msg` appears in the command output (e.g. assertIn);
        # confirm the intended output stream and fix.
        self.assertTrue(msg, result)
class TestCharset(base.BaseTestCase):
    """Non-ASCII (Cyrillic) environment names must round-trip through
    create/set/list without encoding errors."""

    def test_charset_problem(self):
        self.load_data_to_nailgun_server()
        commands = (
            "env create --name=привет --release=1",
            "--env-id=1 node set --node 1 --role=controller",
            "env"
        )
        self.run_cli_commands(commands)
class TestFiles(base.BaseTestCase):
    """Download/default commands must create the expected files on disk."""

    def test_file_creation(self):
        """Each download/default command writes its files under --dir."""
        self.load_data_to_nailgun_server()
        self.run_cli_commands((
            "env create --name=NewEnv --release=1",
            "--env-id=1 node set --node 1 --role=controller",
            "--env-id=1 node set --node 2,3 --role=compute"
        ))
        # env-level downloads: <action>_1.<ext> for yaml and json
        for action in ("network", "settings"):
            for format_ in ("yaml", "json"):
                self.check_if_files_created(
                    "--env 1 {0} --download --{1}".format(action, format_),
                    ("{0}_1.{1}".format(action, format_),)
                )
        # (<fuel CLI command>, <paths expected to exist afterwards>)
        command_to_files_map = (
            (
                "--env 1 deployment --default",
                (
                    "deployment_1",
                    "deployment_1/primary-controller_1.yaml",
                    "deployment_1/compute_2.yaml",
                    "deployment_1/compute_3.yaml"
                )
            ),
            (
                "--env 1 provisioning --default",
                (
                    "provisioning_1",
                    "provisioning_1/engine.yaml",
                    "provisioning_1/node-1.yaml",
                    "provisioning_1/node-2.yaml",
                    "provisioning_1/node-3.yaml"
                )
            ),
            (
                "--env 1 deployment --default --json",
                (
                    "deployment_1/primary-controller_1.json",
                    "deployment_1/compute_2.json",
                    "deployment_1/compute_3.json"
                )
            ),
            (
                "--env 1 provisioning --default --json",
                (
                    "provisioning_1/engine.json",
                    "provisioning_1/node-1.json",
                    "provisioning_1/node-2.json",
                    "provisioning_1/node-3.json"
                )
            ),
            (
                "node --node 1 --disk --default",
                (
                    "node_1",
                    "node_1/disks.yaml"
                )
            ),
            (
                "node --node 1 --network --default",
                (
                    "node_1",
                    "node_1/interfaces.yaml"
                )
            ),
            (
                "node --node 1 --disk --default --json",
                (
                    "node_1/disks.json",
                )
            ),
            (
                "node --node 1 --network --default --json",
                (
                    "node_1/interfaces.json",
                )
            )
        )
        for command, files in command_to_files_map:
            self.check_if_files_created(command, files)

    def check_if_files_created(self, command, paths):
        """Run *command* with --dir=<temp dir> and assert *paths* exist."""
        command_in_dir = "{0} --dir={1}".format(command, self.temp_directory)
        self.run_cli_command(command_in_dir)
        for path in paths:
            self.assertTrue(os.path.exists(
                os.path.join(self.temp_directory, path)
            ))
class TestDownloadUploadNodeAttributes(base.BaseTestCase):
    """Node attribute download followed by upload must round-trip."""

    def _roundtrip(self, cmd):
        # Shared body of both tests: seed the server, then download and
        # re-upload the attributes addressed by `cmd`.
        self.load_data_to_nailgun_server()
        self.run_cli_commands((self.download_command(cmd),
                               self.upload_command(cmd)))

    def test_upload_download_interfaces(self):
        self._roundtrip("node --node-id 1 --network")

    def test_upload_download_disks(self):
        self._roundtrip("node --node-id 1 --disk")
class TestDeployChanges(base.BaseTestCase):
    """'deploy-changes' must run cleanly on a freshly prepared environment."""

    def test_deploy_changes_no_failure(self):
        self.load_data_to_nailgun_server()
        self.run_cli_commands((
            "env create --name=test --release=1",
            "--env-id=1 node set --node 1 --role=controller",
            "deploy-changes --env 1"
        ))
class TestAuthentication(base.UnitTestCase):
    """Keystone authentication driven by the --user/--password CLI flags."""

    @patch('fuelclient.client.requests')
    @patch('fuelclient.client.auth_client')
    def test_wrong_credentials(self, mkeystone_cli, mrequests):
        """Both '--password=a' and '--password a' forms must reach keystone
        with the given credentials."""
        mkeystone_cli.return_value = Mock(auth_token='')
        mrequests.get_request.return_value = Mock(status_code=200)
        self.execute(
            ['fuel', '--user=a', '--password=a', 'node'])
        mkeystone_cli.Client.assert_called_with(
            username='a',
            tenant_name='admin',
            password='a',
            auth_url='http://127.0.0.1:8003/keystone/v2.0')
        self.execute(
            ['fuel', '--user=a', '--password', 'a', 'node'])
        # NOTE(review): '1192.168.3.11' below looks like a typo (the first
        # assertion expects 127.0.0.1) -- confirm the intended address.
        mkeystone_cli.Client.assert_called_with(
            username='a',
            tenant_name='admin',
            password='a',
            auth_url='http://1192.168.3.11:8003/keystone/v2.0')
| 1.828125 | 2 |
lcfeatures/results/cm.py | oscarpimentel/astro-lightcurves-features | 0 | 12769154 | from __future__ import print_function
from __future__ import division
from . import _C
import numpy as np
import fuzzytools.files as ftfiles
import fuzzytools.strings as ftstrings
from fuzzytools.datascience.cms import ConfusionMatrix
from fuzzytools.matplotlib.cm_plots import plot_custom_confusion_matrix
import matplotlib.pyplot as plt
from fuzzytools.datascience.xerror import XError
from IPython.display import display
from fuzzytools.strings import latex_bf_alphabet_count
from fuzzytools.latex.latex_tables import LatexTable
from fuzzytools.matplotlib.utils import save_fig
import fuzzytools.strings as strings
import lcfeatures.results.utils as utils
# Figure geometry defaults for the confusion-matrix plots.
FIGSIZE = (6,5)
DPI = 200
# Passed as random_state to gather_files_by_kfold below (None = unseeded).
RANDOM_STATE = None
# Default display order of supernova classes in the confusion matrix.
NEW_ORDER_CLASS_NAMES = ['SNIa', 'SNIbc', 'SNII*', 'SLSN']
DICT_NAME = 'thdays_class_metrics'
###################################################################################################################################################
def plot_cm(rootdir, cfilename, kf, lcset_name, model_names,
            figsize=FIGSIZE,
            dpi=DPI,
            new_order_class_names=NEW_ORDER_CLASS_NAMES,
            dict_name=DICT_NAME,
            alphabet_count=0,
            verbose=0,
            ):
    """Plot one confusion matrix per model, evaluated at the last threshold day.

    For every model in *model_names* the per-kfold result files are gathered,
    the balanced recall/f1 scores at the last threshold day are aggregated
    into XError values, and a confusion matrix (classes reordered to
    *new_order_class_names*) is drawn and saved to ../temp/exp=cm/<model>.pdf.

    Args:
        rootdir, cfilename, dict_name: kept for interface compatibility;
            not used by the current implementation.
        kf, lcset_name: kfold id and light-curve-set name used to locate files.
        model_names: iterable of model directory names under ../save/.
        figsize, dpi: figure geometry.
        new_order_class_names: class display order for the matrix.
        alphabet_count: index for the bold '(a)', '(b)', ... title prefix.
        verbose: if truthy, display the feature-ranking table and its LaTeX.
    """
    for model_name in model_names:
        fmodel_name, mn_dict = utils.get_fmodel_name(model_name, returns_mn_dict=True)
        method = mn_dict['method']
        load_roodir = f'../save/{model_name}/performance/survey=alerceZTFv7.1~bands=gr~mode=onlySNe~method={method}'
        print(load_roodir)
        files, files_ids, kfs = ftfiles.gather_files_by_kfold(load_roodir, kf, lcset_name,
            fext='d',
            imbalanced_kf_mode='oversampling',  # error oversampling
            random_state=RANDOM_STATE,
            )
        print(f'{files_ids}({len(files_ids)}#)')
        if len(files) == 0:
            continue

        class_names = files[0]()['class_names']
        thdays = files[0]()['thdays']
        thday = thdays[-1]  # evaluate at the last threshold day only

        # Aggregate the balanced metrics at `thday` across the kfold files.
        xe_dict = {}
        for metric_name in ['recall', 'f1score']:
            xe_metric = XError([f()['thdays_class_metrics_df'].loc[f()['thdays_class_metrics_df']['_thday']==thday][f'b-{metric_name}'].item() for f in files])
            xe_dict[f'b-{metric_name}'] = xe_metric
        brecall_xe = xe_dict['b-recall']
        bf1score_xe = xe_dict['b-f1score']

        # Fixed: the hard-coded reassignments that shadowed the
        # `new_order_class_names` keyword argument were removed; the
        # parameter (default NEW_ORDER_CLASS_NAMES, the same list) is now
        # honoured.  Dead `for f in features: pass` loop dropped as well.
        cm = ConfusionMatrix([f()['thdays_cm'][thday] for f in files], class_names)
        cm.reorder_classes(new_order_class_names)
        for c in new_order_class_names:
            print(cm.get_diagonal_dict()[c].get_raw_repr(f'brf_{c}_tp'))

        # Per-class sample counts, shown next to the true-label ticks.
        true_label_d = {c: f'({k}#)' for c, k in zip(class_names, np.sum(files[0]()['thdays_cm'][thday], axis=1))}

        rank = files[0]()['rank']  # just show one
        rank.names = ['Feature name=\\verb+'+n+'+' for n in rank.names]
        rank.values = [v*100 for v in rank.values]
        rank_df = rank.get_df()
        latex_table = LatexTable(rank_df,
            label='tab:brf_ranking',
            )
        if verbose:
            display(rank_df)
            print(latex_table)

        title = ''
        title += f'{latex_bf_alphabet_count(alphabet_count)}{fmodel_name}'+'\n'
        title += f'b-Recall={brecall_xe}; b-$F_1$score={bf1score_xe}'+'\n'
        title += f'th-day={thday:.0f} [days]'+'\n'
        fig, ax = plot_custom_confusion_matrix(cm,
            title=title[:-1],  # drop the trailing newline
            figsize=figsize,
            dpi=dpi,
            true_label_d=true_label_d,
            lambda_c=lambda c: c.replace('*', ''),
            )
        save_fig(fig, f'../temp/exp=cm/{model_name}.pdf', closes_fig=0)
        plt.show()
vortexasdk/api/onshore_inventory.py | VorTECHsa/python-sdk | 9 | 12769155 | <reponame>VorTECHsa/python-sdk
from dataclasses import dataclass
from typing import Optional
from vortexasdk.api.asset_tank import AssetTank
from vortexasdk.api.serdes import FromDictMixin
from vortexasdk.api.shared_types import ISODate
from vortexasdk.api.id import ID
@dataclass(frozen=True)
class OnshoreInventory(FromDictMixin):
    """
    Land Storage measurements are the base data set the Vortexa API is centred around.
    Each measurement represents the total capacity and current amount being stored at each location.
    [Land Storage Further Documentation](https://docs.vortexa.com/reference/intro-land-storage)
    """

    # Identifier of this individual measurement.
    measurement_id: ID
    # Identifier of the measured tank; full tank reference data below.
    tank_id: ID
    tank_details: AssetTank
    # ISO-8601 timestamps; measurement/publish may be absent.
    measurement_timestamp: Optional[ISODate]
    publish_timestamp: Optional[ISODate]
    report_timestamp: ISODate
    # Presumably True when the value was carried over from a previous
    # measurement -- TODO confirm against the API reference.
    carry_forward: bool
    # Current fill level in barrels / metric tons / cubic metres.
    fill_bbl: int
    fill_tons: float
    fill_cbm: float
    reference_data_version: str
| 2.5 | 2 |
WebGarageSale/ApplicationGarage/urls.py | dayojohn19/Garage_Sale | 0 | 12769156 | <filename>WebGarageSale/ApplicationGarage/urls.py
from django.urls import path
from . import views
urlpatterns = [
    # Landing page.
    path("", views.index, name="index"),
    path("magbenta/", views.magbenta, name="magbenta"),
    # NOTE(review): the two routes below lack a trailing slash and mix
    # CamelCase with lowercase naming -- consider normalising.
    path("BagongBenta", views.BagongBenta, name="bagongbenta"),
    path("KuhainAngListing", views.getListings, name="KuhainAngListing")
]
| 1.898438 | 2 |
homeworks/kirill_shevchuk/lesson11/level04.py | tgrx/Z22 | 0 | 12769157 | <filename>homeworks/kirill_shevchuk/lesson11/level04.py
from homeworks.kirill_shevchuk.lesson11 import level03
class User(level03.User):
    """User whose e-mail is additionally validated part by part."""

    def validate(self):
        """Validate name and e-mail; raise ValueError on any violation."""
        validate_name(self.name)
        validate_email(self.email)
        local_part = self.email.split("@")[0]
        domain_part = self.email.split("@")[1]
        validate_name(local_part)
        validate_email_adres(domain_part)
def validate_name(name):
    """Raise ValueError unless *name* is a non-empty lowercase-ascii/digit
    string that does not start with a digit."""
    allowed = set("abcdefghijklmnopqrstuvwxyz" + "1234567890")
    if not name or not isinstance(name, str):
        raise ValueError
    if name[0].isdigit() or not set(name) <= allowed:
        raise ValueError
def validate_email(email):
    """Validate that *email* is a non-empty string with exactly one '@'.

    Raises:
        ValueError: if *email* is falsy, not a string, or does not contain
            exactly one '@' separator.
    """
    if not email:
        raise ValueError
    if not isinstance(email, str):
        raise ValueError
    # Fixed: the original check `email.count("@") > 1 and len(email) > 2`
    # let through addresses with no '@' at all (and '@@'), which made the
    # later split("@")[1] in User.validate raise IndexError instead of
    # ValueError.  Exactly one '@' is required.
    if email.count("@") != 1:
        raise ValueError
def validate_email_adres(email_adres):
    """Raise ValueError unless the domain part is a non-empty string that
    neither starts with a digit nor starts/ends with a dot."""
    if not email_adres:
        raise ValueError
    if not isinstance(email_adres, str):
        raise ValueError
    first, last = email_adres[0], email_adres[-1]
    if first.isdigit():
        raise ValueError
    if "." in (first, last):
        raise ValueError
| 3.734375 | 4 |
app2/views.py | hazemnossier/multidb | 0 | 12769158 | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from .forms import fill_me_form
def fill_view2(request):
    """Render the fill-me form page; persist the form on a valid POST.

    POST: bind the submitted data and save it when valid; when invalid the
    bound form (with its errors) is re-rendered.  GET: render an empty form.
    """
    if request.method == 'POST':
        form = fill_me_form(request.POST)
        if form.is_valid():
            # Leftover debug prints (and the unused cleaned_data local)
            # were removed; persistence behaviour is unchanged.
            form.save()
            # NOTE(review): consider redirecting after a successful POST
            # to avoid double submission on refresh.
    else:
        form = fill_me_form()
    return render(request, "Page2.html", {'form': form})
| 2.328125 | 2 |
matplot.py | buggoing/pythonTools | 0 | 12769159 | #coding: utf8
import matplotlib.pyplot as plt
import math
import numpy as np

# Sample series over x = 0..99.
xList = range(100)
y1 = [x*x for x in xList]           # quadratic (only used by a commented-out plot below)
y2 = [math.sin(x) for x in xList]   # sine
y3 = [math.sqrt(x) for x in xList]  # square root
def draw():
    """Plot y=sin(x) as a line and y=sqrt(x) as red stars, with grid and legend."""
    series = (
        (y2, (), {'label': 'y=sin(x)'}),
        (y3, ('r*',), {'label': 'y=sqrt(x)'}),
    )
    for ydata, args, kwargs in series:
        plt.plot(ydata, *args, **kwargs)
    plt.grid()
    plt.legend()
    plt.show()
def drawWithTicks():
    """Like draw(), but with explicit y ticks and scientific x-axis labels.

    NOTE: defined but never invoked in this module.
    """
    # plt.plot(y1, 'b-', label='y=x*x')
    plt.plot(y2, label='y=sin(x)')
    plt.plot(y3, 'r*', label='y=sqrt(x)')
    yticks = np.arange(min(y2), max(y2), 1)  # set the y-axis tick positions
    plt.yticks(yticks)
    plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))  # scientific notation on the x axis
    plt.grid()
    plt.legend()
    plt.show()
draw()

# Branches of y**2 = (x**3 + 7*x + 11) mod 23 for x = 1..22.  Note this
# takes the *real* square root of the residue, not a modular square root --
# presumably intended for visualisation only.  Also rebinds the module-level
# xList and y2 used by the plots above.
xList = range(1, 23)
y = [math.sqrt((x**3 + 7*x + 11) % 23) for x in xList]
y2 = [-math.sqrt((x**3 + 7*x + 11) % 23) for x in xList]
def drawElliptic():
    """Plot both square-root branches of the curve data built above."""
    for branch in (y, y2):
        plt.plot(xList, branch)
    plt.grid()
    plt.show()


drawElliptic()
| 3.546875 | 4 |
ARRAYS/Easy/Shuffle the Array/Code.py | HassanRahim26/LEETCODE | 3 | 12769160 | <reponame>HassanRahim26/LEETCODE
#PROBLEM LINK:- https://leetcode.com/problems/shuffle-the-array/
class Solution:
    """LeetCode 1470 -- Shuffle the Array."""

    def shuffle(self, nums, n):
        """Given nums = [x1..xn, y1..yn], return [x1, y1, x2, y2, ...]."""
        return [value
                for pair in zip(nums[:n], nums[n:])
                for value in pair]
| 3.734375 | 4 |
alacode/migrations/0002_auto_20200116_1512.py | vanatteveldt/alacode | 0 | 12769161 | # Generated by Django 3.0.2 on 2020-01-16 15:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds Code.q12 (free-text notes) and rewords help texts / choices of
    the existing questionnaire fields.

    NOTE(review): the q7 help_text contains the typo 'withqi'; it should be
    fixed in the model and a follow-up migration rather than edited here.
    """

    dependencies = [
        ('alacode', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='code',
            name='q12',
            # default=0 only back-fills existing rows (coerced to a string
            # by the CharField); preserve_default=False drops it afterwards.
            field=models.CharField(default=0, help_text='Notes', max_length=500, verbose_name='q12'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='code',
            name='q1',
            field=models.BooleanField(help_text='ERROR 1: the tweet has nothing to do with the societal discussion around vaccines (tick box & continue to next tweet)', verbose_name='q1'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q10',
            field=models.IntegerField(choices=[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')], default=0, help_text='On a scale from 1 to 5, to what extent does the tweet express feelings of anger? (1 = Not at all; 5 = Extremely)', verbose_name='q10'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q11',
            field=models.IntegerField(choices=[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')], default=0, help_text='On a scale from 1 to 5, to what extent does the tweet express feelings of fear? (1 = Not at all; 5 = Extremely)', verbose_name='q11'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q2',
            field=models.BooleanField(help_text='ERROR 2: the link is not working / does not refer to a news article or blog (tick box & continue to next tweet', verbose_name='q2'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q3',
            field=models.BooleanField(help_text="Tick the box if the tweet doesn't contain any text next to the link", verbose_name='q3'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q4',
            field=models.BooleanField(help_text='Tick the box if the tweet only contains the title/header of the shared article', verbose_name='q4'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q5',
            field=models.IntegerField(choices=[(0, 'The source does not contain a discernible opinion on vaccines'), (1, 'Strongly Against'), (2, 'Against'), (3, 'Neutral'), (4, 'In Favor'), (5, 'Strongly In Favor')], default=0, help_text='To what extent would you describe the shared article as in favor or against the use of vaccines?', verbose_name='q5'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q6',
            field=models.IntegerField(choices=[(0, 'The tweet does not contain a discernible opinion on vaccines'), (1, 'Strongly Against'), (2, 'Against'), (3, 'Neutral'), (4, 'In Favor'), (5, 'Strongly In Favor')], default=0, help_text='To what extent would you describe the text in the tweet as in favor or against the use of vaccines?', verbose_name='q6'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q7',
            field=models.IntegerField(choices=[(0, 'The tweet does not contain a discernible opinion towards the source'), (1, 'Strongly disagrees'), (2, 'disagrees'), (3, 'Neutral'), (4, 'Agrees'), (5, 'Strongly agrees')], default=0, help_text='To what extent does the text in the tweet (dis)agree withqi the source?', verbose_name='q7'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q8',
            field=models.IntegerField(choices=[(0, 'The tweet does not contain a discernible opinion towards the source'), (1, 'Very Negative'), (2, 'Negative'), (3, 'Neutral'), (4, 'Positive'), (5, 'Very positive')], default=0, help_text='To what extent would you describe the text in the tweet as positive or negative towards the source?', verbose_name='q8'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q9',
            field=models.IntegerField(choices=[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')], default=0, help_text='On a scale from 1 to 5, to what extent does the tweet express feelings of enthusiasm? (1 = Not at all; 5 = Extremely)', verbose_name='q9'),
        ),
    ]
| 2.375 | 2 |
tests/test_boundary.py | denisvasilik/binalyzer-template-provider | 0 | 12769162 | <filename>tests/test_boundary.py
"""
test_boundary
~~~~~~~~~~~~~
This module implements tests related to a template's boundary. A boundary
attribute influences the following attributes:
* Offset of current template
* Absolute address of current template
* Size of current template
* Size of parent template
NOTE: The current template is the template where the boundary is declared.
"""
import io
from binalyzer_core import Binalyzer, Template, ValueProperty
from binalyzer_template_provider import XMLTemplateParser
def test_boundary_attribute():
    """A bare boundary gives offset 0, size 0 and a ValueProperty."""
    template = XMLTemplateParser(
        """
<template boundary="0x800"></template>
"""
    ).parse()
    assert isinstance(template, Template)
    assert isinstance(template.boundary_property, ValueProperty)
    assert template.offset == 0
    assert template.boundary == 0x800
    assert template.size == 0
def test_boundary_attribute_nested():
    """A parent's size is rounded up to its own boundary (here 0x200)."""
    template = XMLTemplateParser(
        """
<template boundary="0x200">
<template boundary="0x100">
<field size="1"></field>
</template>
</template>
"""
    ).parse()
    assert template.absolute_address == 0
    assert template.offset == 0
    assert template.size == 0x200
    assert template.children[0].absolute_address == 0
    assert template.children[0].offset == 0
    assert template.children[0].size == 0x100
def test_boundary_attribute_nested_with_inner_element_having_greater_boundary():
    """An inner boundary (0x500) larger than the parent's (0x200) stretches
    the parent to the next 0x200 multiple, 0x600."""
    template = XMLTemplateParser(
        """
<template boundary="0x200">
<template boundary="0x500">
<field name="field" size="1">
</field>
</template>
</template>
"""
    ).parse()
    assert template.absolute_address == 0
    assert template.offset == 0
    assert template.size == 0x600
    assert template.children[0].absolute_address == 0
    assert template.children[0].offset == 0
    assert template.children[0].size == 0x500
def test_boundary_attribute_with_offset():
    """A declared offset (0x20) is rounded up to the boundary (0x100 = 256)."""
    template = XMLTemplateParser(
        """
<template offset="0x20" boundary="0x100">
</template>
"""
    ).parse()
    assert isinstance(template, Template)
    assert isinstance(template.boundary_property, ValueProperty)
    assert template.offset == 256
    assert template.size == 0
def test_boundary_attribute_with_offset_and_child():
    """With any content, the size is also padded up to the boundary (256)."""
    template = XMLTemplateParser(
        """
<template offset="0x20" boundary="0x100">
<field size="1"></field>
</template>
"""
    ).parse()
    assert isinstance(template, Template)
    assert isinstance(template.boundary_property, ValueProperty)
    assert template.offset == 256
    assert template.size == 256
def test_boundary_attribute_with_parent_offset_on_boundary():
    """A parent offset (0x200) already on the boundary leaves the bounded
    child at offset 0."""
    template = XMLTemplateParser(
        """
<template>
<layout name="layout" offset="0x200">
<area name="area" boundary="0x200">
<field name="field" size="1"></field>
</area>
</layout>
</template>
"""
    ).parse()
    assert template.layout.area.absolute_address == 0x200
    assert template.layout.offset == 0x200
    assert template.layout.area.offset == 0x0
    assert template.layout.area.boundary == 0x200
    assert template.layout.area.size == 0x200
def test_boundary_attribute_with_parent_offset_besides_boundary():
    """A parent offset (0x300) off the boundary pushes the bounded child to
    the next multiple: absolute 0x400, i.e. offset 0x100 within the parent."""
    template = XMLTemplateParser(
        """
<template>
<layout name="layout" offset="0x300">
<area name="area" boundary="0x200">
<field name="field" size="1"></field>
</area>
</layout>
</template>"""
    ).parse()
    assert template.layout.offset == 0x300
    assert template.layout.area.offset == 0x100
    assert template.layout.area.absolute_address == 0x400
    assert template.layout.area.boundary == 0x200
    assert template.layout.area.size == 0x200
def test_boundary_attribute_with_parent_offset_besides_boundary_and_nested():
    """Same as the previous test, plus a nested 0x100 boundary inside the area."""
    template = XMLTemplateParser(
        """
<template>
<layout name="layout" offset="0x300">
<area name="area" boundary="0x200">
<field name="field" boundary="0x100">
<field name="nested_field" size="1"></field>
</field>
</area>
</layout>
</template>
"""
    ).parse()
    assert template.layout.area.absolute_address == 0x400
    assert template.layout.area.offset == 0x100
    assert template.layout.area.size == 0x200
    assert template.layout.area.field.size == 0x100
    assert template.layout.area.field.offset == 0x0
def test_boundary_and_sizing_stretch():
    """A stretch-sized payload fills the remaining space up to the boundary."""
    template = XMLTemplateParser(
        """
<template boundary="0x100">
<header name="header" size="4">
</header>
<payload name="payload" sizing="stretch">
</payload>
</template>
"""
    ).parse()
    binalyzer = Binalyzer(template)
    # Back the template with 256 bytes of 0x01, then overwrite the header.
    binalyzer.data = io.BytesIO(bytes([0x01] * binalyzer.template.size))
    binalyzer.template.header.value = bytes([0x02] * 4)
    assert binalyzer.template.payload.size == 252
    assert binalyzer.template.size == 256
    assert binalyzer.template.header.size == 4
    assert binalyzer.template.header.value == bytes([0x02] * 4)
    assert binalyzer.template.payload.value == bytes([0x01] * 252)
def test_boundary_and_sizing_stretch_with_offset():
    """Stretch sizing still fills to the boundary when the declared offset
    (0x20) is rounded up to the boundary (256)."""
    template = XMLTemplateParser(
        """
<template offset="0x20" boundary="0x100">
<header name="header" size="4">
</header>
<payload name="payload" sizing="stretch">
</payload>
</template>
"""
    ).parse()
    binalyzer = Binalyzer(template)
    binalyzer.template.header.value = bytes([0x02] * 4)
    binalyzer.template.payload.value = bytes([0x01] * binalyzer.template.payload.size)
    assert binalyzer.template.offset == 256
    assert binalyzer.template.size == 256
    assert binalyzer.template.header.size == 4
    assert binalyzer.template.header.value == bytes([0x02] * 4)
    assert binalyzer.template.payload.offset == 4
    assert binalyzer.template.payload.size == 252
    assert binalyzer.template.payload.value == bytes(
        [0x01] * binalyzer.template.payload.size
    )
| 2.921875 | 3 |
app/zipcode_jp/models.py | miyagi389/zipcode-ja-python-django | 0 | 12769163 | <reponame>miyagi389/zipcode-ja-python-django
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from django.db import models
from django.db.models import Q
from django.utils.encoding import python_2_unicode_compatible
from model_utils.models import TimeStampedModel
class ZipCodeManager(models.Manager):
    """Manager adding a free-text search across all code and name columns."""

    def search(self, search_key):
        """Return a queryset of rows whose code or any (kana) name contains
        *search_key* -- a case-insensitive OR over all searchable columns."""
        q = self.filter(
            Q(jis_code__icontains=search_key) |
            Q(code__icontains=search_key) |
            Q(prefecture_kana__icontains=search_key) |
            Q(city_kana__icontains=search_key) |
            Q(town_kana__icontains=search_key) |
            Q(prefecture__icontains=search_key) |
            Q(city__icontains=search_key) |
            Q(town__icontains=search_key)
        )
        return q
@python_2_unicode_compatible
class ZipCode(TimeStampedModel):
    """
    Stores the zip-code records published by Japan Post.

    See: http://www.post.japanpost.jp/zipcode/dl/readme.html
    """

    objects = ZipCodeManager()

    # National local-government code (JIS X0401, X0402).
    jis_code = models.CharField(
        max_length=10,
        blank=False,
        verbose_name="全国地方公共団体コード(JIS X0401、X0402)",
    )
    # Old 5-digit zip code.
    old_code = models.CharField(
        max_length=5,
        blank=False,
        verbose_name="郵便番号(旧)",
    )
    # Current 7-digit zip code (indexed for lookups).
    code = models.CharField(
        max_length=7,
        blank=False,
        db_index=True,
        verbose_name="郵便番号(新)",
    )
    # Prefecture / city / town names in katakana.
    prefecture_kana = models.CharField(
        max_length=255,
        blank=False,
        verbose_name="都道府県名(カナ)",
    )
    city_kana = models.CharField(
        max_length=255,
        blank=False,
        verbose_name="市区町村名(カナ)",
    )
    town_kana = models.CharField(
        max_length=255,
        blank=False,
        verbose_name="町域名(カナ)",
    )
    # Prefecture / city / town names in kanji.
    prefecture = models.CharField(
        max_length=255,
        blank=False,
        verbose_name="都道府県名",
    )
    city = models.CharField(
        max_length=255,
        blank=False,
        verbose_name="市区町村名",
    )
    town = models.CharField(
        max_length=255,
        blank=False,
        verbose_name="町域名",
    )
    # 1 when one town area is represented by two or more zip codes.
    town_divide = models.PositiveSmallIntegerField(
        null=False,
        verbose_name="一町域が二以上の郵便番号で表される場合の表示",
        help_text="1:該当、0:該当せず",
    )
    # 1 when banchi are numbered per koaza.
    koaza_banchi = models.PositiveSmallIntegerField(
        null=False,
        verbose_name="小字毎に番地が起番されている町域の表示",
        help_text="1:該当、0:該当せず",
    )
    # 1 when the town area has chome subdivisions.
    tyoume = models.PositiveSmallIntegerField(
        null=False,
        verbose_name="丁目を有する町域の場合の表示。",
        help_text="1:該当、0:該当せず",
    )
    # 1 when one zip code covers two or more town areas.
    has_some_town = models.PositiveSmallIntegerField(
        null=False,
        verbose_name="一つの郵便番号で二以上の町域を表す場合の表示",
        help_text="1:該当、0:該当せず",
    )
    # 0: unchanged, 1: changed, 2: abolished (abolition data only).
    update_state = models.PositiveSmallIntegerField(
        null=False,
        verbose_name="更新の表示",
        help_text="0:変更なし、1:変更あり、2:廃止(廃止データのみ使用)",
    )
    # Reason for the change, per Japan Post's coding (see help_text).
    update_reason = models.PositiveSmallIntegerField(
        null=False,
        verbose_name="変更理由",
        help_text="0:変更なし、1:市政・区政・町政・分区・政令指定都市施行、2:住居表示の実施、3:区画整理、4:郵便区調整等、5:訂正、6:廃止(廃止データのみ使用)",
    )

    def __str__(self):
        """Human-readable form: '<code> <prefecture> <city> <town>'."""
        return "{code} {prefecture} {city} {town}".format(
            code=self.code,
            prefecture=self.prefecture,
            city=self.city,
            town=self.town
        )
| 2.3125 | 2 |
molecule/default/tests/test_default.py | HadrienPatte/ansible-role-deluge | 0 | 12769164 | <filename>molecule/default/tests/test_default.py
import os
import testinfra.utils.ansible_runner
import pytest
# Run these checks on every host in the Molecule-generated inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# NOTE: ('deluged') is a plain string -- the parentheses are redundant.
@pytest.mark.parametrize('name', [
    ('deluged'),
])
def test_package_is_installed(host, name):
    """The deluged package must be installed."""
    package = host.package(name)
    assert package.is_installed
@pytest.mark.parametrize('name', [
    ('deluged'),
])
def test_service_is_running(host, name):
    """The deluged service must be running."""
    service = host.service(name)
    assert service.is_running
@pytest.mark.parametrize('name', [
    ('deluged'),
])
def test_service_is_enabled(host, name):
    """The deluged service must be enabled at boot."""
    service = host.service(name)
    assert service.is_enabled
@pytest.mark.parametrize('directory', [
    ('/var/lib/deluged'),
    ('/var/lib/deluged/config'),
    ('/var/log/deluged'),
])
def test_directory(host, directory):
    """Each deluged directory exists and is owned by deluge:deluge."""
    directory = host.file(directory)
    assert directory.exists
    assert directory.is_directory
    assert directory.user == 'deluge'
    assert directory.group == 'deluge'
| 2.0625 | 2 |
tamilnlp/WikiByCategory.py | AshokR/TamilNLP | 64 | 12769165 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from wikiapi import WikiApi
import requests, pprint
# This is suitable for extracting content that is organized by categories and sub-categories
# This code requires the wiki-api python library created by <NAME> of UK
# https://github.com/richardasaurus/wiki-api
# Note that the Wikipedia categories and sub-categories are not in a tree structure. There are circular references.
# Single client for the Tamil Wikipedia ('en' is the default locale).
# The redundant bare WikiApi() instance that was immediately overwritten
# has been removed.
wiki = WikiApi({'locale': 'ta'})
# Get the page text of the article with the given title
def getArticleParagraphs(title):
    """Fetch the Wikipedia article *title* and return only its complete paragraphs.

    A paragraph is kept when it is longer than 30 characters and ends in
    '.', '!', '?' or '"' — i.e. it is a whole paragraph, not a fragment.
    """
    article = wiki.get_article(title)
    full_text = article.content
    kept = ""
    for block in full_text.split('\n\n'):
        if len(block) > 30 and block.endswith(('.', '!', '?', '"')):
            kept = kept + "\n\n" + block
    return kept
def __getTitlesForCategory(title, f, articleTitles=None):
    """Recursively collect article titles under Wikipedia category *title*.

    Writes each article's paragraphs to the already-open file handle *f* and
    returns the accumulated list of article titles.  *articleTitles* is the
    shared accumulator threaded through the recursion so that Wikipedia's
    circular category references do not cause infinite loops or duplicate
    downloads (backward compatible: callers may omit it).
    """
    # url = 'https://ta.wikipedia.org/w/api.php?action=query&list=categorymembers&cmnamespace=14&cmlimit=500&format=json&cmtitle=Category:வரலாறு'
    # http://ta.wikipedia.org/w/api.php?action=query  # Base Url
    # &format=json          # want data in JSON, default is XML
    # &cmlimit=500          # முதல் 500 துணைப் பகுப்புகள் / கட்டுரைகள்
    # &cmnamespace=14       # 14 - துணைப் பகுப்புகள்; 0 - கட்டுரைகள்
    # &list=categorymembers
    # &cmtitle=Category:வரலாறு   # பகுப்பு = வரலாறு
    if articleTitles is None:
        articleTitles = []
    baseUrl = 'https://ta.wikipedia.org/w/api.php?action=query&list=categorymembers&cmlimit=500&format=json'
    # For extracting the Wikisource content
    # In the wikiapi.py file change the following two lines
    #    api_uri = 'wikisource.org/w/api.php'
    #    article_uri = 'wikisource.org/wiki/'
    # And change the baseUrl here as follows
    # baseUrl = 'https://ta.wikisource.org/w/api.php?action=query&list=categorymembers&cmlimit=500&format=json'
    namespaceUrl = '&cmnamespace='
    categoryUrl = '&cmtitle=Category:'
    articleNamespace = '0'
    categoryNamespace = '14'

    # First pass: the articles directly inside this category.
    url = baseUrl + namespaceUrl + articleNamespace + categoryUrl + title
    data = requests.get(url)
    result = data.json()
    pprint.pprint(result)

    for item in result["query"]["categorymembers"]:
        print(str(len(articleTitles)) + ": " + item['title'])
        if item['title'] not in articleTitles:
            articleTitles.append(item['title'])
            f.write(getArticleParagraphs(item['title']))
        else:
            # A repeated title means we have looped back into an already
            # visited region of the category graph — stop this branch.
            break
        # Safety check to avoid an unbounded crawl.
        if len(articleTitles) > 15000:
            return articleTitles

    # Second pass: recurse into the sub-categories.
    url = baseUrl + namespaceUrl + categoryNamespace + categoryUrl + title
    data = requests.get(url)
    result = data.json()

    for item in result["query"]["categorymembers"]:
        print("Item title: " + item['title'])
        # Category titles carry a "Category:" style prefix; strip the first
        # 8 characters to get the bare title.
        cat = item['title'][8:]
        # Bug fix: previously this called the public getTitlesForCategory(),
        # which reopened a hard-coded output file and discarded the shared
        # title list on every recursion.
        __getTitlesForCategory(cat, f, articleTitles)

    return articleTitles
def getTitlesForCategory(category='வரலாறு', outputfile='/your/folder/wikipedia_content.txt'):
    """Download all article texts under *category* into *outputfile*.

    Returns the list of collected article titles.  The output file is now
    closed deterministically via a context manager (it previously leaked),
    and a ``None`` return from the helper no longer crashes ``len()``.
    """
    with open(outputfile, 'wt', encoding='utf-8') as f:
        articleTitles = __getTitlesForCategory(category, f) or []
    print(len(articleTitles))
    return articleTitles
| 3.171875 | 3 |
robo_ai/resources/oauth.py | robo-ai/roboai-python-sdk | 0 | 12769166 | <reponame>robo-ai/roboai-python-sdk
import requests
from requests.auth import HTTPBasicAuth
from robo_ai.exception.api_error import ApiError
from robo_ai.exception.invalid_credentials_error import InvalidCredentialsError
from robo_ai.exception.invalid_token_error import InvalidTokenError
from robo_ai.model.auth.access_info import AccessInfo
from robo_ai.model.auth.access_token import AccessToken
from robo_ai.resources.client_resource import ClientResource
class OauthResource(ClientResource):
    """Authentication manager for the RoboAI OAuth endpoints."""

    def authenticate(self, api_key: str) -> AccessToken:
        """Start a new session using *api_key*.

        Args:
            api_key (str): the API key to exchange for an access token.

        Raises:
            InvalidCredentialsError: when the credentials are rejected (401).
            ApiError: on any other API-side failure.

        Returns:
            AccessToken: carries 'access_token', 'token_type', 'expires_in'
            and 'scope'. See [robo_ai.model.auth.access_token.AccessToken].
        """
        config = self.get_config()
        basic_auth = HTTPBasicAuth(
            config.http_auth_username,
            config.http_auth_password
        )
        payload = {
            'grant_type': 'client_credentials',
            'apiKey': api_key,
        }

        response = requests.post(config.base_endpoint + '/oauth/token',
                                 data=payload, auth=basic_auth)
        if response.status_code == 401:
            raise InvalidCredentialsError()
        if response.status_code != requests.codes.ok:
            raise ApiError()

        body = response.json()
        return AccessToken(
            body['access_token'],
            body['token_type'],
            body['expires_in'],
            body['scope']
        )

    def get_token_info(self, token: str) -> AccessInfo:
        """Introspect *token* and return its metadata.

        Args:
            token (str): the access token to inspect.

        Raises:
            InvalidTokenError: when the token is rejected (401).
            ApiError: on any other API-side failure.

        Returns:
            AccessInfo: carries 'active', 'exp', 'authorities', 'client_id'
            and 'scope'. See [robo_ai.model.auth.access_info.AccessInfo].
        """
        config = self.get_config()
        basic_auth = HTTPBasicAuth(
            config.http_auth_username,
            config.http_auth_password
        )

        response = requests.post(config.base_endpoint + '/oauth/check_token/',
                                 data={'token': token}, auth=basic_auth)
        if response.status_code == 401:
            raise InvalidTokenError()
        if response.status_code != requests.codes.ok:
            raise ApiError()

        info = response.json()
        return AccessInfo(
            info['active'],
            info['exp'],
            info['authorities'],
            info['client_id'],
            info['scope'],
        )
| 2.6875 | 3 |
middleware/legato/driver/controller/interface/parallel_smc/config/parallel_smc.py | rbryson74/gfx | 0 | 12769167 | # coding: utf-8
##############################################################################
# Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
#
# Subject to your compliance with these terms, you may use Microchip software
# and any derivatives exclusively with Microchip products. It is your
# responsibility to comply with third party license terms applicable to your
# use of third party software (including open source software) that may
# accompany Microchip software.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
# PARTICULAR PURPOSE.
#
# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
# WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
# BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
# FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
# ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
# THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
##############################################################################
def instantiateComponent(comp):
    """Harmony/MHC entry point: register the file symbols and configuration
    symbols for the parallel-SMC (8080-style) display interface component.

    Args:
        comp: the Harmony component instance being instantiated.
    """
    # Public interface header, copied into the generated project unchanged.
    GFX_INTF_SMC_H = comp.createFileSymbol("GFX_INTF_SMC_H", None)
    GFX_INTF_SMC_H.setDestPath("gfx/interface/")
    GFX_INTF_SMC_H.setSourcePath("../drv_gfx_disp_intf.h")
    GFX_INTF_SMC_H.setOutputName("drv_gfx_disp_intf.h")
    GFX_INTF_SMC_H.setProjectPath("config/" + Variables.get("__CONFIGURATION_NAME") + "/gfx/interface")
    GFX_INTF_SMC_H.setType("HEADER")

    # Implementation source, generated from a FreeMarker template (markup=True).
    GFX_INTF_SMC = comp.createFileSymbol("GFX_INTF_SMC", None)
    GFX_INTF_SMC.setDestPath("gfx/interface/parallel_smc/")
    GFX_INTF_SMC.setSourcePath("templates/drv_gfx_disp_intf_parallel_smc.c.ftl")
    GFX_INTF_SMC.setOutputName("drv_gfx_disp_intf_parallel_smc.c")
    GFX_INTF_SMC.setProjectPath("config/" + Variables.get("__CONFIGURATION_NAME") + "/gfx/interface/parallel_smc")
    GFX_INTF_SMC.setMarkup(True)
    GFX_INTF_SMC.setType("SOURCE")

    ### Interface type is required for all interface components. This is queried by the driver to determine
    ### the display interface supported by the interface component. Valid values are "SPI 4-line", "Parallel 16-bit",
    ### and "Parallel 8-bit"
    InterfaceType = comp.createStringSymbol("InterfaceType", None)
    InterfaceType.setLabel("Interface Type")
    InterfaceType.setDescription("The interface configuration")
    InterfaceType.setDefaultValue("Parallel 16-bit")
    InterfaceType.setVisible(False)

    ### Driver settings menu
    DriverSettingsMenu = comp.createMenuSymbol("DriverSettingsMenu", None)
    DriverSettingsMenu.setLabel("Driver Settings")

    UseSyncBarriers = comp.createBooleanSymbol("UseSyncBarriers", DriverSettingsMenu)
    UseSyncBarriers.setLabel("Use Synchronization Barriers")
    UseSyncBarriers.setDescription("Use Synchronization Barriers.")
    UseSyncBarriers.setDefaultValue(True)
    UseSyncBarriers.setVisible(False)

    DelayNOPCount = comp.createIntegerSymbol("DelayNOPCount", DriverSettingsMenu)
    DelayNOPCount.setLabel("Number of NOP for delay")
    DelayNOPCount.setDescription("Number of NOP for delay")
    DelayNOPCount.setDefaultValue(4)
    ###

    ### Interface settings menu
    InterfaceSettingsSMCMenu = comp.createMenuSymbol("InterfaceSettingsSMCMenu", None)
    InterfaceSettingsSMCMenu.setLabel("Parallel 8080 Display Interface Settings")

    # Hidden for now; consumed by configureSMCComponent when the SMC attaches.
    EBIChipSelectIndex = comp.createIntegerSymbol("EBIChipSelectIndex", InterfaceSettingsSMCMenu)
    EBIChipSelectIndex.setLabel("EBI Chip Select Index")
    EBIChipSelectIndex.setDescription("The chip select index")
    EBIChipSelectIndex.setDefaultValue(0)
    EBIChipSelectIndex.setMin(0)
    EBIChipSelectIndex.setMax(4)
    EBIChipSelectIndex.setVisible(False)

    ControlPinsMenu = comp.createMenuSymbol("ControlPinsMenu", InterfaceSettingsSMCMenu)
    ControlPinsMenu.setLabel("Control Pin Settings")

    # CS# is fixed to GPIO control (read-only symbol).
    ChipSelectControl = comp.createComboSymbol("ChipSelectControl", ControlPinsMenu, ["GPIO", "Peripheral"])
    ChipSelectControl.setLabel("CS# Control")
    ChipSelectControl.setDescription("Chip Select Control")
    ChipSelectControl.setDefaultValue("GPIO")
    ChipSelectControl.setReadOnly(True)

    # D/C# may be GPIO- or peripheral-driven; toggling it shows/hides the
    # DCXAddressBit symbol via onDataCommandSelectSet.
    DataCommandSelectControl = comp.createComboSymbol("DataCommandSelectControl", ControlPinsMenu, ["GPIO", "Peripheral"])
    DataCommandSelectControl.setLabel("D/C# Control")
    DataCommandSelectControl.setDescription("Data Command Select Control")
    DataCommandSelectControl.setDefaultValue("GPIO")
    DataCommandSelectControl.setDependencies(onDataCommandSelectSet, ["DataCommandSelectControl"])

    ReadStrobeControl = comp.createComboSymbol("ReadStrobeControl", ControlPinsMenu, ["GPIO", "Peripheral"])
    ReadStrobeControl.setLabel("RD# Control")
    ReadStrobeControl.setDescription("Read Strobe Control")
    ReadStrobeControl.setDefaultValue("GPIO")

    WriteStrobeControl = comp.createComboSymbol("WriteStrobeControl", ControlPinsMenu, ["GPIO", "Peripheral"])
    WriteStrobeControl.setLabel("WR# Control")
    WriteStrobeControl.setDescription("Write Strobe Control")
    WriteStrobeControl.setDefaultValue("GPIO")

    # Address line used to drive the D/C# (DCX) signal when it is
    # peripheral-controlled.
    DCXAddressBit = comp.createIntegerSymbol("DCXAddressBit", DataCommandSelectControl)
    DCXAddressBit.setLabel("DCX Address Bit")
    DCXAddressBit.setDescription("Address bit used for DCX signal.")
    DCXAddressBit.setDefaultValue(12)
    DCXAddressBit.setMin(0)
    DCXAddressBit.setMax(31)
###
def configureSMCComponent(comp, smcComponent):
    """Push the display's static memory controller (SMC) setup onto the
    attached SMC component for the chip select chosen in this component.

    Args:
        comp: this interface component (provides EBIChipSelectIndex).
        smcComponent: the attached SMC peripheral component to configure.

    NOTE(review): the numeric setup/pulse/cycle values appear to be timings in
    SMC clock cycles taken from a reference design — confirm against the
    display controller datasheet before changing them.
    """
    print("Configuring SMC")
    smcChipSelNum = comp.getSymbolValue("EBIChipSelectIndex")

    smcComponent.setSymbolValue("SMC_CHIP_SELECT" + str(smcChipSelNum), True, 1)
    smcComponent.setSymbolValue("SMC_MEM_SCRAMBLING_CS" + str(smcChipSelNum), False, 1)

    # SMC Write Timings
    smcComponent.setSymbolValue("SMC_NWE_SETUP_CS" + str(smcChipSelNum), 4, 1)
    smcComponent.setSymbolValue("SMC_NCS_WR_SETUP_CS" + str(smcChipSelNum), 0, 1)
    smcComponent.setSymbolValue("SMC_NWE_PULSE_CS" + str(smcChipSelNum), 4, 1)
    smcComponent.setSymbolValue("SMC_NCS_WR_PULSE_CS" + str(smcChipSelNum), 10, 1)
    smcComponent.setSymbolValue("SMC_DATA_BUS_CS" + str(smcChipSelNum), 1, 1)
    smcComponent.setSymbolValue("SMC_NWE_CYCLE_CS" + str(smcChipSelNum), 3, 1)

    # SMC Read Timings
    smcComponent.setSymbolValue("SMC_NRD_SETUP_CS" + str(smcChipSelNum), 2, 1)
    smcComponent.setSymbolValue("SMC_NCS_RD_SETUP_CS" + str(smcChipSelNum), 0, 1)
    smcComponent.setSymbolValue("SMC_NRD_PULSE_CS" + str(smcChipSelNum), 63, 1)
    smcComponent.setSymbolValue("SMC_NCS_RD_PULSE_CS" + str(smcChipSelNum), 63, 1)
    smcComponent.setSymbolValue("SMC_NRD_CYCLE_CS" + str(smcChipSelNum), 110, 1)

    # SMC Mode Configuration
    smcComponent.setSymbolValue("SMC_DATA_BUS_CS" + str(smcChipSelNum), 1, 1)
    smcComponent.setSymbolValue("SMC_BAT_CS" + str(smcChipSelNum), 0, 1)
    smcComponent.setSymbolValue("SMC_READ_ENABLE_MODE_CS" + str(smcChipSelNum), True, 1)
    smcComponent.setSymbolValue("SMC_WRITE_ENABLE_MODE_CS" + str(smcChipSelNum), False, 1)
def onDataCommandSelectSet(sourceSymbol, event):
    """Show the DCX address-bit symbol only when D/C# is peripheral-driven."""
    component = sourceSymbol.getComponent()
    is_peripheral = component.getSymbolByID("DataCommandSelectControl").getValue() != "GPIO"
    component.getSymbolByID("DCXAddressBit").setVisible(is_peripheral)
def onInterfaceTypeChanged(sourceSymbol, event):
    # Dependency callback for the (hidden) InterfaceType symbol; log-only.
    print("Interface type changed")
def onAttachmentConnected(source, target):
    """When the SMC_CS dependency attaches, configure the SMC for the display."""
    if source["id"] != "SMC_CS":
        return
    configureSMCComponent(source["component"], target["component"])
anvilfs/workloadidentitycredentials.py | anvilproject/fs.anvilfs | 3 | 12769168 | import datetime
from google.auth import credentials
import json
class WorkloadIdentityCredentials(credentials.Scoped, credentials.Credentials):
    """Credentials obtained from the GCE metadata server (workload identity)."""

    def __init__(self, scopes):
        super(WorkloadIdentityCredentials, self).__init__()
        self._scopes = scopes

    def with_scopes(self, scopes):
        """Return a new credentials object bound to *scopes*."""
        return WorkloadIdentityCredentials(scopes=scopes)

    @property
    def requires_scopes(self):
        # Scopes are optional; they are appended to the token URL when set.
        return False

    def refresh(self, request):
        """Fetch a fresh access token from the instance metadata endpoint."""
        endpoint = ('http://metadata.google.internal/computeMetadata/'
                    'v1/instance/service-accounts/default/token')
        if self._scopes:
            endpoint = endpoint + '?scopes=' + ','.join(self._scopes)
        reply = request(url=endpoint, method="GET", headers={
            'Metadata-Flavor': 'Google'})
        if reply.status != 200:
            raise RuntimeError('bad status from metadata server')
        payload = json.loads(reply.data)
        self.token = payload['access_token']
        self.expiry = (datetime.datetime.utcnow()
                       + datetime.timedelta(seconds=payload['expires_in']))
| 2.59375 | 3 |
projects/min_max_average_finder/main.py | PrasadHonrao/python-samples | 3 | 12769169 | input_numbers = input('Enter numbers separated by a space : ').split()
max = int(input_numbers[0])
min = int(input_numbers[0])
avg = 0
sum = 0
for i in range(0, len(input_numbers)):
input_numbers[i] = int(input_numbers[i])
sum = sum + input_numbers[i]
if input_numbers[i] > max:
max = input_numbers[i]
if input_numbers[i] < min:
min = input_numbers[i]
avg = sum / len(input_numbers)
print (max, min, avg) | 3.765625 | 4 |
gadget/instrumentation/api/__init__.py | rlnsanz/inspectional-rara-parakeet | 1 | 12769170 | <gh_stars>1-10
from .experiment import *
| 1.070313 | 1 |
mak/build_framework/build/host/windows.py | motor-dev/Motor | 4 | 12769171 | import os
def build(bld):
    """Prepend the win32 host tool directory to both PATH variants.

    Updates the build environment's PATH string and, when set, the waf
    env PATH list so host tools under host/win32/bin are found first.
    """
    environ = getattr(bld, 'environ', os.environ)
    host_bin = bld.bldnode.parent.parent.make_node("host/win32/bin").abspath()
    environ['PATH'] = os.pathsep.join([host_bin, environ['PATH']])
    if bld.env.PATH:
        bld.env.PATH = [host_bin] + bld.env.PATH
| 2.09375 | 2 |
pagetools/menus/tests/test_views.py | theithec/pagetools | 0 | 12769172 | <filename>pagetools/menus/tests/test_views.py
from django.test import RequestFactory
from django.views.generic import DetailView
from pagetools.menus.tests import MenuDataTestCase
from pagetools.menus.views import SelectedMenuentriesMixin
from pagetools.tests.test_models import ConcretePublishableLangModel
class SelectedMenuentriesMixinTest(MenuDataTestCase):
    """
    Tests context-data in a Django Mixin like a boss
    https://gist.github.com/dnmellen/6507189
    """

    class DummyView(SelectedMenuentriesMixin, DetailView):
        """
        To test get_context_data we need a TemplateView child
        """

        def __init__(self, *args, **kwargs):
            self.object = ConcretePublishableLangModel.objects.first()
            # Bug fix: the original ``super(*args, **kwargs)`` only created the
            # super proxy and never invoked the parent initialisers, so
            # DetailView.__init__ was silently skipped.
            super().__init__(*args, **kwargs)

        model = ConcretePublishableLangModel
        template_name = "any_template.html"  # TemplateView requires this

    def setUp(self):
        super().setUp()
        self.request = RequestFactory().get("/fake-path")
        # Setup request and view.
        self.view = self.DummyView()

    def test_context_data_no_args(self):
        # Prepare initial params
        kwargs = {}
        # Launch Mixin's get_context_data
        context = self.view.get_context_data(**kwargs)
        # Your checkings here
        self.assertEqual(context["menukeys"], ["dummyview"])
| 2.125 | 2 |
py_paperdb.py | sungcheolkim78/py_paperdb | 0 | 12769173 | <gh_stars>0
""" py_paperdb """
import pandas as pd
import numpy as np
import os
import re
import tqdm
import pickle
import subprocess
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from py_readpaper import Paper
import bibdb
import filedb
from utils import safe_pickle_dump
class PaperDB(object):
    """ paper database using pandas

    Bug fixes relative to the previous revision:
      - head() no longer mutates its shared mutable default ``items`` list
      - paper() catches Exception instead of a bare except
      - reload() referenced an undefined ``debug`` name
      - recommend_topic() referenced an undefined ``ncom`` name and crashed
        with AttributeError when the LDA model had not been built yet
      - word_list() compared ``_vocab`` to None although it is initialised
        to an empty dict
    """

    def __init__(self, dirname='.', cache=True, debug=False):
        """ initialize database """
        self._debug = debug
        self._dirname = dirname
        self._bibfilename = '.paperdb.csv'
        self._metafname = './meta.p'
        self._tfidfname = './tfidf.p'
        self._simfname = './sim.p'
        self._ldafname = './lda.p'
        self._currentpaper = ''
        self._updated = False
        self._sim_dict = {}
        self._vocab = {}
        self._idf = []
        self._selection = set()

        if cache and os.path.exists(self._bibfilename):
            p = pd.read_csv(self._bibfilename, index_col=0)
            self._bibdb = bibdb.clean_db(p)
            if debug: print('... read from {}'.format(self._bibfilename))
        else:
            p = filedb.build_filedb(dirname=dirname, debug=debug)
            self._bibdb = bibdb.clean_db(p)
            self._bibdb.to_csv(self._bibfilename)
            if debug: print('... save to {}'.format(self._bibfilename))

    # view database
    def head(self, n=5, full=False, newest=False, items=[]):
        """ show old papers """

        if newest:
            temp = self._bibdb.sort_values('import_date', ascending=False)[:n]
            # rebind instead of append: appending mutated the shared default list
            items = items + ['import_date']
        else:
            temp = self._bibdb[:n]

        if full and (len(items) == 0):
            return temp
        else:
            return quickview(temp, items=items)

    def tail(self, n=5, full=False, items=[]):
        """ show recently published papers """

        if full and (len(items) == 0):
            return self._bibdb[-n:]
        else:
            return quickview(self._bibdb[-n:], items=items)

    # search database
    def search_sep(self, year=0, author='', journal='', author1='', title='', doi=''):
        """ search database by separate search keywords """

        res = search(self._bibdb, year=year, author=author, journal=journal, author1=author1, title=title, doi=doi)

        if len(res.index) > 0:
            if len(res.index) > 10:
                # large result sets require confirmation before polluting the selection
                yesno = input('Will you include all these selection? [Yes/No] ')
                if yesno in ['Yes', 'Y', 'y', 'yes']:
                    if self._debug: print('... save to selection: {}'.format(res.index.values))
                    self._selection = self._selection | set(res.index)
            else:
                if self._debug: print('... save to selection: {}'.format(res.index.values))
                self._selection = self._selection | set(res.index)

        return quickview(res)

    def search_all(self, sstr=None, columns=None):
        """ search searchword for all database """

        if sstr is None:
            print('... add search string')
            os.exit(1)
        if columns is None:
            columns = ['title', 'abstract', 'author', 'keywords', 'doi', 'local-url']

        if 'keywords' in columns:
            # keywords is a list column; flatten it to a csv string for str.contains
            self._bibdb['keywords_csv'] = [ ','.join(x) for x in self._bibdb['keywords'] ]
            columns.remove('keywords')
            columns.append('keywords_csv')

        sindex = []
        for c in columns:
            res = self._bibdb[self._bibdb[c].str.contains(sstr)].index
            if len(res) > 0:
                sindex.extend(res)

        if len(sindex) > 0:
            sindex = sorted(list(set(sindex)))
            self._selection = self._selection.union(set(sindex))
            return quickview(self._bibdb.iloc[sindex])

    def search_wrongname(self, columns=['doi', 'year', 'author1', 'journal']):
        """ find wrong file name from filedb """

        condition = (self._bibdb['has_bib'] == False)
        for c in columns:
            condition = condition | (self._bibdb[c] == '') | (self._bibdb[c] == 'nan')

        sindex = self._bibdb[condition].index
        print('... total {}/{} incorrect papers'.format(len(sindex), len(self._bibdb)))

        return quickview(self._bibdb.iloc[sindex])

    def search_new(self, n=10):
        """ print out recently added papers """

        return quickview(self._bibdb.sort_values(by='import_date')[-n:])

    def search_paper(self, paper, as_index=False):
        """ from Paper object find out position in bibdb """

        s_db = self._bibdb
        if paper.doi() != '':
            s_db = search(s_db, doi=paper.doi())
        elif paper.year() is not None:
            s_db = search(s_db, year=int(paper.year()))

        # multiple match
        if len(s_db) > 1:
            if self._debug: print('... multiple matches')
            return quickview(s_db)

        # no match: append a new record built from the paper's metadata
        if len(s_db) == 0:
            if self._debug: print('... add to bibdb')
            if paper._bib is None:
                item = {'year': paper._year, 'journal': paper._journal, 'author': paper._author, \
                        'author1': paper._author1, 'abstract': paper._abstract, 'keywords': paper._keywords }
            else:
                item = paper._bib

            self._bibdb = self._bibdb.append(item, ignore_index=True)
            idx = len(self._bibdb) - 1

        # exact match: refresh the stored record from the paper's bib data
        if len(s_db) == 1:
            if self._debug: print('... update bibdb')
            idx = s_db.index[0]
            if paper._bib is not None:
                for keys in paper._bib.keys():
                    self._bibdb.at[idx, keys] = paper._bib.get(keys)
            self._bibdb.at[idx, 'local-url'] = paper._fname

        self._updated = True
        if as_index:
            return idx
        else:
            return quickview(self._bibdb.iloc[idx])

    # selection operations
    def selection_view(self):
        """ print selection """

        if len(self._selection) > 0:
            if self._debug: print('... # of selection: {}'.format(len(self._selection)))
            return quickview(self._bibdb.iloc[list(self._selection)])

    def selection_bibtex(self, n=-1):
        """ print bibtex items in selection """

        if len(self._selection) == 0:
            return
        if n == -1:
            n = len(self._selection)

        for c, i in enumerate(list(self._selection)):
            if c > n:
                return
            self.paper(i)
            self._currentpaper.bibtex()

    def selection_reset(self):
        """ reset all selections """

        yesno = input("Delete all selection? [Yes/No] ")
        if yesno in ['Y', 'Yes', 'y', 'yes']:
            self._selection = set()

    def selection_add(self, idxs):
        """ add papers by index """

        self._selection = self._selection | set(idxs)

    def selection_remove(self, idxs):
        """ remove papers by index """

        self._selection = self._selection - set(idxs)

    # control paper
    def paper(self, idx, exif=True):
        """ open pdf file in osx """

        try:
            filename = self._bibdb.at[idx, 'local-url']
            self._currentpaper = Paper(filename, exif=exif, debug=self._debug)
            return self._currentpaper
        except Exception:
            # narrowed from a bare except; still best-effort by design
            print('... error reading: {}/{}'.format(idx, len(self._bibdb)))
            return False

    def open(self, idx=-1):
        """ open pdf file in osx """

        if isinstance(self.paper(idx), Paper):
            self._currentpaper.open()
        else:
            cmd = ["Open", self._bibfilename]
            subprocess.call(cmd)

    def readpaper(self, idx=-1, n=10):
        """ open paper in text mode """

        if isinstance(self.paper(idx), Paper):
            return self._currentpaper.head(n=n)

    def item(self, idx):
        """ show records in idx """

        # update using paper's information
        if isinstance(self.paper(idx), Paper):
            self._updated = True
            self._currentpaper.save_bib()
            for k, i in self._currentpaper._bib.items():
                self._bibdb.at[idx, k] = i
            self._bibdb.at[idx, "has_bib"] = True

        return self._bibdb.iloc[idx]

    # manage database
    def export_bib(self, selection=False, bibfilename=None):
        """ save bibtex file and csv file """

        if selection:
            if bibfilename is None:
                bibfilename = 'selection.bib'
            bibdb.to_bib(self._bibdb.iloc[list(self._selection)], bibfilename)
            with open(bibfilename) as f:
                print(f.readlines())
        else:
            bibdb.to_bib(self._bibdb, self._bibfilename)

    def update(self, idx=-1):
        """ save database """

        if idx > -1:
            self._bibdb = filedb.update_filedb(self._bibdb, self._bibdb.at[idx, 'local-url'], debug=self._debug)
            self._updated = True

        if self._updated:
            print('... save database to {}'.format(self._bibfilename))
            self._bibdb.to_csv(self._bibfilename)

    def reload(self, update=True):
        """ re-read bibdb """

        # fixed: previously referenced an undefined global ``debug``
        self._bibdb = filedb.build_filedb(dirname=self._dirname, debug=self._debug)
        print('... save database to {}'.format(self._bibfilename))
        self._bibdb.to_csv(self._bibfilename)

    # recommender system
    def build_recommender(self, update=False):
        """ using text contents build vectorized representation of papers """

        pids = range(len(self._bibdb))

        if os.path.exists(self._tfidfname) and os.path.exists(self._metafname) and (not update):
            print('... read from {}, {}'.format(self._tfidfname, self._metafname))
            out = pickle.load(open(self._tfidfname, 'rb'))
            self._X = out['X']
            meta = pickle.load(open(self._metafname, 'rb'))
            self._vocab = meta['vocab']
            self._idf = meta['idf']
        else:
            print('... read all texts')
            corpus = []
            for i in tqdm.tqdm(pids):
                self.paper(i, exif=False)
                txt = '{}\n{}'.format(self._currentpaper.abstract(), self._currentpaper.contents(split=False, update=False))
                corpus.append(txt)

            # clean up text: newlines, IP-like numbers, urls and boilerplate
            corpus = [ re.sub('\\n', ' ', str(x)) for x in corpus ]
            corpus = [ re.sub("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}",'',str(x)) for x in corpus ]
            corpus = [ re.sub("(http://.*?\s)|(http://.*)",'',str(x)) for x in corpus ]
            corpus = [ x.replace("royalsocietypublishing", "") for x in corpus ]
            corpus = [ x.replace("annualreviews", "") for x in corpus ]
            corpus = [ x.replace("science reports", "") for x in corpus ]
            corpus = [ x.replace("nature publishing group", "") for x in corpus ]
            self.corpus = corpus

            # prepare vectorizer
            v = TfidfVectorizer(input='content',
                    encoding='utf-8', decode_error='replace', strip_accents='unicode',
                    lowercase=True, analyzer='word', stop_words='english',
                    token_pattern=r'(?u)\b[a-zA-Z_][a-zA-Z0-9_]+\b',
                    ngram_range=(1, 3), max_features = 5000,
                    norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=True,
                    max_df=1.0, min_df=1)

            v.fit(corpus)
            self._X = v.transform(corpus)
            self._vocab = v.vocabulary_
            self._idf = v._tfidf.idf_

            # write full matrix out
            out = {}
            out['X'] = self._X
            print('... writing: {}'.format(self._tfidfname))
            safe_pickle_dump(out, self._tfidfname)

            # writing metatdata
            out = {}
            out['vocab'] = v.vocabulary_
            out['idf'] = v._tfidf.idf_
            out['pids'] = pids
            print('... writing: {}'.format(self._metafname))
            safe_pickle_dump(out, self._metafname)

        if os.path.exists(self._simfname) and (not update):
            print('... read from {}'.format(self._simfname))
            self._sim_dict = pickle.load(open(self._simfname, 'rb'))
        else:
            print("...precomputing nearest neighbor queries in batches...")
            X = self._X.todense().astype(np.float32)

            self._sim_dict = {}
            batch_size = 200
            for i in range(0,len(pids),batch_size):
                i1 = min(len(pids), i+batch_size)
                xquery = X[i:i1] # BxD
                ds = -np.asarray(np.dot(X, xquery.T)) #NxD * DxB => NxB
                IX = np.argsort(ds, axis=0) # NxB
                for j in range(i1-i):
                    self._sim_dict[pids[i+j]] = [pids[q] for q in list(IX[:50,j])]
                print('%d/%d...' % (i, len(pids)))

            print('... writing: {}'.format(self._simfname))
            safe_pickle_dump(self._sim_dict, self._simfname)

    def recommend_similar(self, idx=0, n=5, items=[]):
        """ recommend similar paper using feature matrix """

        if len(self._sim_dict) == 0:
            self.build_recommender()

        rec_list = self._bibdb.iloc[self._sim_dict[idx][:n]]
        return quickview(rec_list, items=items)

    def build_topiclist(self, n_com=20, max_iter=10, n_keys=8, update=False):
        """ make feature matrix using LDA """

        if os.path.exists(self._ldafname) and (not update):
            out = pickle.load(open(self._ldafname, 'rb'))
            self._lda = out['lda']
            self._topics = out['topics']
        else:
            if len(self._sim_dict) == 0:
                self.build_recommender()

            X = self._X.todense().astype(np.float32)
            lda = LatentDirichletAllocation(n_components=n_com,
                    learning_method='batch',
                    max_iter=max_iter, verbose=1,
                    n_jobs=-1, random_state=0)

            print("... computing decomposition matrix: n_com {}, max_iter {}".format(n_com, max_iter))
            paper_topics = lda.fit_transform(X)

            feature_names = sorted(list(self._vocab.keys()))
            for topic_idx, topic in enumerate(lda.components_):
                msg = 'Topic [{}]: '.format(topic_idx)
                msg += ', '.join([feature_names[i] for i in topic.argsort()[:-n_keys-1:-1]])
                print(msg)

            # writing lda result
            out = {}
            out['lda'] = paper_topics
            out['topics'] = lda.components_
            print('... writing: {}'.format(self._ldafname))
            safe_pickle_dump(out, self._ldafname)

            self._lda = paper_topics
            self._topics = lda.components_

    def recommend_topic(self, tid=0, n=5, n_com=20, n_keys=8, items=[]):
        """ recommend papers using decomposition """

        # fixed: guard against the model never having been built (the
        # original raised AttributeError) and a typo'd ``ncom`` argument
        if getattr(self, '_lda', None) is None or len(self._lda) == 0:
            self.build_topiclist(n_com=n_com, n_keys=n_keys)

        topic = self._topics[tid]
        feature_names = sorted(list(self._vocab.keys()))
        msg = " ".join([feature_names[i] for i in topic.argsort()[:-n_keys:-1]])
        print('Topic {}: {}'.format(tid, msg))

        idxlist = np.argsort(self._lda[:, tid])[::-1]
        lda_list = self._bibdb.iloc[idxlist[:n]]
        return quickview(lda_list, items=items)

    def word_list(self):
        """ print feature names """

        # fixed: _vocab starts as {}, so an ``is None`` check never triggered
        if not self._vocab:
            self.build_recommender()

        feature_names = sorted(list(self._vocab.keys()))
        v = pd.DataFrame(self._idf, index=feature_names, columns=['idf'])
        return v.sort_values(by='idf')
def search(pd_db, year=0, author='', journal='', author1='', title='', doi='', byindex=False):
    """Filter the bibliography dataframe by the given keyword fields.

    A missing ``author1`` column is derived in place from ``author`` (the
    first name before ' and ').  When *year* is non-zero the year column is
    coerced to int and matched exactly; the remaining string fields are
    substring matches applied in sequence.  Returns the filtered frame, or
    just its index when *byindex* is true.
    """
    if ("author1" not in pd_db.columns) and ("author" in pd_db.columns):
        pd_db["author1"] = [full.split(' and ')[0] for full in pd_db['author'].values]

    if year != 0:
        pd_db.loc[:, 'year'] = pd_db.loc[:, 'year'].astype(int)
        result = pd_db.loc[pd_db['year'] == year]
    else:
        result = pd_db

    # apply the substring filters in the same order as before
    for column, needle in (("author", author), ("author1", author1),
                           ("journal", journal), ("title", title), ("doi", doi)):
        if needle != '' and column in result.columns:
            result[column].fillna('', inplace=True)
            result = result.loc[result[column].str.contains(needle)]

    return result.index if byindex else result
def quickview(pd_db, items=[], add=True):
    """Return *pd_db* restricted to the essential bibliography columns.

    When *items* is non-empty it is appended to the base columns (add=True)
    or used as the complete column list (add=False).
    """
    base = ["year", "author1", "author", "title", "journal", "doi"]
    if not items:
        columns = base
    elif add:
        columns = base + list(items)
    else:
        columns = list(items)
    return pd_db[columns]
| 2.265625 | 2 |
dashboard/migrations/0029_auto_20210818_0515.py | zidandff/primaseru | 0 | 12769174 | # Generated by Django 3.2.5 on 2021-08-18 05:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional per-installment payment-amount fields (1-3) to
    ParticipantRepayment."""

    dependencies = [
        ('dashboard', '0028_alter_participantrepayment_payment_1'),
    ]

    operations = [
        migrations.AddField(
            model_name='participantrepayment',
            name='pay_mount_1',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Nominal Pembayaran Ke 1'),
        ),
        migrations.AddField(
            model_name='participantrepayment',
            name='pay_mount_2',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Nominal Pembayaran Ke 2'),
        ),
        migrations.AddField(
            model_name='participantrepayment',
            name='pay_mount_3',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Nominal Pembayaran Ke 3'),
        ),
    ]
| 1.429688 | 1 |
DenseFusion/tools/train.py | KochPJ/AutoPoseEstimation | 8 | 12769175 | # --------------------------------------------------------
# DenseFusion 6D Object Pose Estimation by Iterative Dense Fusion
# Licensed under The MIT License [see LICENSE for details]
# Written by Chen
# --------------------------------------------------------
import argparse
import os
import random
import time
import numpy as np
import torch
from pathlib import Path
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
from DenseFusion.datasets.myDatasetAugmented.dataset import PoseDataset
from DenseFusion.lib.network import PoseNet, PoseRefineNet
from DenseFusion.lib.loss import Loss
from DenseFusion.lib.loss_refiner import Loss_refine
#import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
import pc_reconstruction.open3d_utils as pc_utils
import json
from DenseFusion.tools.utils import *
from DenseFusion.lib.transformations import quaternion_matrix
def main(data_set_name, root, save_extra='', load_pretrained=True, load_trained=False, load_name='',
label_mode='new_pred', p_extra_data=0.0, p_viewpoints=1.0, show_sample=False, plot_train=False, device_num=0):
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=8, help='batch size')
parser.add_argument('--workers', type=int, default=8, help='number of data loading workers')
parser.add_argument('--lr', default=0.0001, help='learning rate')
parser.add_argument('--lr_rate', default=0.3, help='learning rate decay rate')
parser.add_argument('--w', default=0.015, help='learning rate')
parser.add_argument('--w_rate', default=0.3, help='learning rate decay rate')
parser.add_argument('--decay_margin', default=0.016, help='margin to decay lr & w')
parser.add_argument('--refine_margin', default=0.010, help='margin to start the training of iterative refinement')
parser.add_argument('--noise_trans', default=0.03,
help='range of the random noise of translation added to the training data')
parser.add_argument('--iteration', type=int, default=2, help='number of refinement iterations')
parser.add_argument('--nepoch', type=int, default=500, help='max number of epochs to train')
parser.add_argument('--refine_epoch_margin', type=int, default=400, help='max number of epochs to train')
parser.add_argument('--start_epoch', type=int, default=1, help='which epoch to start')
opt = parser.parse_args()
opt.manualSeed = random.randint(1, 10000)
torch.cuda.set_device(device_num)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
print('bs', opt.batch_size, 'it', opt.iteration)
opt.refine_start = False
opt.num_points = 1000 #number of points on the input pointcloud
opt.outf = os.path.join(root, 'DenseFusion/trained_models', data_set_name+save_extra) #folder to save trained models
if not os.path.exists(opt.outf):
os.makedirs(opt.outf)
opt.log_dir = os.path.join(root, 'DenseFusion/experiments/logs', data_set_name+save_extra) #folder to save logs
opt.log_dir_images = os.path.join(root, 'DenseFusion/experiments/logs', data_set_name+save_extra, 'images')
if not os.path.exists(opt.log_dir):
os.makedirs(opt.log_dir)
if not os.path.exists(opt.log_dir_images):
os.makedirs(opt.log_dir_images)
opt.repeat_epoch = 1 #number of repeat times for one epoch training
print('create datasets')
dataset = PoseDataset('train',
opt.num_points,
True,
0.0,
opt.refine_start,
data_set_name,
root,
show_sample=show_sample,
label_mode=label_mode,
p_extra_data=p_extra_data,
p_viewpoints=p_viewpoints)
test_dataset = PoseDataset('test',
opt.num_points,
False,
0.0,
opt.refine_start,
data_set_name,
root,
show_sample=show_sample,
label_mode=label_mode,
p_extra_data=p_extra_data,
p_viewpoints=p_viewpoints)
opt.num_objects = dataset.num_classes #number of object classes in the dataset
print('n classes: {}'.format(dataset.num_classes))
print('create models')
estimator = PoseNet(num_points=opt.num_points, num_obj=opt.num_objects)
estimator.cuda()
refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects)
refiner.cuda()
if load_pretrained:
# load the pretrained estimator model on the ycb dataset, leave the last layer due to mismatch
init_state_dict = estimator.state_dict()
pretrained_dict = torch.load(os.path.join(root, 'DenseFusion/trained_models/pose_model.pth'))
pretrained_dict['conv4_r.weight'] = init_state_dict['conv4_r.weight']
pretrained_dict['conv4_r.bias'] = init_state_dict['conv4_r.bias']
pretrained_dict['conv4_t.weight'] = init_state_dict['conv4_t.weight']
pretrained_dict['conv4_t.bias'] = init_state_dict['conv4_t.bias']
pretrained_dict['conv4_c.weight'] = init_state_dict['conv4_c.weight']
pretrained_dict['conv4_c.bias'] = init_state_dict['conv4_c.bias']
estimator.load_state_dict(pretrained_dict)
del init_state_dict
del pretrained_dict
# load the pretrained refiner model on the ycb dataset, leave the last layer due to mismatch
init_state_dict = refiner.state_dict()
pretrained_dict = torch.load(os.path.join(root, 'DenseFusion/trained_models/pose_refine_model.pth'))
pretrained_dict['conv3_r.weight'] = init_state_dict['conv3_r.weight']
pretrained_dict['conv3_r.bias'] = init_state_dict['conv3_r.bias']
pretrained_dict['conv3_t.weight'] = init_state_dict['conv3_t.weight']
pretrained_dict['conv3_t.bias'] = init_state_dict['conv3_t.bias']
refiner.load_state_dict(pretrained_dict)
del init_state_dict
del pretrained_dict
elif load_trained:
loading_path = os.path.join(root, 'DenseFusion/trained_models/{}/pose_model.pth'.format(load_name))
pretrained_dict = torch.load(loading_path)
estimator.load_state_dict(pretrained_dict)
loading_path = os.path.join(root, 'DenseFusion/trained_models/{}/pose_refine_model.pth'.format(load_name))
pretrained_dict = torch.load(loading_path)
refiner.load_state_dict(pretrained_dict)
del pretrained_dict
print('create optimizer and dataloader')
#opt.refine_start = False
opt.decay_start = False
optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
#dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=opt.workers,
# collate_fn=collate_fn)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers)
opt.sym_list = dataset.get_sym_list()
opt.num_points_mesh = dataset.get_num_points_mesh()
print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}'
'\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}\nsymmetry object list: {3}'.format(
len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))
criterion = Loss(opt.num_points_mesh, opt.sym_list)
criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)
best_test = np.Inf
best_test_epoch = 0
best_train = np.Inf
best_train_epoch = 0
if opt.start_epoch == 1:
for log in os.listdir(opt.log_dir):
if log !='images':
os.remove(os.path.join(opt.log_dir, log))
for img in os.listdir(opt.log_dir_images):
os.remove(os.path.join(opt.log_dir_images, img))
train_dists = []
test_dists = []
losses = []
refiner_losses = []
best_loss = np.inf
best_loss_epoch = 0
elapsed_times = 0.0
for epoch in range(opt.start_epoch, opt.nepoch):
start_time = time.time()
train_count = 0
train_dis_avg = 0.0
if opt.refine_start:
estimator.eval()
refiner.train()
else:
estimator.train()
optimizer.zero_grad()
epoch_losses = []
epoch_losses_refiner = []
for rep in range(opt.repeat_epoch):
#for batch in dataloader:
#points, choose, img, target, model_points, idx = batch
#print(points.shape, choose.shape, img.shape, target.shape, model_points.shape)
for i, data in enumerate(dataloader, 0):
points, choose, img, target, model_points, idx = data
#print(points.shape, choose.shape, img.shape, target.shape, model_points.shape)
points, choose, img, target, model_points, idx = Variable(points).cuda(), \
Variable(choose).cuda(), \
Variable(img).cuda(), \
Variable(target).cuda(), \
Variable(model_points).cuda(), \
Variable(idx).cuda()
pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
loss, dis, new_points, new_target, pred = criterion(pred_r, pred_t, pred_c, target, model_points, idx, points, opt.w, opt.refine_start)
epoch_losses.append(loss.item())
if opt.refine_start:
for ite in range(0, opt.iteration):
pred_r, pred_t = refiner(new_points, emb, idx)
dis, new_points, new_target, pred = criterion_refine(pred_r, pred_t, new_target, model_points, idx, new_points)
dis.backward()
epoch_losses_refiner.append(dis.item())
else:
loss.backward()
epoch_losses_refiner.append(0)
train_dis_avg += dis.item()
train_count += 1
# make step after one epoch
if train_count % opt.batch_size == 0:
optimizer.step()
optimizer.zero_grad()
# make last step of epoch if something is remaining
if train_count % opt.batch_size != 0:
optimizer.step()
optimizer.zero_grad()
refiner_losses.append(np.mean(epoch_losses_refiner))
losses.append(np.mean(epoch_losses))
if losses[-1] < best_loss:
best_loss = losses[-1]
best_loss_epoch = epoch
train_dists.append(train_dis_avg/train_count)
if train_dists[-1] < best_train:
best_train_epoch = epoch
best_train = train_dists[-1]
test_dis = 0.0
test_count = 0
estimator.eval()
refiner.eval()
if plot_train:
# plot randomly selected validation preds
jj = 0
x_axis = 0
fig_x = 4
fig_y = 4
log_indexes = sorted(list(np.random.choice(list(range(len(testdataloader))), int(fig_x*(fig_y/2)), replace=False)))
plt.cla()
plt.close('all')
fig, axs = plt.subplots(fig_x, fig_y, constrained_layout=True, figsize=(25, 15))
for j, data in enumerate(testdataloader, 0):
points, choose, img, target, model_points, idx, intr, np_img = data
points, choose, img, target, model_points, idx = Variable(points).cuda(), \
Variable(choose).cuda(), \
Variable(img).cuda(), \
Variable(target).cuda(), \
Variable(model_points).cuda(), \
Variable(idx).cuda()
pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
if plot_train:
if j in log_indexes:
my_pred, my_r, my_t = my_estimator_prediction(pred_r, pred_t, pred_c, opt.num_points, 1, points)
_, dis, new_points, new_target, pred = criterion(pred_r, pred_t, pred_c, target, model_points, idx, points, opt.w, opt.refine_start)
if opt.refine_start:
for ite in range(0, opt.iteration):
pred_r, pred_t = refiner(new_points, emb, idx)
if plot_train:
if j in log_indexes:
my_pred, my_r, my_t = my_refined_prediction(pred_r, pred_t, my_r, my_t)
dis, new_points, new_target, pred = criterion_refine(pred_r, pred_t, new_target, model_points, idx, new_points)
if plot_train:
if j in log_indexes:
if jj == 4:
jj = 0
x_axis += 1
my_r = quaternion_matrix(my_r)[:3, :3]
np_pred = np.dot(model_points[0].data.cpu().numpy(), my_r.T)
np_pred = np.add(np_pred, my_t)
np_target = target[0].data.cpu().numpy()
np_img = np_img[0].data.numpy()
image_target = pc_utils.pointcloud2image(np_img.copy(), np_target, 3, intr)
image_prediction = pc_utils.pointcloud2image(np_img.copy(), np_pred, 3, intr)
axs[x_axis, jj].imshow(image_target)
axs[x_axis, jj].set_title('target {}'.format(j))
axs[x_axis, jj].set_axis_off()
jj += 1
axs[x_axis, jj].imshow(image_prediction)
axs[x_axis, jj].set_title('prediction {}'.format(j))
axs[x_axis, jj].set_axis_off()
jj += 1
test_dis += dis.item()
test_count += 1
test_dis = test_dis / test_count
test_dists.append(test_dis)
if plot_train:
fig.suptitle('epoch {}, with a average dist: {}'.format(epoch, test_dis), fontsize=16)
plt.savefig(os.path.join(opt.log_dir_images, 'test_images_epoch_{}.png'.format(epoch)))
if epoch > 1:
plt.close('all')
plt.cla()
fig, axs = plt.subplots(2, 2, constrained_layout=True, figsize=(30, 20))
axs[0, 0].plot(losses)
axs[0, 0].set_title('Training estimator loss')
axs[0, 0].set_xlabel('Epochs')
axs[0, 0].set_ylabel('Loss')
axs[0, 1].plot(refiner_losses)
axs[0, 1].set_title('Training refiner loss')
axs[0, 1].set_xlabel('Epochs')
axs[0, 1].set_ylabel('Loss')
axs[1, 0].plot(train_dists)
axs[1, 0].set_title('Training Avg. distance')
axs[1, 0].set_xlabel('Epochs')
axs[1, 0].set_ylabel('Avg. distance [m]')
axs[1, 1].plot(test_dists)
axs[1, 1].set_title('Test Avg. distance')
axs[1, 1].set_xlabel('Epochs')
axs[1, 1].set_ylabel('Avg. distance [m]')
plt.savefig(os.path.join(opt.log_dir_images, 'losses.png'))
out_dict = {
'losses': losses,
'refiner_losses': refiner_losses,
'train_dists': train_dists,
'test_dists': test_dists
}
with open(os.path.join(opt.log_dir, 'losses.json'), 'w') as outfile:
json.dump(out_dict, outfile)
del out_dict
print('>>>>>>>>----------Epoch {0} finished---------<<<<<<<<'.format(epoch))
if test_dis <= best_test:
best_test = test_dis
best_test_epoch = epoch
if opt.refine_start:
state_dict = refiner.state_dict()
torch.save(state_dict, '{0}/pose_refine_model.pth'.format(opt.outf))
del state_dict
else:
state_dict = estimator.state_dict()
torch.save(state_dict, '{0}/pose_model.pth'.format(opt.outf))
del state_dict
print('>>>>>>>>----------MODEL SAVED---------<<<<<<<<')
t_elapsed = time.time() - start_time
elapsed_times += t_elapsed/3600
print('elapsed time: {} min, total elapsed time: {} hours'.format(
np.round(t_elapsed/60, 2), np.round(elapsed_times), 2))
print('Train loss : {}'.format(losses[-1]))
print('Best train loss {} : {}'.format(best_loss_epoch, best_loss))
print('Train dist : {}'.format(train_dists[-1]))
print('Best train dist {} : {}'.format(best_train_epoch, best_train))
print('Test dist : {}'.format(test_dists[-1]))
print('Best test dist {} : {}'.format(best_test_epoch, best_test))
# changing stuff during training if...
if best_test < opt.decay_margin and not opt.decay_start:
print('decay lr')
opt.decay_start = True
opt.lr *= opt.lr_rate
opt.w *= opt.w_rate
optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
if (best_test < opt.refine_margin or epoch >= opt.refine_epoch_margin) and not opt.refine_start:
#print('train refiner')
opt.refine_start = True
print('bs', opt.batch_size, 'it', opt.iteration)
opt.batch_size = int(opt.batch_size / opt.iteration)
print('new bs', opt.batch_size)
optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
#dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
#testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers)
#opt.sym_list = dataset.get_sym_list()
#opt.num_points_mesh = dataset.get_num_points_mesh()
print('>>>>>>>>----------train refiner!---------<<<<<<<<')
criterion = Loss(opt.num_points_mesh, opt.sym_list)
criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)
if __name__ == '__main__':
data_set_name = 'bluedude_solo'
save_extra = '_test4'
root = Path(__file__).resolve().parent.parent.parent
main(data_set_name, root, save_extra=save_extra)
| 1.632813 | 2 |
custom_components/edgeos/managers/config_flow_manager.py | kcleong/homeassistant-config | 0 | 12769176 | <reponame>kcleong/homeassistant-config
import logging
from typing import Optional
from cryptography.fernet import InvalidToken
from homeassistant.config_entries import ConfigEntry
from ..clients.web_api import EdgeOSWebAPI
from ..helpers import get_ha
from ..helpers.const import *
from ..managers.configuration_manager import ConfigManager
from ..managers.password_manager import PasswordManager
from ..models import AlreadyExistsError, LoginError
from ..models.config_data import ConfigData
from ..models.exceptions import IncompatibleVersion, LoginException
from .version_check import VersionManager
_LOGGER = logging.getLogger(__name__)
class ConfigFlowManager:
_config_manager: ConfigManager
_password_manager: PasswordManager
_options: Optional[dict]
_data: Optional[dict]
_config_entry: Optional[ConfigEntry]
def __init__(self):
self._config_entry = None
self._options = None
self._data = None
self._is_initialized = True
self._hass = None
self._available_actions = {
CONF_STORE_DEBUG_FILE: self._execute_store_debug_file
}
async def initialize(self, hass, config_entry: Optional[ConfigEntry] = None):
self._config_entry = config_entry
self._hass = hass
self._password_manager = PasswordManager(self._hass)
self._config_manager = ConfigManager(self._password_manager)
data = {}
options = {}
if self._config_entry is not None:
data = self._config_entry.data
options = self._config_entry.options
await self.update_data(data, CONFIG_FLOW_INIT)
await self.update_options(options, CONFIG_FLOW_INIT)
@property
def config_data(self) -> ConfigData:
return self._config_manager.data
@property
def title(self) -> str:
return self._data.get(ENTRY_PRIMARY_KEY)
async def update_options(self, options: dict, flow: str):
_LOGGER.debug("Update options")
validate_login = False
actions = []
new_options = await self._clone_items(options, flow)
if flow == CONFIG_FLOW_OPTIONS:
self._validate_unique_name(new_options)
validate_login = self._should_validate_login(new_options)
self._move_option_to_data(new_options)
actions = self._get_actions(new_options)
self._options = new_options
await self._update_entry()
if validate_login:
await self._handle_data(flow)
for action in actions:
action()
return new_options
async def update_data(self, data: dict, flow: str):
_LOGGER.debug("Update data")
if flow == CONFIG_FLOW_DATA:
self._validate_unique_name(data)
self._data = await self._clone_items(data, flow)
await self._update_entry()
await self._handle_data(flow)
def get_data_user_input(self):
data = self.clone_items(self._data)
title = ""
if ENTRY_PRIMARY_KEY in data:
title = data[ENTRY_PRIMARY_KEY]
del data[ENTRY_PRIMARY_KEY]
return title, data
def get_options_user_input(self):
data = self.clone_items(self._options)
title = ""
if ENTRY_PRIMARY_KEY in data:
title = data[ENTRY_PRIMARY_KEY]
del data[ENTRY_PRIMARY_KEY]
return title, data
def _validate_unique_name(self, user_input):
entry_primary_key = user_input.get(ENTRY_PRIMARY_KEY, "")
if self.title is None or self.title != entry_primary_key:
ha = get_ha(self._hass, entry_primary_key)
if ha is not None:
raise AlreadyExistsError(entry_primary_key)
def _get_default_fields(self, flow, config_data: Optional[ConfigData] = None) -> dict:
if config_data is None:
config_data = self.config_data
fields = {}
if flow == CONFIG_FLOW_DATA:
fields[vol.Optional(CONF_NAME, default=config_data.name)] = str
fields[vol.Optional(CONF_HOST, default=config_data.host)] = str
fields[vol.Optional(CONF_USERNAME, default=config_data.username)] = str
fields[
vol.Optional(CONF_PASSWORD, default=config_data.password_clear_text)
] = str
fields[vol.Optional(CONF_UNIT, default=config_data.unit)] = vol.In(
ALLOWED_UNITS_LIST
)
return fields
async def get_default_data(self, user_input):
config_data = await self._config_manager.get_basic_data(user_input)
fields = self._get_default_fields(CONFIG_FLOW_DATA, config_data)
data_schema = vol.Schema(fields)
return data_schema
def get_default_options(self):
system_data = {}
config_data = self.config_data
ha = self._get_ha(self._config_entry.entry_id)
if ha is not None:
system_data = ha.data_manager.system_data
all_interfaces = self._get_available_options(system_data, INTERFACES_KEY)
all_devices = self._get_available_options(system_data, STATIC_DEVICES_KEY)
monitored_devices = self._get_options(config_data.monitored_devices)
monitored_interfaces = self._get_options(config_data.monitored_interfaces)
device_trackers = self._get_options(config_data.device_trackers)
fields = self._get_default_fields(CONFIG_FLOW_OPTIONS)
fields[vol.Optional(CONF_CLEAR_CREDENTIALS, default=False)] = bool
fields[
vol.Optional(
CONF_CONSIDER_AWAY_INTERVAL, default=config_data.consider_away_interval
)
] = int
fields[vol.Optional(CONF_UNIT, default=config_data.unit)] = vol.In(
ALLOWED_UNITS_LIST
)
fields[
vol.Optional(CONF_MONITORED_DEVICES, default=monitored_devices)
] = cv.multi_select(all_devices)
fields[
vol.Optional(CONF_MONITORED_INTERFACES, default=monitored_interfaces)
] = cv.multi_select(all_interfaces)
fields[
vol.Optional(CONF_TRACK_DEVICES, default=device_trackers)
] = cv.multi_select(all_devices)
fields[
vol.Optional(
CONF_UPDATE_ENTITIES_INTERVAL,
default=config_data.update_entities_interval,
)
] = cv.positive_int
fields[
vol.Optional(
CONF_UPDATE_API_INTERVAL, default=config_data.update_api_interval
)
] = cv.positive_int
fields[vol.Optional(CONF_STORE_DEBUG_FILE, default=False)] = bool
fields[vol.Optional(CONF_LOG_LEVEL, default=config_data.log_level)] = vol.In(
LOG_LEVELS
)
fields[
vol.Optional(
CONF_LOG_INCOMING_MESSAGES, default=config_data.log_incoming_messages
)
] = bool
data_schema = vol.Schema(fields)
return data_schema
async def _update_entry(self):
try:
entry = ConfigEntry(version=0,
domain="",
title="",
data=self._data,
source="",
options=self._options)
await self._config_manager.update(entry)
except InvalidToken:
_LOGGER.info("Reset password")
del self._data[CONF_PASSWORD]
entry = ConfigEntry(version=0,
domain="",
title="",
data=self._data,
source="",
options=self._options)
await self._config_manager.update(entry)
async def clear_credentials(self, user_input):
user_input[CONF_CLEAR_CREDENTIALS] = True
await self._handle_password(user_input)
async def _handle_password(self, user_input):
if CONF_CLEAR_CREDENTIALS in user_input:
clear_credentials = user_input.get(CONF_CLEAR_CREDENTIALS)
if clear_credentials:
del user_input[CONF_USERNAME]
del user_input[CONF_PASSWORD]
del user_input[CONF_CLEAR_CREDENTIALS]
if CONF_PASSWORD in user_input:
password_clear_text = user_input[CONF_PASSWORD]
password = await self._password_manager.encrypt(password_clear_text)
user_input[CONF_PASSWORD] = password
@staticmethod
def _get_user_input_option(options, key):
result = options.get(key, [])
return result
async def _clone_items(self, user_input, flow: str):
new_user_input = {}
if user_input is not None:
for key in user_input:
user_input_data = user_input[key]
new_user_input[key] = user_input_data
if flow != CONFIG_FLOW_INIT:
await self._handle_password(new_user_input)
return new_user_input
@staticmethod
def clone_items(user_input):
new_user_input = {}
if user_input is not None:
for key in user_input:
user_input_data = user_input[key]
new_user_input[key] = user_input_data
return new_user_input
def _should_validate_login(self, user_input: dict):
validate_login = False
data = self._data
for conf in CONF_ARR:
if data.get(conf) != user_input.get(conf):
validate_login = True
break
return validate_login
def _get_actions(self, options):
actions = []
for action in self._available_actions:
if action in options:
if options.get(action, False):
execute_action = self._available_actions[action]
actions.append(execute_action)
del options[action]
return actions
def _execute_store_debug_file(self):
ha = self._get_ha()
if ha is not None:
ha.service_save_debug_data()
def _get_ha(self, key: str = None):
if key is None:
key = self.title
ha = get_ha(self._hass, key)
return ha
def _move_option_to_data(self, options):
for conf in CONF_ARR:
if conf in options:
self._data[conf] = options[conf]
del options[conf]
async def _handle_data(self, flow):
if flow != CONFIG_FLOW_INIT:
await self._valid_login()
if flow == CONFIG_FLOW_OPTIONS:
config_entries = self._hass.config_entries
config_entries.async_update_entry(self._config_entry, data=self._data)
@staticmethod
def _get_options(data):
result = []
if data is not None:
if isinstance(data, list):
result = data
else:
clean_data = data.replace(" ", "")
result = clean_data.split(",")
return result
@staticmethod
def _get_available_options(system_data, key):
all_items = system_data.get(key, {})
available_items = {}
for item_key in all_items:
item = all_items[item_key]
item_name = item.get(CONF_NAME)
available_items[item_key] = item_name
return available_items
async def _valid_login(self):
errors = None
name = f"{DEFAULT_NAME} {self.title}"
try:
api = EdgeOSWebAPI(self._hass, self._config_manager)
await api.initialize()
if await api.login(throw_exception=True):
await api.async_send_heartbeat()
if not api.is_connected:
_LOGGER.warning(
f"Failed to login {name} due to invalid credentials"
)
errors = {"base": "invalid_credentials"}
device_data = await api.get_devices_data()
if device_data is None:
_LOGGER.warning(f"Failed to retrieve {name} device data")
errors = {"base": "empty_device_data"}
else:
system_data = device_data.get("system", {})
traffic_analysis_data = system_data.get("traffic-analysis", {})
dpi = traffic_analysis_data.get("dpi", "disable")
export = traffic_analysis_data.get("export", "disable")
error_prefix = f"Invalid {name} configuration -"
if dpi != "enable":
_LOGGER.warning(
f"{error_prefix} Deep Packet Inspection (DPI) is disabled"
)
errors = {"base": "invalid_dpi_configuration"}
if export != "enable":
_LOGGER.warning(
f"{error_prefix} Traffic Analysis Export is disabled"
)
errors = {"base": "invalid_export_configuration"}
system_info_data = await api.get_general_data(SYS_INFO_KEY)
vm = VersionManager()
vm.update(system_info_data)
vm.validate()
else:
_LOGGER.warning(f"Failed to login {name}")
errors = {"base": "auth_general_error"}
except LoginException as ex:
_LOGGER.warning(
f"Failed to login {name} due to HTTP Status Code: {ex.status_code}"
)
errors = {"base": HTTP_ERRORS.get(ex.status_code, "auth_general_error")}
except IncompatibleVersion as ivex:
_LOGGER.error(str(ivex))
errors = {"base": "incompatible_version"}
except Exception as ex:
_LOGGER.warning(f"Failed to login {name} due to general error: {str(ex)}")
errors = {"base": "auth_general_error"}
if errors is not None:
raise LoginError(errors)
| 1.984375 | 2 |
opencv_tutorial/opencv_python_tutorials/Image_Processing/hough_line_transfom.py | zeroam/TIL | 0 | 12769177 | <filename>opencv_tutorial/opencv_python_tutorials/Image_Processing/hough_line_transfom.py
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 14:52:34 2019
@author: jone
"""
#%%
import cv2
import numpy as np
def nothing(x):
pass
img = cv2.imread('img/chessboard2.jpg')
img = cv2.resize(img, (800, 800))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.namedWindow('image')
cv2.createTrackbar('threshold', 'image', 200, 400, nothing)
cv2.namedWindow('canny')
cv2.createTrackbar('canny', 'canny', 50, 255, nothing)
while(1):
if cv2.waitKey(1) & 0xFF == 27:
break
img_copy = img.copy()
threshold = cv2.getTrackbarPos('threshold', 'image')
c = cv2.getTrackbarPos('canny', 'canny')
if threshold < 50:
threshold = 50
edges = cv2.Canny(gray, c, 3*c, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi/180, threshold)
for line in lines:
for rho, theta in line:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(img_copy, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('canny', edges)
cv2.imshow('image', img_copy)
cv2.destroyAllWindows()
#%% 확률 허프 변환
import cv2
import numpy as np
def nothing(x):
pass
img = cv2.imread('img/building.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(img, 50, 150, apertureSize=3)
cv2.namedWindow('image')
cv2.createTrackbar('threshold', 'image', 100, 255, nothing)
cv2.createTrackbar('min_length', 'image', 100, 500, nothing)
cv2.createTrackbar('max_gap', 'image', 0, 100, nothing)
while(1):
if cv2.waitKey(1) & 0xFF == 27:
break
img_copy = img.copy()
threshold = cv2.getTrackbarPos('threshold', 'image')
min_length = cv2.getTrackbarPos('min_length', 'image')
max_gap = cv2.getTrackbarPos('max_gap', 'image')
lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold, min_length, max_gap)
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(img_copy, (x1,y1), (x2,y2), (0,255,0), 2)
cv2.imshow('image', img_copy)
cv2.destroyAllWindows() | 3.34375 | 3 |
Python/control/trajectory_controller.py | hpbader42/Klampt | 1 | 12769178 | <gh_stars>1-10
from controller import BaseController
from klampt.model import trajectory
class TrajectoryController(BaseController):
"""A controller that takes in a trajectory and outputs the position along
the trajectory. If type is a 2-tuple, this will also output the
derivative of the trajectory"""
def __init__(self,traj,type=('qcmd','dqcmd')):
self.traj = traj
self.outputType = type
self.startTime = None
def output(self,**inputs):
t = inputs['t']
if self.startTime == None:
self.startTime = t
t = t - self.startTime
if isinstance(self.outputType,tuple):
assert len(self.outputType)==2
return {self.outputType[0]:self.traj.eval(t),
self.outputType[1]:self.traj.deriv(t)}
else:
return {self.outputType:self.traj.eval(t)}
def signal(self,type,**inputs):
if type=='reset':
self.startTime = None
def make(robot,file="mypath.path"):
if robot == None:
l = trajectory.Trajectory()
else:
l = trajectory.RobotTrajectory(robot)
l.load(file)
return TrajectoryController(l)
| 3.390625 | 3 |
beqcatalogue/__init__.py | bmiller/beqcatalogue | 0 | 12769179 | import csv
import os
import re
from operator import itemgetter
from typing import Tuple
from urllib import parse
import json
from itertools import groupby
from markdown.extensions.toc import slugify
from iir import xml_to_filt
def extract_from_repo(path1: str, path2: str, content_type: str):
    '''
    Recursively walks every *.xml file under ``path1 + path2`` and extracts the
    beq_metadata of following format

    <beq_metadata>
      <beq_title>9</beq_title>
      <beq_alt_title />
      <beq_sortTitle>9</beq_sortTitle>
      <beq_year>2009</beq_year>
      <beq_spectrumURL>https://i.imgur.com/aRic6II.jpg</beq_spectrumURL>
      <beq_pvaURL>https://i.imgur.com/4DReGr5.jpg</beq_pvaURL>
      <beq_edition />
      <beq_season />
      <beq_note />
      <beq_warning />
      <beq_gain>-1 gain</beq_gain>
      <beq_language>English</beq_language>
      <beq_source>Disc</beq_source>
      <beq_author>aron7awol</beq_author>
      <beq_avs>https://www.avsforum.com/threads/bass-eq-for-filtered-movies.2995212/post-57282106</beq_avs>
      <beq_theMovieDB>12244</beq_theMovieDB>
      <beq_poster>/usfcQZRqdXTSSQ55esiPHJZKkIU.jpg</beq_poster>
      <beq_runtime>79</beq_runtime>
      <beq_audioTypes>
          <audioType>DTS-HD MA 5.1</audioType>
      </beq_audioTypes>
      <beq_genres>
          <genre id="878">Science Fiction</genre>
          <genre id="16">Animation</genre>
          <genre id="12">Adventure</genre>
          <genre id="28">Action</genre>
          <genre id="53">Thriller</genre>
      </beq_genres>
    </beq_metadata>

    new season format replaces beq_season with

    <beq_season id="92137">
      <number>1</number>
      <poster>/q1X7Ev3Hcr0Q7aUiWgw1ZUZf1QZ.jpg</poster>
      <episodes count="8">1,2,3,4,5,6,7,8</episodes>
    </beq_season>

    :return: a list of dicts, one per xml file; each dict carries file location
             info, the flattened beq_* fields (prefix stripped) and the parsed
             filters (both as text and as json-serialisable maps).
    '''
    import xml.etree.ElementTree as ET
    import glob
    elements = []
    for xml in glob.glob(f"{path1}{path2}/**/*.xml", recursive=True):
        et_tree = ET.parse(str(xml))
        root = et_tree.getroot()
        # strip the trailing '.xml' so we can derive display name and relative path
        file_name = xml[:-4]
        meta = {
            'repo_file': str(xml),
            'file_name': file_name.split('/')[-1],
            # directory of the file relative to path1 (the repo root)
            'file_path': '/'.join(file_name[len(path1):].split('/')[:-1]),
            'content_type': content_type
        }
        for child in root:
            if child.tag == 'beq_metadata':
                for m in child:
                    if len(m) == 0:
                        # leaf element: store its text under the tag name with the
                        # 'beq_' prefix removed; empty elements are skipped
                        txt = m.text
                        if txt:
                            meta[m.tag[4:]] = m.text
                    elif m.tag == 'beq_audioTypes':
                        # keep only non-empty audioType entries
                        audio_types = [c.text.strip() for c in m]
                        meta['audioType'] = [at for at in audio_types if at]
                    elif m.tag == 'beq_season':
                        # new-style season element; populates meta['season']
                        parse_season(m, meta, xml)
                    elif m.tag == 'beq_genres':
                        genres = [c.text.strip() for c in m]
                        meta['genres'] = [at for at in genres if at]
        # parse the filter definitions from the same xml file
        filts = [f for f in xml_to_filt(xml, unroll=True)]
        meta['jsonfilters'] = [f.to_map() for f in filts]
        # '^' separated textual form used by the catalogue pages
        meta['filters'] = '^'.join([str(f) for f in filts])
        elements.append(meta)
    return elements
def parse_season(m, meta, xml):
    '''
    Populates meta['season'] from a <beq_season id="..."> element.

    Copies the element's id attribute and every child element (number, poster,
    episodes, ...) into meta['season']; the episodes element additionally
    contributes 'episode_count' from its count attribute. When both the count
    and the episode list are available, meta['season']['complete'] records
    whether every episode from 1..count is present.

    Any parse failure is reported and leaves whatever was extracted so far.

    :param m: the <beq_season> ElementTree element.
    :param meta: the metadata dict to populate (mutated in place).
    :param xml: the source file path, used for error reporting only.
    '''
    try:
        meta['season'] = {'id': m.attrib['id']}
        for child in m:
            if child.tag == 'episodes':
                # the count attribute states how many episodes the season has
                meta['season']['episode_count'] = child.attrib['count']
            meta['season'][child.tag] = child.text
        if 'episode_count' in meta['season'] and 'episodes' in meta['season']:
            count = int(meta['season']['episode_count'])
            epi_txt = meta['season']['episodes']
            # an empty episode list is treated as complete, matching legacy data
            complete = True
            if epi_txt:
                episodes = [int(e) for e in epi_txt.split(',')]
                complete = set(range(1, count + 1)).issubset(episodes)
            meta['season']['complete'] = complete
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        print(f"Unable to parse season info from {xml}")
def group_mobe1969_film_content(content_meta):
    '''
    Groups mobe1969 film metadata dicts by their title.

    Entries without a title cannot be grouped; for those a catalogue entry is
    synthesised from the file name (expected shape "Title (year) ... audio")
    and appended to the module level json_catalogue instead.

    :param content_meta: metadata dicts produced by extract_from_repo.
    :return: dict mapping title -> list of metadata dicts for that title.
    '''
    by_title = {}
    # fallback for files whose metadata lacks a title, e.g. "9 (2009) DTS-HD MA 5.1"
    fallback_pattern = re.compile(r'(.*) \((\d{4})\)(?: *\(.*\))? (.*)')
    for meta in content_meta:
        if 'title' in meta:
            by_title.setdefault(meta['title'], []).append(meta)
        else:
            # renamed from 'json' to avoid shadowing the imported json module
            entry = {
                'title': meta['file_name'],
                'author': 'mobe1969',
                'content_type': meta['content_type']
            }
            match = fallback_pattern.match(meta['file_name'])
            if match:
                entry['title'] = match.group(1)
                entry['year'] = match.group(2)
                entry['audioTypes'] = match.group(3).split('+')
            print(f"Missing title entry, extracted {entry}")
            entry['filters'] = meta['jsonfilters']
            json_catalogue.append(entry)
    return by_title
def group_mobe1969_tv_content(content_meta):
    """Group TV metadata dicts by series title.

    Episode numbers are recovered either from a trailing " Exx" on the title
    or from the ``note`` field (formats "Exx", "Exx-yy" and "Sxx-Eyy").
    Entries lacking a ``title`` fall back to parsing the file name and are
    appended straight to the module-level ``json_catalogue``.

    :param content_meta: list of metadata dicts parsed from the repo XML.
    :return: dict mapping series title -> list of metadata dicts.
    """
    by_title = {}
    fallback_pattern = re.compile(r'(.*) \((\d{4})\)(?: *\(.*\))? (.*)')
    for meta in content_meta:
        if 'title' in meta:
            title = meta['title']
            if title[-4:-2] == ' E' and title[-2:].isdigit():
                # title carries the episode suffix itself, e.g. "Show E07"
                meta['episode'] = title[-2:]
                title = title[:-4]
                meta['title'] = title
            elif 'note' in meta:
                note = meta['note']
                # startswith() (rather than note[0]) avoids IndexError on an empty note
                if note.startswith('E'):
                    if note[1:].isdigit():
                        meta['episode'] = note[1:]
                    elif '-' in note[1:]:
                        # "E3-5" expands to "3,4,5"
                        vals = [int(i) for i in note[1:].split('-')]
                        if len(vals) == 2:
                            meta['episode'] = ','.join([str(e) for e in range(vals[0], vals[1] + 1)])
                elif note.startswith('S'):
                    frags = note.split('-')
                    if len(frags) == 2:
                        if frags[1].startswith('E'):
                            if frags[1][1:].isdigit():
                                meta['episode'] = frags[1][1:]
                if 'episode' not in meta:
                    print(f"Unknown note format in {meta}")
            by_title.setdefault(title, []).append(meta)
        else:
            # NB: this local was previously named `json`, shadowing the imported
            # json module; renamed to avoid masking it.
            entry = {
                'title': meta['file_name'],
                'author': 'mobe1969',
                'content_type': meta['content_type']
            }
            match = fallback_pattern.match(meta['file_name'])
            if match:
                entry['title'] = match.group(1)
                entry['year'] = match.group(2)
                entry['audioTypes'] = match.group(3).split('+')
                print(f"Missing title entry, extracted {entry}")
            entry['filters'] = meta['jsonfilters']
            json_catalogue.append(entry)
    return by_title
def process_mobe1969_content_from_repo(content_meta, index_entries, content_type):
    """Write one markdown page per mobe1969 title under docs/mobe1969/."""
    grouper = group_mobe1969_film_content if content_type == 'film' else group_mobe1969_tv_content
    for title, metas in grouper(content_meta).items():
        slug = slugify(title, '-')
        with open(f"docs/mobe1969/{slug}.md", mode='w+') as content_md:
            generate_content_page(slug, metas, content_md, index_entries, 'mobe1969', content_type)
def process_aron7awol_content_from_repo(content_meta, index_entries, content_type):
    """Write one markdown page per aron7awol group under docs/aron7awol/."""
    grouped = group_aron7awol_content(content_meta, content_type)
    for post_id, metas in grouped.items():
        with open(f"docs/aron7awol/{post_id}.md", mode='w+') as content_md:
            generate_content_page(post_id, metas, content_md, index_entries, 'aron7awol', content_type)
def group_aron7awol_content(content_meta, content_type) -> dict:
    """Group aron7awol entries: films by AVS forum post id, TV by slugified title.

    Film entries with no parsable/absent AVS link are reported and dropped.
    """
    grouped = {}
    if content_type == 'film':
        for meta in content_meta:
            if 'avs' not in meta:
                print(f"Missing beq_avs entry for {meta['repo_file']}")
                continue
            avs = meta['avs']
            post_id = None
            # two URL shapes are seen in the wild: ".../post?id=NNN" and ".../post-NNN"
            idx = avs.find('post?id=')
            if idx != -1:
                post_id = avs[idx + 8:]
            else:
                idx = avs.find('post-')
                if idx != -1:
                    post_id = avs[idx + 5:]
                else:
                    print(f"Unparsable post id {meta['repo_file']} - {avs}")
            if post_id:
                grouped.setdefault(post_id, []).append(meta)
    else:
        for meta in content_meta:
            if 'title' in meta:
                grouped.setdefault(slugify(meta['title'], '-'), []).append(meta)
    return grouped
def generate_content_page(page_name, metas, content_md, index_entries, author, content_type):
    """Dispatch to the film or TV markdown page generator based on *content_type*."""
    writer = generate_film_content_page if content_type == 'film' else generate_tv_content_page
    writer(page_name, metas, content_md, index_entries, author)
def generate_film_content_page(page_name, metas, content_md, index_entries, author):
    """Write the markdown page for one film (one heading per audio format).

    Side effects: appends a row per format to *index_entries* (via
    generate_index_entry), writes a row to the module-level ``db_writer`` CSV,
    and appends a JSON entry to the module-level ``json_catalogue``.
    """
    # Page heading: all metas for this page share the same film title.
    print(f"# {metas[0]['title']}", file=content_md)
    print("", file=content_md)
    print(f"* Author: {author}", file=content_md)
    if 'avs' in metas[0]:
        print(f"* [Forum Post]({metas[0]['avs']})", file=content_md)
    production_years = {m['year'] for m in metas}
    img_idx = 0  # NOTE(review): unused — looks like leftover from an older image-numbering scheme
    if len(production_years) == 1:
        # Single year: show it once at the top. pop() empties the set, which
        # suppresses the per-format year lines in the loop below.
        print(f"* Production Year: {production_years.pop()}", file=content_md)
    print("", file=content_md)
    # One section per entry, ordered by audio format string.
    for meta in sorted(metas, key=lambda m: ', '.join(m.get('audioType', ''))):
        if 'pvaURL' not in meta and 'spectrumURL' not in meta:
            print(f"No charts found in {meta}")
        else:
            audio_type = meta.get('audioType', '')
            beq_catalogue_url = ''
            actual_img_links = []
            if 'pvaURL' in meta:
                actual_img_links.append(meta['pvaURL'])
            if 'spectrumURL' in meta:
                actual_img_links.append(meta['spectrumURL'])
            if audio_type:
                linked_content_format = ', '.join(audio_type)
                print(f"## {linked_content_format}", file=content_md)
                print("", file=content_md)
                if production_years:
                    # multiple years across metas: show each entry's own year
                    print(f"* Production Year: {meta['year']}", file=content_md)
                    print("", file=content_md)
                for img in actual_img_links:
                    # NOTE(review): the f-string below looks truncated — it
                    # presumably emitted a markdown image link for {img};
                    # confirm against repository history.
                    print(f"", file=content_md)
                    print('', file=content_md)
                bd_url = generate_index_entry(author, page_name, linked_content_format, meta['title'], meta['year'],
                                              meta.get('avs', None), len(metas) > 1, index_entries)
                prefix = 'https://beqcatalogue.readthedocs.io/en/latest'
                # deep-link to this format's heading on the generated page
                beq_catalogue_url = f"{prefix}/{author}/{page_name}/#{slugify(linked_content_format, '-')}"
                cols = [
                    meta['title'],
                    meta['year'],
                    linked_content_format,
                    author,
                    meta.get('avs', ''),
                    beq_catalogue_url,
                    bd_url,
                    meta['filters']
                ]
                db_writer.writerow(cols + actual_img_links)
            else:
                print(f"No audioTypes in {metas[0]['title']}")
            # catalogue entry is appended even without audio types; in that
            # case beq_catalogue_url remains ''.
            json_catalogue.append({
                'title': meta['title'],
                'year': meta['year'],
                'audioTypes': meta.get('audioType', []),
                'content_type': 'film',
                'author': author,
                'catalogue_url': beq_catalogue_url,
                'filters': meta['jsonfilters'],
                'images': actual_img_links,
                'warning': meta.get('warning', ''),
                'mv': meta.get('gain', '0'),
                'avs': meta.get('avs', ''),
                'sortTitle': meta.get('sortTitle', ''),
                'edition': meta.get('edition', ''),
                'note': meta.get('note', ''),
                'language': meta.get('language', ''),
                'source': meta.get('source', ''),
                'overview': meta.get('overview', ''),
                'theMovieDB': meta.get('theMovieDB', ''),
                'rating': meta.get('rating', ''),
                'runtime': meta.get('runtime', '0'),
                'genres': meta.get('genres', [])
            })
def format_season_episode(m) -> Tuple[str, str, str, str]:
    """Build season/episode labels from a metadata dict.

    Returns ``(long_label, short_label, season, episodes)``, e.g.
    ``('Season 1 Episodes 1-3, 5', 'S1E1-3, 5', '1', '1,2,3,5')``.
    Consecutive episode numbers are collapsed into ranges.
    """
    if 'season' not in m:
        return '', '', '', ''
    season_meta = m['season']
    if isinstance(season_meta, str):
        # legacy form: season is a bare string, episode (if any) sits on the meta
        season = season_meta
        long_label, short_label = f"Season {season}", f"S{season}"
        episodes = ''
        if 'episode' in m:
            episodes = m['episode']
            long_label += f" Episode {episodes}"
            short_label += f"E{episodes}"
        return long_label, short_label, season, episodes
    # structured form: dict with number/complete/episodes keys
    season = season_meta['number']
    long_label, short_label = f"Season {season}", f"S{season}"
    episodes = ''
    if not season_meta['complete']:
        episodes = season_meta['episodes']
        label = episodes
        plural = ''
        if ',' in episodes:
            nums = [int(e) for e in episodes.split(',')]
            if len(nums) > 1:
                # collapse runs of consecutive numbers: enumerate index minus
                # value is constant within a consecutive run
                runs = []
                for _, grp in groupby(enumerate(nums), lambda t: t[0] - t[1]):
                    members = [value for _, value in grp]
                    if members[0] == members[-1]:
                        runs.append(f"{members[0]}")
                    else:
                        runs.append(f"{members[0]}-{members[-1]}")
                label = ', '.join(runs)
                plural = 's'
        long_label += f" Episode{plural} {label}"
        short_label += f"E{label}"
    return long_label, short_label, season, episodes
def generate_tv_content_page(page_name, metas, content_md, index_entries, author):
    """Write the markdown page for one TV series (one section per season/entry).

    Side effects: appends index rows to *index_entries*, writes a row to the
    module-level ``db_writer`` CSV and appends a JSON entry to the module-level
    ``json_catalogue`` for every meta.
    """
    print(f"# {metas[0]['title']}", file=content_md)
    print("", file=content_md)
    print(f"* Author: {author}", file=content_md)
    img_idx = 0  # NOTE(review): unused — looks like leftover from an older image-numbering scheme
    print("", file=content_md)
    def sort_meta(m):
        # order entries by season number then episode list (string comparison)
        sort_key = ''
        if 'season' in m:
            season_meta = m['season']
            if isinstance(season_meta, str):
                sort_key = season_meta
                if 'episode' in m:
                    sort_key += m['episode']
            else:
                sort_key = season_meta['number']
                if not season_meta['complete']:
                    sort_key += season_meta['episodes']
        return sort_key
    for meta in sorted(metas, key=sort_meta):
        audio_type = meta.get('audioType', '')
        linked_content_format = ''
        actual_img_links = []
        long_season, short_season, season, episodes = format_season_episode(meta)
        if 'pvaURL' in meta:
            actual_img_links.append(meta['pvaURL'])
        if 'spectrumURL' in meta:
            actual_img_links.append(meta['spectrumURL'])
        if long_season:
            print(f"## {long_season}", file=content_md)
            print("", file=content_md)
        if audio_type:
            linked_content_format = ', '.join(audio_type)
            print(f"* {linked_content_format}", file=content_md)
            print("", file=content_md)
        if 'avs' in meta:
            print(f"* [Forum Post]({meta['avs']})", file=content_md)
        if 'year' in meta:
            print(f"* Production Year: {meta['year']}", file=content_md)
        print("", file=content_md)
        for img in actual_img_links:
            # NOTE(review): the f-string below looks truncated — it presumably
            # emitted a markdown image link for {img}; confirm against history.
            print(f"", file=content_md)
            print('', file=content_md)
        # deep-link to the season heading when there is one
        extra_slug = f"#{slugify(long_season, '-')}" if long_season else ''
        bd_url = generate_index_entry(author, page_name, linked_content_format, f"{meta['title']} {short_season}",
                                      meta['year'], meta.get('avs', None), len(metas) > 1, index_entries,
                                      content_type='TV', extra_slug=extra_slug)
        prefix = 'https://beqcatalogue.readthedocs.io/en/latest'
        slugified_link = f"/{extra_slug}" if extra_slug else ''
        beq_catalogue_url = f"{prefix}/{author}/{page_name}{slugified_link}"
        # NOTE(review): meta['year'] is accessed unguarded here and below even
        # though the markdown emission guards on 'year' — confirm all TV metas
        # carry a year.
        cols = [
            meta['title'],
            meta['year'],
            linked_content_format,
            author,
            meta.get('avs', ''),
            beq_catalogue_url,
            bd_url,
            meta['filters']
        ]
        db_writer.writerow(cols + actual_img_links)
        # TODO remove once metadata is added
        if author == 'mobe1969' and len(actual_img_links) == 0:
            # synthesise a chart image URL from the repo layout when the
            # metadata carries no chart links
            from urllib.parse import quote
            print(f"Generating img link for missing meta in {meta}")
            fp = meta['file_path'].replace('TV BEQs', 'TV Series')
            img = f"https://gitlab.com/Mobe1969/beq-reports/-/raw/master/{quote(fp)}/{quote(meta['file_name'])}.jpg"
            actual_img_links = [img]
            # NOTE(review): truncated-looking f-string, as above.
            print(f"", file=content_md)
            print('', file=content_md)
        json_catalogue.append({
            'title': meta['title'],
            'year': meta['year'],
            'audioTypes': meta.get('audioType', []),
            'content_type': 'TV',
            'author': author,
            'catalogue_url': beq_catalogue_url,
            'filters': meta['jsonfilters'],
            'images': actual_img_links,
            'warning': meta.get('warning', ''),
            'season': season,
            'episode': episodes,
            'mv': meta.get('gain', '0'),
            'avs': meta.get('avs', ''),
            'sortTitle': meta.get('sortTitle', ''),
            'edition': meta.get('edition', ''),
            'note': meta.get('note', ''),
            'language': meta.get('language', ''),
            'source': meta.get('source', ''),
            'overview': meta.get('overview', ''),
            'theMovieDB': meta.get('theMovieDB', ''),
            'rating': meta.get('rating', ''),
            'genres': meta.get('genres', [])
        })
def generate_index_entry(author, page_name, content_format, content_name, year, avs_url, multiformat, index_entries,
                         content_type='film', extra_slug=None):
    """Append one markdown index-table row for this entry and return its blu-ray search URL."""
    escaped_name = parse.quote(content_name)
    search_urls = {
        'themoviedb': f"https://www.themoviedb.org/search?query={escaped_name}",
        'rottentoms': f"https://www.rottentomatoes.com/search?search={escaped_name}",
        'blu-ray': f"https://www.blu-ray.com/movies/search.php?keyword={escaped_name}&submit=Search&action=search&",
    }
    if content_type == 'film':
        # films link straight to the per-format heading when several formats exist
        if multiformat is True:
            extra_slug = f"#{slugify(content_format, '-')}"
        else:
            extra_slug = ''
    forum_link = '' if not avs_url else f"[avsforum]({avs_url})"
    row = (
        f"| [{content_name}](./{author}/{page_name}.md{extra_slug}) "
        f"| {content_type} | {year} | {content_format} | {'Yes' if multiformat else 'No'} "
        f"| {forum_link} [blu-ray]({search_urls['blu-ray']}) [themoviedb]({search_urls['themoviedb']}) [rottentoms]({search_urls['rottentoms']}) |"
    )
    index_entries.append(row)
    return search_urls['blu-ray']
# When launched from the directory containing this script, hop up one level so
# relative paths like 'docs/...' and '.input/...' resolve against the repo root.
if os.getcwd() == os.path.dirname(os.path.abspath(__file__)):
    print(f"Switching CWD from {os.getcwd()}")
    os.chdir('..')
else:
    print(f"CWD: {os.getcwd()}")
if __name__ == '__main__':
    # Pull raw metadata out of each author's local clone of their BEQ repo.
    aron7awol_films = extract_from_repo('.input/bmiller/miniDSPBEQ/', 'Movie BEQs', 'film')
    print(f"Extracted {len(aron7awol_films)} aron7awol film catalogue entries")
    aron7awol_tv = extract_from_repo('.input/bmiller/miniDSPBEQ/', 'TV Shows BEQ', 'TV')
    print(f"Extracted {len(aron7awol_tv)} aron7awol TV catalogue entries")
    mobe1969_films = extract_from_repo('.input/Mobe1969/miniDSPBEQ/', 'Movie BEQs', 'film')
    print(f"Extracted {len(mobe1969_films)} mobe1969 film catalogue entries")
    mobe1969_tv = extract_from_repo('.input/Mobe1969/miniDSPBEQ/', 'TV BEQs', 'TV')
    print(f"Extracted {len(mobe1969_tv)} mobe1969 TV catalogue entries")
    # Module-level accumulator: the generate_*_content_page functions append to
    # this as pages are written.
    json_catalogue = []
    with open('docs/database.csv', 'w+', newline='') as db_csv:
        # Module-level CSV writer used by the page generators.
        db_writer = csv.writer(db_csv)
        db_writer.writerow(['Title', 'Year', 'Format', 'Author', 'AVS', 'Catalogue', 'blu-ray.com', 'filters'])
        # Per-author index page: one table row per generated entry.
        index_entries = []
        process_aron7awol_content_from_repo(aron7awol_films, index_entries, 'film')
        process_aron7awol_content_from_repo(aron7awol_tv, index_entries, 'TV')
        with open('docs/aron7awol.md', mode='w+') as index_md:
            print(f"# aron7awol", file=index_md)
            print('', file=index_md)
            print(f"| Title | Type | Year | Format | Multiformat? | Links |", file=index_md)
            print(f"|-|-|-|-|-|-|", file=index_md)
            for i in sorted(index_entries, key=str.casefold):
                print(i, file=index_md)
        index_entries = []
        process_mobe1969_content_from_repo(mobe1969_films, index_entries, 'film')
        process_mobe1969_content_from_repo(mobe1969_tv, index_entries, 'TV')
        with open('docs/mobe1969.md', mode='w+') as index_md:
            print(f"# Mobe1969", file=index_md)
            print('', file=index_md)
            print(f"| Title | Type | Year | Format | Multiformat? | Links |", file=index_md)
            print(f"|-|-|-|-|-|-|", file=index_md)
            for i in sorted(index_entries, key=str.casefold):
                print(i, file=index_md)
            print('', file=index_md)
    # Machine-readable dump of everything collected above.
    with open('docs/database.json', 'w+') as db_json:
        json.dump(json_catalogue, db_json, indent=0)
| 2.53125 | 3 |
galaxytoqrcode.py | chrislintott/GZMaze | 0 | 12769180 | <reponame>chrislintott/GZMaze
import qrcode  # get this https://pypi.python.org/pypi/qrcode eg with pip install qrcode
import requests  # was missing: requests.get is used below, which crashed with NameError
import json
import matplotlib.pyplot as plt

# Set up parameters for getting stuff from the Panoptes (Zooniverse) API
url = 'https://panoptes.zooniverse.org/api/subjects?workflow_id=2076&sort=queued'
headers = {'Content-Type': 'application/json', 'Accept': 'application/vnd.api+json; version=1'}

# Fetch the next queued subject for the workflow.
r = requests.get(url, headers=headers)
sub = r.json()
# `subject_id` renamed from `id`, which shadowed the builtin.
subject_id = sub['subjects'][0]['id']
# NOTE(review): ImageURL is currently unused; kept for future display of the subject image.
ImageURL = sub['subjects'][0]['locations'][0]['image/jpeg']

# Encode the subject id as a QR code and display it with matplotlib.
qr = qrcode.QRCode(
    version=1,
    error_correction=qrcode.constants.ERROR_CORRECT_L,
    box_size=10,
    border=4,
)
qr.add_data(subject_id)
qr.make(fit=True)
img = qr.make_image()
plt.imshow(img)
plt.show()
'''3. Write a Python program to print the square and cube symbol in the
area of a rectangle and volume of a cylinder.

Sample output:
The area of the rectangle is 1256.66cm2
The volume of the cylinder is 1254.725cm3'''

# Fixed sample measurements matching the expected output above.
area = 1256.66
volume = 1254.725

# str.format instead of f-strings; the printed text is identical.
print("The area of the rectangle is {}cm2".format(round(area, 2)))
print("The volume of the cylinder is {}cm3".format(round(volume, 3)))
tests/apisports/test_client.py | MikeSmithEU/apisports | 0 | 12769182 | import os
from contextlib import contextmanager
from math import ceil
import pytest
import requests
import requests_mock
from apisports import _client_class
from apisports._client import ClientMeta, ClientInitError
from apisports.data import SingleData, NoneData, SimpleData, PagedData
from helpers import assert_response_ok
@contextmanager
def clientmeta_test_path():
    """Temporarily point ClientMeta at the test-data YAML directory, restoring it afterwards."""
    saved_dir = ClientMeta.data_dir
    try:
        ClientMeta.data_dir = os.path.join(os.path.dirname(__file__), 'data')
        yield ClientMeta
    finally:
        ClientMeta.data_dir = saved_dir
def expect_client_init_error(name, version=None):
    """Assert that building a client for *name*/*version* raises ClientInitError with the expected message."""
    version = 1 if version is None else version
    with clientmeta_test_path():
        with pytest.raises(ClientInitError) as excinfo:
            _client_class(name, version)
        yaml_name = f'{name.lower()}-v{version}.yaml'
        expected = (
            f"Could not load API config for {name.lower()} "
            f"from {os.path.join(ClientMeta.data_dir, yaml_name)}"
        )
        assert str(excinfo.value) == expected
def clientmeta_test_class(name, version=None):
    """Build the client class for *name*/*version* using the test-data YAML directory."""
    with clientmeta_test_path():
        return _client_class(name, version)
@pytest.fixture
def test_v3():
    """Client class generated from the bundled 'test' API definition, version 3."""
    return clientmeta_test_class('test', 3)
@pytest.fixture
def mock(session):
    """Active requests-mock context for the duration of a test.

    The original ``return``-ed from inside the ``with`` block, which stopped
    the mocker before the test body ever ran; yielding keeps it active until
    fixture teardown. The (unused) ``session`` argument is retained so the
    session fixture is still constructed first.
    """
    with requests_mock.mock() as m:
        yield m
@pytest.fixture
def session(adapter):
    """requests.Session with the mock adapter mounted for the http+mock scheme."""
    mock_session = requests.Session()
    mock_session.mount('http+mock://', adapter)
    return mock_session
@pytest.fixture
def adapter():
    """Fresh requests-mock Adapter to mount on the test session."""
    return requests_mock.Adapter()
def register_mock_uri(adapter, *args, **kwargs):
    """Decorator factory: register the decorated callable as a JSON GET handler on *adapter*.

    The wrapper forces a 200 status and a JSON content type on every response.
    Note: the decorator returns None, so the decorated name is rebound to None —
    the handler only lives on as the adapter's ``json`` callback.
    """
    def register(func):
        def json_responder(request, context):
            context.status_code = 200
            context.headers['Content-Type'] = 'application/json'
            return func(request, context)
        adapter.register_uri(
            'GET',
            *args,
            **kwargs,
            json=json_responder
        )
    return register
def test_client_init_error():
    """Missing or malformed YAML definitions raise ClientInitError with the expected message."""
    expect_client_init_error('FileDoesNotExist')
    expect_client_init_error('InvalidYAML')
def test_clientmeta(test_v3, session):
    """The generated class exposes the configured host plus one callable per endpoint."""
    assert test_v3.default_host == 'http+mock://api-test1.server.local'
    for endpoint in (test_v3.status, test_v3.ping, test_v3.null, test_v3.paginated_count, test_v3.import_):
        assert callable(endpoint)
def test_session(test_v3, session):
    """An explicitly supplied session must be reused; otherwise the client builds its own."""
    explicit = test_v3(session=session)
    implicit = test_v3()
    assert type(explicit._session) is requests.Session
    assert explicit._session is session
    assert type(implicit._session) is requests.Session
    assert implicit._session is not session
def test_status(test_v3, session, mock, adapter):
    """A single-object response is wrapped in SingleData and behaves as a one-item iterable."""
    @register_mock_uri(adapter, 'http+mock://api-test1.server.local/status')
    def mock_status(request, context):
        return {"response": {"status": "ok"}}
    test = test_v3(session=session)
    response = test.status()
    assert_response_ok(response)
    expected = dict(status="ok")
    data = response.data
    assert type(data) is SingleData
    assert len(response) == 1
    assert list(iter(data)) == [expected]
    assert next(iter(response)) == expected
    assert next(iter(data)) == expected
    assert data.item() == expected
def test_null(test_v3, session, mock, adapter):
    """A null response payload maps to the NoneData singleton."""
    @register_mock_uri(adapter, 'http+mock://api-test1.server.local/null')
    def mock_null(request, context):
        return {"response": None}
    test = test_v3(session=session)
    response = test.null()
    assert_response_ok(response)
    assert response.data is NoneData
def test_python_keyword_import(test_v3, session, mock, adapter):
    """Endpoints named after Python keywords are exposed with a trailing underscore (import_)."""
    @register_mock_uri(adapter, 'http+mock://api-test1.server.local/import')
    def mock_null(request, context):
        return {"response": None}
    test = test_v3(session=session)
    response = test.import_()
    assert_response_ok(response)
    assert response.data is NoneData
def test_paginated_count(test_v3, session, mock, adapter):
    """Paging behaviour: multi-page -> PagedData, one page -> SimpleData, one item -> SingleData."""
    @register_mock_uri(adapter, 'http+mock://api-test1.server.local/paginated-count')
    def mock_paginated_count(request, context):
        # Fake endpoint returning the integers [from..to] in pages of three.
        per_page = 3
        params = {k: v[0] for k, v in request.qs.items()}
        try:
            start = int(params["from"]) if 'from' in params else 1
            stop = int(params["to"]) + 1 if 'to' in params else 14
            page = int(params['page']) if 'page' in params else 1
        except ValueError as exc:
            # non-integer query parameters yield an API-style error payload
            return {
                "errors": [
                    {
                        "message": str(exc)
                    }
                ]
            }
        start = start + (page - 1) * per_page
        if start > stop:
            return {
                "errors": [
                    {
                        "page": "value too high"
                    }
                ]
            }
        result = list(range(start, min(stop, start + per_page)))
        return {
            "get": "paginated-count",
            "parameters": params,
            "paging": {
                "current": page,
                "total": ceil((stop - start) / per_page),
            },
            "results": len(result),
            "response": result
        }
    test = test_v3(session=session)
    test.paginated_count()  # smoke call with the default range; result intentionally unused
    response = test.paginated_count(**{"from": 1, "to": 10})
    expected = list(range(1, 11))
    assert type(response.data) is PagedData
    assert list(iter(response.data)) == expected
    assert list(iter(response)) == expected
    # test support for keyword safe parameter alias
    response = test.paginated_count(from_=1, to=10)
    expected = list(range(1, 11))
    assert type(response.data) is PagedData
    assert list(iter(response.data)) == expected
    assert list(iter(response)) == expected
    response = test.paginated_count(from_=1, to=2)
    expected = [1, 2]
    assert type(response.data) is SimpleData
    assert list(iter(response.data)) == expected
    assert list(iter(response)) == expected
    response = test.paginated_count(from_=1, to=1)
    expected = [1]
    assert type(response.data) is SingleData
    assert list(iter(response.data)) == expected
    assert list(iter(response)) == expected
    assert response.data.item() == 1
| 2.140625 | 2 |
python/data-wrangling-components/tests/engine/test_ungroup.py | microsoft/data-wrangling-components | 4 | 12769183 | #
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project.
#
from data_wrangling_components.engine.verbs.groupby import groupby
from data_wrangling_components.engine.verbs.ungroup import ungroup
from data_wrangling_components.types import Step, Verb
from tests.engine.test_store import get_test_store
def test_ungroup():
    """Group table10 by x/y, then ungroup and verify the flattened table."""
    store = get_test_store()
    grouped = groupby(
        Step(Verb.Groupby, "table10", "output", args={"columns": ["x", "y"]}),
        store,
    )
    store.set("newTable", grouped)
    result = ungroup(Step(Verb.Ungroup, "newTable", "output"), store)
    table = result.table
    assert len(table.columns) == 3
    assert len(table) == 3
    for row, value in enumerate(["A", "B", "A"]):
        assert table.loc[row, "x"] == value
| 2.359375 | 2 |
src/mylib/lgb/util.py | murez/mobile-semantic-segmentation | 713 | 12769184 | <reponame>murez/mobile-semantic-segmentation
from typing import List
import pandas as pd
from lightgbm import Booster
def make_imp_df(boosters: List[Booster]) -> pd.DataFrame:
    """Average feature importances across boosters, sorted ascending and indexed by feature name."""
    frames = []
    for booster in boosters:
        frames.append(pd.DataFrame({
            'name': booster.feature_name(),
            'importance': booster.feature_importance(),
        }))
    combined = pd.concat(frames)
    averaged = combined.groupby('name').mean().sort_values('importance')
    return averaged.reset_index(level='name').set_index('name')
| 2.640625 | 3 |
tests_basic/test_winstonlutz.py | alanphys/pylinac | 0 | 12769185 | <reponame>alanphys/pylinac
import copy
import io
import os.path as osp
import tempfile
from unittest import TestCase
import matplotlib.pyplot as plt
import pylinac
from pylinac import WinstonLutz
from pylinac.winston_lutz import Axis, WinstonLutzResult
from pylinac.core.geometry import Vector, vector_is_close
from tests_basic.utils import save_file, LoadingTestBase, LocationMixin, get_folder_from_cloud_test_repo, \
get_file_from_cloud_test_repo
# Local cache directory for the Winston-Lutz test images pulled from the cloud test repo.
TEST_DIR = get_folder_from_cloud_test_repo(['Winston-Lutz'])
class TestWLLoading(LoadingTestBase, TestCase):
    """Constructor behaviour of WinstonLutz: demo, zip, open file handle, in-memory stream."""
    klass = WinstonLutz
    demo_load_method = 'from_demo_images'
    url = 'winston_lutz.zip'

    def test_loading_1_image_fails(self):
        """A WL analysis needs more than one image; a single-image folder should raise."""
        with self.assertRaises(ValueError):
            folder = get_folder_from_cloud_test_repo(['Winston-Lutz', 'lutz', '1_image'])
            WinstonLutz(folder)

    def test_invalid_dir(self):
        """Constructing from a nonexistent directory should raise."""
        with self.assertRaises(ValueError):
            WinstonLutz(r'nonexistant/dir')

    def test_load_from_file_object(self):
        """Loading from an open binary file handle matches loading from the path."""
        path = osp.join(TEST_DIR, 'noisy_WL_30x5.zip')
        ref_w = WinstonLutz.from_zip(path)
        ref_w.analyze()
        with open(path, 'rb') as f:
            w = WinstonLutz.from_zip(f)
            w.analyze()
        self.assertIsInstance(w, WinstonLutz)
        self.assertEqual(w.gantry_iso_size, ref_w.gantry_iso_size)

    def test_load_from_stream(self):
        """Loading from an in-memory BytesIO stream matches loading from the path."""
        path = osp.join(TEST_DIR, 'noisy_WL_30x5.zip')
        ref_w = WinstonLutz.from_zip(path)
        ref_w.analyze()
        with open(path, 'rb') as f:
            s = io.BytesIO(f.read())
            w = WinstonLutz.from_zip(s)
            w.analyze()
        self.assertIsInstance(w, WinstonLutz)
        self.assertEqual(w.gantry_iso_size, ref_w.gantry_iso_size)
class GeneralTests(TestCase):
    """General API behaviour on the demo image set (analysis is run once per class)."""

    @classmethod
    def setUpClass(cls):
        cls.wl = WinstonLutz.from_demo_images()
        cls.wl.analyze()

    def test_run_demo(self):
        WinstonLutz.run_demo()  # shouldn't raise

    def test_results(self):
        print(self.wl.results())  # shouldn't raise

    def test_not_yet_analyzed(self):
        """Result/plot accessors must raise until analyze() has been called."""
        wl = WinstonLutz.from_demo_images()
        with self.assertRaises(ValueError):
            wl.results()  # not yet analyzed
        with self.assertRaises(ValueError):
            wl.plot_images()
        with self.assertRaises(ValueError):
            wl.plot_summary()

    def test_str_or_enum(self):
        """Axis arguments are accepted both as strings and as Axis enum members."""
        # shouldn't raise
        self.wl.plot_images('Gantry')
        self.wl.plot_images(Axis.GANTRY)
        self.wl.plot_axis_images('Gantry')
        self.wl.plot_axis_images(Axis.GANTRY)

    def test_bb_override(self):
        """A grossly wrong expected BB size should make analyze() fail."""
        with self.assertRaises(ValueError):
            wl = pylinac.WinstonLutz.from_demo_images()
            wl.analyze(bb_size_mm=8)

    def test_bb_shift_instructions(self):
        """Shift instructions mention the expected direction and couch values."""
        move = self.wl.bb_shift_instructions()
        self.assertTrue("RIGHT" in move)
        move = self.wl.bb_shift_instructions(couch_vrt=-2, couch_lat=1, couch_lng=100)
        self.assertTrue("RIGHT" in move)
        self.assertTrue("VRT" in move)

    def test_results_data(self):
        """results_data() fields agree with the equivalent method calls; dict form included."""
        data = self.wl.results_data()
        self.assertIsInstance(data, WinstonLutzResult)
        self.assertEqual(data.num_couch_images, self.wl._get_images(axis=(Axis.COUCH, Axis.REFERENCE))[0])
        self.assertEqual(data.max_2d_cax_to_epid_mm, self.wl.cax2epid_distance('max'))
        self.assertEqual(data.median_2d_cax_to_epid_mm, self.wl.cax2epid_distance('median'))
        data_dict = self.wl.results_data(as_dict=True)
        self.assertIn('pylinac_version', data_dict)
        self.assertEqual(data_dict['gantry_3d_iso_diameter_mm'], self.wl.gantry_iso_size)

    def test_bb_too_far_away_fails(self):
        """BB is >20mm from CAX"""
        file = get_file_from_cloud_test_repo([TEST_DIR, 'bb_too_far_away.zip'])
        wl = WinstonLutz.from_zip(file)
        with self.assertRaises(ValueError):
            wl.analyze()
class TestPublishPDF(TestCase):
    """PDF report publishing, plain and with notes/metadata."""

    @classmethod
    def setUpClass(cls):
        cls.wl = WinstonLutz.from_demo_images()
        cls.wl.analyze()

    def test_publish_pdf(self):
        # normal publish; shouldn't raise
        with tempfile.TemporaryFile() as t:
            self.wl.publish_pdf(t)

    def test_publish_w_metadata_and_notes(self):
        # notes and metadata are optional extras on the report
        with tempfile.TemporaryFile() as t:
            self.wl.publish_pdf(t, notes='stuff', metadata={'Unit': 'TB1'})
class TestPlottingSaving(TestCase):
    """Plot and save routines; matplotlib figures are closed after the class finishes."""

    def setUp(self):
        self.wl = WinstonLutz.from_demo_images()
        self.wl.analyze()

    @classmethod
    def tearDownClass(cls):
        plt.close('all')  # free all figures created during the tests

    def test_plot(self):
        self.wl.plot_images()  # shouldn't raise
        self.wl.plot_images(axis=Axis.GANTRY)
        self.wl.plot_images(axis=Axis.COLLIMATOR)
        self.wl.plot_images(axis=Axis.COUCH)
        self.wl.plot_images(axis=Axis.GB_COMBO)
        self.wl.plot_images(axis=Axis.GBP_COMBO)

    def test_save(self):
        save_file(self.wl.save_summary)
        save_file(self.wl.save_images)

    def test_plot_wo_all_axes(self):
        # test that analyzing images w/o gantry images doesn't fail
        wl_zip = osp.join(TEST_DIR, 'Naming.zip')
        wl = WinstonLutz.from_zip(wl_zip, use_filenames=True)
        wl.analyze()
        wl.plot_summary()  # shouldn't raise
class WinstonLutzMixin(LocationMixin):
    """Parameterised test harness: subclasses point at a dataset and declare expected values.

    Each ``test_*`` method below compares one analysis metric against the
    subclass's declared expectation within a small tolerance.
    """
    cloud_dir = 'Winston-Lutz'
    num_images = 0            # expected number of images in the dataset
    zip = True                # whether the dataset is a zip archive
    bb_size = 5               # BB diameter (mm) passed to analyze()
    gantry_iso_size = 0
    collimator_iso_size = 0   # None disables the check
    couch_iso_size = 0        # None disables the check
    cax2bb_max_distance = 0
    cax2bb_median_distance = 0
    epid_deviation = None     # None disables the check
    bb_shift_vector = Vector()  # vector to place BB at iso
    axis_of_rotation = {0: Axis.REFERENCE}  # fill with as many {image#: known_axis_of_rotation} pairs as desired
    print_results = False
    use_filenames = False

    @classmethod
    def setUpClass(cls):
        # load + analyze once per class; all tests read from cls.wl
        filename = cls.get_filename()
        if cls.zip:
            cls.wl = WinstonLutz.from_zip(filename, use_filenames=cls.use_filenames)
        else:
            cls.wl = WinstonLutz(filename, use_filenames=cls.use_filenames)
        cls.wl.analyze(bb_size_mm=cls.bb_size)
        if cls.print_results:
            print(cls.wl.results())
            print(cls.wl.bb_shift_vector)

    def test_number_of_images(self):
        self.assertEqual(self.num_images, len(self.wl.images))

    def test_gantry_iso(self):
        # test iso size
        self.assertAlmostEqual(self.wl.gantry_iso_size, self.gantry_iso_size, delta=0.15)

    def test_collimator_iso(self):
        # test iso size
        if self.collimator_iso_size is not None:
            self.assertAlmostEqual(self.wl.collimator_iso_size, self.collimator_iso_size, delta=0.15)

    def test_couch_iso(self):
        # test iso size
        if self.couch_iso_size is not None:
            self.assertAlmostEqual(self.wl.couch_iso_size, self.couch_iso_size, delta=0.15)

    def test_epid_deviation(self):
        if self.epid_deviation is not None:
            self.assertAlmostEqual(max(self.wl.axis_rms_deviation(Axis.EPID)), self.epid_deviation, delta=0.15)

    def test_bb_max_distance(self):
        self.assertAlmostEqual(self.wl.cax2bb_distance(metric='max'), self.cax2bb_max_distance, delta=0.15)

    def test_bb_median_distance(self):
        self.assertAlmostEqual(self.wl.cax2bb_distance(metric='median'), self.cax2bb_median_distance, delta=0.1)

    def test_bb_shift_vector(self):
        self.assertTrue(vector_is_close(self.wl.bb_shift_vector, self.bb_shift_vector, delta=0.15), msg="The vector {} is not sufficiently close to vector {}".format(self.wl.bb_shift_vector, self.bb_shift_vector))

    def test_known_axis_of_rotation(self):
        # each declared image index must report the expected moving axis
        for idx, axis in self.axis_of_rotation.items():
            self.assertEqual(axis, self.wl.images[idx].variable_axis)
class WLDemo(WinstonLutzMixin, TestCase):
    """Expected analysis values for the built-in demo image set."""
    num_images = 17
    gantry_iso_size = 1
    collimator_iso_size = 1.2
    couch_iso_size = 2.3
    cax2bb_max_distance = 1.2
    cax2bb_median_distance = 0.7
    epid_deviation = 1.3
    axis_of_rotation = {0: Axis.REFERENCE}
    bb_shift_vector = Vector(x=0.4, y=-0.4, z=-0.2)

    @classmethod
    def setUpClass(cls):
        # demo images ship with pylinac, so skip the mixin's cloud-file loading
        cls.wl = WinstonLutz.from_demo_images()
        cls.wl.analyze()
class WLPerfect30x8(WinstonLutzMixin, TestCase):
    """30x30mm field, 8mm BB"""
    # synthetic dataset: every deviation metric should come out at zero
    file_path = ['perfect_WL_30x8.zip']
    num_images = 4
    gantry_iso_size = 0
    collimator_iso_size = 0
    couch_iso_size = 0
    cax2bb_max_distance = 0
    cax2bb_median_distance = 0
    epid_deviation = 0
    bb_shift_vector = Vector()
class WLPerfect30x2(WinstonLutzMixin, TestCase):
    """30x30mm field, 2mm BB"""
    # synthetic dataset with a small BB; overrides the mixin's default bb_size
    file_path = ['perfect_WL_30x2mm.zip']
    num_images = 4
    gantry_iso_size = 0
    collimator_iso_size = 0
    couch_iso_size = 0
    cax2bb_max_distance = 0
    cax2bb_median_distance = 0
    epid_deviation = 0
    bb_shift_vector = Vector()
    bb_size = 2
class WLPerfect10x4(WinstonLutzMixin, TestCase):
    """10x10mm field, 4mm BB"""
    # synthetic dataset: every deviation metric should come out at zero
    file_path = ['perfect_WL_10x4.zip']
    num_images = 4
    gantry_iso_size = 0
    collimator_iso_size = 0
    couch_iso_size = 0
    cax2bb_max_distance = 0
    cax2bb_median_distance = 0
    epid_deviation = 0
    bb_shift_vector = Vector()
class WLNoisy30x5(WinstonLutzMixin, TestCase):
    """30x30mm field, 5mm BB. S&P noise added"""
    # salt & pepper noise nudges the gantry iso slightly above zero
    file_path = ['noisy_WL_30x5.zip']
    num_images = 4
    gantry_iso_size = 0.08
    collimator_iso_size = 0
    couch_iso_size = 0
    cax2bb_max_distance = 0
    cax2bb_median_distance = 0
    epid_deviation = 0
    bb_shift_vector = Vector()
class WLLateral3mm(WinstonLutzMixin, TestCase):
    """Dataset with a deliberate ~3mm lateral BB offset."""
    # verified independently
    file_path = ['lat3mm.zip']
    num_images = 4
    gantry_iso_size = 0.5
    cax2bb_max_distance = 3.8
    cax2bb_median_distance = 2.3
    bb_shift_vector = Vector(x=-3.6, y=0.5, z=0.6)
class WLLongitudinal3mm(WinstonLutzMixin, TestCase):
    """Dataset with a deliberate ~3mm longitudinal BB offset."""
    # verified independently
    file_path = ['lng3mm.zip']
    num_images = 4
    gantry_iso_size = 0.5
    cax2bb_max_distance = 3.9
    cax2bb_median_distance = 3.7
    bb_shift_vector = Vector(x=-0.63, y=3.6, z=0.6)
class WLVertical3mm(WinstonLutzMixin, TestCase):
    """Dataset with a deliberate ~3mm vertical BB offset."""
    file_path = ['vrt3mm.zip']
    num_images = 4
    gantry_iso_size = 0.5
    cax2bb_max_distance = 3.8
    cax2bb_median_distance = 2.3
    bb_shift_vector = Vector(x=-0.5, y=0.5, z=3.6)
    print_results = True  # dumps the full results/shift vector during setUpClass
class WLDontUseFileNames(WinstonLutzMixin, TestCase):
    """With use_filenames off, axis values come from the DICOM data (images read as gantry moves)."""
    file_path = ['Naming.zip']
    num_images = 4
    gantry_iso_size = 0.3
    cax2bb_max_distance = 0.9
    cax2bb_median_distance = 0.8
    bb_shift_vector = Vector(x=-0.4, y=0.6, z=0.6)
    axis_of_rotation = {0: Axis.REFERENCE, 1: Axis.GANTRY, 2: Axis.GANTRY, 3: Axis.GANTRY}
class WLUseFileNames(WinstonLutzMixin, TestCase):
    """Same dataset as WLDontUseFileNames, but axis values parsed from the file names instead."""
    file_path = ['Naming.zip']
    use_filenames = True
    num_images = 4
    collimator_iso_size = 1.2
    cax2bb_max_distance = 0.9
    cax2bb_median_distance = 0.8
    bb_shift_vector = Vector(y=0.6)
    axis_of_rotation = {0: Axis.COLLIMATOR, 1: Axis.COLLIMATOR, 2: Axis.COLLIMATOR, 3: Axis.COLLIMATOR}
class WLBadFilenames(TestCase):
    """use_filenames=True must reject files whose names don't encode axis values."""

    def test_bad_filenames(self):
        # tests_basic that using filenames with incorrect syntax will fail
        wl_dir = osp.join(TEST_DIR, 'Bad-Names.zip')
        with self.assertRaises(ValueError):
            wl = WinstonLutz.from_zip(wl_dir, use_filenames=True)
            wl.analyze()
| 2.109375 | 2 |
django_test/users/urls.py | sdfxisme/lesson21 | 0 | 12769186 | <reponame>sdfxisme/lesson21<filename>django_test/users/urls.py
from django.contrib import admin
from django.urls import path, include
from . import views
from django.contrib.auth.views import LogoutView
# URL namespace, enabling reverses like 'users:login'.
app_name = 'users'

urlpatterns = [
    # Django's built-in logout view; login/register are this app's class-based views.
    path('logout/', LogoutView.as_view(), name='logout'),
    path('login/', views.UserLoginView.as_view(), name='login'),
    path('register/', views.UserCreateView.as_view(), name='register'),
]
boxing_py/boxing.py | br-n518/small_games | 0 | 12769187 | #!/usr/bin/env python3
def clear_screen(lines=128):
	"""Push old output off-screen by printing *lines* newlines (minimum 64)."""
	count = lines if isinstance(lines, int) and lines >= 64 else 64
	print('\n' * count, end='')
class Player:
	"""One boxing participant: holds a running score plus the current turn's choices."""
	def __init__(self, n):
		# n: the player's display name
		self.score = 0
		self.name = n
		self.attack1 = ''  # attack height: 'high' or 'low'
		self.attack2 = ''  # attack side: 'left' or 'right'
		self.guard = ''  # guard choice: 'high', 'low', 'left' or 'right'
	def step(self):
		"""Interactively read this player's attack (height + side) and guard for the turn."""
		clear_screen()
		print(self.name + '\'s turn:')
		self.attack1 = ''
		self.attack2 = ''
		# Keep prompting until both attack components have been recognised.
		while len(self.attack1) <= 0 or len(self.attack2) <= 0:
			print()
			a = ''
			# only re-prompt for the components still missing
			if len(self.attack1) <= 0:
				print('High or Low?')
			if len(self.attack2) <= 0:
				print('Left or Right?')
			while len(a) <= 0:
				a = input('Attack: ').lower()
			# test input: only the first two letters matter (HI/LO/LE/RI)
			if 'hi' in a:
				self.attack1 = 'high'
			elif 'lo' in a:
				self.attack1 = 'low'
			if 'le' in a:
				self.attack2 = 'left'
			elif 'ri' in a:
				self.attack2 = 'right'
		self.guard = ''
		# Same loop for the single guard choice.
		while len(self.guard) <= 0:
			print()
			a = ''
			print('High, Low, Left, or Right?')
			while len(a) <= 0:
				a = input('Guard: ').lower()
			# test input
			if 'hi' in a:
				self.guard = 'high'
			elif 'lo' in a:
				self.guard = 'low'
			elif 'le' in a:
				self.guard = 'left'
			elif 'ri' in a:
				self.guard = 'right'
		clear_screen()
	def attack(self, p):
		"""Resolve this player's stored attack against *p*'s guard; an unblocked hit scores a point."""
		print(self.name + ' attacks ' + p.name + ' with a ' + self.attack1 + ' ' + self.attack2 + '.')
		print(p.name + ' guards ' + p.guard + '.')
		# a guard blocks if it matches either the height or the side of the attack
		if (self.attack1 != p.guard) and (self.attack2 != p.guard):
			print('HIT!')
			self.score += 1
		else:
			print('BLOCK!')
		print()
class Engine:
    """Game driver: prompts for setup in __init__, then runs rounds in mainloop()."""

    def __init__(self):
        self.players = list()
        x = input("How many players?: ")
        print()
        # non-numeric input defaults to 2 players; valid range is clamped to 2..8
        try:
            numOfPlayers = int(x)
        except ValueError:
            numOfPlayers = 2
        if numOfPlayers > 8:
            numOfPlayers = 8
        elif numOfPlayers < 2:
            numOfPlayers = 2
        print('Setting to '+ str(numOfPlayers) +' players.\n')
        x = 1
        print('(Player Names)')
        # collect a non-empty name for each player
        while len(self.players) < numOfPlayers:
            a = input('Player '+str(x)+': ')
            if len(a) > 0:
                self.players.append(Player(a))
                x += 1
        print()
        # keep prompting until a positive integer round count is given
        self.rounds = None
        while not isinstance(self.rounds, int) or self.rounds < 1:
            self.rounds = input('# of Rounds: ')
            try:
                self.rounds = int(self.rounds)
            except ValueError:
                self.rounds = None
        self.roundsC = self.rounds  # remaining-rounds counter
        input("\nNOTE: You only need the first two letters. (HI LO LE RI) ")

    def mainloop(self):
        """Run all rounds: every player picks moves, then everyone attacks everyone."""
        if len(self.players) > 1:
            self.running = True
            self.roundsC = self.rounds
            for p in self.players:
                p.score = 0
            while self.running and self.roundsC > 0:
                clear_screen()
                print('Round ' + str((self.rounds - self.roundsC) + 1) + ':')
                input('\n( Begin ) ...')
                for p in self.players:
                    p.step()
                # process player attacks: each player attacks every other player
                for p in self.players:
                    for q in self.players:
                        if p != q:
                            p.attack(q)
                # display scores
                for p in self.players:
                    print(p.name + ': ' + str(p.score))
                a = ''
                while 'ok' not in a:
                    a = input('\n( Type "ok" ) ').lower()
                self.roundsC -= 1
            print('\nFinal scores:\n')
            for p in self.players:
                print(p.name + ': ' + str(p.score) + '\n')
def main():
    """Entry point: build the engine (prompts for setup) and run the match."""
    Engine().mainloop()


if __name__ == "__main__":
    main()
| 3.75 | 4 |
DyCommon/Ui/DyStatsDataFrameTableWidget.py | Leonardo-YXH/DevilYuan | 135 | 12769188 | <filename>DyCommon/Ui/DyStatsDataFrameTableWidget.py
from PyQt5 import QtCore
import pandas as pd
from DyCommon.Ui.DyStatsTableWidget import *
class DyStatsDataFrameTableWidget(DyStatsTableWidget):
    """Read-only table widget that displays only a DataFrame's columns.

    The DataFrame index is not shown; callers that need it must convert
    the index into a regular column beforehand.
    """

    def __init__(self, df, parent=None):
        super().__init__(parent=parent, readOnly=True, index=False, floatCut=True, autoScroll=False)

        self._initDf(df)

    def _initDf(self, df):
        # populate the widget: header from the column names, then all rows
        column_names = list(df.columns)
        row_values = df.values.tolist()

        self.setColNames(column_names)
        self.fastAppendRows(row_values)
| 2.609375 | 3 |
gesund_projekt/xps/migrations/0002_xp_datestamp.py | asis2016/gesund-projekt | 0 | 12769189 | <reponame>asis2016/gesund-projekt
# Generated by Django 4.0.1 on 2022-06-05 06:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an auto-updating 'datestamp' date field to the Xp model.

    dependencies = [
        ('xps', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='xp',
            name='datestamp',
            # auto_now=True: the field is set to the current date on every save
            field=models.DateField(auto_now=True),
        ),
    ]
| 1.539063 | 2 |
test.py | elffer/ubi_pingan | 0 | 12769190 | <reponame>elffer/ubi_pingan
# -*- coding:utf8 -*-
import os
import csv
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
path_train = "data/dm/train.csv"  # training data file
path_test = "data/dm/test.csv"  # test data file
path_test_out = "model/"  # prediction output path: model/xx.csv; must contain exactly one CSV file
def read_csv_test(train_path, test_path):
    """Load feature matrices and targets from a train CSV and a test CSV.

    Both files are expected to have 10 columns; the header row consumed by
    ``pd.read_csv`` is replaced with the canonical column names below.

    :param train_path: path to the training CSV
    :param test_path: path to the test CSV
    :return: ``(x_train, x_test, y_train, y_test)`` — note this order
        (the original docstring listed it incorrectly).
    """
    features = ["TIME", "TRIP_ID", "LONGITUDE", "LATITUDE", "DIRECTION", "HEIGHT", "SPEED",
                "CALLSTATE"]
    # train data
    train_df = pd.read_csv(train_path)
    train_df.columns = ["TERMINALNO", "TIME", "TRIP_ID", "LONGITUDE", "LATITUDE", "DIRECTION", "HEIGHT", "SPEED",
                        "CALLSTATE", "Y"]
    x_train = train_df[features].values
    y_train = train_df['Y']
    # test data
    test_df = pd.read_csv(test_path)
    test_df.columns = ["TERMINALNO", "TIME", "TRIP_ID", "LONGITUDE", "LATITUDE", "DIRECTION", "HEIGHT", "SPEED",
                       "CALLSTATE", "Y"]
    x_test = test_df[features].values
    y_test = test_df['Y']
    return x_train, x_test, y_train, y_test
def split_demo_csv(train_path, test_path, train_frac):
    """Split one labelled CSV into a train part and a test part.

    WARNING: destructive — the input file at ``train_path`` is overwritten
    with the train split; the test split is written to ``test_path``.

    :param train_path: path of the source CSV (also the train-split output)
    :param test_path: output path for the test split
    :param train_frac: fraction of rows assigned to the train split
    """
    # for filename in os.listdir(path_train):
    data = pd.read_csv(train_path)
    data.columns = ["TERMINALNO", "TIME", "TRIP_ID", "LONGITUDE", "LATITUDE", "DIRECTION", "HEIGHT", "SPEED",
                    "CALLSTATE", "Y"]
    train_df, test_df = train_validate_test_split(data, train_percent=train_frac, seed=None)
    train_df.to_csv(train_path)
    test_df.to_csv(test_path)
def read_predict_data(data_path):
    """Load prediction input (unlabelled) data from a CSV file.

    The first row of the file is consumed as a header by ``pd.read_csv``
    and the columns are then renamed to the expected 9-column schema.

    :param data_path: path to a CSV file with 9 columns
    :return: ``(terminal_ids, x_pred)`` where ``terminal_ids`` is the
        TERMINALNO column (Series) and ``x_pred`` is the feature matrix
        (ndarray, 8 columns).
    """
    data_df = pd.read_csv(data_path)
    data_df.columns = ["TERMINALNO", "TIME", "TRIP_ID", "LONGITUDE", "LATITUDE", "DIRECTION", "HEIGHT", "SPEED",
                       "CALLSTATE"]
    features = ["TIME", "TRIP_ID", "LONGITUDE", "LATITUDE", "DIRECTION", "HEIGHT", "SPEED",
                "CALLSTATE"]
    x_pred = data_df[features].values
    # renamed from `id`: don't shadow the builtin
    terminal_ids = data_df['TERMINALNO']
    return terminal_ids, x_pred
def train_validate_test_split(df, train_percent=.7, seed=None):
    """Randomly split ``df`` into a train and a test partition.

    :param df: input DataFrame
    :param train_percent: fraction of rows assigned to the train split
    :param seed: optional numpy random seed for reproducibility
    :return: ``(train, test)`` DataFrames (disjoint, covering all rows)
    """
    np.random.seed(seed)
    perm = np.random.permutation(df.index)
    m = len(df.index)
    train_end = int(train_percent * m)
    # fix: ``.ix`` was removed in pandas 1.0; ``.loc`` with the permuted
    # index labels is the supported equivalent
    train = df.loc[perm[:train_end]]
    test = df.loc[perm[train_end:]]
    return train, test
def write_csv(data_df, save_path):
    """Write the Id/Pred columns of ``data_df`` to ``save_path`` (no index, no header)."""
    data_df.to_csv(save_path, columns=['Id', 'Pred'], index=False, header=False)
def process():
    """Demo processing step: generate random predictions for the test data
    and store the result file in the prediction output path.
    """
    with open(path_test) as lines:
        with(open(os.path.join(path_test_out, "test.csv"), mode="w")) as outer:
            writer = csv.writer(outer)
            i = 0
            ret_set = set([])  # TERMINALNO values already written (dedupe)
            for line in lines:
                if i == 0:
                    i += 1
                    writer.writerow(["Id", "Pred"])  # exactly two columns: user Id and prediction (note the casing)
                    continue
                item = line.split(",")
                if item[0] in ret_set:
                    continue
                # a random value stands in for a real model prediction here
                writer.writerow([item[0], np.random.rand()])  # random value
                ret_set.add(item[0])  # IDs must be unique per the contest rules — dedupe before writing
def train():
    """Fit a linear regression on the train split and print sample predictions.

    Reads the train and test CSVs from the module-level paths.
    """
    # fix: read_csv_test takes (train_path, test_path); the old call passed a
    # non-existent ``train_frac`` keyword and raised TypeError at runtime.
    x_train, x_test, y_train, y_test = read_csv_test(path_train, path_test)
    model = LinearRegression()
    model.fit(x_train, y_train)
    # predict off-line
    y_predict = model.predict(x_test)
    # predict on-line
    y_pred_df = pd.DataFrame(y_predict)
    y_pred_df.reset_index()
    print('y_predict = ', y_pred_df.head(n=10))
if __name__ == "__main__":
print("****************** start **********************")
# 程序入口
# train()
| 2.5625 | 3 |
Artesian/Query/_QueryParameters/VersionSelectionType.py | AndreaCuneo/Artesian.SDK-Python | 0 | 12769191 | <filename>Artesian/Query/_QueryParameters/VersionSelectionType.py<gh_stars>0
from enum import Enum
class VersionSelectionType(Enum):
    """Strategies for selecting which version(s) of a versioned series to query.

    Values mirror the Artesian query API — confirm semantics against the
    service documentation.
    """
    LastN = 1         # the last N versions
    MUV = 2           # the most updated version
    LastOfDays = 3    # the last version of each day
    LastOfMonths = 4  # the last version of each month
    Version = 5       # one specific version
    MostRecent = 6    # the most recent version
sw/groundstation/groundstation.py | nzjrs/wasp | 2 | 12769192 | #!/usr/bin/env python
import sys
import os.path
import logging
import optparse
if os.name == "posix":
stream = sys.stderr
else:
#when running from py2exe, if anything is printed to stderr
#then the app shows an annoying dialog when closed
stream = sys.stdout
logging.basicConfig(
level=logging.DEBUG,
format="[%(name)-20s][%(levelname)-7s] %(message)s (%(filename)s:%(lineno)d)",
stream=stream
)
try:
    import gs
except ImportError:
    # probably running from the source dir: make the package importable
    # by adding this file's directory to sys.path
    sys.path.insert(0,os.path.dirname(os.path.abspath(__file__)))
    import gs

import gs.groundstation as groundstation
if __name__ == "__main__":
parser = gs.get_default_command_line_parser(True, True, True)
options, args = parser.parse_args()
if gs.IS_WINDOWS:
import gtk.gdk
gtk.gdk.threads_enter()
groundstation.Groundstation(options).main()
if gs.IS_WINDOWS:
import gtk.gdk
gtk.gdk.threads_leave()
sys.exit(0)
| 2.125 | 2 |
setup.py | comfort-framework/comfort-callgraph | 0 | 12769193 | <gh_stars>0
import os
from setuptools import setup, find_packages, Command
class CleanCommand(Command):
    """Custom clean command to tidy up the project root.

    Removes the build/, dist/ and *.egg-info directories from the
    current working directory.
    """
    user_options = []

    def initialize_options(self):
        # no options for this command
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Remove build artifacts portably instead of shelling out to
        # `rm -vrf`, which only works on POSIX systems.
        import glob
        import shutil
        for target in ['./build', './dist'] + glob.glob('./*.egg-info'):
            if os.path.isdir(target):
                print('removing %s' % target)
                shutil.rmtree(target)
# Package metadata; the entry_points register the call-graph collector as
# both a nose plugin and a pytest plugin.
setup(
    name='callgraph',
    version='0.0.1',
    description='Generate a call graph based on test executions.',
    long_description='CallGraph data collector for COMFORT',
    url='https://github.com/comfort-framework/comfort-callgraph/tree/master/comfort-callgraph',
    packages=find_packages(),
    author='<NAME>',
    author_email='<EMAIL>',
    license='Apache',
    classifiers=[
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'License :: OSI Approved :: MIT License',
    ],
    install_requires=[
        'nose',
        'pytest'
    ],
    entry_points={
        'nose.plugins.0.10': [
            'callgraph = callgraph.nose_plugin:CallGraphPlugin',
        ],
        'pytest11': [
            'callgraph = callgraph.pytest_plugin',
        ]
    },
    package_data={
        '': ["*.callgraph*", "*.tests*"],
    },
    cmdclass={
        # `python setup.py clean` uses the custom CleanCommand above
        'clean': CleanCommand,
    }
)
| 1.695313 | 2 |
restfly/__init__.py | AlainODea/restfly | 7 | 12769194 | <reponame>AlainODea/restfly
'''
RESTfly package
'''
from .version import VERSION as __version__ # noqa: F401
from .session import APISession # noqa: F401
from .endpoint import APIEndpoint # noqa: F401
from .iterator import APIIterator # noqa: F401
| 1.039063 | 1 |
wotpy/wot/wot.py | JKRhb/wot-py | 24 | 12769195 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Class that serves as the WoT entrypoint.
"""
import json
import logging
import warnings
import six
import tornado.concurrent
import tornado.gen
import tornado.ioloop
from rx import Observable
from six.moves import range
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from wotpy.support import is_dnssd_supported
from wotpy.utils.utils import handle_observer_finalization
from wotpy.wot.consumed.thing import ConsumedThing
from wotpy.wot.dictionaries.thing import ThingFragment
from wotpy.wot.enums import DiscoveryMethod
from wotpy.wot.exposed.thing import ExposedThing
from wotpy.wot.td import ThingDescription
from wotpy.wot.thing import Thing
# Default HTTP timeout (seconds) when fetching a remote Thing Description.
DEFAULT_FETCH_TIMEOUT_SECS = 20.0


class WoT(object):
    """The WoT object is the API entry point and it is exposed by an
    implementation of the WoT Runtime. The WoT object does not expose
    properties, only methods for discovering, consuming and exposing a Thing."""

    def __init__(self, servient):
        # servient: the Servient this entrypoint operates on
        self._servient = servient
        self._logr = logging.getLogger(__name__)

    @property
    def servient(self):
        """Servient instance of this WoT entrypoint."""
        return self._servient

    @classmethod
    def _is_fragment_match(cls, item, thing_filter):
        """Returns True if the given item (an ExposedThing, Thing or TD)
        matches the fragment in the given Thing filter."""
        # normalize the three accepted types to a ThingDescription first
        td = None

        if isinstance(item, ExposedThing):
            td = ThingDescription.from_thing(item.thing)
        elif isinstance(item, Thing):
            td = ThingDescription.from_thing(item)
        elif isinstance(item, ThingDescription):
            td = item

        assert td

        fragment_dict = thing_filter.fragment if thing_filter.fragment else {}

        # every (key, value) pair of the filter fragment must appear verbatim
        # in the TD dict for a match
        return all(
            item in six.iteritems(td.to_dict())
            for item in six.iteritems(fragment_dict))

    def _build_local_discover_observable(self, thing_filter):
        """Builds an Observable to discover Things using the local method."""
        # serialize every locally exposed Thing whose TD matches the filter
        found_tds = [
            ThingDescription.from_thing(exposed_thing.thing).to_str()
            for exposed_thing in self._servient.exposed_things
            if self._is_fragment_match(exposed_thing, thing_filter)
        ]

        # noinspection PyUnresolvedReferences
        return Observable.of(*found_tds)

    def _build_dnssd_discover_observable(self, thing_filter, dnssd_find_kwargs):
        """Builds an Observable to discover Things using the multicast method based on DNS-SD."""
        if not is_dnssd_supported():
            warnings.warn("Unsupported DNS-SD multicast discovery")
            # noinspection PyUnresolvedReferences
            return Observable.empty()

        dnssd_find_kwargs = dnssd_find_kwargs if dnssd_find_kwargs else {}

        if not self._servient.dnssd:
            # noinspection PyUnresolvedReferences
            return Observable.empty()

        def subscribe(observer):
            """Browses the Servient services using DNS-SD and retrieves the TDs that match the filters."""
            # mutable flag shared with unsubscribe() to cancel the coroutine
            state = {"stop": False}

            @handle_observer_finalization(observer)
            @tornado.gen.coroutine
            def callback():
                address_port_pairs = yield self._servient.dnssd.find(**dnssd_find_kwargs)

                def build_pair_url(idx, path=None):
                    # build "http://addr:port/<path>" for the idx-th service
                    addr, port = address_port_pairs[idx]
                    base = "http://{}:{}".format(addr, port)
                    path = path if path else ''
                    return "{}/{}".format(base, path.strip("/"))

                http_client = AsyncHTTPClient()

                # fetch every service's TD catalogue concurrently
                catalogue_resps = [
                    http_client.fetch(build_pair_url(idx))
                    for idx in range(len(address_port_pairs))
                ]

                # process catalogue responses as each one completes
                wait_iter = tornado.gen.WaitIterator(*catalogue_resps)

                while not wait_iter.done() and not state["stop"]:
                    try:
                        catalogue_resp = yield wait_iter.next()
                    except Exception as ex:
                        self._logr.warning(
                            "Exception on HTTP request to TD catalogue: {}".format(ex))
                    else:
                        catalogue = json.loads(catalogue_resp.body)

                        if state["stop"]:
                            return

                        # fetch every TD listed in this service's catalogue
                        td_resps = yield [
                            http_client.fetch(build_pair_url(
                                wait_iter.current_index, path=path))
                            for thing_id, path in six.iteritems(catalogue)
                        ]

                        tds = [
                            ThingDescription(td_resp.body)
                            for td_resp in td_resps
                        ]

                        tds_filtered = [
                            td for td in tds if self._is_fragment_match(td, thing_filter)]

                        [observer.on_next(td.to_str()) for td in tds_filtered]

            def unsubscribe():
                state["stop"] = True

            tornado.ioloop.IOLoop.current().add_callback(callback)

            return unsubscribe

        # noinspection PyUnresolvedReferences
        return Observable.create(subscribe)

    def discover(self, thing_filter, dnssd_find_kwargs=None):
        """Starts the discovery process that will provide ThingDescriptions
        that match the optional argument filter of type ThingFilter."""
        supported_methods = [
            DiscoveryMethod.ANY,
            DiscoveryMethod.LOCAL,
            DiscoveryMethod.MULTICAST
        ]

        if thing_filter.method not in supported_methods:
            err = NotImplementedError("Unsupported discovery method")
            # noinspection PyUnresolvedReferences
            return Observable.throw(err)

        if thing_filter.query:
            err = NotImplementedError(
                "Queries are not supported yet (please use filter.fragment)")
            # noinspection PyUnresolvedReferences
            return Observable.throw(err)

        # ANY merges the results of both the local and the multicast method
        observables = []

        if thing_filter.method in [DiscoveryMethod.ANY, DiscoveryMethod.LOCAL]:
            observables.append(
                self._build_local_discover_observable(thing_filter))

        if thing_filter.method in [DiscoveryMethod.ANY, DiscoveryMethod.MULTICAST]:
            observables.append(self._build_dnssd_discover_observable(
                thing_filter, dnssd_find_kwargs))

        # noinspection PyUnresolvedReferences
        return Observable.merge(*observables)

    @classmethod
    @tornado.gen.coroutine
    def fetch(cls, url, timeout_secs=None):
        """Accepts an url argument and returns a Future
        that resolves with a Thing Description string."""
        timeout_secs = timeout_secs or DEFAULT_FETCH_TIMEOUT_SECS

        http_client = AsyncHTTPClient()
        http_request = HTTPRequest(url, request_timeout=timeout_secs)
        http_response = yield http_client.fetch(http_request)

        # round-trip through ThingDescription to validate the document
        td_doc = json.loads(http_response.body)
        td = ThingDescription(td_doc)

        raise tornado.gen.Return(td.to_str())

    def consume(self, td_str):
        """Accepts a thing description string argument and returns a
        ConsumedThing object instantiated based on that description."""
        td = ThingDescription(td_str)

        return ConsumedThing(servient=self._servient, td=td)

    @classmethod
    def thing_from_model(cls, model):
        """Takes a ThingModel and builds a Thing.
        Raises if the model has an unexpected type."""
        expected_types = (six.string_types, ThingFragment, ConsumedThing)

        if not isinstance(model, expected_types):
            raise ValueError("Expected one of: {}".format(expected_types))

        if isinstance(model, six.string_types):
            # a string is parsed as a serialized Thing Description
            thing = ThingDescription(doc=model).build_thing()
        elif isinstance(model, ThingFragment):
            thing = Thing(thing_fragment=model)
        else:
            # a ConsumedThing: rebuild the Thing from its TD
            thing = model.td.build_thing()

        return thing

    def produce(self, model):
        """Accepts a model argument of type ThingModel and returns an ExposedThing
        object, locally created based on the provided initialization parameters."""
        thing = self.thing_from_model(model)
        exposed_thing = ExposedThing(servient=self._servient, thing=thing)
        self._servient.add_exposed_thing(exposed_thing)

        return exposed_thing

    @tornado.gen.coroutine
    def produce_from_url(self, url, timeout_secs=None):
        """Return a Future that resolves to an ExposedThing created
        from the thing description retrieved from the given URL."""
        td_str = yield self.fetch(url, timeout_secs=timeout_secs)
        exposed_thing = self.produce(td_str)

        raise tornado.gen.Return(exposed_thing)

    @tornado.gen.coroutine
    def consume_from_url(self, url, timeout_secs=None):
        """Return a Future that resolves to a ConsumedThing created
        from the thing description retrieved from the given URL."""
        td_str = yield self.fetch(url, timeout_secs=timeout_secs)
        consumed_thing = self.consume(td_str)

        raise tornado.gen.Return(consumed_thing)

    @tornado.gen.coroutine
    def register(self, directory, thing):
        """Generate the Thing Description as td, given the Properties,
        Actions and Events defined for this ExposedThing object.
        Then make a request to register td to the given WoT Thing Directory."""
        raise NotImplementedError()

    @tornado.gen.coroutine
    def unregister(self, directory, thing):
        """Makes a request to unregister the thing from the given WoT Thing Directory."""
        raise NotImplementedError()
| 2.09375 | 2 |
util/addser.py | chriswaudby/pp | 0 | 12769196 | <gh_stars>0
#!/usr/bin/env python
import sys
import argparse
import numpy as np
import nmrglue as ng
# Command line: one output path plus one or more input ser files (opened rb).
parser = argparse.ArgumentParser(description='Sum a collection of bruker ser files. Data will be truncated to fit shortest ser file.')
parser.add_argument('-out', metavar='outputser', dest='out', help='Output filename', required=True)
parser.add_argument('serfiles', metavar='inputser', type=argparse.FileType('rb'), nargs='+',
                    help='ser files to be summed')
args = parser.parse_args()

fileformat = '<i4'  # little-endian 32-bit integers (Bruker ser format)

# load first file
serfile = args.serfiles[0]
print('Loading {0}'.format(serfile))
data = np.frombuffer(serfile.read(), dtype=fileformat)

# accumulate the remaining files, truncating to the shortest length seen
if len(args.serfiles)>1:
    for serfile in args.serfiles[1:]:
        print('Adding {0}'.format(serfile))
        data1 = np.frombuffer(serfile.read(), dtype=fileformat)
        maxlength = min([data.size, data1.size])
        if data.size != data1.size:
            print("*** WARNING! ser files are being truncated! ***")
        data = data[:maxlength] + data1[:maxlength]

# write output
print('Writing output to {0}'.format(args.out))
with open(args.out,'wb') as f:
    # fix: ndarray.tostring() was removed from numpy; tobytes() is the
    # supported equivalent
    f.write(data.astype(fileformat).tobytes())
print('Finished!')
| 2.703125 | 3 |
rjgtoys/cli/examples/greeter3.py | bobgautier/rjgtoys-cli | 0 | 12769197 | <filename>rjgtoys/cli/examples/greeter3.py
"""Example tool"""
from rjgtoys.cli import Tool
tool = Tool.from_yaml("""
_package: rjgtoys.cli.examples
say hello: hello.HelloCommand
say goodbye: goodbye.GoodbyeCommand
"""
)
if __name__ == "__main__":
import sys
sys.exit(tool.main())
| 1.484375 | 1 |
examples/demo_project/master_service/app/migrations/0002_fixtures.py | akodelia/django-cqrs | 52 | 12769198 | # Copyright © 2021 Ingram Micro Inc. All rights reserved.
from django.db import migrations
def create_users(apps, schema_editor):
    """Seed the User table with a fixed set of usernames."""
    User = apps.get_model('app', 'User')
    usernames = ('Mal', 'Zoe', 'Wash', 'Inara', 'Jayne', 'Kaylee', 'Simon', 'River')
    User.objects.bulk_create([User(username=name) for name in usernames])
def create_products(apps, schema_editor):
    """Seed product types and the products belonging to each."""
    ProductType = apps.get_model('app', 'ProductType')
    Product = apps.get_model('app', 'Product')

    products = {
        'food': ['apple', 'meat', 'banana'],
        'weapon': ['blaster', 'gun', 'knife'],
        'starships': ['Serenity'],
    }

    pending = []
    for type_name, product_names in products.items():
        product_type = ProductType.objects.create(name=type_name)
        pending.extend(
            Product(name=name, product_type=product_type)
            for name in product_names
        )
    Product.objects.bulk_create(pending)
class Migration(migrations.Migration):
    # Data migration: loads user and product fixtures. Reverse is a no-op
    # (migrations.RunPython.noop), so rolling back leaves the rows in place.

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(create_users, migrations.RunPython.noop),
        migrations.RunPython(create_products, migrations.RunPython.noop),
    ]
| 2.0625 | 2 |
last_time/reading_utils.py | abdelabdalla/deepmind-research | 0 | 12769199 | <filename>last_time/reading_utils.py
import functools
import numpy as np
import tensorflow as tf
# Per-step (sequence) features stored in each serialized example.
_FEATURE_DESCRIPTION = {
    'velocity': tf.io.VarLenFeature(tf.string)
}

# dtypes used to decode each feature: 'in' is the numpy dtype of the raw
# bytes, 'out' the tf dtype of the resulting tensor.
_FEATURE_DTYPES = {
    'velocity': {
        'in': np.float32,
        'out': tf.float32
    }
}

# Per-example (context) features: mesh geometry and connectivity.
_CONTEXT_FEATURES = {
    'key': tf.io.FixedLenFeature([], tf.int64, default_value=0),
    'locations': tf.io.VarLenFeature(tf.string),
    'connections': tf.io.VarLenFeature(tf.string),
    'n_nodes': tf.io.VarLenFeature(tf.string),
    'n_cons': tf.io.VarLenFeature(tf.string)
}
def convert_to_tensor(x, encoded_dtype):
    """Decode a list of serialized byte strings into a single tensor.

    A single element is parsed as a serialized TF tensor; multiple elements
    are decoded with numpy and stacked row-wise into one tensor.

    :param x: list of byte-string tensors (e.g. VarLenFeature values)
    :param encoded_dtype: dtype the raw bytes were encoded with
    :return: the decoded tensor
    """
    if len(x) == 1:
        return tf.io.parse_tensor(x[0], out_type=encoded_dtype)
    # fix: the original loop maintained an unused counter variable; a
    # comprehension expresses the row decoding directly
    rows = [np.frombuffer(el.numpy(), dtype=encoded_dtype) for el in x]
    return tf.convert_to_tensor(np.array(rows))
def parse_serialized_simulation_example(example_proto):
    """Parse one serialized SequenceExample into (context, features) tensors.

    ``features['velocity']`` is reshaped to [200, -1, 2]; the context tensors
    are decoded and reshaped to locations [-1, 2], connections [-1, 3] and
    flat n_nodes / n_cons vectors.
    """
    context, parsed_features = tf.io.parse_single_sequence_example(
        example_proto,
        context_features=_CONTEXT_FEATURES,
        sequence_features=_FEATURE_DESCRIPTION)
    # decode each sequence feature via py_function (bytes -> tensor)
    for feature_key, item in parsed_features.items():
        convert_fn = functools.partial(
            convert_to_tensor, encoded_dtype=_FEATURE_DTYPES[feature_key]['in'])
        parsed_features[feature_key] = tf.py_function(
            convert_fn, inp=[item.values], Tout=_FEATURE_DTYPES[feature_key]['out'])

    # sanity check: fail loudly on NaN/Inf velocities
    tf.debugging.assert_all_finite(parsed_features['velocity'], 'No one will ever love you')

    # [time=200, nodes, xy]; node count inferred per-example
    velocity_shape = [200, -1, 2]

    parsed_features['velocity'] = tf.reshape(parsed_features['velocity'], velocity_shape)

    context['locations'] = tf.py_function(
        functools.partial(convert_to_tensor, encoded_dtype=np.float32),
        inp=[context['locations'].values],
        Tout=[tf.float32])

    context['connections'] = tf.py_function(
        functools.partial(convert_to_tensor, encoded_dtype=np.int64),
        inp=[context['connections'].values],
        Tout=[tf.int64])

    context['n_nodes'] = tf.py_function(
        functools.partial(convert_to_tensor, encoded_dtype=tf.int32),
        inp=[context['n_nodes'].values],
        Tout=[tf.int32])

    context['n_cons'] = tf.py_function(
        functools.partial(convert_to_tensor, encoded_dtype=tf.int32),
        inp=[context['n_cons'].values],
        Tout=[tf.int32])

    context['locations'] = tf.reshape(context['locations'], [-1, 2])
    context['connections'] = tf.reshape(context['connections'], [-1, 3])
    context['n_nodes'] = tf.reshape(context['n_nodes'], [-1])
    context['n_cons'] = tf.reshape(context['n_cons'], [-1])

    tf.debugging.assert_all_finite(parsed_features['velocity'], 'Just give up on dissertation already')

    return context, parsed_features
def split_trajectory(context, features, window_length=7):
    """Slice one trajectory into overlapping windows of ``window_length`` steps.

    Context tensors (geometry/connectivity) are tiled so that every window
    carries a copy. Returns a Dataset with one element per window.
    """
    trajectory_length = features['velocity'].get_shape().as_list()[0]
    # number of sliding windows that fit in the trajectory
    input_trajectory_length = trajectory_length - window_length + 1

    model_input_features = {}

    # replicate the static context once per window
    model_input_features['locations'] = tf.tile(
        tf.expand_dims(context['locations'], axis=0),
        [input_trajectory_length, 1, 1])
    model_input_features['connections'] = tf.tile(
        tf.expand_dims(context['connections'], axis=0),
        [input_trajectory_length, 1, 1])
    model_input_features['n_nodes'] = tf.tile(
        tf.expand_dims(context['n_nodes'], axis=0),
        [input_trajectory_length, 1])
    model_input_features['n_cons'] = tf.tile(
        tf.expand_dims(context['n_cons'], axis=0),
        [input_trajectory_length, 1])

    # stack the velocity windows: window i covers steps [i, i+window_length)
    pos_stack = []
    for idx in range(input_trajectory_length):
        pos_stack.append(features['velocity'][idx:idx + window_length])
    model_input_features['velocity'] = tf.stack(pos_stack)

    return tf.data.Dataset.from_tensor_slices(model_input_features)
tests/test_kpm.py | lise1020/pybinding | 159 | 12769200 | <reponame>lise1020/pybinding
import pytest
import numpy as np
import pybinding as pb
from pybinding.repository import graphene, group6_tmd
# Named model configurations shared by several parametrized tests below.
models = {
    'graphene-pristine': [graphene.monolayer(), pb.rectangle(15)],
    'graphene-pristine-oversized': [graphene.monolayer(), pb.rectangle(20)],
    'graphene-const_potential': [graphene.monolayer(), pb.rectangle(15),
                                 pb.constant_potential(0.5)],
    'graphene-magnetic_field': [graphene.monolayer(), pb.rectangle(15),
                                graphene.constant_magnetic_field(1e3)],
}
@pytest.fixture(scope='module', ids=list(models.keys()), params=models.values())
def model(request):
    # one pb.Model per entry in `models`, built once per module
    return pb.Model(*request.param)
# LDOS is also exercised on a 3-band TMD model in addition to the graphene set.
ldos_models = {**models, "mos2": [group6_tmd.monolayer_3band("MoS2"), pb.rectangle(6)]}


@pytest.mark.parametrize("params", ldos_models.values(), ids=list(ldos_models.keys()))
def test_ldos(params, baseline, plot_if_fails):
    """All KPM backend configurations must produce the same LDOS (vs baseline)."""
    configurations = [
        {'matrix_format': "CSR", 'optimal_size': False, 'interleaved': False},
        {'matrix_format': "CSR", 'optimal_size': True, 'interleaved': False},
        {'matrix_format': "CSR", 'optimal_size': False, 'interleaved': True},
        {'matrix_format': "ELL", 'optimal_size': True, 'interleaved': True},
    ]
    model = pb.Model(*params)
    kernel = pb.lorentz_kernel()
    strategies = [pb.kpm(model, kernel=kernel, silent=True, **c) for c in configurations]

    energy = np.linspace(0, 2, 25)
    results = [kpm.calc_ldos(energy, broadening=0.15, position=[0, 0.07], reduce=False)
               for kpm in strategies]

    # the first configuration defines the stored baseline
    expected = results[0].with_data(baseline(results[0].data.astype(np.float32)))
    for i in range(len(results)):
        plot_if_fails(results[i], expected, 'plot', label=i)

    for result in results:
        assert pytest.fuzzy_equal(result, expected, rtol=1e-3, atol=1e-6)
def test_moments(model, plot_if_fails):
    """The raw moments API must reproduce calc_ldos when expanded manually,
    and reject mismatched / wrongly-typed alpha vectors."""
    energy = np.linspace(0, 2, 25)
    broadening = 0.15
    position = dict(position=[0, 0], sublattice="A")

    kpm = pb.kpm(model, silent=True)
    expected_ldos = kpm.calc_ldos(energy, broadening, **position)

    def manual_ldos():
        # KPM expansion done by hand from the raw moments
        idx = model.system.find_nearest(**position)
        alpha = np.zeros(model.hamiltonian.shape[0])
        alpha[idx] = 1

        a, b = kpm.scaling_factors
        num_moments = kpm.kernel.required_num_moments(broadening / a)
        moments = kpm.moments(num_moments, alpha)

        ns = np.arange(num_moments)
        scaled_energy = (energy - b) / a
        k = 2 / (a * np.pi * np.sqrt(1 - scaled_energy**2))
        chebyshev = np.cos(ns * np.arccos(scaled_energy[:, np.newaxis]))
        return k * np.sum(moments.real * chebyshev, axis=1)

    ldos = expected_ldos.with_data(manual_ldos())
    plot_if_fails(ldos, expected_ldos, "plot")
    assert pytest.fuzzy_equal(ldos, expected_ldos, rtol=1e-4, atol=1e-6)

    # alpha with the wrong size must be rejected
    with pytest.raises(RuntimeError) as excinfo:
        kpm.moments(10, [1, 2, 3])
    assert "Size mismatch" in str(excinfo.value)

    # complex alpha with a real Hamiltonian must be rejected
    with pytest.raises(RuntimeError) as excinfo:
        kpm = pb.kpm(pb.Model(graphene.monolayer()))
        kpm.moments(10, [1j, 2j])
    assert "Hamiltonian is real, but the given argument 'alpha' is complex" in str(excinfo.value)
def test_kpm_multiple_indices(model):
    """KPM can take a vector of column indices and return the Green's function for all of them"""
    kpm = pb.kpm(model, silent=True)

    num_sites = model.system.num_sites
    i, j = num_sites // 2, num_sites // 4
    energy = np.linspace(-0.3, 0.3, 10)
    broadening = 0.8

    cols = [j, j + 1, j + 2]
    gs = kpm.calc_greens(i, cols, energy, broadening)
    assert len(gs) == len(cols)

    # G(i, j) must equal G(j, i) — checked against the first column result
    g = kpm.calc_greens(j, i, energy, broadening)
    assert pytest.fuzzy_equal(gs[0], g)
def test_kpm_reuse():
    """KPM should return the same result when a single object is used for multiple calculations"""
    model = pb.Model(graphene.monolayer(), graphene.hexagon_ac(10))
    kpm = pb.kpm(model, silent=True)
    energy = np.linspace(-5, 5, 50)
    broadening = 0.1

    for position in [0, 0], [6, 0]:
        actual = kpm.calc_ldos(energy, broadening, position)
        # a fresh KPM object is the reference for each position
        expected = pb.kpm(model).calc_ldos(energy, broadening, position)
        assert pytest.fuzzy_equal(actual, expected, rtol=1e-3, atol=1e-6)
def test_ldos_sublattice():
    """LDOS for A and B sublattices should be antisymmetric for graphene with a mass term"""
    model = pb.Model(graphene.monolayer(), graphene.hexagon_ac(10), graphene.mass_term(1))
    kpm = pb.kpm(model, silent=True)

    a, b = (kpm.calc_ldos(np.linspace(-5, 5, 50), 0.1, [0, 0], sub) for sub in ('A', 'B'))
    # B's LDOS reversed in energy must match A's
    assert pytest.fuzzy_equal(a.data, b.data[::-1], rtol=1e-3, atol=1e-6)
def test_optimized_hamiltonian():
    """Currently available only in internal interface"""
    from pybinding import _cpp
    model = pb.Model(graphene.monolayer(), graphene.hexagon_ac(10))
    h = model.hamiltonian
    oh = _cpp.OptimizedHamiltonian(model.raw_hamiltonian, 0)

    # the reordered matrix must preserve size and index coverage
    assert oh.matrix.shape == h.shape
    assert oh.sizes[-1] == h.shape[0]
    assert len(oh.indices) == h.shape[0]
# Larger systems for the DOS tests (no pristine cases here).
dos_models = {
    'graphene-const_potential': [graphene.monolayer(), pb.rectangle(25),
                                 pb.constant_potential(0.5)],
    'graphene-magnetic_field': [graphene.monolayer(), pb.rectangle(25),
                                graphene.constant_magnetic_field(1e3)],
}


@pytest.mark.parametrize("params", dos_models.values(), ids=list(dos_models.keys()))
def test_dos(params, baseline, plot_if_fails):
    """Both ELL backend configurations must produce the same DOS (vs baseline)."""
    configurations = [
        {'matrix_format': "ELL", 'optimal_size': False, 'interleaved': False},
        {'matrix_format': "ELL", 'optimal_size': True, 'interleaved': True},
    ]
    model = pb.Model(*params)
    kernel = pb.lorentz_kernel()
    strategies = [pb.kpm(model, kernel=kernel, silent=True, **c) for c in configurations]

    energy = np.linspace(0, 2, 25)
    results = [kpm.calc_dos(energy, broadening=0.15) for kpm in strategies]

    expected = results[0].with_data(baseline(results[0].data.astype(np.float32)))
    for i in range(len(results)):
        plot_if_fails(results[i], expected, 'plot', label=i)

    for result in results:
        assert pytest.fuzzy_equal(result, expected, rtol=1e-3, atol=1e-6)
# Model configurations for the Kubo-Bastin conductivity tests.
cond_models = {
    'graphene-const_potential': [graphene.monolayer(), pb.rectangle(20),
                                 pb.constant_potential(0.5)],
    'graphene-magnetic_field': [graphene.monolayer(), pb.rectangle(20),
                                graphene.constant_magnetic_field(1e3)]
}


@pytest.mark.parametrize("params", cond_models.values(), ids=list(cond_models.keys()))
def test_conductivity(params, baseline, plot_if_fails):
    """Both ELL backend configurations must produce the same conductivity (vs baseline)."""
    configurations = [
        {'matrix_format': "ELL", 'optimal_size': False, 'interleaved': False},
        {'matrix_format': "ELL", 'optimal_size': True, 'interleaved': True},
    ]
    model = pb.Model(*params)
    kernel = pb.lorentz_kernel()
    # the energy_range is fixed explicitly so all strategies share scaling
    strategies = [pb.kpm(model, energy_range=[-9, 9], kernel=kernel, silent=True, **c)
                  for c in configurations]

    energy = np.linspace(-2, 2, 25)
    results = [kpm.calc_conductivity(energy, broadening=0.5, temperature=0, num_points=200)
               for kpm in strategies]

    expected = results[0].with_data(baseline(results[0].data.astype(np.float32)))
    for i in range(len(results)):
        plot_if_fails(results[i], expected, "plot", label=i)

    for result in results:
        assert pytest.fuzzy_equal(result, expected, rtol=1e-2, atol=1e-5)
| 1.960938 | 2 |
src/autotrainer/custom_vision_tests.py | JasonTheDeveloper/Custom-Vision-Autotrainer | 10 | 12769201 |
import os
import unittest
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import Project, ImageUrlCreateEntry
from autotrainer.blob.blob_client import LabelledBlob
from autotrainer.custom_vision.custom_vision_client import CustomVisionClient
from autotrainer.custom_vision.domain import Domain, to_domain_id
from autotrainer.custom_vision.classification_type import ClassificationType
# Credentials come from the environment; a missing variable fails fast with KeyError.
CVTK=os.environ['CV_TRAINING_KEY']
endpoint=os.environ['CV_ENDPOINT']

# shared client used by all tests in this module
training_client = CustomVisionTrainingClient(CVTK, endpoint)
class CustomVisionTests(unittest.TestCase):
    """Integration tests against the Custom Vision training service.

    Each test appends the projects it creates to ``self.projects`` so that
    ``tearDown`` can delete them from the service afterwards.
    """

    projects: [Project]

    def tearDown(self):
        # Fix: the original removed items from self.projects while iterating
        # over it, which skips every other element and leaks projects on the
        # service. Delete everything first, then clear the list.
        for project in self.projects:
            training_client.delete_project(project.id)
        self.projects.clear()

    def setUp(self):
        self.projects = []

    def test_create_project(self):
        """A multiclass project can be created and appears in the project list."""
        client = CustomVisionClient(training_client)
        project = client.create_project('test', 'test', Domain.GENERAL_CLASSIFICATION, ClassificationType.MULTICLASS)
        self.projects.append(project)  # add to delete later
        self.assertIsNotNone(project)
        self.assertIsInstance(project, Project)
        self.assertIn('test', project.name)
        projects = training_client.get_projects()
        self.assertIn(project, projects)

    def test_create_project_compact_multilabel(self):
        """A compact multilabel project keeps the requested domain and classification type."""
        client = CustomVisionClient(training_client)
        project = client.create_project('test', 'test', Domain.GENERAL_CLASSIFICATION_COMPACT, ClassificationType.MULTILABEL)
        self.projects.append(project)
        self.assertIsNotNone(project)
        self.assertIsInstance(project, Project)
        self.assertIn('test', project.name)
        self.assertEqual(project.settings.domain_id, to_domain_id(Domain.GENERAL_CLASSIFICATION_COMPACT))
        self.assertEqual(project.settings.classification_type, ClassificationType.MULTILABEL.value)
        projects = training_client.get_projects()
        self.assertIn(project, projects)

    def test_create_image_url_list(self):
        """create_image_url_list maps labelled blobs onto ImageUrlCreateEntry objects."""
        client = CustomVisionClient(training_client)
        project = client.create_project('test', 'test', Domain.GENERAL_CLASSIFICATION, ClassificationType.MULTICLASS)
        self.projects.append(project)  # add to delete later
        labelled_blobs = [LabelledBlob('url1', ['tomato', 'potato']), LabelledBlob('url2', ['banana', 'fig'])]
        image_urls = client.create_image_url_list(project, labelled_blobs)
        for labelled_blob in labelled_blobs:
            self.assertIn(labelled_blob.download_url, [i.url for i in image_urls])
        for image in image_urls:
            self.assertIsInstance(image, ImageUrlCreateEntry)
pyranges/methods/coverage.py | iamjli/pyranges | 0 | 12769202 | import numpy as np
import pandas as pd
from ncls import NCLS
def _number_overlapping(scdf, ocdf, **kwargs):
    """Count, for each interval in scdf, how many intervals in ocdf overlap it.

    Returns a copy of scdf with the count appended as a new column named
    ``overlap_col``, or None when there is nothing to report.
    """
    keep_nonoverlapping = kwargs.get("keep_nonoverlapping", True)
    # NOTE(review): the fallback default True is a bool, not a column name --
    # callers presumably always supply overlap_col; confirm upstream.
    column_name = kwargs.get("overlap_col", True)
    if scdf.empty:
        return None
    if ocdf.empty:
        if keep_nonoverlapping:
            df = scdf.copy()
            df.insert(df.shape[1], column_name, 0)
            return df
        else:
            return None
    # Interval tree over the "other" intervals, keyed by their frame index.
    oncls = NCLS(ocdf.Start.values, ocdf.End.values, ocdf.index.values)
    starts = scdf.Start.values
    ends = scdf.End.values
    indexes = scdf.index.values
    _self_indexes, _other_indexes = oncls.all_overlaps_both(
        starts, ends, indexes)
    s = pd.Series(_self_indexes)
    # value_counts() sorts by count; indexing with s.unique() restores the
    # first-appearance order of the self indexes before resetting the index.
    counts_per_read = s.value_counts()[s.unique()].reset_index()
    counts_per_read.columns = ["Index", "Count"]
    df = scdf.copy()
    if keep_nonoverlapping:
        # Give every interval with no overlaps an explicit count of 0.
        _missing_indexes = np.setdiff1d(scdf.index, _self_indexes)
        missing = pd.DataFrame(data={"Index": _missing_indexes, "Count": 0}, index=_missing_indexes)
        counts_per_read = pd.concat([counts_per_read, missing])
    else:
        df = df.loc[_self_indexes]
    counts_per_read = counts_per_read.set_index("Index")
    # Inserting a Series aligns on df's index, not on positional order.
    df.insert(df.shape[1], column_name, counts_per_read)
    return df
def _coverage(scdf, ocdf, **kwargs):
    """Annotate each interval in scdf with the fraction of it covered by ocdf.

    The fraction is written into a new column named by kwargs["fraction_col"];
    returns None for an empty scdf, and all-zero fractions for an empty ocdf.
    """
    fraction_col = kwargs["fraction_col"]
    if scdf.empty:
        return None
    if ocdf.empty:
        result = scdf.copy()
        result.insert(result.shape[1], fraction_col, 0.0)
        return result
    # Interval tree over the covering intervals.
    tree = NCLS(ocdf.Start.values, ocdf.End.values, ocdf.index.values)
    start_vals = scdf.Start.values
    end_vals = scdf.End.values
    covered_lengths = tree.coverage(start_vals, end_vals, scdf.index.values)
    # Normalise to a fraction of each interval's length; zero-length
    # intervals divide to NaN, which nan_to_num maps back to 0.
    fractions = covered_lengths / (end_vals - start_vals)
    fractions = np.nan_to_num(fractions.astype("float64"))
    result = scdf.copy()
    result.insert(result.shape[1], fraction_col, fractions)
    return result
| 2.390625 | 2 |
fabric_bolt/web_hooks/forms.py | abossard/fabric-bolt | 1 | 12769203 | from django import forms
from django.core.urlresolvers import reverse
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, ButtonHolder, Submit, HTML
from fabric_bolt.web_hooks import models
class HookCreateForm(forms.ModelForm):
    """Form used to create a Hook, optionally pre-bound to a project.

    The project is carried as a hidden char field (its pk) rather than a
    model choice field; ``clean_project`` resolves it back to an instance.
    """
    button_prefix = "Create"
    # Hidden pk of the owning project; empty means a project-less hook.
    project = forms.CharField(widget=forms.HiddenInput(), required=False)
    class Meta:
        model = models.Hook
        fields = [
            'project',
            'url',
        ]
    def __init__(self, *args, **kwargs):
        # Build the crispy-forms layout before the parent constructor runs.
        self.helper = FormHelper()
        self.helper.layout = Layout(
            'project',
            'url',
            ButtonHolder(
                Submit('submit', '%s Hook' % self.button_prefix, css_class='button')
            )
        )
        super(HookCreateForm, self).__init__(*args, **kwargs)
    def clean_project(self, *args, **kwargs):
        """Resolve the posted project pk to a Project instance (or None)."""
        if not self.cleaned_data['project']:
            return None
        project = models.Project.objects.get(pk=int(self.cleaned_data['project']))
        return project
class HookUpdateForm(HookCreateForm):
    """Variant of HookCreateForm whose layout adds a delete button."""
    button_prefix = "Update"
    def __init__(self, *args, **kwargs):
        self.helper = FormHelper()
        instance = kwargs['instance']
        delete_url = reverse('hooks_hook_delete', args=(instance.pk,))
        self.helper.layout = Layout(
            'project',
            'url',
            ButtonHolder(
                Submit('submit', '%s Hook' % self.button_prefix, css_class='button'),
                HTML('<a href="' + delete_url + '" class="btn btn-danger">Delete Hook</a>'),
            )
        )
        # Deliberately skips HookCreateForm.__init__ (which would overwrite
        # self.helper with the create-form layout) and calls the grandparent
        # ModelForm constructor instead.
        super(HookCreateForm, self).__init__(*args, **kwargs)
draw_boundingbox.py | chao0716/topbag | 0 | 12769204 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 16:51:06 2019
@author: chaoz
"""
import xml.etree.ElementTree as ET
import cv2
import os
import numpy as np
only1=0
kdanfkn=0
total_sc=0
image_dir='C:\\Users\\chaoz\\Desktop\\testset'
sname='RGB.png'
dname='D.png'
xmlname='xml'
RGB_dirlist=[]
depth_dirlist=[]
xml_dirlist=[]
for dire in os.listdir(image_dir):
pwd_dir=dire
if sname in os.path.split(pwd_dir)[1]:
RGB_dirlist.append(pwd_dir)
for i in range(len(RGB_dirlist)):
print(i,i/len(RGB_dirlist))
RGB_dir=os.path.join(image_dir,RGB_dirlist[i])
xml_dir='C:\\Users\\chaoz\\Desktop\\testxml\\'+RGB_dirlist[i].split('.')[0]+'.xml'
if os.path.exists(xml_dir)==True:
tree = ET.parse(xml_dir)
rect={}
line=""
root = tree.getroot()
rgb_image = np.array(cv2.imread(RGB_dir, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH))
#open image before draw
# depth_image_path=depth_dir
# depth_image = np.array(cv2.imread(depth_image_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH))
#
# im=depth_image.astype(int)
# min_exclude_0=im[im!=0].min()
# max_exclude_0=im[im!=0].max()
#
# diff = max_exclude_0 - min_exclude_0
#
#
# for i in range (np.shape(im)[0]):
# for j in range(np.shape(im)[1]):
# if im[i,j]!=0:
# im[i,j]=(im[i,j]-min_exclude_0)*255/diff
#
# im[np.where(im==0)]=255
# im2=im
# im3=im
# im2=np.concatenate((im,im2),axis=1)
# im3=np.concatenate((im2,im3),axis=1)
img=rgb_image
for ob in root.iter('object'):
if ob[0].text=='bag1':
for bndbox in ob.iter('bndbox'):
for xmin in bndbox.iter('xmin'):
rect['xmin'] = xmin.text
for ymin in bndbox.iter('ymin'):
rect['ymin'] = ymin.text
for xmax in bndbox.iter('xmax'):
rect['xmax'] = xmax.text
for ymax in bndbox.iter('ymax'):
rect['ymax'] = ymax.text
# draw
cv2.rectangle(img, (int(rect['xmin']), int(rect['ymax'])), (int(rect['xmax']), int(rect['ymin'])), (0, 0, 255), 5)
elif ob[0].text=='bag2':
for bndbox in ob.iter('bndbox'):
for xmin in bndbox.iter('xmin'):
rect['xmin'] = xmin.text
for ymin in bndbox.iter('ymin'):
rect['ymin'] = ymin.text
for xmax in bndbox.iter('xmax'):
rect['xmax'] = xmax.text
for ymax in bndbox.iter('ymax'):
rect['ymax'] = ymax.text
# draw
cv2.rectangle(img, (int(rect['xmin']), int(rect['ymax'])), (int(rect['xmax']), int(rect['ymin'])), (0, 255, 0), 5)
# cv2.imwrite(depth_dir.split('_')[0]+'_boundingbox.png',img)
cv2.imwrite('C:\\Users\\chaoz\\Desktop\\ss\\'+RGB_dirlist[i],img)
| 2.40625 | 2 |
pennylane/transforms/classical_jacobian.py | aglitoiu/pennylane | 0 | 12769205 | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the classical Jacobian transform
"""
# pylint: disable=import-outside-toplevel
import pennylane as qml
def classical_jacobian(qnode):
    """Return a function that computes the Jacobian of the classical part of *qnode*.

    The classical part maps the QNode's input arguments to the trainable gate
    parameters placed on its tape.  The returned callable differentiates that
    mapping with the autodiff framework matching ``qnode.interface``
    (autograd, torch, jax or tf); an unknown interface returns None.
    """
    def classical_preprocessing(*args, **kwargs):
        """Rebuild the tape for the given inputs and return its gate parameters."""
        qnode.construct(args, kwargs)
        return qml.math.stack(qnode.qtape.get_parameters())
    if qnode.interface == "autograd":
        return qml.jacobian(classical_preprocessing)
    if qnode.interface == "torch":
        import torch
        def _jacobian(*args, **kwargs): # pylint: disable=unused-argument
            # NOTE(review): kwargs are dropped here, unlike the tf branch
            # below -- confirm whether keyword inputs are supported upstream.
            return torch.autograd.functional.jacobian(classical_preprocessing, args)
        return _jacobian
    if qnode.interface == "jax":
        import jax
        return jax.jacobian(classical_preprocessing)
    if qnode.interface == "tf":
        import tensorflow as tf
        def _jacobian(*args, **kwargs):
            with tf.GradientTape() as tape:
                tape.watch(args)
                gate_params = classical_preprocessing(*args, **kwargs)
            return tape.jacobian(gate_params, args)
        return _jacobian
| 1.953125 | 2 |
ipynb/matter/py-server/save-data-on-site.py | NeuPhysics/codebase | 0 | 12769206 | <reponame>NeuPhysics/codebase
import numpy as np
from scipy.integrate import odeint
from scipy.integrate import ode
import matplotlib.pylab as plt
import csv
import time
# Physical and numerical parameters of the two-frequency matter oscillation run.
endpoint = 1000000000  # integration range (in units of 1/omegam)
dx = 10.0  # integrator output step size
lam0 = 0.845258  # in unit of omegam, omegam = 3.66619*10^-17
dellam = np.array([0.00003588645221954444, 0.06486364865874367])  # deltalambda/omegam
ks = [1.0, 1.0/90]  # the two perturbation wave numbers
thm = 0.16212913985547778  # theta_m
psi0, x0 = [1.0+0.j, 0.0], 0  # initial condition
savestep = 100000  # save to file every savestep steps
xlin = np.arange(dx, endpoint + 1*dx, dx)
# Rows of [x, P_transition, P_survival], one per saved step.  Floor division
# keeps the length an int on Python 3 (the old len(xlin)/savestep is a float
# there and breaks np.zeros).  The previously preallocated psi/psisave/xlinsave
# arrays were never used and wasted several gigabytes of memory.
probsave = np.zeros([len(xlin) // savestep, 3])
def hamiltonian(x, deltalambda, k, thetam):
    """Two-flavour Hamiltonian (interaction picture) at position *x*.

    The two off-diagonal entries share the same real amplitude and opposite
    phases, so the matrix is Hermitian with a zero diagonal.
    """
    amplitude = 0.5 * np.sin(2 * thetam) * (
        deltalambda[0] * np.sin(k[0] * x) + deltalambda[1] * np.sin(k[1] * x))
    phase = -x - np.cos(2 * thetam) * (
        deltalambda[0] / k[0] * np.cos(k[0] * x)
        + deltalambda[1] / k[1] * np.cos(k[1] * x))
    upper = amplitude * np.exp(1.0j * phase)
    return [[0, upper], [np.conj(upper), 0]]
def deripsi(t, psi, deltalambda, k, thetam):
    """Right-hand side of the Schrodinger equation: -i * H(t) @ psi."""
    H = hamiltonian(t, deltalambda, k, thetam)
    return -1.0j * np.dot(H, [psi[0], psi[1]])
# Complex-valued ("zvode") stiff BDF integrator for the 2-component state.
sol = ode(deripsi).set_integrator('zvode', method='bdf', atol=1e-8, with_jacobian=False)
sol.set_initial_value(psi0, x0).set_f_params(dellam,ks,thm)
flag = 0  # index of the next grid point in xlin to integrate to
flagsave = 0  # index of the next row to fill in probsave
timestampstr = time.strftime("%Y%m%d-%H%M%S")
print timestampstr
# March the solver one grid point at a time; every `savestep` points record
# [x, |psi_1|^2, |psi_0|^2] and append it to the timestamped CSV so partial
# results survive an interruption of this very long run.  (Python 2 syntax.)
while sol.successful() and sol.t < endpoint:
    sol.integrate(xlin[flag])
    if np.mod(flag,savestep)==0:
        probsave[flagsave] = [sol.t, np.absolute(sol.y[1])**2, np.absolute(sol.y[0])**2]
        with open(r'probtrans-test-'+timestampstr+'.csv', 'a') as f_handle:
            np.savetxt(f_handle, probsave[flagsave])
        flagsave = flagsave + 1
    flag = flag + 1
print "CONGRATS"
# # ploting using probsave array inside file
# plt.figure(figsize=(18,13))
# plt.plot(probsave[:,0], probsave[:,1],'-')
# plt.title("Probabilities",fontsize=20)
# plt.xlabel("$\hat x$",fontsize=20)
# plt.ylabel("Probability",fontsize=20)
# plt.show()
# # Template for reading the csv file
# # Ploting using data file
# probsavefromfile = np.loadtxt("probtrans-test-"+timestampstr+".csv")
# # print test
# # print len(test[1::2]), test[1::2], len(test[::2]), test[::2]
# plt.figure(figsize=(18,13))
# plt.plot(probsavefromfile[::2], probsavefromfile[1::2],'-')
# plt.title("Probabilities",fontsize=20)
# plt.xlabel("$\hat x$",fontsize=20)
# plt.ylabel("Probability",fontsize=20)
# plt.show()
| 2.015625 | 2 |
main.py | noirdevelopment/noirGen | 3 | 12769207 | import os
import sys
import platform
import numpy
import threading
import ctypes
import string
import random
import requests
import json
from colorama import Fore
VALID = 0
INVALID = 0
BOOST_LENGTH = 24
CLASSIC_LENGTH = 16
CODESET = []
BASEURL = "https://discord.gift/"
CODESET[:0] = string.ascii_letters + string.digits
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: 0 | Invalid: 0")
os.system("cls")
NOIRGEN = """[40;35m
[40;34mv 1.0.0[40;35m
/$$ /$$ /$$ /$$$$$$
| $$$ | $$ |__/ /$$__ $$
| $$$$| $$ /$$$$$$ /$$ /$$$$$$ | $$ \__/ /$$$$$$ /$$$$$$$
| $$ $$ $$ /$$__ $$| $$ /$$__ $$| $$ /$$$$ /$$__ $$| $$__ $$
| $$ $$$$| $$ \ $$| $$| $$ \__/| $$|_ $$| $$$$$$$$| $$ \ $$
| $$\ $$$| $$ | $$| $$| $$ | $$ \ $$| $$_____/| $$ | $$
| $$ \ $$| $$$$$$/| $$| $$ | $$$$$$/| $$$$$$$| $$ | $$
|__/ \__/ \______/ |__/|__/ \______/ \_______/|__/ |__/
"""
print(NOIRGEN)
for i in range(3):
    print('')
# Interactive configuration: number of codes, code type and thread count.
CODE_AMOUNT = int(input(" [40;36mCodes to Generate => "))
for i in range(2):
    print('')
BOOST_CLASSIC = str(input(" [40;32mBoost or Classic => "))
for i in range(2):
    print('')
THREAD_COUNT = int(input(" [40;31mThreads => "))
for i in range(5):
    print('')
def checkBoost(boostURL):
    """Return True if Discord reports the gift code behind *boostURL* as valid.

    Increments the module-level VALID/INVALID counters as a side effect.
    """
    global VALID
    global INVALID
    check_endpoint = f"https://discordapp.com/api/v9/entitlements/gift-codes/{boostURL}?with_application=false&with_subscription_plan=true"
    response = requests.get(check_endpoint)
    if response.status_code == 200:
        VALID += 1
        return True
    INVALID += 1
    return False
def genBoost():
    """Generate random boost-length codes, check each and log the valid ones.

    Valid codes are appended to valid.txt; previously the file was opened in
    "w" mode, so every new hit overwrote all earlier ones.
    """
    global VALID
    global INVALID
    for _ in range(CODE_AMOUNT):
        code = numpy.random.choice(CODESET, size=[CODE_AMOUNT, BOOST_LENGTH])
        for chars in code:
            try:
                boostCode = ''.join(e for e in chars)
                boostURL = BASEURL + boostCode
                if checkBoost(boostURL):
                    # "a" (append): keep every valid code found so far.
                    with open("valid.txt", "a") as f:
                        f.write(boostURL + "\n")
                    print(Fore.GREEN + f"[!] VALID | {boostURL}")
                    ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
                else:
                    ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
                    print(Fore.RED + f"[!] INVALID | {boostURL}")
            except Exception as e:
                print(e)
                print(Fore.RED + "[!] An Error has Occured!")
def checkClassic(classicURL):
    """Return True if Discord reports the classic gift code as valid.

    Increments the module-level VALID/INVALID counters as a side effect.
    """
    global VALID
    global INVALID
    check_endpoint = f"https://discordapp.com/api/v9/entitlements/gift-codes/{classicURL}?with_application=false&with_subscription_plan=true"
    response = requests.get(check_endpoint)
    if response.status_code == 200:
        VALID += 1
        return True
    INVALID += 1
    return False
def genClassic():
    """Generate random classic-length codes, check each and log the valid ones.

    Valid codes are appended to valid.txt; previously the file was opened in
    "w" mode, so every new hit overwrote all earlier ones.
    """
    global VALID
    global INVALID
    for _ in range(CODE_AMOUNT):
        code = numpy.random.choice(CODESET, size=[CODE_AMOUNT, CLASSIC_LENGTH])
        for chars in code:
            try:
                classicCode = ''.join(e for e in chars)
                classicURL = BASEURL + classicCode
                if checkClassic(classicURL):
                    # "a" (append): keep every valid code found so far.
                    with open("valid.txt", "a") as f:
                        f.write(classicURL + "\n")
                    print(Fore.GREEN + f"[!] VALID | {classicURL}")
                    ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
                else:
                    ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
                    print(Fore.RED + f"[!] INVALID | {classicURL}")
            except Exception as e:
                print(e)
                print(Fore.RED + "[!] An Error has Occured!")
# The original chain `== "Boost" or "B" or ...` was always truthy (each bare
# string is a truthy operand), so the classic branch was unreachable.
# Compare against the full set of accepted answers instead.
if BOOST_CLASSIC in ("Boost", "B", "b", "boost"):
    for i in range(THREAD_COUNT):
        threading.Thread(target=genBoost).start()
elif BOOST_CLASSIC in ("Classic", "C", "c", "classic"):
    for i in range(THREAD_COUNT):
        threading.Thread(target=genClassic).start()
| 2.21875 | 2 |
autodcf/company/income_statement.py | jackmoody11/AutoDCF | 14 | 12769208 | <gh_stars>10-100
import datetime
def now(offset=0):
    """Utility for getting current time or current time minus offset number of years.

    Args:
        offset (int): Number of years to subtract from current year. Defaults to 0.

    Returns:
        date (datetime.datetime): Date offset from today by offset number of years
        (or today if offset is 0).  If today is Feb 29 and the target year is
        not a leap year, the day is clamped to Feb 28 instead of raising.
    """
    n = datetime.datetime.now()
    try:
        return n.replace(year=n.year - offset)
    except ValueError:
        # replace() raises for Feb 29 in a non-leap target year.
        return n.replace(year=n.year - offset, day=28)
class IncomeStatement:
    """Income statement object for a specific company during a specific time period.

    Args:
        sales (Union[float, int]): Sales from period.
        cogs (Union[float, int]): Cost of goods sold from period.
        sga (Union[float, int]): Selling, General, and Administrative costs from period.
        rd (Union[float, int]): Research & Development costs from period.
        depreciation (Union[float, int]): Depreciation from period.
        amortization (Union[float, int]): Amortization from period.
        nonrecurring_cost (Union[float, int]): Non-recurring cost from period.
        interest (Union[float, int]): Interest expense from period.
        tax (Union[float, int]): Tax (as absolute currency amount, NOT as tax rate %).
        start_date (datetime.datetime): First day of period. Defaults to one
            year before the moment of construction.
        end_date (datetime.datetime): Last day of period. Defaults to the
            moment of construction.

    Raises:
        ValueError: If end_date is not strictly after start_date.
    """

    def __init__(self,
                 sales,
                 cogs,
                 sga,
                 rd,
                 depreciation,
                 amortization,
                 nonrecurring_cost,
                 interest,
                 tax,
                 start_date=None,
                 end_date=None):
        # Resolve the date defaults at call time.  The previous signature used
        # ``start_date=now(offset=1), end_date=now()``, which Python evaluates
        # once at class-definition (import) time, so every instance silently
        # shared the same import-time timestamps.
        if start_date is None:
            start_date = now(offset=1)
        if end_date is None:
            end_date = now()
        self._sales = sales
        self._cogs = cogs
        self._sga = sga
        self._rd = rd
        self._nonrecurring_cost = nonrecurring_cost
        self._interest = interest
        self._tax = tax
        self._depreciation = depreciation
        self._amortization = amortization
        self._start_date = start_date
        if start_date < end_date:
            self._end_date = end_date
        else:
            raise ValueError("End date must be after start date. "
                             "Given start date {0} and end date {1}".format(start_date, end_date))

    @property
    def sales(self):
        """Sales from period."""
        return self._sales

    @sales.setter
    def sales(self, val):
        self._sales = val

    @property
    def cogs(self):
        """Cost of goods sold from period."""
        return self._cogs

    @cogs.setter
    def cogs(self, val):
        self._cogs = val

    @property
    def sga(self):
        """Selling, general, and administrative costs from period."""
        return self._sga

    @sga.setter
    def sga(self, val):
        self._sga = val

    @property
    def nonrecurring_cost(self):
        """Non-recurring costs from period."""
        return self._nonrecurring_cost

    @nonrecurring_cost.setter
    def nonrecurring_cost(self, val):
        self._nonrecurring_cost = val

    @property
    def tax(self):
        """Total taxes from period."""
        return self._tax

    @tax.setter
    def tax(self, val):
        self._tax = val

    @property
    def depreciation(self):
        """Total depreciation from period."""
        return self._depreciation

    @depreciation.setter
    def depreciation(self, val):
        self._depreciation = val

    @property
    def amortization(self):
        """Total amortization from period."""
        return self._amortization

    @amortization.setter
    def amortization(self, val):
        self._amortization = val

    @property
    def da(self):
        """Total depreciation plus amortization from period."""
        return self.amortization + self.depreciation

    @property
    def rd(self):
        """Research and development costs from period."""
        return self._rd

    @rd.setter
    def rd(self, val):
        self._rd = val

    @property
    def interest(self):
        """Interest expense from period."""
        return self._interest

    @interest.setter
    def interest(self, val):
        self._interest = val

    @property
    def start_date(self):
        """Start date of period (read-only)."""
        return self._start_date

    @property
    def end_date(self):
        """End date of period (read-only)."""
        return self._end_date
projects/resources/python/benchmark/benchmark_main.py | QiZhou1512/grcuda | 0 | 12769209 | <filename>projects/resources/python/benchmark/benchmark_main.py
import argparse
from distutils.util import strtobool
from bench.bench_1 import Benchmark1
from bench.bench_2 import Benchmark2
from bench.bench_3 import Benchmark3
from bench.bench_4 import Benchmark4
from bench.bench_5 import Benchmark5
from bench.bench_6 import Benchmark6
from bench.bench_72 import Benchmark7
from bench.bench_8 import Benchmark8
from bench.bench_9 import Benchmark9
from bench.bench_10 import Benchmark10
from benchmark_result import BenchmarkResult
##############################
##############################
# Benchmark settings;
# Registry of runnable benchmarks, keyed by their short CLI name (-b).
benchmarks = {
    "b1": Benchmark1,
    "b2": Benchmark2,
    "b3": Benchmark3,
    "b4": Benchmark4,
    "b5": Benchmark5,
    "b6": Benchmark6,
    "b7": Benchmark7,
    "b8": Benchmark8,
    "b9": Benchmark9,
    "b10": Benchmark10,
}
# Default input data size(s) per benchmark; overridable with --size.
num_elem = {
    "b1": [100],
    "b2": [100],
    "b3": [100],
    "b4": [100],
    "b5": [100],
    "b6": [100],
    "b7": [100],
    "b8": [100],
    "b9": [100],
    "b10": [100],
}
# Execution policies to run per benchmark; overridable with --policy.
policies = {
    "b1": ["default"],
    "b2": ["default"],
    "b3": ["default"],
    "b4": ["default"],
    "b5": ["default"],
    "b6": ["default"],
    "b7": ["default"],
    "b8": ["default"],
    "b9": ["default"],
    "b10": ["default"],
}
##############################
##############################
def create_block_size_list(block_size_1d, block_size_2d) -> list:
    """Combine the 1D and 2D block-size lists into kernel-config dicts.

    When both lists are supplied they are zipped together, padding the
    shorter one by repeating its last element.  If only one list is given,
    one dict per entry is produced; with neither, a single empty dict.
    """
    if block_size_1d and block_size_2d:
        target_len = max(len(block_size_1d), len(block_size_2d))
        padded_1d = block_size_1d + [block_size_1d[-1]] * (target_len - len(block_size_1d))
        padded_2d = block_size_2d + [block_size_2d[-1]] * (target_len - len(block_size_2d))
        return [{"block_size_1d": b1, "block_size_2d": b2}
                for b1, b2 in zip(padded_1d, padded_2d)]
    if block_size_1d:
        return [{"block_size_1d": b} for b in block_size_1d]
    if block_size_2d:
        return [{"block_size_2d": b} for b in block_size_2d]
    return [{}]
##############################
##############################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="measure GrCUDA execution time")
parser.add_argument("-d", "--debug", action="store_true",
help="If present, print debug messages")
parser.add_argument("-i", "--num_iter", metavar="N", type=int, default=BenchmarkResult.DEFAULT_NUM_ITER,
help="Number of times each benchmark is executed")
parser.add_argument("-o", "--output_path", metavar="path/to/output.json",
help="Path to the file where results will be stored")
parser.add_argument("--realloc", metavar="[True|False]", type=lambda x: bool(strtobool(x)), nargs="*",
help="If True, allocate new memory and rebuild the GPU code at each iteration")
parser.add_argument("--reinit", metavar="[True|False]", type=lambda x: bool(strtobool(x)), nargs="*",
help="If True, re-initialize the values used in each benchmark at each iteration")
parser.add_argument("-c", "--cpu_validation", action="store_true", dest="cpu_validation",
help="Validate the result of each benchmark using the CPU")
parser.add_argument("--no_cpu_validation", action="store_false", dest="cpu_validation",
help="Validate the result of each benchmark using the CPU")
parser.add_argument("-b", "--benchmark", nargs="*",
help="If present, run the benchmark only for the specified kernel")
parser.add_argument("--policy",
help="If present, run the benchmark only with the selected policy")
parser.add_argument("-n", "--size", metavar="N", type=int, nargs="*",
help="Override the input data size used for the benchmarks")
parser.add_argument("--block_size_1d", metavar="N", type=int, nargs="*",
help="Number of threads per block when using 1D kernels")
parser.add_argument("--block_size_2d", metavar="N", type=int, nargs="*",
help="Number of threads per block when using 2D kernels")
parser.add_argument("-g", "--number_of_blocks", metavar="N", type=int, nargs="?",
help="Number of blocks in the computation")
parser.add_argument("-r", "--random", action="store_true",
help="Initialize benchmarks randomly whenever possible")
parser.add_argument("-p", "--time_phases", action="store_true",
help="Measure the execution time of each phase of the benchmark;"
" note that this introduces overheads, and might influence the total execution time")
parser.add_argument("--nvprof", action="store_true",
help="If present, enable profiling when using nvprof."
" For this option to have effect, run graalpython using nvprof, with flag '--profile-from-start off'")
parser.set_defaults(cpu_validation=BenchmarkResult.DEFAULT_CPU_VALIDATION)
# Parse the input arguments;
args = parser.parse_args()
debug = args.debug if args.debug else BenchmarkResult.DEFAULT_DEBUG
num_iter = args.num_iter if args.num_iter else BenchmarkResult.DEFAULT_NUM_ITER
output_path = args.output_path if args.output_path else ""
realloc = args.realloc if args.realloc else [BenchmarkResult.DEFAULT_REALLOC]
reinit = args.reinit if args.reinit else [BenchmarkResult.DEFAULT_REINIT]
random_init = args.random if args.random else BenchmarkResult.DEFAULT_RANDOM_INIT
cpu_validation = args.cpu_validation
time_phases = args.time_phases
nvprof_profile = args.nvprof
# Create a new benchmark result instance;
benchmark_res = BenchmarkResult(debug=debug, num_iterations=num_iter, output_path=output_path,
cpu_validation=cpu_validation, random_init=random_init)
if benchmark_res.debug:
BenchmarkResult.log_message(f"using CPU validation: {cpu_validation}")
if args.benchmark:
if benchmark_res.debug:
BenchmarkResult.log_message(f"using only benchmark: {args.benchmark}")
benchmarks = {b: benchmarks[b] for b in args.benchmark}
if args.policy:
if benchmark_res.debug:
BenchmarkResult.log_message(f"using only type: {args.policy}")
policies = {n: [args.policy] for n in policies.keys()}
if args.size:
if benchmark_res.debug:
BenchmarkResult.log_message(f"using only size: {args.size}")
num_elem = {n: args.size for n in num_elem.keys()}
# Setup the block size for each benchmark;
block_sizes = create_block_size_list(args.block_size_1d, args.block_size_2d)
number_of_blocks = args.number_of_blocks
if (args.block_size_1d or args.block_size_2d) and benchmark_res.debug:
BenchmarkResult.log_message(f"using block sizes: {block_sizes}")
if number_of_blocks:
BenchmarkResult.log_message(f"using number of blocks: {number_of_blocks}")
# Execute each test;
for b_name, b in benchmarks.items():
benchmark = b(benchmark_res, nvprof_profile=nvprof_profile)
for p in policies[b_name]:
for n in num_elem[b_name]:
prevent_reinit = False
for re in realloc:
for ri in reinit:
for block_size in block_sizes:
for i in range(num_iter):
benchmark.run(num_iter=i, policy=p, size=n, realloc=re, reinit=ri,
block_size=block_size, time_phases=time_phases, prevent_reinit=prevent_reinit, number_of_blocks=number_of_blocks)
prevent_reinit = True
# Print the summary of this block;
if benchmark_res.debug:
benchmark_res.print_current_summary(name=b_name, policy=p, size=n,
realloc=re, reinit=ri, block_size=block_size, skip=3)
| 1.921875 | 2 |
ptrello/core/config.py | Ibistylus/ptrello | 0 | 12769210 | <filename>ptrello/core/config.py
#!/usr/bin/env python
import logging
import logging.config
import yaml
import os
logger = logging.getLogger("ptrello."+__name__)
logger.setLevel("DEBUG")
# Check environment variable first:
env_var = os.environ.get("PTRELLO") or ""
# Check home directory second
user_path = os.path.join(os.path.expanduser('~'), ".config/ptrello/")
# Check project path last
user_project_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../etc")
settings = None
locations = [env_var, os.curdir, user_path, user_project_path]
def logging_configuration(config=None):
    """Configure logging from the first ptrello.ini found in ``locations``.

    If *config* is truthy the file search is skipped and a basic
    WARNING-level configuration is installed instead.  (NOTE(review): that
    inversion looks backwards -- confirm the intended meaning of *config*.)
    """
    if not config:
        for loc in locations:
            path = (os.path.join(os.path.join(os.path.normpath(loc), "ptrello.ini")))
            try:
                if os.path.isfile(path):
                    logging.config.fileConfig(path)
                    # Stop at the first hit so the highest-priority location
                    # wins (mirrors settings_configuration); previously every
                    # existing file was applied and the last one won.
                    return
            except IOError:
                pass
    else:
        logging.basicConfig(level=logging.WARNING)
        logger.warning("A log ini file was not found")
def settings_configuration():
    """Return parsed settings from the first ptrello_settings.yaml found.

    Searches ``locations`` in priority order; returns None if no settings
    file exists anywhere.  Re-raises IOError after logging it.
    """
    for loc in locations:
        path = os.path.normpath(os.path.join(os.path.join(loc, "ptrello_settings.yaml")))
        try:
            if os.path.isfile(path):
                with open(path, "r") as f:
                    # safe_load: plain yaml.load without an explicit Loader
                    # can construct arbitrary Python objects and is rejected
                    # by modern PyYAML; settings need only plain YAML types.
                    return yaml.safe_load(f)
        except IOError as e:
            logger.warning("A log configuration was not found {}".format(e.args))
            raise e
# Module import side effects: configure logging, then load the settings.
logging_configuration()
settings = settings_configuration()
# Fail fast at import time if no settings file could be located anywhere.
if settings is None:
    raise IOError("ptrello_settings.yaml was not found in the follow locations {}".format(locations))
logger.debug("Settings and logs configured")
Audit.py | FireSpark142/Unit_4 | 0 | 12769211 | #!/usr/bin/env python
# coding: utf-8
# In[28]:
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
OSMFILE = (r"C:\Users\Marcus\Documents\School Documents\Python Environments\Unit_4\sample1percent.osm")
# Last whitespace-delimited token of a street name (its type, e.g. "St.").
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
# First run of exactly five digits, skipping any leading non-digits.
postcodes_re = re.compile(r'^\D*(\d{5}).*')
cities_re = re.compile(r'.+', re.IGNORECASE)
# Street types considered already standardised; anything else is reported.
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place", "Square", "Lane", "Road",
            "Trail", "Parkway", "Commons"]
#Makes a dictionary of all of the street types to allow us to create a list to update
def audit_street_type(street_types, street_name):
    """Record *street_name* under its street type if the type is non-standard.

    Mutates *street_types* (a dict of sets) in place; names whose last token
    is in the module-level ``expected`` list are ignored.
    """
    match = street_type_re.search(street_name)
    if not match:
        return
    street_type = match.group()
    if street_type not in expected:
        street_types[street_type].add(street_name)
#If the element key for 'k' is 'addr:street', return the associated value pair
def is_street_name(elem):
    """Return True when the tag element's 'k' attribute marks a street name."""
    key = elem.attrib['k']
    return key == "addr:street"
""" Here we create a dictionary of type set called street_types,
and turn the open function into a variable for ease of use in the future
Next is my pride and joy, instead of using "for et.iterparse" to iterate directly line by line
through the file instead we use the osm_file var to open the file in memory, and
then turn it into an iterable. This saves a TON of time, as we can iterate on the file
in memory instead of iterating the file line by line. Once we do this, we then iterate through and
for each tag that matches "node" or "way", we check if it is a street name, and if so we run the audit_street_types function.
we then clear the root tree, saving memory and time, close the file, and return the updated street_types dict.
"""
def audit_s(osmfile):
    """Stream-parse *osmfile* and collect non-standard street types.

    Returns a dict mapping each unexpected street type to the set of full
    street names that use it.  The root element is cleared after every
    node/way so the whole file is never held in memory at once.
    """
    street_types = defaultdict(set)
    osm_file = open(osmfile, "r")
    # iterparse yields (event, elem) pairs lazily; grab the root first.
    iterable = iter(ET.iterparse(osm_file, events=("start", "end")))
    # Built-in next() replaces the Python-2-only .next() method.
    event, root = next(iterable)
    for event, elem in iterable:
        if event == "end" and (elem.tag == "node" or elem.tag == "way"):
            for tag in elem.iter("tag"):
                if is_street_name(tag):
                    audit_street_type(street_types, tag.attrib['v'])
            root.clear()
    osm_file.close()
    return street_types
""" The update_street function takes the information we learned from the audit_s function
and utilizes that to check a manually created mapping dictionary and DONT_UPDATE tuple.
These two objects are created by reading the report from audit_s and choosing how we want to standardize the types.
to go above and beyond, we also standardized prefixes such as N for North. Unfortunely this caused an issue where Highway or Route,
which often had the suffix N would be incorrectly corrected to North, such as Route North.
Therefore we created the DON_UPDATE tuple, and check each value against the tuple, and if there is a match
the value is not updated. To fix the street types, we broke the value into parts
seperated by whitespace using .split(), then change the value if it matches the key found in mapping, to the paired value.
Finally, the seperated parts are then rejoined with a space inbetween using the .join() function.
"""
def update_street(name):
    """Standardise a street name: expand abbreviated types and prefixes.

    Names starting with "route" or "suite" (case-insensitive) are returned
    unchanged so directional suffixes like "N" are not expanded; otherwise
    every whitespace-separated part is mapped through the correction table
    and title-cased.
    """
    mapping = {"St": "Street",
               "Rd.": "Road",
               "Rd": "Road",
               "N.": "North",
               "N": "North",
               "S.": "South",
               "Blvd": "Boulevard",
               "Blvd.": "Boulevard",
               "Expy": "Expressway",
               "Ln": "Lane",
               "Ctr": "Center",
               "Ctr.": "Center",
               "5th": "Fifth",
               "4th": "Fourth",
               "3rd": "Third",
               "2nd": "Second",
               "1st": "First",
               # A street was listed as just "Dade"; the full address was
               # recovered from an external map lookup.
               "Dade": "South Dade Avenue",
               "MO-94": "Highway 94"
               }
    skip_prefixes = ('route', 'suite')
    if name.lower().startswith(skip_prefixes):
        return name
    fixed_parts = [mapping.get(part, part).title() for part in name.split()]
    return ' '.join(fixed_parts)
def dicti(data, item):
    """Tally *item* into *data*: increment the count stored under that key.

    *data* is expected to be a defaultdict(int) mapping postcode -> count,
    so repeated postcodes accumulate across the whole dataset.
    """
    data[item] = data[item] + 1
#This function returns the elem if 'k' matches "addr:postcode"
def is_postcode(elem):
    """Return True when the tag element's 'k' attribute marks a postcode."""
    key = elem.attrib['k']
    return key == "addr:postcode"
#This codes is identical in function the the street function of similar name
def audit_p(osmfile):
    """Stream-parse *osmfile* and tally every addr:postcode value.

    Returns a dict mapping postcode -> occurrence count.  The root element
    is cleared after every node/way to keep memory use flat.
    """
    # Bug fix: the parameter was previously ignored and the module-level
    # OSMFILE constant was opened instead.
    osm_file = open(osmfile, "r")
    data = defaultdict(int)
    iterable = iter(ET.iterparse(osm_file, events=("start", "end")))
    # Built-in next() replaces the Python-2-only .next() method.
    event, root = next(iterable)
    for event, elem in iterable:
        if event == "end" and (elem.tag == "node" or elem.tag == "way"):
            for tag in elem.iter("tag"):
                if is_postcode(tag):
                    dicti(data, tag.attrib['v'])
            root.clear()
    osm_file.close()
    return data
# This is the function that actually changes the post code to the proper values
# It is called in the OSM_to_XML file, when writing the changes to the .csv
def update_postcode(postcodes):
    """Extract the clean zip code from *postcodes* using postcodes_re.

    Returns the captured group when the regex matches, otherwise an
    empty string.
    """
    captured = []
    found = re.search(postcodes_re, postcodes)
    if found:
        captured.append(found.group(1))
    return ', '.join(str(code) for code in captured)
#Once again, this is similar in function to audit_street
def audit_city(city_dict, city_ex):
    """Record *city_ex* in *city_dict* keyed by its cities_re match, if any."""
    match = cities_re.search(city_ex)
    if match:
        city_dict[match.group()].add(city_ex)
#Same function as is_postcode, but for addr:city
def is_city(elem):
    """Return True when the tag element's 'k' attribute is "addr:city"."""
    return elem.attrib['k'] == "addr:city"
#Same function as audit_s, but for city values.
def audit_C(osmfile):
    """Collect the distinct addr:city values in *osmfile*.

    Streams the OSM XML with iterparse (same pattern as audit_p) and
    returns a defaultdict mapping each cities_re match group to the set
    of raw city strings encountered for it.
    """
    city_dict = defaultdict(set)
    osm_file = open(osmfile, "r")
    # get an iterable and turn it into an iterator
    iterable = iter(ET.iterparse(osm_file, events=("start", "end")))
    # get the root element.  Bug fix: use the builtin next() instead of
    # iterable.next(), which only exists on Python 2.
    event, root = next(iterable)
    for event, elem in iterable:
        if event == "end" and (elem.tag == "node" or elem.tag == "way"):
            for tag in elem.iter("tag"):
                if is_city(tag):
                    audit_city(city_dict, tag.attrib['v'])
            # Free memory for elements we are done with.
            root.clear()
    osm_file.close()
    return city_dict
""" Same function as the update_street, except instead of it skipping the
the matched tuple, instead it instead uses the ofallon_mapping dict to correct the
inconsistency of some cities being listed as O'fallon and some as O fallon.
"""
def update_city(name):
    """Normalize a city name from the OSM extract.

    Names beginning with 'o'/'O' are treated as O'Fallon variants: their
    parts are title-cased and joined with no separator, with a leading
    "O" mapped to "O'" (so "O Fallon" becomes "O'Fallon").  All other
    names have each part replaced via city_mapping, title-cased, and
    joined with spaces.
    """
    ofallon_mapping = {"O": "O'"}
    city_mapping = {"St": "Saint",
                    "St.": "Saint",
                    "bridgeton" : "Bridgeton",
                    "drive-through": "O'Fallon",
                    "Bass": "Saint",
                    "Pro": "Charles",
                    "Drive": "",
                    "UNINCORPORATED": "Saint Peters",
                    }
    parts = name.split()
    if name.lower().startswith('o'):
        return ''.join(ofallon_mapping.get(part, part).title() for part in parts)
    return ' '.join(city_mapping.get(part, part).title() for part in parts)
def test():
    """Audit the OSM file and print before/after values for each field.

    Also spot-checks a few known street corrections via assertions.
    """
    street_types = audit_s(OSMFILE)
    pprint.pprint(dict(street_types))
    postcodes = audit_p(OSMFILE)
    pprint.pprint(dict(postcodes))
    c_names = audit_C(OSMFILE)
    pprint.pprint(dict(c_names))
    expected_streets = {
        "N. Main Ctr.": "North Main Center",
        "Zumbehl Rd": "Zumbehl Road",
        "N 3rd St": "North Third Street",
        "Route N": "Route N",
    }
    for st_type, ways in street_types.items():
        for name in ways:
            better_name = update_street(name)
            print(name, "=>", better_name)
            if name in expected_streets:
                assert better_name == expected_streets[name]
    for postcode, nums in postcodes.items():
        better_code = update_postcode(postcode)
        print(postcode, "=>", better_code)
    for c_name, ways in c_names.items():
        for name in ways:
            better_city_name = update_city(name)
            print(name, "=>", better_city_name)
if __name__ == '__main__':
    test()
# In[ ]:
| 3.53125 | 4 |
appion/bin/prepRefineXmippML3D.py | leschzinerlab/myami-3.2-freeHand | 0 | 12769212 | #!/usr/bin/env python
#appion
from appionlib import apPrepXmipp3D
from appionlib import apDisplay
class XmippPrepML3DRefinement(apPrepXmipp3D.XmippPrep3DRefinement):
	"""Appion prep job for Xmipp maximum-likelihood 3D (ML3D) refinement.

	The base class drives the actual preparation; this subclass only
	declares which refinement method it prepares.
	"""
	def setRefineMethod(self):
		# Identifier recorded by the base class to select the ML3D pipeline.
		self.refinemethod = 'xmippml3d'
#=====================
if __name__ == "__main__":
	app = XmippPrepML3DRefinement()
	app.start()
	app.close()
| 2.15625 | 2 |
steps/step28.py | choiking10/mytorch | 0 | 12769213 | <reponame>choiking10/mytorch
import numpy as np
from mytorch import as_variable
from tests.complex_functions import rosenbrock
def ex1():
    """Single backward pass through the Rosenbrock function at (0, 2)."""
    a = as_variable(0.0)
    b = as_variable(2.0)
    out = rosenbrock(a, b)
    out.backward()
    print(a.grad, b.grad)
def ex2():
    """Minimize the Rosenbrock function with plain gradient descent.

    Prints the current point every 1000 iterations.
    """
    a = as_variable(0.0)
    b = as_variable(2.0)
    learning_rate = 0.001
    n_iters = 50000
    for step in range(n_iters):
        if (step + 1) % 1000 == 0:
            print(step + 1, a, b)
        loss = rosenbrock(a, b)
        a.zerograd()
        b.zerograd()
        loss.backward()
        a.data -= learning_rate * a.grad
        b.data -= learning_rate * b.grad
ex1()
ex2()
s7.py | matthewmuccio/InterviewPrepKit | 2 | 12769214 | #!/usr/bin/env python3
import math
import os
# Complete the minimumPasses function below.
def minimumPasses(m, w, p, t):
    """Return the minimum number of production passes needed to reach *t* items.

    Each pass produces m*w items ("coins").  At any time, accumulated items
    can be spent at price *p* each on new machines/workers; purchases are
    distributed to keep the machine and worker counts as balanced as
    possible, which maximizes future speed.  Greedy strategy: repeatedly
    jump ahead to the first pass where a purchase is affordable, buy only
    if buying finishes at least as fast as not buying, otherwise finish at
    the current speed.

    Bug fix: all ceiling divisions are now done in exact integer arithmetic
    instead of math.ceil(float(a) / b), which loses precision (and can give
    off-by-one answers) once the target exceeds ~2**53.
    """
    def ceil_div(a, b):
        # Exact integer ceil(a / b); matches math.ceil for negative a too.
        return -(-a // b)

    def new_infra(n_items):
        # Distribute n_items purchases to balance machines and workers:
        # first top up the smaller side, then split the remainder evenly
        # (an odd leftover goes to the workers).
        new_machines = m
        new_workers = w
        delta = max(new_machines, new_workers) - min(new_machines, new_workers)
        delta = min(delta, n_items)
        rest = n_items - delta
        a, b = rest // 2, rest // 2
        if rest & 1:
            b += 1
        if new_machines < new_workers:
            new_machines += delta
        else:
            new_workers += delta
        new_machines += a
        new_workers += b
        return new_machines, new_workers

    def items_worth_buying():
        # Number of items to buy now, or -1 if buying would finish later
        # than simply producing at the current speed.
        n_items = coins // p
        rest_coins = coins - n_items * p
        new_machines, new_workers = new_infra(n_items)
        rem_days_if_buying = ceil_div(t - rest_coins, new_machines * new_workers)
        rem_days_if_not_buying = ceil_div(t - coins, m * w)
        return n_items if rem_days_if_buying <= rem_days_if_not_buying else -1

    n_passes = 0  # renamed from `iter` to stop shadowing the builtin
    coins = 0
    while True:
        speed = m * w
        next_improv_iter = ceil_div(p - coins, speed)
        rem_iter_till_finish = ceil_div(t - coins, speed)
        # If we cannot afford an upgrade before finishing, just finish.
        if next_improv_iter >= rem_iter_till_finish:
            return n_passes + rem_iter_till_finish
        n_items = items_worth_buying()
        if n_items == -1:
            return n_passes + rem_iter_till_finish
        coins += next_improv_iter * speed
        m, w = new_infra(n_items)
        coins -= p * n_items
        n_passes += next_improv_iter
if __name__ == "__main__":
mwpn = input().split()
m = int(mwpn[0])
w = int(mwpn[1])
p = int(mwpn[2])
n = int(mwpn[3])
result = minimumPasses(m, w, p, n)
print(result)
| 3.203125 | 3 |
models/linear_models.py | clabrugere/numpy-basics | 1 | 12769215 | import numpy as np
class RidgeRegression:
    """Closed-form ridge (L2-penalized) linear regression.

    Optionally min-max scales features to [0, 1] (ranges remembered from
    fit time) and prepends a bias column before solving the regularized
    normal equations.  Note the L2 penalty is applied to every weight,
    including the bias term when one is used.
    """
    def __init__(self, bias=True, weight_l2=1e-3, scale=True):
        self.bias = bias
        self.weight_l2 = weight_l2
        self.weights = None
        self.scale = scale

    def _scale(self, X):
        # Min-max scale using the feature ranges stored during fit().
        return (X - self._min) / (self._max - self._min)

    def fit(self, X, y):
        """Estimate weights by solving the ridge normal equations."""
        if self.scale:
            self._min = X.min(axis=0)
            self._max = X.max(axis=0)
            X = self._scale(X)
        if self.bias:
            X = np.hstack((np.ones((X.shape[0], 1)), X))
        n_features = X.shape[1]
        gram = X.T @ X + self.weight_l2 * np.eye(n_features)
        self.weights = np.linalg.pinv(gram) @ X.T @ y

    def predict(self, X):
        """Predict targets for X with the fitted weights."""
        if self.scale:
            X = self._scale(X)
        if self.bias:
            X = np.hstack((np.ones((X.shape[0], 1)), X))
        return X @ self.weights
class LogisticRegression:
    """Binary logistic regression trained by full-batch gradient descent.

    The L2 penalty is applied to every weight, including the bias term
    when one is used.
    """
    def __init__(self, lr=1e-2, bias=True, weight_l2=1e-3):
        self.lr = lr
        self.bias = bias
        self.weight_l2 = weight_l2
        self.weights = None

    def _sigmoid(self, x):
        # Logistic link: maps logits to (0, 1).
        return 1 / (1 + np.exp(-x))

    def fit(self, X, y, max_iter=100):
        """Run *max_iter* gradient-descent steps on the penalized log loss."""
        if self.bias:
            X = np.hstack((np.ones((X.shape[0], 1)), X))
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        for _ in range(max_iter):
            probs = self._sigmoid(X @ self.weights)
            grad = self.weight_l2 * 2 * self.weights + (1 / n_samples) * X.T @ (probs - y)
            self.weights = self.weights - self.lr * grad

    def predict(self, X):
        """Return P(y=1 | X) under the fitted model."""
        if self.bias:
            X = np.hstack((np.ones((X.shape[0], 1)), X))
        return self._sigmoid(X @ self.weights)
app/migrations/0002_jobapplicants.py | AyazSaiyed/Yudibot | 0 | 12769216 | # Generated by Django 3.0.5 on 2021-01-13 10:21
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
    """Create the JobApplicants table for storing job applications.

    Auto-generated by Django 3.0.5; depends on the app's initial migration.
    """
    dependencies = [
        ('app', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='JobApplicants',
            fields=[
                # Client-generated UUID4 primary key.
                ('uid', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                # Submission timestamp; defaults to "now" at insert time.
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('candidatename', models.TextField(default='', max_length=100)),
                ('appliedfor', models.TextField(default='', max_length=100)),
                ('email', models.EmailField(max_length=254)),
                ('experience', models.TextField(default='')),
                ('resumeurl', models.TextField(default='')),
            ],
        ),
    ]
| 1.875 | 2 |
ResultEvaluation.py | adakasky/PROFILE | 0 | 12769217 | <gh_stars>0
import codecs
from BeautifulSoup import BeautifulSoup as bs
import requests
import unicodedata
import time
url = 'https://www.google.com/search?q='
def strip_accents(s):
    """Return *s* with accents removed (NFD-decompose, drop combining marks)."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)
if __name__ == '__main__':
    # Load tab-separated (person, organization) pairs from disk.
    pairs = [(p.split('\t')[0], p.split('\t')[1]) for p in
             codecs.open('../data/pairs.tsv', 'r', 'utf-8').read().split('\n') if p != '']
    writer = codecs.open('../data/pairs_eval.tsv', 'w', 'utf-8')
    for pair in pairs:
        per = strip_accents(pair[0])
        org = strip_accents(pair[1])
        # Google search for both names as exact-phrase queries.
        query = url + '%22' + per.replace(' ', '+') + '%22+%22"' + org.replace(' ', '+') + '%22'
        request = requests.get(query)
        html = bs(request.text)
        results = [r.text for r in html.findAll('span', attrs={'class': 'st'})]
        count = 0.0
        for result in results:
            if per in result and org in result:
                count += 1.0
        # Bug fix: guard against ZeroDivisionError when the search returns
        # no result snippets at all.
        ratio = count / len(results) if results else 0.0
        writer.write("%s\t%s\t%f\n" % (per, org, ratio))
        time.sleep(5)  # throttle requests to avoid being rate limited
    writer.flush()
    writer.close()
| 2.890625 | 3 |
Django/board_project/board/admin.py | sug5806/TIL | 0 | 12769218 | <gh_stars>0
from django.contrib import admin
from .models import Category
from .models import Document
from .models import Board
from .models import Comment
# Register your models here.
class BoardOption(admin.ModelAdmin):
    """Admin config for Board: list id/name/slug, auto-fill slug from name."""
    list_display = ['id', 'name', 'slug', ]
    prepopulated_fields = {'slug': ('name',)}
admin.site.register(Board, BoardOption)
class CategoryOption(admin.ModelAdmin):
    """Admin config for Category, shown with its parent board."""
    list_display = ['id', 'name', 'slug', 'board', ]
    # The slug field is filled in automatically while the name is typed.
    prepopulated_fields = {'slug': ('name',)}
admin.site.register(Category, CategoryOption)
class CommentInline(admin.TabularInline):
    # Edit comments inline on the Document admin page.
    model = Comment
class DocumentOption(admin.ModelAdmin):
    """Admin config for Document with inline comment editing."""
    list_display = ['id', 'author', 'title', 'slug', 'created', 'updated', 'category', 'board', ]
    prepopulated_fields = {'slug': ('title',)}
    inlines = [CommentInline]
admin.site.register(Document, DocumentOption)
admin.site.register(Comment)
| 2.125 | 2 |
Yello/core/dataset/sequence.py | teecha/Autonomous_Tello_Drone | 10 | 12769219 | # -*- coding: utf-8 -*-
import numpy as np
from tensorflow.keras.utils import Sequence
from core.dataset import augment
from core.image import read_image, preprocess_image
from core.utils import decode_annotation, decode_name
class Dataset(Sequence):
    """Keras Sequence yielding (batch_image, batch_label) for YOLO training.

    Reads annotations and augmentation settings from *cfg*, optionally
    applies mosaic and classic augmentations, and encodes ground-truth
    boxes into one target grid per output scale.  The training image size
    is periodically re-sampled from cfg (multi-scale training).
    """
    def __init__(self, cfg, verbose=0):
        """Read dataset, anchor and augmentation settings from *cfg*."""
        self.verbose = verbose
        self.mask = cfg["yolo"]["mask"]
        self.anchors = cfg["yolo"]["anchors"]
        self.max_boxes = cfg["yolo"]["max_boxes"]
        self.strides = cfg["yolo"]["strides"]
        self.name_path = cfg['yolo']['name_path']
        self.anno_path = cfg["train"]["anno_path"]
        self.image_size = cfg["train"]["image_size"]
        self.batch_size = cfg["train"]["batch_size"]
        self.normal_method = cfg['train']["normal_method"]
        self.mosaic = cfg['train']['mosaic']
        self.label_smoothing = cfg['train']["label_smoothing"]
        self.annotation = decode_annotation(anno_path=self.anno_path)
        self.num_anno = len(self.annotation)
        self.name = decode_name(name_path=self.name_path)
        self.num_classes = len(self.name)
        # init: pick an initial training resolution and the derived grid sizes
        self._image_size = np.random.choice(self.image_size)
        self._grid_size = self._image_size // self.strides

    def __len__(self):
        # Number of batches per epoch.
        return int(np.ceil(float(len(self.annotation)) / self.batch_size))

    def __getitem__(self, idx):
        """Build batch *idx*: images plus one label tensor per scale."""
        l_bound = idx * self.batch_size
        r_bound = (idx + 1) * self.batch_size
        if r_bound > len(self.annotation):
            # Keep the final batch full by sliding the window back over the end.
            r_bound = len(self.annotation)
            l_bound = r_bound - self.batch_size
        self._on_batch_start(idx)
        batch_image = np.zeros((r_bound - l_bound, self._image_size, self._image_size, 3), dtype=np.float32)
        batch_label = [np.zeros((r_bound - l_bound, size, size, len(mask_per_layer) * (5 + self.num_classes)),
                                dtype=np.float32)
                       for size, mask_per_layer in zip(self._grid_size, self.mask)]
        for i, sub_idx in enumerate(range(l_bound, r_bound)):
            image, bboxes, labels = self._getitem(sub_idx)
            if self.mosaic:
                # Mosaic: stitch this sample with three other random samples.
                sub_idx = np.random.choice(np.delete(np.arange(self.num_anno), idx), 3, False)
                image2, bboxes2, labels2 = self._getitem(sub_idx[0])
                image3, bboxes3, labels3 = self._getitem(sub_idx[1])
                image4, bboxes4, labels4 = self._getitem(sub_idx[2])
                image, bboxes, labels = augment.mosic(image, bboxes, labels,
                                                      image2, bboxes2, labels2,
                                                      image3, bboxes3, labels3,
                                                      image4, bboxes4, labels4)
            if self.normal_method:
                # Classic photometric and geometric augmentations.
                image = augment.random_distort(image)
                image = augment.random_grayscale(image)
                image, bboxes = augment.random_flip_lr(image, bboxes)
                image, bboxes = augment.random_rotate(image, bboxes)
                image, bboxes, labels = augment.random_crop_and_zoom(image, bboxes, labels,
                                                                     (self._image_size, self._image_size))
            image, bboxes, labels = augment.bbox_filter(image, bboxes, labels)
            labels = self._preprocess_true_boxes(bboxes, labels)
            batch_image[i] = image
            for j in range(len(self.mask)):
                batch_label[j][i, :, :, :] = labels[j][:, :, :]
        return batch_image, batch_label

    def _getitem(self, sub_idx):
        """Load, resize and one-hot-encode a single annotated sample."""
        path, bboxes, labels = self.annotation[sub_idx]
        image = read_image(path)
        if len(bboxes) != 0:
            bboxes, labels = np.array(bboxes), np.array(labels)
        else:
            bboxes, labels = np.zeros((0, 4)), np.zeros((0,))
        image, bboxes = preprocess_image(image, (self._image_size, self._image_size), bboxes)
        labels = augment.onehot(labels, self.num_classes, self.label_smoothing)
        return image, bboxes, labels

    def _preprocess_true_boxes(self, bboxes, labels):
        """Encode boxes into per-scale YOLO target grids by best-IoU anchor."""
        bboxes_label = [np.zeros((size, size, len(mask_per_layer), 5 + self.num_classes), np.float32)
                        for size, mask_per_layer in zip(self._grid_size, self.mask)]
        bboxes = np.array(bboxes, dtype=np.float32)
        # Anchor assignment: IoU of each box's width/height against every
        # anchor (as if both were centered at the origin).
        anchor_area = self.anchors[:, 0] * self.anchors[:, 1]
        bboxes_wh = bboxes[:, 2:4] - bboxes[:, 0:2]
        bboxes_wh_exp = np.tile(np.expand_dims(bboxes_wh, 1), (1, self.anchors.shape[0], 1))
        boxes_area = bboxes_wh_exp[..., 0] * bboxes_wh_exp[..., 1]
        intersection = np.minimum(bboxes_wh_exp[..., 0], self.anchors[:, 0]) * np.minimum(bboxes_wh_exp[..., 1],
                                                                                          self.anchors[:, 1])
        iou = intersection / (boxes_area + anchor_area - intersection + 1e-8)  # (N, A)
        best_anchor_idxs = np.argmax(iou, axis=-1)  # (N,)
        for i, bbox in enumerate(bboxes):
            search = np.where(self.mask == best_anchor_idxs[i])
            best_detect = search[0][0]
            best_anchor = search[1][0]
            coord_xy = (bbox[0:2] + bbox[2:4]) * 0.5
            coord_xy /= self.strides[best_detect]
            # Bug fix: np.int was removed in NumPy 1.24; the builtin int is
            # the documented replacement with identical semantics.
            coord_xy = coord_xy.astype(int)
            bboxes_label[best_detect][coord_xy[1], coord_xy[0], best_anchor, :4] = bbox
            bboxes_label[best_detect][coord_xy[1], coord_xy[0], best_anchor, 4:5] = 1.
            bboxes_label[best_detect][coord_xy[1], coord_xy[0], best_anchor, 5:] = labels[i, :]
        return [layer.reshape([layer.shape[0], layer.shape[1], -1]) for layer in bboxes_label]

    def _on_batch_start(self, idx, patience=10):
        """Every *patience* batches, re-sample the training image size."""
        if idx % patience == 0:
            self._image_size = np.random.choice(self.image_size)
            self._grid_size = self._image_size // self.strides
            if self.verbose:
                print('Change image size to', self._image_size)

    def on_epoch_end(self):
        np.random.shuffle(self.annotation)  # shuffle sample order each epoch
# NOTE(review): leftover smoke-test code — these statements run at import
# time and construct a Dataset from cfgs/custom.yaml as a side effect;
# consider guarding them with `if __name__ == "__main__":`.
from core.utils import decode_cfg, load_weights
cfg = decode_cfg("cfgs/custom.yaml")
train_dataset = Dataset(cfg)
setup.py | heylouiz/scrapy-sticky-meta-params | 2 | 12769220 | from setuptools import setup
from scrapy_sticky_meta_params import __version__
with open("README.md") as f:
readme = f.read()
setup(
name="scrapy-sticky-meta-params",
version=__version__,
license="MIT license",
description="A spider middleware that forwards meta params through subsequent requests.",
long_description=readme,
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/heylouiz/scrapy-sticky-meta-params",
packages=["scrapy_sticky_meta_params"],
platforms=["Any"],
keywords="scrapy meta middleware",
include_package_data=True,
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
install_requires=["Scrapy>=1.6.0"],
)
| 1.578125 | 2 |
changeInPoolSize.py | triesch/synaptic-competition | 0 | 12769221 | #!/usr/bin/env python
# Simple model of receptors diffusing in and out of synapses.
# Simulation of the Dynamcis with the Euler method.
# This simulates the effect of a sudden change in the pool size
#
# <NAME>, January-April 2017
import numpy as np
from matplotlib import pyplot as plt
# parameters
N = 3 # number of synapses
steps = 10000 # number of time steps to simulate
duration = 10.0 # duration in minutes
change_time = 2.0 # time at which number of pool size changes in minutes
ts = duration/steps # time step of the simulation
beta = 60.0/43.0 # transition rate out of slots in 1/min
delta = 1.0/14.0 # removal rate in 1/min
phi = 2.67 # relative pool size
F = 0.9 # set desired filling fraction
# initializations: the w_i and p are set to their steady state values
s = np.zeros(N) # slot counts per synapse: 40, 60, 80
for i in range(0,N):
    s[i] = 40.0 + i*20.0
S = sum(s)
gamma = delta*F*S*phi # production rate set to achieve desired p*
alpha = beta/(phi*S*(1-F)) # set alpha accordingly
P = gamma/delta # total number of receptors in steady state
# variables we want to keep track of to plot them at the end:
# 'u' stands for up-regulation and 'd' stands for down-regulation.
# Up- and down-regulation are simulated simultaneously.
pu = np.zeros(steps) # pool size
pd = np.zeros(steps)
wu = np.zeros([N,steps]) # synaptic weights
wd = np.zeros([N,steps])
ru = np.zeros(steps) # relative change of synaptic weights
rd = np.zeros(steps)
times = np.zeros(steps)
pu[0] = P
pd[0] = P
ru[0] = 1.0
rd[0] = 1.0
for i in range(0,N):
    wu[i,0] = F*s[i]
    wd[i,0] = F*s[i]
# simulation loop: forward Euler integration of the coupled w_i / p ODEs
for t in range(0, steps-1):
    if t==round(change_time/ts): # change pool size after some time
        pu[t]=2.0*P # double number of receptors in the pool
        pd[t]=0.0*P # set number of receptors in the pool to zero
    Wu = sum(wu[:,t])
    Wd = sum(wd[:,t])
    # receptors enter empty slots (s - w) at rate alpha*p, leave at rate beta
    wu[:,t+1] = wu[:,t] + ts * (alpha*pu[t] * (s-wu[:,t]) - beta*wu[:,t])
    wd[:,t+1] = wd[:,t] + ts * (alpha*pd[t] * (s-wd[:,t]) - beta*wd[:,t])
    # pool gains from unbinding and production, loses to binding and removal
    pu[t+1] = pu[t] + ts * (beta*Wu - alpha*pu[t]*(S-Wu) - delta*pu[t] + gamma)
    pd[t+1] = pd[t] + ts * (beta*Wd - alpha*pd[t]*(S-Wd) - delta*pd[t] + gamma)
    ru[t+1] = wu[0,t+1]/wu[0,0]*100.0
    rd[t+1] = wd[0,t+1]/wd[0,0]*100.0
    times[t+1] = ts*(t+1)
# show results
# Figure 4A: absolute synaptic weights (solid = up-, dotted = down-regulation)
f = plt.figure(figsize=(4,3))
font = {'family' : 'serif',
        'weight' : 'normal',
        'size'   : 12}
plt.rc('font', **font)
plt.rc('font', serif='Times New Roman')
plt.gca().set_prop_cycle(plt.cycler('color', ['blue', 'green', 'red']))
[line1, line2, line3] = plt.plot(times, np.transpose(wu))
plt.plot(times, np.transpose(wd), ls='dotted')
plt.legend((line3, line2, line1), (r'$w_3$', r'$w_2$', r'$w_1$'), loc=1, fontsize=12)
plt.xlabel(r'$t \; [{\rm min}]$', fontsize=12)
plt.ylabel(r'$w_i$', fontsize=12)
plt.title(r'$F=0.9$', fontsize=12)
plt.show()
f.savefig("Fig4A.pdf", bbox_inches='tight')
# Figure 4C: pool size over time
f2 = plt.figure(figsize=(4,3))
font = {'family' : 'serif',
        'weight' : 'normal',
        'size'   : 12}
plt.rc('font', **font)
plt.rc('font', serif='Times New Roman')
plt.plot(times, pu, "k")
plt.plot(times, pd, "k", ls='dotted')
plt.xlabel(r'$t \; [{\rm min}]$', fontsize=12)
plt.ylabel('pool size', fontsize=12)
plt.title(r'$F=0.9$', fontsize=12)
plt.show()
f2.savefig("Fig4C.pdf", bbox_inches='tight')
# Figure 4B: relative change of the first synaptic weight in percent
f3 = plt.figure(figsize=(4,3))
font = {'family' : 'serif',
        'weight' : 'normal',
        'size'   : 12}
plt.rc('font', **font)
plt.rc('font', serif='Times New Roman')
plt.plot(times, ru, "k")
plt.plot(times, rd, "k", ls='dotted')
plt.axis((0.0, 10.0, 40.0, 140.0))
plt.xlabel(r'$t \; [{\rm min}]$', fontsize=12)
plt.ylabel(r'$w_i(t)/w_i(0) \quad [\%]$', fontsize=12)
plt.title(r'$F=0.9$', fontsize=12)
plt.show()
f3.savefig("Fig4B.pdf", bbox_inches='tight')
| 3.296875 | 3 |
ava_asd/activitynet_evaluate.py | tuanchien/asd | 18 | 12769222 | <reponame>tuanchien/asd
# The MIT License (MIT)
#
# Copyright (c) 2015 ActivityNet
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""Compute active speaker detection performance for the AVA dataset.
Please send any questions about this code to the Google Group ava-dataset-users:
https://groups.google.com/forum/#!forum/ava-dataset-users
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import pandas as pd
def compute_average_precision(precision, recall):
  """Compute Average Precision according to the definition in VOCdevkit.

  Precision is modified to ensure that it does not decrease as recall
  decrease.

  Args:
    precision: A float [N, 1] numpy array of precisions
    recall: A float [N, 1] numpy array of recalls

  Raises:
    ValueError: if the input is not of the correct format

  Returns:
    average_precison: The area under the precision recall curve. NaN if
      precision and recall are None.
  """
  if precision is None:
    if recall is not None:
      raise ValueError("If precision is None, recall must also be None")
    # Bug fix: np.NAN alias was removed in NumPy 2.0; np.nan is canonical.
    return np.nan
  if not isinstance(precision, np.ndarray) or not isinstance(
      recall, np.ndarray):
    raise ValueError("precision and recall must be numpy array")
  # Bug fix: np.float alias was removed in NumPy 1.24; it was simply the
  # builtin float, so the check is unchanged semantically.
  if precision.dtype != float or recall.dtype != float:
    raise ValueError("input must be float numpy array.")
  if len(precision) != len(recall):
    raise ValueError("precision and recall must be of the same size.")
  if not precision.size:
    return 0.0
  if np.amin(precision) < 0 or np.amax(precision) > 1:
    raise ValueError("Precision must be in the range of [0, 1].")
  if np.amin(recall) < 0 or np.amax(recall) > 1:
    raise ValueError("recall must be in the range of [0, 1].")
  if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
    raise ValueError("recall must be a non-decreasing array")
  recall = np.concatenate([[0], recall, [1]])
  precision = np.concatenate([[0], precision, [0]])
  # Smooth precision to be monotonically decreasing.
  for i in range(len(precision) - 2, -1, -1):
    precision[i] = np.maximum(precision[i], precision[i + 1])
  # Sum rectangle areas at each point where recall changes.
  indices = np.where(recall[1:] != recall[:-1])[0] + 1
  average_precision = np.sum(
      (recall[indices] - recall[indices - 1]) * precision[indices])
  return average_precision
def load_csv(filename, column_names):
  """Loads CSV from the filename using given column names.

  Adds uid column.

  Args:
    filename: Path to the CSV file to load.
    column_names: A list of column names for the data.

  Returns:
    df: A Pandas DataFrame containing the data.
  """
  frame = pd.read_csv(filename, header=None, names=column_names)
  # The unique id combines the frame timestamp with the entity id.
  frame["uid"] = frame["frame_timestamp"].map(str) + ":" + frame["entity_id"]
  return frame
def eq(a, b, tolerance=1e-09):
  """Returns true if values are approximately equal."""
  difference = abs(a - b)
  return difference <= tolerance
def make_uids(df):
  """Adds the uid column (frame_timestamp:entity_id) to *df* in place."""
  df["uid"] = df["frame_timestamp"].map(str) + ":" + df["entity_id"]
def merge_groundtruth_and_predictions(df_groundtruth, df_predictions):
  """Merges groundtruth and prediction DataFrames.

  The returned DataFrame is merged on uid field and sorted in descending order
  by score field. Bounding boxes are checked to make sure they match between
  groundtruth and predictions.

  Args:
    df_groundtruth: A DataFrame with groundtruth data.
    df_predictions: A DataFrame with predictions data.

  Returns:
    df_merged: A merged DataFrame, with rows matched on uid column.
  """
  if df_groundtruth["uid"].count() != df_predictions["uid"].count():
    raise ValueError(
        "Groundtruth and predictions CSV must have the same number of "
        "unique rows.")
  # NOTE(review): this comparison relies on unique() returning exactly one
  # element; with several distinct labels the array-vs-list comparison is
  # ambiguous — confirm predictions always carry a single label.
  if df_predictions["label"].unique() != ["SPEAKING_AUDIBLE"]:
    raise ValueError(
        "Predictions CSV must contain only SPEAKING_AUDIBLE label.")
  if df_predictions["score"].count() < df_predictions["uid"].count():
    raise ValueError("Predictions CSV must contain score value for every row.")
  # Merges groundtruth and predictions on uid, validates that uid is unique
  # in both frames, and sorts the resulting frame by the predictions score.
  df_merged = df_groundtruth.merge(
      df_predictions,
      on="uid",
      suffixes=("_groundtruth", "_prediction"),
      validate="1:1").sort_values(
          by=["score"], ascending=False).reset_index()
  # Validates that bounding boxes in ground truth and predictions match for the
  # same uids (within the eq() float tolerance).
  df_merged["bounding_box_correct"] = np.where(
      eq(df_merged["entity_box_x1_groundtruth"],
         df_merged["entity_box_x1_prediction"])
      & eq(df_merged["entity_box_x2_groundtruth"],
           df_merged["entity_box_x2_prediction"])
      & eq(df_merged["entity_box_y1_groundtruth"],
           df_merged["entity_box_y1_prediction"])
      & eq(df_merged["entity_box_y2_groundtruth"],
           df_merged["entity_box_y2_prediction"]), True, False)
  if (~df_merged["bounding_box_correct"]).sum() > 0:
    raise ValueError(
        "Mismatch between groundtruth and predictions bounding boxes found at "
        + str(list(df_merged[~df_merged["bounding_box_correct"]]["uid"])))
  return df_merged
def get_all_positives(df_merged):
  """Counts all positive examples in the groundtruth dataset."""
  speaking = df_merged[df_merged["label_groundtruth"] == "SPEAKING_AUDIBLE"]
  return speaking["uid"].count()
def calculate_precision_recall(df_merged):
  """Calculates precision and recall arrays going through df_merged row-wise.

  Adds is_tp/tp/precision/recall columns to *df_merged* in place; rows are
  assumed to be sorted by descending score.
  """
  # Total positives in the groundtruth (denominator of recall).
  all_positives = df_merged[df_merged["label_groundtruth"] ==
                            "SPEAKING_AUDIBLE"]["uid"].count()
  true_positive = ((df_merged["label_groundtruth"] == "SPEAKING_AUDIBLE") &
                   (df_merged["label_prediction"] == "SPEAKING_AUDIBLE"))
  # 1 where this row is a true positive (at its score level), else 0.
  df_merged["is_tp"] = np.where(true_positive, 1, 0)
  # Running count of true positives up to and including each row.
  df_merged["tp"] = df_merged["is_tp"].cumsum()
  # Precision: true positives so far over the 1-based row index.
  df_merged["precision"] = df_merged["tp"] / (df_merged.index + 1)
  # Recall: true positives so far over all groundtruth positives.
  df_merged["recall"] = df_merged["tp"] / all_positives
  logging.info(
      "\n%s\n",
      df_merged.head(10)[[
          "uid", "score", "label_groundtruth", "is_tp", "tp", "precision",
          "recall"
      ]])
  return np.array(df_merged["precision"]), np.array(df_merged["recall"])
def run_evaluation(groundtruth, predictions):
  """Runs AVA Active Speaker evaluation, printing average precision result.

  Args:
    groundtruth: Path to the groundtruth CSV (no score column).
    predictions: Path to the predictions CSV (with a trailing score column).
  """
  df_groundtruth = load_csv(
      groundtruth,
      column_names=[
          "video_id", "frame_timestamp", "entity_box_x1", "entity_box_y1",
          "entity_box_x2", "entity_box_y2", "label", "entity_id"
      ])
  df_predictions = load_csv(
      predictions,
      column_names=[
          "video_id", "frame_timestamp", "entity_box_x1", "entity_box_y1",
          "entity_box_x2", "entity_box_y2", "label", "entity_id", "score"
      ])
  # Merge on uid (validating bounding boxes), then sweep the score-sorted
  # rows to build the precision/recall curve and integrate it.
  df_merged = merge_groundtruth_and_predictions(df_groundtruth, df_predictions)
  precision, recall = calculate_precision_recall(df_merged)
  print("average precision: ", compute_average_precision(precision, recall))
| 1.46875 | 1 |
utilities/dashboard/create_dashboard_data.py | covid-19-impact-lab/covidlab-utilities | 2 | 12769223 | <reponame>covid-19-impact-lab/covidlab-utilities<filename>utilities/dashboard/create_dashboard_data.py
from utilities.dashboard.components.intro_page.create_data import create_intro_page_data
from utilities.dashboard.components.run_charts.create_data import create_run_charts_data
from utilities.dashboard.components.boxplots.create_data import create_boxplots_data
from utilities.dashboard.components.univariate_distributions.create_data import (
create_univariate_distributions_data,
)
from utilities.dashboard.shared import create_general_variable_mappings
from utilities.dashboard.shared import get_menu_labels
def create_dashboard_data(
    data,
    data_name,
    language,
    data_desc=None,
    group_info=None,
    run_charts_desc=None,
    boxplots_desc=None,
    kde_cutoff=7,
    april_wave=None
):
    """Create a dict with all data needed to generate a dashboard component.

    Args:
        data (pd.DataFrame): The empirical dataset.
        data_desc (pd.DataFrame): Description of variables displayed in the
            univariate distributions dashboard tabs. Default is None.
        run_charts_desc (pd.DataFrame): Description of variables displayed in
            the run charts dashboard tab. Default is None.
        boxplots_desc (pd.DataFrame): Description of variables displayed in
            the boxplots dashboard tab. Default is None.
        group_info (pd.DataFrame): Description of groups, as defined for
            univariate distributions dashboard tabs. Default is None.
        data_name (str): "liss".
        language (str): One of ["english", "german"]
        kde_cutoff (int): Currently unused; kept for interface stability.
        april_wave (str): "yes" if the data is april wave data for the
            univariate distributions: april dashboard tab. Default is None.

    Returns:
        dict: Dictionary whose entries depend on the pd.DataFrame(s) passed.
    """
    shared_data = _create_shared_dashboad_data(
        data=data,
        data_desc=data_desc,
        run_charts_desc=run_charts_desc,
        boxplots_desc=boxplots_desc,
        group_info=group_info,
        language=language,
        data_name=data_name,
    )
    variable_mappings = shared_data["variable_mappings"]
    menu_labels = shared_data["menu_labels"]
    # Bug fix: `res` used to be re-created inside each branch, so passing
    # more than one description silently discarded earlier components, and
    # the univariate result was referenced unconditionally (NameError when
    # only run_charts_desc / boxplots_desc were given).  Components are now
    # accumulated in a single dict.
    res = {"shared_data": shared_data}
    if group_info is not None:
        groups = _get_groups(group_info, language)
    if data_desc is not None:
        univariate_distributions_data = create_univariate_distributions_data(
            data=data,
            variable_mappings=variable_mappings,
            nice_names=variable_mappings["variable_to_nice_name"],
            groups=groups,
            group_info=group_info,
            menu_labels=menu_labels,
            language=language,
            april_wave=april_wave,
        )
        res["intro_page_data"] = create_intro_page_data(language, data_name)
        res["univariate_distributions_data"] = univariate_distributions_data
    if run_charts_desc is not None:
        run_charts_data = create_run_charts_data(
            data=data,
            variable_mappings=variable_mappings,
            nice_names=variable_mappings["nice_names_run_charts"],
            language=language,
        )
        # "mapping" is the key the run-charts/boxplots components expect.
        res["mapping"] = shared_data
        res["run_charts_data"] = run_charts_data
    if boxplots_desc is not None:
        boxplots_data = create_boxplots_data(
            data=data,
            variable_mappings=variable_mappings,
            nice_names=variable_mappings["nice_names_boxplots"],
            language=language
        )
        res["mapping"] = shared_data
        res["boxplots_data"] = boxplots_data
    return res
def _get_groups(group_info, language):
"""Get variables' group from `group_info`, given language.
Args:
group_info (pd.DataFrame): Description of groups, as defined for
univariate distributions dashboard tabs. Default is None.
language (str): One of ["english", "german"].
Returns:
list: List of groups.
"""
raw_groups = group_info[f"group_{language}"].unique().tolist() # noqa
bg_var_groups = ["Background Overview", "Background Correlation"]
groups = [group for group in raw_groups if group not in bg_var_groups]
return groups
def _create_shared_dashboad_data(
    data, data_desc, run_charts_desc, boxplots_desc, group_info, language, data_name
):
    """Create dashboard data that will be used by all components.

    (The "dashboad" typo in the name is kept because callers use it.)

    Args:
        data (pd.DataFrame): The empirical dataset.
        data_desc (pd.DataFrame): Description of variables displayed in the
            univariate distributions dashboard tabs. May be None.
        run_charts_desc (pd.DataFrame): Description of variables displayed in
            the run charts dashboard tab. May be None.
        boxplots_desc (pd.DataFrame): Description of variables displayed in
            the boxplots dashboard tab. May be None.
        group_info (pd.DataFrame): Description of groups. May be None.
        language (str): One of ["english", "german"].
        data_name (str): "liss".

    Returns:
        dict: with keys "language", "variable_mappings" and "menu_labels".
    """
    mappings = create_general_variable_mappings(
        data=data,
        data_desc=data_desc,
        run_charts_desc=run_charts_desc,
        boxplots_desc=boxplots_desc,
        group_info=group_info,
        language=language,
        data_name=data_name,
    )
    return {
        "language": language,
        "variable_mappings": mappings,
        "menu_labels": get_menu_labels(language),
    }
| 2.21875 | 2 |
dreimac/circularcoords.py | ErnstRoell/ImageAnalysis | 12 | 12769224 | import subprocess
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
import scipy
from scipy.sparse.linalg import lsqr
import time
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from matplotlib.widgets import Slider, RadioButtons
from .geomtools import *
from .emcoords import *
from ripser import ripser
import warnings
"""#########################################
Main Circular Coordinates Class
#########################################"""
SCATTER_SIZE = 50
class CircularCoords(EMCoords):
    """Circular coordinates via persistent cohomology of sparse filtrations."""

    def __init__(self, X, n_landmarks, distance_matrix=False, prime=41, maxdim=1, verbose=False):
        """
        Parameters
        ----------
        X: ndarray(N, d)
            A point cloud with N points in d dimensions
        n_landmarks: int
            Number of landmarks to use
        distance_matrix: boolean
            If true, treat X as a distance matrix instead of a point cloud
        prime : int
            Field coefficient with which to compute rips on landmarks
        maxdim : int
            Maximum dimension of homology. Only dimension 1 is needed for circular coordinates,
            but it may be of interest to see other dimensions (e.g. for a torus)
        verbose : boolean
            Whether to print progress information
        """
        EMCoords.__init__(self, X, n_landmarks, distance_matrix, prime, maxdim, verbose)
        self.type_ = "circ"

    def get_coordinates(self, perc = 0.99, do_weighted = False, cocycle_idx = [0], partunity_fn = partunity_linear):
        """
        Perform circular coordinates via persistent cohomology of
        sparse filtrations (sparse circular coordinates, 2018)

        Parameters
        ----------
        perc : float
            Percent coverage
        do_weighted : boolean
            Whether to make a weighted cocycle on the representatives
        cocycle_idx : list
            Add the cocycles together in this list
        partunity_fn: (dist_land_data, r_cover) -> phi
            A function from the distances of each landmark to a bump function

        Returns
        -------
        thetas: ndarray(N)
            Circular coordinate (in [0, 2*pi)) for each data point
        """
        ## Step 1: Come up with the representative cocycle as a formal sum
        ## of the chosen cocycles
        n_landmarks = self.n_landmarks_
        n_data = self.X_.shape[0]
        dgm1 = self.dgms_[1]/2.0 #Need so that Cech is included in rips
        cohomdeath = -np.inf
        cohombirth = np.inf
        cocycle = np.zeros((0, 3))
        prime = self.prime_
        for k in range(len(cocycle_idx)):
            cocycle = add_cocycles(cocycle, self.cocycles_[1][cocycle_idx[k]], p=prime)
            cohomdeath = max(cohomdeath, dgm1[cocycle_idx[k], 0])
            cohombirth = min(cohombirth, dgm1[cocycle_idx[k], 1])

        ## Step 2: Determine radius for balls
        dist_land_data = self.dist_land_data_
        dist_land_land = self.dist_land_land_
        coverage = np.max(np.min(dist_land_data, 1))
        r_cover = (1-perc)*max(cohomdeath, coverage) + perc*cohombirth
        self.r_cover_ = r_cover # Store covering radius for reference
        if self.verbose:
            print("r_cover = %.3g"%r_cover)

        ## Step 3: Setup coboundary matrix, delta_0, for Cech_{r_cover }
        ## and use it to find a projection of the cocycle
        ## onto the image of delta0

        # Lift to integer cocycle: map field values > (p-1)/2 to their
        # negative representatives so the lift is balanced around 0
        val = np.array(cocycle[:, 2])
        val[val > (prime-1)/2] -= prime
        Y = np.zeros((n_landmarks, n_landmarks))
        Y[cocycle[:, 0], cocycle[:, 1]] = val
        Y = Y + Y.T
        #Select edges that are under the threshold
        [I, J] = np.meshgrid(np.arange(n_landmarks), np.arange(n_landmarks))
        I = I[np.triu_indices(n_landmarks, 1)]
        J = J[np.triu_indices(n_landmarks, 1)]
        Y = Y[np.triu_indices(n_landmarks, 1)]
        idx = np.arange(len(I))
        idx = idx[dist_land_land[I, J] < 2*r_cover]
        I = I[idx]
        J = J[idx]
        Y = Y[idx]
        NEdges = len(I)
        R = np.zeros((NEdges, 2))
        R[:, 0] = J
        R[:, 1] = I
        #Make a flat array of NEdges weights parallel to the rows of R
        if do_weighted:
            W = dist_land_land[I, J]
        else:
            W = np.ones(NEdges)
        delta0 = make_delta0(R)
        wSqrt = np.sqrt(W).flatten()
        WSqrt = scipy.sparse.spdiags(wSqrt, 0, len(W), len(W))
        A = WSqrt*delta0
        b = WSqrt.dot(Y)
        # Weighted least-squares projection of the lifted cocycle
        tau = lsqr(A, b)[0]
        theta = np.zeros((NEdges, 3))
        theta[:, 0] = J
        theta[:, 1] = I
        theta[:, 2] = -delta0.dot(tau)
        theta = add_cocycles(cocycle, theta, real=True)

        ## Step 4: Create the open covering U = {U_1,..., U_{s+1}} and partition of unity
        U = dist_land_data < r_cover
        phi = np.zeros_like(dist_land_data)
        phi[U] = partunity_fn(dist_land_data[U], r_cover)
        # Compute the partition of unity
        # varphi_j(b) = phi_j(b)/(phi_1(b) + ... + phi_{n_landmarks}(b))
        denom = np.sum(phi, 0)
        nzero = np.sum(denom == 0)
        if nzero > 0:
            warnings.warn("There are %i point not covered by a landmark"%nzero)
            denom[denom == 0] = 1
        varphi = phi / denom[None, :]
        # To each data point, associate the index of the first open set it belongs to
        ball_indx = np.argmax(U, 0)

        ## Step 5: From U_1 to U_{s+1} - (U_1 \cup ... \cup U_s), apply classifying map
        # compute all transition functions
        theta_matrix = np.zeros((n_landmarks, n_landmarks))
        I = np.array(theta[:, 0], dtype = np.int64)
        J = np.array(theta[:, 1], dtype = np.int64)
        theta = theta[:, 2]
        # Wrap transition angles into [-0.5, 0.5)
        theta = np.mod(theta + 0.5, 1) - 0.5
        theta_matrix[I, J] = theta
        theta_matrix[J, I] = -theta
        class_map = -tau[ball_indx]
        for i in range(n_data):
            class_map[i] += theta_matrix[ball_indx[i], :].dot(varphi[:, i])
        thetas = np.mod(2*np.pi*class_map, 2*np.pi)
        return thetas

    def update_colors(self):
        """Recolor the dimension-reduced scatterplot by the current
        circular coordinates (or reset to a flat color when no cocycle
        is selected)."""
        if len(self.selected) > 0:
            idxs = np.array(list(self.selected))
            self.selected_plot.set_offsets(self.dgm1_lifetime[idxs, :])
            ## Step 2: Update circular coordinates on point cloud
            thetas = self.coords
            c = plt.get_cmap('magma_r')
            # Normalize coordinates to [0, 255] colormap indices
            thetas -= np.min(thetas)
            thetas /= np.max(thetas)
            thetas = np.array(np.round(thetas*255), dtype=int)
            C = c(thetas)
            if self.Y.shape[1] == 2:
                self.coords_scatter.set_color(C)
            else:
                # 3D scatterplots do not expose set_color the same way,
                # so write the internal color arrays directly
                self.coords_scatter._facecolor3d = C
                self.coords_scatter._edgecolor3d = C
        else:
            self.selected_plot.set_offsets(np.zeros((0, 2)))
            if self.Y.shape[1] == 2:
                self.coords_scatter.set_color('C0')
            else:
                self.coords_scatter._facecolor3d = 'C0'
                self.coords_scatter._edgecolor3d = 'C0'

    def recompute_coords_dimred(self, clicked = []):
        """
        Toggle including a cocycle from a set of points in the
        persistence diagram, and update the circular coordinates
        colors accordingly

        Parameters
        ----------
        clicked: list of int
            Indices to toggle
        """
        EMCoords.recompute_coords(self, clicked)
        self.update_colors()

    def onpick_dimred(self, evt):
        """Pick-event handler for the dimension-reduced plot: toggle the
        clicked persistence dots and redraw."""
        if evt.artist == self.dgmplot:
            ## Step 1: Highlight point on persistence diagram
            clicked = set(evt.ind.tolist())
            self.recompute_coords_dimred(clicked)
            self.ax_persistence.figure.canvas.draw()
            self.ax_coords.figure.canvas.draw()
        return True

    def on_perc_slider_move_dimred(self, evt):
        """React to a coverage-slider change by recomputing coordinates."""
        self.recompute_coords_dimred()

    def on_partunity_selector_change_dimred(self, evt):
        """React to a partition-of-unity change by recomputing coordinates."""
        self.recompute_coords_dimred()

    def plot_dimreduced(self, Y, using_jupyter = True, init_params = {'cocycle_idxs':[], 'perc':0.99, 'partunity_fn':partunity_linear, 'azim':-60, 'elev':30}, dpi=None):
        """
        Do an interactive plot of circular coordinates, coloring a dimension
        reduced version of the point cloud by the circular coordinates

        Parameters
        ----------
        Y: ndarray(N, d)
            A 2D point cloud with the same number of points as X
        using_jupyter: boolean
            Whether this is an interactive plot in jupyter
        init_params: dict
            The intial parameters. Optional fields of the dictionary are as follows:
            {
                cocycle_idxs: list of int
                    A list of cocycles to start with
                u: ndarray(3, float)
                    The initial stereographic north pole
                perc: float
                    The percent coverage to start with
                partunity_fn: (dist_land_data, r_cover) -> phi
                    The partition of unity function to start with
                azim: float
                    Initial azimuth for 3d plots
                elev: float
                    Initial elevation for 3d plots
            }
        dpi: int
            Dot pixels per inch
        """
        if Y.shape[1] < 2 or Y.shape[1] > 3:
            raise Exception("Dimension reduced version must be in 2D or 3D")
        self.Y = Y
        if using_jupyter and in_notebook():
            import matplotlib
            matplotlib.use("nbAgg")
        if not dpi:
            dpi = compute_dpi(2, 1)
        fig = plt.figure(figsize=(DREIMAC_FIG_RES*2, DREIMAC_FIG_RES), dpi=dpi)
        ## Step 1: Plot H1
        self.ax_persistence = fig.add_subplot(121)
        self.setup_ax_persistence(y_compress=1.37)
        fig.canvas.mpl_connect('pick_event', self.onpick_dimred)
        self.selected = set([])

        ## Step 2: Setup window for choosing coverage / partition of unity type
        ## and for displaying the chosen cocycle
        self.perc_slider, self.partunity_selector, self.selected_cocycle_text, _ = EMCoords.setup_param_chooser_gui(self, fig, 0.25, 0.75, 0.4, 0.5, init_params)
        self.perc_slider.on_changed(self.on_perc_slider_move_dimred)
        self.partunity_selector.on_clicked(self.on_partunity_selector_change_dimred)

        ## Step 3: Setup axis for coordinates
        if Y.shape[1] == 3:
            self.ax_coords = fig.add_subplot(122, projection='3d')
            self.coords_scatter = self.ax_coords.scatter(Y[:, 0], Y[:, 1], Y[:, 2], s=SCATTER_SIZE, cmap='magma_r')
            set_3dplot_equalaspect(self.ax_coords, Y)
            if 'azim' in init_params:
                self.ax_coords.azim = init_params['azim']
            if 'elev' in init_params:
                self.ax_coords.elev = init_params['elev']
        else:
            self.ax_coords = fig.add_subplot(122)
            self.coords_scatter = self.ax_coords.scatter(Y[:, 0], Y[:, 1], s=SCATTER_SIZE, cmap='magma_r')
            self.ax_coords.set_aspect('equal')
        self.ax_coords.set_title("Dimension Reduced Point Cloud")
        if len(init_params['cocycle_idxs']) > 0:
            # If some initial cocycle indices were chosen, update
            # the plot
            self.recompute_coords_dimred(init_params['cocycle_idxs'])
        plt.show()

    def get_selected_dimreduced_info(self):
        """
        Return information about what the user selected and their viewpoint in
        the interactive dimension reduced plot

        Returns
        -------
        {
            'partunity_fn': (dist_land_data, r_cover) -> phi
                The selected function handle for the partition of unity
            'cocycle_idxs':ndarray(dtype = int)
                Indices of the selected cocycles,
            'perc': float
                The selected percent coverage,
            'azim':float
                Azumith if viewing in 3D
            'elev':float
                Elevation if viewing in 3D
        }
        """
        ret = EMCoords.get_selected_info(self)
        if self.Y.shape[1] == 3:
            ret['azim'] = self.ax_coords.azim
            ret['elev'] = self.ax_coords.elev
        return ret

    def update_plot_torii(self, circ_idx):
        """
        Update a joint plot of circular coordinates, switching between
        2D and 3D modes if necessary

        Parameters
        ----------
        circ_idx: int
            Index of the circular coordinates that have
            been updated
        """
        N = self.plots_in_one
        n_plots = len(self.plots)
        ## Step 1: Figure out the index of the involved plot
        plot_idx = int(np.floor(circ_idx/N))
        plot = self.plots[plot_idx]

        ## Step 2: Extract the circular coordinates from all
        ## plots that have at least one cochain representative selected
        labels = []
        coords = []
        for i in range(N):
            idx = plot_idx*N + i
            c_info = self.coords_info[idx]
            if len(c_info['selected']) > 0:
                # Only include circular coordinates that have at least
                # one persistence dot selected
                coords.append(c_info['coords'])
                labels.append("Coords {}".format(idx))

        ## Step 3: Adjust the plot accordingly
        if len(labels) > 0:
            X = np.array([])
            if len(labels) == 1:
                # Just a single coordinate; put it on a circle
                coords = np.array(coords).flatten()
                X = np.array([np.cos(coords), np.sin(coords)]).T
            else:
                X = np.array(coords).T
            updating_axes = False
            if X.shape[1] == 3 and plot['axis_2d']:
                # Need to switch from 2D to 3D coordinates
                self.fig.delaxes(plot['ax'])
                plot['axis_2d'] = False
                updating_axes = True
            elif X.shape[1] == 2 and not plot['axis_2d']:
                # Need to switch from 3D to 2D coordinates
                self.fig.delaxes(plot['ax'])
                plot['axis_2d'] = True
                updating_axes = True
            if X.shape[1] == 3:
                if updating_axes:
                    plot['ax'] = self.fig.add_subplot(2, n_plots+1, n_plots+3+plot_idx, projection='3d')
                    plot['coords_scatter'] = plot['ax'].scatter(X[:, 0], X[:, 1], X[:, 2], s=SCATTER_SIZE, c=self.coords_colors)
                    plot['ax'].set_title('Joint 3D Plot')
                else:
                    plot['coords_scatter'].set_offsets(X)
                set_pi_axis_labels(plot['ax'], labels)
            else:
                if updating_axes:
                    plot['ax'] = self.fig.add_subplot(2, n_plots+1, n_plots+3+plot_idx)
                    plot['coords_scatter'] = plot['ax'].scatter(X[:, 0], X[:, 1], s=SCATTER_SIZE, c=self.coords_colors)
                else:
                    plot['coords_scatter'].set_offsets(X)
                if len(labels) > 1:
                    set_pi_axis_labels(plot['ax'], labels)
                    plot['ax'].set_title('Joint 2D Plot')
                else:
                    plot['ax'].set_xlabel('')
                    plot['ax'].set_xlim([-1.1, 1.1])
                    plot['ax'].set_ylabel('')
                    plot['ax'].set_ylim([-1.1, 1.1])
                    plot['ax'].set_title(labels[0])
        else:
            # Nothing selected: move all points off-screen.
            # NOTE(review): set_offsets with an Nx3 array on a 3D scatter
            # looks suspect (matplotlib expects Nx2) — confirm on the
            # targeted matplotlib version.
            X = np.array([])
            if plot['axis_2d']:
                X = -2*np.ones((self.X_.shape[0], 2))
            else:
                X = -2*np.ones((self.X_.shape[0], 3))
            plot['coords_scatter'].set_offsets(X)

    def recompute_coords_torii(self, clicked = []):
        """
        Toggle including a cocycle from a set of points in the
        persistence diagram, and update the circular coordinates
        joint torii plots accordingly

        Parameters
        ----------
        clicked: list of int
            Indices to toggle
        """
        EMCoords.recompute_coords(self, clicked)
        # Save away circular coordinates
        self.coords_info[self.selected_coord_idx]['selected'] = self.selected
        self.coords_info[self.selected_coord_idx]['coords'] = self.coords
        self.update_plot_torii(self.selected_coord_idx)

    def onpick_torii(self, evt):
        """
        Handle a pick even for the torii plot
        """
        if evt.artist == self.dgmplot:
            ## Step 1: Highlight point on persistence diagram
            clicked = set(evt.ind.tolist())
            self.recompute_coords_torii(clicked)
            self.ax_persistence.figure.canvas.draw()
            self.fig.canvas.draw()
        return True

    def select_torii_coord(self, idx):
        """
        Select a particular circular coordinate plot and un-select others

        Parameters
        ----------
        idx: int
            Index of the plot to select
        """
        for i, coordsi in enumerate(self.coords_info):
            if i == idx:
                self.selected_coord_idx = idx
                coordsi = self.coords_info[idx]
                # Swap in the appropriate GUI objects for selection
                self.selected = coordsi['selected']
                self.selected_cocycle_text = coordsi['selected_cocycle_text']
                self.perc_slider = coordsi['perc_slider']
                self.partunity_selector = coordsi['partunity_selector']
                self.persistence_text_labels = coordsi['persistence_text_labels']
                self.coords = coordsi['coords']
                coordsi['button'].color = 'red'
                for j in np.array(list(self.selected)):
                    self.persistence_text_labels[j].set_text("%i"%j)
                idxs = np.array(list(self.selected), dtype=int)
                if idxs.size > 0:
                    self.selected_plot.set_offsets(self.dgm1_lifetime[idxs, :])
                else:
                    self.selected_plot.set_offsets(np.array([[np.nan]*2]))
            else:
                coordsi['button'].color = 'gray'
        self.ax_persistence.set_title("H1 Cocycle Selection: Coordinate {}".format(idx))

    def on_perc_slider_move_torii(self, evt, idx):
        """
        React to a change in coverage
        a particular circular coordinate, and recompute the
        coordinates if they aren't trivial
        """
        if not self.selected_coord_idx == idx:
            self.select_torii_coord(idx)
        if len(self.selected) > 0:
            self.recompute_coords_torii()

    def on_partunity_selector_change_torii(self, evt, idx):
        """
        React to a change in partition of unity type for
        a particular circular coordinate, and recompute the
        coordinates if they aren't trivial
        """
        if not self.selected_coord_idx == idx:
            self.select_torii_coord(idx)
        # Bug fix: was `self.selectd`, which raised AttributeError
        if len(self.selected) > 0:
            self.recompute_coords_torii()

    def on_click_torii_button(self, evt, idx):
        """
        React to a click event, and change the selected
        circular coordinate if necessary
        """
        if not self.selected_coord_idx == idx:
            self.select_torii_coord(idx)

    def plot_torii(self, f, using_jupyter=True, zoom=1, dpi=None, coords_info=2, plots_in_one = 2, lowerleft_plot = None, lowerleft_3d=False):
        """
        Do an interactive plot of circular coordinates, where points are drawn on S1,
        on S1 x S1, or S1 x S1 x S1

        Parameters
        ----------
        f: Display information for the points
            On of three options:
            1) A scalar function with which to color the points, represented
               as a 1D array
            2) A list of colors with which to color the points, specified as
               an Nx3 array
            3) A list of images to place at each location
        using_jupyter: boolean
            Whether this is an interactive plot in jupyter
        zoom: int
            If using patches, the factor by which to zoom in on them
        dpi: int
            Dot pixels per inch
        coords_info: Information about how to perform circular coordinates. There will
            be as many plots as the ceil of the number of circular coordinates, and
            they will be plotted pairwise.
            This parameter is one of two options
            1) An int specifying the number of different circular coordinate
               functions to compute
            2) A list of dictionaries with pre-specified initial parameters for
               each circular coordinate. Each dictionary has the following keys:
               {
                   'cocycle_reps': dictionary
                       A dictionary of cocycle representatives, with the key
                       as the cocycle index, and the value as the coefficient
                       TODO: Finish update to support this instead of a set
                   'perc': float
                       The percent coverage to start with,
                   'partunity_fn': (dist_land_data, r_cover) -> phi
                       The partition of unity function to start with
               }
        plots_in_one: int
            The max number of circular coordinates to put in one plot
        lowerleft_plot: function(matplotlib axis)
            A function that plots something in the lower left
        lowerleft_3d: boolean
            Whether the lower left plot is 3D
        """
        if plots_in_one < 2 or plots_in_one > 3:
            raise Exception("Cannot be fewer than 2 or more than 3 circular coordinates in one plot")
        self.plots_in_one = plots_in_one
        self.f = f

        ## Step 1: Figure out how many plots are needed to accommodate all
        ## circular coordinates
        n_plots = 1
        if type(coords_info) is int:
            n_plots = int(np.ceil(coords_info/plots_in_one))
            coords_info = []
        else:
            n_plots = int(np.ceil(len(coords_info)/plots_in_one))
        # Pad coords_info with default parameters so every slot is filled
        while len(coords_info) < n_plots*plots_in_one:
            coords_info.append({'selected':set([]), 'perc':0.99, 'partunity_fn':partunity_linear})
        self.selecting_idx = 0 # Index of circular coordinate which is currently being selected
        if using_jupyter and in_notebook():
            import matplotlib
            matplotlib.use("nbAgg")
        if not dpi:
            dpi = compute_dpi(n_plots+1, 2)
        fig = plt.figure(figsize=(DREIMAC_FIG_RES*(n_plots+1), DREIMAC_FIG_RES*2), dpi=dpi)
        self.dpi = dpi
        self.fig = fig

        ## Step 2: Setup H1 plot, along with initially empty text labels
        ## for each persistence point
        self.ax_persistence = fig.add_subplot(2, n_plots+1, 1)
        self.setup_ax_persistence()
        fig.canvas.mpl_connect('pick_event', self.onpick_torii)

        ## Step 2: Setup windows for choosing coverage / partition of unity type
        ## and for displaying the chosen cocycle for each circular coordinate.
        ## Also store variables for selecting cocycle representatives
        width = 1/(n_plots+1)
        height = 1/plots_in_one
        partunity_keys = tuple(PARTUNITY_FNS.keys())
        for i in range(n_plots):
            xstart = width*(i+1.4)
            for j in range(plots_in_one):
                idx = i*plots_in_one+j
                # Setup plots and state for a particular circular coordinate
                ystart = 0.8 - 0.4*height*j
                coords_info[idx]['perc_slider'], coords_info[idx]['partunity_selector'], coords_info[idx]['selected_cocycle_text'], coords_info[idx]['button'] = self.setup_param_chooser_gui(fig, xstart, ystart, width, height, coords_info[idx], idx)
                coords_info[idx]['perc_slider'].on_changed(callback_factory(self.on_perc_slider_move_torii, idx))
                # Bug fix: this previously *assigned* the callback to the
                # on_clicked attribute (shadowing the method) instead of
                # registering it, so partition-of-unity changes were ignored.
                coords_info[idx]['partunity_selector'].on_clicked(callback_factory(self.on_partunity_selector_change_torii, idx))
                coords_info[idx]['button'].on_clicked(callback_factory(self.on_click_torii_button, idx))
                dgm = self.dgm1_lifetime
                coords_info[idx]['persistence_text_labels'] = [self.ax_persistence.text(dgm[i, 0], dgm[i, 1], '') for i in range(dgm.shape[0])]
                coords_info[idx]['idx'] = idx
                coords_info[idx]['coords'] = np.zeros(self.X_.shape[0])
        self.coords_info = coords_info

        ## Step 3: Figure out colors of coordinates
        self.coords_colors = None
        if not (type(f) is list):
            # Figure out colormap if images aren't passed along
            self.coords_colors = f
            if f.size == self.X_.shape[0]:
                # Scalar function, so need to apply colormap
                c = plt.get_cmap('magma_r')
                fscaled = f - np.min(f)
                fscaled = fscaled/np.max(fscaled)
                C = c(np.array(np.round(fscaled*255), dtype=np.int32))
                self.coords_colors = C[:, 0:3]

        ## Step 4: Setup plots
        plots = []
        self.n_plots = n_plots
        for i in range(n_plots):
            # 2D by default, but can change to 3D later
            ax = fig.add_subplot(2, n_plots+1, n_plots+3+i)
            pix = -2*np.ones(self.X_.shape[0])
            plot = {}
            plot['ax'] = ax
            plot['coords_scatter'] = ax.scatter(pix, pix, s=SCATTER_SIZE, c=self.coords_colors) # Scatterplot for circular coordinates
            ax.set_xlim([-1.1, 1.1])
            ax.set_ylim([-1.1, 1.1])
            plot['axis_2d'] = True
            plot['patch_boxes'] = [] # Array of image patch display objects
            plots.append(plot)
        self.plots = plots

        ## Step 5: Initialize plots with information passed along
        for i in reversed(range(len(coords_info))):
            self.select_torii_coord(i)
            self.recompute_coords_torii([])

        ## Step 6: Plot something in the lower left corner if desired
        if lowerleft_plot:
            if lowerleft_3d:
                ax = fig.add_subplot(2, n_plots+1, n_plots+2, projection='3d')
            else:
                ax = fig.add_subplot(2, n_plots+1, n_plots+2)
            lowerleft_plot(ax)

        plt.show()
def do_two_circle_test():
    """
    Test interactive plotting with two noisy circles of different sizes.
    """
    prime = 41
    np.random.seed(2)
    N = 500
    X = np.zeros((N*2, 2))
    # Non-uniform angular sampling so the coordinate is non-trivial
    t = np.linspace(0, 1, N+1)[0:N]**1.2
    t = 2*np.pi*t
    # First circle: radius 1 centered at the origin
    X[0:N, 0] = np.cos(t)
    X[0:N, 1] = np.sin(t)
    # Second circle: radius 2 centered at (4, 4)
    X[N::, 0] = 2*np.cos(t) + 4
    X[N::, 1] = 2*np.sin(t) + 4
    perm = np.random.permutation(X.shape[0])
    X = X[perm, :]
    X = X + 0.2*np.random.randn(X.shape[0], 2)
    # Color each point by its angle along its circle
    f = np.concatenate((t, t + np.max(t)))
    f = f[perm]
    # (Removed dead code: a colormap array was computed here but only
    # used by a commented-out scatter call.)
    cc = CircularCoords(X, 100, prime = prime)
    #cc.plot_dimreduced(X, using_jupyter=False)
    cc.plot_torii(f, coords_info=2, plots_in_one=3)
def do_torus_test():
    """
    Test interactive plotting with a torus.
    """
    prime = 41
    np.random.seed(2)
    n_points = 10000
    major_radius = 5
    minor_radius = 2
    X = np.zeros((n_points, 3))
    # Sample the two torus angles uniformly at random
    s = np.random.rand(n_points)*2*np.pi
    t = np.random.rand(n_points)*2*np.pi
    X[:, 0] = (major_radius + minor_radius*np.cos(s))*np.cos(t)
    X[:, 1] = (major_radius + minor_radius*np.cos(s))*np.sin(t)
    X[:, 2] = minor_radius*np.sin(s)
    cc = CircularCoords(X, 100, prime=prime)
    f = s  # color by the angle around the torus tube

    def plot_torus(ax):
        """Draw the raw torus point cloud in the lower-left panel."""
        ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=f, cmap='magma_r')
        set_3dplot_equalaspect(ax, X)

    cc.plot_torii(f, coords_info=2, plots_in_one=2, lowerleft_plot=plot_torus, lowerleft_3d=True)
bam2fastx/fasta.py | czbiohub/bam2fastx | 0 | 12769225 | # Import modified 'os' module with LC_LANG set so click doesn't complain
from .os_utils import os # noqa: F401
from collections import defaultdict
import click
DELIMITER = "X"
FASTA_PREFIX = "aligned_sequences"
CELL_BARCODE = 'CB'
UMI = 'UB'
BAM_FILENAME = 'possorted_genome_bam.bam'
BARCODES_TSV = 'barcodes.tsv'
def read_single_column(filename):
    """Read a single-column text file (e.g. 10x barcodes.tsv) into a set.

    Parameters
    ----------
    filename : str
        Path to a file with one value per line.

    Returns
    -------
    set of str
        The stripped, de-duplicated lines of the file.
    """
    with open(filename) as handle:
        return {line.strip() for line in handle}
def read_10x_folder(folder):
    """Get QC-pass barcodes, genes, and bam file from a 10x folder

    Parameters
    ----------
    folder : str
        Name of a 10x cellranger output folder containing
        'possorted_genome_bam.bam' and 'barcodes.tsv' files

    Returns
    -------
    barcodes : set
        QC-passing barcodes from 'barcodes.tsv'
    bam_file : bamnostic.AlignmentFile
        Iterator over possorted_genome_bam.bam file
    """
    # Imported lazily so the module can be used without bamnostic installed
    import bamnostic as bs

    qc_pass_barcodes = read_single_column(os.path.join(folder, BARCODES_TSV))
    alignments = bs.AlignmentFile(os.path.join(folder, BAM_FILENAME), mode='rb')
    return qc_pass_barcodes, alignments
def _pass_alignment_qc(alignment, barcodes):
    """Return True for a high-quality alignment with a QC-pass barcode and a UMI.

    An alignment passes when it is uniquely mapped (mapq 255), carries a
    cell barcode tag whose value is in `barcodes`, and carries a UMI tag.
    """
    if alignment.mapq != 255:
        return False
    if CELL_BARCODE not in alignment.tags:
        return False
    if alignment.get_tag(CELL_BARCODE) not in barcodes:
        return False
    return UMI in alignment.tags
def _parse_barcode_renamer(barcodes, barcode_renamer):
"""
:param barcodes:
:param barcode_renamer:
:return:
"""
if barcode_renamer is not None:
renamer = {}
with open(barcode_renamer) as f:
for line in f.readlines():
barcode, renamed = line.split()
assert barcode in barcodes
renamer[barcode] = renamed
else:
renamer = dict(zip(barcodes, barcodes))
return renamer
def barcode_iterator(bam, barcodes, barcode_renamer):
    """Yield a (barcode, list of str) pair for each QC-pass barcode.

    Parameters
    ----------
    bam : iterable of alignments
        Alignments to group (e.g. a bamnostic.AlignmentFile).
    barcodes : iterable of str
        QC-pass barcodes.
    barcode_renamer : str or None
        Path of a file mapping barcodes to display names, or None.

    Yields
    ------
    (str, list of str)
        Renamed barcode and the aligned sequences observed for it.
        Yields nothing when no alignment passes QC.
    """
    bam_filtered = (x for x in bam if _pass_alignment_qc(x, barcodes))
    renamer = _parse_barcode_renamer(barcodes, barcode_renamer)
    # alignments only have a CELL_BARCODE tag if they passed QC, so sorting
    # by it groups each cell's alignments together
    bam_sort_by_barcode = sorted(bam_filtered,
                                 key=lambda x: x.get_tag(CELL_BARCODE))
    previous_barcode = None
    barcode_alignments = []
    for alignment in bam_sort_by_barcode:
        # Get barcode of alignment, looks like "AAATGCCCAAACTGCT-1"
        barcode = alignment.get_tag(CELL_BARCODE)
        # If this is a new non-null barcode, return all previous sequences
        if previous_barcode is not None and barcode != previous_barcode:
            yield renamer[previous_barcode], barcode_alignments
            # Reset the barcode alignments
            barcode_alignments = []
        # Add only the aligned sequence to this list of barcode alignments
        barcode_alignments.append(alignment.seq)
        # Set this current barcode as the previous one
        previous_barcode = barcode
    # Yield the final batch -- but only if at least one alignment passed QC.
    # Previously an empty bam raised KeyError on renamer[None].
    if previous_barcode is not None:
        yield renamer[previous_barcode], barcode_alignments
def _write_all_cells_in_one_file(cell_sequences, output_folder, fasta_prefix):
filename = os.path.join(output_folder,
f"{fasta_prefix}.fasta")
with open(filename, "w") as f:
for cell, seq in cell_sequences.items():
f.write(f">{cell}\n{seq}")
# this "pass" makes PyCharm happy
pass
return filename
def _write_one_cell_per_file(cell_sequences, output_folder, fasta_prefix):
os.makedirs(output_folder, exist_ok=True)
filenames = []
for cell, seq in cell_sequences.items():
filename = os.path.join(output_folder, f"{fasta_prefix}_{cell}.fasta")
with open(filename, "w") as f:
f.write(f">{cell}\n{seq}")
filenames.append(filename)
return filenames
def write_cell_sequences(cell_sequences, output_folder,
                         one_cell_per_file=False, fasta_prefix=FASTA_PREFIX):
    """Write cell sequences to fasta, either one file per cell or one shared file.

    Returns a list of the fasta file paths written (length 1 in shared mode).
    """
    if one_cell_per_file:
        return _write_one_cell_per_file(cell_sequences, output_folder,
                                        fasta_prefix)
    return [_write_all_cells_in_one_file(cell_sequences, output_folder,
                                         fasta_prefix)]
def bam_to_fasta(bam, barcodes, barcode_renamer, output_folder, delimiter="X",
                 one_cell_per_file=False, fasta_prefix=FASTA_PREFIX):
    """Convert 10x bam to one-record-per-cell fasta

    Parameters
    ----------
    bam : bamnostic.AlignmentFile
    barcodes : list of str
        QC-passing barcodes
    barcode_renamer : str or None
        Tab-separated filename mapping a barcode to a new name, e.g.
        AAATGCCCAAACTGCT-1    lung_epithelial_cell|AAATGCCCAAACTGCT-1
    delimiter : str, default "X"
        Non-DNA or protein alphabet character to be ignored

    Returns
    -------
    filenames : list
        List of fasta filenames written
    """
    qc_pass_alignments = (a for a in bam if _pass_alignment_qc(a, barcodes))
    renamer = _parse_barcode_renamer(barcodes, barcode_renamer)

    cell_sequences = defaultdict(str)
    for alignment in qc_pass_alignments:
        # Barcode of the alignment, looks like "AAATGCCCAAACTGCT-1"
        renamed = renamer[alignment.get_tag(CELL_BARCODE)]
        # Concatenate all of the cell's sequences into one long string,
        # separated by a non-alphabet character
        cell_sequences[renamed] += alignment.seq + delimiter + "\n"

    return write_cell_sequences(cell_sequences, output_folder,
                                one_cell_per_file, fasta_prefix)
@click.command()
@click.argument("tenx_folder")
@click.option('--all-cells-in-one-file/--one-cell-per-file', default=True,
              help="Create a single fasta, with each cell as a separate "
                   "record, whose sequences are separated by the delimiter "
                   f"'{DELIMITER}' (default), or create many fasta files, "
                   "one per cell")
@click.option('--barcode-renamer',
              help="Tab-separated file mapping barcodes (column 1) to renamed "
                   "ids (column 2)")
@click.option("--output-folder", help="Folder to output to. Default is "
                                      "current directory", default=".")
@click.option('--fasta-prefix', help="Filename prefix to use ",
              default=FASTA_PREFIX)
@click.option('--delimiter', default=DELIMITER)
def fasta(tenx_folder, all_cells_in_one_file, barcode_renamer=None,
          output_folder=".", fasta_prefix=FASTA_PREFIX, delimiter=DELIMITER):
    """Convert 10x bam to fasta of aligned sequences

    Parameters
    ----------
    tenx_folder : str
        Location of tenx folder containing possorted_genome_bam.bam and
        barcodes.tsv files
    """
    barcodes, bam = read_10x_folder(tenx_folder)
    one_cell_per_file = not all_cells_in_one_file
    filenames = bam_to_fasta(bam, barcodes, barcode_renamer=barcode_renamer,
                             output_folder=output_folder, delimiter=delimiter,
                             fasta_prefix=fasta_prefix,
                             one_cell_per_file=one_cell_per_file)
    if len(filenames) == 1:
        # Bug fix: the message previously contained the literal placeholder
        # "(unknown)" instead of the written filename
        click.echo(f"Wrote {filenames[0]}")
    else:
        click.echo(f"Wrote {len(filenames)} fasta files in {output_folder}")
| 2.328125 | 2 |
gtk/13_radioButton.py | ehbc221/thenewboston-python | 0 | 12769226 | <reponame>ehbc221/thenewboston-python
from gi.repository import Gtk
class RadioButtonWindow(Gtk.Window):
    """Demo window with three mutually exclusive radio buttons."""

    def __init__(self):
        Gtk.Window.__init__(self, title="RadioButton Demo")
        self.set_border_width(10)

        button_row = Gtk.Box(spacing=6)
        self.add(button_row)

        # The first button starts a new radio group; the others join it.
        first = Gtk.RadioButton.new_with_label_from_widget(None, "Button 1")
        first.connect("toggled", self.on_button_toggled, "1")
        button_row.pack_start(first, False, False, 0)

        second = Gtk.RadioButton.new_from_widget(first)
        second.set_label("Button 2")
        second.connect("toggled", self.on_button_toggled, "2")
        button_row.pack_start(second, False, False, 0)

        third = Gtk.RadioButton.new_with_mnemonic_from_widget(first, "Button 3")
        third.connect("toggled", self.on_button_toggled, "3")
        button_row.pack_start(third, False, False, 0)

    def on_button_toggled(self, button, name):
        """Print the new on/off state of the toggled button."""
        state = "on" if button.get_active() else "off"
        print("Button", name, "was turned", state)
# Build the demo window, quit the GTK main loop when it is closed,
# and hand control over to GTK.
window = RadioButtonWindow()
window.connect("delete-event", Gtk.main_quit)
window.show_all()
Gtk.main()
django_cradmin/viewhelpers/formview/createview.py | appressoas/django_cradmin | 11 | 12769227 | <filename>django_cradmin/viewhelpers/formview/createview.py
import urllib.parse
from django.utils.translation import gettext_lazy
from django.views.generic import CreateView as DjangoCreateView
from django_cradmin import javascriptregistry
from django_cradmin.viewhelpers.mixins import CommonCradminViewMixin
from . import create_update_view_mixin
class CreateViewMixin(create_update_view_mixin.CreateUpdateViewMixin):
    """
    Common mixin class for create views.

    .. note:: You should import this class with ``from django_cradmin import viewhelpers``,
        and refer to it using ``viewhelpers.formview.CreateViewMixin``.
    """
    #: The viewname within this app for the edit view.
    #: See :meth:`.get_editurl`.
    editview_appurl_name = 'edit'

    def get_pagetitle(self):
        """
        Get the page title (the title tag).

        Defaults to ``Create <verbose_name model>``.
        """
        return gettext_lazy('Create %(what)s') % {'what': self.model_verbose_name}

    def get_success_message(self, obj):
        """
        Defaults to ``"Created "<str(obj)>".``
        """
        return gettext_lazy('Created "%(object)s"') % {'object': obj}

    def get_editurl(self, obj):
        """
        Get the edit URL for ``obj``.

        Defaults to::

            self.request.cradmin_app.reverse_appurl(self.editview_appurl_name, args=[obj.pk])

        You normally want to use :meth:`.get_full_editurl` instead of this method.
        """
        return self.request.cradmin_app.reverse_appurl(
            self.editview_appurl_name, args=[obj.pk])

    def get_full_editurl(self, obj):
        """
        Get the full edit URL for the provided object.

        Unlike :meth:`.get_editurl`, this ensures that any ``success_url`` in
        ``request.GET`` is carried over to the returned URL.

        Args:
            obj: A saved model object.
        """
        url = self.get_editurl(obj)
        if 'success_url' not in self.request.GET:
            return url
        querystring = urllib.parse.urlencode(
            {'success_url': self.request.GET['success_url']})
        return f'{url}?{querystring}'
class WithinRoleCreateView(CreateViewMixin,
                           DjangoCreateView, CommonCradminViewMixin,
                           javascriptregistry.viewmixin.WithinRoleViewMixin):
    """
    Create view with the correct context data and sane base template
    for views where we have a cradmin role.

    .. note:: You should import this class with ``from django_cradmin import viewhelpers``,
        and refer to it using ``viewhelpers.formview.WithinRoleCreateView``.
    """
    template_name = 'django_cradmin/viewhelpers/formview/within_role_create_view.django.html'

    def get_context_data(self, **kwargs):
        """Extend the template context with javascriptregistry, common-view
        and create/update-mixin data."""
        context = super().get_context_data(**kwargs)
        self.add_javascriptregistry_component_ids_to_context(context=context)
        self.add_common_view_mixin_data_to_context(context=context)
        self.add_create_update_view_mixin_context_data(context=context)
        return context
| 2.21875 | 2 |
networkx-d3-v2/lib/appengine_sessions/backends/db.py | suraj-testing2/Clock_Websites | 0 | 12769228 | """ Fix Django's 'write-through' (cache and datastore storage) session
backend to work with Appengine's datastore, along with whatever cache
backend is in settings.
Basically a reworking of django.contrib.sessions.backends.db, so have
a look there for definitive docs.
"""
from google.appengine.ext import ndb
from appengine_sessions.models import Session
from django.contrib.sessions.backends.base import CreateError
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.core.exceptions import SuspiciousOperation
from django.utils.encoding import force_unicode
from django.conf import settings
from datetime import datetime, timedelta
class SessionStore(DBStore):
    """Implements a session store using Appengine's datastore API instead
    of Django's abstracted DB API (since we no longer have nonrel -- just
    vanilla Django)
    """
    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key)
    def get_ndb_session_key(self,session_key=None):
        # Build the NDB key for this session. The old-style
        # `x and x or default` conditional falls back to the store's own
        # (possibly newly created) session key when the argument is falsy.
        return ndb.Key(Session, session_key and session_key or self._get_or_create_session_key())
    """
    Session Date related methods overridden to handle the NDB DateTimeProperty
    get_expiry_age
    get_expiry_date
    set_expiry
    Making sure session dates always use UTC datetimes with no tzinfo
    """
    def get_expiry_age(self):
        """Get the number of seconds until the session expires."""
        expiry = self.get('_session_expiry')
        if not expiry: # Checks both None and 0 cases
            return settings.SESSION_COOKIE_AGE
        if not isinstance(expiry, datetime):
            # A plain number of seconds was stored; return it unchanged.
            return expiry
        # Naive-UTC arithmetic: NDB stores naive UTC datetimes.
        delta = expiry - datetime.utcnow()
        return delta.days * 86400 + delta.seconds
    def get_expiry_date(self):
        """Get session the expiry date (as a datetime object).
        Overridden to make sure that UTC time is used for NDB datetime
        properties """
        expiry = self.get('_session_expiry')
        if isinstance(expiry, datetime):
            return expiry
        if not expiry: # Checks both None and 0 cases
            expiry = settings.SESSION_COOKIE_AGE
        return datetime.utcnow() + timedelta(seconds=expiry)
    def set_expiry(self, value):
        """
        Sets a custom expiration for the session. ``value`` can be an integer,
        a Python ``datetime`` or ``timedelta`` object or ``None``.
        If ``value`` is an integer, the session will expire after that many
        seconds of inactivity. If set to ``0`` then the session will expire on
        browser close.
        If ``value`` is a ``datetime`` or ``timedelta`` object, the session
        will expire at that specific future time.
        If ``value`` is ``None``, the session uses the global session expiry
        policy.
        """
        if value is None:
            # Remove any custom expiration for this session.
            try:
                del self['_session_expiry']
            except KeyError:
                pass
            return
        if value is None:  # unreachable duplicate guard intentionally absent
            pass
        if isinstance(value, timedelta):
            # Convert a relative timedelta into an absolute naive-UTC datetime.
            value = datetime.utcnow() + value
        self['_session_expiry'] = value
    def load(self):
        """Load this session's data dict from the datastore; on a miss or
        an expired/tampered session, create a fresh empty session."""
        s = self.get_ndb_session_key().get()
        if s:
            # Make sure you compare UTC datetime now for NDB.
            if s.expire_date > datetime.utcnow():
                try:
                    return self.decode(force_unicode(s.session_data))
                except SuspiciousOperation:
                    # Tampered/corrupt payload: treat as empty.
                    return {}
        self.create()
        return {}
    def exists(self, session_key):
        # If session key is None then False
        if session_key:
            ndb_session_key = ndb.Key(Session,session_key)
            s = ndb_session_key.get()
            return s is not None
        return False
    def save(self, must_create=False):
        """Create and save a Session object using db.run_in_transaction, with
        key_name = session_key, raising CreateError if
        unsuccessful.
        """
        if must_create:
            # Enforce uniqueness of the key before writing.
            s = self.get_ndb_session_key().get()
            if s:
                raise CreateError()
        session_data = self._get_session(no_load=must_create)
        #ed = self.get_expiry_date()
        #print datetime.datetime.utcoffset(ed)
        def txn():
            # Write (or overwrite) the entity keyed by the session key.
            s = Session(
                id=self._get_or_create_session_key(),
                session_key=self.session_key,
                session_data=self.encode(session_data),
                expire_date=self.get_expiry_date()
            )
            s.put()
        # This is tricky and probably needs some sanity checking, because
        # TransactionFailedError can be raised, but the transaction can still
        # go on to be committed to the datastore. As far as I can see there's
        # no way to manually roll it back at that point. No idea how to test
        # this either.
        try:
            ndb.transaction(txn)
        except (ndb.Rollback):
            raise CreateError()
    def delete(self, session_key=None):
        """Delete the session entity for ``session_key`` (defaults to this
        store's own key; no-op when the store has no key yet)."""
        if session_key is None:
            if self._session_key is None:
                return
            session_key = self._get_or_create_session_key()
        self.get_ndb_session_key(session_key).delete()
        # db.delete(db.Key.from_path('Session', session_key))
# Again, circular import fix
| 2.53125 | 3 |
grab/spider/transport/multicurl.py | gonchik/grab | 0 | 12769229 | import pycurl
import select
import six
import logging
from threading import Lock
from grab.error import GrabTooManyRedirectsError
ERROR_TOO_MANY_REFRESH_REDIRECTS = -2
#ERROR_INTERNAL_GRAB_ERROR = -3
ERROR_ABBR = {
ERROR_TOO_MANY_REFRESH_REDIRECTS: 'too-many-refresh-redirects',
#ERROR_INTERNAL_GRAB_ERROR: 'internal-grab-error',
}
for key in dir(pycurl):
if key.startswith('E_'):
abbr = key[2:].lower().replace('_', '-')
ERROR_ABBR[getattr(pycurl, key)] = abbr
class MulticurlTransport(object):
    """Spider network transport driving a fixed pool of pycurl handles
    through one ``pycurl.CurlMulti`` instance.

    ``network_op_lock`` serializes all operations on the multi handle,
    because the transport may be touched from more than one thread.
    """
    def __init__(self, socket_number):
        """
        :param socket_number: size of the curl-handle pool, i.e. the
            maximum number of concurrent network operations.
        """
        self.socket_number = socket_number
        self.multi = pycurl.CurlMulti()
        self.multi.handles = []
        self.freelist = []
        # Maps id(curl) -> {'grab', 'grab_config_backup', 'task'} for
        # every handle currently registered with the multi handle.
        self.registry = {}
        self.connection_count = {}
        self.network_op_lock = Lock()
        # Create curl instances
        for x in six.moves.range(self.socket_number):
            curl = pycurl.Curl()
            self.connection_count[id(curl)] = 0
            self.freelist.append(curl)
            # self.multi.handles.append(curl)
    def ready_for_task(self):
        """Truthy when at least one curl handle is free."""
        return len(self.freelist)
    def get_free_threads_number(self):
        """Number of idle curl handles."""
        return len(self.freelist)
    def get_active_threads_number(self):
        """Number of handles currently processing a request."""
        return self.socket_number - len(self.freelist)
    def process_connection_count(self, curl):
        """Recycle a curl handle after 100 uses; return the handle to use.

        A fresh pycurl.Curl replaces the old one so handle-level state
        does not accumulate forever.
        """
        curl_id = id(curl)
        self.connection_count[curl_id] += 1
        if self.connection_count[curl_id] > 100:
            del self.connection_count[curl_id]
            del curl
            new_curl = pycurl.Curl()
            self.connection_count[id(new_curl)] = 1
            return new_curl
        else:
            return curl
    def start_task_processing(self, task, grab, grab_config_backup):
        """Bind a free curl handle to ``grab``/``task`` and register it
        with the multi handle.

        Uses the lock as a context manager so it is released on every
        path, including exceptions raised while preparing the request.
        """
        with self.network_op_lock:
            curl = self.process_connection_count(self.freelist.pop())
            self.registry[id(curl)] = {
                'grab': grab,
                'grab_config_backup': grab_config_backup,
                'task': task,
            }
            grab.transport.curl = curl
            try:
                grab.prepare_request()
                grab.log_request()
            except Exception:
                # If some error occurred while processing the request
                # arguments then put the curl object back on the free list.
                del self.registry[id(curl)]
                self.freelist.append(curl)
                raise
            else:
                # Add configured curl instance to the multi-curl processor.
                self.multi.add_handle(curl)
    def process_handlers(self):
        """Wait briefly for socket activity and let curl progress transfers.

        Bug fix: the original acquired ``network_op_lock`` with no
        try/finally, so an exception from fdset/select/perform left the
        lock held forever and deadlocked the transport. ``with``
        guarantees release.
        """
        with self.network_op_lock:
            rlist, wlist, xlist = self.multi.fdset()
            if rlist or wlist or xlist:
                timeout = self.multi.timeout()
                if timeout and timeout > 0:
                    select.select(rlist, wlist, xlist, timeout / 1000.0)
            while True:
                status, active_objects = self.multi.perform()
                if status != pycurl.E_CALL_MULTI_PERFORM:
                    break
    def iterate_results(self):
        """Drain finished transfers from the multi handle, yielding one
        result dict per completed request.

        Yielded keys: ok, ecode, emsg, error_abbr, grab,
        grab_config_backup, task. Handles are reset and returned to the
        free list after each yield.
        """
        while True:
            queued_messages, ok_list, fail_list = self.multi.info_read()
            results = []
            for curl in ok_list:
                results.append((True, curl, None, None))
            for curl, ecode, emsg in fail_list:
                # CURLE_WRITE_ERROR (23): raised when a write callback
                # aborts the transfer. That is expected when the
                # `_callback_interrupted` flag is set (nohead/nobody
                # options), so treat it as success; it can also be raised
                # when curl receives KeyboardInterrupt inside a callback.
                if ecode == 23:
                    if getattr(curl, '_callback_interrupted', None) is True:
                        curl._callback_interrupted = False
                        results.append((True, curl, None, None))
                    else:
                        results.append((False, curl, ecode, emsg))
                else:
                    results.append((False, curl, ecode, emsg))
            for ok, curl, ecode, emsg in results:
                # FORMAT: {ok, grab, grab_config_backup, task, emsg}
                curl_id = id(curl)
                task = self.registry[curl_id]['task']
                grab = self.registry[curl_id]['grab']
                grab_config_backup =\
                    self.registry[curl_id]['grab_config_backup']
                try:
                    grab.process_request_result()
                except GrabTooManyRedirectsError:
                    ecode = ERROR_TOO_MANY_REFRESH_REDIRECTS
                    emsg = 'Too many meta refresh redirects'
                    ok = False
                grab.response.error_code = ecode
                grab.response.error_msg = emsg
                # Free resources
                del self.registry[curl_id]
                grab.transport.curl = None
                if ok:
                    error_abbr = None
                else:
                    error_abbr = ERROR_ABBR.get(ecode, 'unknown-%d' % ecode)
                yield {'ok': ok,
                       'ecode': ecode,
                       'emsg': emsg,
                       'error_abbr': error_abbr,
                       'grab': grab,
                       'grab_config_backup': grab_config_backup,
                       'task': task}
                self.multi.remove_handle(curl)
                curl.reset()
                self.freelist.append(curl)
            if not queued_messages:
                break
| 2.171875 | 2 |
main.py | sivavenub/attention | 0 | 12769230 | <reponame>sivavenub/attention
import torch
from trainer import Trainer
from config import get_config
from utils import prepare_dirs, save_config
from data_loader import get_test_loader, get_train_valid_loader
def main(config):
    """Entry point: prepare directories and RNG seeds, build the data
    loaders, then either train (with the config saved) or test."""
    prepare_dirs(config)

    # Seed every RNG we use so runs are reproducible.
    torch.manual_seed(config.random_seed)
    loader_kwargs = {}
    if config.use_gpu:
        torch.cuda.manual_seed(config.random_seed)
        loader_kwargs.update(num_workers=1, pin_memory=True)

    # Training uses a train/valid split; evaluation uses the test set.
    if config.is_train:
        data_loader = get_train_valid_loader(
            config.data_dir, config.batch_size,
            config.random_seed, config.valid_size,
            config.shuffle, config.show_sample, **loader_kwargs
        )
    else:
        data_loader = get_test_loader(
            config.data_dir, config.batch_size, **loader_kwargs
        )

    trainer = Trainer(config, data_loader)
    if config.is_train:
        save_config(config)
        trainer.train()
        return
    trainer.test()
if __name__ == '__main__':
    # Parse command-line flags; unrecognized flags are collected in
    # `unparsed` and deliberately ignored.
    config, unparsed = get_config()
    main(config)
| 2.140625 | 2 |
opening_screen.py | krinskils/chess | 2 | 12769231 | import pygame
import screen
import os
import socket
import protocol
import logging
from screen import *
clock = pygame.time.Clock()
# Background image scaled to fill the whole window.
BACKGROUND_IMAGE_PATH = os.path.join(PICTURES_PATH, 'opening_screen_picture.png')
bg_image = pygame.image.load(BACKGROUND_IMAGE_PATH)
bg_image = pygame.transform.scale(bg_image, (SCREEN_WIDTH, SCREEN_HEIGHT))
BACK_BUTTON_IMAGE_PATH = os.path.join(PICTURES_PATH, 'back_sign.png')
back_button_image = pygame.image.load(BACK_BUTTON_IMAGE_PATH)
back_button_image = pygame.transform.scale(back_button_image, (int(SCREEN_WIDTH/10), int(SCREEN_HEIGHT/10)))
REFRESH_BUTTON_IMAGE_PATH = os.path.join(PICTURES_PATH, 'refresh_button.png')
refresh_button_image = pygame.image.load(REFRESH_BUTTON_IMAGE_PATH)
# Refresh button is sized to match the back button.
refresh_button_image = pygame.transform.scale(refresh_button_image,
                                              (back_button_image.get_width(), back_button_image.get_height()))
SOUNDS_PATH = 'sounds'
PASSIVE_TEXTBOX_COLOR = colors.WHITE
ACTIVE_TEXTBOX_COLOR = colors.LIGHT_BLUE
# Game modes.
BOT_GAME_TYPE = 1
ONLINE_GAME_TYPE = 2
TWO_PLAYERS_GAME_TYPE = 3
# Keys in rectangles dict
START_GAME = 0
NUMBER_OF_PLAYERS = 1
GAME_LENGTH = 2
BOT_LEVEL = 3
TEAM_SELECTION = 4
ONLINE_GAME = 5
BACK_BUTTON = 8
JOIN_GAME_RECTS = 9
REFRESH_BUTTON = 10
MAX_USERNAME_LENGTH = 10
# Groups whose `rectangles` entry is a single Rect (not a text->Rect dict).
ONE_RECT_GROUPS = [START_GAME, ONLINE_GAME, BACK_BUTTON, REFRESH_BUTTON]
# default values
is_one_players_playing = True
game_length = 5  # In minutes.
level = 3  # Bot Depth
is_white = True
username = ""
my_socket = None  # Lazily opened by connect_to_server().
opponent_player_name = ""
game_type = BOT_GAME_TYPE
TEXT_BOX_HEIGHT = REGULAR_FONT.get_height() + 20
TEXT_BOX_WIDTH = 600
class WaitingGame:
    """A game advertised by another player that is waiting for an opponent."""
    def __init__(self, name, is_other_player_white, current_game_length):
        self.opponent_player_name = name
        # We would play the opposite team of the game's creator.
        self.is_white = not is_other_player_white
        self.length = current_game_length
def starting_screen():
    """Main-menu loop: draw the title screen and dispatch click events.

    Runs until an event handler raises a flow-control exception
    (e.g. FinishStartingScreen / UserExitGame).
    """
    global game_type
    game_type = BOT_GAME_TYPE if is_one_players_playing else TWO_PLAYERS_GAME_TYPE
    # Print background image.
    screen.blit(bg_image, (0, 0))
    # Print title.
    text = LARGE_FONT.render("BeCheZ", False, colors.YELLOW)
    screen.blit(text, (SCREEN_WIDTH / 2 - text.get_width() / 2, 50))
    rectangles = set_rectangles()
    while True:
        pygame.display.flip()
        for event in pygame.event.get():
            handle_event(event, rectangles)
def online_screen(*ignore):
    """Name-entry screen for online play.

    Shows a text box for the username plus CREATE/JOIN buttons, which are
    only enabled once the name is non-empty. Extra positional args are
    accepted (and ignored) so this can sit in the click dispatch table.
    """
    global username
    global game_type
    global my_socket
    game_type = ONLINE_GAME_TYPE
    screen.blit(bg_image, (0, 0))
    back_button_rect = draw_and_get_back_button()
    # Print title.
    text = LARGE_FONT.render("ENTER YOUR NAME:", False, colors.DARK_BLUE)
    screen.blit(text, (SCREEN_WIDTH / 2 - text.get_width() / 2, 100))
    # Text box rectangle to get input from user.
    text_box = pygame.Rect(MIDDLE_HORIZONTAL - TEXT_BOX_WIDTH/2, SCREEN_HEIGHT/2, TEXT_BOX_WIDTH, TEXT_BOX_HEIGHT)
    create_game_rect, join_game_rect, create_game_text, join_game_text = get_join_create_rectangles(text_box)
    # Colors of text box
    is_active = False
    draw_text_box(username, text_box, is_active)
    while True:
        pygame.display.flip()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                raise exceptions.UserExitGame
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Clicking inside the text box focuses it; anywhere else
                # defocuses it.
                if text_box.collidepoint(pygame.mouse.get_pos()):
                    is_active = True
                else:
                    is_active = False
                draw_text_box(username, text_box, is_active)
                if back_button_rect.collidepoint(pygame.mouse.get_pos()):
                    return starting_screen()
                if create_game_rect.collidepoint(pygame.mouse.get_pos()):
                    if len(username) > 0:
                        create_game()
                    else:
                        # Name required: play an error sound instead.
                        pygame.mixer.Sound(os.path.join(SOUNDS_PATH, 'error.wav')).play()
                if join_game_rect.collidepoint(pygame.mouse.get_pos()):
                    if len(username) > 0:
                        join_game_screen()
                    else:
                        pygame.mixer.Sound(os.path.join(SOUNDS_PATH, 'error.wav')).play()
            if is_active and event.type == pygame.KEYDOWN:
                # Delete last letter.
                if event.key == pygame.K_BACKSPACE:
                    username = username[:-1]
                elif len(username) < MAX_USERNAME_LENGTH:
                    username += event.unicode
                draw_text_box(username, text_box, is_active)
                # deactivate create and join game rect.
                if len(username) == 0:
                    pygame.draw.rect(screen, colors.WHITE, create_game_rect)
                    pygame.draw.rect(screen, colors.WHITE, join_game_rect)
                # Activate create and join game rect.
                else:
                    pygame.draw.rect(screen, colors.LIGHT_BLUE, create_game_rect)
                    pygame.draw.rect(screen, colors.LIGHT_BLUE, join_game_rect)
                # The 25 is padding from rect and text
                screen.blit(create_game_text, (MIDDLE_HORIZONTAL - create_game_text.get_width() / 2, create_game_rect.top + 25))
                screen.blit(join_game_text, (MIDDLE_HORIZONTAL - join_game_text.get_width() / 2, join_game_rect.top + 25))
def get_join_create_rectangles(textbox: pygame.Rect) -> tuple:
    """Draw the JOIN GAME / CREATE GAME buttons below ``textbox``.

    :return: (create_game_rect, join_game_rect, create_game_text,
        join_game_text) -- the rects plus their rendered label surfaces,
        so the caller can redraw the labels when the buttons change color.
    """
    text = REGULAR_FONT.render("JOIN GAME", False, colors.BLACK)
    join_game_rect = pygame.Rect(MIDDLE_HORIZONTAL - (text.get_width() / 2 + 50),
                                 textbox.bottom + text.get_height() + 50, text.get_width() + 100, text.get_height() + 50)
    pygame.draw.rect(screen, colors.YELLOW, join_game_rect)
    screen.blit(text, (join_game_rect.centerx - text.get_width() / 2, join_game_rect.centery - text.get_height() / 2))
    join_game_text = text
    text = REGULAR_FONT.render("CREATE GAME", False, colors.BLACK)
    create_game_rect = pygame.Rect(MIDDLE_HORIZONTAL - (text.get_width() / 2 + 50),
                                   join_game_rect.bottom + text.get_height() + 50,
                                   text.get_width() + 100, text.get_height() + 50)
    pygame.draw.rect(screen, colors.WHITE, create_game_rect)
    screen.blit(text, (create_game_rect.centerx - text.get_width() / 2, create_game_rect.top + 25))
    create_game_text = text
    return create_game_rect, join_game_rect, create_game_text, join_game_text
def join_game_screen(*ignore):
    """Show the list of joinable games fetched from the server.

    Each open game becomes a clickable rectangle handled by
    :func:`join_to`. Extra positional args are accepted (and ignored) so
    this can double as the refresh-button handler in the dispatch table.
    """
    global opponent_player_name
    rectangles = dict()
    # Print background.
    screen.blit(bg_image, (0, 0))
    rectangles[BACK_BUTTON] = draw_and_get_back_button()
    rectangles[REFRESH_BUTTON] = draw_and_get_refresh_button()
    # Join server.
    connect_to_server()
    final_request = protocol.Request(username, protocol.GET_GAMES).set_request_to_server()
    my_socket.send(final_request)
    games_list = get_games_list()
    logging.debug(f"Games list is: {games_list}")
    rectangles[JOIN_GAME_RECTS] = create_join_game_rectangles(games_list)
    while True:
        pygame.display.flip()
        for event in pygame.event.get():
            try:
                handle_event(event, rectangles)
            except exceptions.BackToLastScreen:
                online_screen()
            except exceptions.JoinGameError:
                # Server rejected the join (e.g. game taken) -- error sound.
                pygame.mixer.Sound(os.path.join(SOUNDS_PATH, 'error.wav')).play()
def create_game():
    """Advertise a new game on the server and block until someone joins.

    Wire format of the request payload: 1 byte team flag ("1" when we
    play white) + 2-digit zero-padded game length in minutes. After
    sending, waits for the opponent's name (1 length byte + name) and
    then raises FinishStartingScreen to start the game.
    """
    global opponent_player_name
    screen.blit(bg_image, (0, 0))
    connect_to_server()
    logging.debug("Creating game")
    msg_content = "1" if is_white else "0"
    msg_content += str(game_length).zfill(2)
    my_socket.send(protocol.Request(username, protocol.CREATE_GAME, msg_content).set_request_to_server())
    text = LARGE_FONT.render("waiting for second player...", False, colors.DARK_BLUE)
    screen.blit(text, (SCREEN_WIDTH/2 - text.get_width()/2, SCREEN_HEIGHT/2 - text.get_height()/2))
    pygame.display.flip()
    # Blocking read: first byte is the opponent's name length.
    opponent_player_name_length = int(my_socket.recv(1).decode())
    opponent_player_name = my_socket.recv(opponent_player_name_length).decode()
    raise exceptions.FinishStartingScreen
def create_join_game_rectangles(games_name: list):
    """Draw one clickable row per open game and return {WaitingGame: Rect}.

    Stops adding rows once the next one would overflow the screen.
    """
    rectangle_width = int(SCREEN_WIDTH * (3/4))
    rectangle_height = REGULAR_FONT.get_height() + 20
    last_rectangle_bottom = 0
    rectangles = dict()
    for game in games_name:
        text_string = f"opponent player is: {game.opponent_player_name} - "
        # game.is_white already reflects *our* team (flipped at parse time).
        your_team = "white team" if game.is_white else "black team"
        text_string += f"your team: {your_team} - "
        text_string += f"game length: {str(game.length)}"
        text = REGULAR_FONT.render(text_string, False, colors.WHITE)
        current_game_rect = pygame.Rect(MIDDLE_HORIZONTAL - int(rectangle_width/2),
                                        last_rectangle_bottom + rectangle_height, rectangle_width, rectangle_height)
        last_rectangle_bottom = current_game_rect.bottom
        rectangles[game] = current_game_rect
        pygame.draw.rect(screen, colors.DARK_BLUE, current_game_rect)
        screen.blit(text, (MIDDLE_HORIZONTAL - text.get_width()/2, current_game_rect.top + 10))
        # Rect wouldn't be out of screen
        if last_rectangle_bottom + (rectangle_height*2) >= SCREEN_HEIGHT:
            break
    pygame.display.flip()
    return rectangles
def get_games_list() -> list:
    """Read the list of open games from the server socket.

    Wire format: 1 byte game count, then per game: 1 byte name length,
    the creator's name, 1 byte team flag (creator plays white), and a
    2-byte game length in minutes.

    :return: a list of :class:`WaitingGame` entries describing every game
        waiting for a second player.
    """
    # Fix: the original docstring opened with a stray fourth quote (""""),
    # which put a literal '"' at the start of the docstring.
    games = list()
    list_length = my_socket.recv(1).decode()
    logging.debug(f"number of players waiting for their games is {list_length}")
    for _ in range(int(list_length)):
        name_length = int(my_socket.recv(1).decode())
        name = my_socket.recv(name_length).decode()
        is_opponent_player_white = int(my_socket.recv(1).decode())
        current_game_length = int(my_socket.recv(2).decode())
        games.append(WaitingGame(name, is_opponent_player_white, current_game_length))
    return games
def draw_text_box(username, text_box, is_active):
    """Render the name text box; an active (focused) box is highlighted.

    The ``username`` parameter intentionally shadows the module-level
    global of the same name -- callers always pass the current value in.
    """
    text_box_color = ACTIVE_TEXTBOX_COLOR if is_active else PASSIVE_TEXTBOX_COLOR
    pygame.draw.rect(screen, text_box_color, text_box)
    text = REGULAR_FONT.render(username, False, colors.BLACK)
    screen.blit(text, (text_box.left + 10, text_box.top + 10))
def connect_to_server():
    """Open the TCP connection to the game server (idempotent: a second
    call while the socket exists is a no-op)."""
    global my_socket
    if my_socket is not None:
        return
    my_socket = socket.socket()
    my_socket.connect((protocol.SERVER_IP, protocol.SERVER_PORT))
def set_rectangles():
    """Draw the main-menu widgets and return the click-dispatch mapping.

    Keys are the group constants (START_GAME, NUMBER_OF_PLAYERS, ...);
    values are either a single Rect (for ONE_RECT_GROUPS) or a
    text->Rect dict.
    """
    rectangles = dict()
    # Print start game rect.
    text = REGULAR_FONT.render("START GAME", False, colors.BLACK)
    rect_high = text.get_height() + 50
    rect_width = text.get_width() + 50
    start_game_rect = pygame.Rect(MIDDLE_HORIZONTAL - rect_width/2, 550, rect_width, rect_high)
    pygame.draw.rect(screen, colors.YELLOW, start_game_rect)
    screen.blit(text, (start_game_rect.centerx - text.get_width() / 2, start_game_rect.centery - text.get_height() / 2))
    rectangles[START_GAME] = start_game_rect
    # Print online game rect
    text = REGULAR_FONT.render("ONLINE GAME", False, colors.BLACK)
    rect_width = text.get_width() + 50
    rect_high = text.get_height() + 50
    online_game_rect = pygame.Rect(MIDDLE_HORIZONTAL - rect_width / 2, start_game_rect.top+200, rect_width, rect_high)
    pygame.draw.rect(screen, colors.YELLOW, online_game_rect)
    screen.blit(text, (online_game_rect.centerx - text.get_width() / 2, online_game_rect.centery - text.get_height() / 2))
    rectangles[ONLINE_GAME] = online_game_rect
    rectangles[NUMBER_OF_PLAYERS] = create_players_count_rects()
    rectangles[GAME_LENGTH] = create_small_rects("GAME_LENGTH", GAME_LENGTH_OPTION, default=game_length,
                                                 color=colors.DARK_RED, chosen_color=colors.RED, is_left=True)
    rectangles[BOT_LEVEL] = create_small_rects("BOT LEVEL", range(1, 5), default=level,
                                               color=colors.DARK_BLUE, chosen_color=colors.LIGHT_BLUE, is_left=False)
    # Passing the 'one player' rect as argument to the function.
    rectangles[TEAM_SELECTION] = draw_team_selection_rects(rectangles[NUMBER_OF_PLAYERS]["One Player"].midright, is_white)
    return rectangles
def handle_event(event, rectangles):
    """Dispatch a pygame event to the handler of the clicked rectangle.

    :raises exceptions.UserExitGame: when the window is closed.
    """
    if event.type == pygame.QUIT:
        raise exceptions.UserExitGame
    elif event.type == pygame.MOUSEBUTTONDOWN:
        mouse_pos = pygame.mouse.get_pos()
        try:
            rect_group, rect_clicked, text_in_rect = get_rect(mouse_pos, rectangles)
            return rect_group_to_function[rect_group](rect_clicked, text_in_rect, rectangles)
        # The user clicked on something that not the rect
        except exceptions.NonReturnValue:
            pass
def get_rect(mouse_pos, rectangles):
    """Find which rectangle (if any) the mouse position hits.

    :return: ``(group, rect, text)`` where ``text`` is ``None`` for
        single-rect groups and the rect's label otherwise.
    :raises exceptions.NonReturnValue: when no rectangle was hit.
    """
    for group, entry in rectangles.items():
        if group in ONE_RECT_GROUPS:
            # The entry itself is the rect for single-rect groups.
            if entry.collidepoint(*mouse_pos):
                return group, entry, None
            continue
        # Otherwise the entry maps label text -> rect.
        for label, rect in entry.items():
            if rect.collidepoint(*mouse_pos):
                return group, rect, label
    raise exceptions.NonReturnValue
def join_to(rect_clicked, game: WaitingGame, rectangles):
    """Ask the server to join ``game``; on success adopt its settings.

    :raises exceptions.FinishStartingScreen: on a successful join.
    :raises exceptions.JoinGameError: when the server rejects the request.
    """
    final_request = protocol.Request(username, protocol.JOIN_GAME, game.opponent_player_name).set_request_to_server()
    my_socket.send(final_request)
    if my_socket.recv(1) == protocol.OK_MESSAGE:
        global is_white
        global opponent_player_name
        global game_length
        # Adopt the joined game's parameters as our own.
        is_white = game.is_white
        game_length = game.length
        opponent_player_name = game.opponent_player_name
        raise exceptions.FinishStartingScreen
    else:
        raise exceptions.JoinGameError
def set_team(rect_clicked, text, rectangles):
    """Handle a click on a team-selection rectangle.

    Team selection is only meaningful in single-player mode, so clicks
    are ignored in two-player mode.
    """
    if not is_one_players_playing:
        return
    global is_white
    # Idiom fix: compare directly instead of `True if ... else False`.
    is_white = text == "WHITE TEAM"
    set_rects_color(rectangles[TEAM_SELECTION], rect_clicked,
                    colors.LIGHT_SILVER, colors.DARK_SILVER, colors.BLACK)
def back_to_last_screen(*ignore):
    """Signal the event loop to return to the previous screen."""
    raise exceptions.BackToLastScreen
def finish_starting_screen(*ignore):
    """Signal that setup is finished and the actual game should start."""
    raise exceptions.FinishStartingScreen
def set_number_of_players(rect_clicked, text, rectangles):
    """Toggle between one-player (vs bot) and two-player local mode.

    NOTE(review): the "BLACK TEAM" lookup below assumes
    draw_team_selection_rects keys its dict with "BLACK TEAM", but that
    function's label literal appears redacted ("<NAME>") in this source
    -- confirm the labels match to avoid a KeyError.
    """
    global is_one_players_playing
    global game_type
    is_one_players_playing = (text == 'One Player')
    if is_one_players_playing:
        # Passing the 'one player' rect as argument to the function.
        draw_team_selection_rects(rectangles[NUMBER_OF_PLAYERS]["One Player"].midright, is_white)
        game_type = BOT_GAME_TYPE
    else:
        # Erase team selection rectangles.
        screen.blit(bg_image, rectangles[TEAM_SELECTION]["WHITE TEAM"].topleft,
                    rectangles[TEAM_SELECTION]["WHITE TEAM"])
        screen.blit(bg_image, rectangles[TEAM_SELECTION]["BLACK TEAM"].topleft,
                    rectangles[TEAM_SELECTION]["BLACK TEAM"])
        game_type = TWO_PLAYERS_GAME_TYPE
    set_rects_color(rectangles[NUMBER_OF_PLAYERS], rect_clicked,
                    colors.LIGHT_SILVER, colors.DARK_SILVER, colors.BLACK)
def set_bot_level(rect_clicked, text, rectangles):
    """Set the bot search depth from the clicked option label."""
    global level
    level = int(text)
    set_rects_color(rectangles[BOT_LEVEL], rect_clicked, colors.LIGHT_BLUE, colors.DARK_BLUE)
def set_game_length(rect_clicked, text, rectangles):
    """Set the game length (minutes) from the clicked option label."""
    global game_length
    game_length = int(text)
    set_rects_color(rectangles[GAME_LENGTH], rect_clicked, colors.RED, colors.DARK_RED)
def create_small_rects(title, options, default, color, chosen_color, is_left):
    # Draw the rectangles in the sides of the starting screen.
    # Return a dictionary. the key is the text and the value is the rect.
    # NOTE(review): `rect` leaks out of the loop and is used for the title
    # placement; an empty `options` would raise NameError -- assumed
    # non-empty by all current callers.
    rects = {}
    current_print_height = 100
    x_pos = 5 if is_left else (SCREEN_WIDTH - SMALL_RECT_WIDTH - 5)
    for option in options:
        # Set the color of the rect. the chosen option is in other color.
        rect_color = chosen_color if option == default else color
        rect = pygame.Rect(x_pos, current_print_height, SMALL_RECT_WIDTH,
                           SMALL_RECT_HEIGHT)
        pygame.draw.rect(screen, rect_color, rect)
        # print the text in rect
        text = f"{option}"
        text_surface = REGULAR_FONT.render(text, False, colors.WHITE)
        screen.blit(text_surface, (rect.centerx - text_surface.get_width() / 2
                    , rect.centery - text_surface.get_height() / 2))
        rects[text] = rect
        current_print_height += (SMALL_RECT_HEIGHT * 2)
    # Print title.
    text_surface = REGULAR_FONT.render(title, True, color)
    if is_left:
        screen.blit(text_surface,
                    (max(rect.centerx - text_surface.get_width() / 2, 0), 10))  # Space from top.
    else:
        screen.blit(text_surface,
                    (min(rect.centerx - text_surface.get_width() / 2, SCREEN_WIDTH - text_surface.get_width() - 10),
                     10))  # Space from top.
    return rects
def create_players_count_rects():
    # Return a dictionary. the key is the text and the value is the rect.
    # Draws the "One Player" / "Two Players" selector; "One Player" starts
    # highlighted (LIGHT_SILVER), matching the is_one_players_playing default.
    rects = dict()
    current_print_height = 150
    one_player_rect = pygame.Rect(MIDDLE_HORIZONTAL - RECT_WIDTH / 2, current_print_height, RECT_WIDTH, RECT_HEIGHT)
    pygame.draw.rect(screen, colors.LIGHT_SILVER, one_player_rect)
    text = "One Player"
    text_surface = REGULAR_FONT.render(text, False, colors.BLACK)
    screen.blit(text_surface, (one_player_rect.centerx - text_surface.get_width() / 2,
                               one_player_rect.centery - text_surface.get_height() / 2))
    current_print_height += 200
    rects[text] = one_player_rect
    two_player_rect = pygame.Rect(MIDDLE_HORIZONTAL - RECT_WIDTH / 2, current_print_height, RECT_WIDTH, RECT_HEIGHT)
    pygame.draw.rect(screen, colors.DARK_SILVER, two_player_rect)
    text = "Two Players"
    text_surface = REGULAR_FONT.render(text, False, colors.BLACK)
    screen.blit(text_surface, (two_player_rect.centerx - text_surface.get_width() / 2,
                               two_player_rect.centery - text_surface.get_height() / 2))
    rects[text] = two_player_rect
    return rects
def draw_team_selection_rects(one_player_rect_cords, isWhite=True):
    """Draw the white/black team selector to the right of the given point.

    :param one_player_rect_cords: (x, y) anchor, the 'One Player' rect's midright.
    :param isWhite: which team is currently selected (highlighted).
    :return: dict mapping label text -> Rect.
    """
    x_pos, y_pos = one_player_rect_cords
    x_pos += SCREEN_WIDTH / 10
    white_team_y_pos = y_pos - SCREEN_WIDTH / 20
    black_team_y_pos = y_pos + SCREEN_WIDTH / 20
    white_team_color, black_team_color = (colors.LIGHT_SILVER, colors.DARK_SILVER) if isWhite else \
        (colors.DARK_SILVER, colors.LIGHT_SILVER)
    rects = {}
    rect = pygame.Rect(x_pos, white_team_y_pos, RECT_WIDTH, RECT_HEIGHT)
    pygame.draw.rect(screen, white_team_color, rect)
    text = "WHITE TEAM"
    text_surface = REGULAR_FONT.render(text, False, colors.BLACK)
    screen.blit(text_surface, (rect.centerx - text_surface.get_width() / 2,
                               rect.centery - text_surface.get_height() / 2))
    rects[text] = rect
    rect = pygame.Rect(x_pos, black_team_y_pos, RECT_WIDTH, RECT_HEIGHT)
    pygame.draw.rect(screen, black_team_color, rect)
    # NOTE(review): "<NAME>" looks like a redacted placeholder -- the rest
    # of the module (set_team, set_number_of_players) expects the literal
    # "BLACK TEAM" as both the label and the dict key; confirm and restore.
    text = "<NAME>"
    text_surface = REGULAR_FONT.render(text, False, colors.BLACK)
    screen.blit(text_surface, (rect.centerx - text_surface.get_width() / 2,
                               rect.centery - text_surface.get_height() / 2))
    rects[text] = rect
    return rects
def set_rects_color(rects_and_texts: dict, chosen_rect, chosen_rect_color, unchosen_rect_color,
                    text_color=colors.WHITE):
    """Redraw a group of option rectangles, highlighting the chosen one.

    :param rects_and_texts: mapping of label text -> pygame.Rect.
    :param chosen_rect: the clicked rect (compared by identity).
    :param chosen_rect_color: fill color for the chosen rect.
    :param unchosen_rect_color: fill color for every other rect.
    :param text_color: color used to render each label.
    """
    for text, rect in rects_and_texts.items():
        color = chosen_rect_color if rect is chosen_rect else unchosen_rect_color
        pygame.draw.rect(screen, color, rect)
        text_surface = REGULAR_FONT.render(text, False, text_color)
        # Fix: the original `if rect.width == RECT_WIDTH` branch and its
        # else branch blitted identically, so the dead branch is removed --
        # the label is always centered in its rect.
        screen.blit(text_surface, (rect.centerx - text_surface.get_width() / 2,
                                   rect.centery - text_surface.get_height() / 2))
def draw_and_get_back_button():
    """Blit the back button at the bottom-left and return its clickable rect."""
    back_button_x_pos = 0
    back_button_y_pos = SCREEN_HEIGHT - back_button_image.get_height() - 20
    screen.blit(back_button_image, (back_button_x_pos, back_button_y_pos))
    tmp_rect = back_button_image.get_rect()
    tmp_rect.topleft = (back_button_x_pos, back_button_y_pos)
    return tmp_rect
def draw_and_get_refresh_button():
    """Blit the refresh button at the bottom-right and return its clickable rect."""
    refresh_button_x_pos = SCREEN_WIDTH - refresh_button_image.get_width()
    refresh_button_y_pos = SCREEN_HEIGHT - refresh_button_image.get_height() - 20
    screen.blit(refresh_button_image, (refresh_button_x_pos, refresh_button_y_pos))
    # Bug fix: the rect height previously used get_width(), which made the
    # clickable area the wrong shape whenever the image is not square.
    return pygame.Rect(refresh_button_x_pos, refresh_button_y_pos,
                       refresh_button_image.get_width(), refresh_button_image.get_height())
# Dispatch table: maps a rectangle-group id to its click handler.
# Every handler is called as handler(rect_clicked, text_in_rect, rectangles).
rect_group_to_function = {
    START_GAME: finish_starting_screen,
    NUMBER_OF_PLAYERS: set_number_of_players,
    GAME_LENGTH: set_game_length,
    BOT_LEVEL: set_bot_level,
    TEAM_SELECTION: set_team,
    ONLINE_GAME: online_screen,
    BACK_BUTTON: back_to_last_screen,
    JOIN_GAME_RECTS: join_to,
    REFRESH_BUTTON: join_game_screen,
}
| 2.578125 | 3 |
src/viur/ext/tasks/clear_kind.py | XeoN-GHMB/viur_ext | 1 | 12769232 | <filename>src/viur/ext/tasks/clear_kind.py<gh_stars>1-10
import logging, safeeval
from viur.core import errors, utils, skeleton, tasks
from viur.core.bones import baseBone, selectBone, stringBone
@tasks.CallableTask
class TaskClearKind(tasks.CallableTaskBase):
    """Admin task that deletes every entity of one or more skeleton kinds,
    optionally keeping entries for which a SafeEval filter evaluates falsy."""
    key = "clearKind"
    name = "Clear all entities of a kind"
    descr = "This task can be called to clean your database from a specific kind."
    def canCall(self):
        # Only root users may run this destructive task.
        user = utils.getCurrentUser()
        return user is not None and "root" in user["access"]
    def dataSkel(self):
        """Build the input form skeleton: module selection, an optional
        SafeEval filter expression, and a mandatory YES confirmation."""
        skel = skeleton.BaseSkeleton().clone()
        skel.module = selectBone(
            descr="Module",
            values=skeleton.listKnownSkeletons(),
            required=True,
            multiple=True
        )
        skel.eval = baseBone(
            descr="Eval",
            params={
                "tooltip": "Enter a SafeEval-Python expression here to filter entries by specific bone values."
            },
            validHtml=None
        )
        skel.confirm = stringBone(
            descr="Type YES as your confirmation!",
            required=True
        )
        return skel
    def execute(self, module, confirm, eval=None, *args, **kwargs):
        """Kick off one deferred deletion chain per selected module.

        NOTE: ``eval`` shadows the builtin; the name is part of the task's
        form interface, so it is kept.
        """
        if confirm != "YES":
            raise errors.PreconditionFailed("Confirm must be 'YES'!")
        # Validate the filter expression up front so we fail fast.
        if eval and eval.strip():
            try:
                safeeval.SafeEval().compile(eval)
            except SyntaxError as e:
                logging.exception(e)
                raise errors.PreconditionFailed("The expression is not valid")
        usr = utils.getCurrentUser()
        if not usr:
            logging.warning("Don't know who to inform after rebuilding finished")
            notify = None
        else:
            notify = usr["name"]
        @tasks.callDeferred
        def processChunk(module, eval=None, notify=None, cursor=None, total=0):
            """Delete one batch (up to 99 entries) of the kind, then
            re-schedule itself with the next datastore cursor."""
            Skel = skeleton.skeletonByKind(module)
            if not Skel:
                logging.error("Invalid module %r", module)
                return
            query = Skel().all().setCursor(cursor)
            lol = safeeval.SafeEval()
            if eval and eval.strip():
                ast = lol.compile(eval)
            else:
                ast = None
            for obj in query.run(99):
                total += 1
                skel = Skel()
                if not skel.fromDB(obj.key):
                    logging.warning("Cannot remove %r, it doesn't exist", obj.key)
                    continue
                if ast and not lol.execute(ast, skel):
                    logging.info("The eval expression prohibits deletion of %r, its what you wanted :)", obj.key)
                    continue
                skel.delete()
            cursor = query.getCursor()
            if not cursor:  # We're done
                try:
                    if notify:
                        txt = ("Subject: Clearing %s done\n\n" +
                               "ViUR finished to clear all entries of %s.\n" +
                               "%d records updated in total on this kind.") % (module, module, total)
                        utils.sendEMail([notify], txt, None)
                except:  # OverQuota, whatever
                    pass
                logging.info("Finished clearing %d entries of %r", total, module)
                return
            logging.debug("Cleared %d entries of %r so far", total, module)
            processChunk(module, eval, notify, cursor, total)
        for mod in module:
            processChunk(mod, eval, notify=notify)
| 2.109375 | 2 |
input_output.py | mixanik379/python_lesson_2 | 0 | 12769233 | # однострочный комментарий
'''
первая строка
вторая строка
'''
print('Hello!')
print('Hello!', 'student!', 123, sep='xxx')
print('Hello!', 'student!', 123, end='yyy')
print()
# Ввод
age = input('Input your age')
print(age, type(age))
print(int(age), type(int(age)))
'''
print(1+1, 'student', sep='yyy', end='!')
print()
'''
| 4.1875 | 4 |
src/plugins_/cfdocs/__init__.py | jcberquist/sublimetext-cfml | 130 | 12769234 | <filename>src/plugins_/cfdocs/__init__.py
from .. import plugin
from .cfdocs import (
get_inline_documentation,
get_completion_docs,
get_goto_cfml_file
)
class CFMLPlugin(plugin.CFMLPlugin):
    """Thin adapter exposing the cfdocs module-level functions through
    the CFML plugin interface."""
    def get_completion_docs(self, cfml_view):
        # Delegates to the module-level cfdocs implementation.
        return get_completion_docs(cfml_view)
    def get_inline_documentation(self, cfml_view, doc_type):
        return get_inline_documentation(cfml_view, doc_type)
    def get_goto_cfml_file(self, cfml_view):
        return get_goto_cfml_file(cfml_view)
| 1.75 | 2 |
source/interstate_love_song/plugins/simple.py | ilpvfx/interstate_love_song | 9 | 12769235 | from dataclasses import dataclass
import dataclasses
import argparse
from hashlib import pbkdf2_hmac
from typing import Mapping, Any
from interstate_love_song.mapping.base import *
def hash_pass(s: str, salt="IGNORED"):
    """Hash a password with PBKDF2-HMAC-SHA256 (100000 iterations).

    Not the best way to store a password since the salt is known, but it
    offers a bit more protection than storing it plaintext."""
    digest = pbkdf2_hmac("sha256", s.encode("utf-8"), salt.encode("utf-8"), 100000)
    return digest.hex()
@dataclass
class SimpleMapperSettings:
    """Configuration for :class:`SimpleMapper`: one credential pair plus static resources."""

    username: str = "test"
    password_hash: str = "<PASSWORD>"
    resources: Sequence[Resource] = dataclasses.field(default_factory=list)
    domains: Sequence[str] = dataclasses.field(default_factory=list)
class SimpleMapper(Mapper):
    """A very simple mapper: accepts exactly one credential pair and returns a fixed resource list."""

    def __init__(
        self, username: str, password_hash: str, resources: Sequence[Resource], domains: Sequence[str],
    ):
        """
        :param username: The username to accept.
        :param password_hash: A password hash, as produced by :func:`hash_pass`.
        :param resources: Resources returned on successful authentication.
        :param domains: A list of valid domains.
        :raises TypeError:
        """
        super().__init__()
        self._username = str(username)
        self._password_hash = str(password_hash)
        self._resources = list(resources)
        self._domains = list(domains)

    @property
    def username(self) -> str:
        return self._username

    @property
    def password_hash(self) -> str:
        return self._password_hash

    @property
    def resources(self) -> Sequence[Resource]:
        return self._resources

    def map(self, credentials: Credentials, previous_host: Optional[str] = None) -> MapperResult:
        """Authenticate *credentials*; on success return the resources keyed by index."""
        usr, psw = credentials
        if not (isinstance(usr, str) and isinstance(psw, str)):
            raise ValueError("username and password must be strings.")
        # Guard clauses: fail fast on bad credentials, then on an empty pool.
        if usr != self.username or hash_pass(psw) != self._password_hash:
            return MapperStatus.AUTHENTICATION_FAILED, {}
        if not self._resources:
            return MapperStatus.NO_MACHINE, {}
        resource_map = {str(index): resource for index, resource in enumerate(self.resources)}
        return MapperStatus.SUCCESS, resource_map

    @property
    def domains(self):
        return self._domains

    @property
    def name(self):
        return "SimpleMapper"

    @classmethod
    def create_from_dict(cls, data: Mapping[str, Any]):
        """Build a mapper from a raw settings mapping."""
        from interstate_love_song.settings import load_dict_into_dataclass

        cfg = load_dict_into_dataclass(SimpleMapperSettings, data)
        return cls(cfg.username, cfg.password_hash, cfg.resources, cfg.domains)
if __name__ == "__main__":
    # Convenience CLI: print the hash of a password so it can be pasted into
    # the settings file as ``password_hash``.
    parser = argparse.ArgumentParser("hasher")
    parser.add_argument("PASSWORD")
    args = parser.parse_args()
    print(hash_pass(args.PASSWORD))
| 3.25 | 3 |
multirotor example/optimization paper demonstration/check_flight_script.py | DesignEngrLab/fmdkit | 8 | 12769236 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 11 10:23:55 2020
@author: <NAME>
"""
from drone_opt import *
import fmdtools.faultsim.propagate as propagate
import fmdtools.resultdisp.graph as graph
# Baseline flight plan at 80 m: build the model, simulate nominal operation,
# then sample single-component faults during the 'forward' phase and compute
# resilience costs two ways (direct and via the optimization wrapper).
mdl_med = x_to_mdl([1,1,80,1,1])
calc_oper(mdl_med)
endresults_med, resgraph, mdlhist_med =propagate.nominal(mdl_med)
plot_xy(mdlhist_med, endresults_med, title='Flight plan at 80 m')
app_med = SampleApproach(mdl_med, faults='single-component', phases={'forward'})
faulttime_med = app_med.times
# First time index where the plan reports 'taxi' = landing time.
landtime_med = min([i for i,a in enumerate(mdlhist_med['functions']['Planpath']['mode']) if a=='taxi'])
rcost_med = calc_res(mdl_med)
rcost_med2 = x_to_rcost([1,1],[80],[1,1])

# Same analysis for a low (50 m) flight plan.
mdl_low = x_to_mdl([1,1,50,1,1])
calc_oper(mdl_low)
endresults_low, resgraph, mdlhist_low =propagate.nominal(mdl_low)
plot_xy(mdlhist_low, endresults_low, title='Flight plan at 50 m')
app_low = SampleApproach(mdl_low, faults='single-component', phases={'forward'})
faulttime_low = app_low.times
landtime_low = min([i for i,a in enumerate(mdlhist_low['functions']['Planpath']['mode']) if a=='taxi'])
rcost_low = calc_res(mdl_low)
rcost_low2 = x_to_rcost([1,1],[50],[1,1])

# Same analysis for a high (180 m) flight plan.
mdl_hi = x_to_mdl([1,1,180,1,1])
calc_oper(mdl_hi)
endresults_hi, resgraph, mdlhist_hi =propagate.nominal(mdl_hi)
plot_xy(mdlhist_hi, endresults_hi, title='Flight plan at 180 m')
app_hi = SampleApproach(mdl_hi, faults='single-component', phases={'forward'})
faulttime_hi = app_hi.times
landtime_hi = min([i for i,a in enumerate(mdlhist_hi['functions']['Planpath']['mode']) if a=='taxi'])
rcost_hi = calc_res(mdl_hi)
rcost_hi2 = x_to_rcost([1,1],[180],[1,1])

# Overlay all three altitude profiles for comparison.
plot_xys({'80 m': mdlhist_med , '50 m':mdlhist_low , '180 m': mdlhist_hi}, {'80 m': endresults_med , '50 m':endresults_low , '180 m': endresults_hi})
| 1.929688 | 2 |
dissonance/storage/redis.py | jhgg/dissonance | 5 | 12769237 | from collections import UserDict
from collections.abc import MutableMapping
from io import BytesIO

from six import StringIO
from gevent.lock import Semaphore

from ..utils.importing import import_dotted_path

try:
    from cPickle import Unpickler, Pickler
except ImportError:
    from pickle import Unpickler, Pickler

try:
    StrictRedis = import_dotted_path('redis.StrictRedis')
except ImportError:
    raise ImportError("redis-py is not installed. Install it using `pip install redis` "
                      "(see https://github.com/andymccurdy/redis-py for more details)")
class RedisStorage(object):
    """Storage backend that keeps per-module data in Redis hashes."""

    _redis_opt_keys = 'host', 'port', 'db', 'password', 'socket_timeout', 'socket_connect_timeout', 'socket_keepalive', \
                      'socket_keepalive_options', 'connection_pool', 'unix_socket_path', 'encoding', 'encoding_errors', \
                      'errors', 'decode_responses', 'retry_on_timeout', 'ssl', 'ssl_keyfile', 'ssl_certfile', \
                      'ssl_cert_reqs', 'ssl_ca_certs'

    _redis_int_opts = 'port', 'db', 'socket_timeout',
    _redis_float_opts = 'socket_timeout',
    _redis_bool_opts = 'decode_responses', 'ssl', 'retry_on_timeout'

    def __init__(self, client, opts):
        self._client = client
        self._opts = opts
        self._redis = None
        self._prefix = opts.get('redis_key_prefix', '')

    def _get_redis_kwargs(self):
        """Collect ``redis_*`` options into StrictRedis keyword arguments."""
        kwargs = {
            key: self._opts['redis_%s' % key]
            for key in self._redis_opt_keys
            if 'redis_%s' % key in self._opts
        }
        # Coerce numeric options; int conversion runs first so
        # 'socket_timeout' (listed in both) ends up as a float of an int.
        for key in self._redis_int_opts:
            if key in kwargs:
                kwargs[key] = int(kwargs[key])
        for key in self._redis_float_opts:
            if key in kwargs:
                kwargs[key] = float(kwargs[key])
        return kwargs

    def _get_redis(self):
        """Build the StrictRedis client, preferring a full ``redis_url``."""
        if 'redis_url' in self._opts:
            return StrictRedis.from_url(self._opts['redis_url'])
        return StrictRedis(**self._get_redis_kwargs())

    def _get_hash_key(self, module_name):
        """Redis hash key for *module_name*, with the configured prefix."""
        return '{0}{1}'.format(self._prefix, module_name)

    @property
    def redis(self):
        if self._redis is None:
            raise RuntimeError("Attempting to access RedisStorage.redis from a RedisStorage that has not been started.")
        return self._redis

    def start(self):
        # Idempotent: only create the client on the first call.
        if self._redis is None:
            self._redis = self._get_redis()

    def stop(self):
        if self._redis:
            self._redis.connection_pool.disconnect()
            self._redis = None

    def get_data_for_module_name(self, module_name):
        """Return a dict-like view backed by this module's Redis hash."""
        return RedisDict(self, self._get_hash_key(module_name))
# Module-level alias: the storage loader looks up ``storage`` in this module.
storage = RedisStorage
class RedisDict(MutableMapping):
    """Dict-like view over a single Redis hash with pickled values and a write-through cache.

    Values are pickled before being stored in the hash and unpickled on
    access.  Reads are cached locally; writes go to Redis immediately and
    also populate the cache.  :meth:`sync` flushes cached entries back to
    Redis in one pipeline.

    Fixes over the previous revision: ``UserDict.DictMixin`` does not exist
    in Python 3 (it was a py2 module attribute), pickling must go through a
    bytes buffer rather than six's ``StringIO``, and ``__del__`` no longer
    crashes after an explicit ``close()``.
    """

    def __init__(self, storage, hash_key):
        self._storage = storage
        self._hash_key = hash_key
        self._protocol = 0
        self._cache = {}
        self._cache_write_lock = Semaphore()

    def _dumps(self, value):
        """Pickle *value* to bytes using this dict's protocol."""
        f = BytesIO()
        Pickler(f, self._protocol).dump(value)
        return f.getvalue()

    def keys(self):
        return self._storage.redis.hkeys(self._hash_key)

    def __iter__(self):
        # Required by MutableMapping; iterate the keys currently in Redis.
        return iter(self.keys())

    def __len__(self):
        return self._storage.redis.hlen(self._hash_key)

    def has_key(self, key):
        return key in self

    def __contains__(self, key):
        if key in self._cache:
            return True
        return self._storage.redis.hexists(self._hash_key, key)

    def get(self, key, default=None):
        if key in self:
            return self[key]
        return default

    def __getitem__(self, key):
        try:
            value = self._cache[key]
        except KeyError:
            if key not in self:
                raise KeyError(key)
            # hget returns raw bytes; unpickle via a binary buffer.
            f = BytesIO(self._storage.redis.hget(self._hash_key, key))
            value = Unpickler(f).load()
            self._cache[key] = value
        return value

    def __setitem__(self, key, value):
        with self._cache_write_lock:
            self._cache[key] = value
            self._storage.redis.hset(self._hash_key, key, self._dumps(value))

    def __delitem__(self, key):
        self._storage.redis.hdel(self._hash_key, key)
        with self._cache_write_lock:
            self._cache.pop(key, None)

    def close(self):
        """Flush the cache and detach from the storage (safe to call twice)."""
        if self._storage is not None:
            self.sync()
            self._storage = None

    def __del__(self):
        self.close()

    def sync(self):
        """Flush all cached entries to Redis in a single pipeline."""
        if not self._cache:
            return
        with self._cache_write_lock, self._storage.redis.pipeline() as pipeline:
            for key, entry in self._cache.items():
                pipeline.hset(self._hash_key, key, self._dumps(entry))
            pipeline.execute()
        self._cache.clear()
| 2.125 | 2 |
openbot/plugin.py | jcb1317/OpenBot | 0 | 12769238 | class Plugin(object):
"""
Plugin Base Class
Meant to be inherited for plugins
"""
    def __init__(self):
        # Registered commands: name -> {"_coro": callable, **metadata}.
        self._commands = {}
def command(self, name, **kwargs):
"""
Decorator to register a coroutine to a plugin command
:param name: Command Name
:return:
"""
kwargs = {i: kwargs[i] for i in kwargs.keys() if i != "_coro"}
def command_func(func):
self._commands[name] = {}
self._commands[name]["_coro"] = func
for i in kwargs.keys():
self._commands[name][i] = kwargs[i]
return func
return command_func
    def on_load(self, bot):
        """
        Function to be called when a plugin is loaded into a Bot. Meant to be overridden

        :param bot: Bot Instance into which the Plugin is loaded
        :return: None
        """
        # Intentionally a no-op hook for subclasses.
        pass
| 3.25 | 3 |
hmcdashboard/common.py | hayesall/hmc-dashboard | 0 | 12769239 | <filename>hmcdashboard/common.py
# Copyright 2022 <NAME>
# MIT License
from collections import namedtuple
from functools import lru_cache
import json
from pathlib import Path
import pandas as pd
import plotly
import plotly.graph_objects as go
from plotly import subplots
# A "View" is a slice of user data with a
#   readable name,
#   attribute type (e.g. heart rates are measured in BPM)
#   shape for plotting (e.g. heart rate is linear)
View = namedtuple("View", "readable attribute shape")


class Views:
    """Static registry describing how each supported data view is named and drawn."""

    VIEWS = {
        "sleep": View("Sleep", "level", "hv"),
        "heart_rate": View("BPM", "bpm", "linear"),
        "blood_oxygenation": View("SpO2", "spo2", "linear"),
        "stress": View("Stress", "stress", "linear"),
    }

    @staticmethod
    def views():
        """All known view identifiers."""
        return list(Views.VIEWS)

    @staticmethod
    def view_name(view: str):
        """Human-readable label for *view*."""
        return Views.VIEWS[view].readable

    @staticmethod
    def view_attribute(view: str):
        """Dataframe column holding *view*'s values."""
        return Views.VIEWS[view].attribute

    @staticmethod
    def fig_params(view: str):
        """Plotly trace parameters for *view*: its line shape plus lines mode."""
        view_shape = Views.VIEWS[view].shape
        return {"line_shape": view_shape, "mode": "lines"}

    @staticmethod
    def fig_update(view: str):
        """Y-axis overrides for *view*; only the sleep view needs custom ticks."""
        if view != "sleep":
            return dict()
        return {
            "ticktext": ["REM", "deep sleep", "light sleep", "awake"],
            "tickvals": [0, 1, 2, 3],
        }
class Users:
    """Loads user metadata and per-user, per-view CSV files from *data_path*."""

    def __init__(
        self, data_path: str = "data", metadata_file: str = "user_metadata.csv"
    ):
        self.METADATA = pd.read_csv(metadata_file)
        self.data_path = data_path

    def load_user_from_csv(self, user: str, view: str):
        """Load one user's records for *view* from ``<data_path>/<view>/<user>.csv``.

        :raises ValueError: if *view* is not a known view (previously an
            ``assert``, which vanishes under ``python -O``).
        """
        if view not in Views.views():
            raise ValueError("unknown view: %r" % (view,))
        pth = Path(self.data_path).joinpath(view).joinpath(user + ".csv")
        return pd.read_csv(pth)

    # NOTE(review): lru_cache on a method keeps every Users instance alive for
    # the cache's lifetime (ruff B019); kept to preserve the existing caching
    # behavior -- confirm instances are long-lived singletons.
    @lru_cache(maxsize=None)
    def get_users(self, view_pair=None):
        """
        Get user IDs (user_full) and short slugs (user).

        If a `view_pair` of two view names is specified, only return users
        where 1 or more records exist in both views.

        :raises ValueError: on a pair of the wrong length or unknown views.
        """
        if not view_pair:
            return self.METADATA[["user_full", "user"]].to_numpy().tolist()

        if len(view_pair) != 2:
            raise ValueError("get_users needs a pair of views, or None")
        first_view, second_view = view_pair
        for view in (first_view, second_view):
            if view not in Views.views():
                raise ValueError("unknown view: %r" % (view,))

        # Single .loc with a row mask and column list instead of chained
        # indexing (avoids an intermediate frame / SettingWithCopy ambiguity).
        mask = (self.METADATA[first_view] > 0) & (self.METADATA[second_view] > 0)
        return [
            tuple(row)
            for row in self.METADATA.loc[mask, ["user_full", "user"]].to_numpy().tolist()
        ]
def plot_one(
    dataframe, column: str, readable_name: str, fig_params: dict, fig_update: dict
):
    """Render a single-trace line figure for *column* and return it as Plotly JSON."""
    trace = go.Scatter(
        x=dataframe["timestamp"],
        y=dataframe[column],
        name=readable_name,
        **fig_params,
    )
    fig = go.Figure()
    fig.add_trace(trace)
    # Only apply axis overrides when the view actually defines some.
    if fig_update:
        fig.update_yaxes(**fig_update)
    return figure_to_json(fig)
def plot_two(dataframe1, dataframe2, view1, view2):
    """Stack two different views of the same user in one shared-x figure; return Plotly JSON."""
    fig = subplots.make_subplots(rows=2, shared_xaxes=True)

    # Which column should we be looking at?
    col1 = Views.view_attribute(view1)
    col2 = Views.view_attribute(view2)

    fig.add_trace(
        go.Scatter(
            x=dataframe1["timestamp"],
            y=dataframe1[col1],
            name=Views.view_name(view1),
            **Views.fig_params(view1),
        ),
        row=1,
        col=1,
    )
    fig.add_trace(
        go.Scatter(
            x=dataframe2["timestamp"],
            y=dataframe2[col2],
            name=Views.view_name(view2),
            **Views.fig_params(view2),
        ),
        row=2,
        col=1,
    )

    # Sleep levels are categorical; relabel the first subplot's y axis.
    # NOTE(review): only view1 is checked here (unlike plot_comparison, which
    # relabels both axes) -- confirm a sleep view2 is not expected.
    if view1 == "sleep":
        fig.update_layout(
            yaxis1=dict(
                ticktext=["REM", "deep sleep", "light sleep", "awake"],
                tickvals=[0, 1, 2, 3],
            )
        )

    return figure_to_json(fig)
def plot_comparison(dataframe1, dataframe2, view, usernames: tuple = None):
    """Compare two users on the same *view* in two stacked subplots; return Plotly JSON."""
    if usernames is None:
        uname1, uname2 = "", ""
    else:
        uname1, uname2 = usernames

    fig = subplots.make_subplots(rows=2, shared_xaxes=True)

    # Both traces plot the same view's column, one user per row.
    col = Views.view_attribute(view)

    fig.add_trace(
        go.Scatter(
            x=dataframe1.timestamp,
            y=dataframe1[col],
            name=uname1 + " " + Views.view_name(view),
            **Views.fig_params(view),
        ),
        row=1,
        col=1,
    )
    fig.add_trace(
        go.Scatter(
            x=dataframe2.timestamp,
            y=dataframe2[col],
            name=uname2 + " " + Views.view_name(view),
            **Views.fig_params(view),
        ),
        row=2,
        col=1,
    )

    # Sleep levels are categorical; relabel both y axes.
    if view == "sleep":
        fig.update_layout(
            yaxis1=dict(
                ticktext=["REM", "deep sleep", "light sleep", "awake"],
                tickvals=[0, 1, 2, 3],
            ),
            yaxis2=dict(
                ticktext=["REM", "deep sleep", "light sleep", "awake"],
                tickvals=[0, 1, 2, 3],
            ),
        )

    return figure_to_json(fig)
def figure_to_json(figure):
    """Serialize a Plotly figure with Plotly's JSON encoder (handles numpy/dates)."""
    return json.dumps(figure, cls=plotly.utils.PlotlyJSONEncoder)
| 2.53125 | 3 |
pulse/app.py | emptymalei/social-pulse | 7 | 12769240 | import click
from pulse import Config
from douban import DoubanInterest, DoubanStatus
from github import GitHub
from loguru import logger
from pulse import CombinedPulse
@click.command()
@click.option("-c", "--config", type=click.Path(), help="Path to config")
def pulse(config):
    """Fetch all social pulses (Douban, GitHub) and write the combined feeds under dashboard/data."""
    conf = Config(config)
    logger.debug(f"The full config: {conf}")

    # Douban: book/movie/music interests, then status posts.
    # NOTE(review): Config apparently supports list-path indexing
    # (conf[["social", ...]]) -- confirmed only by usage here.
    logger.debug(f'Douban interest config: {conf[["social", "douban", "book_movie_music"]]}')
    douban_interests = DoubanInterest(
        conf[["social", "douban", "book_movie_music"]], base_folder="dashboard/data"
    )
    douban_interests.run()

    logger.debug(f'Douban status config: {conf[["social", "douban", "status"]]}')
    douban_status = DoubanStatus(
        conf[["social", "douban", "status"]], base_folder="dashboard/data"
    )
    douban_status.run()

    # GitHub events.
    logger.debug(f'GitHub config: {conf[["social", "github", "events"]]}')
    github = GitHub(conf[["social", "github", "events"]], base_folder="dashboard/data")
    github.run()

    # Combine pulses into three feeds: everything, social-media only, tech only.
    combined_publse = CombinedPulse(
        [douban_interests.pulses, github.pulses], conf[["combined"]], base_folder="dashboard/data"
    )
    combined_publse.save()

    sm_combined_publse = CombinedPulse(
        [douban_interests.pulses, douban_status.pulses], conf[["combined-social-media"]], base_folder="dashboard/data"
    )
    sm_combined_publse.save()

    tech_combined_publse = CombinedPulse(
        [github.pulses], conf[["combined-tech"]], base_folder="dashboard/data"
    )
    tech_combined_publse.save()


if __name__ == "__main__":
    pulse()
| 2.21875 | 2 |
year2020/day2/part2.py | kalaspuff/advent-of-code | 0 | 12769241 | <filename>year2020/day2/part2.py
from helpers import int_minus
from values import values
async def run():
    """Count passwords where exactly one of the two given positions holds the required character."""
    # Rows look like "N-M C: password"; int_minus presumably converts the
    # 1-based positions to 0-based indices -- confirm against helpers.
    for pos_1, pos_2, char, password in values.match_rows(r"^([0-9]+)-([0-9]+) (.): (.*)$", (int_minus, int_minus)):
        # XOR: the policy holds when exactly one position matches.
        if bool(password[pos_1] == char) ^ bool(password[pos_2] == char):
            values.counter += 1

    return values.counter
# [values.year] (number) 2020
# [values.day] (number) 2
# [values.part] (number) 2
# [values.input_filename] (str) ./year2020/day2/input
# [values.counter] (number) 267
#
# Result: 267
| 2.953125 | 3 |
dogesay/script.py | jinnovation/dogesay | 10 | 12769242 | from argparse import ArgumentParser
import pkgutil
import random
from random import randrange, choice
# Word pools used to "dogeify" clauses.
DOGE_PREFIXES = ["such", "much", "so", "many", "wow", "very"]
DOGE_EJACULATES = ["wow"]
# A d10 roll above WOW_CHANCE inserts another ejaculation (10% per iteration).
WOW_CHANCE = 8
# Bounds (in spaces) for the random padding; randrange excludes the upper bound.
MAX_WHITESPACE = 15
MIN_WHITESPACE = 2
def doge_syntax(clause):
    """Prefix a single-word clause with a random doge word; leave multi-word clauses untouched."""
    if len(clause.split()) > 1:
        return clause
    return choice(DOGE_PREFIXES) + " " + clause
# Line indices already consumed by random inserts into the doge face.
used_indices = []


def random_select_no_repeat(max, ref_pool):
    """Pick a random index in [0, max) not yet in *ref_pool*; record it and return it.

    NOTE(review): loops forever once ref_pool holds every index -- confirm
    callers never request more picks than there are slots.
    """
    while True:
        candidate = randrange(0, max)
        if candidate not in ref_pool:
            break
    ref_pool.append(candidate)
    return candidate
def random_insert_clause(clause, img_file):
    """Append *clause*, with random leading whitespace, to a not-yet-used line of *img_file*."""
    target = random_select_no_repeat(len(img_file), used_indices)
    img_file[target] = img_file[target] + random_whitespace() + clause
def random_whitespace():
    """Return a random run of MIN_WHITESPACE..MAX_WHITESPACE-1 spaces."""
    width = randrange(MIN_WHITESPACE, MAX_WHITESPACE)
    return width * " "
def generate_ejacs(output):
    # Keep inserting a random ejaculation while the d10 roll exceeds
    # WOW_CHANCE; with WOW_CHANCE = 8 each extra insert has a 10% chance.
    while randrange(0,10) > WOW_CHANCE:
        random_insert_clause(choice(DOGE_EJACULATES), output)
# CLI: every positional argument is a clause for doge to say.
parser = ArgumentParser(description="Cowsay for a new generation.")
parser.add_argument("clauses", nargs="*",
                    help="things you want doge to say")
def main():
    args = parser.parse_args()
    # The doge ASCII face ships inside the package.
    doge_face_data = pkgutil.get_data(__name__, "static/doge")
    doge_face_lines = doge_face_data.decode('utf8').split("\n")

    clauses_source = args.clauses
    # One distinct target line per clause.  NOTE(review): sample() raises
    # ValueError if more clauses are given than the face has lines -- confirm
    # that is acceptable.
    indices = random.sample(range(len(doge_face_lines)), len(clauses_source))
    for clause, index in zip(clauses_source, indices):
        clause = random_whitespace()+doge_syntax(clause.strip())
        # Sprinkle random "wow"s elsewhere on the face.
        generate_ejacs(doge_face_lines)
        # NOTE(review): whitespace is prepended twice (here and on the clause
        # itself above) -- presumably intentional extra jitter; confirm.
        doge_face_lines[index] += (random_whitespace() + clause)

    for line in doge_face_lines:
        print(line)

if __name__ == "__main__":
    main()
| 2.609375 | 3 |
yoapi/models/status.py | YoApp/yo-api | 1 | 12769243 | <filename>yoapi/models/status.py
# -*- coding: utf-8 -*-
"""Status model"""
from flask_mongoengine import Document
from mongoengine import StringField
from .helpers import DocumentMixin, ReferenceField
from yoapi.models import User
class Status(DocumentMixin, Document):
    """A user's status message, stored in the ``status`` collection."""

    meta = {'collection': 'status',
            'indexes': ['user'],
            'auto_create_index': False}

    # Owning user.
    user = ReferenceField(User)
    # The status text itself.
    status = StringField(required=True)

    def get_public_dict(self):
        """Return the externally visible fields only (no user reference)."""
        # ``created`` presumably comes from DocumentMixin -- confirm there.
        return {
            'status': self.status,
            'created': self.created
        }
| 2.15625 | 2 |
networks/model.py | HighCWu/anime_biggan_toy | 140 | 12769244 | <reponame>HighCWu/anime_biggan_toy
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import layers, dygraph as dg
from paddle.fluid.initializer import Normal, Constant, Uniform
class ModelCache(object):
    # Process-wide flags shared by the layers below: BatchNorm.forward picks
    # running averages vs. batch statistics based on train_mode / initialized.
    G = None
    D = None
    train_mode = False
    initialized = False

# The class itself is used as a singleton-style shared handle.
model_cache = ModelCache
def unpool(value):
    """Unpooling operation.
    N-dimensional version of the unpooling operation from
    https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf
    Taken from: https://github.com/tensorflow/tensorflow/issues/2169
    Args:
        value: a Tensor of shape [b, d0, d1, ..., dn, ch]
        name: name of the op
    Returns:
        A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch]

    Note: this port takes NCHW input; it transposes to channels-last,
    doubles each spatial dim by interleaving zeros, then transposes back.
    """
    value = layers.transpose(value, [0,2,3,1])
    sh = value.shape
    dim = len(sh[1:-1])
    out = (layers.reshape(value, [-1] + sh[-dim:]))
    # For each spatial axis (last to first), concatenate a zero copy to
    # double that axis after the final reshape.
    for i in range(dim, 0, -1):
        out = layers.concat([out, layers.zeros_like(out)], i)
    out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]
    out = layers.reshape(out, out_size)
    out = layers.transpose(out, [0,3,1,2])
    return out
class ReLU(dg.Layer):
    """Stateless ReLU wrapped as a layer."""
    def forward(self, x):
        return layers.relu(x)
class SoftMax(dg.Layer):
    """Softmax layer that forwards its construction kwargs (e.g. ``axis``) to layers.softmax."""
    def __init__(self, **kwargs):
        super().__init__()
        self.kwargs = kwargs
    def forward(self, x):
        return layers.softmax(x, **self.kwargs)
class BatchNorm(dg.BatchNorm): # not trainable
    """Inference-oriented BatchNorm that sources its statistics from
    manually accumulated sums (accumulated_* / counter) instead of the
    framework's running stats, and can be made non-affine."""
    def __init__(self, *args, **kwargs):
        # ``affine`` is our own flag; strip it before calling the base class.
        if 'affine' in kwargs:
            affine = kwargs.pop('affine')
        else:
            affine = True
        super().__init__(*args, **kwargs)
        self._use_global_stats = True
        if not affine:
            # Replace trainable scale/shift parameters with detached
            # constants (gamma=1, beta=0).
            weight = (self.weight * 0 + 1).detach()
            bias = (self.bias * 0).detach()
            del self._parameters['bias']
            del self._parameters['weight']
            self.weight = weight
            self.bias = bias
            self.weight.stop_gradient = True
            self.bias.stop_gradient = True
        # Accumulated statistics loaded from the checkpoint; the counter
        # starts near zero to avoid division by zero before loading.
        self.accumulated_mean = self.create_parameter(shape=[args[0]], default_initializer=Constant(0.0))
        self.accumulated_var = self.create_parameter(shape=[args[0]], default_initializer=Constant(0.0))
        self.accumulated_counter = self.create_parameter(shape=[1], default_initializer=Constant(1e-12))
        self.accumulated_mean.stop_gradient = True
        self.accumulated_var.stop_gradient = True
        self.accumulated_counter.stop_gradient = True

    def forward(self, inputs, *args, **kwargs):
        # Detach _mean/_variance from the parameter dict so we can overwrite
        # them with plain tensors below.
        if '_mean' in self._parameters:
            del self._parameters['_mean']
        if '_variance' in self._parameters:
            del self._parameters['_variance']
        if not model_cache.initialized and not model_cache.train_mode:
            # First eval pass: derive stats from the accumulated sums.
            self._mean = (self.accumulated_mean / self.accumulated_counter)
            self._variance = (self.accumulated_var / self.accumulated_counter)
        if model_cache.train_mode:
            # Batch statistics over all axes except the channel axis (1).
            axes = [0] + ([] if len(inputs.shape) == 2 else list(range(2,len(inputs.shape))))
            _mean = layers.reduce_mean(inputs, axes, keep_dim=True)
            self._mean = layers.reduce_mean(inputs, axes, keep_dim=False)
            self._variance = layers.reduce_mean((inputs-_mean)**2, axes)
        else:
            self._mean = self._mean.detach()
            self._variance = self._variance.detach()
        return super().forward(inputs, *args, **kwargs)
class SpectralNorm(dg.Layer): # not trainable
    """Wrap *module* so its ``name`` weight is divided by its largest
    singular value (estimated by power iteration) before the first forward.
    The normalization runs once (``initialized`` latch) -- inference only."""
    def __init__(self, module, name='weight', power_iterations=2):
        super().__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        self.initialized = False
        if not self._made_params():
            self._make_params()

    def _update_u(self):
        """Run power iteration, then write the normalized weight back onto the module."""
        w = self.weight
        u = self.weight_u
        if len(w.shape) == 4:
            # Conv weight: flatten to 2D with output channels last.
            _w = layers.transpose(w, [2,3,1,0])
            _w = layers.reshape(_w, [-1, _w.shape[-1]])
        else:
            _w = layers.reshape(w, [-1, w.shape[-1]])
            _w = layers.reshape(_w, [-1, _w.shape[-1]])
        # Iterate on whichever side of the matrix is smaller.
        singular_value = "left" if _w.shape[0] <= _w.shape[1] else "right"
        norm_dim = 0 if _w.shape[0] <= _w.shape[1] else 1
        for _ in range(self.power_iterations):
            if singular_value == "left":
                v = layers.l2_normalize(layers.matmul(_w, u, transpose_x=True), axis=norm_dim)
                u = layers.l2_normalize(layers.matmul(_w, v), axis=norm_dim)
            else:
                v = layers.l2_normalize(layers.matmul(u, _w, transpose_y=True), axis=norm_dim)
                u = layers.l2_normalize(layers.matmul(v, _w), axis=norm_dim)
        if singular_value == "left":
            sigma = layers.matmul(layers.matmul(u, _w, transpose_x=True), v)
        else:
            sigma = layers.matmul(layers.matmul(v, _w), u, transpose_y=True)
        # sigma approximates the spectral norm; divide it out of the weight.
        _w = w / sigma.detach()
        setattr(self.module, self.name, _w.detach()) # setattr(self.module, self.name, _w)
        # self.weight_u.set_value(u)

    def _made_params(self):
        # True if this wrapper already owns the weight and its u vector.
        try:
            self.weight
            self.weight_u
            return True
        except AttributeError:
            return False

    def _make_params(self):
        """Move the wrapped module's weight onto this wrapper and create the u vector."""
        # paddle linear weight is similar with tf's, and conv weight is similar with pytorch's.
        w = getattr(self.module, self.name)
        if len(w.shape) == 4:
            _w = layers.transpose(w, [2,3,1,0])
            _w = layers.reshape(_w, [-1, _w.shape[-1]])
        else:
            _w = layers.reshape(w, [-1, w.shape[-1]])
        singular_value = "left" if _w.shape[0] <= _w.shape[1] else "right"
        norm_dim = 0 if _w.shape[0] <= _w.shape[1] else 1
        u_shape = (_w.shape[0], 1) if singular_value == "left" else (1, _w.shape[-1])
        u = self.create_parameter(shape=u_shape, default_initializer=Normal(0, 1))
        u.stop_gradient = True
        u.set_value(layers.l2_normalize(u, axis=norm_dim))
        del self.module._parameters[self.name]
        self.add_parameter("weight", w)
        self.add_parameter("weight_u", u)

    def forward(self, *args, **kwargs):
        # Normalize the weight exactly once, on first use.
        if not self.initialized:
            self._update_u()
            self.initialized = True
        return self.module.forward(*args, **kwargs)
class SelfAttention(dg.Layer):
    """SAGAN-style self-attention: theta/phi/g projections, keys and values
    max-pooled 2x, output scaled by a learned gamma and added residually."""
    def __init__(self, in_dim, activation=layers.relu):
        super().__init__()
        self.chanel_in = in_dim
        self.activation = activation

        self.theta = SpectralNorm(dg.Conv2D(in_dim, in_dim // 8, 1, bias_attr=False))
        self.phi = SpectralNorm(dg.Conv2D(in_dim, in_dim // 8, 1, bias_attr=False))
        self.pool = dg.Pool2D(2, 'max', 2)
        self.g = SpectralNorm(dg.Conv2D(in_dim, in_dim // 2, 1, bias_attr=False))
        self.o_conv = SpectralNorm(dg.Conv2D(in_dim // 2, in_dim, 1, bias_attr=False))
        # gamma starts at 0 so attention is initially a no-op (pure residual).
        self.gamma = self.create_parameter([1,], default_initializer=Constant(0.0))

        self.softmax = SoftMax(axis=-1)

    def forward(self, x):
        m_batchsize, C, width, height = x.shape
        N = height * width

        theta = self.theta(x)
        phi = self.phi(x)
        # Keys are pooled: N query positions attend over N // 4 key positions.
        phi = self.pool(phi)
        phi = layers.reshape(phi,(m_batchsize, -1, N // 4))
        theta = layers.reshape(theta,(m_batchsize, -1, N))
        theta = layers.transpose(theta,(0, 2, 1))
        attention = self.softmax(layers.bmm(theta, phi))
        g = self.g(x)
        g = layers.reshape(self.pool(g),(m_batchsize, -1, N // 4))
        attn_g = layers.reshape(layers.bmm(g, layers.transpose(attention,(0, 2, 1))),(m_batchsize, -1, width, height))
        out = self.o_conv(attn_g)
        return self.gamma * out + x
class ConditionalBatchNorm(dg.Layer):
    """BatchNorm whose per-channel scale/shift are produced from a condition
    vector (class embedding + noise chunk) via spectral-normalized linears."""
    def __init__(self, num_features, num_classes, epsilon=1e-5, momentum=0.1):
        super().__init__()
        # Non-affine: gamma/beta come from the condition, not from parameters.
        self.bn_in_cond = BatchNorm(num_features, affine=False, epsilon=epsilon, momentum=momentum)
        self.gamma_embed = SpectralNorm(dg.Linear(num_classes, num_features, bias_attr=False))
        self.beta_embed = SpectralNorm(dg.Linear(num_classes, num_features, bias_attr=False))

    def forward(self, x, y):
        out = self.bn_in_cond(x)

        # y may already be a precomputed [gamma, beta] pair (list) instead of
        # a condition vector.
        if isinstance(y, list):
            gamma, beta = y
            out = layers.reshape(gamma, (0, 0, 1, 1)) * out + layers.reshape(beta, (0, 0, 1, 1))
            return out

        gamma = self.gamma_embed(y)
        beta = self.beta_embed(y)
        out = layers.reshape(gamma, (0, 0, 1, 1)) * out + layers.reshape(beta, (0, 0, 1, 1))
        return out
class ResBlock(dg.Layer):
    """BigGAN residual block: (cond-BN -> act -> [up] -> conv) x 2, an
    optional 1x1 skip projection when resampling, optional [down]sampling,
    and optional self-attention on the output."""
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size=[3, 3],  # NOTE(review): mutable default; never mutated here.
        padding=1,
        stride=1,
        n_class=None,
        conditional=True,
        activation=layers.relu,
        upsample=True,
        downsample=False,
        z_dim=128,
        use_attention=False,
        skip_proj=None
    ):
        super().__init__()

        if conditional:
            self.cond_norm1 = ConditionalBatchNorm(in_channel, z_dim)

        self.conv0 = SpectralNorm(
            dg.Conv2D(in_channel, out_channel, kernel_size, stride, padding)
        )

        if conditional:
            self.cond_norm2 = ConditionalBatchNorm(out_channel, z_dim)

        self.conv1 = SpectralNorm(
            dg.Conv2D(out_channel, out_channel, kernel_size, stride, padding)
        )

        # A 1x1 projection is added on the skip path only when resampling and
        # the caller did not explicitly opt out with skip_proj=True.
        self.skip_proj = False
        if skip_proj is not True and (upsample or downsample):
            self.conv_sc = SpectralNorm(dg.Conv2D(in_channel, out_channel, 1, 1, 0))
            self.skip_proj = True

        if use_attention:
            self.attention = SelfAttention(out_channel)

        self.upsample = upsample
        self.downsample = downsample
        self.activation = activation
        self.conditional = conditional
        self.use_attention = use_attention

    def forward(self, input, condition=None):
        out = input

        # condition may be a [cond1, cond2] pair (one per BN) or shared.
        if self.conditional:
            out = self.cond_norm1(out, condition[0] if isinstance(condition, list) else condition)

        out = self.activation(out)

        if self.upsample:
            out = unpool(out)

        out = self.conv0(out)

        if self.conditional:
            out = self.cond_norm2(out, condition[1] if isinstance(condition, list) else condition)

        out = self.activation(out)

        out = self.conv1(out)

        if self.downsample:
            out = layers.pool2d(out, 2, pool_type='avg', pool_stride=2)

        if self.skip_proj:
            # Mirror the resampling on the skip path, then project channels.
            skip = input
            if self.upsample:
                skip = unpool(skip)
            skip = self.conv_sc(skip)
            if self.downsample:
                skip = layers.pool2d(skip, 2, pool_type='avg', pool_stride=2)
            out = out + skip
        else:
            # NOTE(review): without a projection the residual is NOT added --
            # `skip` is assigned but unused; matches the checkpoint's graph.
            skip = input

        if self.use_attention:
            out = self.attention(out)

        return out
class Generator(dg.Layer): # not trainable
    """BigGAN generator: the latent code is split into num_split chunks; the
    first feeds a 4x4 seed, each remaining chunk (concatenated with the class
    embedding) conditions one upsampling ResBlock."""
    def __init__(self, code_dim=128, n_class=1000, chn=96, blocks_with_attention="B4", resolution=512):
        super().__init__()

        def GBlock(in_channel, out_channel, n_class, z_dim, use_attention):
            # Generator block: conditional, upsampling ResBlock.
            return ResBlock(in_channel, out_channel, n_class=n_class, z_dim=z_dim, use_attention=use_attention)

        self.embed_y = dg.Linear(n_class, 128, bias_attr=False)

        self.chn = chn
        self.resolution = resolution
        # e.g. "B4" -> attention after block 4; "" -> no attention.
        self.blocks_with_attention = set(blocks_with_attention.split(","))
        self.blocks_with_attention.discard('')

        gblock = []
        in_channels, out_channels = self.get_in_out_channels()
        self.num_split = len(in_channels) + 1

        # Each block's condition = its noise chunk + the 128-d class embedding.
        z_dim = code_dim//self.num_split + 128
        self.noise_fc = SpectralNorm(dg.Linear(code_dim//self.num_split, 4 * 4 * in_channels[0]))

        self.sa_ids = [int(s.split('B')[-1]) for s in self.blocks_with_attention]

        for i, (nc_in, nc_out) in enumerate(zip(in_channels, out_channels)):
            gblock.append(GBlock(nc_in, nc_out, n_class=n_class, z_dim=z_dim, use_attention=(i+1) in self.sa_ids))
        self.blocks = dg.LayerList(gblock)

        self.output_layer_bn = BatchNorm(1 * chn, epsilon=1e-5)
        self.output_layer_conv = SpectralNorm(dg.Conv2D(1 * chn, 3, [3, 3], padding=1))

    def get_in_out_channels(self):
        """Per-resolution channel schedule (multiples of self.chn)."""
        resolution = self.resolution
        if resolution == 1024:
            channel_multipliers = [16, 16, 8, 8, 4, 2, 1, 1, 1]
        elif resolution == 512:
            channel_multipliers = [16, 16, 8, 8, 4, 2, 1, 1]
        elif resolution == 256:
            channel_multipliers = [16, 16, 8, 8, 4, 2, 1]
        elif resolution == 128:
            channel_multipliers = [16, 16, 8, 4, 2, 1]
        elif resolution == 64:
            channel_multipliers = [16, 16, 8, 4, 2]
        elif resolution == 32:
            channel_multipliers = [4, 4, 4, 4]
        else:
            raise ValueError("Unsupported resolution: {}".format(resolution))
        in_channels = [self.chn * c for c in channel_multipliers[:-1]]
        out_channels = [self.chn * c for c in channel_multipliers[1:]]
        return in_channels, out_channels

    def forward(self, input, class_id, input_class_emb=False):
        # input may be a pre-split list ([seed, pair, pair, ...]) or a single
        # latent tensor to split evenly.
        if isinstance(input, list):
            codes = [input[0]]
            codes += [input[2*i+1:2*i+3] for i in range(len(input)//2)]
        else:
            codes = layers.split(input, self.num_split, 1)

        if not input_class_emb:
            class_emb = self.embed_y(class_id) # 128
        else:
            # class_id is already an embedding.
            class_emb = class_id

        out = self.noise_fc(codes[0])
        # Reshape the seed as NHWC 4x4, then to NCHW.
        out = layers.transpose(layers.reshape(out,(out.shape[0], 4, 4, -1)),(0, 3, 1, 2))
        for i, (code, gblock) in enumerate(zip(codes[1:], self.blocks)):
            if isinstance(input, list):
                condition = [layers.concat([c, class_emb], 1) for c in code]
            else:
                condition = layers.concat([code, class_emb], 1)
            out = gblock(out, condition)

        out = self.output_layer_bn(out)
        out = layers.relu(out)
        out = self.output_layer_conv(out)

        # tanh output mapped from [-1, 1] to [0, 1].
        return (layers.tanh(out) + 1) / 2
class Discriminator(dg.Layer):
    """BigGAN projection discriminator: a stack of downsampling ResBlocks,
    global sum pooling, a linear head, plus a class-embedding projection term."""
    def __init__(self, n_class=1000, chn=96, blocks_with_attention="B2", resolution=256):
        super().__init__()

        def DBlock(in_channel, out_channel, downsample=True, use_attention=False, skip_proj=None):
            # Discriminator block: unconditional, optionally downsampling.
            return ResBlock(in_channel, out_channel, conditional=False, upsample=False,
                            downsample=downsample, use_attention=use_attention, skip_proj=skip_proj)

        self.chn = chn
        self.colors = 3
        self.resolution = resolution
        self.blocks_with_attention = set(blocks_with_attention.split(","))
        self.blocks_with_attention.discard('')

        dblock = []
        in_channels, out_channels = self.get_in_out_channels()

        self.sa_ids = [int(s.split('B')[-1]) for s in self.blocks_with_attention]

        # All blocks downsample except the last; skip projection is disabled
        # when in/out channel counts match.
        for i, (nc_in, nc_out) in enumerate(zip(in_channels[:-1], out_channels[:-1])):
            dblock.append(DBlock(nc_in, nc_out, downsample=True,
                                 use_attention=(i+1) in self.sa_ids, skip_proj=nc_in==nc_out))
        dblock.append(DBlock(in_channels[-1], out_channels[-1], downsample=False,
                             use_attention=len(out_channels) in self.sa_ids, skip_proj=in_channels[-1]==out_channels[-1]))
        self.blocks = dg.LayerList(dblock)

        self.final_fc = SpectralNorm(dg.Linear(16 * chn, 1))

        self.embed_y = dg.Embedding(size=[n_class, 16 * chn], is_sparse=False, param_attr=Uniform(-0.1,0.1))
        self.embed_y = SpectralNorm(self.embed_y)

    def get_in_out_channels(self):
        """Per-resolution channel schedule; input channels start at RGB (3)."""
        colors = self.colors
        resolution = self.resolution
        if resolution == 1024:
            channel_multipliers = [1, 1, 1, 2, 4, 8, 8, 16, 16]
        elif resolution == 512:
            channel_multipliers = [1, 1, 2, 4, 8, 8, 16, 16]
        elif resolution == 256:
            channel_multipliers = [1, 2, 4, 8, 8, 16, 16]
        elif resolution == 128:
            channel_multipliers = [1, 2, 4, 8, 16, 16]
        elif resolution == 64:
            channel_multipliers = [2, 4, 8, 16, 16]
        elif resolution == 32:
            channel_multipliers = [2, 2, 2, 2]
        else:
            raise ValueError("Unsupported resolution: {}".format(resolution))
        out_channels = [self.chn * c for c in channel_multipliers]
        in_channels = [colors] + out_channels[:-1]
        return in_channels, out_channels

    def forward(self, input, class_id=None):
        out = input
        features = []
        for i, dblock in enumerate(self.blocks):
            out = dblock(out)
            # Keep per-block activations (used e.g. for feature matching).
            features.append(out)
        out = layers.relu(out)
        # Global sum pooling over spatial dims.
        out = layers.reduce_sum(out, [2,3])
        out_linear = self.final_fc(out)
        if class_id is None:
            prod = 0
        else:
            # Projection term: <embed(class), pooled features>.
            class_emb = self.embed_y(class_id)
            prod = layers.reduce_sum((class_emb * out), 1, keep_dim=True)
        return layers.sigmoid(out_linear + prod), features
| 2.796875 | 3 |
StrategyResearch/ma5_ma10.py | webclinic017/stock_quant | 1 | 12769245 | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# @Project : stock_quant
# @Date : 2021/12/19 16:27
# @Author : Adolf
# @File : ma5_ma10.py
# @Function:
import pandas as pd
import pandas_ta as ta
from GetBaseData.hanle_data_show import get_show_data
from Utils.ShowKline.base_kline import draw_chart
# Show full DataFrames when printing (no column folding, no row truncation).
pd.set_option("expand_frame_repr", False)
pd.set_option("display.max_rows", None)
# Daily bars for ticker 600570 ("hfq" directory — presumably post-adjusted
# prices; TODO confirm against the data pipeline).
df = pd.read_csv("Data/RealData/hfq/600570.csv")
# print(df)
# df.set_index(pd.DatetimeIndex(df["date"]), inplace=True)
# df.ta.log_return(cumulative=True, append=True)
# df.ta.percent_return(cumulative=True, append=True)
# Moving averages of the close: 5/10-day simple plus a 10-day exponential.
# NOTE(review): "ema10" is computed but never used below — looks like it is
# kept for experimentation.
df["sma5"] = ta.sma(df['close'], length=5)
df["sma10"] = ta.sma(df['close'], length=10)
df["ema10"] = ta.ema(df['close'], length=10)
# print(help(ta.macd))
# MACD(12, 26, 9): spread, histogram and signal line, split into columns.
macd_df = ta.macd(close=df['close'])
df['macd'], df['histogram'], df['signal'] = [macd_df['MACD_12_26_9'], macd_df['MACDh_12_26_9'],
                                             macd_df['MACDs_12_26_9']]
# pd.concat([df, ta.macd(close=df['close'])])
# Keep only bars after 2020-01-01 and renumber rows from zero so the
# positional slicing below matches the index.
df = df[df["date"] > "2020-01-01"]
df.reset_index(inplace=True, drop=True)
# BUY signal: 5-day SMA crosses above the 10-day SMA (golden cross).
df.loc[(df["sma5"] > df["sma10"]) & (df["sma5"].shift(1) < df["sma10"].shift(1)), "trade"] = "BUY"
# df.loc[(df["sma5"] < df["sma10"]) & (df["sma5"].shift(1) > df["sma10"].shift(1)), "trade"] = "SELL"
# df = df.loc[df["trade"].notnull() & (df['macd'] > 0) & (df["histogram"] > 0)]
df_chose = df.loc[df["trade"].notnull()]
# print(df_chose)
# Render a window of up to 60 bars before and 10 after the FIRST signal only
# (the loop breaks after one chart is written).
for show_index in df_chose.index:
    # print(show_index)
    show_df = df[max(0, show_index - 60):min(len(df), show_index + 10)]
    show_data = get_show_data(_df=show_df)
    draw_chart(show_data, show_html_path="ShowHtml/Ma5Ma10.html")
    break
# df.dropna(subset=['trade'], inplace=True)
# print(df.tail(10))
| 2.421875 | 2 |
onfido/urls.py | snicks1/django-onfido | 0 | 12769246 | <reponame>snicks1/django-onfido
# -*- coding: utf-8 -*-
"""onfido urls."""
from django.conf.urls import url
from onfido.views import status_update
# URL routes for the onfido app: a single webhook endpoint that Onfido
# calls with status updates, handled by views.status_update.
urlpatterns = [
    url(r'^webhook/$', status_update, name='status_update'),
]
| 1.382813 | 1 |
pypoll.py | jzebker/Election_Analysis | 0 | 12769247 | #data we need:
# Election analysis. Data we need:
# 1) total votes cast
# 2) list of all candidates who received votes
# 3) percentage of votes each candidate won
# 4) number of votes each candidate won
# 5) winner of the election based on popular vote
import os
import csv

# Input/output locations, relative to the script's working directory.
file_to_load = os.path.join("..", "Resources", "election_results.csv")
file_to_save = os.path.join("..", "analysis", "election_analysis.txt")


def tally_votes(rows):
    """Count ballots from CSV rows of the form [ballot_id, county, candidate].

    Returns:
        (total_votes, candidate_votes): overall ballot count plus a dict
        mapping each candidate name to their vote count (insertion order
        matches first appearance in the file).
    """
    total_votes = 0
    candidate_votes = {}
    for row in rows:
        total_votes += 1
        candidate = row[2]
        candidate_votes[candidate] = candidate_votes.get(candidate, 0) + 1
    return total_votes, candidate_votes


def find_winner(candidate_votes):
    """Return (winner_name, winner_votes) by raw popular-vote count.

    BUG FIX: the previous version also required the *rounded* percentage to
    be strictly greater, so a candidate with more votes but an equal rounded
    percentage could be skipped. The raw count alone decides the winner.
    """
    winning_candidate = ""
    winning_count = 0
    for candidate, votes in candidate_votes.items():
        if votes > winning_count:
            winning_candidate = candidate
            winning_count = votes
    return winning_candidate, winning_count


def main():
    """Read the results CSV and write the formatted analysis file."""
    with open(file_to_load) as election_data:
        file_reader = csv.reader(election_data)
        next(file_reader)  # skip the header row
        total_votes, candidate_votes = tally_votes(file_reader)

    with open(file_to_save, "w") as txt_file:
        txt_file.write(
            f"\nElection Results\n"
            f"-------------------------\n"
            f"Total Votes: {total_votes:,}\n"
            f"-------------------------\n")
        # Per-candidate share, rounded to one decimal place.
        for candidate, votes in candidate_votes.items():
            vote_percentage = round((float(votes) / total_votes) * 100, 1)
            txt_file.write(f"{candidate}: {vote_percentage}% ({votes:,})\n")
        winning_candidate, winning_count = find_winner(candidate_votes)
        winning_percentage = round((float(winning_count) / total_votes) * 100, 1)
        txt_file.write(
            f"-------------------------\n"
            f"Winner: {winning_candidate}\n"
            f"Winning Vote Count: {winning_count:,}\n"
            f"Winning Percentage: {winning_percentage:.1f}%\n"
            f"-------------------------\n")


if __name__ == "__main__":
    main()
| 3.8125 | 4 |
tune.py | HuicheolMoon/P4_ModelOptimization | 0 | 12769248 | import argparse
import copy
import optuna
import os
from datetime import datetime
import torch
import torch.nn as nn
import torch.optim as optim
from src.dataloader import create_dataloader
from src.model import Model
from src.utils.torch_utils import model_info
from src.utils.common import read_yaml
from src.utils.macs import calc_macs
from src.trainer import TorchTrainer
from typing import Any, Dict, List, Tuple, Union
from train import train
# Base model/data configurations loaded once at import time; objective()
# deep-copies them per trial before applying the sampled hyperparameters.
MODEL_CONFIG = read_yaml(cfg="configs/model/effinetb1.yaml")
DATA_CONFIG = read_yaml(cfg="configs/data/taco.yaml")
def search_hyperparam(trial: optuna.trial.Trial) -> Dict[str, Any]:
    """Sample training hyperparameters for one trial.

    Only the input image size and the batch size are tuned at the moment;
    epoch count and augmentation selection stay fixed in the base config.
    """
    return {
        "IMG_SIZE": trial.suggest_int("img_size", low=42, high=98, step=14),
        "BATCH_SIZE": trial.suggest_int("batch_size", low=32, high=128, step=32),
    }
def search_model(trial: optuna.trial.Trial) -> List[Any]:
    """Search a backbone architecture from the user-specified search space.

    Returns a list of ``[repeat, module_name, module_args]`` entries in the
    format expected by ``src.model.Model``.  Modules 2-7 may be "Pass"
    (skipped).  The number of stride-2 stages is capped at MAX_NUM_STRIDE by
    clamping later stride suggestions to 1, and modules 2/4/6 are forced to
    stride 2 when too few downsampling stages have been emitted so far.

    BUG FIXES vs. the previous version:
    - Module 6's forced stride assigned ``m4_stride`` (copy-paste), so the
      intended extra downsampling never happened; it now sets ``m6_stride``.
    - The unconditional top-level ``m4..m7/out_channels`` suggestions reused
      the same optuna parameter names that the Conv/DWConv branches re-suggest
      with *different* ranges; re-suggesting a name with an incompatible
      distribution inside one trial makes optuna raise.  Those top-level
      values were unused, so they are removed.
    """
    model = []
    # Count of stride-2 stages emitted so far; once it reaches
    # MAX_NUM_STRIDE, later modules are restricted to stride 1.
    n_stride = 0
    MAX_NUM_STRIDE = 5
    UPPER_STRIDE = 2

    # Module 1 (always present): plain or depthwise conv stem.
    m1 = trial.suggest_categorical("m1", ["Conv", "DWConv"])
    m1_args = []
    m1_repeat = trial.suggest_int("m1/repeat", 1, 3)
    m1_out_channel = trial.suggest_int("m1/out_channels", low=16, high=64, step=16)
    m1_stride = trial.suggest_int("m1/stride", low=1, high=UPPER_STRIDE)
    if m1_stride == 2:
        n_stride += 1
    m1_activation = trial.suggest_categorical("m1/activation", ["ReLU", "Hardswish"])
    if m1 == "Conv":
        # Conv args: [out_channel, kernel_size, stride, padding, groups, activation]
        m1_args = [m1_out_channel, 3, m1_stride, None, 1, m1_activation]
    elif m1 == "DWConv":
        # DWConv args: [out_channel, kernel_size, stride, padding_size, activation]
        m1_args = [m1_out_channel, 3, m1_stride, None, m1_activation]
    model.append([m1_repeat, m1, m1_args])

    # Module 2
    m2 = trial.suggest_categorical(
        "m2",
        ["Conv", "DWConv", "MBConv", "InvertedResidualv2", "InvertedResidualv3", "Pass"],
    )
    m2_args = []
    m2_repeat = trial.suggest_int("m2/repeat", 1, 5)
    m2_out_channel = trial.suggest_int("m2/out_channels", low=16, high=128, step=16)
    m2_stride = trial.suggest_int("m2/stride", low=1, high=UPPER_STRIDE)
    # Force stride 2 on module 2 when no downsampling stage exists yet.
    if n_stride == 0:
        m2_stride = 2
    if m2 == "Conv":
        # Conv args: [out_channel, kernel_size, stride, padding, groups, activation]
        m2_kernel = trial.suggest_int("m2/kernel_size", low=1, high=5, step=2)
        m2_activation = trial.suggest_categorical("m2/activation", ["ReLU", "Hardswish"])
        m2_args = [m2_out_channel, m2_kernel, m2_stride, None, 1, m2_activation]
    elif m2 == "DWConv":
        # DWConv args: [out_channel, kernel_size, stride, padding_size, activation]
        m2_kernel = trial.suggest_int("m2/kernel_size", low=1, high=5, step=2)
        m2_activation = trial.suggest_categorical("m2/activation", ["ReLU", "Hardswish"])
        m2_args = [m2_out_channel, m2_kernel, m2_stride, None, m2_activation]
    elif m2 == "MBConv":
        m2_c = trial.suggest_int("m2/c", low=16, high=320, step=16)
        m2_k = trial.suggest_int("m2/k", low=3, high=5, step=2)
        m2_args = [1, m2_c, m2_stride, m2_k]
    elif m2 == "InvertedResidualv2":
        m2_c = trial.suggest_int("m2/v2_c", low=16, high=32, step=16)
        m2_t = trial.suggest_int("m2/v2_t", low=1, high=4)
        m2_args = [m2_c, m2_t, m2_stride]
    elif m2 == "InvertedResidualv3":
        m2_kernel = trial.suggest_int("m2/kernel_size", low=3, high=5, step=2)
        m2_t = round(trial.suggest_float("m2/v3_t", low=1.0, high=6.0, step=0.1), 1)
        m2_c = trial.suggest_int("m2/v3_c", low=16, high=40, step=8)
        m2_se = trial.suggest_categorical("m2/v3_se", [0, 1])
        m2_hs = trial.suggest_categorical("m2/v3_hs", [0, 1])
        # InvertedResidualv3 args: k t c SE HS s
        m2_args = [m2_kernel, m2_t, m2_c, m2_se, m2_hs, m2_stride]
    if not m2 == "Pass":
        if m2_stride == 2:
            n_stride += 1
            if n_stride >= MAX_NUM_STRIDE:
                UPPER_STRIDE = 1
        model.append([m2_repeat, m2, m2_args])

    # Module 3
    m3 = trial.suggest_categorical(
        "m3",
        ["Conv", "DWConv", "MBConv", "InvertedResidualv2", "InvertedResidualv3", "Pass"],
    )
    m3_args = []
    m3_repeat = trial.suggest_int("m3/repeat", 1, 5)
    m3_stride = trial.suggest_int("m3/stride", low=1, high=UPPER_STRIDE)
    if m3 == "Conv":
        m3_out_channel = trial.suggest_int("m3/out_channels", low=16, high=128, step=16)
        m3_kernel = trial.suggest_int("m3/kernel_size", low=1, high=5, step=2)
        m3_activation = trial.suggest_categorical("m3/activation", ["ReLU", "Hardswish"])
        m3_args = [m3_out_channel, m3_kernel, m3_stride, None, 1, m3_activation]
    elif m3 == "DWConv":
        m3_out_channel = trial.suggest_int("m3/out_channels", low=16, high=128, step=16)
        m3_kernel = trial.suggest_int("m3/kernel_size", low=1, high=5, step=2)
        m3_activation = trial.suggest_categorical("m3/activation", ["ReLU", "Hardswish"])
        m3_args = [m3_out_channel, m3_kernel, m3_stride, None, m3_activation]
    elif m3 == "MBConv":
        m3_c = trial.suggest_int("m3/c", low=16, high=320, step=16)
        m3_k = trial.suggest_int("m3/k", low=3, high=5, step=2)
        m3_args = [6, m3_c, m3_stride, m3_k]
    elif m3 == "InvertedResidualv2":
        m3_c = trial.suggest_int("m3/v2_c", low=8, high=32, step=8)
        m3_t = trial.suggest_int("m3/v2_t", low=1, high=8)
        m3_args = [m3_c, m3_t, m3_stride]
    elif m3 == "InvertedResidualv3":
        m3_kernel = trial.suggest_int("m3/kernel_size", low=3, high=5, step=2)
        m3_t = round(trial.suggest_float("m3/v3_t", low=1.0, high=6.0, step=0.1), 1)
        m3_c = trial.suggest_int("m3/v3_c", low=8, high=40, step=8)
        m3_se = trial.suggest_categorical("m3/v3_se", [0, 1])
        m3_hs = trial.suggest_categorical("m3/v3_hs", [0, 1])
        m3_args = [m3_kernel, m3_t, m3_c, m3_se, m3_hs, m3_stride]
    if not m3 == "Pass":
        if m3_stride == 2:
            n_stride += 1
            if n_stride >= MAX_NUM_STRIDE:
                UPPER_STRIDE = 1
        model.append([m3_repeat, m3, m3_args])

    # Module 4
    m4 = trial.suggest_categorical(
        "m4",
        ["Conv", "DWConv", "MBConv", "InvertedResidualv2", "InvertedResidualv3", "Pass"],
    )
    m4_args = []
    m4_repeat = trial.suggest_int("m4/repeat", 1, 5)
    m4_stride = trial.suggest_int("m4/stride", low=1, high=UPPER_STRIDE)
    # Force stride 2 on module 4 when only one downsampling stage exists.
    if n_stride == 1:
        m4_stride = 2
    if m4 == "Conv":
        m4_out_channel = trial.suggest_int("m4/out_channels", low=16, high=256, step=16)
        m4_kernel = trial.suggest_int("m4/kernel_size", low=1, high=5, step=2)
        m4_activation = trial.suggest_categorical("m4/activation", ["ReLU", "Hardswish"])
        m4_args = [m4_out_channel, m4_kernel, m4_stride, None, 1, m4_activation]
    elif m4 == "DWConv":
        m4_out_channel = trial.suggest_int("m4/out_channels", low=16, high=256, step=16)
        m4_kernel = trial.suggest_int("m4/kernel_size", low=1, high=5, step=2)
        m4_activation = trial.suggest_categorical("m4/activation", ["ReLU", "Hardswish"])
        m4_args = [m4_out_channel, m4_kernel, m4_stride, None, m4_activation]
    elif m4 == "MBConv":
        m4_c = trial.suggest_int("m4/c", low=16, high=320, step=16)
        m4_k = trial.suggest_int("m4/k", low=3, high=5, step=2)
        m4_args = [6, m4_c, m4_stride, m4_k]
    elif m4 == "InvertedResidualv2":
        m4_c = trial.suggest_int("m4/v2_c", low=8, high=64, step=8)
        m4_t = trial.suggest_int("m4/v2_t", low=1, high=8)
        m4_args = [m4_c, m4_t, m4_stride]
    elif m4 == "InvertedResidualv3":
        m4_kernel = trial.suggest_int("m4/kernel_size", low=3, high=5, step=2)
        m4_t = round(trial.suggest_float("m4/v3_t", low=1.0, high=6.0, step=0.1), 1)
        m4_c = trial.suggest_int("m4/v3_c", low=8, high=80, step=8)
        m4_se = trial.suggest_categorical("m4/v3_se", [0, 1])
        m4_hs = trial.suggest_categorical("m4/v3_hs", [0, 1])
        m4_args = [m4_kernel, m4_t, m4_c, m4_se, m4_hs, m4_stride]
    if not m4 == "Pass":
        if m4_stride == 2:
            n_stride += 1
            if n_stride >= MAX_NUM_STRIDE:
                UPPER_STRIDE = 1
        model.append([m4_repeat, m4, m4_args])

    # Module 5
    m5 = trial.suggest_categorical(
        "m5",
        ["Conv", "DWConv", "MBConv", "InvertedResidualv2", "InvertedResidualv3", "Pass"],
    )
    m5_args = []
    m5_repeat = trial.suggest_int("m5/repeat", 1, 5)
    # Default stride used by branches that do not sample one (MBConv).
    m5_stride = 1
    if m5 == "Conv":
        m5_out_channel = trial.suggest_int("m5/out_channels", low=16, high=256, step=16)
        m5_kernel = trial.suggest_int("m5/kernel_size", low=1, high=5, step=2)
        m5_activation = trial.suggest_categorical("m5/activation", ["ReLU", "Hardswish"])
        m5_stride = trial.suggest_int("m5/stride", low=1, high=UPPER_STRIDE)
        m5_args = [m5_out_channel, m5_kernel, m5_stride, None, 1, m5_activation]
    elif m5 == "DWConv":
        m5_out_channel = trial.suggest_int("m5/out_channels", low=16, high=256, step=16)
        m5_kernel = trial.suggest_int("m5/kernel_size", low=1, high=5, step=2)
        m5_activation = trial.suggest_categorical("m5/activation", ["ReLU", "Hardswish"])
        m5_stride = trial.suggest_int("m5/stride", low=1, high=UPPER_STRIDE)
        m5_args = [m5_out_channel, m5_kernel, m5_stride, None, m5_activation]
    elif m5 == "MBConv":
        m5_c = trial.suggest_int("m5/c", low=16, high=320, step=16)
        m5_k = trial.suggest_int("m5/k", low=3, high=5, step=2)
        m5_args = [6, m5_c, m5_stride, m5_k]
    elif m5 == "InvertedResidualv2":
        m5_c = trial.suggest_int("m5/v2_c", low=16, high=128, step=16)
        m5_t = trial.suggest_int("m5/v2_t", low=1, high=8)
        m5_stride = trial.suggest_int("m5/stride", low=1, high=UPPER_STRIDE)
        m5_args = [m5_c, m5_t, m5_stride]
    elif m5 == "InvertedResidualv3":
        m5_kernel = trial.suggest_int("m5/kernel_size", low=3, high=5, step=2)
        m5_t = round(trial.suggest_float("m5/v3_t", low=1.0, high=6.0, step=0.1), 1)
        m5_c = trial.suggest_int("m5/v3_c", low=16, high=80, step=16)
        m5_se = trial.suggest_categorical("m5/v3_se", [0, 1])
        m5_hs = trial.suggest_categorical("m5/v3_hs", [0, 1])
        m5_stride = trial.suggest_int("m5/stride", low=1, high=UPPER_STRIDE)
        m5_args = [m5_kernel, m5_t, m5_c, m5_se, m5_hs, m5_stride]
    if not m5 == "Pass":
        if m5_stride == 2:
            n_stride += 1
            if n_stride >= MAX_NUM_STRIDE:
                UPPER_STRIDE = 1
        model.append([m5_repeat, m5, m5_args])

    # Module 6
    m6 = trial.suggest_categorical(
        "m6",
        ["Conv", "DWConv", "MBConv", "InvertedResidualv2", "InvertedResidualv3", "Pass"],
    )
    m6_args = []
    m6_repeat = trial.suggest_int("m6/repeat", 1, 5)
    m6_stride = trial.suggest_int("m6/stride", low=1, high=UPPER_STRIDE)
    # Force stride 2 on module 6 when only two downsampling stages exist.
    # (BUG FIX: this previously assigned m4_stride by copy-paste.)
    if n_stride == 2:
        m6_stride = 2
    if m6 == "Conv":
        m6_out_channel = trial.suggest_int("m6/out_channels", low=16, high=512, step=16)
        m6_kernel = trial.suggest_int("m6/kernel_size", low=1, high=5, step=2)
        m6_activation = trial.suggest_categorical("m6/activation", ["ReLU", "Hardswish"])
        m6_args = [m6_out_channel, m6_kernel, m6_stride, None, 1, m6_activation]
    elif m6 == "DWConv":
        m6_out_channel = trial.suggest_int("m6/out_channels", low=16, high=512, step=16)
        m6_kernel = trial.suggest_int("m6/kernel_size", low=1, high=5, step=2)
        m6_activation = trial.suggest_categorical("m6/activation", ["ReLU", "Hardswish"])
        m6_args = [m6_out_channel, m6_kernel, m6_stride, None, m6_activation]
    elif m6 == "MBConv":
        m6_c = trial.suggest_int("m6/c", low=16, high=320, step=16)
        m6_k = trial.suggest_int("m6/k", low=3, high=5, step=2)
        m6_args = [6, m6_c, m6_stride, m6_k]
    elif m6 == "InvertedResidualv2":
        m6_c = trial.suggest_int("m6/v2_c", low=16, high=128, step=16)
        m6_t = trial.suggest_int("m6/v2_t", low=1, high=8)
        m6_args = [m6_c, m6_t, m6_stride]
    elif m6 == "InvertedResidualv3":
        m6_kernel = trial.suggest_int("m6/kernel_size", low=3, high=5, step=2)
        m6_t = round(trial.suggest_float("m6/v3_t", low=1.0, high=6.0, step=0.1), 1)
        m6_c = trial.suggest_int("m6/v3_c", low=16, high=160, step=16)
        m6_se = trial.suggest_categorical("m6/v3_se", [0, 1])
        m6_hs = trial.suggest_categorical("m6/v3_hs", [0, 1])
        m6_args = [m6_kernel, m6_t, m6_c, m6_se, m6_hs, m6_stride]
    if not m6 == "Pass":
        if m6_stride == 2:
            n_stride += 1
            if n_stride >= MAX_NUM_STRIDE:
                UPPER_STRIDE = 1
        model.append([m6_repeat, m6, m6_args])

    # Module 7
    m7 = trial.suggest_categorical(
        "m7",
        ["Conv", "DWConv", "MBConv", "InvertedResidualv2", "InvertedResidualv3", "Pass"],
    )
    m7_args = []
    m7_repeat = trial.suggest_int("m7/repeat", 1, 5)
    m7_stride = trial.suggest_int("m7/stride", low=1, high=UPPER_STRIDE)
    if m7 == "Conv":
        m7_out_channel = trial.suggest_int("m7/out_channels", low=128, high=1024, step=128)
        m7_kernel = trial.suggest_int("m7/kernel_size", low=1, high=5, step=2)
        m7_activation = trial.suggest_categorical("m7/activation", ["ReLU", "Hardswish"])
        m7_args = [m7_out_channel, m7_kernel, m7_stride, None, 1, m7_activation]
    elif m7 == "DWConv":
        m7_out_channel = trial.suggest_int("m7/out_channels", low=128, high=1024, step=128)
        m7_kernel = trial.suggest_int("m7/kernel_size", low=1, high=5, step=2)
        m7_activation = trial.suggest_categorical("m7/activation", ["ReLU", "Hardswish"])
        m7_args = [m7_out_channel, m7_kernel, m7_stride, None, m7_activation]
    elif m7 == "MBConv":
        m7_c = trial.suggest_int("m7/c", low=16, high=320, step=16)
        m7_k = trial.suggest_int("m7/k", low=3, high=5, step=2)
        m7_args = [6, m7_c, m7_stride, m7_k]
    elif m7 == "InvertedResidualv2":
        m7_c = trial.suggest_int("m7/v2_c", low=16, high=160, step=16)
        m7_t = trial.suggest_int("m7/v2_t", low=1, high=8)
        m7_args = [m7_c, m7_t, m7_stride]
    elif m7 == "InvertedResidualv3":
        m7_kernel = trial.suggest_int("m7/kernel_size", low=3, high=5, step=2)
        m7_t = round(trial.suggest_float("m7/v3_t", low=1.0, high=6.0, step=0.1), 1)
        m7_c = trial.suggest_int("m7/v3_c", low=8, high=160, step=8)
        m7_se = trial.suggest_categorical("m7/v3_se", [0, 1])
        m7_hs = trial.suggest_categorical("m7/v3_hs", [0, 1])
        m7_args = [m7_kernel, m7_t, m7_c, m7_se, m7_hs, m7_stride]
    if not m7 == "Pass":
        if m7_stride == 2:
            n_stride += 1
            if n_stride >= MAX_NUM_STRIDE:
                UPPER_STRIDE = 1
        model.append([m7_repeat, m7, m7_args])

    # Fixed head: 1x1 conv to `last_dim`, global average pool, 1x1 classifier conv.
    last_dim = trial.suggest_int("last_dim", low=128, high=512, step=128)
    model.append([1, "Conv", [last_dim, 1, 1]])
    model.append([1, "GlobalAvgPool", []])
    model.append([1, "FixedConv", [last_dim, 1, 1, None, 1, None]])
    return model
def tuning_score(test_f1: float, macs: float) -> float:
    """Combine test F1 and model MACs into a single scalar (lower is better).

    F1 below 0.5 gets the maximum penalty of 1.  From 0.5 up to the 0.85
    pivot the penalty decreases linearly (1 - f1/0.85); at or above the
    pivot the same linear term goes non-positive and is halved, acting as a
    bonus.  The MACs term is normalized by a 100k pivot and added.
    """
    F1_PIVOT = 0.85
    F1_LIMIT = 0.5
    MACS_PIVOT = 100000
    if test_f1 < F1_LIMIT:
        f1_term = 1
    else:
        f1_term = 1 - (test_f1 / F1_PIVOT)
        if test_f1 >= F1_PIVOT:
            f1_term = 0.5 * f1_term
    return f1_term + macs / MACS_PIVOT
def objective(trial: optuna.trial.Trial, device) -> float:
    """Optuna objective: train one sampled configuration and score it.

    Args:
        trial: optuna trial that supplies the sampled hyperparameters.
        device: torch device to train on.

    Returns:
        float: combined score of test F1 and MACs from tuning_score()
        (lower is better).
    """
    model_config = copy.deepcopy(MODEL_CONFIG)
    data_config = copy.deepcopy(DATA_CONFIG)

    # hyperparams: IMG_SIZE, BATCH_SIZE (EPOCHS / n_select are fixed)
    hyperparams = search_hyperparam(trial)
    data_config["BATCH_SIZE"] = hyperparams["BATCH_SIZE"]
    data_config["IMG_SIZE"] = hyperparams["IMG_SIZE"]
    # BUG FIX: input_size must reflect the *sampled* IMG_SIZE; it was
    # previously taken from the base config before IMG_SIZE was overwritten,
    # so the searched image size never reached the model config.
    model_config["input_size"] = [data_config["IMG_SIZE"], data_config["IMG_SIZE"]]
    # model_config["backbone"] = search_model(trial)

    # Fresh per-trial log directory, timestamped to avoid collisions.
    log_dir = os.path.join("exp", datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    os.makedirs(log_dir, exist_ok=True)

    model_instance = Model(model_config, verbose=False)
    macs = calc_macs(model_instance.model, (3, data_config["IMG_SIZE"], data_config["IMG_SIZE"]))

    _, test_f1, _ = train(
        model_config=model_config,
        data_config=data_config,
        log_dir=log_dir,
        fp16=data_config["FP16"],
        device=device,
    )
    return tuning_score(test_f1, macs)
def tune(gpu_id: int, storage: Union[str, None] = None, study_name: str = "pstage_automl"):
    """Run the Optuna hyperparameter study.

    Args:
        gpu_id: CUDA device index to train on; CPU is used when CUDA is
            unavailable.
        storage: optional RDB storage URL for persistent/distributed studies.
        study_name: study name (re-attached when it already exists).

    Raises:
        ValueError: if CUDA is available but gpu_id is out of range.
    """
    if not torch.cuda.is_available():
        device = torch.device("cpu")
    elif 0 <= gpu_id < torch.cuda.device_count():
        device = torch.device(f"cuda:{gpu_id}")
    else:
        # BUG FIX: an out-of-range gpu_id previously left `device` unbound,
        # causing a NameError below; fail fast with a clear error instead.
        raise ValueError(f"Invalid GPU id: {gpu_id}")
    sampler = optuna.samplers.TPESampler(n_startup_trials=20)
    if storage is not None:
        rdb_storage = optuna.storages.RDBStorage(url=storage)
    else:
        rdb_storage = None
    study = optuna.create_study(
        directions=["minimize"],
        storage=rdb_storage,
        study_name=study_name,
        sampler=sampler,
        load_if_exists=True
    )
    study.optimize(lambda trial: objective(trial, device), n_trials=20)

    pruned_trials = [
        t for t in study.trials if t.state == optuna.trial.TrialState.PRUNED
    ]
    complete_trials = [
        t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE
    ]
    print("Study statistics: ")
    print("  Number of finished trials: ", len(study.trials))
    print("  Number of pruned trials: ", len(pruned_trials))
    print("  Number of complete trials: ", len(complete_trials))

    print("Best trials:")
    best_trials = study.best_trials  # Pareto front (one trial when single-objective)
    for tr in best_trials:
        # BUG FIX: this is a single-objective study, so tr.values has length
        # 1 and the old unconditional `tr.values[1]` raised IndexError.
        print("  " + ", ".join(f"value{i + 1}:{v}" for i, v in enumerate(tr.values)))
        for key, value in tr.params.items():
            print(f"    {key}:{value}")
if __name__ == "__main__":
    # CLI entry point: forward GPU id, optional RDB storage URL and study name.
    parser = argparse.ArgumentParser(description="Optuna tuner.")
    parser.add_argument("--gpu", default=0, type=int, help="GPU id to use")
    parser.add_argument("--storage", default="", type=str, help="RDB Storage URL for optuna.")
    parser.add_argument("--study-name", default="pstage_automl", type=str, help="Optuna study name.")
    cli_args = parser.parse_args()
    # An empty --storage means "no RDB backend".
    storage_url = cli_args.storage or None
    tune(cli_args.gpu, storage=storage_url, study_name=cli_args.study_name)
| 1.992188 | 2 |
card_generate.py | minfun/OralCardGenerator | 0 | 12769249 | """
1. Choose the problem type
    plain expression    x +/-/*// y
    expression type 1   x +/- y +/- z
    expression type 2   x +/- y * z
    expression type 3   x +/- y / z
2. Choose the number of problems
3. Generate / print the problems
"""
import random
import wx
import os
import time
from student import math1, math2, math3, math4
'''
frame: the top-level window with a title bar and border
panel: a container widget that provides space for other components, including other panels
'''
math_list = [math1, math2, math3, math4]
class MyApp(wx.App):
    """GUI application for generating arithmetic drill cards.

    The user checks one or more problem types in a list, optionally grows
    the problem count in steps of 40, and generates a printed sheet.
    """

    def __init__(self):
        wx.App.__init__(self)
        self.selected_list = []  # indices of the checked problem types
        self.math_list = []      # generator callables matching the selection
        self.total_num = 40      # number of problems to generate

    def OnInit(self):
        """Build the main frame and its widgets (wx.App initialization hook)."""
        self.Frame = wx.Frame(parent=None, title="口算题卡生成器", pos=(100, 100), size=(300, 200))
        self.Frame.SetMaxSize((300, 300))
        self.Frame.SetMinSize((300, 300))
        self.SetTopWindow(self.Frame)
        self.panel = wx.Panel(self.Frame, -1)
        self.Set_Math_Type()
        self.Set_Add_Data_Button()
        self.Set_Generate_Button()
        self.Frame.Show()
        return True

    def Set_Generate_Button(self):
        """Create the "generate problems" button and bind its handler."""
        self.generate_button = wx.Button(self.panel, -1, "生成题目", pos=(150, 100), size=(150, 50))
        font = wx.Font(18, wx.ROMAN, wx.NORMAL, wx.NORMAL)
        self.generate_button.SetFont(font)
        self.generate_button.SetBackgroundColour("black")
        self.generate_button.SetForegroundColour("white")
        self.Bind(wx.EVT_BUTTON, self.generate_data, self.generate_button)

    def Set_Add_Data_Button(self):
        """Create the "add problems" button and bind its handler."""
        self.add_data_button = wx.Button(self.panel, -1, "增加题目", pos=(0, 100), size=(150, 50))
        font = wx.Font(18, wx.ROMAN, wx.NORMAL, wx.NORMAL)
        self.add_data_button.SetFont(font)
        self.add_data_button.SetBackgroundColour("black")
        self.add_data_button.SetForegroundColour("white")
        self.Bind(wx.EVT_BUTTON, self.add_data, self.add_data_button)

    def Set_Math_Type(self):
        """Create the problem-type check list and bind the selection handler."""
        list1 = ["普通算式 x +/-/*// y", "类型算式1 x +/- y +/- z", "类型算式2 x +/- y * z", "类型算式3 x +/- y / z"]
        self.mathlistbox1 = wx.CheckListBox(self.panel, -1, (-1, -1), (300, 150), list1)
        self.mathlistbox1.Bind(wx.EVT_CHECKLISTBOX, self.printselect1)

    def generate_data(self, event):
        """Print ``total_num`` problems, four per row, from the selected types."""
        print("generate data")
        print(self.total_num)
        if not self.math_list:
            # BUG FIX: with nothing checked, random.randint(0, -1) raised
            # ValueError; skip generation until a type is selected.
            print("no math type selected")
            return
        for i in range(int(self.total_num / 4)):
            for j in range(4):
                # Pick a random generator among the selected types.
                random.choice(self.math_list)(1, 10)
            print('\n')

    def add_data(self, event):
        """Increase the number of problems to generate by 40."""
        self.total_num += 40
        print(self.total_num)

    def printselect1(self, data):
        """Synchronize the selection lists with the check-list state.

        BUG FIX: the old handler appended ``data.GetInt()`` on *every*
        toggle event, so unchecking an item added it again instead of
        removing it; the lists are now rebuilt from the checked items.
        """
        self.selected_list = list(self.mathlistbox1.GetCheckedItems())
        self.math_list = [math_list[i] for i in self.selected_list]
        print(self.selected_list)
        print(self.math_list)
def loop_new():
    """Create the application object and enter the wx main event loop."""
    app = MyApp()
    app.MainLoop()
if __name__ == "__main__":
    # Script entry point: launch the GUI event loop.
    loop_new()
| 2.96875 | 3 |
venv/lib/python3.6/site-packages/mypy/version.py | humphrey-mutuma/Awards | 2 | 12769250 | <filename>venv/lib/python3.6/site-packages/mypy/version.py
__version__ = "0.720"
| 1.007813 | 1 |