blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c39d266b53a4726e2f9ccbf27b058e403f6ec001 | df20743069e3c81128438ecc8a368b1853dc8137 | /overrides/scr/Spell1089 - Curse of Impending Blades Mass.py | e0bedd89acc27b2c6e79f91edcd36f5598f7abee | [
"MIT"
] | permissive | dolio/ToEE_Mods | 3f020d82e590a63a04047912d8d76fa2212957d7 | 53aa8086b89b25d7afb3104c5d8896c8a38c89b0 | refs/heads/main | 2023-04-09T06:17:47.064224 | 2021-04-29T09:41:58 | 2021-04-29T09:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | from toee import *
def OnBeginSpellCast(spell):
    # Debug trace fired when the casting action starts; no state changes here.
    print "Curse of Impending Blades Mass OnBeginSpellCast"
    print "spell.target_list=", spell.target_list
    print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
    #game.particles("sp-enchantment-conjure",spell.caster )
def OnSpellEffect(spell):
    # Apply the mass curse: every hostile target in the area gains the
    # 'sp-Curse of Impending Blades' condition; friendly targets are skipped.
    print "Curse of Impending Blades Mass OnSpellEffect"
    targetsToRemove = []  # friendly targets to drop from the spell's list
    spell.duration = 10 * spell.caster_level # 1 min/cl (10 rounds per minute)
    for spellTarget in spell.target_list:
        targetIsFriendly = spellTarget.obj.is_friendly(spell.caster)
        if targetIsFriendly: # Curse only affects enemies
            targetsToRemove.append(spellTarget.obj)
        else:
            spellTarget.obj.float_text_line("Curse of Impending Blades", tf_red)
            game.create_history_freeform(spellTarget.obj.description + " is affected by ~Curse of Impending Blades~[TAG_SPELLS_CURSE_OF_IMPENDING_BLADES]\n\n")
            spellTarget.obj.condition_add_with_args('sp-Curse of Impending Blades', spell.id, spell.duration)
            spellTarget.partsys_id = game.particles('sp-Phantasmal Killer', spellTarget.obj)
    # Removal is deferred until after the loop so the list is not mutated
    # while being iterated.
    spell.target_list.remove_list(targetsToRemove)
    spell.spell_end(spell.id)
def OnBeginRound(spell):
    # Per-round hook; the curse itself is maintained by the added condition.
    print "Curse of Impending Blades Mass OnBeginRound"
def OnEndSpellCast(spell):
    # Debug trace on spell end; per-target cleanup is handled by the condition.
    print "Curse of Impending Blades Mass OnEndSpellCast"
"herbstgeist@googlemail.com"
] | herbstgeist@googlemail.com |
e9b649f995d933213dd1ba667e0997810ba41994 | 57300751060249be6553e6e0155f39eda8e08fe0 | /2015/Day 22/box.py | 3ed90e1aea220af6711d5d9f98fcffef91f5f54c | [] | no_license | shekeru/advent-of-code | f01a2b653173a326deed6a0ffc4f5b9cdd4635b2 | 0ab4158b1c8ced9353a88f25223abe761dddc57e | refs/heads/main | 2022-12-10T03:16:10.367596 | 2022-12-02T07:09:15 | 2022-12-02T07:09:15 | 160,104,399 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,333 | py | import copy, queue, dataclasses
# Spell Class
class Effect:
    """Base class for all spells.

    Instantiating a spell immediately pays its mana cost and adds the cost to
    the world's running total; a non-zero ``Turns`` registers it as a
    lingering effect keyed by its concrete type.
    """
    def __repr__(s):
        return f"{s.__class__.__name__}: {s.Turns} Turns"
    def __init__(s, Cost, World, Turns = 0):
        # Cache references into the shared world state.
        s.Boss, s.Turns = World.Boss, Turns
        s.Effects = World.Effects
        s.Player = World.Player
        if Turns:
            # Lingering effect: at most one active instance per spell type.
            s.Effects[type(s)] = s
        s.Player.Mana -= s.Cost
        World.Spent += Cost
    def StartTurn(s):
        # Tick one turn off; the caller ends the effect when this returns 0.
        s.Turns -= 1
        return s.Turns
    def EndEffect(s):
        del s.Effects[type(s)]
# Children
class Missile(Effect):
    # Magic Missile: instant, deals 4 damage.
    def __init__(s, World):
        super().__init__(s.Cost, World)
        s.Boss.HP -= 4
    Cost = 53
class Drain(Effect):
    # Drain: instant, deals 2 damage and heals the player for 2.
    def __init__(s, World):
        super().__init__(s.Cost, World)
        s.Player.HP += 2
        s.Boss.HP -= 2
    Cost = 73
class Shield(Effect):
    # Shield: +7 armor for 6 turns; the bonus is removed when the effect ends.
    def __init__(s, World):
        super().__init__(s.Cost, World, 6)
        s.Player.Armor += 7
    def EndEffect(s):
        super().EndEffect()
        s.Player.Armor -= 7
    Cost = 113
class Poison(Effect):
    # Poison: 3 damage to the boss at the start of each turn, for 6 turns.
    def __init__(s, World):
        super().__init__(s.Cost, World, 6)
    def StartTurn(s):
        s.Boss.HP -= 3
        return super().StartTurn()
    Cost = 173
class Recharge(Effect):
    # Recharge: +101 mana at the start of each turn, for 5 turns.
    def __init__(s, World):
        super().__init__(s.Cost, World, 5)
    def StartTurn(s):
        s.Player.Mana += 101
        return super().StartTurn()
    Cost = 229
# Entities
@dataclasses.dataclass
class Player:
    """Mutable player state: hit points, mana pool and current armor bonus."""
    HP: int
    Mana: int
    Armor: int = 0

    def __repr__(self):
        return "[Player] HP: {0}, Mana: {1}, Armor: {2}".format(
            self.HP, self.Mana, self.Armor)
@dataclasses.dataclass
class Boss:
    """Mutable boss state: hit points and flat damage dealt per attack."""
    HP: int
    Damage: int

    def __repr__(self):
        return "[Boss] HP: {0}, Damage: {1}".format(self.HP, self.Damage)
Spells = Effect.__subclasses__()
# Compact State
class World:
    """A search node: the full combat state plus the mana spent to reach it."""
    def __init__(s, Player, Boss):
        # Cast is True on the player's half-turn, False on the boss's.
        s.Cast, s.Spent, s.Effects = True, 0, {}
        s.Player, s.Boss = Player, Boss
    def __repr__(s):
        return "\n".join(map(repr, [s.Player, s.Boss, s.Effects]))
    def __lt__(s, o):
        # Priority-queue ordering heuristic: favour low boss HP and low spend.
        return s.Boss.HP * s.Spent < o.Boss.HP * o.Spent
    def CastOptions(s):
        # Spells that are affordable and not already active.
        return filter(lambda x: x not in s.Effects
            and x.Cost <= s.Player.Mana, Spells)
    def ExecuteTurn(s, Delta = 0):
        """Advance one half-turn.

        Returns a list of successor states (empty on a loss), or an int —
        the mana spent — when this branch kills the boss.
        """
        Copies = []
        if s.Cast:
            # Hard mode: the player loses Delta HP at the start of each of
            # their turns, before effects tick.
            s.Player.HP -= Delta
            if s.Player.HP <= 0:
                return Copies
        for Active in (*s.Effects.values(),):  # snapshot: effects may end
            if not Active.StartTurn():
                Active.EndEffect()
        if s.Boss.HP <= 0:
            World.Least = s.Spent  # new global best; prunes other branches
            return s.Spent
        if s.Cast:
            # Branch: deep-copy the world once per castable spell.
            for Opt in s.CastOptions():
                Opt(Alt := copy.deepcopy(s))
                if Alt.Spent < World.Least:
                    Alt.Cast = not s.Cast
                    Copies.append(Alt)
        else:
            # Boss attack: always at least 1 damage after armor.
            s.Player.HP -= max(1, s.Boss.Damage -
                s.Player.Armor); s.Cast = not s.Cast
            if s.Player.HP > 0:
                Copies.append(s)
        return Copies
# A* Like Search
def A_Search(Delta = 0):
    """Best-first search over World states; returns the minimum mana spent
    to win.  Delta is the hard-mode HP drain per player turn."""
    World.Least, Q = 5000, queue.PriorityQueue()
    Q.put(World(Player(50, 500), Boss(71, 10)))
    # Pop the most promising state and expand it until ExecuteTurn returns
    # an int (a winning mana total) instead of a list of successors.
    while isinstance(Value := Q.get().ExecuteTurn
        (Delta), list): [*map(Q.put, Value)]
    return Value
# Run Problem: part 1 (normal) and part 2 (hard mode, 1 HP drain per turn).
print("Silver:", A_Search())
print("Gold:", A_Search(1))
| [
"sheks@desu.systems"
] | sheks@desu.systems |
3a548fdfd613987b601ea37f5d41c018a5d6017f | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc018/A/answers/128595_akio0803.py | f301ad920080964bf1a66d445f2a6ae22c9224d8 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py |
# Read three scores and print each contestant's rank (1 = highest score).
scores = [int(input()) for _ in range(3)]
# Sort (index, score) pairs ascending by score, then reverse for descending.
descending = sorted(enumerate(scores), key=lambda pair: pair[1])[::-1]
rank_of = {idx: place for place, (idx, _) in enumerate(descending)}
for i in range(3):
    print(rank_of[i] + 1)
| [
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
797acc8e3f3c01cdac84072d59c826b177596681 | 7be4f595d555614a28f708c1ba7edda321f0cf30 | /practice/algorithms/sorting/counting_sort_2/counting_sort_2.py | 522e58a38e665fbc0c6ea29552a97a5a4d58c5ce | [] | no_license | orel1108/hackerrank | de31a2d31aaf8aeb58477d1f2738744bfe492555 | 55da1f3a94e8c28ed0f0dea3103e51774f0047de | refs/heads/master | 2021-04-09T17:38:25.112356 | 2017-01-22T11:21:19 | 2017-01-22T11:21:19 | 50,198,159 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #!/usr/bin/env python
from collections import Counter
# Counting sort: values are bounded to the range [0, 100).
n = int(raw_input().strip())  # element count (consumed but not needed below)
a = map(int, raw_input().strip().split())
counter = Counter(a)
# Emit each value in ascending order, repeated by its multiplicity.
for VAL in range(100):
    if VAL in counter.keys():
        while counter[VAL] > 0:
            print VAL,  # Python 2: trailing comma keeps output on one line
            counter[VAL] -= 1
| [
"r.orlovskyi@gmail.com"
] | r.orlovskyi@gmail.com |
8f300c37d9eebc1c1ced3d8d1e0035bcd19b974e | 7b221a4981edad73991cf1e357274b46c4054eff | /stacks/XIAOMATECH/1.0/services/HBASE/package/scripts/setup_ranger_hbase.py | 962460a669ff3b614cd7546af73086fab901a1a3 | [
"Apache-2.0"
] | permissive | aries-demos/dataops | a4e1516ef6205ad1ac5f692822e577e22ee85c70 | 436c6e89a1fdd0593a17815d3ec79c89a26d48f1 | refs/heads/master | 2020-05-29T17:20:12.854005 | 2019-05-22T06:06:00 | 2019-05-22T06:06:00 | 189,270,801 | 2 | 3 | Apache-2.0 | 2019-05-29T17:35:25 | 2019-05-29T17:35:24 | null | UTF-8 | Python | false | false | 5,217 | py | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.logger import Logger
import sys, os
script_path = os.path.realpath(__file__).split(
'/services')[0] + '/../../../stack-hooks/before-INSTALL/scripts/ranger'
sys.path.append(script_path)
from setup_ranger_plugin_xml import setup_ranger_plugin
def setup_ranger_hbase(upgrade_type=None, service_name="hbase-master"):
    """Enable the Ranger HBase plugin for this component, if configured.

    When ``params.enable_ranger_hbase`` is set this (a) optionally creates
    the HDFS audit directories — only on the master, so it happens once per
    cluster — and (b) registers the plugin configuration via
    ``setup_ranger_plugin``.  Otherwise it is a logged no-op.

    Fixed here: Python-2-only syntax (``except Exception, err`` and bare
    octal literals) replaced by the ``as err`` / ``0o755`` forms, which are
    valid on Python 2.6+ and Python 3 and have identical values.
    """
    import params

    if params.enable_ranger_hbase:
        if params.retryAble:
            Logger.info(
                "HBase: Setup ranger: command retry enables thus retrying if ranger admin is down !"
            )
        else:
            Logger.info(
                "HBase: Setup ranger: command retry not enabled thus skipping if ranger admin is down !"
            )
        # Audit-to-HDFS directories are created by the master only; failure
        # is logged but deliberately non-fatal (best-effort).
        if params.xa_audit_hdfs_is_enabled and service_name == 'hbase-master':
            try:
                params.HdfsResource(
                    "/ranger/audit",
                    type="directory",
                    action="create_on_execute",
                    owner=params.hdfs_user,
                    group=params.hdfs_user,
                    mode=0o755,
                    recursive_chmod=True)
                params.HdfsResource(
                    "/ranger/audit/hbaseMaster",
                    type="directory",
                    action="create_on_execute",
                    owner=params.hbase_user,
                    group=params.hbase_user,
                    mode=0o700,
                    recursive_chmod=True)
                params.HdfsResource(
                    "/ranger/audit/hbaseRegional",
                    type="directory",
                    action="create_on_execute",
                    owner=params.hbase_user,
                    group=params.hbase_user,
                    mode=0o700,
                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")
            except Exception as err:
                Logger.exception(
                    "Audit directory creation in HDFS for HBASE Ranger plugin failed with error:\n{0}"
                    .format(err))
        api_version = 'v2'
        setup_ranger_plugin(
            'hbase-client',
            'hbase',
            None,
            None,
            None,
            None,
            params.java64_home,
            params.repo_name,
            params.hbase_ranger_plugin_repo,
            params.ranger_env,
            params.ranger_plugin_properties,
            params.policy_user,
            params.policymgr_mgr_url,
            params.enable_ranger_hbase,
            conf_dict=params.hbase_conf_dir,
            component_user=params.hbase_user,
            component_group=params.user_group,
            cache_service_list=['hbaseMaster', 'hbaseRegional'],
            plugin_audit_properties=params.config['configurations']
            ['ranger-hbase-audit'],
            plugin_audit_attributes=params.config['configurationAttributes']
            ['ranger-hbase-audit'],
            plugin_security_properties=params.config['configurations']
            ['ranger-hbase-security'],
            plugin_security_attributes=params.config['configurationAttributes']
            ['ranger-hbase-security'],
            plugin_policymgr_ssl_properties=params.config['configurations']
            ['ranger-hbase-policymgr-ssl'],
            plugin_policymgr_ssl_attributes=params.
            config['configurationAttributes']['ranger-hbase-policymgr-ssl'],
            component_list=[
                'hbase-client', 'hbase-master', 'hbase-regionserver'
            ],
            audit_db_is_enabled=False,
            credential_file=params.credential_file,
            xa_audit_db_password=None,
            ssl_truststore_password=params.ssl_truststore_password,
            ssl_keystore_password=params.ssl_keystore_password,
            skip_if_rangeradmin_down=not params.retryAble,
            api_version=api_version,
            is_security_enabled=params.security_enabled,
            is_stack_supports_ranger_kerberos=params.
            stack_supports_ranger_kerberos
            if params.security_enabled else None,
            component_user_principal=params.ranger_hbase_principal
            if params.security_enabled else None,
            component_user_keytab=params.ranger_hbase_keytab
            if params.security_enabled else None)
    else:
        Logger.info('Ranger HBase plugin is not enabled')
| [
"xianhuawei@MacBook-Air.local"
] | xianhuawei@MacBook-Air.local |
335f685ef600c1073b4914abf6ff91b8708d62a0 | 3958a948646610cbe76bed6e3a285ecc457c1958 | /akshare/article/ff_factor.py | 93c13038436cc94448c0606f495e946e0a4ffea2 | [
"MIT"
] | permissive | moon-chaser/akshare | 1745abda950c8259a24782364e73d0b376b576d1 | f243df40c54e102f0faf88e8149b57ae28ea0a76 | refs/heads/master | 2020-09-18T17:49:13.992266 | 2019-11-26T09:18:38 | 2019-11-26T09:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,993 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2019/11/14 20:31
contact: jindaxiang@163.com
desc: FF-data-library: http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html
"""
import requests
import pandas as pd
from akshare.article.cons import ff_home_url
def ff_crr():
    """Scrape the Fama/French 'Current Research Returns' tables from the
    FF data-library home page and return them concatenated in one DataFrame.

    The page is parsed positionally (HTML table #4, fixed row offsets and
    hard-coded insert positions), so this is tightly coupled to the site's
    current layout — any change there breaks the magic indices below.
    """
    res = requests.get(ff_home_url)
    # first table: row 0 of stats table #4, split into whitespace-separated
    # cells; a leading "-" pads the values to line up with the column labels
    list_index = (
        pd.read_html(res.text, header=0, index_col=0)[4].iloc[2, :].index.tolist()
    )
    list_0 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[0, :][0]
        .split(" ")
        if item != ""
    ]
    list_1 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[0, :][1]
        .split(" ")
        if item != ""
    ]
    list_2 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[0, :][2]
        .split(" ")
        if item != ""
    ]
    list_0.insert(0, "-")
    list_1.insert(0, "-")
    list_2.insert(0, "-")
    temp_columns = (
        pd.read_html(res.text, header=0)[4]
        .iloc[:, 0]
        .str.split(" ", expand=True)
        .T[0]
        .dropna()
        .tolist()
    )
    table_one = pd.DataFrame(
        [list_0, list_1, list_2], index=list_index, columns=temp_columns
    ).T
    # second table: same treatment for row 1
    list_index = (
        pd.read_html(res.text, header=0, index_col=0)[4].iloc[1, :].index.tolist()
    )
    list_0 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[1, :][0]
        .split(" ")
        if item != ""
    ]
    list_1 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[1, :][1]
        .split(" ")
        if item != ""
    ]
    list_2 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[1, :][2]
        .split(" ")
        if item != ""
    ]
    list_0.insert(0, "-")
    list_1.insert(0, "-")
    list_2.insert(0, "-")
    temp_columns = (
        pd.read_html(res.text, header=0)[4]
        .iloc[:, 0]
        .str.split(" ", expand=True)
        .T[1]
        .dropna()
        .tolist()
    )
    table_two = pd.DataFrame(
        [list_0, list_1, list_2], index=list_index, columns=temp_columns
    ).T
    # third table: row 2 holds the portfolio stats; "-" padding is inserted
    # at positions 0/1/8/15 to align values under the section headers
    df = pd.read_html(res.text, header=0, index_col=0)[4].iloc[2, :]
    name_list = (
        pd.read_html(res.text, header=0)[4]
        .iloc[:, 0]
        .str.split(r" ", expand=True)
        .iloc[2, :]
        .tolist()
    )
    value_list_0 = df[0].split(" ")
    value_list_0.insert(0, "-")
    value_list_0.insert(1, "-")
    value_list_0.insert(8, "-")
    value_list_0.insert(15, "-")
    value_list_1 = df[1].split(" ")
    value_list_1.insert(0, "-")
    value_list_1.insert(1, "-")
    value_list_1.insert(8, "-")
    value_list_1.insert(15, "-")
    value_list_2 = df[2].split(" ")
    value_list_2.insert(0, "-")
    value_list_2.insert(1, "-")
    value_list_2.insert(8, "-")
    value_list_2.insert(15, "-")
    # fix a label that the split glued together into one string
    name_list.remove("Small Growth Big Value")
    name_list.insert(5, "Small Growth")
    name_list.insert(6, "Big Value")
    temp_list = [item for item in name_list if "Portfolios" not in item]
    temp_list.insert(0, "Fama/French Research Portfolios")
    temp_list.insert(1, "Size and Book-to-Market Portfolios")
    temp_list.insert(8, "Size and Operating Profitability Portfolios")
    temp_list.insert(15, "Size and Investment Portfolios")
    temp_df = pd.DataFrame([temp_list, value_list_0, value_list_1, value_list_2]).T
    temp_df.index = temp_df.iloc[:, 0]
    temp_df = temp_df.iloc[:, 1:]
    # concat the three tables vertically into the final frame
    all_df = pd.DataFrame()
    all_df = all_df.append(table_one)
    all_df = all_df.append(table_two)
    temp_df.columns = table_two.columns
    all_df = all_df.append(temp_df)
    return all_df
return all_df
if __name__ == "__main__":
    # Manual smoke test: fetch and print the parsed FF factor tables.
    df_data = ff_crr()
    print(df_data)
| [
"jindaxiang@163.com"
] | jindaxiang@163.com |
5a0e0b0e79986691cab2f17277cc89e724a62f89 | a2f606b4028a308a7f4682a9e5e390a9bdc6da43 | /add key.py | fa2d174377823cfa2410406b6a1ce58d388fb0f9 | [] | no_license | Anamikaswt/dictionary | c83f8afa6581debaf5b1ca56670126104b423335 | c5eb05b30830ca753dcaedfdfbab4004cd9889d2 | refs/heads/master | 2023-04-01T20:40:43.682928 | 2021-04-02T16:25:40 | 2021-04-02T16:25:40 | 354,071,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | sample_dic={0:10,1:20}
# Insert a new key/value pair into the existing dict, then show the result.
sample_dic.update({2: 30})
print(sample_dic)
"you@example.com"
] | you@example.com |
b02eea7039a321a818867e86600c250c26a864d1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_042/ch7_2020_09_09_13_59_45_863917.py | f1d538b6472e30d088f305988068e5ff54bdc24e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | #calcula_area_do_triangul
def calcula_area_do_triangulo(b, h):
    """Return the area of a triangle with base ``b`` and height ``h``.

    Bug fixed: the original returned ``b * h`` (a rectangle's area);
    a triangle's area is half of that.
    """
    return (b * h) / 2
"you@example.com"
] | you@example.com |
5e67671c5eac3faa7543afa89657bf0b16fd6cd2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_bugle.py | 827fd7e3c07f021180c489a04288a6051e4e65f4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py |
#calss header
class _BUGLE():
def __init__(self,):
self.name = "BUGLE"
self.definitions = [u'a musical instrument like a simple trumpet, used especially in the army']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9c4e70425e5c2faf4eb71757339669d9799d3ce7 | 2f92274606b4a8f91bf11e6383197f77d92fbd5e | /tests/utils/test_template_parser.py | 9cd347b626cdb9977453afc4b87b4a51634299b7 | [
"BSD-2-Clause"
] | permissive | tws0002/anima | 1db50532ab50dcc034db7300a3cd106b30bc8e00 | 73c256d1f7716a2db7933d6d8519a51333c7e5b4 | refs/heads/master | 2020-12-24T12:05:53.385210 | 2019-07-19T07:41:43 | 2019-07-19T07:41:43 | 73,074,603 | 0 | 0 | BSD-2-Clause | 2019-08-06T04:00:17 | 2016-11-07T12:06:41 | Python | UTF-8 | Python | false | false | 2,847 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2017, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import pytest
@pytest.fixture('session')
def test_data():
    """Session-scoped fixture: the task-template test data parsed from
    ``data/test_template.json`` next to this module."""
    # reads the test data as text
    import os
    here = os.path.dirname(__file__)
    test_data_file_path = os.path.join(here, 'data', 'test_template.json')
    import json
    with open(test_data_file_path) as f:
        test_data = json.load(f)
    yield test_data
@pytest.fixture('session')
def create_db():
    """Session-scoped fixture: set up an in-memory SQLite Stalker database."""
    import os
    # NOTE(review): pop() without a default raises KeyError when STALKER_PATH
    # is unset — presumably guaranteed by the test environment; confirm.
    os.environ.pop('STALKER_PATH')
    from stalker import db
    db.setup({'sqlalchemy.url': 'sqlite://'})
    db.init()
@pytest.fixture('session')
def create_project():
    """Session-scoped fixture: a Project (code 'TP') with one test Repository."""
    from stalker import Repository, Project
    repo = Repository(
        name='Test Repository',
        windows_path='T:/',
        linux_path='/mnt/T/',
        osx_path='/Volumes/T/'
    )
    project = Project(
        name='Test Project',
        code='TP',
        repository=repo
    )
    yield project
def test_database_is_correctly_created(create_db):
    """The create_db fixture should bind DBSession to an SQLite engine."""
    from stalker.db.session import DBSession
    assert str(DBSession.connection().engine.dialect.name) == 'sqlite'
def test_template_argument_accepts_only_a_json_as_text():
    """TaskTemplateParser should raise TypeError for non-JSON template text."""
    from anima.utils.task_template_parser import TaskTemplateParser
    with pytest.raises(TypeError):
        TaskTemplateParser('not json data')
def test_template_argument_is_working_properly(test_data):
    """A valid JSON template should construct a parser instance."""
    from anima.utils.task_template_parser import TaskTemplateParser
    ttp = TaskTemplateParser(test_data)
    assert ttp is not None
def test_creating_test_data(create_db, create_project):
    """Sanity check: the create_project fixture yields a stalker Project."""
    project = create_project
    from stalker import Project
    assert isinstance(project, Project)
def test_creating_tasks_from_template(create_db, create_project):
    """TaskTemplateParser.create should build an Asset from the default template."""
    project = create_project
    from anima.utils.task_template_parser import TaskTemplateParser
    from anima import defaults
    ttp = TaskTemplateParser(task_data=defaults.task_template)
    asset = ttp.create(project, 'Asset', 'Character')
    from stalker import Asset
    assert isinstance(asset, Asset)
# def test_create_entity_type_is_not_a_string(prepare_db):
# """testing if a TypeError will be raised if the entity_type is not
# """
| [
"eoyilmaz@gmail.com"
] | eoyilmaz@gmail.com |
5d27d9b5a003bd3336600af6e1e5651cf34b8bf0 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayBossContractManagementCreateResponse.py | 8903d731dfb6cc885f8cffdadbc4bfbd10096c7a | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,041 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.InterTradeStartContractApprovalResult import InterTradeStartContractApprovalResult
class AlipayBossContractManagementCreateResponse(AlipayResponse):
    """Response model for the alipay.boss.contract.management.create API,
    exposing its single ``result_set`` payload."""
    def __init__(self):
        super(AlipayBossContractManagementCreateResponse, self).__init__()
        self._result_set = None  # InterTradeStartContractApprovalResult or None
    @property
    def result_set(self):
        return self._result_set
    @result_set.setter
    def result_set(self, value):
        # Accept either a model instance or its raw dict form from the gateway.
        if isinstance(value, InterTradeStartContractApprovalResult):
            self._result_set = value
        else:
            self._result_set = InterTradeStartContractApprovalResult.from_alipay_dict(value)
    def parse_response_content(self, response_content):
        # Let the base class parse the envelope, then extract our payload.
        response = super(AlipayBossContractManagementCreateResponse, self).parse_response_content(response_content)
        if 'result_set' in response:
            self.result_set = response['result_set']
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
391ce8cb0cb0d48ea2565f84872784ab46e5bf5e | 0f8254a1d49aa55130fe9bfd4e0488b92c75aa3c | /cookie_auth/cookie_auth/data/album.py | 914ed6a616abad5fb03e53cd804e99cc56167c59 | [] | no_license | Durant21/cookie_auth | 5c41dee21bc0b18ee80bf25389b5c24475ff804a | e94244e430b6c87ed08108b2ba58c769daad647d | refs/heads/master | 2020-03-29T00:45:03.354865 | 2018-09-18T21:30:04 | 2018-09-18T21:30:04 | 149,354,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | import sqlalchemy
import sqlalchemy.orm
from sqlalchemy.ext.orderinglist import ordering_list
from cookie_auth.data.modelbase import SqlAlchemyBase
class Album(SqlAlchemyBase):
    """ORM model for a music album and its ordered list of tracks."""
    __tablename__ = 'Album'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    name = sqlalchemy.Column(sqlalchemy.String, index=True, unique=True, nullable=False)
    url = sqlalchemy.Column(sqlalchemy.String, index=True, unique=True, nullable=False)
    year = sqlalchemy.Column(sqlalchemy.Integer, index=True)
    price = sqlalchemy.Column(sqlalchemy.Float, index=True)
    album_image = sqlalchemy.Column(sqlalchemy.String)
    has_preview = sqlalchemy.Column(sqlalchemy.Boolean, default=False)
    is_published = sqlalchemy.Column(sqlalchemy.Boolean, default=False)
    # Tracks keep a stable display order via the ordering_list collection;
    # cascade='all' removes tracks together with their album.
    tracks = sqlalchemy.orm.relationship('Track', back_populates='album',
                                         order_by='Track.display_order',
                                         collection_class=ordering_list('display_order'),
                                         cascade='all')
| [
"durant.crimson@icloud.com"
] | durant.crimson@icloud.com |
1ccdf89cc474766550a06c99cb71f19bf678915d | d121dbf198d835d1f040da8e8212948d469d16cb | /baekjoon/Python/2530.py | 4e6b2beef79bf32d6ea29ff41cf6bdfc28b41105 | [] | no_license | yeonjooyou/algorithm | ad66d2477aaed1656751d56db19a90ab1957df93 | 067f0ca746949328695f51f458cf5db9adfb91af | refs/heads/master | 2023-08-26T07:29:43.000966 | 2021-11-01T13:38:56 | 2021-11-01T13:38:56 | 391,618,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # 인공지능 시계
# 인공지능 시계 (BOJ 2530): advance the clock time A:B:C by D seconds,
# wrapping around a 24-hour day.
A, B, C = map(int, input().split())
D = int(input())
total_seconds = (A * 3600 + B * 60 + C + D) % 86400
A, remainder = divmod(total_seconds, 3600)
B, C = divmod(remainder, 60)
print(A, B, C)
# Note: per-field modular arithmetic without carry propagation is wrong,
# e.g. print((A + (B+D//60)//60)%24, (B + (C+D)//60%60)%60, (C + D%60)%60)
"yeonjooyou@naver.com"
] | yeonjooyou@naver.com |
ce980daa1aba4aaf7f0bb4ee521d812d89d91503 | 08acec95bd1dc302633fadf7b47cd8ba3b749ff3 | /day-2018-05-10/myproject/venv/lib/python2.7/site-packages/zope/security/tests/test_permission.py | 82ddea20e391f459841634eaa3cdc6f517c06cb0 | [] | no_license | WeAreHus/StudyRecord | 74a312103ad2c037de23534160fa42d6a68ad174 | 047b7d9dcbee7c01ad2e8b888b160e66dfa9012d | refs/heads/master | 2022-12-16T14:47:15.984939 | 2019-04-29T15:16:15 | 2019-04-29T15:16:15 | 127,758,387 | 2 | 1 | null | 2022-11-22T02:50:30 | 2018-04-02T13:15:07 | Python | UTF-8 | Python | false | false | 7,808 | py | ##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test permissions
"""
import unittest
from zope.component.testing import PlacelessSetup
from zope.security.interfaces import PUBLIC_PERMISSION_NAME as zope_Public
class PermissionTests(unittest.TestCase):
    """Unit tests for zope.security.permission.Permission."""
    def _getTargetClass(self):
        from zope.security.permission import Permission
        return Permission
    def _makeOne(self, id, *args):
        # Helper: instantiate the class under test with positional extras.
        klass = self._getTargetClass()
        return klass(id, *args)
    def test_class_conforms_to_IPermission(self):
        from zope.interface.verify import verifyClass
        from zope.security.interfaces import IPermission
        verifyClass(IPermission, self._getTargetClass())
    def test_instance_conforms_to_IPermission(self):
        from zope.interface.verify import verifyObject
        from zope.security.interfaces import IPermission
        from zope.schema import getValidationErrors
        verifyObject(IPermission, self._makeOne('testing'))
        self.assertEqual([],
                         getValidationErrors(IPermission,
                                             self._makeOne('testing')))
    def test_ctor_only_id(self):
        # Title and description default to empty unicode strings.
        permission = self._makeOne('testing')
        self.assertEqual(permission.id, u'testing')
        self.assertEqual(permission.title, u'')
        self.assertEqual(permission.description, u'')
    def test_ctor_w_title_and_description(self):
        permission = self._makeOne('testing', u'TITLE', u'DESCRIPTION')
        self.assertEqual(permission.id, 'testing')
        self.assertEqual(permission.title, u'TITLE')
        self.assertEqual(permission.description, u'DESCRIPTION')
class Test_checkPermission(PlacelessSetup, unittest.TestCase):
    """Tests for zope.security.permission.checkPermission."""
    def _callFUT(self, context, permission_id):
        from zope.security.permission import checkPermission
        return checkPermission(context, permission_id)
    def test_w_CheckerPublic(self):
        from zope.security.checker import CheckerPublic
        self._callFUT(None, CheckerPublic) # no raise
    def test_miss(self):
        # Unregistered permission ids must be rejected.
        self.assertRaises(ValueError, self._callFUT, None, 'nonesuch')
    def test_hit(self):
        from zope.component import provideUtility
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        self._callFUT(None, 'testing') # no raise
class Test_allPermissions(PlacelessSetup, unittest.TestCase):
    """Tests for zope.security.permission.allPermissions."""
    def _callFUT(self):
        from zope.security.permission import allPermissions
        return allPermissions()
    def test_empty(self):
        self.assertEqual(list(self._callFUT()), [])
    def test_w_registration(self):
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        self.assertEqual(list(self._callFUT()), ['testing'])
    def test_skips_zope_Public(self):
        # The special zope.Public permission must not be enumerated.
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.checker import CheckerPublic
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        provideUtility(CheckerPublic, IPermission, zope_Public)
        self.assertEqual(list(self._callFUT()), ['testing'])
class Test_PermissionsVocabulary(PlacelessSetup, unittest.TestCase):
    """Tests for zope.security.permission.PermissionsVocabulary."""
    def _callFUT(self):
        from zope.security.permission import PermissionsVocabulary
        return PermissionsVocabulary()
    def test_empty(self):
        from zope.schema.vocabulary import SimpleVocabulary
        vocabulary = self._callFUT()
        self.assertTrue(isinstance(vocabulary, SimpleVocabulary))
        self.assertEqual(list(vocabulary), [])
    def test_w_registration(self):
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        vocabulary = self._callFUT()
        self.assertEqual([x.token for x in vocabulary], ['testing'])
    def test_includes_zope_Public(self):
        # Unlike allPermissions(), the vocabulary does include zope.Public.
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.checker import CheckerPublic
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        provideUtility(CheckerPublic, IPermission, zope_Public)
        vocabulary = self._callFUT()
        self.assertEqual(sorted([x.token for x in vocabulary]),
                         ['testing', zope_Public])
    def test_zcml_valid(self):
        # Every permission registered by the package's own configure.zcml
        # must validate cleanly against IPermission.
        from zope.configuration import xmlconfig
        import zope.security
        from zope.interface.verify import verifyObject
        from zope.security.interfaces import IPermission
        from zope.schema import getValidationErrors
        xmlconfig.file('configure.zcml', zope.security)
        vocabulary = self._callFUT()
        vocabulary = sorted(vocabulary, key=lambda term: term.token)
        self.assertEqual(6, len(vocabulary))
        for term in vocabulary:
            p = term.value
            __traceback_info__ = term.token, p
            verifyObject(IPermission, p)
            self.assertEqual([], getValidationErrors(IPermission, p))
class Test_PermissionIdsVocabulary(PlacelessSetup, unittest.TestCase):
    """Tests for zope.security.permission.PermissionIdsVocabulary, whose
    terms use the permission *id* as the value (not the utility object)."""
    def _callFUT(self):
        from zope.security.permission import PermissionIdsVocabulary
        return PermissionIdsVocabulary()
    def test_empty(self):
        from zope.schema.vocabulary import SimpleVocabulary
        vocabulary = self._callFUT()
        self.assertTrue(isinstance(vocabulary, SimpleVocabulary))
        self.assertEqual(list(vocabulary), [])
    def test_w_registration(self):
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        vocabulary = self._callFUT()
        self.assertEqual([x.value for x in vocabulary], ['testing'])
        self.assertEqual([x.token for x in vocabulary], ['testing'])
    def test_includes_zope_Public(self):
        # zope.Public sorts first: its value is CheckerPublic, its token the id.
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.checker import CheckerPublic
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        provideUtility(CheckerPublic, IPermission, zope_Public)
        vocabulary = self._callFUT()
        self.assertEqual([x.value for x in vocabulary],
                         [CheckerPublic, 'testing'])
        self.assertEqual([x.token for x in vocabulary],
                         [zope_Public, 'testing'])
def test_suite():
    """Collect this module's tests (legacy ``test_suite`` protocol)."""
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
| [
"1131360171@qq.com"
] | 1131360171@qq.com |
b5e2c73c9143273582301d3fa689f293b7767799 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/automation/azure-mgmt-automation/azure/mgmt/automation/aio/operations/_node_count_information_operations.py | 9c3773f1b89ba48b3ba0f6ab556eb8dcf40263f3 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 5,458 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._node_count_information_operations import build_get_request
from .._vendor import AutomationClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NodeCountInformationOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.automation.aio.AutomationClient`'s
        :attr:`node_count_information` attribute.
    """

    # NOTE: this class is AutoRest-generated; manual edits are overwritten
    # when the SDK is regenerated.
    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The client plumbing (client, config, serializer, deserializer) may be
        # passed positionally in that order, or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        automation_account_name: str,
        count_type: Union[str, _models.CountType],
        **kwargs: Any
    ) -> _models.NodeCounts:
        """Retrieve counts for Dsc Nodes.

        :param resource_group_name: Name of an Azure Resource group. Required.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account. Required.
        :type automation_account_name: str
        :param count_type: The type of counts to retrieve. Known values are: "status" and
         "nodeconfiguration". Required.
        :type count_type: str or ~azure.mgmt.automation.models.CountType
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NodeCounts or the result of cls(response)
        :rtype: ~azure.mgmt.automation.models.NodeCounts
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP failures to typed azure-core exceptions; callers
        # may extend/override the mapping via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2020-01-13-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2020-01-13-preview")
        )
        cls: ClsType[_models.NodeCounts] = kwargs.pop("cls", None)
        request = build_get_request(
            resource_group_name=resource_group_name,
            automation_account_name=automation_account_name,
            count_type=count_type,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("NodeCounts", pipeline_response)
        # ``cls`` lets the caller intercept the raw pipeline response.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/nodecounts/{countType}"
    }
| [
"noreply@github.com"
] | Azure.noreply@github.com |
607386616a143c398ff7b721265e63b4d30a4f6c | 54cabe33c24f17f1101b5e7444db44732665e078 | /api/common/api_client.py | 9d66cfbb061b50c3e7a8439b74061c831c36b8f2 | [] | no_license | DemocracyClub/aggregator-api | 6fc40381f880849df6f32a87e6bf1de18fadbe2e | eec276791133d84027195e8b4c12bb9133e34957 | refs/heads/master | 2023-08-09T22:23:49.867773 | 2023-07-31T15:57:15 | 2023-07-31T15:57:15 | 158,564,104 | 3 | 2 | null | 2023-07-31T15:57:17 | 2018-11-21T14:54:36 | HTML | UTF-8 | Python | false | false | 131 | py | from abc import ABC
class BaseAPIClient(ABC):
    """Abstract base class for API clients.

    Subclasses share a single piece of state: the base URL that request
    paths are resolved against (empty string by default).
    """

    def __init__(self, base_url: str = "") -> None:
        self.base_url: str = base_url
| [
"sym.roe@talusdesign.co.uk"
] | sym.roe@talusdesign.co.uk |
5d804d7b72d8ebee648187865092b82c313430de | d8f0cc9f3469c20b976f8216b344c1e67f66cef5 | /tesdjango3_15789/settings.py | f856a318bac7f6e6e9c5c08af70bcf36929ae080 | [] | no_license | crowdbotics-apps/tesdjango3-15789 | d3ab71c76a28e4e7ffcc84e00252e94a340d3937 | d271a8e1176399db1f85faf8969b2722cf71b4f0 | refs/heads/master | 2023-05-28T10:47:51.272732 | 2020-04-13T06:18:11 | 2020-04-13T06:18:11 | 255,247,132 | 0 | 0 | null | 2021-06-13T05:30:38 | 2020-04-13T06:17:55 | Python | UTF-8 | Python | false | false | 5,546 | py | """
Django settings for tesdjango3_15789 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")

ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1

# Trust the proxy's X-Forwarded-Proto header when deciding if a request
# was made over HTTPS (needed behind load balancers / Heroku-style routers).
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
LOCAL_APPS = [
    'home',
    'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'tesdjango3_15789.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'tesdjango3_15789.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# A DATABASE_URL environment variable (e.g. on Heroku) replaces SQLite.
if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
# WhiteNoise serves collected static files; appended after the middleware
# list defined above so ordering stays correct.
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)

STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)

REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}

# Custom user model
AUTH_USER_MODEL = "users.User"

EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True

if DEBUG:
    # output email to console instead of sending
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
bad52bc81cb15fb632fb67f4271b25dc83af451f | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/02111bc6c8800d8f644b52ad842cd738a17e192a-<create_host_port_group>-bug.py | 4b1a7042cbd94d7de38cf5f7162c638b57555a91 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | py | def create_host_port_group(self, host_system, portgroup_name, vlan_id, vswitch_name, network_policy):
'\n Function to create/update portgroup on given host using portgroup specifications\n Args:\n host_system: Name of Host System\n portgroup_name: Name of Portgroup\n vlan_id: The VLAN ID for ports using this port group.\n vswitch_name: Name of vSwitch Name\n network_policy: Network policy object\n '
desired_pgs = self.get_port_group_by_name(host_system=host_system, portgroup_name=portgroup_name, vswitch_name=vswitch_name)
port_group = vim.host.PortGroup.Config()
port_group.spec = vim.host.PortGroup.Specification()
if (not desired_pgs):
port_group.spec.name = portgroup_name
port_group.spec.vlanId = vlan_id
port_group.spec.vswitchName = vswitch_name
port_group.spec.policy = network_policy
try:
host_system.configManager.networkSystem.AddPortGroup(portgrp=port_group.spec)
self.changed = True
except vim.fault.AlreadyExists as e:
self.module.fail_json(msg=('Failed to add Portgroup as it already exists: %s' % e.msg))
except vim.fault.NotFound as e:
self.module.fail_json(msg=('Failed to add Portgroup as vSwitch was not found: %s' % e.msg))
except vim.fault.HostConfigFault as e:
self.module.fail_json(msg=('Failed to add Portgroup due to host system configuration failure : %s' % e.msg))
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg=('Failed to add Portgroup as VLAN id was not correct as per specifications: %s' % e.msg))
else:
if (desired_pgs[0].spec.vlanId != vlan_id):
port_group.spec.vlanId = vlan_id
self.changed = True
if self.check_network_policy_diff(desired_pgs[0].spec.policy, network_policy):
port_group.spec.policy = network_policy
self.changed = True
if self.changed:
try:
host_system.configManager.networkSystem.UpdatePortGroup(pgName=self.portgroup_name, portgrp=port_group.spec)
except vim.fault.AlreadyExists as e:
self.module.fail_json(msg=('Failed to update Portgroup as it conflicts with already existing Portgroup: %s' % e.msg))
except vim.fault.NotFound as e:
self.module.fail_json(msg=('Failed to update Portgroup as vSwitch was not found: %s' % e.msg))
except vim.fault.HostConfigFault as e:
self.module.fail_json(msg=('Failed to update Portgroup due to host system configuration failure : %s' % e.msg))
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg=('Failed to update Portgroup as VLAN id was not correct as per specifications: %s' % e.msg))
self.changed = False | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
0920838d52e1fdc735af985efc9aa53ecc6c5c2d | b4dd760e79de0db39792b947bacfe2b27c2a89ee | /challenge106interm.py | 195d821f20a949f08778ad8a8b077a487a568c35 | [] | no_license | mooja/dailyprogrammer | c23f1a0c5d6e4269b6c03b47d8cc18f6d857a6e1 | d12fcb6744ac3b4a5e651f37ea0b3f20ca062f7d | refs/heads/master | 2021-01-16T23:47:28.955660 | 2018-04-09T18:03:50 | 2018-04-09T18:03:50 | 23,394,207 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,987 | py | #!/usr/bin/env python
# encoding: utf-8
# Daily Programmer Challenge 106 Intermediate
#
# http://www.reddit.com/r/dailyprogrammer/comments/11xjfd/10232012_challenge_106_intermediate_jugs/
#
# May.10.2015
from operator import attrgetter
from collections import namedtuple
from itertools import combinations, permutations
# Immutable record of a single jug: current gallons and maximum capacity.
Jug = namedtuple('Jug', 'gallons, capacity')
# Search state: the list of jugs plus the action log that produced it.
JugsState = namedtuple('JugsState', 'jugs, actions')
def is_full(jug):
    """True once the jug holds at least its capacity."""
    return jug.capacity <= jug.gallons
def is_empty(jug):
    """True when the jug currently holds no water at all."""
    remaining = jug.gallons
    return remaining == 0
def fill_jug(jug):
    """Return a copy of *jug* filled to the brim."""
    brim = jug.capacity
    return Jug(brim, brim)
def empty_jug(jug):
    """Return a copy of *jug* with all water poured out."""
    capacity = jug.capacity
    return Jug(0, capacity)
def transf_from_1_to_2(jug1, jug2):
    """Pour as much as possible from jug1 into jug2; return the new (jug1, jug2)."""
    # The amount moved is limited by what jug1 holds and what jug2 can still take.
    moved = min(jug1.gallons, jug2.capacity - jug2.gallons)
    poured_from = Jug(jug1.gallons - moved, jug1.capacity)
    poured_into = Jug(jug2.gallons + moved, jug2.capacity)
    return poured_from, poured_into
def jugs_total_gallons(jugs):
    """Total gallons of water across all jugs."""
    total = 0
    for jug in jugs:
        total += jug.gallons
    return total
def sort_jugs_by_capacity(jugs):
    """Return the jugs ordered from smallest to largest capacity."""
    return sorted(jugs, key=lambda jug: jug.capacity)
def gen_new_jug_states(jugs_state):
    """Return every state reachable from *jugs_state* in one move.

    Moves are: fill any non-full jug, empty any non-empty jug, and pour one
    jug into another.  Each successor carries its own copy of the action log
    so search paths stay independent.
    """
    successor_states = []
    by_capacity = attrgetter('capacity')

    def _clone_without(indices):
        # Copy the state minus the jugs at *indices*.  Index-based removal is
        # used (instead of the original ``j != jug`` filter) so two identical
        # Jug tuples are never dropped together.
        jugs = [j for k, j in enumerate(jugs_state.jugs) if k not in indices]
        return JugsState(jugs, list(jugs_state.actions))

    # Fill each jug that is not already full.
    for i, jug in enumerate(jugs_state.jugs):
        if is_full(jug):
            continue
        state = _clone_without({i})
        state.jugs.append(fill_jug(jug))
        state.jugs.sort(key=by_capacity)
        state.actions.append("> filled ({}, {})".format(jug.gallons, jug.capacity))
        state.actions.append("current state: {}".format(state.jugs))
        successor_states.append(state)

    # Empty each jug that is not already empty.
    for i, jug in enumerate(jugs_state.jugs):
        if is_empty(jug):
            continue
        state = _clone_without({i})
        state.jugs.append(empty_jug(jug))
        state.jugs.sort(key=by_capacity)
        state.actions.append("> emptied ({}, {})".format(jug.gallons, jug.capacity))
        state.actions.append("current state: {}".format(state.jugs))
        successor_states.append(state)

    # Pour the contents of each jug into each other jug.
    # BUGFIX: ``permutations`` needs r=2 here; without it the original only
    # worked for exactly two jugs and raised ValueError for any other count.
    for (i, jug1), (k, jug2) in permutations(enumerate(jugs_state.jugs), 2):
        if is_empty(jug1) or is_full(jug2):
            continue
        state = _clone_without({i, k})
        new1, new2 = transf_from_1_to_2(jug1, jug2)  # compute the pour once
        state.jugs.append(new1)
        state.jugs.append(new2)
        state.jugs.sort(key=by_capacity)
        state.actions.append("> transfered {} to {}".format(jug1, jug2))
        state.actions.append("current state: {}".format(state.jugs))
        successor_states.append(state)

    return successor_states
def main():
def is_wanted_jug_state(jugs_state):
return jugs_total_gallons(jugs_state.jugs) == 4
initial_jugs_state = JugsState([Jug(0, 3), Jug(0, 5)], actions=['initial'])
jug_state_queue = [initial_jugs_state]
while jug_state_queue:
if is_wanted_jug_state(jug_state_queue[0]):
print '\n'.join(jug_state_queue[0].actions)
print "Wanted state reached!"
break
jug_state = jug_state_queue.pop(0)
jug_state_queue.extend(gen_new_jug_states(jug_state))
# Run the BFS search when executed as a script.
if __name__ == '__main__':
    main()
# output:
# initial
# > filled (0, 3)
# current state: [Jug(gallons=3, capacity=3), Jug(gallons=0, capacity=5)]
# > transfered Jug(gallons=3, capacity=3) to Jug(gallons=0, capacity=5)
# current state: [Jug(gallons=0, capacity=3), Jug(gallons=3, capacity=5)]
# > filled (0, 3)
# current state: [Jug(gallons=3, capacity=3), Jug(gallons=3, capacity=5)]
# > transfered Jug(gallons=3, capacity=3) to Jug(gallons=3, capacity=5)
# current state: [Jug(gallons=1, capacity=3), Jug(gallons=5, capacity=5)]
# > emptied (5, 5)
# current state: [Jug(gallons=1, capacity=3), Jug(gallons=0, capacity=5)]
# > transfered Jug(gallons=1, capacity=3) to Jug(gallons=0, capacity=5)
# current state: [Jug(gallons=0, capacity=3), Jug(gallons=1, capacity=5)]
# > filled (0, 3)
# current state: [Jug(gallons=3, capacity=3), Jug(gallons=1, capacity=5)]
# Wanted state reached!
| [
"max.atreides@gmail.com"
] | max.atreides@gmail.com |
9279e380fad7f0a2d4a3dd2741fd94ceeb8bcd89 | a9db74855c63d83034bf4874cda908f77a6eb90b | /demo_project/demo_project/testrunner.py | 4d1a6bd11cce31264a557ec2848637378a57ac83 | [] | no_license | mikelopez/sciweb-django-messages | eadb4d1a117e637b3ac8c32f297249985a8dcace | 55ecc9c444d1e0c718f97da70c5c846c872cda7d | refs/heads/master | 2020-04-15T05:58:58.660546 | 2013-06-22T09:48:11 | 2013-06-22T09:48:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | __author__ = 'Marcos Lopez'
# http://github.com/mikelopez
from django.test.simple import DjangoTestSuiteRunner
import settings
class BaseAppsTestNoDb(DjangoTestSuiteRunner):
    """Django test runner that skips database creation and teardown.

    When no explicit test labels are given, a suite is built from every
    installed app whose dotted path does not contain "django".
    """

    def setup_databases(self, **kwargs):
        """No-op: this runner never creates test databases."""

    def teardown_databases(self, old_config, **kwargs):
        """No-op: nothing was created, so there is nothing to tear down."""

    def build_suite(self, test_labels, *args, **kwargs):
        labels = test_labels
        if not labels:
            labels = [app for app in settings.INSTALLED_APPS
                      if "django" not in app]
        return super(BaseAppsTestNoDb, self).build_suite(labels, *args, **kwargs)
| [
"dev@scidentify.info"
] | dev@scidentify.info |
4e18bd8857b03ec62724b0ac7499c7556affc40d | 6c512b7d2ae4b1ad713a57f74a4816e1291ba7a1 | /python_3/solutions/soln_kaggle_titanic.py | 44f9b1d34823679fec1149596540f66f21a80b61 | [
"MIT"
] | permissive | duttashi/applied-machine-learning | 451389e8f27931f32132a148e93effa7c6352536 | ff3267b97d9dd7122400754798e06fb493daa40a | refs/heads/master | 2021-12-17T19:12:39.531717 | 2021-12-04T09:36:46 | 2021-12-04T09:36:46 | 169,368,684 | 0 | 2 | MIT | 2021-12-04T09:36:47 | 2019-02-06T07:19:08 | R | UTF-8 | Python | false | false | 496 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 10 18:10:07 2020
@author: Ashish
"""
# Exploratory look at the Kaggle Titanic training data.
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#% matplotlib inline
#print(os.getcwd())
# Load the training set; the path is relative to this script's location.
titanic_df = pd.read_csv("../../data/kaggle_titanic_train.csv"
                         , low_memory=False)
# Quick structural overview: first rows plus dtypes / null counts.
print(titanic_df.head())
print(titanic_df.info())
# Categorical plot of passenger sex, colored by ticket class.
sns.catplot(y="Sex", data = titanic_df, hue = 'Pclass')
| [
"ashish.dutt8@gmail.com"
] | ashish.dutt8@gmail.com |
4c03770d902f460fff4750192f2a760a9c9f977b | ef96d96f6b92c5beb0b6e3334c7db2ef590f3875 | /coordination/runtime.py | 0010794e255b8de9bdb02ee30207c3b7bc470c17 | [] | no_license | Evgenus/coordination | 84693f11285dc2d16f864f619fd9d704cdea1b75 | 17d0c4030ccaa672a901af7f83605a237283bd96 | refs/heads/master | 2021-01-25T08:28:08.998333 | 2011-08-04T15:17:46 | 2011-08-04T15:17:46 | 2,118,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,792 | py | #standart
from functools import partial
from weakref import proxy
from collections import deque, defaultdict
class FeatureChecker(object):
    """Probe wrapper: runs *func* against a provider and records the verdict.

    Calling the checker invokes ``func(subj)``; any exception means the
    feature is unavailable (``subj.forbid(name)``), otherwise it is marked
    available (``subj.allow(name)``).
    """

    def __init__(self, name, func):
        self.name = name
        self.func = func

    def __call__(self, subj):
        try:
            self.func(subj)
        except Exception:
            # Any failure at all counts as "feature missing".
            subj.forbid(self.name)
        else:
            subj.allow(self.name)
class FeaturesProvider(object):
    """Holds information about available system features for aspects.

    On instantiation every registered ``FeatureChecker`` in ``checkers`` is
    run against the new instance, populating ``features`` with True/False
    availability flags.
    """
    # Populated by FeaturesProviderMeta from FeatureChecker class attributes.
    checkers = {}

    def __init__(self):
        self.features = {}   # feature name -> bool availability
        self.namespace = {}  # extra names exposed through __getattr__
        # Run every registered checker; each one calls allow()/forbid() back.
        for name, checker in self.checkers.iteritems():  # Python 2 dict API
            checker(self)

    def __contains__(self, feature):
        # Unknown features count as unavailable rather than raising.
        return self.features.get(feature, False)

    def allow(self, name):
        """Mark *name* as an available feature."""
        self.features[name] = True

    def forbid(self, name):
        """Mark *name* as an unavailable feature."""
        self.features[name] = False

    def provide(self, **kwargs):
        """Publish extra attributes, readable via normal attribute access."""
        self.namespace.update(kwargs)

    def __getattr__(self, name):
        if name in self.namespace:
            return self.namespace[name]
        raise AttributeError(name)

    @classmethod
    def feature(cls, name):
        # Decorator factory: wraps a probe function into a FeatureChecker
        # bound to the given feature name.
        return partial(FeatureChecker, name)
class FeaturesProviderMeta(type):
    """Metaclass that collects FeatureChecker class attributes.

    Any attribute whose value is a FeatureChecker is gathered (together with
    checkers inherited from base classes) into the class's ``checkers``
    mapping, which FeaturesProvider.__init__ then executes.
    """
    def __new__(meta, name, bases, internals):
        checkers = {}
        cls = type.__new__(meta, name, bases, internals)
        # Start from the inherited checkers, then add the ones declared here.
        checkers.update(cls.checkers)
        for name, value in internals.iteritems():  # Python 2 dict API
            if isinstance(value, FeatureChecker):
                checkers[name] = value
        cls.checkers = checkers
        return cls
# Rebuild FeaturesProvider through its metaclass so already-declared checker
# attributes get collected (Python 2 style metaclass retrofit).
FeaturesProvider = FeaturesProviderMeta('FeaturesProvider',
    FeaturesProvider.__bases__, dict(FeaturesProvider.__dict__))
class MessageQueue(object):
    """FIFO queue of deferred calls, used as an actor mailbox."""

    def __init__(self):
        self.queue = deque()

    def add(self, callable, *args, **kwargs):
        """Enqueue ``callable(*args, **kwargs)`` for a later dispatch step."""
        self.queue.append(partial(callable, *args, **kwargs))

    def __call__(self):
        """Run one pending call; return True iff something was dispatched."""
        if not self.queue:
            return False
        job = self.queue.popleft()
        job()
        return True
class MessageLoop(object):
    """Abstract event loop: subclasses implement run() and drive callback()."""

    # Polling interval in seconds.
    # NOTE(review): presumably read by concrete loops -- nothing here uses it.
    timeout = 0.01

    def set_callback(self, callback):
        """Install the callable the loop should invoke on each iteration."""
        self.callback = callback

    def run(self):
        """Start the loop; concrete subclasses must override this."""
        raise NotImplementedError()

    def callback(self):
        # Default callback: report "no work done" until one is installed.
        return False
class Action(object):
    """Basic concurrency primitive: an observable event with subscribers.

    ``action << cb`` subscribes, ``action >> cb`` unsubscribes, and calling
    the action broadcasts to every subscriber, optionally routed through a
    class-level MessageQueue and/or a preprocess hook.
    """
    # When set (by Actor plumbing), callbacks are queued instead of being
    # invoked synchronously.
    queue = None

    def __init__(self, preprocess=None):
        # Optional hook run before dispatch; it may rewrite (args, kwargs).
        self.preprocess = preprocess
        self.callbacks = []
        self.source = None  # owner object/class, set by Actor machinery
        self.name = None    # attribute name on the owner

    def __lshift__(self, callback):
        """Subscribe *callback* (idempotent)."""
        if callback not in self.callbacks:
            self.callbacks.append(callback)
        return self

    def __rshift__(self, callback):
        """Unsubscribe *callback* if it is present.

        BUGFIX: the original *appended* the callback when it was already
        subscribed, duplicating it, so ``>>`` made the callback fire twice
        instead of removing it.
        """
        if callback in self.callbacks:
            self.callbacks.remove(callback)
        return self

    def clear(self):
        """Drop every subscriber."""
        self.callbacks = []

    def __repr__(self):
        return "<Action {0} of {1}>".format(self.name, self.source)

    def __call__(self, *args, **kwargs):
        if self.preprocess is not None:
            result = self.preprocess(self.source, *args, **kwargs)
            if result is not None:
                # The hook may substitute the arguments wholesale.
                args, kwargs = result
        for callback in self.callbacks:
            if self.queue is not None:
                self.queue.add(callback, *args, **kwargs)
            else:
                callback(*args, **kwargs)

    def clone(self):
        """Fresh Action with the same hook and name, but no subscribers."""
        new = self.__class__(self.preprocess)
        new.name = self.name
        return new

    @classmethod
    def wrap(cls, callable):
        """Decorator form: build an Action whose preprocess is *callable*."""
        return cls(callable)
class Actor(object):
    """Base class whose class-level Action attributes become per-instance events."""

    class __metaclass__(type):  # Python 2 inline metaclass
        def __new__(meta, name, bases, internals):
            # Collect every class-level Action into _actions and tag each one
            # with its attribute name and (weakly referenced) owner class.
            actions = internals['_actions'] = {}
            for key, value in internals.iteritems():  # Python 2 dict API
                if isinstance(value, Action):
                    actions[key] = value
            cls = type.__new__(meta, name, bases, internals)
            for key, action in actions.iteritems():
                action.source = proxy(cls)  # weak proxy avoids a ref cycle
                action.name = key
            return cls

    def __init__(self):
        super(Actor, self).__init__()
        # Give the instance its own clone of each class Action, chained so
        # that firing the class-level Action also fires the instance one.
        for name, cls_action in self._actions.iteritems():
            action = cls_action.clone()
            cls_action << action
            action.source = proxy(self)
            setattr(self, name, action)
class Scope(object):
    """Registry grouping entities by type and aspects by (entity, aspect) type pairs."""

    def __init__(self):
        self.entities = defaultdict(list)  # entity type -> [entities]
        self.aspects = defaultdict(list)   # (entity type, aspect type) -> [aspects]

    def register_entity(self, entity):
        """File *entity* under its concrete type."""
        bucket = self.entities[type(entity)]
        bucket.append(entity)

    def register_aspect(self, entity, aspect):
        """File *aspect* under the (entity type, aspect type) pair."""
        key = (type(entity), type(aspect))
        self.aspects[key].append(aspect)
| [
"chernyshov.eugene@gmail.com"
] | chernyshov.eugene@gmail.com |
3e87f99793c05532a5476acb0d9b4699334dae17 | 49900ba50d4f6c979d6d433577828c8007973125 | /utils.py | 371a67f1d768d75ea6e4aa04eb4705a21502a4b3 | [] | no_license | weizhenzhao/cs224d_nlp_problem_set2 | 9661414965a58b97113f828a47932c5b9d8411df | 302f0e53cdd88147a5c1727d06f0be18270d8a2a | refs/heads/master | 2021-10-22T18:22:31.063591 | 2019-03-12T14:03:36 | 2019-03-12T14:03:36 | 104,356,708 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,768 | py | from collections import defaultdict
import numpy as np
class Vocab(object):
    """Bidirectional word<->index mapping with an ``<unk>`` fallback token."""

    def __init__(self):
        self.word_to_index = {}
        self.index_to_word = {}
        self.word_freq = defaultdict(int)
        self.total_words = 0
        self.unknown = '<unk>'
        # Reserve index 0 for the unknown token without counting it.
        self.add_word(self.unknown, count=0)

    def add_word(self, word, count=1):
        """Register *word* (new words get the next free index) and bump its count."""
        if word not in self.word_to_index:
            new_index = len(self.word_to_index)
            self.word_to_index[word] = new_index
            self.index_to_word[new_index] = word
        self.word_freq[word] += count

    def construct(self, words):
        """Build the vocabulary from an iterable of tokens."""
        for token in words:
            self.add_word(token)
        self.total_words = float(sum(self.word_freq.values()))
        print('{} total words with {} uniques'.format(self.total_words, len(self.word_freq)))

    def encode(self, word):
        """Map *word* to its index, falling back to the unknown token."""
        lookup = word if word in self.word_to_index else self.unknown
        return self.word_to_index[lookup]

    def decode(self, index):
        """Map an index back to its word."""
        return self.index_to_word[index]

    def __len__(self):
        return len(self.word_freq)
def calculate_perplexity(log_probs):
    """Perplexity = exp(mean negative log-probability).

    See https://web.stanford.edu/class/cs124/lec/languagemodeling.pdf
    """
    total_neg_logprob = 0
    for log_prob in log_probs:
        total_neg_logprob -= log_prob
    return np.exp(total_neg_logprob / len(log_probs))
def get_ptb_dataset(dataset='train'):
    """Yield PTB tokens for the given split, with '<eos>' after each line."""
    path = 'data/ptb/ptb.{}.txt'.format(dataset)
    for line in open(path):
        for word in line.split():
            yield word
        # Line-boundary marker, equivalent to <eos> in:
        # https://github.com/wojzaremba/lstm/blob/master/data.lua#L32
        # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/reader.py#L31
        yield '<eos>'
def ptb_iterator(raw_data, batch_size, num_steps):
    """Yield (input, target) batches; targets are inputs shifted by one step."""
    flat = np.array(raw_data, dtype=np.int32)
    batch_len = len(flat) // batch_size
    # Reshape the token stream into batch_size parallel rows, dropping the
    # remainder that does not fill a whole row.
    data = np.zeros([batch_size, batch_len], dtype=np.int32)
    for row in range(batch_size):
        data[row] = flat[batch_len * row:batch_len * (row + 1)]
    epoch_size = (batch_len - 1) // num_steps
    if epoch_size == 0:
        raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
    for step in range(epoch_size):
        lo = step * num_steps
        hi = lo + num_steps
        yield (data[:, lo:hi], data[:, lo + 1:hi + 1])
def sample(a, temperature=1.0):
    """Draw an index from probability array *a*, sharpened by *temperature*.

    Adapted from the Keras lstm_text_generation example.
    """
    # Temperature rescaling in log space, then renormalize to a distribution.
    logits = np.log(a) / temperature
    weights = np.exp(logits)
    probs = weights / np.sum(weights)
    # One multinomial draw; argmax of the one-hot result is the sampled index.
    return np.argmax(np.random.multinomial(1, probs, 1))
def data_iterator(orig_X, orig_y=None, batch_size=32, label_size=2, shuffle=False):
    """Yield (x, y) minibatches; y is one-hot encoded, or None when unlabeled.

    BUGFIX: the original tested for labels with ``np.any(orig_y)``, which
    treats a perfectly valid all-zero label vector (every example in class 0)
    as "no labels" and silently yields y=None.  Label presence is now tested
    with ``is not None``.
    """
    # Optionally shuffle the data before training.
    if shuffle:
        indices = np.random.permutation(len(orig_X))
        data_X = orig_X[indices]
        data_y = orig_y[indices] if orig_y is not None else None
    else:
        data_X = orig_X
        data_y = orig_y
    total_processed_examples = 0
    total_steps = int(np.ceil(len(data_X) / float(batch_size)))
    for step in range(total_steps):
        # Select up to batch_size examples (the last batch may be short).
        batch_start = step * batch_size
        x = data_X[batch_start:batch_start + batch_size]
        y = None
        if data_y is not None:
            # Convert class indices to one-hot rows.
            y_indices = data_y[batch_start:batch_start + batch_size]
            y = np.zeros((len(x), label_size), dtype=np.int32)
            y[np.arange(len(y_indices)), y_indices] = 1
        yield x, y
        total_processed_examples += len(x)
    # Sanity check that every example was emitted exactly once.
    assert total_processed_examples == len(data_X), 'Expected {} and processed {}'.format(len(data_X), total_processed_examples)
| [
"958904120@qq.com"
] | 958904120@qq.com |
c15f225950aa88e2dc2917e52c329801e9be9352 | f8065e5d6f898e02f4fbe533f5b252fe82273bb8 | /master/childmanager.py | 4758fa62b1f48da1ef1b398af3d5f2ddb28e9a83 | [] | no_license | pizi06/firefly_study | f79c8e3596043beabc2b13783d46b18515617bfe | 4e85db73e4eda473180b302c16872a498d605aab | refs/heads/master | 2021-01-15T11:20:39.691679 | 2016-08-25T03:36:43 | 2016-08-25T03:36:43 | 65,810,416 | 0 | 0 | null | 2016-08-16T10:15:39 | 2016-08-16T10:15:39 | null | UTF-8 | Python | false | false | 3,221 | py | #coding:utf8
"""
Created on 2011-10-14
@author: lan (www.9miao.com)
"""
from twisted.python import log
from zope.interface import Interface
from zope.interface import implements
from child import RemoteChild
class _ChildsManager(Interface):
    """Interface of the child-node manager."""
    def __init__(self):
        """Initialize the interface."""
    def getChildById(self,childId):
        """Return the child-node instance with the given id."""
    def getChildByName(self,childname):
        """Return the child-node instance with the given name."""
    def addChild(self,child):
        """Add a child node.
        @param child: Child object
        """
    def dropChild(self,*arg,**kw):
        """Remove a child node."""
    def callChild(self,*args,**kw):
        """Invoke an interface method on a child node."""
    def callChildByName(self,*args,**kw):
        """Invoke an interface method on a child node.
        @param childname: str name of the child node
        """
    def dropChildByID(self,childId):
        """Remove a child node.
        @param childId: Child ID
        """
class ChildsManager(object):
    """Manager of child nodes, keyed by each child's ``_name``."""
    implements(_ChildsManager)
    def __init__(self):
        """Initialize the child-node manager."""
        self._childs = {}
    def getChildById(self,childId):
        """Return the child-node instance with the given ID (or None)."""
        return self._childs.get(childId)
    def getChildByName(self,childname):
        """Return the child-node instance with the given name (or None)."""
        # Linear scan: names come from each child's getName(), not the keys.
        for key,child in self._childs.items():
            if child.getName() == childname:
                return self._childs[key]
        return None
    def addChild(self,child):
        """Add a child node.
        @param child: Child object
        """
        key = child._name
        if self._childs.has_key(key):  # Python 2 dict API
            # NOTE(review): raising a string is invalid on Python >= 2.6
            # (TypeError); this should raise an Exception subclass.
            raise "child node %s exists"% key
        self._childs[key] = child
    def addChildByNamePeer(self,name,peer):
        # Convenience wrapper: build a RemoteChild for *peer* and register it.
        child = RemoteChild(name,peer)
        self.addChild(child)
    def dropChild(self,child):
        """Remove a child node.
        @param child: Child Object
        """
        key = child._name
        try:
            del self._childs[key]
        except Exception,e:  # Python 2 except syntax; missing key only logged
            log.msg(str(e))
    def dropChildByID(self,childId):
        """Remove a child node.
        @param childId: Child ID
        """
        try:
            del self._childs[childId]
        except Exception,e:  # Python 2 except syntax; missing key only logged
            log.msg(str(e))
    def callChild(self,childId,*args,**kw):
        """Invoke an interface method on a child node.
        @param childId: int id of the child node
        """
        child = self._childs.get(childId,None)
        if not child:
            log.err("child %s doesn't exists"%childId)
            return
        return child.callbackChild(*args,**kw)
    def callChildByName(self,childname,*args,**kw):
        """Invoke an interface method on a child node.
        @param childname: str name of the child node
        """
        child = self.getChildByName(childname)
        if not child:
            log.err("child %s doesn't exists"%childname)
            return
        return child.callbackChild(*args,**kw)
| [
"chenee543216@gmail.com"
] | chenee543216@gmail.com |
3719f7c33f75113aa29a8acc2ea8453eb1d44ff1 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/nd/aadjep.py | 66de504f26e72326a8d2553c00e0e69986e85b13 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,126 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AAdjEp(Mo):
    # Auto-generated ACI model metadata for the abstract neighbor-discovery
    # adjacency endpoint class (`ndAAdjEp`). Per the file header this code is
    # generated and should not be edited by hand.
    meta = ClassMeta("cobra.model.nd.AAdjEp")
    meta.isAbstract = True
    meta.moClassName = "ndAAdjEp"
    meta.moClassName = "ndAAdjEp"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Adjacency"
    meta.writeAccessMask = 0x8008020040001
    meta.readAccessMask = 0x8008020040001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    # Inheritance chain within the cobra model.
    meta.superClasses.add("cobra.model.nw.ConnEp")
    meta.superClasses.add("cobra.model.nw.AdjEp")
    meta.superClasses.add("cobra.model.l3.ProtAdjEp")
    meta.superClasses.add("cobra.model.nw.Conn")
    meta.superClasses.add("cobra.model.nw.ProtAdjEp")
    meta.superClasses.add("cobra.model.nw.Item")
    meta.superClasses.add("cobra.model.nw.Ep")
    # Concrete classes implementing this abstract class.
    meta.concreteSubClasses.add("cobra.model.nd.StAdjEp")
    meta.concreteSubClasses.add("cobra.model.nd.AdjEp")
    meta.concreteSubClasses.add("cobra.model.nd.SvcAdjEp")
    meta.rnPrefixes = [
    ]
    # Property metadata: addr (IPv6 address of the adjacency).
    prop = PropMeta("str", "addr", "addr", 17071, PropCategory.REGULAR)
    prop.label = "IPv6 address"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("addr", prop)
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "mac", "mac", 17072, PropCategory.REGULAR)
    prop.label = "MAC"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unspecified"
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("mac", prop)
    prop = PropMeta("str", "name", "name", 16432, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(1, 128)]
    meta.props.add("name", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "upTS", "upTS", 17073, PropCategory.REGULAR)
    prop.label = "ND entry expiry time"
    prop.isOper = True
    meta.props.add("upTS", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # Abstract class: no naming properties, so namingVals stays empty.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
7b20daa0763292ec49da5292a8b5fec8a9b0692c | 99b3a6bdf81ae69ed07c402098458635f20a75a7 | /one_student_without_allennlp/mean_teacher/modules/convert_mednli.py | 0616df65a45a0f8f77a3b1a50597d1d04f5377cd | [] | no_license | mithunpaul08/mean-teacher | e6298efac8f7d67671bc6eca19f07568afa0caee | 11a0a5b813b4a2f8b9c3524af35d3e3914d457b6 | refs/heads/master | 2021-07-16T13:26:22.343467 | 2020-05-14T23:39:35 | 2020-05-14T23:39:35 | 164,931,583 | 1 | 0 | null | 2020-05-14T23:39:36 | 2019-01-09T20:18:32 | Python | UTF-8 | Python | false | false | 537 | py | import pandas as pd
import json
import os
test_file="../../data/rte/mednli/mli_test_v1.jsonl"
assert os.path.exists(test_file) is True
t=pd.read_json(test_file,lines=True)
out_path="../../data/rte/mednli/mli_test_lex.jsonl"
with open(out_path,'w') as outfile:
outfile.write("")
for i,row in t.iterrows():
with open(out_path, 'a+') as outfile:
total = {'claim': row.sentence1,
'evidence':row.sentence2,
"label":row.gold_label}
json.dump(total,outfile)
outfile.write("\n") | [
"mithunpaul08@gmail.com"
] | mithunpaul08@gmail.com |
d2d41ab5c8dcaa1b0c5a061f0fe767f4d00b5703 | 4b7d5c8824df4462a338993efcdfa3b17199ff5b | /基础/day1/guessage_while.py | f90a4e7c568144a273ae7dc8e8d360cfb0196b82 | [] | no_license | kobe24shou/python | 9c287babfb357e7f650fab453f3e60614b7a71fc | f78f147101f182207a69f0dc8e1595b54280164a | refs/heads/master | 2021-06-02T12:40:59.424542 | 2020-06-28T06:13:51 | 2020-06-28T06:13:51 | 101,620,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python
# -*-coding:utf-8-*-
# Author:ls
# aishou24@gmail.com
# Number guessing game, first written with a boolean sentinel variable.
age = 50
running = True
while running:
    guess = int(input("Age is :"))
    if guess > age:
        print("Is bigger")
    elif guess < age:
        print("Is smaller")
    else:
        print("Yes")
        running = False
print("End")
print("------------------breake 版本------------------------")
# Same game again, this time terminated with `break` instead of a flag.
age1 = 50
while True:
    guess = int(input("Age is :"))
    if guess > age1:
        print("Is bigger")
    elif guess < age1:
        print("Is smaller")
    else:
        print("Yes")
        break
| [
"aishou24@gmail.com"
] | aishou24@gmail.com |
f9ea0048388a9416f9d9c993cb1572102209b9f4 | 987ca298f54716d920f2f9fa4d2f21c7c7b122cb | /bwin/bwin/middlewares.py | f27033211b243d9c5b489263feaeb04f2383caf9 | [] | no_license | blacktyger/demonstration-bwin | 2f0da4476b2acef0b4a5e50329de50876c721c0f | c5eb7417540f912edfa824b7eacb7cc3c0ac8f33 | refs/heads/main | 2023-07-31T21:06:52.410226 | 2021-09-21T08:37:34 | 2021-09-21T08:37:34 | 407,239,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,644 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class BwinSpiderMiddleware:
    """Spider middleware for the bwin project.

    Every hook is a pass-through: responses, results, and start requests are
    forwarded unchanged. Hooks Scrapy does not find here fall back to the
    engine's default behaviour.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy instantiates the middleware through this factory so that
        # signal handlers can be wired up against the crawler.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response; returning None lets processing continue.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every request/item produced by the spider untouched.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling: let other middlewares deal with the exception.
        return None

    def process_start_requests(self, start_requests, spider):
        # Forward the start requests untouched.
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class BwinDownloaderMiddleware:
    """Downloader middleware for the bwin project.

    Requests and responses are passed through unchanged; hooks Scrapy does
    not find here fall back to the engine's default behaviour.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy so signal handlers can be connected.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None tells Scrapy to keep processing this request.
        return None

    def process_response(self, request, response, spider):
        # Hand the response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Defer to the next middleware / default exception handling.
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"blacktyg3r@gmail.com"
] | blacktyg3r@gmail.com |
f7bd20779af03b0715192711042512b0ffd10ceb | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/ext/command_utils/choose_menu.py | 2d7bc2bbc94257d79eda6116d7234f1dcb5a82db | [
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 27,570 | py | __all__ = ('ChooseMenu', )
from scarletio import CancelledError, copy_docs
from ...discord.channel import Channel
from ...discord.core import BUILTIN_EMOJIS
from ...discord.embed import Embed
from ...discord.exceptions import DiscordException, ERROR_CODES
from ...discord.interaction import InteractionEvent
from ...discord.message import Message
from .bases import (
GUI_STATE_CANCELLING, GUI_STATE_READY, GUI_STATE_SWITCHING_CTX, GUI_STATE_SWITCHING_PAGE, GUI_STATE_VALUE_TO_NAME,
PaginationBase
)
from .utils import Timeouter
class ChooseMenu(PaginationBase):
    """
    Familiar to ``Pagination``, but instead of just displaying multiple pages of text, it allows the user to select
    a displayed option.

    The class allows modifications and closing it's representations for every user. Also works at private channels.

    Picks up on reaction additions and on reaction deletions as well and removes the added reactions on if has
    permission, which might be missing, like in DM-s.

    Attributes
    ----------
    _canceller : `None`, `function`
        The function called when the ``ChooseMenu`` is cancelled or when it expires. This is a onetime use and after
        it was used, is set as `None`.
    _task_flag : `int`
        A flag to store the state of the ``ChooseMenu``.

        Possible values:
        +---------------------------+-------+-----------------------------------------------------------------------+
        | Respective name           | Value | Description                                                           |
        +===========================+=======+=======================================================================+
        | GUI_STATE_READY           | 0     | The ChooseMenu does nothing, is ready to be used.                     |
        +---------------------------+-------+-----------------------------------------------------------------------+
        | GUI_STATE_SWITCHING_PAGE  | 1     | The ChooseMenu is currently changing it's page.                       |
        +---------------------------+-------+-----------------------------------------------------------------------+
        | GUI_STATE_CANCELLING      | 2     | The ChooseMenu is currently changing it's page, but it was cancelled  |
        |                           |       | meanwhile.                                                            |
        +---------------------------+-------+-----------------------------------------------------------------------+
        | GUI_STATE_CANCELLED       | 3     | The ChooseMenu is, or is being cancelled right now.                   |
        +---------------------------+-------+-----------------------------------------------------------------------+
        | GUI_STATE_SWITCHING_CTX   | 4     | The ChooseMenu is switching context. Not used by the default class,   |
        |                           |       | but expected.                                                         |
        +---------------------------+-------+-----------------------------------------------------------------------+
    _timeouter : `None`, ``Timeouter``
        Executes the timing out feature on the ``ChooseMenu``.
    channel : ``Channel``
        The channel where the ``ChooseMenu`` is executed.
    client : ``Client``
        The client who executes the ``ChooseMenu``.
    message : `None`, ``Message``
        The message on what the ``ChooseMenu`` is executed.
    check : `None`, `callable`
        A callable what decides whether the ``ChooseMenu`` should process a received reaction event. Defaults to
        `None`.

        Should accept the following parameters:
        +-----------+---------------------------------------------------+
        | Name      | Type                                              |
        +===========+===================================================+
        | event     | ``ReactionAddEvent``, ``ReactionDeleteEvent``     |
        +-----------+---------------------------------------------------+

        Note, that ``ReactionDeleteEvent`` is only given, when the client has no `manage_messages` permission.

        Should return the following values:
        +-------------------+-----------+
        | Name              | Type      |
        +===================+===========+
        | should_process    | `bool`    |
        +-------------------+-----------+
    embed : ``Embed``
        An embed base, what's description and footer will be rendered with the given choices and with information
        about the respective page.
    selected : `int`
        The currently selected option of the ``ChooseMenu``.
    choices : `indexable` of `object`
        An indexable container, what stores the displayable choices.

        It's elements' type can be different from each other, and different structures act differently as well.
        There are the following cases:
        - If an element is `str`, then it will be used as an option's title and when selecting it, only that
            variable will be passed to the respective function when selected.
        - If an element is neither `str`, `tuple`, then it's `repr` will be used as an option's title, and only that
            variable will be passed to the respective function when selected.
        - If an element is `tuple`, then it's first element will be displayed as title. If it is `str`, then
            will be just simply added, however if not, then it's `repr` will be used. If selecting a `tuple` option,
            then it's element will be passed to the respective function.
    timeout : `float`
        The timeout of the ``ChooseMenu`` in seconds.
    prefix : `None`, `str`
        A prefix displayed before each option.
    selector : `async-callable`
        An `async-callable`, what is ensured when an option is selected.

        If the ``ChooseMenu`` is created only with `1` option, then it is ensured initially instead of creating the
        ``ChooseMenu`` itself. At this case, if `message` was not given (or given as `None`), then the `message`
        passed to the `selector` will be `None` as well.

        At least 3 parameters are passed to the `selector`:
        +-------------------+-------------------------------+
        | Respective name   | Type                          |
        +===================+===============================+
        | client            | ``Client``                    |
        +-------------------+-------------------------------+
        | channel           | ``Channel``                   |
        +-------------------+-------------------------------+
        | message           | ``Message``, `None`           |
        +-------------------+-------------------------------+

        The rest of the parameters depend on the respective choice (an elements of ``choices``). If the element is a
        `tuple`, then it's element will be passed, however if the choice is any other type, then only that
        object will be passed.

    Class Attributes
    ----------------
    UP : ``Emoji`` = `BUILTIN_EMOJIS['arrow_up_small']`
        The emoji used to move on the displayed option one above.
    DOWN : ``Emoji`` = `BUILTIN_EMOJIS['arrow_down_small']`
        The emoji used to move on the displayed option one under.
    LEFT : ``Emoji`` = `BUILTIN_EMOJIS['arrow_backward']`
        The emoji used to move on the previous page.
    RIGHT : ``Emoji`` = `BUILTIN_EMOJIS['arrow_forward']`
        The emoji used to move on the next page.
    SELECT : ``Emoji`` = `BUILTIN_EMOJIS['ok']`
        The emoji used to select an option.
    CANCEL : ``Emoji`` = `BUILTIN_EMOJIS['x']`
        The emoji used to cancel the ``ChooseMenu``.
    EMOJIS_RESTRICTED : `tuple` (`Emoji`, `Emoji`, `Emoji`, `Emoji`) = `(UP, DOWN, SELECT, CANCEL)`
        Restricted emojis, added when the choose menu has only options for 1 page.
    EMOJIS : `tuple` (`Emoji`, `Emoji`, `Emoji`, `Emoji`, `Emoji`, `Emoji`) = `(UP, DOWN, LEFT, RIGHT, SELECT, CANCEL)`
        Emojis added to the choose menu.
    """
    UP = BUILTIN_EMOJIS['arrow_up_small']
    DOWN = BUILTIN_EMOJIS['arrow_down_small']
    LEFT = BUILTIN_EMOJIS['arrow_backward']
    RIGHT = BUILTIN_EMOJIS['arrow_forward']
    SELECT = BUILTIN_EMOJIS['ok']
    CANCEL = BUILTIN_EMOJIS['x']
    EMOJIS_RESTRICTED = (UP, DOWN, SELECT, CANCEL)
    EMOJIS = (UP, DOWN, LEFT, RIGHT, SELECT, CANCEL)
    __slots__ = ('check', 'embed', 'selected', 'choices', 'timeout', 'prefix', 'selector')
    async def __new__(cls, client, channel, choices, selector, embed = Embed(), *, timeout = 240., message = None,
            prefix = None, check = None):
        """
        Creates a new choose menu with the given parameters.

        This method is a coroutine.

        Parameters
        ----------
        client : ``Client``
            The client who executes the ``ChooseMenu``.
        channel : ``Channel``, ``Message``
            The channel where the ``ChooseMenu`` is executed. Pass it as a ``Message`` to send a reply.
            If given as ``InteractionEvent``, then will acknowledge it and create a new message with it as well.
            Although will not acknowledge it if `message` is given.
        choices : `indexable` of `object`
            An indexable container, what stores the displayable choices.

            It's elements' type can be different from each other, and different structures act differently as well.
            There are the following cases:
            - If an element is `str`, then it will be used as an option's title and when selecting it, only
                that variable will be passed to the respective function when selected.
            - If an element is neither `str`, `tuple`, then it's `repr` will be used as an option's title, and only
                that variable will be passed to the respective function when selected.
            - If an element is `tuple`, then it's first element will be displayed as title. If it is `str`,
                then will be just simply added, however if not, then it's `repr` will be used. If selecting a `tuple`
                option, then it's element will be passed to the respective function.
        selector : `async-callable`
            An `async-callable`, what is ensured when an option is selected.

            If the ``ChooseMenu`` is created only with `1` option, then it is ensured initially instead of creating
            the ``ChooseMenu`` itself. At this case, if `message` was not given (or given as `None`), then the
            `message` passed to the `selector` will be `None` as well.

            At least 3 parameters are passed to the `selector`:
            +-------------------+-----------------------------------------------------------+
            | Respective name   | Type                                                      |
            +===================+===========================================================+
            | client            | ``Client``                                                |
            +-------------------+-----------------------------------------------------------+
            | channel           | ``Channel``, ``Message``, ``InteractionEvent``            |
            +-------------------+-----------------------------------------------------------+
            | message           | ``Message``, `None`                                       |
            +-------------------+-----------------------------------------------------------+

            The rest of the parameters depend on the respective choice (an elements of ``choices``). If the element is a
            `tuple`, then it's elements will be passed, however if the choice is any other type, then only that
            object will be passed.
        embed : ``Embed`` = `Embed()`, Optional
            An embed base, what's description and footer will be rendered with the given choices and with information
            about the respective page. Defaults to an empty ``Embed``.
        timeout : `float` = `240.0`, Optional (Keyword only)
            The timeout of the ``ChooseMenu`` in seconds.
        message : `None`, ``Message`` = `None`, Optional (Keyword only)
            The message on what the ``ChooseMenu`` will be executed. If not given a new message will be created.
        prefix : `None`, `str` = `None`, Optional (Keyword only)
            A prefix displayed before each option.
        check : `None`, `callable` = `None`, Optional (Keyword only)
            A callable what decides whether the ``ChooseMenu`` should process a received reaction event.

            Should accept the following parameters:
            +-----------+---------------------------------------------------+
            | Name      | Type                                              |
            +===========+===================================================+
            | event     | ``ReactionAddEvent``, ``ReactionDeleteEvent``     |
            +-----------+---------------------------------------------------+

            Note, that ``ReactionDeleteEvent`` is only given, when the client has no `manage_messages` permission.

            Should return the following values:
            +-------------------+-----------+
            | Name              | Type      |
            +===================+===========+
            | should_process    | `bool`    |
            +-------------------+-----------+

        Returns
        -------
        self : `None`, ``ChooseMenu``
            If `choices`'s length is less than `2`, then returns `None`.

        Raises
        ------
        TypeError
            `channel`'s type is incorrect.
        ValueError
            If `prefix` was not given as `None` and it's length is over `64` characters.
        """
        # NOTE(review): the code enforces 100 characters while the docstring above says 64 — confirm intended limit.
        if (prefix is not None) and (len(prefix) > 100):
            raise ValueError(
                f'Please a shorter 100 character long prefix, got {len(prefix)!r}, {prefix!r}.'
            )
        # Normalize the `channel` parameter to a real channel and remember whether
        # an interaction must be acknowledged before sending the menu message.
        if isinstance(channel, Channel):
            target_channel = channel
            received_interaction = False
        elif isinstance(channel, Message):
            target_channel = channel.channel
            received_interaction = False
        elif isinstance(channel, InteractionEvent):
            target_channel = channel.channel
            received_interaction = True
        else:
            raise TypeError(
                f'`channel` can be `{Channel.__name__}`, `{Message.__name__}`, `{InteractionEvent.__name__}`, '
                f'got {channel.__class__.__name__}; {channel!r}.'
            )
        # With 0 choices there is nothing to show; with exactly 1 choice run the
        # selector immediately instead of building a menu, and return `None`.
        result_length = len(choices)
        if result_length < 2:
            if result_length == 1:
                choice = choices[0]
                if isinstance(choice, tuple):
                    coroutine = selector(client, channel, message, *choice)
                else:
                    coroutine = selector(client, channel, message, choice)
                await coroutine
            return None
        self = object.__new__(cls)
        self.check = check
        self.client = client
        self.channel = target_channel
        self.choices = choices
        self.selector = selector
        self.selected = 0
        self._canceller = cls._canceller_function
        self._task_flag = GUI_STATE_READY
        self.message = message
        self.timeout = timeout
        self._timeouter = None
        self.prefix = prefix
        self.embed = embed
        # Create the menu message (or edit the provided one). Expected Discord
        # errors cancel the menu quietly; anything else propagates.
        try:
            if message is None:
                if received_interaction:
                    if not channel.is_acknowledged():
                        await client.interaction_response_message_create(channel)
                    message = await client.interaction_followup_message_create(channel, embed = self._render_embed())
                else:
                    message = await client.message_create(channel, embed = self._render_embed())
                self.message = message
            else:
                await client.message_edit(message, embed = self._render_embed())
        except BaseException as err:
            self.cancel(err)
            if isinstance(err, ConnectionError):
                return self
            if isinstance(err, DiscordException):
                if err.code in (
                    ERROR_CODES.unknown_message, # message deleted
                    ERROR_CODES.unknown_channel, # message's channel deleted
                    ERROR_CODES.max_reactions, # reached reaction 20, some1 is trolling us.
                    ERROR_CODES.missing_access, # client removed
                    ERROR_CODES.missing_permissions, # permissions changed meanwhile
                    ERROR_CODES.cannot_message_user, # user has dm-s disallowed
                ):
                    return self
            raise
        # Without add-reactions permission the menu cannot function.
        if not target_channel.cached_permissions_for(client).can_add_reactions:
            self.cancel(PermissionError())
            return self
        # Add the control emojis; page-turn arrows only when there is more than one page (10 options per page).
        try:
            for emoji in (self.EMOJIS if (len(choices) > 10) else self.EMOJIS_RESTRICTED):
                await client.reaction_add(message, emoji)
        except BaseException as err:
            self.cancel(err)
            if isinstance(err, ConnectionError):
                return self
            if isinstance(err, DiscordException):
                if err.code in (
                    ERROR_CODES.unknown_message, # message deleted
                    ERROR_CODES.unknown_channel, # message's channel deleted
                    ERROR_CODES.max_reactions, # reached reaction 20, some1 is trolling us.
                    ERROR_CODES.missing_access, # client removed
                    ERROR_CODES.missing_permissions, # permissions changed meanwhile
                ):
                    return self
            raise
        # Arm the timeout and start listening for reaction events on the message.
        self._timeouter = Timeouter(self, timeout)
        client.events.reaction_add.append(message, self)
        client.events.reaction_delete.append(message, self)
        return self
    def _render_embed(self):
        """
        Renders the choose menu's embed's description with it's choices of the respective page and it's footer
        with page information.

        Returns
        -------
        embed : ``Embed`` (or any compatible)
            The rendered embed.
        """
        selected = self.selected
        choices = self.choices
        # Each page shows up to 10 options; `index` is the first option of the
        # page containing the current selection.
        index = (selected // 10) * 10
        end = index + 10
        if len(choices) < end:
            end = len(choices)
        parts = []
        prefix = self.prefix
        # Budget for each rendered title; the prefix eats into it.
        left_length = 195
        if (prefix is not None):
            left_length -= len(prefix)
        while True:
            title = choices[index]
            if isinstance(title,tuple):
                if not title:
                    title = ''
                else:
                    title = title[0]
            if not isinstance(title,str):
                title = str(title)
            # Truncate over-long titles, preferably at a word boundary.
            if len(title) > left_length:
                space_position = title.rfind(' ', left_length - 25, left_length)
                if space_position == -1:
                    space_position = left_length - 3
                title = title[:space_position]+'...'
            # The selected option (and its prefix) is rendered in bold.
            if index == selected:
                if (prefix is not None):
                    parts.append('**')
                    parts.append(prefix)
                    parts.append('** ')
                parts.append('**')
                parts.append(title)
                parts.append('**\n')
            else:
                if (prefix is not None):
                    parts.append(prefix)
                    parts.append(' ')
                parts.append(title)
                parts.append('\n')
            index +=1
            if index == end:
                break
        embed = self.embed
        embed.description = ''.join(parts)
        # Footer: current page, total pages, option range and selected index (1-based).
        current_page = (selected // 10) + 1
        limit = len(choices)
        page_limit = (limit // 10) + 1
        start = end - 9
        if start < 1:
            start = 1
        if end == len(choices):
            end -= 1
        limit -= 1
        embed.add_footer(f'Page {current_page}/{page_limit}, {start} - {end} / {limit}, selected: {selected + 1}')
        return embed
    @copy_docs(PaginationBase.__call__)
    async def __call__(self, client, event):
        # Ignore bot reactions and emojis that are not menu controls.
        if event.user.bot:
            return
        if (event.emoji not in (self.EMOJIS if len(self.choices)>10 else self.EMOJIS_RESTRICTED)):
            return
        if (event.delete_reaction_with(client) == event.DELETE_REACTION_NOT_ADDED):
            return
        # Apply the user supplied filter, if any; its errors are reported, not raised.
        check = self.check
        if (check is not None):
            try:
                should_continue = check(event)
            except GeneratorExit:
                raise
            except BaseException as err:
                await client.events.error(client, f'{self!r}.__call__', err)
                return
            if not should_continue:
                return
        # Only act when the menu is idle; a cancel received mid page-switch is
        # recorded so the switch can abort afterwards.
        task_flag = self._task_flag
        if task_flag != GUI_STATE_READY:
            if task_flag == GUI_STATE_SWITCHING_PAGE:
                if event.emoji is self.CANCEL:
                    self._task_flag = GUI_STATE_CANCELLING
                return
            # ignore GUI_STATE_CANCELLED and GUI_STATE_SWITCHING_CTX
            return
        message = self.message
        # Dispatch on the control emoji; movement emojis break out with the new
        # selection, SELECT / CANCEL return directly.
        while True:
            emoji = event.emoji
            if emoji is self.UP:
                selected = self.selected - 1
                break
            if emoji is self.DOWN:
                selected = self.selected + 1
                break
            if emoji is self.LEFT:
                selected = self.selected - 10
                break
            if emoji is self.RIGHT:
                selected = self.selected + 10
                break
            if emoji is self.CANCEL:
                self.cancel(CancelledError())
                return
            if emoji is self.SELECT:
                self._task_flag = GUI_STATE_SWITCHING_CTX
                self.cancel()
                # Clean up the control reactions before handing over to the selector.
                try:
                    if self.channel.cached_permissions_for(client).can_manage_messages:
                        await client.reaction_clear(message)
                    else:
                        for emoji in self.EMOJIS:
                            await client.reaction_delete_own(message, emoji)
                except BaseException as err:
                    self.cancel(err)
                    if isinstance(err, GeneratorExit):
                        raise
                    if isinstance(err, ConnectionError):
                        # no internet
                        return
                    if isinstance(err, DiscordException):
                        if err.code in (
                            ERROR_CODES.unknown_message, # message already deleted
                            ERROR_CODES.unknown_channel, # channel deleted
                            ERROR_CODES.missing_access, # client removed
                            ERROR_CODES.missing_permissions, # permissions changed meanwhile
                        ):
                            return
                    await client.events.error(client, f'{self!r}.__call__', err)
                    return
                # Run the selector with the chosen option; tuples are unpacked.
                selector = self.selector
                try:
                    choice = self.choices[self.selected]
                    channel = self.channel
                    if isinstance(choice, tuple):
                        coroutine = selector(client, channel, message, *choice)
                    else:
                        coroutine = selector(client, channel, message, choice)
                    await coroutine
                except GeneratorExit:
                    raise
                except BaseException as err:
                    await client.events.error(client, f'{self!r}.__call__ when calling {selector!r}', err)
                return
            return
        # Clamp the new selection into the valid range; no-op moves skip the edit.
        if selected < 0:
            selected = 0
        elif selected >= len(self.choices):
            selected = len(self.choices) - 1
        if self.selected == selected:
            return
        self.selected = selected
        self._task_flag = GUI_STATE_SWITCHING_PAGE
        try:
            await client.message_edit(message, embed = self._render_embed())
        except BaseException as err:
            self.cancel(err)
            if isinstance(err, GeneratorExit):
                raise
            if isinstance(err, ConnectionError):
                # no internet
                return
            if isinstance(err, DiscordException):
                if err.code in (
                    ERROR_CODES.unknown_message, # message already deleted
                    ERROR_CODES.unknown_channel, # message's channel deleted
                    ERROR_CODES.missing_access, # client removed
                ):
                    return
            # We definitely do not want to silence `ERROR_CODES.invalid_form_body`
            await client.events.error(client, f'{self!r}.__call__', err)
            return
        # A cancel request may have arrived while the edit was in flight.
        if self._task_flag == GUI_STATE_CANCELLING:
            self.cancel(CancelledError())
            return
        self._task_flag = GUI_STATE_READY
        # Refresh the expiry timer after successful interaction.
        timeouter = self._timeouter
        if (timeouter is not None):
            timeouter.set_timeout(self.timeout)
    @copy_docs(PaginationBase.__repr__)
    def __repr__(self):
        repr_parts = [
            '<', self.__class__.__name__,
            ' client = ', repr(self.client),
            ', channel = ', repr(self.channel),
            ', state = '
        ]
        task_flag = self._task_flag
        repr_parts.append(repr(task_flag))
        repr_parts.append(' (')
        task_flag_name = GUI_STATE_VALUE_TO_NAME[task_flag]
        repr_parts.append(task_flag_name)
        repr_parts.append(')')
        # Third party things go here
        repr_parts.append(', choices = ')
        repr_parts.append(repr(self.choices))
        repr_parts.append(', selected = ')
        repr_parts.append(repr(self.selected))
        repr_parts.append(', selector = ')
        repr_parts.append(repr(self.selector))
        prefix = self.prefix
        if (prefix is not None):
            repr_parts.append(', prefix = ')
            repr_parts.append(repr(prefix))
        repr_parts.append('>')
        return ''.join(repr_parts)
| [
"re.ism.tm@gmail.com"
] | re.ism.tm@gmail.com |
8803ee40779e822078d261d366af0cd2919d9924 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02705/s673406592.py | 9a76562ca9e02977efb677163ac5c7ab13a50f02 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | import numpy
print(int(input())*2*numpy.pi)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f05d91a779158e39e065a91876b3ee0594373239 | 6e9c9128054da7eea28a4627381df28f95416ee5 | /finance_ml/labeling/betsides.py | b3df71764d3b3a2d8c9c13dde03c34083689205f | [
"MIT"
] | permissive | BTCTON/finance_ml | c5a4ad2486608ad19c92c04c70fe513be135c236 | a585be2d04db5a749eb6b39b7336e5aeb30d6327 | refs/heads/master | 2021-12-23T07:53:13.791609 | 2021-10-15T01:47:41 | 2021-10-15T01:47:41 | 158,898,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | import numbers
import pandas as pd
import numpy as np
import multiprocessing as mp
from ..multiprocessing import mp_pandas_obj
def _cusum_side(diff, h, k=0, molecule=None):
side = []
s_pos, s_neg = 0, 0
timestamps = []
th = None
for t in molecule:
if th is None:
th = h.loc[t]
s_pos = max(0, s_pos + diff.loc[t] - k)
s_neg = min(0, s_neg + diff.loc[t] + k)
if s_pos > th:
s_pos = 0
timestamps.append(t)
th = h.loc[t]
side.append(1)
elif s_neg < -th:
s_neg = 0
timestamps.append(t)
th = h.loc[t]
side.append(-1)
side = pd.Series(side, index=pd.DatetimeIndex(timestamps))
return side
def cusum_side(close, h, k=0, use_log=True, num_threads=None):
"""Sample points with CUSUM Filter and use its direction as betting side
Args:
close (pd.Series): Price series
h (float or pd.Series): Threasholds to sampmle points.\
If specified with float, translate to pd.Series(h, index=close.index)
k (float, optional): Minimum speed parameter to hit threashold.\
Defaults to 0, which means inactive
Returns:
pd.Series: Betting sides at sampled points
"""
if num_threads is None:
num_threads = mp.cpu_count()
# asssum that E y_t = y_{t-1}
side = []
s_pos, s_neg = 0, 0
if use_log:
diff = np.log(close).diff().dropna()
else:
diff = close.diff().dropna()
# time variant threshold
if isinstance(h, numbers.Number):
h = pd.Series(h, index=diff.index)
h = h.reindex(diff.index, method='bfill')
h = h.dropna()
side = mp_pandas_obj(func=_cusum_side,
pd_obj=('molecule', h.index),
num_threads=num_threads,
diff=diff, h=h, k=k)
return side | [
"f.j.akimoto@gmail.com"
] | f.j.akimoto@gmail.com |
e4a5581eacba722b9bd59eaf6b2c79e06c407dd6 | 955f9d3fb34af54de2f046d17bbac11c1474819e | /abc111/b.py | 806ef54082f0f620dd15f1c7e64280e5d3c590c3 | [] | no_license | shimewtr/AtCoderPracticePython | 5bb4c28119fced2d111bd1810e0e290f25b6a191 | f3c22ec1f7a36a27848070c5c6ca4e1717b04ac6 | refs/heads/master | 2023-01-12T17:28:44.770138 | 2020-11-19T22:50:22 | 2020-11-19T22:50:22 | 204,830,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
n = int(input())
for i in range(n, 1000):
s = str(i)
if s[0] == s[1] == s[2]:
print(i)
break
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_input_1(self):
print("test_input_1")
input = """111"""
output = """111"""
self.assertIO(input, output)
def test_input_2(self):
print("test_input_2")
input = """112"""
output = """222"""
self.assertIO(input, output)
def test_input_3(self):
print("test_input_3")
input = """750"""
output = """777"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
| [
"wawawatataru@gmail.com"
] | wawawatataru@gmail.com |
167b95dc17b9c8dee4e07a8205bc2fafd07bd0d8 | 707054dbae74908940b72a462553dda70b97d7d2 | /home/models.py | 6f0c7933685422f0cfa0ec268884ce9f6b0d648d | [] | no_license | nghiatd16/spoj_tournament | ea6b59d6efd0f10fd1993c2252f8afe3b3ffb685 | 21f79224059fbeb84907db7ddc9c050c8da307a8 | refs/heads/master | 2020-04-19T22:32:36.998098 | 2019-02-03T13:25:59 | 2019-02-03T13:25:59 | 168,471,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | from django.db import models
import numpy as np
import math
import time
# Create your models here.
class Member(models.Model):
full_name = models.CharField(max_length=100)
username = models.CharField(max_length=20)
grade = models.CharField(max_length=5)
num_solved = models.IntegerField()
score = models.FloatField(default=0)
target = models.IntegerField(default=1)
lst_solved = models.CharField(max_length=15000)
lastrank = models.IntegerField(default=1)
def parse_lst_solved(self, col_per_row=4):
lst_str_solved = self.lst_solved.__str__().split(' ')
return Member.reshape_list(lst_str_solved, col_per_row=col_per_row)
def get_list_solved(self):
return self.lst_solved.__str__().strip().split(' ')
def get_set_solved(self):
return set(self.lst_solved.__str__().strip().split(' '))
def get_list_exclude(self, other):
self_set_str_solved = set(self.lst_solved.__str__().split(' '))
other_set_str_solved = set(other.lst_solved.__str__().split(' '))
self_res = []
other_res = []
for ele in self_set_str_solved:
if ele not in other_set_str_solved:
self_res.append(ele)
for ele in other_set_str_solved:
if ele not in self_set_str_solved:
other_res.append(ele)
return (self_res, other_res)
@staticmethod
def reshape_list(lst_str_solved, col_per_row=3):
num_row = math.ceil(len(lst_str_solved)/col_per_row)
res = []
c_id = 0
for i in range(num_row):
tmp = []
for j in range(col_per_row):
tmp.append((lst_str_solved[c_id], "https://www.spoj.com/PTIT/problems/{}/".format(lst_str_solved[c_id])))
if c_id == len(lst_str_solved)-1 :
break
c_id += 1
res.append(tmp)
return res
def __str__(self):
return "[{} - {} - {}]".format(self.full_name, self.num_solved, self.target)
def __eq__(self, other):
if self.score == other.score and self.num_solved == other.num_solved and self.username == other.username:
return True
return False
def __gt__(self, other):
if self.score != other.score:
return self.score > other.score
if self.num_solved != num_solved:
return self.num_solved > other.num_solved
return self.username > other.username
def __lt__(self, other):
if self.score != other.score:
return self.score < other.score
if self.num_solved != num_solved:
return self.num_solved < other.num_solved
return self.username < other.username
class Topic(models.Model):
url = models.CharField(max_length=100)
name = models.CharField(max_length=20)
def __str__(self):
return "Topic[name:{} - url:{}]".format(self.name, self.url)
def get_arr_name_columns(self):
return ["name", "url"]
def get_name_columns(self):
arr = self.get_arr_name_columns()
rs = ""
for i in range(len(arr)):
rs += arr[i]
if i != len(arr)-1:
rs += ", "
return rs
def get_refer(self):
return "%s, %s"
def get_value(self):
return (self.name, self.url)
class Problem(models.Model):
code = models.CharField(max_length=15, primary_key=True)
score = models.FloatField() | [
"nghiatd.proptit@gmail.com"
] | nghiatd.proptit@gmail.com |
b65df574541529f940fa1f6059d553464563a10b | b051745bda8b82caa648f4ef2d61f1cf61712a21 | /vise/tests/cli/test_main.py | 56dba5e3ec870d5278c2e00f72de071a9d6dbe63 | [
"Python-2.0",
"MIT"
] | permissive | zhenming-xu/vise | 904bff2902b179884d2da4e7912568bb1983b7b3 | 5f89d11beaf850d8c1dc559d3e092b1752ad0ce6 | refs/heads/master | 2023-02-27T14:35:42.614753 | 2021-01-25T05:33:46 | 2021-01-25T05:33:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,343 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
from argparse import Namespace
from pathlib import Path
from pymatgen import Element
from vise.analyzer.atom_grouping_type import AtomGroupingType
from vise.cli.main import parse_args
from vise.defaults import defaults
from vise.input_set.task import Task
from vise.input_set.xc import Xc
parent_dir = Path(__file__).parent
def test_structure_info_wo_options():
parsed_args = parse_args(["si"])
expected = Namespace(
poscar="POSCAR",
symprec=defaults.symmetry_length_tolerance,
angle_tolerance=defaults.symmetry_angle_tolerance,
show_conventional=False,
func=parsed_args.func)
assert parsed_args == expected
def test_structure_info_w_options():
parsed_args = parse_args(["si",
"-p", "a",
"-s", "1",
"-a", "2",
"-c"])
expected = Namespace(
poscar="a",
symprec=1.0,
angle_tolerance=2.0,
show_conventional=True,
func=parsed_args.func)
assert parsed_args == expected
def test_get_poscars_wo_options():
parsed_args = parse_args(["gp", "-m", "mp-1234"])
# func is a pointer so need to point the same address.
expected = Namespace(
poscar="POSCAR",
prior_info=Path("prior_info.yaml"),
mpid="mp-1234",
func=parsed_args.func)
assert parsed_args == expected
def test_get_poscars_w_options():
parsed_args = parse_args(["get_poscar",
"-p", "a",
"-m", "123",
"-pi", "b.yaml"])
expected = Namespace(
poscar="a",
prior_info=Path("b.yaml"),
mpid="123",
func=parsed_args.func)
assert parsed_args == expected
def test_make_atom_poscars_wo_options():
parsed_args = parse_args(["map"])
expected = Namespace(
dirname=Path.cwd(),
elements=None,
func=parsed_args.func)
assert parsed_args == expected
def test_make_atom_poscars_w_options():
parsed_args = parse_args(["map", "-d", "a", "-e", "H", "He"])
expected = Namespace(
dirname=Path("a"),
elements=[Element.H, Element.He],
func=parsed_args.func)
assert parsed_args == expected
def test_vasp_set_wo_options():
parsed_args = parse_args(["vs"])
# func is a pointer so need to point the same address.
expected = Namespace(
poscar=Path("POSCAR"),
task=defaults.task,
xc=defaults.xc,
kpt_density=None,
overridden_potcar=defaults.overridden_potcar,
user_incar_settings=None,
prev_dir=None,
vasprun=defaults.vasprun,
outcar=defaults.outcar,
options=None,
uniform_kpt_mode=False,
file_transfer_type=None,
func=parsed_args.func,
)
assert parsed_args == expected
def test_vasp_set_w_options():
parsed_args = parse_args(["vs",
"--poscar", "POSCAR-tmp",
"-t", "band",
"-x", "pbesol",
"-k", "4.2",
"--potcar", "Mg_pv", "O_h",
"--user_incar_settings", "LREAD", "F",
"-d", "c",
"--vasprun", "vasprun_1",
"--outcar", "OUTCAR_1",
"--options", "encut", "800",
"--uniform_kpt_mode",
"--file_transfer_type", "WAVECAR", "C",
])
expected = Namespace(
poscar=Path("POSCAR-tmp"),
task=Task.band,
xc=Xc.pbesol,
kpt_density=4.2,
overridden_potcar=["Mg_pv", "O_h"],
user_incar_settings=["LREAD", "F"],
prev_dir=Path("c"),
vasprun=Path("vasprun_1"),
outcar=Path("OUTCAR_1"),
options=["encut", "800"],
uniform_kpt_mode=True,
file_transfer_type=["WAVECAR", "C"],
func=parsed_args.func,
)
assert parsed_args == expected
def test_plot_band_wo_options():
parsed_args = parse_args(["pb"])
# func is a pointer so need to point the same address.
expected = Namespace(
vasprun=defaults.vasprun,
kpoints_filename="KPOINTS",
y_range=[-10.0, 10.0],
filename="band.pdf",
func=parsed_args.func,
)
assert parsed_args == expected
def test_plot_band_w_options():
parsed_args = parse_args(["pb",
"--vasprun", "vasprun_1",
"--kpoints", "KPOINTS_1",
"--y_range", "-1.0", "1.0",
"--filename", "band_1.pdf",
])
expected = Namespace(
vasprun=Path("vasprun_1"),
kpoints_filename="KPOINTS_1",
y_range=[-1.0, 1.0],
filename="band_1.pdf",
func=parsed_args.func,
)
assert parsed_args == expected
def test_plot_dos_wo_options():
parsed_args = parse_args(["pd"])
# func is a pointer so need to point the same address.
expected = Namespace(
vasprun=defaults.vasprun,
outcar=defaults.outcar,
type=AtomGroupingType.non_equiv_sites,
legend=True,
crop_first_value=True,
x_range=None,
y_max_ranges=None,
target=None,
filename="dos.pdf",
base_energy=None,
func=parsed_args.func,
)
assert parsed_args == expected
def test_plot_dos_w_options():
parsed_args = parse_args(["pd",
"--vasprun", "vasprun_1",
"--outcar", "OUTCAR_1",
"-t", "atoms",
"-l", "False",
"-c", "False",
"--x_range", "-1.0", "1.0",
"-y", "-5.0", "5.0",
"--target", "1", "2",
"--filename", "dos_1.pdf",
"-b", "-1"
])
expected = Namespace(
vasprun=Path("vasprun_1"),
outcar=Path("OUTCAR_1"),
type=AtomGroupingType.atoms,
legend=False,
crop_first_value=False,
x_range=[-1.0, 1.0],
y_max_ranges=[-5.0, 5.0],
target=["1", "2"],
filename="dos_1.pdf",
base_energy=-1.0,
func=parsed_args.func,
)
assert parsed_args == expected
def test_plot_absorption_wo_options():
parsed_args = parse_args(["pa"])
expected = Namespace(
vasprun=defaults.vasprun,
outcar=defaults.outcar,
filename="absorption.pdf",
y_ranges=[10**3, 10**8],
calc_kk=False,
ita=0.01,
func=parsed_args.func)
assert parsed_args == expected
def test_plot_absorption_w_options():
parsed_args = parse_args(["pa",
"--vasprun", "vasprun_1",
"--outcar", "OUTCAR_1",
"-f", "a",
"-y", "-5.0", "5.0",
"-ckk",
"-i", "0.1"])
expected = Namespace(
vasprun=Path("vasprun_1"),
outcar=Path("OUTCAR_1"),
filename="a",
y_ranges=[10**-5.0, 10**5.0],
calc_kk=True,
ita=0.1,
func=parsed_args.func)
assert parsed_args == expected
def test_band_edge_wo_options():
parsed_args = parse_args(["be"])
# func is a pointer so need to point the same address.
expected = Namespace(
vasprun=defaults.vasprun,
outcar=defaults.outcar,
func=parsed_args.func,
)
assert parsed_args == expected
def test_band_edge_w_options():
parsed_args = parse_args(["be",
"--vasprun", "vasprun_1",
"--outcar", "OUTCAR_1",
])
expected = Namespace(
vasprun=Path("vasprun_1"),
outcar=Path("OUTCAR_1"),
func=parsed_args.func,
)
assert parsed_args == expected
| [
"yuuukuma@gmail.com"
] | yuuukuma@gmail.com |
bffb107f57f0d36dc20178f29c6dc99e51e19baf | 2ea49bfaa6bc1b9301b025c5b2ca6fde7e5bb9df | /contributions/IlyaGusev/Python/Data Structures/2016-10-22.py | 75d1b2c5f1aa536abbd5ffc5220a703ae5c4ac1f | [] | no_license | 0x8801/commit | 18f25a9449f162ee92945b42b93700e12fd4fd77 | e7692808585bc7e9726f61f7f6baf43dc83e28ac | refs/heads/master | 2021-10-13T08:04:48.200662 | 2016-12-20T01:59:47 | 2016-12-20T01:59:47 | 76,935,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | Following PEP 8 styling guideline.
`bytes` type
Get the most of `int`s
There is more to copying
Implementing **weak** references | [
"phoenixilya@gmail.com"
] | phoenixilya@gmail.com |
a53f15b3da988af03b2566c0cf91fc45e52d5bf2 | 27b86f422246a78704e0e84983b2630533a47db6 | /src/ezdxf/acis/const.py | 7be54fa05ffa8b157ce4698f620934753fe88c3e | [
"MIT"
] | permissive | mozman/ezdxf | 7512decd600896960660f0f580cab815bf0d7a51 | ba6ab0264dcb6833173042a37b1b5ae878d75113 | refs/heads/master | 2023-09-01T11:55:13.462105 | 2023-08-15T11:50:05 | 2023-08-15T12:00:04 | 79,697,117 | 750 | 194 | MIT | 2023-09-14T09:40:41 | 2017-01-22T05:55:55 | Python | UTF-8 | Python | false | false | 5,301 | py | # Copyright (c) 2022, Manfred Moitzi
# License: MIT License
import enum
from ezdxf.version import __version__
# SAT Export Requirements for Autodesk Products
# ---------------------------------------------
# Script to create test files:
# examples/acistools/create_3dsolid_cube.py
# DXF R2000, R2004, R2007, R2010: OK, tested with TrueView 2022
# ACIS version 700
# ACIS version string: "ACIS 32.0 NT"
# record count: 0, not required
# body count: 1, required
# ASM header: no
# end-marker: "End-of-ACIS-data"
# DXF R2004, R2007, R2010: OK, tested with TrueView 2022
# ACIS version 20800
# ACIS version string: "ACIS 208.00 NT"
# record count: 0, not required
# body count: n + 1 (asm-header), required
# ASM header: "208.0.4.7009"
# end-marker: "End-of-ACIS-data"
# SAB Export Requirements for Autodesk Products
# ---------------------------------------------
# DXF R2013, R2018: OK, tested with TrueView 2022
# ACIS version 21800
# ACIS version string: "ACIS 208.00 NT"
# record count: 0, not required
# body count: n + 1 (asm-header), required
# ASM header: "208.0.4.7009"
# end-marker: "End-of-ASM-data"
ACIS_VERSION = {
400: "ACIS 4.00 NT", # DXF R2000, no asm header - only R2000
700: "ACIS 32.0 NT", # DXF R2000-R2010, no asm header
20800: "ACIS 208.00 NT", # DXF R2013 with asm-header, asm-end-marker
21800: "ACIS 218.00 NT", # DXF R2013 with asm-header, asm-end-marker
22300: "ACIS 223.00 NT", # DXF R2018 with asm-header, asm-end-marker
}
ASM_VERSION = {
20800: "208.0.4.7009", # DXF R2004, R2007, R2010
21800: "208.0.4.7009", # DXF R2013, default version for R2013 and R2018
22300: "222.0.0.1700", # DXF R2018
}
EZDXF_BUILDER_ID = f"ezdxf v{__version__} ACIS Builder"
MIN_EXPORT_VERSION = 700
# ACIS version 700 is the default version for DXF R2000, R2004, R2007 and R2010 (SAT)
# ACIS version 21800 is the default version for DXF R2013 and R2018 (SAB)
DEFAULT_SAT_VERSION = 700
DEFAULT_SAB_VERSION = 21800
DATE_FMT = "%a %b %d %H:%M:%S %Y"
END_OF_ACIS_DATA_SAT = "End-of-ACIS-data"
END_OF_ACIS_DATA_SAB = b"\x0e\x03End\x0e\x02of\x0e\x04ACIS\x0d\x04data"
END_OF_ASM_DATA_SAT = "End-of-ASM-data"
END_OF_ASM_DATA_SAB = b"\x0e\x03End\x0e\x02of\x0e\x03ASM\x0d\x04data"
BEGIN_OF_ACIS_HISTORY_DATA = "Begin-of-ACIS-History-data"
END_OF_ACIS_HISTORY_DATA = "End-of-ACIS-History-data"
DATA_END_MARKERS = (
END_OF_ACIS_DATA_SAT,
BEGIN_OF_ACIS_HISTORY_DATA,
END_OF_ASM_DATA_SAT,
)
NULL_PTR_NAME = "null-ptr"
NONE_ENTITY_NAME = "none-entity"
NOR_TOL = 1e-10
RES_TOL = 9.9999999999999995e-7
BOOL_SPECIFIER = {
"forward": True,
"forward_v": True,
"reversed": False,
"reversed_v": False,
"single": True,
"double": False,
}
ACIS_SIGNATURE = b"ACIS BinaryFile" # DXF R2013/R2018
ASM_SIGNATURE = b"ASM BinaryFile4" # DXF R2018
SIGNATURES = [ACIS_SIGNATURE, ASM_SIGNATURE]
def is_valid_export_version(version: int):
return version >= MIN_EXPORT_VERSION and version in ACIS_VERSION
class Tags(enum.IntEnum):
NO_TYPE = 0x00
BYTE = 0x01 # not used in files!
CHAR = 0x02 # not used in files!
SHORT = 0x03 # not used in files!
INT = 0x04 # 32-bit signed integer
FLOAT = 0x05 # not used in files!
DOUBLE = 0x06 # 64-bit double precision floating point value
STR = 0x07 # count is the following 8-bit uchar
STR2 = 0x08 # not used in files!
STR3 = 0x09 # not used in files!
# bool value for reversed, double, I - depends on context
BOOL_TRUE = 0x0A
# bool value forward, single, forward_v - depends on context
BOOL_FALSE = 0x0B
POINTER = 0x0C
ENTITY_TYPE = 0x0D
ENTITY_TYPE_EX = 0x0E
SUBTYPE_START = 0x0F
SUBTYPE_END = 0x10
RECORD_END = 0x11
LITERAL_STR = 0x12 # count ia a 32-bit uint, see transform entity
LOCATION_VEC = 0x13 # vector (3 doubles)
DIRECTION_VEC = 0x14 # vector (3 doubles)
# Enumeration are stored as strings in SAT and ints in SAB.
# It's not possible to translate SAT enums (strings) to SAB enums (int) and
# vice versa without knowing the implementation details. Each enumeration
# is specific to the class where it is used.
ENUM = 0x15
# 0x16: ???
UNKNOWN_0x17 = 0x17 # double
# entity type structure:
# 0x0D 0x04 (char count of) "body" = SAT "body"
# 0x0E 0x05 "plane" 0x0D 0x07 "surface" = SAT "plane-surface"
# 0x0E 0x06 "ref_vt" 0x0E 0x03 "eye" 0x0D 0x06 "attrib" = SAT "ref_vt-eye-attrib"
class Flags(enum.IntFlag):
HAS_HISTORY = 1
class AcisException(Exception):
pass
class InvalidLinkStructure(AcisException):
pass
class ParsingError(AcisException):
pass
class ExportError(AcisException):
pass
class EndOfAcisData(AcisException):
pass
class Features:
LAW_SPL = 400
CONE_SCALING = 400
LOFT_LAW = 400
REF_MIN_UV_GRID = 400
VBLEND_AUTO = 400
BL_ENV_SF = 400
ELLIPSE_OFFSET = 500
TOL_MODELING = 500
APPROX_SUMMARY = 500
TAPER_SCALING = 500
LAZY_B_SPLINE = 500
DM_MULTI_SURF = 500
GA_COPY_ACTION = 600
DM_MULTI_SURF_COLOR = 600
RECAL_SKIN_ERROR = 520
TAPER_U_RULED = 600
DM_60 = 600
LOFT_PCURVE = 600
EELIST_OWNER = 600
ANNO_HOOKED = 700
PATTERN = 700
ENTITY_TAGS = 700
AT = 700
NET_LAW = 700
STRINGLESS_HISTORY = 700
| [
"me@mozman.at"
] | me@mozman.at |
ccb13e3581e5f4fcdbefec32265612838a553659 | b119f7e1f21510928e59cd5b6f16d284d6f868a3 | /djangodocker/djangodocker/urls.py | fbdfd995b634c92a1b518cdfb6644b77ba0590d2 | [] | no_license | a-bautista/Django_Tutorials | 6433d0ee2e9f2cff80ac4f84af150bfa6011de25 | 7fdffc32ac1dcf0e2a2f88d265d8d0265a267b53 | refs/heads/master | 2022-12-13T12:07:30.113489 | 2019-12-13T05:31:31 | 2019-12-13T05:31:31 | 132,667,576 | 0 | 0 | null | 2022-12-08T06:35:51 | 2018-05-08T21:32:11 | Python | UTF-8 | Python | false | false | 1,041 | py | """djangodocker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from .views import (home, about)
# connect this application with the dashboard application by including the task.urls
# you get access to the other apps by typing 127.0.0.1:8000/task/create
urlpatterns = [
path('', home),
path('admin/', admin.site.urls),
path('about/', about),
path('task/', include('task.urls'))
]
| [
"alex.bautista.ramos.90@gmail.com"
] | alex.bautista.ramos.90@gmail.com |
ed240718c67426f61c98d597d6846f52ef4543b3 | 82f1c3338ee636ee08ec0009c413b40c495f5c95 | /core/settings/base.py | a8ae00dab226edfffa9ed5a7c2045abf9dd08bf7 | [] | no_license | DevHerles/rest | 6be3714ff43d398aedb9dcf1194b3659a38598aa | a723095d77a454c7259871b3ee980f6c3c40ecc6 | refs/heads/main | 2023-05-30T03:02:48.522541 | 2021-06-04T21:55:08 | 2021-06-04T21:55:08 | 362,266,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,661 | py | from pathlib import Path
from datetime import timedelta
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e+3qek7(i5evq)87ff5d8e@bjsd&q_h)w5qejoojqhhx%$4j+h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
BASE_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
LOCAL_APPS = [
'apps.base',
'apps.common',
'apps.organs',
'apps.work_types',
'apps.organic_units',
'apps.settings',
'apps.users',
'apps.partners',
'apps.healths',
'apps.symptoms',
]
THIRD_APPS = [
'rest_framework.authtoken',
'rest_framework',
'simple_history',
'drf_yasg',
]
INSTALLED_APPS = BASE_APPS + LOCAL_APPS + THIRD_APPS
SWAGGER_SETTINGS = {'DOC_EXPANSION': 'none'}
TOKEN_EXPIRED_AFTER_SECONDS = 900
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
DATE_INPUT_FORMATS = ('%d-%m-%Y', '%Y-%m-%d')
LANGUAGE_CODE = 'es-GB'
TIME_ZONE = 'America/Lima'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = 'users.User'
CORS_ALLOWED_ORIGINS = ["http://localhost:3000"]
CORS_ORIGIN_WHITELIST = ["http://localhost:3000"]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
"DATE_INPUT_FORMATS": ["%Y-%m-%d"],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework_simplejwt.authentication.JWTAuthentication',
]
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120),
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
'ROTATE_REFRESH_TOKENS': False,
'BLACKLIST_AFTER_ROTATION': True,
'UPDATE_LAST_LOGIN': False,
'ALGORITHM': 'HS256',
'SIGNING_KEY': SECRET_KEY,
'VERIFYING_KEY': None,
'AUDIENCE': None,
'ISSUER': None,
'AUTH_HEADER_TYPES': ('Bearer', 'JWT'),
'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'user',
'USER_AUTHENTICATION_RULE':
'rest_framework_simplejwt.authentication.default_user_authentication_rule',
'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken', ),
'TOKEN_TYPE_CLAIM': 'token_type',
'JTI_CLAIM': 'jti',
'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
| [
"herles.incalla@gmail.com"
] | herles.incalla@gmail.com |
eddc122e28483b47ab38679b8af707a0a5342b2b | dbc216c71fa2cd447d9203bff21f85c48481847b | /python/METConfig_Truth.py | 3635a16b9804cf6f4542a7bcd8a2e563e32c649c | [] | no_license | rjwang/Reconstruction-MET-METReconstruction | be9082dc5a64744948a2cbc5f1a6ac35b3376944 | 2286131d6984cfc5e875ae32c9a4691f61de6ff1 | refs/heads/master | 2021-01-21T16:04:59.181902 | 2016-09-16T21:26:47 | 2016-09-16T21:26:47 | 68,415,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,265 | py | from METReconstruction.METRecoFlags import metFlags
from METReconstruction.METRecoConfig import BuildConfig, METConfig
from METReconstruction.METAssocConfig import METAssocConfig, AssocConfig
## Simple truth terms
cfg_truth = METConfig('Truth',
[BuildConfig('NonInt'),
BuildConfig('Int'),
BuildConfig('IntOut'),
BuildConfig('IntMuons')],
doRegions=True
)
metFlags.METConfigs()[cfg_truth.suffix] = cfg_truth
metFlags.METOutputList().append(cfg_truth.suffix)
metFlags.METOutputList().append(cfg_truth.suffix+"Regions")
### Truth association maps
#
#############################################################################
## AntiKt4LCTopo
#cfg_truthassoc_akt4lc = METAssocConfig('Truth_AntiKt4LCTopo',
# [AssocConfig('Truth','AntiKt4LCTopoJets')],
# doTruth=True
# )
#
#metFlags.METAssocConfigs()[cfg_truthassoc_akt4lc.suffix] = cfg_truthassoc_akt4lc
#metFlags.METAssocOutputList().append(cfg_truthassoc_akt4lc.suffix)
#
#############################################################################
## AntiKt4EMTopo
#cfg_truthassoc_akt4em = METAssocConfig('Truth_AntiKt4EMTopo',
# [AssocConfig('Truth','AntiKt4EMTopoJets')],
# doTruth=True
# )
#
#metFlags.METAssocConfigs()[cfg_truthassoc_akt4em.suffix] = cfg_truthassoc_akt4em
#metFlags.METAssocOutputList().append(cfg_truthassoc_akt4em.suffix)
#
#############################################################################
## AntiKt4EMPFlow
#
#from RecExConfig.RecFlags import rec
#if rec.doInDet() and metFlags.DoPFlow():
# cfg_truthassoc_akt4pf = METAssocConfig('Truth_AntiKt4EMPFlow',
# [AssocConfig('Truth','AntiKt4EMPFlowJets')],
# doTruth=True
# )
#
# metFlags.METAssocConfigs()[cfg_truthassoc_akt4pf.suffix] = cfg_truthassoc_akt4pf
# metFlags.METAssocOutputList().append(cfg_truthassoc_akt4pf.suffix)
| [
"r.jiewang@gmail.com"
] | r.jiewang@gmail.com |
2eac54655f5b985851187bcd96c6e111a90da1e0 | 8c87341eff7aa9b0face6281ed8644f87b531975 | /models.py | d4610f83b9a2b5b128a708d5ee58be9f8e86667b | [] | no_license | nprapps/breaking-news-facts | 455d27aa5c818ee8d292d81781b17d2cff3ef5e1 | b6aa8d2b4f31c12e8899ce099e2827304cb4500e | refs/heads/master | 2021-01-13T01:30:09.548232 | 2013-10-11T17:23:29 | 2013-10-11T17:23:29 | 10,252,811 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,337 | py | import datetime
import time
from peewee import *
from app_config import get_secrets
secrets = get_secrets()
psql_db = PostgresqlDatabase('breaking',
user=secrets['APPS_USER'],
password=secrets['APPS_PASS']
)
def delete_tables():
try:
Event.drop_table()
except:
pass
try:
Fact.drop_table()
except:
pass
def create_tables():
Event.create_table()
Fact.create_table()
class Event(Model):
"""
An event with a series of facts.
"""
name = CharField()
start_date = DateField()
class Meta:
database = psql_db
db_table = 'events'
def get_detail_uri(self):
return '/event-%s.json' % self.id
def get_admin_url(self):
return '/admin/events/%s/' % self.id
def __unicode__(self):
return self.name
def primary_facts(self):
return Fact\
.select()\
.join(Event)\
.where(Fact.event == self)\
.where(Fact.related_facts >> None)\
.order_by(Fact.timestamp.desc())
def as_dict(self):
output = {}
output['name'] = self.name
output['start_time'] = time.mktime(self.start_date.timetuple())
output['detail_uri'] = self.get_detail_uri()
return output
class Fact(Model):
    """
    An instance of a fact. Related to a master fact.

    Peewee model backed by the 'facts' table; a fact may be attached to an
    Event and may point at a parent fact via the self-referencing
    `related_facts` foreign key (NULL parent == "primary" fact).
    """
    # Human-readable labels indexed by the integer `status` field below.
    STATUS_LIST = ['Confirmed: False', 'Confirmed: True', 'Unconfirmed: Not Verifying', 'Unconfirmed: Verifying']
    event = ForeignKeyField(Event, null=True)          # owning event, optional
    statement = TextField()                            # the fact's text
    attribution = TextField()                          # who said/claimed it
    timestamp = DateTimeField()
    # Status choices (enforced at the app level, sadly):
    # 0 - Has been confirmed as false.
    # 1 - Has been confirmed as true.
    # 2 - Neither confirmed nor denied nor checking.
    # 3 - Checking.
    status = IntegerField(default=2)
    related_facts = ForeignKeyField('self', null=True)  # parent fact, if any
    public = BooleanField(default=False)
    approved = BooleanField(default=False)
    reporter = CharField()

    class Meta:
        database = psql_db
        db_table = 'facts'

    def __unicode__(self):
        """Text representation: the fact's statement (Python 2 protocol)."""
        return self.statement

    def status_widget(self):
        """Render an HTML <select> of all statuses with this fact's status selected."""
        template = "<select class='form-control'>"
        for status in [0,1,2,3]:
            template += "<option"
            if self.status == status:
                template += " selected"
            template += ">%s</option>" % self.STATUS_LIST[status]
        template += "</select>"
        return template

    def get_pretty_time(self):
        """Format the timestamp like '3:07 PM'.

        NOTE(review): '%-I' (no zero padding) is a glibc strftime extension
        and is not portable to Windows — TODO confirm deployment platform.
        """
        minute = str(self.timestamp.minute).zfill(2)
        hour = self.timestamp.strftime('%-I')
        ampm = self.timestamp.strftime('%p')
        return '%s:%s %s' % (hour, minute, ampm)

    def get_status(self):
        """Return the human-readable label for this fact's status."""
        return self.STATUS_LIST[self.status]

    def get_related_facts(self):
        """Return child facts (newest first), or None when there are none."""
        if Fact.select().where(Fact.related_facts == self).count() == 0:
            return None
        return Fact.select().where(Fact.related_facts == self).order_by(Fact.timestamp.desc())

    def as_dict(self):
        """Serialise the fact for JSON output.

        NOTE(review): reads peewee's private `_data` field store directly;
        this ties the method to a specific peewee version.
        """
        output = dict(self.__dict__['_data'])
        output['timestamp'] = time.mktime(output['timestamp'].timetuple())
        output['time_string'] = self.timestamp.isoformat()
        output.pop('event')       # drop FK ids that callers don't need
        output.pop('related_facts')
        return output
return output
| [
"jeremyjbowers@gmail.com"
] | jeremyjbowers@gmail.com |
39e4c0efd14beeb857c28a288b11086173e2d379 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4057/codes/1685_2471.py | 763bcec1f0339cd7ddf8b9131953dd922ec8bebd | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | i = int(input("Idade: "))
# NOTE(review): `i` (age in years, int) is read from input() on the line just
# above this block.
m = float(input("Massa corporal: "))  # body-mass value, used as a BMI (IMC) threshold below
print ("Entradas: ", i, "anos e IMC ", m)
# Validate: age in (0, 130], BMI strictly positive.
if (i > 0) and (i <= 130)and (m > 0):
    if (i < 45) and (m < 22):
        print("Risco: Baixo")    # young and lean -> low risk
    elif (i < 45) and (m >= 22):
        print("Risco: Medio")
    elif (i >= 45) and (m < 22):
        print("Risco: Medio")
    elif (i >= 45) and (m >= 22):
        print("Risco: Alto")     # older and heavier -> high risk
else:
    print("Dados invalidos")
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
c2be04decc5965600ac2292cab586ac24015fd4a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_084/ch149_2020_04_13_20_05_51_616666.py | d4effc79069f55ab7427c8bcca855bb2eef68d48 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | s=float(input('qual é o seu salario? '))
# NOTE(review): `s` (gross monthly salary, float) is read from input() on the
# line just above this block.
d = int(input('qual o numero de dependentes voce tem? '))

# --- Deduction to obtain the IRRF calculation base `bc` ----------------------
# Bug fix: the original chain tested `s >= 1045.00` FIRST, so the 7.5% branch
# captured every salary above 1045.00, all later brackets were dead code, and
# salaries below 1045.00 left bc == 0.  Compare against each bracket's upper
# bound instead (2020 INSS bands, keeping the original flat-rate approach).
# 189.59 is the per-dependent deduction.
bc = 0
if s <= 1045.00:
    bc = s - s * 0.075 - d * 189.59
elif s <= 2089.60:
    bc = s - s * 0.09 - d * 189.59
elif s <= 3134.40:
    bc = s - s * 0.12 - d * 189.59
elif s <= 6101.06:
    bc = s - s * 0.14 - d * 189.59
else:
    bc = s - 671.12 - d * 189.59   # fixed contribution above the 6101.06 ceiling
print(bc)

# --- IRRF withholding (2020 progressive table) -------------------------------
# Bug fixes relative to the original chain:
#   * `if s > 1903.98: IRRF = bc*0 - 0` zeroed the tax for every salary above
#     the exemption limit and made every later bracket unreachable;
#   * one branch had the impossible range `3751.06 <= s <= 3751.05`;
#   * the exempt branch computed IRRF but never printed it.
# The table is applied to the calculation base `bc` (not the raw salary);
# each bracket pairs a marginal rate with a fixed deduction amount.
if bc <= 1903.98:
    IRRF = 0.0                       # exempt band
elif bc <= 2826.65:
    IRRF = bc * 0.075 - 142.80
elif bc <= 3751.05:
    IRRF = bc * 0.15 - 354.80
elif bc <= 4664.68:
    IRRF = bc * 0.225 - 636.13
else:
    IRRF = bc * 0.275 - 869.36
print(IRRF)
| [
"you@example.com"
] | you@example.com |
77cd6e9cbe4628bb18c47cbeaef453ed29eaa4fa | 2c635d6b558a65e62a9d37c12abf9e4ecbe8938c | /Word Pattern/Word Pattern.py | ebab0efb357c3284674b254e795a3f19b8cfea06 | [] | no_license | GreatStephen/MyLeetcodeSolutions | c698e13b7088fc9236250b6ec10331b88fe99ed1 | 73a8f79f2cd5c769b195c503f0346893b102acdc | refs/heads/master | 2023-03-01T04:53:19.698040 | 2021-02-05T22:28:18 | 2021-02-05T22:28:18 | 284,350,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | class Solution:
def wordPattern(self, pattern: str, str: str) -> bool:
    """Return True iff the letters of `pattern` and the space-separated
    words of `str` are in one-to-one correspondence (a bijection)."""
    words = str.split(' ')
    if len(pattern) != len(words):
        return False
    # A bijection exists exactly when the number of distinct (letter, word)
    # pairs equals both the number of distinct letters and of distinct words.
    pairs = set(zip(pattern, words))
    return len(pairs) == len(set(pattern)) == len(set(words))
"litianyou97@gmail.com"
] | litianyou97@gmail.com |
4fc90913119a9447897f1ab6e324c787fdd0a931 | b0365a11976fc19e350ba3c448b2bc3720c3eb73 | /project/qt3/slider1.py | 5d3c85dd67c55740f2c2d7d19a3da8406bc8bcc2 | [] | no_license | excellencemichel/progrk | bd4e6797c21ed921ce4a3d75378ca752cece459d | e3144f78d9313ca9e2c836dcf53cf1bc4b3f10b8 | refs/heads/master | 2021-04-15T07:54:06.240231 | 2018-12-29T03:42:10 | 2018-12-29T03:42:10 | 116,013,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | #! /usr/bin/python
#-*-coding:utf-8-*-
from PyQt5 import QtGui
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QApplication,
QMainWindow, QDialog,
QWidget, QVBoxLayout,
QLineEdit, QSlider,
)
class Window(QWidget):
    """
    Demo window containing a QLineEdit and a horizontal QSlider.

    (Translated from the original French docstring:) this class shows that
    the window's dimensions can be set by hand.
    """
    def __init__(self):
        super().__init__()  # call the parent constructor (original comment said QMainWindow, but the base class is QWidget)
        self.title = "PyQt5 Window QSlider Part one"
        self.top = 100
        self.left = 100
        self.width = 400
        self.height = 200
        self.setWindowIcon(QtGui.QIcon("icons/line.png"))  # original note: "this did not work"
        self.init_window()

    def init_window(self):
        """Build the widgets, apply the geometry and show the window."""
        # NOTE(review): vboxLayout is populated below but never installed with
        # self.setLayout(...), so the widgets are actually positioned by the
        # explicit move() calls — TODO confirm whether the layout was intended.
        vboxLayout = QVBoxLayout()
        self.lineEdit = QLineEdit(self)
        vboxLayout.addWidget(self.lineEdit)
        self.lineEdit.move(100, 50)
        self.slider = QSlider(Qt.Horizontal, self)  # sliders are vertical by default
        self.slider.move(100, 20)
        self.slider.setMinimum(1)
        self.slider.setMaximum(99)
        self.slider.setValue(20)  # original note: if the minimum is >= the value given to setValue, setValue has no effect
        self.slider.setTickPosition(QSlider.TicksBelow)  # tick marks below the groove
        vboxLayout.addWidget(self.slider)
        self.setWindowTitle(self.title)
        self.setGeometry(self.top, self.left, self.width, self.height)
        self.show()
# Script entry point: create the Qt application and run its event loop.
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    window = Window()
    sys.exit(app.exec_())  # exec_() blocks until the window is closed
| [
"bnvnmmnl@gmail.com"
] | bnvnmmnl@gmail.com |
a055221346fa2dc69b31b83e6f31f1eb51b82322 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/sa.py | b025e8fbdcef3249d4389b6a52e8928b15a84d66 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import sys
def printFunction(lineRemaining):
    """Print the quoted payload of an SA command (Python 2 syntax).

    `lineRemaining` is a list of whitespace-split tokens; the payload is
    expected to be wrapped in stand-alone quote tokens, e.g.
    ['"', 'hello', 'world', '"'].  Empty quotes print a blank line.
    """
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]  # strip the quote tokens
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    """Process `fileName` line by line.

    Each line must start with the 'SA' command; its remaining tokens are
    handed to printFunction.  Any other command prints ERROR and aborts.
    (Python 2 print statements.)
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'SA':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return  # stop at the first unrecognised command
if __name__ == '__main__':
    main(sys.argv[1])  # the input file path is expected as the first CLI argument
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
ebdf4950594c969568cdc67d72e3d9eaf158ea10 | 0c6b4e9c5ecc5a7595717f9699953b227486ef3e | /tests/unit/modules/network/slxos/test_slxos_linkagg.py | 12600b9228050e9f902327f04bf5342b5639978e | [] | no_license | ansible-collection-migration/ansible.misc | d9c92e8bb0c17b3e2a92976215f523c2afaa5a46 | 3c02be2a8c03b2e375a1e1f37b0c119145ea358c | refs/heads/master | 2020-12-26T23:11:36.544511 | 2020-02-03T22:18:53 | 2020-02-03T22:18:53 | 237,681,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,064 | py | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible_collections.ansible.misc.tests.unit.compat.mock import patch
from ansible_collections.ansible.misc.tests.unit.modules.utils import set_module_args
from ansible_collections.ansible.misc.plugins.modules import slxos_linkagg
from ..slxos_module import TestSlxosModule, load_fixture
class TestSlxosLinkaggModule(TestSlxosModule):
    """Unit tests for the slxos_linkagg Ansible module.

    `get_config` and `load_config` inside the module under test are patched,
    so no device connection is ever made: the "running config" comes from the
    slxos_config_config.cfg fixture, and generated commands are captured
    instead of being pushed.
    """
    module = slxos_linkagg

    def setUp(self):
        """Start the get_config/load_config patches for each test."""
        super(TestSlxosLinkaggModule, self).setUp()
        self._patch_get_config = patch(
            'ansible_collections.ansible.misc.plugins.modules.slxos_linkagg.get_config'
        )
        self._patch_load_config = patch(
            'ansible_collections.ansible.misc.plugins.modules.slxos_linkagg.load_config'
        )
        self._get_config = self._patch_get_config.start()
        self._load_config = self._patch_load_config.start()

    def tearDown(self):
        """Stop the patches started in setUp."""
        super(TestSlxosLinkaggModule, self).tearDown()
        self._patch_get_config.stop()
        self._patch_load_config.stop()

    def load_fixtures(self, commands=None):
        """Feed the fixture config to the module; swallow config pushes."""
        config_file = 'slxos_config_config.cfg'
        self._get_config.return_value = load_fixture(config_file)
        self._load_config.return_value = None

    def test_slxos_linkagg_group_present(self, *args, **kwargs):
        """Creating a new port-channel emits the interface/exit commands."""
        set_module_args(dict(
            group='10',
            state='present'
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface port-channel 10',
                    'exit'
                ],
                'changed': True
            }
        )

    def test_slxos_linkagg_group_members_active(self, *args, **kwargs):
        """Adding members in active mode emits a channel-group per member."""
        set_module_args(dict(
            group='10',
            mode='active',
            members=[
                'Ethernet 0/1',
                'Ethernet 0/2'
            ]
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface port-channel 10',
                    'exit',
                    'interface Ethernet 0/1',
                    'channel-group 10 mode active',
                    'interface Ethernet 0/2',
                    'channel-group 10 mode active'
                ],
                'changed': True
            }
        )

    def test_slxos_linkagg_group_member_removal(self, *args, **kwargs):
        """A member present in the fixture but not in the task gets removed."""
        set_module_args(dict(
            group='20',
            mode='active',
            members=[
                'Ethernet 0/10',
            ]
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface port-channel 20',
                    'exit',
                    'interface Ethernet 0/11',
                    'no channel-group'
                ],
                'changed': True
            }
        )

    def test_slxos_linkagg_group_members_absent(self, *args, **kwargs):
        """state=absent removes an existing group; a missing group is a no-op."""
        set_module_args(dict(
            group='20',
            state='absent'
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'no interface port-channel 20'
                ],
                'changed': True
            }
        )
        # Group 10 is not in the fixture config, so nothing should change.
        set_module_args(dict(
            group='10',
            state='absent'
        ))
        result = self.execute_module(changed=False)
        self.assertEqual(
            result,
            {
                'commands': [],
                'changed': False
            }
        )

    def test_slxos_linkagg_invalid_argument(self, *args, **kwargs):
        """An unknown module parameter fails with the standard argspec error."""
        set_module_args(dict(
            group='10',
            shawshank='Redemption'
        ))
        result = self.execute_module(failed=True)
        self.assertEqual(result['failed'], True)
        self.assertTrue(re.match(
            r'Unsupported parameters for \((basic.pyc|basic.py)\) module: '
            'shawshank Supported parameters include: aggregate, group, '
            'members, mode, purge, state',
            result['msg']
        ))
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
19c155e9dee77e5313030207c70e8e1fbeeee78b | e9f40b2ae17b5bf7f7fba339b00cb59e2cce34fa | /python_basic/OO/class_and_instance/class_demo2.py | 1d6a488c04cb909e22e3d4d6b164df1d56394e27 | [] | no_license | linyouwei/pycharm | 0d8dbfd83fcc88077137bcbec063186ce0fb622c | 246fe3ab855f7614fd05f2d31239170077791822 | refs/heads/master | 2021-01-19T17:38:35.698089 | 2018-03-26T10:12:50 | 2018-03-26T10:12:50 | 101,077,696 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | #encoding=utf8
class Student(object):
def __init__(self,name,score):
self.name = name
self.score = score
def print_score(self):
print("%s:%s"%(self.name,self.score))
def get_grade(self):
if self.score >= 90:
return 'A'
elif self.score >= 60:
return 'B'
else:
return 'C'
if __name__ == "__main__":
bart1= Student("lin",100)
print(bart1.get_grade())
bart1.print_score()
| [
"yjlyw020150@163.com"
] | yjlyw020150@163.com |
ba0073deff1af9e08e786690c7e8f7d3324ce4af | 0905b794ccd3f3e4af9819a3c77505ba43067556 | /reporter/uhl_reports/bioresource/data_quality/redcap.py | aa242c6af30cc7fd9a9d3c4722227c3d1417558e | [
"MIT"
] | permissive | LCBRU/reporter | 57807fd358eee46d37c529e08baa1a76164588f8 | 8cb0ae403346e375a5e99d1d4df375cf2d5f3b81 | refs/heads/master | 2021-09-27T23:22:39.806232 | 2021-09-27T11:34:10 | 2021-09-27T11:34:10 | 88,853,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,494 | py | #!/usr/bin/env python3
from reporter.connections import RedcapInstance
from reporter.application_abstract_reports.redcap.percentage_complete import (
RedcapPercentageCompleteReport,
)
from reporter.application_abstract_reports.redcap.withdrawn_or_excluded_with_data import (
RedcapWithdrawnOrExcludedWithDataReport,
)
from reporter.emailing import (
RECIPIENT_BIORESOURCE_ADMIN as RECIPIENT_ADMIN,
RECIPIENT_BIORESOURCE_MANAGER as RECIPIENT_MANAGER,
RECIPIENT_IT_DQ,
)
from reporter.application_abstract_reports.redcap.web_data_quality import (
RedcapWebDataQuality,
)
from reporter.application_abstract_reports.redcap.data_quality import (
RedcapInvalidDate,
RedcapInvalidStudyNumber,
RedcapRecordInvalidStudyNumber,
RedcapInvalidHeightInCm,
RedcapInvalidHeightInFeetAndInches,
RedcapInvalidWeightInKg,
RedcapInvalidWeightInStonesAndPounds,
)
from reporter.core import Schedule
# Target REDCap project: project 9 on the internal RedcapInstance; shared by
# all the Bioresource report classes below.
REDCAP_PROJECT_ID = 9
REDCAP_INSTANCE = RedcapInstance.internal
class BioresRedcapPercentageCompleteReport(RedcapPercentageCompleteReport):
    """Bioresource configuration of the percentage-complete report."""

    def __init__(self):
        # Sent to the study admin and manager; never auto-scheduled.
        super().__init__(
            study_name='Bioresource',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
            schedule=Schedule.never,
        )
class BioresourceRedcapWithdrawnOrExcludedWithDataReport(
        RedcapWithdrawnOrExcludedWithDataReport):
    """Bioresource configuration of the withdrawn/excluded-with-data report."""

    def __init__(self):
        # Sent to the study admin and manager; never auto-scheduled.
        super().__init__(
            study_name='Bioresource',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
            schedule=Schedule.never,
        )
class BioresourceRedcapWebDataQuality(RedcapWebDataQuality):
    """Bioresource configuration of the web data-quality report."""

    def __init__(self):
        # Sent to the IT data-quality recipient; never auto-scheduled.
        super().__init__(
            redcap_instance=REDCAP_INSTANCE,
            project_id=REDCAP_PROJECT_ID,
            recipients=[RECIPIENT_IT_DQ],
            schedule=Schedule.never,
        )
class BioresourceRedcapInvalidDate(
        RedcapInvalidDate):
    """Bioresource configuration of the invalid-date report."""

    def __init__(self):
        super().__init__(
            redcap_instance=REDCAP_INSTANCE,
            project_id=REDCAP_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN],
            schedule=Schedule.never,
        )
class BioresourceRedcapInvalidStudyNumber(
        RedcapInvalidStudyNumber):
    """Bioresource configuration of the invalid-study-number field check."""

    def __init__(self):
        # Checks the 'record_id' field only.
        super().__init__(
            redcap_instance=REDCAP_INSTANCE,
            project_id=REDCAP_PROJECT_ID,
            fields=['record_id'],
            recipients=[RECIPIENT_ADMIN],
            schedule=Schedule.never,
        )
class BioresourceRedcapRecordInvalidStudyNumber(
        RedcapRecordInvalidStudyNumber):
    """Bioresource configuration of the record-level invalid-study-number check."""

    def __init__(self):
        super().__init__(
            redcap_instance=REDCAP_INSTANCE,
            project_id=REDCAP_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN],
            schedule=Schedule.never,
        )
class BioresourceRedcapInvalidHeightInCm(
        RedcapInvalidHeightInCm):
    """Bioresource configuration of the height-in-centimetres validity check."""

    def __init__(self):
        # Checks the 'your_height_centimetres' field.
        super().__init__(
            redcap_instance=REDCAP_INSTANCE,
            project_id=REDCAP_PROJECT_ID,
            fields=['your_height_centimetres'],
            recipients=[RECIPIENT_ADMIN],
            schedule=Schedule.never,
        )
class BioresourceRedcapInvalidHeightInFeetAndInches(
        RedcapInvalidHeightInFeetAndInches):
    """Bioresource configuration of the height-in-feet-and-inches validity check."""

    def __init__(self):
        # The base class pairs the two imperial height fields.
        super().__init__(
            redcap_instance=REDCAP_INSTANCE,
            project_id=REDCAP_PROJECT_ID,
            feet_field='your_height_feet',
            inches_field='your_height_inches',
            recipients=[RECIPIENT_ADMIN],
            schedule=Schedule.never,
        )
class BioresourceRedcapInvalidWeightInKg(
        RedcapInvalidWeightInKg):
    """Bioresource configuration of the weight-in-kilograms validity check."""

    def __init__(self):
        # Checks the 'your_weight_kg' field.
        super().__init__(
            redcap_instance=REDCAP_INSTANCE,
            project_id=REDCAP_PROJECT_ID,
            fields=['your_weight_kg'],
            recipients=[RECIPIENT_ADMIN],
            schedule=Schedule.never,
        )
class BioresourceRedcapInvalidWeightInStonesAndPounds(
        RedcapInvalidWeightInStonesAndPounds):
    """Bioresource configuration of the weight-in-stones-and-pounds validity check."""

    def __init__(self):
        # The base class pairs the two imperial weight fields.
        super().__init__(
            redcap_instance=REDCAP_INSTANCE,
            project_id=REDCAP_PROJECT_ID,
            stones_field='your_weight_stones',
            pounds_field='your_weight_pounds',
            recipients=[RECIPIENT_ADMIN],
            schedule=Schedule.never,
        )
| [
"rabramley@gmail.com"
] | rabramley@gmail.com |
f39c672ffe5160b7086bffe27ce2ab6182a9a372 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_RESOURCES/my-gists/__CONTAINER/_OLD/_python/file_to_string.py | 3a9ebee124dca93b69b2b27ba4087bae66ed4ea7 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 500 | py | #!/usr/bin/env python
import sys
filename = sys.argv[1]  # file to read, given as the first CLI argument

# Demo of six ways to slurp a file into one string.
# These do not remove \n
with open(filename) as f:
    s = "".join(f.readlines())

with open(filename) as f:
    s = "".join(f)  # a file object iterates its lines directly

with open(filename) as f:
    s = f.read()  # Fastest according to my tests.

# These remove \n
with open(filename) as f:
    s = " ".join(line.replace("\n", "") for line in f)

with open(filename) as f:
    s = " ".join(line.rstrip() for line in f)  # rstrip also drops trailing spaces/tabs

with open(filename) as f:
    s = f.read().replace("\n", "")
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
16b7fe171308835af2c635735c7bdd7d91120fb0 | 99052370591eadf44264dbe09022d4aa5cd9687d | /build/learning_ros/Part_5/joint_space_planner/catkin_generated/pkg.installspace.context.pc.py | be9fecd76557ed5f3b5b526f243f453739bb4c03 | [] | no_license | brucemingxinliu/ros_ws | 11b1a3e142132925d35b3adf929f1000392c5bdc | 45f7e553ea20b79e3e93af5f77a1b14b64184875 | refs/heads/master | 2021-01-24T03:36:47.043040 | 2018-02-26T00:53:37 | 2018-02-26T00:53:37 | 122,892,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# NOTE(review): per the header, this file is generated by catkin from
# pkg.context.pc.in — hand edits will be overwritten by the next build.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/toshiki/ros_ws/install/include".split(';') if "/home/toshiki/ros_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp".replace(';', ' ')  # space-separated catkin dependencies
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ljoint_space_planner".split(';') if "-ljoint_space_planner" != "" else []
PROJECT_NAME = "joint_space_planner"
PROJECT_SPACE_DIR = "/home/toshiki/ros_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"mxl592@case.edu"
] | mxl592@case.edu |
050887e1cf44aa7b4e8f04bd2184fd3b2d0d39cd | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/wiColorJ/pyr_Tcrop255_p60_j15/Sob_k29_s001/pyr_5s/L3/step10_a.py | fd4c5f48d0d58ef0a4dc147b9a5bf5e5b6cf01a9 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,899 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path (translated from the original Chinese comments).
import os
code_exe_path = os.path.realpath(__file__)  ### path of the step10 script currently being executed
code_exe_path_element = code_exe_path.split("\\")  ### split the path so we can find which level kong_model2 sits at (Windows-style separators)
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")  ### level of "kong_model2" within the path
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### absolute dir of kong_model2
import sys  ### add kong_model2 (and this script's dir) to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print("    code_exe_path:", code_exe_path)
# print("    code_exe_path_element:", code_exe_path_element)
# print("    code_dir:", code_dir)
# print("    kong_layer:", kong_layer)
# print("    kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer  ### the -1 converts a length into an index
# print("    kong_to_py_layer:", kong_to_py_layer)
# Build template_dir from the path components between kong_model2 and this script.
if  (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] used to strip the "step1x_" prefix; meaningful names are kept now, so 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] used to strip the "mask_" prefix (python modules cannot start with a digit); auto ordering is acceptable now, so 0
elif(kong_to_py_layer >  3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)  ### example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir  # results are grouped under this (possibly nested) folder name
#############################################################################################################################################################################################################
# Star-imports pull in the names used below (type8_blender_kong_doc3d_in_I_gt_MC,
# the ch032_pyramid_* model builders, G_sobel_k29_loss_info_builder, Range, ...).
from step06_a_datas_obj import *
from step09_5side_L3 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
# Remove this script's dir from sys.path and evict cached step09 modules —
# presumably so sibling experiment scripts re-import their own step09 variant
# afterwards; TODO confirm.
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
### English summary of the note below: exp_dir is the PARENT folder that all
### result_dir folders are created under; exp_dir may itself be nested, e.g.
### exp_dir = "6_mask_unet/<your_name>" puts results in
### 6_mask_unet/<your_name>/result_a, 6_mask_unet/<your_name>/result_b, ...
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
    6_mask_unet/自己命的名字/result_a
    6_mask_unet/自己命的名字/result_b
    6_mask_unet/自己命的名字/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_MC
use_loss_obj = [G_sobel_k29_loss_info_builder.set_loss_target("UNet_Mask").copy()]  ### the z, y, x order follows step07_b_0b_Multi_UNet
#############################################################
def _build_exp(model_obj, result_name=""):
    """Build the standard Exp_builder for one pyramid model.

    Every experiment in this file shares the same db object, loss object and
    training settings -- only the model object (and, for `empty`, the result
    name) differs -- so the previously copy-pasted builder chain is
    centralized here.

    Args:
        model_obj: a kong-model builder (ch032_pyramid_*) imported from step09_5side_L3.
        result_name: optional result folder name; "" keeps the default naming.

    Returns:
        A fully configured Exp_builder (caller still has to .build().run() it).
    """
    return (Exp_builder()
            .set_basic("train", use_db_obj, model_obj, use_loss_obj,
                       exp_dir=exp_dir, code_exe_path=code_exe_path,
                       describe_end=model_obj.kong_model.model_describe)
            .set_train_args(epochs=1)
            .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2,
                                 it_down_step="half", it_down_fq=900)
            .set_train_in_gt_use_range(use_in_range=Range(0, 1),
                                       use_gt_range=Range(0, 1))
            .set_result_name(result_name=result_name))
#############################################################
### Blank placeholder experiment so result_analyze can draw an empty figure.
empty = _build_exp(ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1, result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
##################################
### 1side1
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_1__2side_1__3side_1_4side_1_5s1 = _build_exp(ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1)
##################################
### 1side2
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_2__2side_1__3side_1_4side_1_5s1 = _build_exp(ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1)
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_2__2side_2__3side_1_4side_1_5s1 = _build_exp(ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1)
ch032_1side_2__2side_2__3side_2_4side_1_5s1 = _build_exp(ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1)
ch032_1side_2__2side_2__3side_2_4side_2_5s1 = _build_exp(ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1)
ch032_1side_2__2side_2__3side_2_4side_2_5s2 = _build_exp(ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2)
##################################
### 1side3
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_3__2side_1__3side_1_4side_1_5s1 = _build_exp(ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1)
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_3__2side_2__3side_1_4side_1_5s1 = _build_exp(ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1)
ch032_1side_3__2side_2__3side_2_4side_1_5s1 = _build_exp(ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1)
ch032_1side_3__2side_2__3side_2_4side_2_5s1 = _build_exp(ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1)
ch032_1side_3__2side_2__3side_2_4side_2_5s2 = _build_exp(ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2)
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_1side_3__2side_3__3side_1_4side_1_5s1 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1)
ch032_1side_3__2side_3__3side_2_4side_1_5s1 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1)
ch032_1side_3__2side_3__3side_2_4side_2_5s1 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1)
ch032_1side_3__2side_3__3side_2_4side_2_5s2 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2)
ch032_1side_3__2side_3__3side_3_4side_1_5s1 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1)
ch032_1side_3__2side_3__3side_3_4side_2_5s1 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1)
ch032_1side_3__2side_3__3side_3_4side_2_5s2 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2)
ch032_1side_3__2side_3__3side_3_4side_3_5s1 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1)
ch032_1side_3__2side_3__3side_3_4side_3_5s2 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2)
ch032_1side_3__2side_3__3side_3_4side_3_5s3 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3)
##################################
### 1side4
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_4__2side_1__3side_1_4side_1_5s1 = _build_exp(ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1)
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_4__2side_2__3side_1_4side_1_5s1 = _build_exp(ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1)
ch032_1side_4__2side_2__3side_2_4side_1_5s1 = _build_exp(ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1)
ch032_1side_4__2side_2__3side_2_4side_2_5s1 = _build_exp(ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1)
ch032_1side_4__2side_2__3side_2_4side_2_5s2 = _build_exp(ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2)
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_1side_4__2side_3__3side_1_4side_1_5s1 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1)
ch032_1side_4__2side_3__3side_2_4side_1_5s1 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1)
ch032_1side_4__2side_3__3side_2_4side_2_5s1 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1)
ch032_1side_4__2side_3__3side_2_4side_2_5s2 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2)
ch032_1side_4__2side_3__3side_3_4side_1_5s1 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1)
ch032_1side_4__2side_3__3side_3_4side_2_5s1 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1)
ch032_1side_4__2side_3__3side_3_4side_2_5s2 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2)
ch032_1side_4__2side_3__3side_3_4side_3_5s1 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1)
ch032_1side_4__2side_3__3side_3_4side_3_5s2 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2)
ch032_1side_4__2side_3__3side_3_4side_3_5s3 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3)
# 1 3 6 "10" 15 21 28 36 45 55
# 2side4 OK 20
ch032_1side_4__2side_4__3side_1_4side_1_5s1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1)
ch032_1side_4__2side_4__3side_2_4side_1_5s1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1)
ch032_1side_4__2side_4__3side_2_4side_2_5s1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1)
ch032_1side_4__2side_4__3side_2_4side_2_5s2 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2)
ch032_1side_4__2side_4__3side_3_4side_1_5s1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1)
ch032_1side_4__2side_4__3side_3_4side_2_5s1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1)
ch032_1side_4__2side_4__3side_3_4side_2_5s2 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2)
ch032_1side_4__2side_4__3side_3_4side_3_5s1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1)
ch032_1side_4__2side_4__3side_3_4side_3_5s2 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2)
ch032_1side_4__2side_4__3side_3_4side_3_5s3 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3)
ch032_1side_4__2side_4__3side_4_4side_1_5s1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1)
ch032_1side_4__2side_4__3side_4_4side_2_5s1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1)
ch032_1side_4__2side_4__3side_4_4side_2_5s2 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2)
ch032_1side_4__2side_4__3side_4_4side_3_5s1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1)
ch032_1side_4__2side_4__3side_4_4side_3_5s2 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2)
ch032_1side_4__2side_4__3side_4_4side_3_5s3 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3)
ch032_1side_4__2side_4__3side_4_4side_4_5s1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1)
ch032_1side_4__2side_4__3side_4_4side_4_5s2 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2)
ch032_1side_4__2side_4__3side_4_4side_4_5s3 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3)
ch032_1side_4__2side_4__3side_4_4side_4_5s4 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4)
#############################################################
if(__name__ == "__main__"):
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Run directly (press F5, or `python step10_b1_exp_obj_load_and_train_and_test.py`
        ### with NO extra argument): train the default experiment below and exit, so we never
        ### fall through to the dispatch code meant for step10_b_subprocess.py~~~
        ch032_1side_1__2side_1__3side_1_4side_1_5s1.build().run()
        # print('no argument')
        sys.exit()
    ### The line below serves step10_b_subprocess.py; it is equivalent to running
    ### `python step10_b1_exp_obj_load_and_train_and_test.py "some_exp.build().run()"` from cmd.
    ### SECURITY NOTE(review): eval() executes an arbitrary command-line string --
    ### only invoke this script with trusted arguments (internal subprocess driver only).
    eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
7f0f66410133136707d71e99d19bc7bc6c5702bd | 35271f6bd874799df9a93dbe5bcc50272b619dc1 | /ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py | 7f3646cb6e7ef201b2942163c3ed1d7e44f6a136 | [
"MIT"
] | permissive | aladdinpersson/Machine-Learning-Collection | c724186b64ae52efa6f9d4e97f37477900901d35 | 558557c7989f0b10fee6e8d8f953d7269ae43d4f | refs/heads/master | 2023-08-31T20:52:06.493437 | 2023-03-21T11:44:08 | 2023-03-21T11:44:08 | 250,184,708 | 5,653 | 2,543 | MIT | 2023-09-02T03:51:36 | 2020-03-26T07:02:40 | Python | UTF-8 | Python | false | false | 5,541 | py | """
Example code of a simple RNN, GRU, LSTM on the MNIST dataset.
Programmed by Aladdin Persson <aladdin.persson at hotmail dot com>
* 2020-05-09 Initial coding
* 2022-12-16 Updated with more detailed comments, docstrings to functions, and checked code still functions as intended.
"""
# Imports
import torch
import torch.nn.functional as F # Parameterless functions, like (some) activation functions
import torchvision.datasets as datasets # Standard datasets
import torchvision.transforms as transforms # Transformations we can perform on our dataset for augmentation
from torch import optim # For optimizers like SGD, Adam, etc.
from torch import nn # All neural network modules
from torch.utils.data import (
DataLoader,
) # Gives easier dataset managment by creating mini batches etc.
from tqdm import tqdm # For a nice progress bar!
# Set device: use the GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Hyperparameters
input_size = 28  # each row of a 28x28 MNIST image is one time-step input
hidden_size = 256  # width of the recurrent hidden state
num_layers = 2  # number of stacked recurrent layers
num_classes = 10  # digits 0-9
sequence_length = 28  # 28 rows per image -> 28 time steps
learning_rate = 0.005
batch_size = 64
num_epochs = 3
# Recurrent neural network (many-to-one)
class RNN(nn.Module):
    """Many-to-one vanilla RNN classifier.

    Each 28x28 MNIST image is treated as a length-28 sequence of 28-dim
    rows; the logits are computed from the concatenated hidden states of
    every time step.
    """

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
        # The classifier sees all time steps, hence hidden_size * sequence_length.
        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)

    def forward(self, x):
        """Run the RNN over ``x`` (batch, seq, feature) and return class logits."""
        batch_size = x.size(0)
        # Zero-initialised hidden state: one per layer per sample.
        initial_hidden = torch.zeros(
            self.num_layers, batch_size, self.hidden_size
        ).to(device)
        states, _ = self.rnn(x, initial_hidden)
        flattened = states.reshape(batch_size, -1)
        return self.fc(flattened)
# Recurrent neural network with GRU (many-to-one)
class RNN_GRU(nn.Module):
    """Many-to-one GRU classifier over row-sequences of MNIST images."""

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
        # The classifier sees all time steps, hence hidden_size * sequence_length.
        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)

    def forward(self, x):
        """Run the GRU over ``x`` (batch, seq, feature) and return class logits."""
        batch_size = x.size(0)
        # Zero-initialised hidden state: one per layer per sample.
        initial_hidden = torch.zeros(
            self.num_layers, batch_size, self.hidden_size
        ).to(device)
        states, _ = self.gru(x, initial_hidden)
        return self.fc(states.reshape(batch_size, -1))
# Recurrent neural network with LSTM (many-to-one)
class RNN_LSTM(nn.Module):
    """Many-to-one LSTM classifier over row-sequences of MNIST images."""

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        # The classifier sees all time steps, hence hidden_size * sequence_length.
        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)

    def forward(self, x):
        """Run the LSTM over ``x`` (batch, seq, feature) and return class logits."""
        batch_size = x.size(0)
        # LSTMs carry both a hidden state and a cell state.
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_size).to(device)
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_size).to(device)
        # states: (batch_size, seq_length, hidden_size)
        states, _ = self.lstm(x, (h0, c0))
        return self.fc(states.reshape(batch_size, -1))
# Load Data: MNIST train/test splits, converted to tensors on the fly.
train_dataset = datasets.MNIST(
    root="dataset/", train=True, transform=transforms.ToTensor(), download=True
)
test_dataset = datasets.MNIST(
    root="dataset/", train=False, transform=transforms.ToTensor(), download=True
)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
# Initialize network (try out just using simple RNN, or GRU, and then compare with LSTM)
model = RNN_LSTM(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Train Network
for epoch in range(num_epochs):
    # batch_idx is unused; enumerate(tqdm(...)) also drives the progress bar.
    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
        # Get data to cuda if possible.
        # squeeze(1) drops the channel dim: (N, 1, 28, 28) -> (N, 28, 28),
        # so each image becomes a 28-step sequence of 28-dim rows.
        data = data.to(device=device).squeeze(1)
        targets = targets.to(device=device)
        # forward
        scores = model(data)
        loss = criterion(scores, targets)
        # backward: clear stale gradients before accumulating new ones
        optimizer.zero_grad()
        loss.backward()
        # gradient descent update step/adam step
        optimizer.step()
# Check accuracy on training & test to see how good our model
def check_accuracy(loader, model):
    """Return the fraction of samples in ``loader`` that ``model`` classifies correctly."""
    correct = 0
    total = 0

    model.eval()  # disable training-only behaviour while scoring

    with torch.no_grad():
        for images, labels in loader:
            images = images.to(device=device).squeeze(1)
            labels = labels.to(device=device)

            scores = model(images)
            _, predicted = scores.max(1)
            correct += (predicted == labels).sum()
            total += predicted.size(0)

    model.train()  # restore training mode for the caller
    return correct / total
# Report final accuracy as percentages.
# Fix: the first print used the format spec ":2f" (field width 2) instead of
# ":.2f" (two decimal places), producing e.g. "97.833333" instead of "97.83".
print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
| [
"aladdin.persson@hotmail.com"
] | aladdin.persson@hotmail.com |
0b19dbee65c1f62954819d4263d13f2b84de00f3 | c1c5a8dc79cacf3b419bad77881213c5db2f80c3 | /Kattis/Appalling_Architecture.py | 21abef0663c911871759173c181ea7ffba02d6ae | [] | no_license | EoinDavey/Competitive | 7ff8b6b6225814ac60c3ace659bb63190eb52420 | b2b6909b93f5c073b684477f8a4b06dac22ec678 | refs/heads/master | 2023-01-08T00:06:19.076941 | 2022-12-26T14:00:31 | 2022-12-26T14:00:31 | 67,259,478 | 17 | 1 | null | 2022-01-19T18:17:59 | 2016-09-02T22:46:26 | C++ | UTF-8 | Python | false | false | 427 | py | h, w = [int(x) for x in input().split()]
# Centre-of-mass balance check: `h`/`w` (grid size) were read above.
bottom_row = ""
weighted_sum = 0   # sum of column indices of every filled cell
block_count = 0    # number of filled cells in the whole structure
for _ in range(h):
    bottom_row = input()  # after the loop this holds the base (last) row
    for col in range(w):
        if bottom_row[col] == '.':
            continue
        weighted_sum += col
        block_count += 1

# Left edge of the support: half a cell left of the first filled base cell.
support_left = 0
for col in range(w):
    if bottom_row[col] != '.':
        support_left = col - 0.5
        break

# Right edge of the support: half a cell right of the last filled base cell.
support_right = 0
for col in range(w):
    if bottom_row[col] != '.':
        support_right = col + 0.5

# Compare the (unnormalised) centre of mass against the support interval.
if weighted_sum < support_left * block_count:
    print("left")
elif weighted_sum > support_right * block_count:
    print("right")
else:
    print("balanced")
| [
"eoind@vey.ie"
] | eoind@vey.ie |
5631396d7dea1af0dcda2adcf10850ca7ac4ff9b | b7cb9fe22b6fc5a5670170eed0cc5d366878a5c1 | /dataloader.py | e48cf62a0ad25fcd2a4b05151d6a4bf8db6ba577 | [] | no_license | sutt/sirna-utils | 95dce2582ccc5619deaf335526894c0d3048a8bf | d22eb6244b46e6e26dc96370eae2f0166970cd91 | refs/heads/master | 2020-06-16T21:50:48.770016 | 2019-07-12T13:31:22 | 2019-07-12T13:31:22 | 195,713,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,707 | py | import os, sys, copy
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import cv2
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
# Pick data directories based on where we run.
# Heuristic: GCP notebook VMs run as the 'jupyter' user — TODO confirm this
# still holds for the deployment in use.
if os.environ.get('USER', 'na') == 'jupyter':
    #gcp
    CSV_DATA_DIR = 'data/csvdata/'
    IMG_DATA_DIR = 'data/'
else:
    #local
    CSV_DATA_DIR = 'data_raw/'
    IMG_DATA_DIR = 'sample_imgs/'
class DataLoader:
    '''
    Load Dataset utilities.

    Terminology for id<x>-dicts:
        ida - exp, plate, well
        idb - exp, plate, well, site(2)
        idc - exp, plate, well, site(2), channel(6)
        (indexes for site and channel start at 1)
    Other terminology:
        row      - index of train or train + train_controls
        l_imgs   - list of imgs (for 6 channels)
        l_l_imgs - list of list of imgs
                   (for multiple sites, and all channels within)
    Method naming:
        <method>_df  returns a dataframe
        <method>_img returns a img (numpy array)
    '''

    def __init__(self,
                 csv_data_dir=None,
                 img_data_dir=None,
                 prepend_msg=False,
                 ):
        '''Load the metadata CSVs.

        Directory defaults are late-bound (``None`` -> module constants) so
        runtime changes to CSV_DATA_DIR / IMG_DATA_DIR are honoured; default
        behaviour is unchanged.
        '''
        self.IMG_DATA_DIR = IMG_DATA_DIR if img_data_dir is None else img_data_dir
        self.CSV_DATA_DIR = CSV_DATA_DIR if csv_data_dir is None else csv_data_dir
        self.prepend_msg = prepend_msg
        # Metadata tables shipped with the dataset.
        self.train_control = pd.read_csv(self.CSV_DATA_DIR + 'train_controls.csv')
        self.test_control = pd.read_csv(self.CSV_DATA_DIR + 'test_controls.csv')
        self.train = pd.read_csv(self.CSV_DATA_DIR + 'train.csv')
        self.test = pd.read_csv(self.CSV_DATA_DIR + 'test.csv')
        self.pixel_stats = pd.read_csv(self.CSV_DATA_DIR + 'pixel_stats.csv')
        self.sample_submission = pd.read_csv(self.CSV_DATA_DIR + 'sample_submission.csv')
        self.exp_sirna = self.train['sirna']

    def msg(self, on=False):
        '''alter the msg on/off'''
        self.prepend_msg = on

    @staticmethod
    def random(x, k=1):
        '''Return ``k`` random elements of ``x`` as an ndarray.

        Bug fix: ``np.random.choice`` takes ``size=``, not ``k=``; the old
        call raised a TypeError whenever it was used.
        '''
        return np.random.choice(x, size=k)

    # utility methods ---------
    def row_to_ida(self, row):
        '''Return an ida dict (experiment, plate, well) for ``row`` in train.

        [ ] TODO - add train_controls here
        '''
        s_experiment = self.train.loc[row, 'experiment']
        s_plate = self.train.loc[row, 'plate']
        s_well = self.train.loc[row, 'well']
        return {
            'experiment': s_experiment,
            'plate': s_plate,
            'well': s_well,
        }

    @staticmethod
    def train_id_to_ida(train_id):
        '''Parse a train-table id_code ("<exp>_<plate>_<well>") into an ida dict.'''
        ida = {}
        for k, v in zip(
                ('experiment', 'plate', 'well'),
                train_id.split('_')
        ):
            ida[k] = v
        return ida

    @staticmethod
    def ida_to_idb(ida, site):
        '''Add ``site`` to the id-dict (in place) and return it.'''
        ida['site'] = site
        return ida

    @staticmethod
    def ida_to_idbs(ida):
        '''Return a list of idb dicts, one per site (1 and 2).'''
        idbs = []
        for site in range(1, 3):
            _idb = ida.copy()
            _idb['site'] = site
            idbs.append(_idb)
        return idbs

    @staticmethod
    def idb_to_idc(idb, channel):
        '''Add ``channel`` to the id-dict (in place) and return it.'''
        idb['channel'] = channel
        return idb

    @staticmethod
    def idb_to_idcs(idb):
        '''Return a list of idc dicts, one per channel (1..6).'''
        idcs = []
        for channel in range(1, 7):
            _idc = idb.copy()
            _idc['channel'] = channel
            idcs.append(_idc)
        return idcs

    @staticmethod
    def ida_to_idc(ida, site, channel):
        '''Add a specific ``site`` and ``channel`` to the id-dict and return it.

        Bug fix: 'site' was previously assigned the *channel* value.
        '''
        ida['site'] = site
        ida['channel'] = channel
        return ida

    @classmethod
    def ida_to_idcs(cls, ida):
        '''Return a list (per site) of lists (per channel) of idc dicts.'''
        idbs = cls.ida_to_idbs(ida)
        return [cls.idb_to_idcs(e) for e in idbs]

    def idc_to_fn(self, experiment, plate, well, site, channel):
        '''Return the image file path (str) for an idc given as unpacked kwargs.'''
        return os.path.join(
            self.IMG_DATA_DIR,
            'train',
            experiment,
            ('Plate' + str(plate)),
            (well +
             '_s' + str(site) +
             '_w' + str(channel)
             + '.png'
             )
        )

    def idcs_to_fns(self, idcs):
        '''Return a list of file paths, one per idc.'''
        return [self.idc_to_fn(**idc) for idc in idcs]

    @staticmethod
    def load_img(fn, npix=512):
        '''Load ``fn`` via cv2 as a grayscale image, resized to npix x npix.'''
        img = cv2.imread(fn)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if npix != 512:
            img = cv2.resize(img, (npix, npix))
        return img

    def load_img_from_idc(self, idc):
        '''Load the single image addressed by ``idc``.'''
        img = self.load_img(self.idc_to_fn(**idc))
        return img

    def load_img_from_idcs(self, idcs):
        '''Return a list of images, one per idc.

        Bug fix: the idc dict must be splatted into ``idc_to_fn`` (it was
        passed positionally), and the list must actually be returned.
        '''
        l_imgs = [self.load_img(self.idc_to_fn(**idc)) for idc in idcs]
        return l_imgs

    def load_img_from_l_idc(self, l_idcs):
        '''Return a list (per site) of lists (per channel) of images.'''
        l_l_imgs = [[self.load_img_from_idc(idc) for idc in idcs]
                    for idcs in l_idcs
                    ]
        return l_l_imgs

    # analytic methods -------------
    def get_two_sites_idc(self, sirna=None, experiment=None):
        '''Return idc lists for both sites sharing the same experiment/sirna well.

        TODO
        [ ] better code for indexing train
        [ ] allow well to be an argument
        '''
        try:
            # index of the first train row matching both sirna and experiment
            row = list(
                (self.train['sirna'] == sirna).mul(
                    self.train['experiment'] == experiment)
            ).index(True)
            assert isinstance(row, int)
            assert row >= 0
        except Exception as e:
            print(f'failed to find row for exp {experiment}, sirna: {sirna}')
            raise e
        ida = self.row_to_ida(row)
        l_idcs = self.ida_to_idcs(ida)
        return l_idcs

    def get_controls_df(self, experiment, plate, well=None, site=None, channel=None):
        '''Return a df of all the controls for a particular experiment+plate.'''
        return (self.train_control[
            (self.train_control['experiment'] == experiment).mul(
                (self.train_control['plate'] == plate))
        ])

    def get_neg_controls_df(self, experiment, plate, well=None, site=None, channel=None):
        '''Return a df of all the negative controls for a particular experiment+plate.'''
        controls_df = self.get_controls_df(experiment, plate)
        neg_df = controls_df[controls_df['well_type'] == 'negative_control']
        return neg_df

    def get_pos_controls_df(self, experiment, plate, well=None, site=None, channel=None):
        '''Return a df of all the positive controls for a particular experiment+plate.'''
        controls_df = self.get_controls_df(experiment, plate)
        pos_df = controls_df[controls_df['well_type'] == 'positive_control']
        return pos_df

    def get_neg_controls_img(self, experiment, plate, **kwargs):
        '''Return l_l_imgs for the negative controls of a particular experiment+plate.'''
        df = self.get_neg_controls_df(experiment, plate)
        idas = [self.train_id_to_ida(e) for e in df['id_code'].tolist()]
        # only the first control well is used for now
        l_idcs = [self.ida_to_idcs(e) for e in idas][0]  # bug fix - for now
        l_l_imgs = self.load_img_from_l_idc(l_idcs)
        return l_l_imgs

    def get_all_sirna_df(self, sirna):
        '''Return the train rows with a certain sirna.'''
        return self.train[(self.train['sirna'] == sirna)]
def test_basic():
    """Smoke test: constructing a DataLoader must not raise."""
    DataLoader()
def test_method_1():
    """Placeholder test that always passes."""
    assert True
| [
"wsutton17@gmail.com"
] | wsutton17@gmail.com |
f7e1145af886703fccb6c648225954a86303bf15 | 84f7ab8ae18acda2b15c3118ac18c4e8c0df1a73 | /tests/evaluator_test.py | 560752062aa9f9cb9a5bf3d2f5a95a63dfff39d9 | [
"Apache-2.0"
] | permissive | lejarx/gafe | 35ef45ec041d7bd76c973c841a01a478b4ba137c | 125d587e39dd2eb94fba6667fffa6d07e508542f | refs/heads/master | 2021-07-08T15:46:12.657591 | 2017-10-03T08:47:59 | 2017-10-03T08:47:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | '''
Evaluator tests.
'''
import os
import unittest
import numpy as np
from sklearn.datasets import load_iris
from gafe.evaluator import Evaluator
class EvaluatorTest(unittest.TestCase):
    """Unit tests for :class:`gafe.evaluator.Evaluator`."""

    def test_init(self):
        """A fresh Evaluator defaults to neg_log_loss and 5 CV folds."""
        evaluator = Evaluator()
        self.assertEqual(evaluator._metric, 'neg_log_loss')
        self.assertEqual(evaluator._cv_folds, 5)

    def test_evolve(self):
        """Evaluating on iris should score well, with or without a validation set."""
        iris = load_iris()
        features = iris.data
        labels = iris.target
        evaluator = Evaluator()
        self.assertTrue(evaluator.evaluate(features, labels) < 0.25)
        self.assertTrue(
            evaluator.evaluate(features, labels, X_vald=features, y_vald=labels) < 0.25
        )
| [
"pplonski86@gmail.com"
] | pplonski86@gmail.com |
9de54d8964ab1708694daac3c7a203950e994384 | c55074cae33470f6a6f994b6029425a85818496e | /ci/push/push_request_status.py | ecdf79408a0a7f0f92ca547ca33b282c1840756e | [
"BSD-3-Clause"
] | permissive | marcalbaladejo/CumulusCI | 4ebf590e71f4847f157b33f47da775070e0c7feb | f619d0d984f7bbfa3c6fcd6e33e41e64105cb4f5 | refs/heads/master | 2021-01-18T18:45:06.098227 | 2018-05-28T12:58:22 | 2018-05-28T12:58:22 | 62,956,012 | 1 | 1 | BSD-3-Clause | 2018-05-28T12:58:23 | 2016-07-09T15:32:32 | Python | UTF-8 | Python | false | false | 3,568 | py | import os
import sys
import csv
import time
from push_api import SalesforcePushApi
# Force UTF8 output
reload(sys)
sys.setdefaultencoding('UTF8')
completed_statuses = ['Succeeded','Failed','Cancelled']
if __name__ == '__main__':
    try:
        # Salesforce credentials and the target push request come from the
        # environment (this script is Python 2).
        username = os.environ.get('SF_USERNAME')
        password = os.environ.get('SF_PASSWORD')
        serverurl = os.environ.get('SF_SERVERURL')
        push_request_id = os.environ.get('PUSH_REQUEST')
        subscriber_where = os.environ.get('SUBSCRIBER_WHERE', None)
        # Scope all PushRequest queries to the requested id; optionally narrow
        # subscribers with a caller-supplied WHERE clause.
        default_where = {'PackagePushRequest': "Id = '%s'" % push_request_id}
        if subscriber_where:
            default_where['PackageSubscriber'] = subscriber_where
        push_api = SalesforcePushApi(username, password, serverurl, lazy=['subscribers','jobs'], default_where=default_where)
        push_request = push_api.get_push_request_objs("Id = '%s'" % push_request_id, limit=1)[0]
        interval = 10
        if push_request.status not in completed_statuses:
            print 'Push request is not yet complete. Polling for status every %s seconds until completion...' % interval
        # Poll until the request reaches a terminal status; after 10 polls,
        # back off to a 60-second interval.
        i = 0
        while push_request.status not in completed_statuses:
            if i == 10:
                print 'This is taking a while! Polling every 60 seconds...'
                interval = 60
            time.sleep(interval)
            # Clear the method level cache on get_push_requests and get_push_request_objs
            # so the next fetch hits the API instead of returning stale data.
            push_api.get_push_requests.cache.clear()
            push_api.get_push_request_objs.cache.clear()
            # Get the push_request again
            push_request = push_api.get_push_request_objs("Id = '%s'" % push_request_id, limit=1)[0]
            print push_request.status
            i += 1
        # Bucket the jobs by terminal status for the summary line.
        failed_jobs = []
        success_jobs = []
        cancelled_jobs = []
        jobs = push_request.get_push_job_objs()
        for job in jobs:
            if job.status == 'Failed':
                failed_jobs.append(job)
            elif job.status == 'Succeeded':
                success_jobs.append(job)
            elif job.status == 'Cancelled':
                cancelled_jobs.append(job)
        print "Push complete: %s succeeded, %s failed, %s cancelled" % (len(success_jobs),len(failed_jobs),len(cancelled_jobs))
        # Group failures by (type, title, message, details) so identical
        # errors are reported once with a count.
        failed_by_error = {}
        for job in failed_jobs:
            errors = job.get_push_error_objs()
            for error in errors:
                error_key = (error.error_type, error.title, error.message, error.details)
                if error_key not in failed_by_error:
                    failed_by_error[error_key] = []
                failed_by_error[error_key].append(error)
        if failed_jobs:
            print ""
            print "-----------------------------------"
            print "Failures by error type"
            print "-----------------------------------"
            for key, errors in failed_by_error.items():
                print " "
                print "%s failed with..." % (len(errors))
                print "    Error Type = %s" % key[0]
                print "    Title = %s" % key[1]
                print "    Message = %s" % key[2]
                print "    Details = %s" % key[3]
    except SystemExit:
        # Propagate an intentional exit as status 1.
        sys.exit(1)
    except:
        # Any other failure: dump the traceback to stdout and exit 2 so CI
        # can distinguish script errors from push failures.
        import traceback
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print '-'*60
        traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
        print '-'*60
        sys.exit(2)
| [
"jlantz@salesforce.com"
] | jlantz@salesforce.com |
f3f0a5e9be18f742ea1dfe2ba0c45ba7c077fe17 | 92209cc6de47e868dfaddae2e61048e40c7dfe66 | /irc3/dec.py | 8a969e4b7512fb7d818f2122dff763ef1c53f483 | [
"LicenseRef-scancode-ietf",
"CC-BY-3.0"
] | permissive | valhallasw/irc3 | 5f2305ca6c8df764da9a2ed0ba2eb1dda67dfb1f | 628d1345cb5b09d90b087ae23d5caf26b25a2e7d | refs/heads/master | 2020-12-28T23:15:30.414896 | 2014-12-23T18:01:19 | 2014-12-23T18:01:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,168 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import functools
import venusian
import re
def plugin(wrapped):
    """Class decorator marking ``wrapped`` as an irc3 (non-irc3d) plugin."""
    wrapped.__irc3_plugin__ = True
    wrapped.__irc3d_plugin__ = False
    return wrapped
class event(object):
    """register a method or function an irc3 event callback::

        >>> @event('^:\S+ 353 [^&#]+(?P<channel>\S+) :(?P<nicknames>.*)')
        ... def on_names(bot, channel=None, nicknames=None):
        ...     '''this will catch nickname when you enter a channel'''
        ...     print(channel, nicknames.split(':'))

    The callback can be either a function or a plugin method

    If you specify the `iotype` parameter to `"out"` then the event will be
    triggered when the regexp match something **sent** by the bot.

    For example this event will repeat private messages sent by the bot to the
    `#irc3` channel::

        >>> @event(r'PRIVMSG (?P<target>[^#]+) :(?P<data>.*)', iotype='out')
        ... def msg3(bot, target=None, data=None):
        ...     bot.privmsg('#irc3',
        ...                 '<{0}> {1}: {2}'.format(bot.nick, target, data))
    """

    # Kept as a class attribute so tests can substitute a fake venusian.
    venusian = venusian

    def __init__(self, regexp, callback=None, iotype='in',
                 venusian_category='irc3.rfc1459'):
        # Fail fast on an invalid pattern, re-raising the same exception type
        # with the offending pattern text appended for easier debugging.
        try:
            re.compile(getattr(regexp, 're', regexp))
        except Exception as e:
            raise e.__class__(str(e) + ' in ' + getattr(regexp, 're', regexp))
        self.regexp = regexp
        self.iotype = iotype
        self.callback = callback
        self.venusian_category = venusian_category

    def async_callback(self, kwargs):  # pragma: no cover
        # Forwards the parsed match groups to the stored callback.
        return self.callback(**kwargs)

    def compile(self, config):
        # (Shadows the ``compile`` builtin; name kept for API compatibility.)
        # Substitute {placeholders} from the bot config before compiling.
        regexp = getattr(self.regexp, 're', self.regexp)
        if config:
            regexp = regexp.format(**config)
        self.cregexp = re.compile(regexp)

    def __call__(self, wrapped):
        # Deferred registration: venusian invokes ``callback`` at scan time,
        # when the bot instance is available as ``context.context``.
        def callback(context, name, ob):
            obj = context.context
            if info.scope == 'class':
                # Plugin method: bind to the plugin instance owned by the bot.
                self.callback = getattr(
                    obj.get_plugin(ob),
                    wrapped.__name__)
            else:
                # Plain function: inject the bot as the first argument.
                @functools.wraps(wrapped)
                def wrapper(**kwargs):
                    return wrapped(obj, **kwargs)
                self.callback = wrapper
            # a new instance is needed to keep this related to *one* bot
            # instance
            e = self.__class__(self.regexp, self.callback,
                               venusian_category=self.venusian_category,
                               iotype=self.iotype)
            obj.attach_events(e)
        info = self.venusian.attach(wrapped, callback,
                                    category=self.venusian_category)
        return wrapped

    def __repr__(self):
        s = getattr(self.regexp, 'name', self.regexp)
        name = self.__class__.__name__
        return '<bound {0} {1} to {2}>'.format(name, s, self.callback)
def extend(func):
    """Allow to extend a bot:

    Create a module with some usefull routine:

    .. literalinclude:: ../examples/myextends.py
    ..
        >>> import sys
        >>> sys.path.append('examples')
        >>> from irc3 import IrcBot
        >>> IrcBot.defaults.update(async=False, testing=True)

    Now you can use those routine in your bot::

        >>> bot = IrcBot()
        >>> bot.include('myextends')
        >>> print(bot.my_usefull_function(1))
        my_usefull_function(*(1,))
        >>> print(bot.my_usefull_method(2))
        my_usefull_method(*(2,))
    """
    # Deferred: venusian calls this at scan time with the bot as
    # ``context.context`` and binds ``func`` onto it as a method.
    def callback(context, name, ob):
        obj = context.context
        if info.scope == 'class':
            # Plugin method: look the plugin instance up lazily on each call,
            # then delegate to its bound method.
            @functools.wraps(func)
            def f(self, *args, **kwargs):
                plugin = obj.get_plugin(ob)
                return getattr(plugin, func.__name__)(*args, **kwargs)
            setattr(obj, func.__name__, f.__get__(obj, obj.__class__))
        else:
            # Plain function: bind it directly to the bot instance.
            setattr(obj, func.__name__, func.__get__(obj, obj.__class__))
    info = venusian.attach(func, callback, category='irc3.extend')
    return func
| [
"gael@gawel.org"
] | gael@gawel.org |
a858f211dff6a15e0d298437b89542752845bdc2 | a33098d9f7f7402d07c7bb0663e260cab4772fd2 | /src/users/posts/form.py | 2dc9c69af1abf8af4a88703290b5b9829adf8efe | [] | no_license | EgbieAndersonUku1/myBlog | 7906803c5c2f4300f1bcc672f397045894cc65b2 | e4344064012aefa79042ba8d39911b29fb5b7554 | refs/heads/master | 2018-09-08T09:28:25.532806 | 2018-06-04T22:45:48 | 2018-06-04T22:45:48 | 106,434,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | from flask_ckeditor import CKEditorField
from flask_wtf.file import FileField, FileAllowed
from users.base.base_ckeditor import BaseCKEditorForm
from wtforms import validators
class PostForm(BaseCKEditorForm):
    """Form for creating/editing a blog post: rich-text body plus optional image."""

    # Rich-text body rendered by CKEditor; required.
    post = CKEditorField('body', validators=[validators.DataRequired()])
    # Optional illustration; extensions restricted to common image formats.
    image = FileField('Post image', validators=[FileAllowed(['png', 'jpeg', 'jpg', 'gif'],
                                                            'Only the file extension jpg, png, gif and jpeg are allowed')])
| [
"jayunderwood2011@hotmail.com"
] | jayunderwood2011@hotmail.com |
ef1b5b110530027b90cb6abec967fb2dd7351f1a | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/tests/class_setattr.py | 2c965ee92a1f411d2b66a16ec117e84a366a141f | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 365 | py | # expected: fail
class C(object):
    pass

# Make sure we can't skirt the tp_slot-updating logic in type.__setattr__
# by trying to use object.__setattr__ which wouldn't do the internal bookkeeping:
def badrepr():
    raise Exception()

c = C()
c.a = 1
try:
    # CPython rejects object.__setattr__ on types: it raises TypeError
    # instead of silently installing the attribute (this is Python 2 code).
    object.__setattr__(C, '__repr__', badrepr)
    assert 0
except TypeError as e:
    print e
# Instance attribute assignment must still work afterwards.
c.b = 2
| [
"kmod@dropbox.com"
] | kmod@dropbox.com |
2255b0382b5184a05622eb39f3305addd69f4232 | 40f5c1236a1b39ffb1d2289f7d71d6c9290ee4fd | /torch/distributed/_sharded_tensor/utils.py | 95dec9129d23b4c105f7473e9633d67697d0382d | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | kouyoumin/pytorch | b8edc5c29d8f32b2e587eb78e9a176150b21dfd7 | 3315c4b31ef582891149a7c7c05cfda8c50642f3 | refs/heads/master | 2021-12-14T18:35:48.849213 | 2021-11-29T05:25:56 | 2021-11-29T05:35:53 | 206,223,149 | 0 | 1 | NOASSERTION | 2019-09-04T03:28:21 | 2019-09-04T03:28:21 | null | UTF-8 | Python | false | false | 10,087 | py | import collections.abc
from contextlib import contextmanager
from typing import Optional, List, Tuple, Sequence
import torch
from torch.distributed import distributed_c10d
from torch.distributed import rpc
from torch.distributed._sharding_spec import (
ShardMetadata,
)
from torch.distributed._sharding_spec._internals import (
check_tensor,
validate_non_overlapping_shards_metadata,
)
from .metadata import TensorProperties, ShardedTensorMetadata
from .shard import Shard
# Tracks the current process group in the load context manager.
_CURRENT_PROCESS_GROUP = None
@contextmanager
def load_with_process_group(process_group):
    """
    Context manager to set the process group with which to load a ShardedTensor.
    """
    global _CURRENT_PROCESS_GROUP
    # Nesting is disallowed: a second enter while one is active is an error.
    if _CURRENT_PROCESS_GROUP is not None:
        raise RuntimeError(
            'ProcessGroup already set by previous "load_with_process_group" '
            'context manager')
    _CURRENT_PROCESS_GROUP = process_group
    try:
        yield process_group
    finally:
        # Always clear on exit, even if the body raised.
        _CURRENT_PROCESS_GROUP = None
def get_current_process_group():
    """Return the process group set by ``load_with_process_group``.

    Falls back to the default process group when no load context is active.
    """
    if _CURRENT_PROCESS_GROUP is None:
        return distributed_c10d._get_default_group()
    return _CURRENT_PROCESS_GROUP
def _parse_and_validate_remote_device(pg, remote_device):
    """Split ``remote_device`` into (rank, device) and validate both parts.

    When the placement names an RPC worker, the worker name is resolved to
    its rank via the current RPC agent.
    """
    worker_name = remote_device.worker_name()
    rank = remote_device.rank()
    device = remote_device.device()
    # Validate rank, skip validation if rank is not part of process group.
    if not distributed_c10d._rank_not_in_group(pg):
        if rank is not None and (rank < 0 or rank >= distributed_c10d.get_world_size(pg)):
            raise ValueError(f'Invalid rank: {rank}')
    if worker_name is not None:
        # Worker-name placements require an initialized RPC framework.
        if not rpc._is_current_rpc_agent_set():
            raise RuntimeError(f'RPC framework needs to be initialized for using worker names: {worker_name}')
        workers = rpc._get_current_rpc_agent().get_worker_infos()
        for worker in workers:
            if worker.name == worker_name:
                return worker.id, device
        raise ValueError(f'Invalid worker name: {worker_name}')
    return rank, device
def _validate_output_tensor_for_gather(
my_rank: int,
dst_rank: int,
size: torch.Size,
dst_tensor: Optional[torch.Tensor],
) -> None:
if dst_rank == my_rank:
if dst_tensor is None:
raise ValueError(
f"Argument ``dst_tensor`` must be specified on destination rank {dst_rank}"
)
if tuple(size) != (dst_tensor.size()):
raise ValueError(
f"Argument ``dst_tensor`` have size {tuple(dst_tensor.size())},"
f"but should be {tuple(size)}"
)
elif dst_tensor:
raise ValueError(
"Argument ``dst_tensor`` must NOT be specified "
"on non-destination ranks."
)
def _flatten_tensor_size(size) -> List[int]:
"""
Checks if tensor size is valid, then flatten/return the list of ints.
"""
if len(size) == 1 and isinstance(size[0], collections.abc.Sequence):
dims = list(*size)
else:
dims = list(size)
for dim in dims:
if not isinstance(dim, int):
raise TypeError(f'size has to be a sequence of ints, found: {dims}')
return dims
def _raise_if_mismatch(expected, actual, prop_name, ranks, is_local=True):
if is_local:
assert isinstance(ranks, int)
if expected != actual:
raise ValueError(f"Local shards' tensor {prop_name} property need to be the same on rank:{ranks}! "
f"Found one local shard tensor {prop_name}={expected}, "
f"the other local shard tensor {prop_name}={actual}.")
else:
# compare failure check across ranks, ranks list should have two rank
assert len(ranks) == 2
if expected != actual:
raise ValueError(f"ShardedTensor {prop_name} property does not match from different ranks! "
f"Found {prop_name}={expected} on rank:{ranks[0]}, "
f"and {prop_name}={actual} on rank:{ranks[1]}.")
def build_metadata_from_local_shards(
    local_shards: List[Shard],
    global_size: List[int],
    current_rank: int,
    pg: distributed_c10d.ProcessGroup
) -> Tuple[ShardedTensorMetadata, torch.device]:
    """Validate this rank's local shards and build a rank-local
    ShardedTensorMetadata covering them.

    Returns the metadata plus the device the local shards live on (the
    device of the last validated shard; shards are expected on one device).
    """
    assert len(local_shards) > 0, "must have local shards!"
    local_shard_metadatas: List[ShardMetadata] = []
    local_shards_device = torch.device("cpu")
    # Properties of the first shard act as the reference every other local
    # shard must match.
    first_shard_dtype = local_shards[0].tensor.dtype
    first_shard_layout = local_shards[0].tensor.layout
    first_shard_requires_grad = local_shards[0].tensor.requires_grad
    first_shard_is_pinned = local_shards[0].tensor.is_pinned()
    # 1). Validate local tensors and associated metadatas
    for i, local_shard in enumerate(local_shards):
        local_shard_tensor = local_shard.tensor
        local_shard_meta = local_shard.metadata
        local_shard_metadatas.append(local_shard_meta)
        rank, local_device = _parse_and_validate_remote_device(pg, local_shard_meta.placement)
        local_shards_device = local_device
        # NOTE(review): the second clause is redundant given the first also
        # enforces torch.strided — kept as written.
        if local_shard_tensor.layout != torch.strided or local_shard_tensor.layout != first_shard_layout:
            raise ValueError(
                f'Only torch.strided layout is currently supported, but found '
                f'{local_shard_tensor.layout} on rank:{current_rank}!'
            )
        if not local_shard_tensor.is_contiguous():
            raise ValueError('Only torch.contiguous_format memory_format is currently supported!')
        # The shard's declared placement rank must be this process's rank.
        if rank != current_rank:
            raise ValueError(
                f"Local shard metadata's rank does not match with the rank in its process group! "
                f'Found current rank in the process group: {current_rank}, '
                f"local ShardMetadata placement's rank: {rank}"
            )
        # The tensor must actually live on the device its metadata declares.
        if local_shard_tensor.device != local_device:
            raise ValueError(
                f"Local shard tensor device does not match with local Shard's placement! "
                f"Found local shard tensor device: {local_shard_tensor.device}, "
                f"local shard metadata placement device: {local_device}"
            )
        # All local shards must agree on size-vs-metadata and on the
        # reference tensor properties.
        _raise_if_mismatch(local_shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank)
        _raise_if_mismatch(local_shard_tensor.is_pinned(), first_shard_is_pinned, "pin_memory", current_rank)
        _raise_if_mismatch(local_shard_tensor.dtype, first_shard_dtype, "dtype", current_rank)
        _raise_if_mismatch(local_shard_tensor.requires_grad, first_shard_requires_grad, "requires_grad", current_rank)
    # 2). Build a "local" ShardedTensorMetadata with all local shards on this rank, then
    #     do all_gather to collect local_sharded_tensor_metadata from all ranks
    local_tensor_properties = TensorProperties(
        dtype=first_shard_dtype,
        layout=first_shard_layout,
        requires_grad=first_shard_requires_grad,
        memory_format=torch.contiguous_format,
        pin_memory=first_shard_is_pinned
    )
    local_sharded_tensor_metadata = ShardedTensorMetadata(
        shards_metadata=local_shard_metadatas,
        size=torch.Size(global_size),
        tensor_properties=local_tensor_properties)
    return (local_sharded_tensor_metadata, local_shards_device)
def build_global_metadata(gathered_metadatas: Sequence[Optional[ShardedTensorMetadata]]):
    """Merge per-rank metadata (indexed by rank, ``None`` for rankless
    entries) into one global ShardedTensorMetadata.

    The first non-None entry becomes the reference; every later entry must
    match its global size and tensor properties, and its shard list is
    appended. The merged shard set is then checked for overlaps and for
    full coverage of the global size.

    Raises:
        ValueError: on any cross-rank property mismatch, overlapping
            shards, incomplete coverage, or if no rank contributed shards.
    """
    global_sharded_tensor_metadata = None
    global_metadata_rank = 0
    for rank, rank_metadata in enumerate(gathered_metadatas):
        if rank_metadata is None:
            continue
        if global_sharded_tensor_metadata is None:
            # First contributing rank becomes the reference for comparisons.
            global_sharded_tensor_metadata = rank_metadata
            global_metadata_rank = rank
        else:
            _raise_if_mismatch(global_sharded_tensor_metadata.size,
                               rank_metadata.size,
                               "global_size",
                               [global_metadata_rank, rank],
                               is_local=False)
            # don't need to check layout and memory format as we already checked in local shards validation stage
            _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.dtype,
                               rank_metadata.tensor_properties.dtype,
                               "dtype",
                               [global_metadata_rank, rank],
                               is_local=False)
            _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.requires_grad,
                               rank_metadata.tensor_properties.requires_grad,
                               "requires_grad",
                               [global_metadata_rank, rank],
                               is_local=False)
            _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.pin_memory,
                               rank_metadata.tensor_properties.pin_memory,
                               "pin_memory",
                               [global_metadata_rank, rank],
                               is_local=False)
            # pass all validations, extend shards metadata
            global_sharded_tensor_metadata.shards_metadata.extend(rank_metadata.shards_metadata)
    if global_sharded_tensor_metadata is not None:
        # check if shards_metadata have overlap shards
        validate_non_overlapping_shards_metadata(global_sharded_tensor_metadata.shards_metadata)
        # check if the shards_metadata is compatible with global size of the sharded tensor.
        check_tensor(global_sharded_tensor_metadata.shards_metadata, global_sharded_tensor_metadata.size)
    else:
        raise ValueError("ShardedTensor have no local shards on all ranks!")
    return global_sharded_tensor_metadata
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
7718d73f031823f1b9ebf57030912b374108f3ba | 34b09bc83e5726fccb524a93cf2742f5aeadedef | /1. List1/3.py | ea7f0c9ec964633f5a8273b37e01bb56d9eee59c | [] | no_license | mjson1954/WIC | 57eb20ffe7aaf8695d679c893efacdeede573e72 | 670112209aacd274d09f6e9a89d948120486bfc8 | refs/heads/master | 2023-03-20T00:57:19.740025 | 2021-03-05T10:52:51 | 2021-03-05T10:52:51 | 289,925,829 | 0 | 0 | null | 2021-02-21T02:16:11 | 2020-08-24T12:46:58 | Python | UTF-8 | Python | false | false | 360 | py | T = int(input())
# T (number of test cases) was read above.
for case_no in range(1, T + 1):
    _n = int(input())  # digit count of the number; read but not otherwise used
    number = input()

    # Frequency of each digit 0-9 in the number.
    digit_freq = [0] * 10
    for ch in number:
        digit_freq[int(ch)] += 1

    best = max(digit_freq)
    # On ties, keep the LARGEST digit (later index overwrites earlier).
    winner = 0
    for digit, freq in enumerate(digit_freq):
        if freq == best:
            winner = digit

    print("#{0} {1} {2}".format(case_no, winner, best))
| [
"mjson1954@gmail.com"
] | mjson1954@gmail.com |
a48fc9cb60eb6c923be2d70b30f0f7886cc487cc | 51f2492a5c207e3664de8f6b2d54bb93e313ca63 | /atcoder/abc047/c.py | cae6160fc3353b018fde5958ce7efe990bf07b41 | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | abeaumont/competitive-programming | 23c5aabd587d7bb15a61efd3428838cb934233dd | a24c9b89941a59d344b51dc1010de66522b1a0dd | refs/heads/master | 2023-09-01T09:50:58.267361 | 2023-07-31T18:00:10 | 2023-07-31T18:00:10 | 117,589,708 | 618 | 262 | WTFPL | 2023-07-12T17:36:20 | 2018-01-15T20:00:56 | C++ | UTF-8 | Python | false | false | 158 | py | #!/usr/bin/env python3
# https://abc047.contest.atcoder.jp/tasks/arc063_a
s = input()
# Count positions where adjacent characters differ.
changes = 0
for left, right in zip(s, s[1:]):
    if left != right:
        changes += 1
print(changes)
| [
"alfredo.beaumont@gmail.com"
] | alfredo.beaumont@gmail.com |
eddde73e43c26cf544ab18b8129edda1c503753b | f8e0a0584f0a808311085996597389c9592025af | /news/models.py | 9a7f0d29ffb421a42c0e1a8c628f0f54155a2412 | [] | no_license | virginiah894/Moringa-Tribune | 5073e93d38538185820630c3933b48e183e92209 | 2af5daabad0bdd7f2895f7bd28816d7ad975ad9a | refs/heads/master | 2021-09-09T20:57:37.903815 | 2019-12-13T08:45:46 | 2019-12-13T08:45:46 | 227,795,226 | 0 | 0 | null | 2021-09-08T01:31:55 | 2019-12-13T08:45:02 | Python | UTF-8 | Python | false | false | 1,182 | py | from django.db import models
import datetime as dt
class Editor(models.Model):
first_name = models.CharField(max_length =30)
last_name = models.CharField(max_length =30)
email = models.EmailField()
phone_number = models .CharField(max_length=10,blank=True)
def __str__(self):
return self.first_name
def save_editor(self):
self.save()
class Meta:
ordering = ['first_name']
class tags(models.Model):
name = models.CharField(max_length = 30)
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length=60)
post = models.TextField()
editor = models.ForeignKey(Editor)
tags = models.ManyToManyField(tags)
pub_date = models.DateTimeField(auto_now_add=True)
article_image = models.ImageField(upload_to = 'articles/')
@classmethod
def todays_news(cls):
today = dt.date.today()
news = cls.objects.filter(pub_date__date = today)
return news
@classmethod
def days_news(cls,date):
news = cls.objects.filter(pub_date__date = date)
return news
@classmethod
def search_by_title(cls,search_term):
news= cls.objects.filter(title__icontains=search_term)
return news
| [
"virgyperry@gmail.com"
] | virgyperry@gmail.com |
32a7383bbdb7707bced75887bccb8e9c491a674f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_374/ch9_2020_03_08_05_11_20_823379.py | 11fe13e3a3097c21d3428e5d4f979b2017324147 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | import math
def calcula_volume_da_esfera(r):
calcula = 4/3*math.pi*r**3
return(calcula)
| [
"you@example.com"
] | you@example.com |
1b2931848fb0d2a2684a071cf19b28957ec21eef | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /sDvjdcBrbHoXKvDsZ_9.py | edb3defdc96258659cf2fc8e61b896bc9232188e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | """
Write a function that returns `True` if a given name can generate an array of
words.
### Examples
anagram("Justin Bieber", ["injures", "ebb", "it"]) ➞ True
anagram("Natalie Portman", ["ornamental", "pita"]) ➞ True
anagram("Chris Pratt", ["chirps", "rat"]) ➞ False
# Not all letters are used
anagram("Jeff Goldblum", ["jog", "meld", "bluffs"]) ➞ False
# "s" does not exist in the original name
### Notes
* Each letter in the name may only be used once.
* All letters in the name must be used.
"""
def anagram(name, words):
newname = sorted(''.join(name.split()).lower())
newwords = sorted(''.join(words).lower())
return newwords == newname
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
b3f7befc0eebb80abe6961bb65b8ef7294dceba2 | 8b6b6ef3ac079aabcc0c44243447388bef87f671 | /Projects/customDS.py | 2ae2a9d67492f47a2805f04e91bd174016fe2e62 | [] | no_license | ava6969/DataStructureAlgorithm | c4f35f2b616cd0393050c89b4c42bbad81c5ebcf | 6e88c4aa2b18765d7c4f8a0d3bca5c62260cb0d2 | refs/heads/master | 2022-12-03T18:35:11.994182 | 2020-08-05T15:35:40 | 2020-08-05T15:35:40 | 283,895,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,778 | py | class Node:
def __init__(self, value):
self.value = value
self.next = None
class LinkedList:
def __init__(self, head):
self.head = head
def append(self, value):
if self.head is None:
self.head = Node(value)
return
# Move to the tail (the last node)
node = self.head
while node.next:
node = node.next
node.next = Node(value)
return
def to_list(self):
lst = []
ptr = self.head
while ptr:
lst.append(ptr.value)
ptr = ptr.next
return lst
def flatten(self):
return self._flatten(self.head) # <-- self.head is a node for NestedLinkedList
''' A recursive function '''
def _flatten(self, node):
# A termination condition
if node.next is None:
return merge(node.value, None) # <-- First argument is a simple LinkedList
# _flatten() is calling itself untill a termination condition is achieved
return merge(node.value, self._flatten(node.next)) # <-- Both arguments are a simple LinkedList each
def __repr__(self):
return ' '.join([w for w in self.flatten().to_list()])
# util functions
def merge(list1, list2):
merged = LinkedList(None)
if list1 is None:
return list2
if list2 is None:
return list1
list1_elt = list1.head
list2_elt = list2.head
while list1_elt is not None or list2_elt is not None:
if list1_elt is None:
merged.append(list2_elt)
list2_elt = list2_elt.next
elif list2_elt is None:
merged.append(list1_elt)
list1_elt = list1_elt.next
elif list1_elt.value <= list2_elt.value:
merged.append(list1_elt)
list1_elt = list1_elt.next
else:
merged.append(list2_elt)
list2_elt = list2_elt.next
return merged
class HuffBaseNode:
def __init__(self, weight):
self._weight = weight
self.code = None
def is_leaf(self):
return NotImplemented
def weight(self):
return self._weight
def __add__(self, other):
return self.weight() + other.weight()
def __lt__(self, other):
return self.weight() < other.weight()
def __gt__(self, other):
return self.weight() > other.weight()
def __eq__(self, other):
return self.weight() == other.weight()
class HuffLeafNode(HuffBaseNode):
def __init__(self, element, weight):
super().__init__(weight)
self.element = element
self.visited = False
def value(self):
return self.element
def is_leaf(self):
return True
def __repr__(self):
return f"el: {self.element}, wt: {self._weight}"
class HuffInternalNode(HuffBaseNode):
def __init__(self, weight, left, right):
super().__init__(weight)
self.left = left
self.right = right
def is_leaf(self):
return False
def __repr__(self):
tabs = '\t'
return f'\n{tabs}weight: {self._weight}\n{tabs}left: {self.left.__repr__()}\n{tabs}right: {self.right.__repr__()}'
class HuffTree:
def __init__(self, node):
self.root = node
def _root(self):
return self.root
def weight(self):
return self.root.weight()
def __repr__(self):
return 'root:' + self.root.__repr__()
def __add__(self, other):
return self.root.weight() + other.weight()
def __lt__(self, other):
return self.root.weight() < other.weight()
def __gt__(self, other):
return self.root.weight() > other.weight()
def __eq__(self, other):
return self.root.weight() == other.weight() | [
"ava6969@rit.edu"
] | ava6969@rit.edu |
819d3872ba14d41ded94549d3c76b5a1426f8f46 | 56451b41a2a5f58ea3a1eaa265ab4bda3bf4a54e | /util/dataset_loader.py | 49b42fc9aa289da317860e4a7e9bcf2186671c79 | [] | no_license | peternara/Temperature-Scaling-Modesty-Loss | 9c6285953b0012f00386092264d96a404f9dfcd8 | 7b6faadc2ac2ee989fdf80d674232800c337abda | refs/heads/master | 2021-09-20T15:04:41.996247 | 2018-08-11T00:25:59 | 2018-08-11T00:25:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | from torchvision import transforms, datasets
import torch
def load_data(d, train=False, batch_size=100):
""" Create and return dataloader for different dataset """
if d == "CIFAR10":
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ])
data_set = datasets.CIFAR10(root='Dataset/', train=train, download=True, transform=transform)
return torch.utils.data.DataLoader(data_set, batch_size=batch_size, shuffle=False)
elif d == "CIFAR100":
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ])
data_set = datasets.CIFAR100(root='Dataset/', train=train, download=True, transform=transform)
return torch.utils.data.DataLoader(data_set, batch_size=batch_size, shuffle=False)
elif d == "ImageNet":
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
return torch.utils.data.DataLoader(datasets.ImageFolder("Dataset/ILSVRC", transform), batch_size=batch_size,
shuffle=False)
elif d == "SVHN":
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
if not train:
data_set = datasets.SVHN(root='Dataset/', split="test", download=True, transform=transform)
else:
data_set = datasets.SVHN(root='Dataset/', split="train", download=True, transform=transform)
return torch.utils.data.DataLoader(data_set, batch_size=batch_size)
else:
raise TypeError("Dataset inconnu")
| [
"noreply@github.com"
] | peternara.noreply@github.com |
06a27a5131b47c30f58ea7a14ae0ebb90452cdd7 | d7b9b490c954c7a9160b69f8ce2c907ef4681ecb | /sponsors/migrations/0020_sponsorshipbenefit_unavailable.py | 35c842d1e237e7274eb7082a46c37a7648d461c2 | [
"Apache-2.0"
] | permissive | python/pythondotorg | 00db93a4b1789a4d438806d106d9cee3349ad78c | c4ee749942227ca75c8e670546afe67232d647b2 | refs/heads/main | 2023-08-28T20:04:24.735314 | 2023-08-03T19:12:29 | 2023-08-03T19:12:29 | 6,127,047 | 1,131 | 646 | Apache-2.0 | 2023-08-24T15:57:04 | 2012-10-08T16:00:15 | Python | UTF-8 | Python | false | false | 588 | py | # Generated by Django 2.0.13 on 2021-02-26 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("sponsors", "0019_sponsor_twitter_handle"),
]
operations = [
migrations.AddField(
model_name="sponsorshipbenefit",
name="unavailable",
field=models.BooleanField(
default=False,
help_text="If selected, this benefit will not be available to applicants.",
verbose_name="Benefit is unavailable",
),
),
]
| [
"noreply@github.com"
] | python.noreply@github.com |
0e4eb6c34aebef06d2fff3f979fa3cff9dc3ca53 | 835e428d1cbe87adf945897ff75f77e93b500d12 | /demonstrations/quantum_volume.py | f3c33be50da84dd7180c360143dfe1b5fdb8532d | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | quantshah/qml | 9acb3c932610e30a28369fe72ee49683ac301219 | 45533ef6f6d7b9cfa0384302fe52b5ead772b923 | refs/heads/master | 2022-11-30T08:26:12.972709 | 2022-11-18T19:59:59 | 2022-11-18T19:59:59 | 218,805,085 | 0 | 0 | Apache-2.0 | 2019-10-31T16:02:07 | 2019-10-31T16:02:06 | null | UTF-8 | Python | false | false | 38,027 | py | r""".. _quantum_volume:
Quantum volume
==============
.. meta::
:property="og:description": Learn about quantum volume, and how to
compute it.
:property="og:image": https://pennylane.ai/qml/_images/quantum_volume_thumbnail.png
.. related::
qsim_beyond_classical Beyond classical computing with qsim
*Author: Olivia Di Matteo — Posted: 15 December 2020. Last updated: 15 April 2021.*
Twice per year, a project called the TOP500 [#top500]_ releases a list of the
500 most powerful supercomputing systems in the world. However, there is a large
amount of variation in how supercomputers are built. They may run different
operating systems and have varying amounts of memory. `Some
<https://en.wikipedia.org/wiki/Fugaku_(supercomputer)>`_ use 48-core processors,
while `others <https://en.wikipedia.org/wiki/Sunway_TaihuLight>`_ use processors
with up to 260 cores. The speed of processors will differ, and they may be
connected in different ways. We can't rank them by simply counting the number of
processors!
In order to make a fair comparison, we need benchmarking standards that give us
a holistic view of their performance. To that end, the TOP500 rankings are based
on something called the LINPACK benchmark [#linpack]_. The task of the
supercomputers is to solve a dense system of linear equations, and the metric of
interest is the rate at which they perform `floating-point operations (FLOPS)
<https://en.wikipedia.org/wiki/FLOPS>`__. Today's top machines reach speeds well
into the regime of hundreds of petaFLOPS! While a single number certainly
cannot tell the whole story, it still gives us insight into the quality of the
machines, and provides a standard so we can compare them.
A similar problem is emerging with quantum computers: we can't judge quantum
computers on the number of qubits alone. Present-day devices have a number of
limitations, an important one being gate error rates. Typically
the qubits on a chip are not all connected to each other, so it may not be
possible to perform operations on arbitrary pairs of them.
Considering this, can we tell if a machine with 20 noisy qubits is better
than one with 5 very high-quality qubits? Or if a machine with 8 fully-connected
qubits is better than one with 16 qubits of comparable error rate, but arranged in
a square lattice? How can we make comparisons between different
types of qubits?
.. figure:: ../demonstrations/quantum_volume/qubit_graph_variety.svg
:align: center
:width: 50%
..
Which of these qubit hardware graphs is the best?
To compare across all these facets, researchers have proposed a metric called
"quantum volume" [#cross]_. Roughly, the quantum volume is a measure of the
effective number of qubits a processor has. It is calculated by determining the
largest number of qubits on which it can reliably run circuits of a prescribed
type. You can think of it loosely as a quantum analogue of the LINPACK
benchmark. Different quantum computers are tasked with solving the same problem,
and the success will be a function of many properties: error rates, qubit
connectivity, even the quality of the software stack. A single
number won't tell us everything about a quantum computer, but it does establish
a framework for comparing them.
After working through this tutorial, you'll be able to define quantum volume,
explain the problem on which it's based, and run the protocol to compute it!
"""
##############################################################################
#
# Designing a benchmark for quantum computers
# -------------------------------------------
#
# There are many different properties of a quantum computer
# that contribute to the successful execution of a computation. Therefore, we
# must be very explicit about what exactly we are benchmarking, and what is our
# measure of success. In general, to set up a benchmark for a quantum computer
# we need to decide on a number of things [#robin]_:
#
# 1. A family of circuits with a well-defined structure and variable size
# 2. A set of rules detailing how the circuits can be compiled
# 3. A measure of success for individual circuits
# 4. A measure of success for the family of circuits
# 5. (Optional) An experimental design specifying how the circuits are to be run
#
# We'll work through this list in order to see how the protocol for computing
# quantum volume fits within this framework.
#
# The circuits
# ~~~~~~~~~~~~
#
# Quantum volume relates
# to the largest *square* circuit that a quantum processor can run reliably. This benchmark
# uses *random* square circuits with a very particular form:
#
# .. figure:: ../demonstrations/quantum_volume/model_circuit_cross.png
# :align: center
# :width: 60%
#
# ..
#
# A schematic of the random circuit structure used in the quantum volume protocol.
# Image source: [#cross]_.
#
# Specifically, the circuits consist of :math:`d` sequential layers acting on
# :math:`d` qubits. Each layer consists of two parts: a random permutation of
# the qubits, followed by Haar-random SU(4) operations performed on neighbouring
# pairs of qubits. (When the number of qubits is odd, the bottom-most qubit is
# idle while the SU(4) operations run on the pairs. However, it will still be
# incorporated by way of the permutations.) These circuits satisfy the criteria
# in item 1 --- they have well-defined structure, and it is clear how they can be
# scaled to different sizes.
#
# As for the compilation rules of item 2, to compute quantum volume we're
# allowed to do essentially anything we'd like to the circuits in order to
# improve them. This includes optimization, hardware-aware considerations such
# as qubit placement and routing, and even resynthesis by finding unitaries that
# are close to the target, but easier to implement on the hardware [#cross]_.
#
# Both the circuit structure and the compilation highlight how quantum volume is
# about more than just the number of qubits. The error rates will affect the
# achievable depth, and the qubit connectivity contributes through the layers of
# permutations because a very well-connected processor will be able to implement
# these in fewer steps than a less-connected one. Even the quality of the
# software and the compiler plays a role here: higher-quality compilers will
# produce circuits that fit better on the target devices, and will thus produce
# higher quality results.
#
# The measures of success
# ~~~~~~~~~~~~~~~~~~~~~~~
#
# Now that we have our circuits, we have to define the quantities that will
# indicate how well we're able to run them. For that, we need a problem
# to solve. The problem used for computing quantum volume is called the *heavy output
# generation problem*. It has roots in the proposals for demonstrating quantum
# advantage [#aaronson]_. Many such proposals make use of the properties of
# various random quantum circuit families, as the distribution of the
# measurement outcomes may not be easy to sample using classical
# techniques.
#
# A distribution that is theorized to fulfill this property is the distribution
# of *heavy* output bit strings. Heavy bit strings are those whose outcome
# probabilities are above the median of the distribution. For example, suppose
# we run a two-qubit circuit and find that the measurement probabilities for
# the output states are as follows:
# Example measurement-outcome probabilities of a hypothetical two-qubit
# circuit, keyed by output bit string.
measurement_probs = {"00": 0.558, "01": 0.182, "10": 0.234, "11": 0.026}
##############################################################################
#
# The median of this probability distribution is:
import numpy as np

# Collect the outcome probabilities into an array, ordered as in the dict above.
prob_array = np.fromiter(measurement_probs.values(), dtype=np.float64)
# The median splits the distribution into "light" and "heavy" outputs.
print(f"Median = {np.median(prob_array):.3f}")
##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Median = 0.208
#
##############################################################################
#
# This means that the heavy bit strings are '00' and '10', because these are
# the two probabilities above the median. If we were to run this circuit, the
# probability of obtaining one of the heavy outputs is:
# Heavy outputs are the bit strings whose probability lies strictly above the
# median; summing those probabilities gives the chance of sampling a heavy output.
heavy_output_prob = np.sum(prob_array[prob_array > np.median(prob_array)])
print(f"Heavy output probability = {heavy_output_prob}")
##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Heavy output probability = 0.792
#
##############################################################################
#
# Each circuit in a circuit family has its own heavy output probability. If our
# quantum computer is of high quality, then we should expect to see heavy
# outputs quite often across all the circuits. On the other hand, if it's of
# poor quality and everything is totally decohered, we will end up with output
# probabilities that are roughly all the same, as noise will reduce the
# probabilities to the uniform distribution.
#
# The heavy output generation problem quantifies this --- for our family of
# random circuits, do we obtain heavy outputs at least 2/3 of the time on
# average? Furthermore, do we obtain this with high confidence? This is the
# basis for quantum volume. Looking back at the criteria for our benchmarks, for
# item 3 the measure of success for each circuit is how often we obtain heavy
# outputs when we run the circuit and take a measurement. For item 4 the
# measure of success for the whole family is whether or not the mean of these
# probabilities is greater than 2/3 with high confidence.
#
# On a related note, it is important to determine what heavy output probability
# we should *expect* to see on average. The intuition for how this can be
# calculated is as follows [#aaronson]_, [#cmu]_. Suppose that our random
# square circuits scramble things up enough so that the effective operation
# looks like a Haar-random unitary :math:`U`. Since in the circuits we are
# applying :math:`U` to the all-zero ket, the measurement outcome probabilities
# will be the moduli squared of the entries in the first column of :math:`U`.
#
# Now if :math:`U` is Haar-random, we can say something about the form of these
# entries. In particular, they are complex numbers for which both the real and
# imaginary parts are normally distributed with mean 0 and variance
# :math:`1/2^m`, where :math:`m` is the number of qubits. Taking the modulus
# squared of such numbers and making a histogram yields a distribution
# of probabilities with the form :math:`\hbox{Pr}(p) \sim 2^m e^{-2^m p}.` This
# is also known as the *Porter-Thomas distribution*.
#
# By looking at the form of the underlying probability distribution, the
# exponential distribution :math:`\hbox{Pr}(x) = e^{-x}`, we can calculate some
# properties of the heavy output probabilities. First, we can integrate the exponential
# distribution to find that the median sits at :math:`\ln 2`. We can further
# compute the expectation value of obtaining something greater than the median
# by integrating :math:`x e^{-x}` from :math:`\ln 2` to :math:`\infty` to obtain
# :math:`(1 + \ln 2)/2`. This is the expected heavy output probability!
# Numerically it is around 0.85, as we will observe later in our results.
#
#
# The benchmark
# ~~~~~~~~~~~~~
#
# Now that we have our circuits and our measures of success, we're ready to
# define the quantum volume.
#
#
# .. admonition:: Definition
# :class: defn
#
# The quantum volume :math:`V_Q` of an :math:`n`-qubit processor is defined as [#cross]_
#
# .. math::
# \log_2(V_Q) = \hbox{argmax}_m \min (m, d(m))
#
# where :math:`m \leq n` is a number of qubits, and :math:`d(m)` is the number of
# qubits in the largest square circuits for which we can reliably sample
# heavy outputs with probability greater than 2/3.
#
# To see this more concretely, suppose we have a 20-qubit device and find that
# we get heavy outputs reliably for up to depth-4 circuits on any set of 4
# qubits, then the quantum volume is :math:`\log_2 V_Q = 4`. Quantum volume is
# incremental, as shown below --- we gradually work our way up to larger
# circuits, until we find something we can't do. Very loosely, quantum volume
# is like an effective number of qubits. Even if we have those 20 qubits, only
# groups of up to 4 of them work well enough together to sample from
# distributions that would be considered hard.
#
# .. figure:: ../demonstrations/quantum_volume/qv_square_circuits.svg
# :align: center
# :width: 75%
#
# ..
#
# This quantum computer has :math:`\log_2 V_Q = 4`, as the 4-qubit square
# circuits are the largest ones it can run successfully.
#
#
# The maximum achieved quantum volume has been doubling at an increasing rate. In
# late 2020, the most recent announcements have been :math:`\log_2 V_Q = 6` on
# IBM's 27-qubit superconducting device `ibmq_montreal` [#qv64]_, and
# :math:`\log_2 V_Q = 7` on a Honeywell trapped-ion qubit processor
# [#honeywell]_. A device with an expected quantum volume of :math:`\log_2 V_Q
# = 22` has also been announced by IonQ [#ionq]_, though benchmarking results
# have not yet been published.
#
# .. note::
#
# In many sources, the quantum volume of processors is reported as
# :math:`V_Q` explicitly, rather than :math:`\log_2 V_Q` as is the
# convention in this demo. As such, IonQ's processor has the potential for a
# quantum volume of :math:`2^{22} > 4000000`. Here we use the :math:`\log`
# because it is more straightforward to understand that they have 22
# high-quality, well-connected qubits than to extract this at first glance from the
# explicit value of the volume.
#
##############################################################################
#
# Computing the quantum volume
# ----------------------------
#
# Equipped with our definition of quantum volume, it's time to compute it
# ourselves! We'll use the `PennyLane-Qiskit
# <https://pennylaneqiskit.readthedocs.io/en/latest/>`_ plugin to compute the
# volume of a simulated version of one of the IBM processors, since their properties are easily
# accessible through this plugin.
#
#
# Loosely, the protocol for quantum volume consists of three steps:
#
# 1. Construct random square circuits of increasing size
#
# 2. Run those circuits on both a simulator and on a noisy hardware device
#
# 3. Perform a statistical analysis of the results to determine what size
# circuits the device can run reliably
#
#
# The largest reliable size will become the :math:`m` in the expression for
# quantum volume.
#
#
# Step 1: construct random square circuits
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Recall that the structure of the circuits above is alternating layers of
# permutations and random SU(4) operations on pairs of qubits. Let's implement
# the generation of such circuits in PennyLane.
#
# First we write a function that randomly permutes qubits. We'll do this by
# using numpy to generate a permutation, and then apply it with the built-in
# :func:`~.pennylane.Permute` subroutine.
import pennylane as qml
# Object for random number generation from numpy
# (shared by the circuit-construction helpers defined below).
rng = np.random.default_rng()
def permute_qubits(num_qubits):
    """Apply a uniformly random permutation to the first ``num_qubits`` wires."""
    shuffled_order = list(rng.permutation(num_qubits))
    qml.Permute(shuffled_order, wires=list(range(num_qubits)))
##############################################################################
#
# Next, we need to apply SU(4) gates to pairs of qubits. PennyLane doesn't have
# built-in functionality to generate these random matrices, however its cousin
# `Strawberry Fields <https://strawberryfields.ai/>`_ does! We will use the
# ``random_interferometer`` method, which can generate unitary matrices uniformly
# at random. This function actually generates elements of U(4), but they are
# essentially equivalent up to a global phase.
from strawberryfields.utils import random_interferometer
def apply_random_su4_layer(num_qubits):
    """Apply an independent Haar-random 4x4 unitary to each adjacent
    (even, odd) pair of qubits; an odd last qubit is left idle."""
    # range(0, num_qubits - 1, 2) automatically skips a trailing unpaired qubit
    for first_wire in range(0, num_qubits - 1, 2):
        haar_random_unitary = random_interferometer(N=4)
        qml.QubitUnitary(haar_random_unitary, wires=[first_wire, first_wire + 1])
##############################################################################
#
# Next, let's write a layering method to put the two together --- this is just
# for convenience and to highlight the fact that these two methods together
# make up one layer of the circuit depth.
#
def qv_circuit_layer(num_qubits):
    """One layer of the quantum-volume circuit: a random qubit permutation
    followed by random SU(4) operations on neighbouring pairs."""
    for sub_layer in (permute_qubits, apply_random_su4_layer):
        sub_layer(num_qubits)
##############################################################################
#
# Let's take a look! We'll set up an ideal device with 5 qubits, and generate a
# circuit with 3 qubits. In this demo, we'll work explicitly with `quantum tapes
# <https://pennylane.readthedocs.io/en/latest/code/qml_tape.html>`__ since they
# are not immediately tied to a device. This will be convenient later when we
# need to run the same random circuit on two devices independently.
# An ideal (noiseless) simulator; shots=None requests exact probabilities.
num_qubits = 5
dev_ideal = qml.device("default.qubit", shots=None, wires=num_qubits)
m = 3  # number of qubits
# Record an m-qubit, m-layer square circuit on a tape.  Tapes are not tied to
# a device, so the same random circuit can later be executed on several devices.
with qml.tape.QuantumTape() as tape:
    qml.layer(qv_circuit_layer, m, num_qubits=m)
# Expand everything except the random SU(4) blocks, so the permutations are
# shown as explicit SWAP gates in the drawing.
expanded_tape = tape.expand(stop_at=lambda op: isinstance(op, qml.QubitUnitary))
print(qml.drawer.tape_text(expanded_tape, wire_order=dev_ideal.wires, show_all_wires=True, show_matrices=True))
##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# 0: ─╭SWAP─╭U(M0)─╭U(M1)─╭SWAP───────╭U(M2)─┤
# 1: ─╰SWAP─╰U(M0)─╰U(M1)─│─────╭SWAP─╰U(M2)─┤
# 2: ─────────────────────╰SWAP─╰SWAP────────┤
# 3: ────────────────────────────────────────┤
# 4: ────────────────────────────────────────┤
# M0 =
# [[-0.17514647+0.00759447j 0.11975927+0.16007614j -0.41793925+0.49643728j
# 0.62304058-0.34640531j]
# [-0.73367896-0.58079555j -0.11348577+0.00751965j -0.02640159-0.15592112j
# -0.19507153-0.21998821j]
# [ 0.02988983+0.09364586j -0.74053162+0.55032455j 0.31350059-0.01305651j
# 0.16283233-0.11885036j]
# [-0.13103809-0.25850305j 0.18298996+0.2497364j 0.34879438+0.57771772j
# -0.02385446+0.60346274j]]
# M1 =
# [[ 0.14296171+0.28087257j -0.5985737 -0.27489922j -0.43838149+0.10344812j
# 0.04022491+0.51216658j]
# [-0.21538853+0.02728431j -0.24776721-0.57146257j 0.60975755+0.36241573j
# 0.21787038-0.11953391j]
# [-0.24405375+0.05780278j -0.11688629-0.17397518j -0.51628349-0.11381455j
# 0.44143429-0.64714776j]
# [-0.750841 -0.47630904j -0.28666068+0.22820556j -0.09459735+0.07429451j
# -0.17243398+0.17582253j]]
# M2 =
# [[-0.63733359+1.91519046e-01j -0.49615702+9.79920998e-05j
# 0.06949634+4.54968771e-01j 0.21112196-2.33571716e-01j]
# [ 0.4774216 +5.19692450e-02j -0.2741782 -3.71778068e-01j
# 0.09817361+6.01972062e-01j -0.39517581+1.66741872e-01j]
# [ 0.14401687-1.53582182e-01j 0.51636466-1.58216631e-01j
# 0.43804144+3.62586089e-01j 0.4473567 -3.74872915e-01j]
# [ 0.51670588+1.23210608e-01j -0.48982566-9.40288988e-02j
# -0.19210465-2.36457367e-01j 0.53202679-3.05278186e-01j]]
##############################################################################
#
# The first thing to note is that the last two qubits are never used in the
# operations, since the quantum volume circuits are square. Another important
# point is that this circuit with 3 layers actually has depth much greater than
# 3, since each layer has both SWAPs and SU(4) operations that are further
# decomposed into elementary gates when run on the actual processor.
#
# One last thing we'll need before running our circuits is the machinery to
# determine the heavy outputs. This is quite an interesting aspect of the
# protocol --- we're required to compute the heavy outputs classically in order
# to get the results! As a consequence, it will only be possible to calculate
# quantum volume for processors up to a certain point before they become too
# large.
#
# That said, classical simulators are always improving, and can simulate
# circuits with numbers of qubits well into the double digits (though they may
# need a supercomputer to do so). Furthermore, the designers of the protocol
# don't expect this to be an issue until gate error rates decrease below
# :math:`\approx 10^{-4}`, after which we may need to make adjustments to remove
# the classical simulation, or even consider new volume metrics [#cross]_.
#
# The heavy outputs can be retrieved from a classically-obtained probability
# distribution as follows:
#
def heavy_output_set(m, probs):
    """Compute the heavy-output set of an m-qubit circuit.

    Heavy outputs are the 2**(m-1) bit strings whose measurement
    probability lies above the median of the distribution.

    :param m: int -- number of measured qubits
    :param probs: array of 2**m outcome probabilities, ordered as
        '00...0', '00...1', ..., '11...1'
    :return: tuple (heavy_outputs, prob_heavy_output) where heavy_outputs
        is a list of m-bit binary strings and prob_heavy_output is the
        total probability mass they carry
    """
    # Indices of outcomes sorted by ascending probability; everything in
    # the upper half (above the median) forms the heavy-output set.
    order = np.argsort(probs)
    heavy_indices = order[2 ** (m - 1):]
    # int(...) guards against NumPy integer scalars, whose support for the
    # binary format spec is version-dependent; width m + 2 covers the
    # '0b' prefix that is stripped right after.
    heavy_outputs = [format(int(idx), f"#0{m + 2}b")[2:] for idx in heavy_indices]
    # Probability of observing any heavy output.
    prob_heavy_output = np.sum(probs[heavy_indices])
    return heavy_outputs, prob_heavy_output
##############################################################################
#
# As an example, let's compute the heavy outputs and probability for our circuit
# above.
#
# Adds a measurement of the first m qubits to the previous circuit
with tape:
    qml.probs(wires=range(m))
# Run the circuit, compute heavy outputs, and print results
output_probs = qml.execute([tape], dev_ideal, None)  # returns a list of results
output_probs = output_probs[0].reshape(2 ** m, )
heavy_outputs, prob_heavy_output = heavy_output_set(m, output_probs)
print("State\tProbability")
for idx, prob in enumerate(output_probs):
    # Width m + 2 accounts for the '0b' prefix stripped below; the previous
    # hard-coded "#05b" was only correct for m = 3.
    bit_string = format(idx, f"#0{m + 2}b")[2:]
    print(f"{bit_string}\t{prob:.4f}")
print(f"\nMedian is {np.median(output_probs):.4f}")
print(f"Probability of a heavy output is {prob_heavy_output:.4f}")
print(f"Heavy outputs are {heavy_outputs}")
##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# State Probability
# 000 0.0157
# 001 0.0200
# 010 0.0026
# 011 0.2765
# 100 0.0175
# 101 0.4266
# 110 0.0045
# 111 0.2365
#
# Median is 0.0188
# Probability of a heavy output is 0.9596
# Heavy outputs are ['001', '111', '011', '101']
#
##############################################################################
#
# Step 2: run the circuits
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# Now it's time to run the protocol. First, let's set up our hardware
# device. We'll use a simulated version of the 5-qubit IBM Ourense as an example
# --- the reported quantum volume according to IBM is :math:`V_Q=8`, so we
# endeavour to reproduce that here. This means that we should be able to run our
# square circuits reliably on up to :math:`\log_2 V_Q =3` qubits.
#
# .. note::
#
# In order to access the IBM Q backend, users must have an IBM Q account
# configured. This can be done by running:
#
# .. code-block:: python3
#
# from qiskit import IBMQ
# IBMQ.save_account('MY_API_TOKEN')
#
# A token can be generated by logging into your IBM Q account `here <https://quantum-computing.ibm.com/login>`_ .
#
#
# .. note::
#
# In the time since the original release of this demo, the Ourense device is
# no longer available from IBM Q. However, we leave the original results for
# expository purposes, and note that the methods are applicable in general.
# Users can get a list of available IBM Q backends by importing IBM Q,
# specifying their provider and then calling: ``provider.backends()``
#
# Remote 5-qubit IBM Q device. Despite the variable name, this targets the
# "ibmq_bogota" backend, because the original Ourense device was retired by
# IBM Q (see the note above).
dev_ourense = qml.device("qiskit.ibmq", wires=5, backend="ibmq_bogota")
##############################################################################
#
# First, we can take a look at the arrangement of the qubits on the processor
# by plotting its hardware graph.
import matplotlib.pyplot as plt
import networkx as nx
# The coupling map lists the qubit pairs that support two-qubit gates; wrap
# it in a networkx graph so the connectivity can be drawn.
ourense_hardware_graph = nx.Graph(dev_ourense.backend.configuration().coupling_map)
# Draw the hardware graph with nodes labelled by wire index.
nx.draw_networkx(
    ourense_hardware_graph,
    node_color="cyan",
    labels={x: x for x in range(dev_ourense.num_wires)},
)
##############################################################################
#
# .. figure:: ../demonstrations/quantum_volume/ourense.svg
# :align: center
# :width: 75%
#
##############################################################################
#
# This hardware graph is not fully connected, so the quantum compiler will have
# to make some adjustments when non-connected qubits need to interact.
#
# To actually perform the simulations, we'll need to access a copy of the
# Ourense noise model. Again, we won't be running on Ourense directly ---
# we'll set up a local device to simulate its behaviour.
#
from qiskit.providers.aer import noise
# Extract the calibrated noise model of the remote backend so the protocol
# can run on a local, noisy Aer simulator instead of real hardware.
noise_model = noise.NoiseModel.from_backend(dev_ourense.backend)
# Local noisy simulator: same wire count as the remote device, 1000 shots
# per circuit.
dev_noisy = qml.device(
    "qiskit.aer", wires=dev_ourense.num_wires, shots=1000, noise_model=noise_model
)
##############################################################################
#
# As a final point, since we are allowed to do as much optimization as we like,
# let's put the compiler to work. The compiler will perform a number of
# optimizations to simplify our circuit. We'll also specify some high-quality
# qubit placement and routing techniques [#sabre]_ in order to fit the circuits
# on the hardware graph in the best way possible.
# Constrain transpilation to the device's coupling map and enable the
# highest optimization level plus SABRE layout/routing (see [#sabre]_).
coupling_map = dev_ourense.backend.configuration().to_dict()["coupling_map"]
dev_noisy.set_transpile_args(
    **{
        "optimization_level": 3,
        "coupling_map": coupling_map,
        "layout_method": "sabre",
        "routing_method": "sabre",
    }
)
##############################################################################
#
# Let's run the protocol. We'll start with the smallest circuits on 2
# qubits, and make our way up to 5. At each :math:`m`, we'll look at 200 randomly
# generated circuits.
#
min_m = 2
max_m = 5
num_ms = (max_m - min_m) + 1
num_trials = 200
# To store the results
# Row i holds the results for circuit width m = i + min_m.
probs_ideal = np.zeros((num_ms, num_trials))
probs_noisy = np.zeros((num_ms, num_trials))
for m in range(min_m, max_m + 1):
    for trial in range(num_trials):
        # Simulate the circuit analytically
        with qml.tape.QuantumTape() as tape:
            qml.layer(qv_circuit_layer, m, num_qubits=m)
            qml.probs(wires=range(m))
        output_probs = qml.execute([tape], dev_ideal, None)
        output_probs = output_probs[0].reshape(2 ** m, )
        heavy_outputs, prob_heavy_output = heavy_output_set(m, output_probs)
        # Execute circuit on the noisy device
        qml.execute([tape], dev_noisy, None)
        # Get the output bit strings; flip ordering of qubits to match PennyLane
        # (Qiskit's bit-string order is reversed relative to PennyLane's wires).
        counts = dev_noisy._current_job.result().get_counts()
        reordered_counts = {x[::-1]: counts[x] for x in counts.keys()}
        # Tally the shots whose first m bits form a heavy output.
        # NOTE(review): x[:m] assumes any extra characters belong to the
        # unused wires of the 5-wire device — confirm against the backend.
        device_heavy_outputs = np.sum(
            [
                reordered_counts[x] if x[:m] in heavy_outputs else 0
                for x in reordered_counts.keys()
            ]
        )
        fraction_device_heavy_output = device_heavy_outputs / dev_noisy.shots
        probs_ideal[m - min_m, trial] = prob_heavy_output
        probs_noisy[m - min_m, trial] = fraction_device_heavy_output
##############################################################################
#
# Step 3: perform a statistical analysis
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Having run our experiments, we can now get to the heart of the quantum volume
# protocol: what *is* the largest square circuit that our processor can run?
# Let's first check out the means and see how much higher they are than 2/3.
#
# Average heavy-output probability at each circuit width (rows of the
# result arrays); 2/3 is the pass threshold of the protocol.
probs_mean_ideal = np.mean(probs_ideal, axis=1)
probs_mean_noisy = np.mean(probs_noisy, axis=1)
print(f"Ideal mean probabilities:")
for idx, prob in enumerate(probs_mean_ideal):
    print(f"m = {idx + min_m}: {prob:.6f} {'above' if prob > 2/3 else 'below'} threshold.")
print(f"\nDevice mean probabilities:")
for idx, prob in enumerate(probs_mean_noisy):
    print(f"m = {idx + min_m}: {prob:.6f} {'above' if prob > 2/3 else 'below'} threshold.")
##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Ideal mean probabilities:
# m = 2: 0.797979 above threshold.
# m = 3: 0.844052 above threshold.
# m = 4: 0.841203 above threshold.
# m = 5: 0.856904 above threshold.
#
# Device mean probabilities:
# m = 2: 0.773760 above threshold.
# m = 3: 0.794875 above threshold.
# m = 4: 0.722860 above threshold.
# m = 5: 0.692935 above threshold.
##############################################################################
#
# We see that the ideal probabilities are well over 2/3. In fact, they're quite
# close to the expected value of :math:`(1 + \ln 2)/2`, which we recall from
# above is :math:`\approx 0.85`. For this experiment, we see that the device
# probabilities are also above the threshold. But it isn't enough that just the
# mean of the heavy output probabilities is greater than 2/3. Since we're
# dealing with randomness, we also want to ensure these results were not just a
# fluke! To be confident, we also want to be above 2/3 within 2 standard
# deviations :math:`(\sigma)` of the mean. This is referred to as a 97.5%
# confidence interval (since roughly 97.5% of a normal distribution sits within
# :math:`2\sigma` of the mean.)
#
# At this point, we're going to do some statistical sorcery and make some
# assumptions about our distributions. Whether or not a circuit is successful
# (in the sense that it produces heavy outputs more than 2/3 of the time) is a
# binary outcome. When we sample many circuits, it is almost like we are
# sampling from a *binomial distribution* where the outcome probability is
# equivalent to the heavy output probability. In the limit of a large number of
# samples (in this case 200 circuits), a binomial distribution starts to look
# like a normal distribution. If we make this approximation, we can compute the standard
# deviation and use it to make our confidence interval. With the normal
# approximation, the standard deviation is
#
# .. math::
#
# \sigma = \sqrt{\frac{p_h(1 - p_h)}{N}},
#
# where :math:`p_h` is the average heavy output probability, and :math:`N` is
# the number of circuits.
#
# Standard deviation of the heavy-output probability under the normal
# approximation to the binomial: sqrt(p_h * (1 - p_h) / N), N = num_trials.
stds_ideal = np.sqrt(probs_mean_ideal * (1 - probs_mean_ideal) / num_trials)
stds_noisy = np.sqrt(probs_mean_noisy * (1 - probs_mean_noisy) / num_trials)
##############################################################################
#
# Now that we have our standard deviations, let's see if our means are at least
# :math:`2\sigma` away from the threshold!
#
# One histogram of the num_trials heavy-output probabilities per width.
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(9, 6))
ax = ax.ravel()
# Here m is a zero-based subplot index; the circuit width is m + min_m.
for m in range(min_m - 2, max_m + 1 - 2):
    ax[m].hist(probs_noisy[m, :])
    ax[m].set_title(f"m = {m + min_m}", fontsize=16)
    ax[m].set_xlabel("Heavy output probability", fontsize=14)
    ax[m].set_ylabel("Occurrences", fontsize=14)
    # Mark the 2/3 threshold, the mean, and the lower 2-sigma edge.
    ax[m].axvline(x=2.0 / 3, color="black", label="2/3")
    ax[m].axvline(x=probs_mean_noisy[m], color="red", label="Mean")
    ax[m].axvline(
        x=(probs_mean_noisy[m] - 2 * stds_noisy[m]),
        color="red",
        linestyle="dashed",
        label="2σ",
    )
fig.suptitle("Heavy output distributions for (simulated) Ourense QPU", fontsize=18)
plt.legend(fontsize=14)
plt.tight_layout()
##############################################################################
#
# .. figure:: ../demonstrations/quantum_volume/ourense_heavy_output_distributions.svg
# :align: center
# :width: 90%
#
##############################################################################
#
# Let's verify this numerically:
#
# Lower edge of the 2-sigma confidence interval; passing the protocol at
# width idx + min_m requires this value to exceed 2/3.
two_sigma_below = probs_mean_noisy - 2 * stds_noisy
for idx, prob in enumerate(two_sigma_below):
    print(f"m = {idx + min_m}: {prob:.6f} {'above' if prob > 2/3 else 'below'} threshold.")
##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# m = 2: 0.714590 above threshold.
# m = 3: 0.737770 above threshold.
# m = 4: 0.659562 below threshold.
# m = 5: 0.627701 below threshold.
#
##############################################################################
#
# We see that we are :math:`2\sigma` above the threshold only for :math:`m=2`,
# and :math:`m=3`. Thus, we find that the quantum volume of our simulated Ourense is
# :math:`\log_2 V_Q = 3`, or :math:`V_Q = 8`, as expected.
#
# This framework and code will allow you to calculate the quantum volume of many
# different processors. Try it yourself! What happens if we don't specify a
# large amount of compiler optimization? How does the volume compare across
# different hardware devices? You can even build your own device configurations
# and noise models to explore the extent to which different factors affect the
# volume.
#
# Concluding thoughts
# -------------------
#
# Quantum volume is a metric used for comparing the quality of different quantum
# computers. By determining the largest square random circuits a processor can
# run reliably, it provides a measure of the effective number of qubits a
# processor has. Furthermore, it goes beyond just gauging quality by a number of
# qubits --- it incorporates many different aspects of a device such as its
# compiler, qubit connectivity, and gate error rates.
#
# However, as with any benchmark, it is not without limitations. A key one
# already discussed is that the heavy output generation problem requires us to
# simulate circuits classically in addition to running them on a device. While
# this is perhaps not an issue now, it will surely become one in the future. The
# number of qubits continues to increase and error rates are getting lower,
# both of which imply that our square circuits will be growing in both width and
# depth as time goes on. Eventually they will reach a point where they are no
# longer classically simulable and we will have to design new benchmarks.
#
# Another limitation is that the protocol only looks at one type of circuit,
# i.e., square circuits. It might be the case that a processor has very few
# qubits, but also very low error rates. For example, what if a processor with 5
# qubits can run circuits with up to 20 layers? Quantum volume would limit us to
# :math:`\log_2 V_Q = 5` and the high quality of those qubits is not reflected
# in this. To that end, a more general *volumetric benchmark* framework was
# proposed that includes not only square circuits, but also rectangular circuits
# [#robin]_. Investigating very deep circuits on few qubits (and very shallow
# circuits on many qubits) will give us a broader overview of a processor's
# quality. Furthermore, the flexibility of the framework of [#robin]_ will
# surely inspire us to create new types of benchmarks. Having a variety of
# benchmarks calculated in different ways is beneficial and gives us a broader
# view of the performance of quantum computers.
#
#
# .. _quantum_volume_references:
#
# References
# ----------
#
# .. [#top500]
#
# `<https://www.top500.org/>`__
#
# .. [#linpack]
#
# `<https://www.top500.org/project/linpack/>`__
#
# .. [#cross]
#
# Cross, A. W., Bishop, L. S., Sheldon, S., Nation, P. D., & Gambetta, J. M.,
# Validating quantum computers using randomized model circuits, `Physical
# Review A, 100(3), (2019). <http://dx.doi.org/10.1103/physreva.100.032328>`__
#
# .. [#robin]
#
# Blume-Kohout, R., & Young, K. C., A volumetric framework for quantum
# computer benchmarks, `Quantum, 4, 362 (2020).
# <http://dx.doi.org/10.22331/q-2020-11-15-362>`__
#
# .. [#aaronson]
#
# Aaronson, S., & Chen, L., Complexity-theoretic foundations of quantum supremacy experiments.
# `arXiv 1612.05903 quant-ph <https://arxiv.org/abs/1612.05903>`__
#
# .. [#cmu]
#
# O'Donnell, R. CMU course: Quantum Computation and Quantum Information 2018.
# `Lecture 25 <https://www.cs.cmu.edu/~odonnell/quantum18/lecture25.pdf>`__
#
# .. [#qv64]
#
# Jurcevic et al. Demonstration of quantum volume 64 on a superconducting quantum computing system.
# `arXiv 2008.08571 quant-ph <https://arxiv.org/abs/2008.08571>`__
#
# .. [#honeywell]
#
# `<https://www.honeywell.com/en-us/newsroom/news/2020/09/achieving-quantum-volume-128-on-the-honeywell-quantum-computer>`__
#
# .. [#ionq]
#
# `<https://www.prnewswire.com/news-releases/ionq-unveils-worlds-most-powerful-quantum-computer-301143782.html>`__
#
# .. [#sabre]
#
# Li, G., Ding, Y., & Xie, Y., Tackling the qubit mapping problem for
# nisq-era quantum devices, `In Proceedings of the Twenty-Fourth
# International Conference on Architectural Support for Programming Languages
# and Operating Systems (pp. 1001–1014)
# (2019). <https://dl.acm.org/doi/10.1145/3297858.3304023>`__ New York, NY,
# USA: Association for Computing Machinery.
#
#
# About the author
# ----------------
# .. include:: ../_static/authors/olivia_di_matteo.txt | [
"noreply@github.com"
] | quantshah.noreply@github.com |
d2f9d512547e26c1ad69be07364a71dcada3972a | 30323e6d5e179994cc25438def9de3dfc07be4a5 | /src/aulas/06.py | 3c4f9ebdb4be7f0f40b9c691bef140cfa1130c9b | [] | no_license | claudimf/python_oo_2 | 31f9c065be6bd9905fe85c6ea5b8cc715cc4e463 | 76b23a0a60433fbe62775aae9e1f0cd8af0b324b | refs/heads/main | 2023-03-18T09:09:48.044179 | 2021-03-11T19:23:44 | 2021-03-11T19:23:44 | 346,146,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | class Programa:
    def __init__(self, nome, ano):
        """Initialize a program with a title-cased name ("nome"), its year
        ("ano"), and a like counter starting at zero."""
        self._nome = nome.title()
        self.ano = ano
        self._likes = 0
    @property
    def likes(self):
        """Read-only like counter; increment it through dar_like()."""
        return self._likes
    def dar_like(self):
        """Register one like ("dar like" = "give a like")."""
        self._likes += 1
    @property
    def nome(self):
        """The program's display name ("nome" = "name")."""
        return self._nome
    @nome.setter
    def nome(self, novo_nome):
        """Set the name, normalising it to title case."""
        self._nome = novo_nome.title()
class Filme(Programa):
    """A movie ("filme"): a Programa with a running time in minutes."""

    def __init__(self, nome, ano, duracao):
        """Create a movie from its name, release year, and duration."""
        super().__init__(nome, ano)
        self.duracao = duracao

    def imprime(self):
        """Print a one-line summary: name - year - duration - likes."""
        resumo = f'{self._nome} - {self.ano} - {self.duracao} min - {self._likes} Likes'
        print(resumo)
class Serie(Programa):
    """A TV series ("série"): a Programa with a season count."""

    def __init__(self, nome, ano, temporadas):
        """Create a series from its name, year, and number of seasons."""
        super().__init__(nome, ano)
        self.temporadas = temporadas

    def imprime(self):
        """Print a one-line summary: name - year - seasons - likes."""
        resumo = f'{self._nome} - {self.ano} - {self.temporadas} temporadas - {self._likes} Likes'
        print(resumo)
# Demo: build one movie and one series, register a few likes, and collect
# them for printing ("imprimindo" = "printing").
vingadores = Filme('vingadores - guerra infinita', 2018, 160)
vingadores.dar_like()
atlanta = Serie('atlanta', 2018, 2)
atlanta.dar_like()
atlanta.dar_like()
filmes_e_series = [vingadores, atlanta]
print('\nimprimindo...\n')
for programa in filmes_e_series:
programa.imprime() | [
"claudi.freitas.prs@synergiaconsultoria.com.br"
] | claudi.freitas.prs@synergiaconsultoria.com.br |
56561bec3c40305d5c936acd30ffbfb98423bb19 | c6a4069e265325e836e4ee79fae0f5490f1a1c47 | /main/fight.py | ecdf67993300f502fc89fad3b4f83cdf03f39bd1 | [] | no_license | astoeff/clean-code-course-project | b2ca1d10b226ea95b602d2535810c9af5aadb244 | 2b64956ea1b33cba405ccd500bf1a5472a65e9c4 | refs/heads/master | 2022-11-19T05:04:20.992189 | 2020-07-17T17:12:59 | 2020-07-17T17:12:59 | 274,676,681 | 0 | 0 | null | 2020-07-17T17:13:00 | 2020-06-24T13:32:49 | Python | UTF-8 | Python | false | false | 2,299 | py | from constants import (DIRECTIONS_WITH_THEIR_OPPOSITES_DICTIONARY, FIGHT_INITIAL_INFORMATION_PART,
PLAYER_ZERO_DAMAGE_WHEN_ATTACKING, FIGHT_HERO_ATTACK_INFORMATION_PART,
FIGHT_HERO_CANNOT_ATTACK_INFORMATION_PART, FIGHT_ENEMY_ATTACK_INFORMATION_PART,
FIGHT_ENEMY_CANNOT_ATTACK_INFORMATION_PART)
class Fight:
    """Turn-based fight between a hero and an enemy.

    Each round the hero strikes first, then the enemy (if still alive).
    Every action is appended to a human-readable log that can be read back
    afterwards via get_fight_information().
    """

    def __init__(self, hero, enemy, distance=0, direction=0):
        self.hero = hero
        self.enemy = enemy
        self.distance = distance
        self.direction = direction
        # Fight log, seeded with the opening line describing the enemy.
        self.information_parts = [FIGHT_INITIAL_INFORMATION_PART + str(self.enemy)]

    @property
    def opposite_direction(self):
        """The direction opposite to the fight's direction."""
        return DIRECTIONS_WITH_THEIR_OPPOSITES_DICTIONARY[self.direction]

    @property
    def oposite_direction(self):
        """Deprecated misspelled alias of opposite_direction, kept so
        existing callers keep working."""
        return self.opposite_direction

    def set_information_parts(self, part_to_append):
        """Append one entry to the fight log."""
        self.information_parts.append(part_to_append)

    def _perform_attack(self, attacker, defender, attack_part, cannot_attack_part):
        """Resolve one attack: log it, then apply the damage to the defender.

        A damage equal to PLAYER_ZERO_DAMAGE_WHEN_ATTACKING means the
        attacker could not attack, which is logged with the dedicated
        message; the (zero) damage is still applied, as before.
        """
        damage_from_attack = attacker.attack()
        if damage_from_attack == PLAYER_ZERO_DAMAGE_WHEN_ATTACKING:
            self.set_information_parts(cannot_attack_part)
        else:
            self.set_information_parts(attack_part + str(damage_from_attack))
        defender.take_damage(damage_from_attack)

    def hero_attack(self):
        """The hero attacks the enemy."""
        self._perform_attack(
            self.hero, self.enemy,
            FIGHT_HERO_ATTACK_INFORMATION_PART,
            FIGHT_HERO_CANNOT_ATTACK_INFORMATION_PART,
        )

    def enemy_attack(self):
        """The enemy attacks the hero."""
        self._perform_attack(
            self.enemy, self.hero,
            FIGHT_ENEMY_ATTACK_INFORMATION_PART,
            FIGHT_ENEMY_CANNOT_ATTACK_INFORMATION_PART,
        )

    def execute(self):
        """Run the fight until one side dies; the hero always strikes first.

        The loop condition re-checks both fighters each round, so the
        original redundant hero.is_alive() guard inside the round was
        dropped (it was always true at that point).
        """
        while self.hero.is_alive() and self.enemy.is_alive():
            self.hero_attack()
            if self.enemy.is_alive():
                self.enemy_attack()

    def get_fight_information(self):
        """Return the accumulated fight log."""
        return self.information_parts
| [
"antoni.1998@abv.bg"
] | antoni.1998@abv.bg |
38f1ba00d5c6f04f70636de75b00cc1ff16b61a0 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/tdeboissiere_DeepLearningImplementations/DeepLearningImplementations-master/DenseRecNet/run_cifar10.py | 8954c52f75b145ad8895faf0eb255a39e448500e | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 7,214 | py | from __future__ import print_function
import os
import time
import json
import argparse
import denserecnet
import numpy as np
import keras.backend as K
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.utils import np_utils
def run_cifar10(batch_size,
                nb_epoch,
                depth,
                nb_dense_block,
                nb_filter,
                growth_rate,
                dropout_rate,
                learning_rate,
                weight_decay,
                plot_architecture):
    """ Run CIFAR10 experiments

    Trains a DenseNet-style model on CIFAR-10 with a manual batch loop and
    writes per-epoch train/test metrics to ./log/experiment_log_cifar10.json.

    :param batch_size: int -- batch size
    :param nb_epoch: int -- number of training epochs
    :param depth: int -- network depth
    :param nb_dense_block: int -- number of dense blocks
    :param nb_filter: int -- initial number of conv filter
    :param growth_rate: int -- number of new filters added by conv layers
    :param dropout_rate: float -- dropout rate
    :param learning_rate: float -- learning rate
    :param weight_decay: float -- weight decay
    :param plot_architecture: bool -- whether to plot network architecture
    """
    ###################
    # Data processing #
    ###################
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    nb_classes = len(np.unique(y_train))
    img_dim = X_train.shape[1:]
    # K.image_dim_ordering() is the Keras 1 API name: "th" = channels-first,
    # "tf" = channels-last (renamed image_data_format in later Keras).
    if K.image_dim_ordering() == "th":
        n_channels = X_train.shape[1]
    else:
        n_channels = X_train.shape[-1]
    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    # Normalisation
    # Per-channel statistics are computed over train and test combined.
    X = np.vstack((X_train, X_test))
    # 2 cases depending on the image ordering
    if K.image_dim_ordering() == "th":
        for i in range(n_channels):
            mean = np.mean(X[:, i, :, :])
            std = np.std(X[:, i, :, :])
            X_train[:, i, :, :] = (X_train[:, i, :, :] - mean) / std
            X_test[:, i, :, :] = (X_test[:, i, :, :] - mean) / std
    elif K.image_dim_ordering() == "tf":
        for i in range(n_channels):
            mean = np.mean(X[:, :, :, i])
            std = np.std(X[:, :, :, i])
            X_train[:, :, :, i] = (X_train[:, :, :, i] - mean) / std
            X_test[:, :, :, i] = (X_test[:, :, :, i] - mean) / std
    ###################
    # Construct model #
    ###################
    model = denserecnet.DenseNet(nb_classes,
                                 img_dim,
                                 depth,
                                 nb_dense_block,
                                 growth_rate,
                                 nb_filter,
                                 dropout_rate=dropout_rate,
                                 weight_decay=weight_decay)
    # Model output
    model.summary()
    # Build optimizer
    opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=["accuracy"])
    if plot_architecture:
        from keras.utils.visualize_util import plot
        plot(model, to_file='./figures/densenet_archi.png', show_shapes=True)
    ####################
    # Network training #
    ####################
    print("Training")
    list_train_loss = []
    list_test_loss = []
    list_learning_rate = []
    for e in range(nb_epoch):
        # Step learning-rate schedule: /10 at 50% and /100 at 75% of training.
        if e == int(0.5 * nb_epoch):
            K.set_value(model.optimizer.lr, np.float32(learning_rate / 10.))
        if e == int(0.75 * nb_epoch):
            K.set_value(model.optimizer.lr, np.float32(learning_rate / 100.))
        split_size = batch_size
        # NOTE(review): num_splits is a float; np.array_split accepts it
        # (casts the section count to int), but // would be clearer.
        num_splits = X_train.shape[0] / split_size
        arr_splits = np.array_split(np.arange(X_train.shape[0]), num_splits)
        l_train_loss = []
        start = time.time()
        for batch_idx in arr_splits:
            X_batch, Y_batch = X_train[batch_idx], Y_train[batch_idx]
            train_logloss, train_acc = model.train_on_batch(X_batch, Y_batch)
            l_train_loss.append([train_logloss, train_acc])
        test_logloss, test_acc = model.evaluate(X_test,
                                                Y_test,
                                                verbose=0,
                                                batch_size=64)
        list_train_loss.append(np.mean(np.array(l_train_loss), 0).tolist())
        list_test_loss.append([test_logloss, test_acc])
        list_learning_rate.append(float(K.get_value(model.optimizer.lr)))
        # to convert numpy array to json serializable
        print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))
        # The log file is rewritten after every epoch so partial runs are
        # still recorded.
        d_log = {}
        d_log["batch_size"] = batch_size
        d_log["nb_epoch"] = nb_epoch
        d_log["optimizer"] = opt.get_config()
        d_log["train_loss"] = list_train_loss
        d_log["test_loss"] = list_test_loss
        d_log["learning_rate"] = list_learning_rate
        json_file = os.path.join('./log/experiment_log_cifar10.json')
        with open(json_file, 'w') as fp:
            json.dump(d_log, fp, indent=4, sort_keys=True)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run CIFAR10 experiment')
    parser.add_argument('--batch_size', default=64, type=int,
                        help='Batch size')
    parser.add_argument('--nb_epoch', default=30, type=int,
                        help='Number of epochs')
    parser.add_argument('--depth', type=int, default=7,
                        help='Network depth')
    parser.add_argument('--nb_dense_block', type=int, default=1,
                        help='Number of dense blocks')
    parser.add_argument('--nb_filter', type=int, default=16,
                        help='Initial number of conv filters')
    parser.add_argument('--growth_rate', type=int, default=12,
                        help='Number of new filters added by conv layers')
    parser.add_argument('--dropout_rate', type=float, default=0.2,
                        help='Dropout rate')
    parser.add_argument('--learning_rate', type=float, default=1E-3,
                        help='Learning rate')
    parser.add_argument('--weight_decay', type=float, default=1E-4,
                        help='L2 regularization on weights')
    # NOTE(review): argparse type=bool is a known trap — any non-empty
    # string (including "False") parses as True.
    parser.add_argument('--plot_architecture', type=bool, default=False,
                        help='Save a plot of the network architecture')
    args = parser.parse_args()
    # Echo the configuration.
    # NOTE(review): parse_args() is called a second time here; reusing
    # args._get_kwargs() would avoid reparsing.
    print("Network configuration:")
    for name, value in parser.parse_args()._get_kwargs():
        print(name, value)
    # Create the output directories expected by run_cifar10.
    list_dir = ["./log", "./figures"]
    for d in list_dir:
        if not os.path.exists(d):
            os.makedirs(d)
    run_cifar10(args.batch_size,
                args.nb_epoch,
                args.depth,
                args.nb_dense_block,
                args.nb_filter,
                args.growth_rate,
                args.dropout_rate,
                args.learning_rate,
                args.weight_decay,
                args.plot_architecture)
| [
"659338505@qq.com"
] | 659338505@qq.com |
b66b465a380d4e26fe2fe4a7d4d23968a5dc804e | e797d6ec2088b3471d15ce802f1d79d931194f3a | /NonRPFRasterLoader_ToolValidator.py | 3d574f94818729ab191b74a13b0639f4dc7003ef | [] | no_license | mfunk/MA-Storage | 20a5427644b4cd7929e5e07c5af35c79839de0d6 | c103f346111c1c4d46408d69be46f8bc1ddddc3a | refs/heads/master | 2020-04-15T23:42:38.718055 | 2014-07-21T16:57:31 | 2014-07-21T16:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,660 | py | #!/usr/bin/env python
class ToolValidator:
    """Class for validating a tool's parameter values and controlling
    the behavior of the tool's dialog.

    Parameter layout:
        0 - Input catalog
        1 - Input rasters (multiple)
        2 - Product
        3 - Scale
        4 - Series
        5 - Configuration Keyword
        6 - Output catalog
    """

    def __init__(self):
        """Setup the Geoprocessor and the list of tool parameters."""
        import arcgisscripting as ARC
        self.GP = ARC.create(9.3)
        self.params = self.GP.getparameterinfo()

    def initializeParameters(self):
        """Refine the properties of a tool's parameters. This method is
        called when the tool is opened."""
        # The output catalog mirrors the schema of the input catalog.
        self.params[6].ParameterDependencies = [0]
        self.params[6].Schema.Clone = True
        self.params[6].Schema.FieldsRule = "All"
        return

    def updateParameters(self):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parmater
        has been changed."""
        return

    def updateMessages(self):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        # Check that the catalog is a valid MA RPF catalog.
        if self.params[0].Altered:
            input_catalog = str(self.params[0].Value)
            if not self.ValidRPFCatalog(input_catalog):
                # BUG FIX: was self.GP.params[0], which addressed the
                # geoprocessor instead of the tool's parameter list.
                self.params[0].SetErrorMessage(
                    "Input catalog is not a valid Military Analyst RPF catalog.")
        # Enforce maximum string lengths.
        if self.params[2].Altered:
            if len(self.params[2].Value) > 25:
                self.params[2].SetErrorMessage(
                    "Product string exceeds maximum length of 25 characters.")
        if self.params[4].Altered:
            if len(self.params[4].Value) > 5:
                self.params[4].SetErrorMessage(
                    "Series string exceeds maximum length of 5 characters.")
        return

    def ValidRPFCatalog(self, inputTable):
        """Return True when inputTable carries all the fields of a Military
        Analyst RPF catalog (PRODUCT, SERIES, SCALE, FULL_NAME)."""
        required_fields = ("PRODUCT", "SERIES", "SCALE", "FULL_NAME")
        for field_name in required_fields:
            # ListFields returns a (possibly empty) list of matching fields.
            # BUG FIX: the original indexed [0] into each list, raising
            # IndexError when a field was absent instead of returning False.
            if not self.GP.ListFields(inputTable, field_name, "*"):
                return False
        return True
| [
"mfunk@esri.com"
] | mfunk@esri.com |
197d0304d650472767591fbed6177db4e15135d0 | c107c05bc16b53cd057508e18a0dbe9854343a13 | /tests/export/__init__.py | b33ec61af5bd25a717eef9fba9db148763c33b4b | [] | no_license | hkmshb/gridix.web | d80b7561aade5f77bcc43257742d8e56a628bf2e | bda0adf5465a085b0337a8f749c87a21b73b7741 | refs/heads/master | 2021-09-07T08:36:25.089579 | 2017-10-02T09:49:52 | 2017-10-02T09:49:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | # package: tests.export | [
"hkmshb@gmail.com"
] | hkmshb@gmail.com |
6f87adaee3c5827635ea027b2d5a1ba7c53ad949 | 2df1bce0d11ba43ad213f887b68b8bc1e1e41d33 | /bin/terminal.py | 517ecda5789203347258f60e8731864ddb316bed | [] | no_license | rheiland/pc4training | 80aead99e7859ec004044985492db736c8e0c6e4 | d37af1d9c0db228254b7679fe04cdff88d1558a1 | refs/heads/master | 2020-09-06T02:54:58.824853 | 2019-11-13T16:53:12 | 2019-11-13T16:53:12 | 220,296,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from ipywidgets import Output
from IPython.display import display, HTML
class TerminalTab(object):
def __init__(self):
# self.tab = Output(layout={'height': '600px'})
self.tab = Output(layout={'height': 'auto'})
self.tab.append_display_data(HTML(filename='doc/about.html'))
from ipywidgets import HTML, Tab, Layout
tab = Tab([HTML(value="<iframe width='100%' height='100%' src='../terminals/new'></iframe>", layout=Layout(height='600px'))])
tab.set_title(0, "Terminal")
display(tab)
| [
"heiland@indiana.edu"
] | heiland@indiana.edu |
1d4357ed6ca5e069e3e9bd0e47f3243eb8abe665 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/kikoAnalysis/wndResFiles/70-tideGauge.py | 782e71b5030c2122e3b4de8378784692225ba5a2 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 11:52:48 2020
@author: Michael Tadesse
"""
import os
import pandas as pd
dir_in = "/lustre/fs0/home/mtadesse/eraFiveConcat"
os.chdir(dir_in)
tgList = os.listdir()
x = 70
y = 71
#looping through individual tide gauges
for ii in range(x, y):
os.chdir(tgList[ii])
print(tgList[ii])
uwnd = pd.read_csv('wnd_u.csv')
vwnd = pd.read_csv('wnd_v.csv')
#check sizes of uwnd and vwnd
if uwnd.shape == vwnd.shape:
print("all good!")
else:
print("sizes not equal")
uwnd.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1, inplace = True)
vwnd.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1, inplace = True)
#sort by date
uwnd = uwnd.sort_values(by = 'date')
vwnd = vwnd.sort_values(by = 'date')
#reset indices
uwnd.reset_index(inplace = True)
vwnd.reset_index(inplace = True)
uwnd.drop(['index'], axis = 1, inplace = True)
vwnd.drop(['index'], axis = 1, inplace = True)
#get squares of uwnd and vwnd
uSquare = uwnd.iloc[:, 1:]**2
vSquare = vwnd.iloc[:, 1:]**2
#sum and take square root
wndResultant = (uSquare + vSquare)**0.5
wndResultant = pd.concat([pd.DataFrame(uwnd['date']), wndResultant], axis = 1)
#save file
wndResultant.to_csv("wndRest.csv")
os.chdir(dir_in)
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
469e93af04882d13a845caada52a607ad02fef3e | 1c52ae4b10bb033e8f65a66254a13ba8a22d5e03 | /helium/common/permissions.py | 18f57d68c540967cc5992d700522d0141e76e1a9 | [
"MIT"
] | permissive | vaibhavmathur91/platform | 2fa488e449b02e7a82e4759517663822addb6a34 | 529b7047fbbcdbcfc4766156331da1b6c9ced0fa | refs/heads/master | 2020-03-18T15:57:27.883761 | 2018-05-25T23:13:54 | 2018-05-25T23:13:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | import logging
from rest_framework import permissions
__author__ = 'Alex Laird'
__copyright__ = 'Copyright 2018, Helium Edu'
__version__ = '1.0.0'
logger = logging.getLogger(__name__)
class IsOwner(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return obj.get_user() == request.user
| [
"alexdlaird@gmail.com"
] | alexdlaird@gmail.com |
39b37f4b8dad13e40b2b69dff70d5dfde738a0cf | 37f675391762db798b712a0da0b760f03adc3b44 | /NEWS_HUB/bin/gunicorn | e9b7c5956d85b26d498dd35f1f5cf362f4575eb6 | [
"MIT"
] | permissive | Ken-mbira/News_Hub | 03c7d9d25b0e9b85949d0b3c9052369e0ee34f2c | c68768dd4f958c9dc74300d036ad69c518d3ce80 | refs/heads/master | 2023-08-06T18:16:03.668655 | 2021-09-13T12:57:11 | 2021-09-13T12:57:11 | 404,722,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | #!/home/kenmbira/Documents/MoringaProjects/Week8/NEWS_HUB/NEWS_HUB/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"ken.mbira@student.moringaschool.com"
] | ken.mbira@student.moringaschool.com | |
b674a146368d20c7b0b4c6656597d6da34be28f3 | acc3bfb8d0cdfbb0c762523b9923382570928ed1 | /backend/manage.py | 74a62f9af36b42bdb52667a28cf7a83ba85c1431 | [] | no_license | crowdbotics-apps/my-art-23726 | 30266909ff78876c3cf0c1636c76ba23afd930a2 | 50d905b644c425ab4562d0d3bf6b1ccbae5c715f | refs/heads/master | 2023-02-16T05:36:51.832504 | 2021-01-07T23:31:07 | 2021-01-07T23:31:07 | 327,746,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_art_23726.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
27c7c3c09333b28580d5cf8203bef60cb0b40592 | 05d5945350fe64f6c1235d4f12ee22323167ca0c | /snakemake/configs/candida_Kadosh_June_06_2019.py | 713abf1d1fd3084db06fb822056f0c1fb0fbf5f7 | [
"BSD-2-Clause"
] | permissive | saketkc/re-ribo-smk | 674d4423830bbae3a32f46146ffd362514047a60 | c9326cbafdfa060e22e9af692d9146c37f5035ba | refs/heads/master | 2021-07-12T18:46:37.772947 | 2020-05-30T01:41:13 | 2020-05-30T01:41:13 | 148,952,525 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,101 | py | RAWDATA_DIR ='/staging/as/skchoudh/rna/June_06_2019_Kadosh_C_albicans_Flu/mapped_mRNA_only_collapsed/bams'
OUT_DIR ='/staging/as/skchoudh/rna/June_06_2019_Kadosh_C_albicans_Flu_riboraptor'
#OUT_DIR ='/home/cmb-06/as/skchoudh/rna/July_07_2018_Kadosh_Flu_Film_Planktonic_merged_ribocop_feb2019_longest'
GENOME_FASTA = '/home/cmb-06/as/skchoudh/genomes/C_albicans_SC5314/Assembly22/fasta/C_albicans_SC5314_version_A22-s07-m01-r27_chromosomes.fasta'
CHROM_SIZES = '/home/cmb-06/as/skchoudh/genomes/C_albicans_SC5314/Assembly22/fasta/C_albicans_SC5314_version_A22-s07-m01-r27_chromosomes.sizes'
STAR_INDEX = '/home/cmb-06/as/skchoudh/genomes/C_albicans_SC5314/Assembly22/star_mrna_index'
GTF = '/home/cmb-06/as/skchoudh/genomes/C_albicans_SC5314/Assembly22' + '/annotation/' + 'C_albicans_SC5314_version_A22-s07-m01-r27_features.gtf'
GENE_BED = '/home/cmb-06/as/skchoudh/genomes/C_albicans_SC5314/Assembly22/annotation/C_albicans_SC5314_version_A22-s07-m01-r27_features.bed12'
STAR_CODON_BED = '/home/cmb-06/as/skchoudh/genomes/C_albicans_SC5314/Assembly22/annotation/C_albicans_SC5314_version_A22-s07-m01-r50_features.encode.gffutils.start_codon.bed'
STOP_CODON_BED = '/home/cmb-06/as/skchoudh/genomes/C_albicans_SC5314/Assembly22/annotation/C_albicans_SC5314_version_A22-s07-m01-r50_features.encode.gffutils.stop_codon.bed'
CDS_BED = '/home/cmb-06/as/skchoudh/genomes/C_albicans_SC5314/Assembly22' + '/annotation/' + '/C_albicans_SC5314_version_A22-s07-m01-r27_features.gtfTogenePred.genePredToBed6'
UTR5_BED = '/home/cmb-06/as/skchoudh/github_projects/C_albicans_project/Bruno_et_al_2014_data/Bruno_et_al_UTR5.A21toA22_hapAB_merged.collapsed.named_uniquehits.bed'
UTR3_BED = '/home/cmb-06/as/skchoudh/github_projects/C_albicans_project/Bruno_et_al_2014_data/Bruno_et_al_UTR3.A21toA22_hapAB_merged.collapsed.named_uniquehits.bed'
INTRON_BED = '/home/cmb-06/as/skchoudh/github_projects/C_albicans_project/Bruno_et_al_2014_data/Bruno_et_al_UTR5.A21toA22_hapAB_merged.collapsed.named_uniquehits.bed'
ORIENTATIONS = ['5prime', '3prime']
STRANDS = ['pos', 'neg', 'combined']
FRAGMENT_LENGTHS = range(18, 39)
| [
"saketkc@gmail.com"
] | saketkc@gmail.com |
d7e27c0c62004c1e8373b6bb6984fb7b6c32f33c | 3c831000ed8639c9187df6252ec7077a9a31d7df | /calender_visualizer.py | 34e5dbebaaeb66d368c3e8cc89df18b378a0cea7 | [] | no_license | robbynickles/mjc_schedulerB | 0790f7e1ddeba87c5c28e81e923e44338fd3ef54 | fa09632972ea071b7e629df479c2af1093add97f | refs/heads/master | 2021-01-02T08:34:05.965140 | 2014-07-27T17:15:20 | 2014-07-27T17:15:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,373 | py | from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.slider import Slider
from kivy.graphics import Color, Bezier, Line, Rectangle
from random import randint
half = [ i +':00'for i in ['12', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11']]
curry_me = lambda s: lambda i: i + s
am = curry_me('A')
pm = curry_me('P')
times = map(am, half) + map(pm, half)
relevant_times = times[6:23]
days = dict( zip( ['M','T','W','TH','F'], range(5) ) )
def split_time(hour):
colon = hour.find(':')
return hour[:colon] + ":00" + hour[-1], int(hour[colon+1:-1])
def set_origin(ox, oy):
def give_point(day,hour):
hour, min = split_time(hour)
x_gap, y_gap = 100, 20
ret = [days[day]*x_gap + ox, relevant_times.index(hour)*y_gap + (y_gap*(min/60.0)) + oy]
return ret
return give_point
def extract_times( time_string ):
spl = time_string.split()
if len(spl) >= 3:
return spl[0], spl[2]
else:
return '06:00A', '06:00A'
give_widg_point = set_origin(-200, -200)
give_canv_point = set_origin(190, 96)
class Calender(FloatLayout):
def __init__(self, *args, **kwargs):
super(Calender, self).__init__(*args, **kwargs)
self.d = 10
self.current_point = None
self.build_border()
def build_border( self ):
for d in days.keys():
pos = give_widg_point(d,'06:00A')
pos[1] -= 40
self.add_widget(Label(text=d, pos=pos))
for t in relevant_times:
pos = give_widg_point('M',t)
pos[0] -= 100
self.add_widget(Label(text=t, pos=pos))
def add_block( self, day_list, start, end, color, text ):
for day in day_list:
if day in days.keys():
# A course may have non-traditional days and times that don't
# need calender representation.
p1, p2 = give_canv_point(day, start), give_canv_point(day, end)
canvas_holder = FloatLayout(pos=(0,0), size=(1000, 100))
r,g,b = color
with canvas_holder.canvas:
#Color(r,g,b)
Color(123, 23, 89)
Rectangle(size=(100,p2[1]-p1[1]), pos=p1)
self.add_widget(canvas_holder)
def add_course( self, course_dict ):
color = randint(0,255), randint(0,255), randint(0,255)
time_data = zip(course_dict['Days'], course_dict['Times'], course_dict['Type'])
for d_list, t, ty in time_data:
start, end = extract_times(t)
self.add_block( d_list, start, end, color, ty )
course_dict = {'Name': ['MART-175'], 'Title': ['Color Photography', '08/25/14-12/13/14', 'Material Fee = $45.00'], 'Section': ['2672'], 'Days': ['T', 'TH', 'T', 'TH'], 'Times': ['01:15P - 02:40P', '02:40P - 04:05P', '02:40P - 04:05P', '01:15P - 02:40P'], 'Avail': ['Open'], 'Location': ['MADM 208, West', 'MADM 208, West'
, 'MADM 208, West', 'MADM 208, West'], 'Units': ['3'], 'Instructor': ['Staff03'], 'Type': ['LEC', 'LAB', 'LAB', 'LAB'], 'Important Notes': [''], 'Max/': ['20/11']}
if __name__ == '__main__':
class Main(App):
def build(self):
c = Calender()
c.add_course( course_dict )
return c
main = Main()
main.run()
| [
"r.nickles7@gmail.com"
] | r.nickles7@gmail.com |
30cb565d98d74bd3d69b3c3fff6406b5afe0b5c6 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/xcy.py | 3b2e226fad8e4672903657ebf133328412969648 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'xcY':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
1c29dc1ecfe316faa78fb563f818acb64a7520f9 | a613e5ec5d996bb1a60e6f4d417f44fe7241f867 | /Arrays/Merge overlapping ranges.py | 9bbaea8f72939902888ff0dd2ad24449a7cd16ba | [] | no_license | Nisar-1234/Data-structures-and-algorithms-1 | f0e2d9e63ee8baa35f12d106ee879ccb060c4caf | 777634f01d8b10a92a97c927ec09499ba08a28a4 | refs/heads/main | 2023-06-26T04:03:30.474025 | 2021-07-28T19:33:28 | 2021-07-28T19:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | def merge(ranges):
ranges.sort()
stack = []
stack.append(ranges[0])
for i in range(1,len(ranges)):
if ranges[i][0] <= stack[-1][1]:
stack[-1][1] = max(stack[-1][1],ranges[i][1])
else:
stack.append(ranges[i])
return stack
ranges = [[2,13],[8,20]]
print(merge(ranges)) | [
"noreply@github.com"
] | Nisar-1234.noreply@github.com |
02f1ceb7c09effbcdc1c59b7067690ae0c023e77 | 6bc0cef468f97914fab31dd83bd417b4a5321051 | /py_checkio_solutions/Scientific Expedition/sum_by_type.py | ffa4fc777499a9847d22d02d2db4f233d0699a2f | [] | no_license | todatech/checkio | 14f19ef111a3f222b369937c90746c47bf2c3a63 | 763a9e0f81470302b173a4a700b77bed4f71de7a | refs/heads/master | 2023-02-01T16:04:39.018699 | 2020-12-21T01:46:38 | 2020-12-21T01:46:38 | 303,469,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | #!/usr/bin/env checkio --domain=py run sum-by-type
# You have a list. Each value from that list can be either a string or an integer. Your task here is to return two values. The first one is a concatenation of all strings from the given list. The second one is a sum of all integers from the given list.
#
# Input:An array of strings ans integers
#
# Output:A list or tuple
#
# Precondition:both given ints should be between -1000 and 1000
#
#
# END_DESC
from typing import Tuple
def sum_by_types(items: list) -> Tuple[str, int]:
str_ans = ''
num_ans = 0
for n in items:
if type(n) is str:
str_ans += n
if type(n) is int:
num_ans += n
return (str_ans, num_ans)
if __name__ == '__main__':
print("Example:")
print(sum_by_types([]))
# These "asserts" are used for self-checking and not for an auto-testing
assert sum_by_types([]) == ('', 0)
assert sum_by_types([1, 2, 3]) == ('', 6)
assert sum_by_types(['1', 2, 3]) == ('1', 5)
assert sum_by_types(['1', '2', 3]) == ('12', 3)
assert sum_by_types(['1', '2', '3']) == ('123', 0)
assert sum_by_types(['size', 12, 'in', 45, 0]) == ('sizein', 57)
print("Coding complete? Click 'Check' to earn cool rewards!") | [
"tonani@gmail.com"
] | tonani@gmail.com |
28307b976fc960f266e1401750875eb574c139e9 | 05ff9a0778ae16c4b3f29a4e4198e3f829dee409 | /ecommerce_app/migrations/0014_paynowpayment.py | 8d6fc7931cacfb59e11ca1edb1b61580301c8544 | [] | no_license | Inoxevious/malinafro | 8aa87b3b2a5473430ff57790ebccb2aaba6d8493 | 7b5b255997a9f54272c4320ed939b8e24c84b910 | refs/heads/main | 2023-01-20T14:32:23.049381 | 2020-12-02T09:21:47 | 2020-12-02T09:21:47 | 314,222,344 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | # Generated by Django 3.0.8 on 2020-11-24 14:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('ecommerce_app', '0013_order_status'),
]
operations = [
migrations.CreateModel(
name='PaynowPayment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cellphone', models.CharField(blank=True, max_length=100, null=True)),
('email', models.EmailField(blank=True, max_length=100, null=True)),
('reference', models.CharField(max_length=100)),
('paynow_reference', models.CharField(max_length=100)),
('amount', models.DecimalField(decimal_places=2, max_digits=10)),
('details', models.CharField(blank=True, max_length=500)),
('init_status', models.CharField(blank=True, max_length=10)),
('poll_url', models.CharField(max_length=500)),
('browser_url', models.CharField(max_length=500)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('status', models.CharField(max_length=10)),
('paid', models.BooleanField(default=False)),
('confirmed_at', models.DateTimeField(blank=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"mpasiinnocent@gmail.com"
] | mpasiinnocent@gmail.com |
aba30190b9406ca34264eac7310ecebc0beed81d | ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0 | /02_algorithm/baekjoon/problem/1000~9999/7562.나이트의이동/7562.py | 4723f88cb7b7f2278519430d1b57259805f660b1 | [] | no_license | wally-wally/TIL | 93fc1d0e3bc7d030341ed54155294c68c48b4c7d | 936783bc86f563646c0398c24e2fcaa707f0ed23 | refs/heads/master | 2023-04-28T08:59:48.235747 | 2023-04-12T12:06:52 | 2023-04-12T12:06:52 | 195,918,111 | 40 | 7 | null | 2020-09-29T16:20:46 | 2019-07-09T02:31:02 | Python | UTF-8 | Python | false | false | 1,199 | py | import sys
sys.stdin = open('input_7562.txt', 'r')
def BFS(row, col):
queue = []
dx = [-2, -1, +1, +2, +2, +1, -1, -2]
dy = [+1, +2, +2, +1, -1, -2, -2, -1]
queue.append([row, col])
visited[row][col] = True
move_cnt = 0
while True:
temp_list = []
move_cnt += 1
while len(queue) != 0:
element = queue.pop(0)
for idx in range(8):
new_row, new_col = element[0] + dx[idx], element[1] + dy[idx]
if 0 <= new_row < I and 0 <= new_col < I:
if not visited[new_row][new_col]:
if new_row == goal[0] and new_col == goal[1]:
return move_cnt
else:
visited[new_row][new_col] = True
temp_list.append([new_row, new_col])
for temp in temp_list:
queue.append(temp)
for _ in range(int(input())):
I = int(input())
start = list(map(int, input().split()))
goal = list(map(int, input().split()))
visited = [[False] * I for _ in range(I)]
if start == goal:
print(0)
else:
print(BFS(start[0], start[1])) | [
"wallys0213@gmail.com"
] | wallys0213@gmail.com |
eb87b42045e37801ae1ccabf5fe794faad9c9aa5 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /kws_streaming/layers/dct_test.py | b359966280714820cd4c94950e26aec8aedb04af | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,605 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kws_streaming.layers.dct."""
import numpy as np
from kws_streaming.layers import dct
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
tf1.disable_eager_execution()
class DCTTest(tf.test.TestCase):
def test_tf_dct_vs_dct_direct(self):
signal_size = 64
# input signal
signal = np.random.rand(1, 1, signal_size)
# build mfcc model and run it
input_signal = tf.keras.Input(
shape=(
1,
signal_size,
), batch_size=1)
output = tf.signal.mfccs_from_log_mel_spectrograms(input_signal)
model = tf.keras.Model(input_signal, output)
model.summary()
mfcc_output = model.predict(signal)
# build dct model and run it
input_signal = tf.keras.Input(
shape=(
1,
signal_size,
), batch_size=1)
output = dct.DCT()(input_signal)
model = tf.keras.Model(input_signal, output)
model.summary()
dct_output = model.predict(signal)
self.assertAllClose(
mfcc_output[0][0], dct_output[0][0], rtol=1e-5, atol=1e-6)
def test_tf_dct_vs_dct_matmul(self):
signal_size = 51
# input signal
signal = np.random.rand(1, 1, signal_size)
# build dct model using tf function
input_signal = tf.keras.Input(
shape=(
1,
signal_size,
), batch_size=1)
output = dct.DCT(use_tf=False)(input_signal)
model1 = tf.keras.Model(input_signal, output)
model1.summary()
model1_output = model1.predict(signal)
# build dct model using direct matmul
input_signal = tf.keras.Input(
shape=(
1,
signal_size,
), batch_size=1)
output = dct.DCT(use_tf=True)(input_signal)
model2 = tf.keras.Model(input_signal, output)
model2.summary()
model2_output = model2.predict(signal)
self.assertAllClose(
model1_output, model2_output, rtol=1e-5, atol=1e-5)
if __name__ == "__main__":
tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
1934fbcee797873fc5e7e408002b1afb3f51f96f | 8c057f4d481d0ba756b08be90f3a771bf54411cb | /cloudmersive_currency_api_client/api/currency_exchange_api.py | 440514cc130020a6964c854a9de6682330a23020 | [
"Apache-2.0"
] | permissive | Cloudmersive/Cloudmersive.APIClient.Python.Currency | 57ca5717e6b298e782a03e03e32d8ecda5f174ab | c7e00c8f877b6cebb5e922b1c8178e852a7705fb | refs/heads/master | 2022-12-25T17:28:21.432412 | 2020-09-13T03:34:25 | 2020-09-13T03:34:25 | 295,070,766 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,331 | py | # coding: utf-8
"""
currencyapi
The currency APIs help you retrieve exchange rates and convert prices between currencies easily. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cloudmersive_currency_api_client.api_client import ApiClient
class CurrencyExchangeApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def currency_exchange_convert_currency(self, source, destination, source_price, **kwargs): # noqa: E501
"""Converts a price from the source currency into the destination currency # noqa: E501
Automatically converts the price in the source currency into the destination currency using the latest available currency exchange rate data. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.currency_exchange_convert_currency(source, destination, source_price, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source: Source currency three-digit code (ISO 4217), e.g. USD, EUR, etc. (required)
:param str destination: Destination currency three-digit code (ISO 4217), e.g. USD, EUR, etc. (required)
:param float source_price: Input price, such as 19.99 in source currency (required)
:return: ConvertedCurrencyResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.currency_exchange_convert_currency_with_http_info(source, destination, source_price, **kwargs) # noqa: E501
else:
(data) = self.currency_exchange_convert_currency_with_http_info(source, destination, source_price, **kwargs) # noqa: E501
return data
def currency_exchange_convert_currency_with_http_info(self, source, destination, source_price, **kwargs): # noqa: E501
"""Converts a price from the source currency into the destination currency # noqa: E501
Automatically converts the price in the source currency into the destination currency using the latest available currency exchange rate data. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.currency_exchange_convert_currency_with_http_info(source, destination, source_price, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source: Source currency three-digit code (ISO 4217), e.g. USD, EUR, etc. (required)
:param str destination: Destination currency three-digit code (ISO 4217), e.g. USD, EUR, etc. (required)
:param float source_price: Input price, such as 19.99 in source currency (required)
:return: ConvertedCurrencyResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['source', 'destination', 'source_price'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method currency_exchange_convert_currency" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'source' is set
if ('source' not in params or
params['source'] is None):
raise ValueError("Missing the required parameter `source` when calling `currency_exchange_convert_currency`") # noqa: E501
# verify the required parameter 'destination' is set
if ('destination' not in params or
params['destination'] is None):
raise ValueError("Missing the required parameter `destination` when calling `currency_exchange_convert_currency`") # noqa: E501
# verify the required parameter 'source_price' is set
if ('source_price' not in params or
params['source_price'] is None):
raise ValueError("Missing the required parameter `source_price` when calling `currency_exchange_convert_currency`") # noqa: E501
collection_formats = {}
path_params = {}
if 'source' in params:
path_params['source'] = params['source'] # noqa: E501
if 'destination' in params:
path_params['destination'] = params['destination'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'source_price' in params:
body_params = params['source_price']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/currency/exchange-rates/convert/{source}/to/{destination}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ConvertedCurrencyResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def currency_exchange_get_available_currencies(self, **kwargs): # noqa: E501
"""Get a list of available currencies and corresponding countries # noqa: E501
Enumerates available currencies and the countries that correspond to these currencies. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.currency_exchange_get_available_currencies(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: AvailableCurrencyResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.currency_exchange_get_available_currencies_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.currency_exchange_get_available_currencies_with_http_info(**kwargs) # noqa: E501
return data
def currency_exchange_get_available_currencies_with_http_info(self, **kwargs): # noqa: E501
"""Get a list of available currencies and corresponding countries # noqa: E501
Enumerates available currencies and the countries that correspond to these currencies. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.currency_exchange_get_available_currencies_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: AvailableCurrencyResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method currency_exchange_get_available_currencies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/currency/exchange-rates/list-available', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AvailableCurrencyResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def currency_exchange_get_exchange_rate(self, source, destination, **kwargs): # noqa: E501
"""Gets the exchange rate from the source currency into the destination currency # noqa: E501
Automatically gets the exchange rate from the source currency into the destination currency using the latest available currency exchange rate data. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.currency_exchange_get_exchange_rate(source, destination, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source: Source currency three-digit code (ISO 4217), e.g. USD, EUR, etc. (required)
:param str destination: Destination currency three-digit code (ISO 4217), e.g. USD, EUR, etc. (required)
:return: ExchangeRateResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.currency_exchange_get_exchange_rate_with_http_info(source, destination, **kwargs) # noqa: E501
else:
(data) = self.currency_exchange_get_exchange_rate_with_http_info(source, destination, **kwargs) # noqa: E501
return data
def currency_exchange_get_exchange_rate_with_http_info(self, source, destination, **kwargs): # noqa: E501
"""Gets the exchange rate from the source currency into the destination currency # noqa: E501
Automatically gets the exchange rate from the source currency into the destination currency using the latest available currency exchange rate data. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.currency_exchange_get_exchange_rate_with_http_info(source, destination, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source: Source currency three-digit code (ISO 4217), e.g. USD, EUR, etc. (required)
:param str destination: Destination currency three-digit code (ISO 4217), e.g. USD, EUR, etc. (required)
:return: ExchangeRateResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['source', 'destination'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method currency_exchange_get_exchange_rate" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'source' is set
if ('source' not in params or
params['source'] is None):
raise ValueError("Missing the required parameter `source` when calling `currency_exchange_get_exchange_rate`") # noqa: E501
# verify the required parameter 'destination' is set
if ('destination' not in params or
params['destination'] is None):
raise ValueError("Missing the required parameter `destination` when calling `currency_exchange_get_exchange_rate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'source' in params:
path_params['source'] = params['source'] # noqa: E501
if 'destination' in params:
path_params['destination'] = params['destination'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/currency/exchange-rates/get/{source}/to/{destination}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ExchangeRateResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"35204726+Cloudmersive@users.noreply.github.com"
] | 35204726+Cloudmersive@users.noreply.github.com |
b08e0b567a118d75a7d4962d1ede12db6030b181 | f957ad3b17e4172791ef93c38dc131f34be3545f | /corpus/location.py | 055bd4a146c76db39ef38918a757b68baf6a2eee | [
"Apache-2.0"
] | permissive | LeMyst/ConferenceCorpus | e43c1806165fbb24a6b312a3ad81d4142b1edfe5 | 727afb6f5c9b2e3807260a6161a27531b2f77dd2 | refs/heads/main | 2023-09-05T08:39:29.811243 | 2021-11-08T07:43:02 | 2021-11-08T07:43:02 | 426,408,944 | 0 | 0 | Apache-2.0 | 2021-11-09T22:38:53 | 2021-11-09T22:38:52 | null | UTF-8 | Python | false | false | 6,815 | py | '''
Created on 2021-08-11
@author: wf
'''
#from lodstorage.entity import EntityManager
from geograpy.locator import LocationContext
from OSMPythonTools.nominatim import Nominatim
import os
import logging
class LocationLookup:
    '''
    Resolve free-text location strings (e.g. conference venues) to
    geograpy city records.

    A curated alias table is consulted first; otherwise the text is
    resolved with geograpy and cross-checked against OSM Nominatim.
    '''
    # curated aliases -> Wikidata city ids; a value of None marks texts
    # that are known to be unresolvable (e.g. online events).
    # fix: the original literal listed "Phoenix, USA" twice - duplicate
    # dict keys are silently collapsed, so one entry was dead code.
    preDefinedLocations={
        "Not Known": None,
        "Online": None,
        "Albuquerque, New Mexico, USA":"Q34804",
        "Alexandria, Virginia, USA":"Q88",
        "Amsterdam": "Q727",
        "Amsterdam, Amsterdam": "Q727",
        "Amsterdam Netherlands": "Q727",
        "Amsterdam, Netherlands": "Q727",
        "Amsterdam, The Netherlands":"Q727",
        "Bergen, Norway":"Q26793",
        "Bremen, Germany": "Q24879",
        "Cancun, Mexico":"Q8969",
        "Cancún, Mexico": "Q8969",
        "Cambridge, United Kingdom": "Q21713103",
        "Cambridge, UK": "Q21713103",
        "Cambridge, USA": "Q49111",
        "Cambridge, MA":"Q49111",
        "Cambridge, Massachusetts, USA":"Q49111",
        "Cambridge, MA, USA":"Q49111",
        "Charleston, South Carolina, USA":"Q47716",
        "Gdansk, Poland":"Q1792",
        "Heraklion, Crete, Greece":"Q160544",
        "Los Angeles California": "Q65",
        "Los Angeles CA USA": "Q65",
        "Luxembourg, Luxembourg":"Q1842",
        "Macau, Macau, China":"Q14773",
        "Monterrey, Mexico":"Q81033",
        "Montreal, QC": "Q340",
        "Montreal, QC, Canada": "Q340",
        "Montrèal, Canada": "Q340",
        "New Brunswick, New Jersey, USA":"Q138338",
        "New Delhi": "Q987",
        "New Delhi, India": "Q987",
        "New Orleans, LA": "Q34404",
        "Palo Alto, USA": "Q47265",
        "Palo Alto, California, USA": "Q47265",
        "Pasadena, California, USA":"Q485176",
        "Phoenix": "Q16556",
        "Phoenix, AZ": "Q16556",
        "Phoenix AZ USA": "Q16556",
        "Phoenix, Arizona, USA": "Q16556",
        "Phoenix, USA": "Q16556",
        "Phoenix, AZ, USA": "Q16556",
        "Salamanca, Spain": "Q15695",
        "Santa Barbara, California": "Q159288",
        "Santa Barbara, CA": "Q159288",
        "Santa Barbara, CA, USA": "Q159288",
        "Santa Barbara CA USA": "Q159288",
        "Santa Barbara, USA": "Q159288",
        "Santa Barbara, California, USA": "Q159288",
        "Santa Fe, New Mexico": "Q38555",
        "Santa Fe, NM, USA": "Q38555",
        "Santa Fe, New Mexico, USA": "Q38555",
        "Santa Fe, USA": "Q38555",
        "Santa Fe, New Mexico, United States": "Q38555",
        "Skovde, Sweden": "Q21166",
        "Snowbird, Utah, USA": "Q3487194",
        "St. Louis, MO, USA": "Q38022",
        "St. Petersburg": "Q656",
        "Saint-Petersburg, Russia":"Q656",
        "Thessaloniki": "Q17151",
        "Thessaloniki, Greece": "Q17151",
        "Trondheim, Norway":"Q25804",
        "Valencia": "Q8818",
        "Valencia, Spain": "Q8818",
        "Valencia, Valencia, Spain": "Q8818",
        "York, UK":"Q42462"
    }
    # additional collected aliases; not referenced by the methods of
    # this class (presumably candidates for preDefinedLocations)
    other={
        "Washington, DC, USA": "Q61",
        "Bangalore": "Q1355",
        "Bangalore, India": "Q1355",
        "Xi'an": "Q5826",
        "Xi'an, China": "Q5826",
        "Virtual Event USA": "Q30",
        "Virtual USA": "Q30",
        "London United Kingdom": "Q84",
        "Brno":"Q14960",
        "Cancun":"Q8969",
        "Gothenburg Sweden": "Q25287",
        "Zurich, Switzerland": "Q72",
        "Barcelona Spain": "Q1492",
        "Vienna Austria": "Q1741",
        "Seoul Republic of Korea": "Q8684",
        "Seattle WA USA": "Q5083",
        "Singapore Singapore":"Q334",
        "Tokyo Japan": "Q1490",
        "Vancouver BC Canada": "Q24639",
        "Vancouver British Columbia Canada": "Q24639",
        "Paris France": "Q90",
        "Nagoya": "Q11751",
        "Marrakech":"Q101625",
        "Austin Texas":"Q16559",
        "Chicago IL USA":"Q1297",
        "Bangkok Thailand":"Q1861",
        "Firenze, Italy":"Q2044",
        "Florence Italy":"Q2044",
        "Timisoara":"Q83404",
        "Langkawi":"Q273303",
        "Beijing China":"Q956",
        "Berlin Germany": "Q64",
        "Prague Czech Republic":"Q1085",
        "Portland Oregon USA":"Q6106",
        "Portland OR USA":"Q6106",
        "Pittsburgh PA USA":"Q1342",
        "Новосибирск":"Q883",
        "Los Angeles CA USA":"Q65",
        "Kyoto Japan": "Q34600"
    }

    def __init__(self):
        '''
        Constructor

        Set up the geograpy location context and a file-cached
        Nominatim client.
        '''
        self.locationContext=LocationContext.fromCache()
        cacheRootDir=LocationContext.getDefaultConfig().cacheRootDir
        cacheDir=f"{cacheRootDir}/.nominatim"
        # exist_ok avoids the check-then-create race of the original
        os.makedirs(cacheDir, exist_ok=True)
        self.nominatim = Nominatim(cacheDir=cacheDir)
        # silence the noisy per-request logging of OSMPythonTools
        logging.getLogger('OSMPythonTools').setLevel(logging.ERROR)

    def getCityByWikiDataId(self,wikidataID:str):
        '''
        get the city record for the given wikidataID

        Args:
            wikidataID(str): the Wikidata Q-id of the city

        Returns:
            the first matching city, or None if the id is unknown
        '''
        citiesGen=self.locationContext.cityManager.getLocationsByWikidataId(wikidataID)
        if citiesGen is None:
            return None
        cities=list(citiesGen)
        return cities[0] if cities else None

    def lookupNominatim(self,locationText:str):
        '''
        look up the given location text via OSM Nominatim and map the
        result back to a city via its wikidata extratag

        Args:
            locationText(str): the location name to look up

        Returns:
            the matching city or None
        '''
        location=None
        # request extratags so the result carries its wikidata id
        nresult=self.nominatim.query(locationText,params={"extratags":"1"})
        # NOTE(review): relies on the private _json attribute of
        # OSMPythonTools' result object - verify on library upgrades
        nlod=nresult._json
        if len(nlod)>0:
            nrecord=nlod[0]
            if "extratags" in nrecord:
                extratags=nrecord["extratags"]
                if "wikidata" in extratags:
                    location=self.getCityByWikiDataId(extratags["wikidata"])
        return location

    def lookup(self,locationText:str):
        '''
        look up the given location text

        The curated alias table wins; otherwise geograpy and Nominatim
        must agree on the city, else the lookup is rejected.

        Args:
            locationText(str): the location name to look up

        Returns:
            the matching city, or None if unknown or ambiguous
        '''
        if locationText in LocationLookup.preDefinedLocations:
            locationId=LocationLookup.preDefinedLocations[locationText]
            if locationId is None:
                return None
            location=self.getCityByWikiDataId(locationId)
            if location is None:
                print(f"❌❌-predefinedLocation {locationText}→{locationId} wikidataId not resolved")
            return location
        lg=self.lookupGeograpy(locationText)
        ln=self.lookupNominatim(locationText)
        # when both backends answer they have to agree on the city
        if ln is not None and lg is not None and ln.wikidataid != lg.wikidataid:
            print(f"❌❌{locationText}→{lg}!={ln}")
            return None
        return lg

    def lookupGeograpy(self,locationText:str):
        '''
        look up the given location text using geograpy

        Args:
            locationText(str): the location name to look up

        Returns:
            the first geograpy match or None
        '''
        locations=self.locationContext.locateLocation(locationText)
        return locations[0] if locations else None
| [
"wf@bitplan.com"
] | wf@bitplan.com |
3ef28cffc4c66730c648b8fa86a3b1eb738a771c | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Item_Layout_20190119192913.py | c890a70582358ec2307bee469503689adbba361f | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,099 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
    """Compile a Qt Designer .ui file and return (form_class, base_class).

    PyQt bindings ship a working ``uic.loadUiType`` which is used
    directly; for PySide/PySide2 the .ui XML is compiled to Python
    source in memory and executed (Python 2 style ``exec``) to obtain
    the generated form class.
    """
    import plugin.Qt as Qt
    if Qt.__binding__.startswith('PyQt'):
        from Qt import _uic as uic
        return uic.loadUiType(uiFile)
    elif Qt.__binding__ == 'PySide':
        import pysideuic as uic
    else:
        import pyside2uic as uic
    import xml.etree.ElementTree as xml
    from cStringIO import StringIO
    # the widget (base) class and form class names are declared in the
    # .ui XML itself
    parsed = xml.parse(uiFile)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text
    with open(uiFile, 'r') as f:
        o = StringIO()
        frame = {}
        # compile the .ui file to Python source in-memory, then execute
        # it so the generated Ui_* class lands in `frame`
        uic.compileUi(f, o, indent=0)
        pyc = compile(o.getvalue(), '<string>', 'exec')
        exec pyc in frame
        # Fetch the base_class and form class based on their type
        # in the xml from designer; the base class must already be an
        # imported Qt widget class (resolved via eval on its name)
        form_class = frame['Ui_%s'%form_class]
        base_class = eval('%s'%widget_class)
    return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Item_Layout.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
from maya import cmds
class Cam_Item_Layout(form_class,base_class):
    """Scrollable container widget that manages a dynamic list of Cam_Item rows."""

    def __init__(self,MainWindow):
        super(Cam_Item_Layout,self).__init__()
        self.setupUi(self)
        self.MainWindow = MainWindow
        self.Item_Add_BTN.clicked.connect(self.Item_Add_Fn)
        self.Item_Clear_BTN.clicked.connect(self.Item_Clear_Fn)
        # running counter used only to generate unique Cam_Item names
        self.Cam_Item_Num = 0
        self.Cam_Item_Scroll.verticalScrollBar().valueChanged.connect(self.Scroll_Fn)
        self.Scroll_Offset = 0
        self.Attr = {}
        self.Attr["Add_Crv_LE"] = ""
        self.Attr["Add_Motion_Path_LE"] = ""
        self.Attr["Add_CamGrp_LE"] = ""
        self.Attr["Add_Loc_LE"] = ""
        self.Attr["Name"] = ""
        # Note: function buttons
        self.Batch_Keyframe_BTN.clicked.connect(self.Batch_Keyframe_Fn)
        # bugfix: the original connected Select_Path_BTN.clicked to
        # Select_Path_Fn twice, so a single click ran the slot two times
        self.Select_Path_BTN.clicked.connect(self.Select_Path_Fn)

    def Batch_Keyframe_Fn(self):
        """Shift each item's motion-path keys so its animation starts at frame 0."""
        ChildrenList = self.Item_Layout.children()
        for i,child in enumerate(ChildrenList):
            # index 0 is the layout object itself, not a Cam_Item
            if i != 0:
                Path = child.Attr["Add_Motion_Path_LE"]
                if cmds.objExists(Path):
                    # first key time becomes the offset to remove
                    offset = cmds.keyframe(Path,q=1)[0]
                    cmds.keyframe("%s.uValue"% Path,e=1,iub=1,r=1,o="over",tc=-offset)

    def Select_Path_Fn(self):
        """Select every existing motion-path node of the current items."""
        cmds.select(cl=1)
        ChildrenList = self.Item_Layout.children()
        for i,child in enumerate(ChildrenList):
            if i != 0:
                if cmds.objExists(child.Attr["Add_Motion_Path_LE"]):
                    cmds.select(child.Attr["Add_Motion_Path_LE"],add=1)

    def Item_Add_Fn(self):
        """Create a new Cam_Item row and return it."""
        self.Cam_Item_Num += 1
        return Cam_Item(self,self.MainWindow)

    def Item_Clear_Fn(self):
        """Reset the stored attributes and delete every Cam_Item row."""
        self.Attr["Add_Crv_LE"] = ""
        self.Attr["Add_Motion_Path_LE"] = ""
        self.Attr["Name"] = ""
        for i,child in enumerate(self.Item_Layout.children()):
            if i != 0:
                child.deleteLater()

    def Scroll_Fn(self):
        """Remember the current vertical scroll position."""
        self.Scroll_Offset = self.Cam_Item_Scroll.verticalScrollBar().value()
UI_PATH = os.path.join(DIR,"ui","Cam_Item.ui")
form_class , base_class = loadUiType(UI_PATH)
class Cam_Item(form_class,base_class):
    """A single camera row inside Cam_Item_Layout.

    Tracks its scene node names and frame range in ``self.Attr`` and
    keeps its displayed index in sync when siblings are deleted.
    """
    def __init__(self,parent,MainWindow):
        super(Cam_Item,self).__init__()
        self.setupUi(self)
        self.MainWindow = MainWindow
        self.Cam_Del_BTN.clicked.connect(self.Cam_Del_BTN_Fn)
        self.Cam_Con_CB.stateChanged.connect(self.Cam_Con_CB_Fn)
        # Note: initialize the creation parameters; children()[0] is the
        # layout object, so TotalCount equals this item's 1-based index
        TotalCount = len(parent.Item_Layout.children())
        parent.Item_Layout.layout().insertWidget(TotalCount-1,self)
        self.Cam_LE.setText("Cam_Item_%s" % parent.Cam_Item_Num)
        self.Cam_Num_Label.setText(u"镜头%s" % TotalCount)
        self.setObjectName("Cam_Item_%s" % TotalCount)
        self.Num = TotalCount
        self.Attr = {}
        self.Attr["Add_CamGrp_LE"] = ""
        self.Attr["Add_Loc_LE"] = ""
        self.Attr["Add_Crv_LE"] = ""
        self.Attr["Add_Motion_Path_LE"] = ""
        self.Attr["Strat_Time_SB"] = 0
        self.Attr["End_Time_SB"] = 0
        # persist the GUI state right after creation
        self.MainWindow.Save_Json_Fun()
    def Cam_Del_BTN_Fn(self):
        """Delete this item and renumber the remaining sibling items."""
        # deleteLater() is deferred, so this widget is still among the
        # parent's children while the loop below runs
        self.deleteLater()
        ChildrenList = self.parent().children()
        for i,child in enumerate(ChildrenList):
            if i != 0:
                if i > self.Num:
                    # Note: fix the index of the children after the
                    # deleted one (they shift up by one)
                    child.Num -= 1
                    child.Cam_Num_Label.setText(u"镜头%s" % (i-1))
                    child.setObjectName("Cam_Item_%s" % (i-1))
                else:
                    child.Cam_Num_Label.setText(u"镜头%s" % i)
                    child.setObjectName("Cam_Item_%s" % i)
        self.Attr["Add_CamGrp_LE"] = ""
        self.Attr["Add_Loc_LE"] = ""
        self.Attr["Add_Crv_LE"] = ""
        self.Attr["Add_Motion_Path_LE"] = ""
        self.Attr["Strat_Time_SB"] = ""
        self.Attr["End_Time_SB"] = ""
        self.MainWindow.Save_Json_Fun()
    def Cam_Con_CB_Fn(self,state):
        """Keep at most one item's checkbox checked (radio-button behavior)."""
        ChildrenList = self.parent().children()
        for i,child in enumerate(ChildrenList):
            if i != 0:
                if child != self:
                    # block signals so unchecking siblings does not
                    # recursively re-enter this handler
                    child.Cam_Con_CB.blockSignals(True)
                    child.Cam_Con_CB.setChecked(False)
        # state == 2 is Qt.Checked
        if state == 2:
            self.Cam_Con_CB.setChecked(True)
        else:
            self.Cam_Con_CB.setChecked(False)
        for i,child in enumerate(ChildrenList):
            if i != 0:
                if child != self:
                    child.Cam_Con_CB.blockSignals(False)
| [
"2595715768@qq.com"
] | 2595715768@qq.com |
b89f3d90b055cd7e487503b5d88b55929f5bba30 | 2e4290bc1bee155cb8f95bdf7681b58325c8746e | /School/crop/forms.py | 06dc1db5004099b9a33e20985574b5242b21b1d6 | [] | no_license | codingspider/Schoolscript | bb7b539655417e8ee92dae27cedad69c386f5d80 | 7b61d7edb0b5ca4d4767622a02d8727f55510aec | refs/heads/master | 2022-12-14T12:06:15.351705 | 2020-09-08T11:22:27 | 2020-09-08T11:22:27 | 289,896,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | from PIL import Image
from .models import PointOfInterest, Rental
from django import forms
from django.core.files import File
# class PhotoForm(forms.ModelForm):
# x = forms.FloatField(widget=forms.HiddenInput())
# y = forms.FloatField(widget=forms.HiddenInput())
# width = forms.FloatField(widget=forms.HiddenInput())
# height = forms.FloatField(widget=forms.HiddenInput())
#
# class Meta:
# model = Photo
# fields = ('file', 'x', 'y', 'width', 'height', )
# widgets = {
# 'file': forms.FileInput(attrs={
# 'accept': 'image/*' # this is not an actual validation! don't rely on that!
# })
# }
#
# def save(self):
# photo = super(PhotoForm, self).save()
#
# x = self.cleaned_data.get('x')
# y = self.cleaned_data.get('y')
# w = self.cleaned_data.get('width')
# h = self.cleaned_data.get('height')
#
# image = Image.open(photo.file)
# cropped_image = image.crop((x, y, w+x, h+y))
# resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
# resized_image.save(photo.file.path)
#
# return photo
class LocationForm(forms.ModelForm):
    """ModelForm exposing every field of the PointOfInterest model."""
    class Meta:
        model = PointOfInterest
        fields = "__all__"
class RentalForm(forms.ModelForm):
    """ModelForm exposing every field of the Rental model."""
    class Meta:
        model = Rental
        fields = "__all__"
| [
"engrokon.rok@gmail.com"
] | engrokon.rok@gmail.com |
e6401021ad628fdb35351d2021abefaacd6de2d1 | d98d5d1af8c31bb7aa0b628d48e504db2ebecbc8 | /分子反映分类/demo.py | ba68e44b7314565046756df256fdf2aa2c14c27c | [] | no_license | dugzzuli/kaggleDemo | 1d52b931e4399551bc92d7cd40bc9453223ede49 | 65c91c42bf9b01eaca3c071b1ce210f214814433 | refs/heads/master | 2021-01-20T04:15:34.768985 | 2017-04-30T14:57:57 | 2017-04-30T14:57:57 | 89,662,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | """Kaggle competition: Predicting a Biological Response.
Blending {RandomForests, ExtraTrees, GradientBoosting} + stretching to
[0,1]. The blending scheme is related to the idea Jose H. Solorzano
presented here:
http://www.kaggle.com/c/bioresponse/forums/t/1889/question-about-the-process-of-ensemble-learning/10950#post10950
'''You can try this: In one of the 5 folds, train the models, then use
the results of the models as 'variables' in logistic regression over
the validation data of that fold'''. Or at least this is the
implementation of my understanding of that idea :-)
The predictions are saved in test.csv. The code below created my best
submission to the competition:
- public score (25%): 0.43464
- private score (75%): 0.37751
- final rank on the private leaderboard: 17th over 711 teams :-)
Note: if you increase the number of estimators of the classifiers,
e.g. n_estimators=1000, you get a better score/rank on the private
test set.
Copyright 2012, Emanuele Olivetti.
BSD license, 3 clauses.
"""
from __future__ import division
import numpy as np
import load_data
from sklearn.cross_validation import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
def logloss(attempt, actual, epsilon=1.0e-15):
    """Binary cross-entropy, i.e. the score of the bioresponse competition.

    Predictions are clipped to [epsilon, 1 - epsilon] so that log(0)
    never occurs; `attempt` and `actual` are array-likes of equal shape.
    """
    clipped = np.clip(attempt, epsilon, 1.0 - epsilon)
    positive_term = actual * np.log(clipped)
    negative_term = (1.0 - actual) * np.log(1.0 - clipped)
    return -np.mean(positive_term + negative_term)
if __name__ == '__main__':
np.random.seed(0) # seed to shuffle the train set
n_folds = 10
verbose = True
shuffle = False
X, y, X_submission = load_data.load()
if shuffle:
idx = np.random.permutation(y.size)
X = X[idx]
y = y[idx]
skf = list(StratifiedKFold(y, n_folds))
print(skf)
clfs = [RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion='gini'),
RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion='entropy'),
ExtraTreesClassifier(n_estimators=100, n_jobs=-1, criterion='gini'),
ExtraTreesClassifier(n_estimators=100, n_jobs=-1, criterion='entropy'),
GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=50)]
print( "Creating train and test sets for blending.")
#X 训练数据集个数 训练模型个数
dataset_blend_train = np.zeros((X.shape[0], len(clfs)))
# 测试数据集 的个数
dataset_blend_test = np.zeros((X_submission.shape[0], len(clfs)))
for j, clf in enumerate(clfs):
print( j, clf)
#创建 针对当前的分类模型 传入的数据为一个元祖
dataset_blend_test_j = np.zeros((X_submission.shape[0], len(skf)))
for i, (train, test) in enumerate(skf):
print(skf)
print( "Fold", i)
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
print(X_test)
| [
"bingwei2642@qq.com"
] | bingwei2642@qq.com |
422e81d7dc8990d09e10b9f966d4575ed58f6625 | aef1c0d4a32fa10afae10393c850960f9d89cdbc | /MiRegionCO/apps/noticia/migrations/0005_auto_20170722_1413.py | 539cffd200c033897d8312e8478b827075b333a1 | [] | no_license | joselofierro/MiRegionCO | 587059244fc153f32c6eaac8e41fab05bdeb5937 | 781491dc81a2dff7a8ae237d4ea7e23b31a31c52 | refs/heads/master | 2021-11-09T11:23:41.189863 | 2018-01-16T16:53:34 | 2018-01-16T16:53:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-22 14:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``noticia`` app.

    Adds the new required ``duracion`` field to Noticia and re-declares
    ``titular`` as CharField(max_length=100).
    """
    dependencies = [
        ('noticia', '0004_noticia_destacada'),
    ]
    operations = [
        migrations.AddField(
            model_name='noticia',
            name='duracion',
            # default=1 only backfills existing rows;
            # preserve_default=False drops the default from the final
            # field definition afterwards
            field=models.CharField(default=1, max_length=5),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='noticia',
            name='titular',
            field=models.CharField(max_length=100),
        ),
    ]
| [
"juliofierro@Mac-mini-de-JULIO.local"
] | juliofierro@Mac-mini-de-JULIO.local |
6c12f87994931874e395ce5e3a254320cbfa4375 | ded564e6571f59df13a3f5d753c6c54f207261c1 | /thermo/units.py | 57c989069162d21bb322e48ddecc82bec6677b5e | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | simonmb/thermo | 580ae53e764e00e601a5ef4a918e8d4a77442257 | 9abbb0ea71abe8677155e029d01aebe74cce137f | refs/heads/master | 2021-12-13T23:34:50.774780 | 2021-11-11T22:27:21 | 2021-11-11T22:27:21 | 144,257,869 | 1 | 0 | MIT | 2018-08-10T08:15:47 | 2018-08-10T08:15:46 | null | UTF-8 | Python | false | false | 3,232 | py | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2017, 2018, 2019 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
__all__ = ['u']
import types
import re
import inspect
import functools
import collections
import thermo
import numpy as np
# pint is a hard requirement for this module: fail loudly with an
# installation hint rather than a bare ImportError.
try:
    import pint
    from pint import _DEFAULT_REGISTRY as u
    from pint import DimensionalityError
except ImportError: # pragma: no cover
    raise ImportError('The unit handling in fluids requires the installation '
                      'of the package pint, available on pypi or from '
                      'https://github.com/hgrecco/pint')
from fluids.units import wraps_numpydoc, wrap_numpydoc_obj
# Walk thermo's public namespace and build pint-wrapped replacements
# for the supported classes; collected here and merged into this
# module's globals at the end.
__funcs = {}

# set when any class fails to wrap; presumably for external inspection
# (not read again in this block)
failed_wrapping = False

for name in dir(thermo):
    if name == '__getattr__' or name == '__test__':
        continue
    obj = getattr(thermo, name)
    if isinstance(obj, types.FunctionType):
        # plain functions are currently re-exported unwrapped
        pass
#        obj = wraps_numpydoc(u)(obj)
    elif type(obj) == type and (obj in (thermo.Chemical, thermo.Mixture, thermo.Stream,
                               thermo.ChemicalConstantsPackage, thermo.PropertyCorrelationsPackage)
                                or thermo.eos.GCEOS in obj.__mro__
                                or thermo.activity.GibbsExcess in obj.__mro__
                                or thermo.TDependentProperty in obj.__mro__
                                or thermo.MixtureProperty in obj.__mro__
                                or thermo.Flash in obj.__mro__
                               ):
        if obj in (thermo.eos_mix.PSRKMixingRules, thermo.eos_mix.PSRK):
            # Not yet implemented
            continue
        try:
            # wrap the class so its numpydoc-documented quantities carry
            # pint units
            obj = wrap_numpydoc_obj(obj)
        except Exception as e:
            # keep going: export the unwrapped class and report which
            # docstring could not be parsed
            failed_wrapping = True
            print('Current implementation of %s contains documentation not '
                  'parseable and cound not be wrapped to use pint:' %str(obj))
            print(e)
    elif isinstance(obj, str):
        # skip module-level string constants
        continue
    if name == '__all__':
        continue
    __all__.append(name)
    __funcs.update({name: obj})
globals().update(__funcs)
"Caleb.Andrew.Bell@gmail.com"
] | Caleb.Andrew.Bell@gmail.com |
1da55da5caaa3b90460be0fb0e117a0a33a47b72 | 847815fd6d24859dd0e41a3e53fd29df63b0e8f3 | /solutions/CombinationSumII.py | 27391dee0f2916f041e7fa29ac75d529b173e5ce | [] | no_license | howardhe0329/leetcode | 68c2f901ed15e1904241bb31f9fcba5cdc0cb6dd | 588a86282b8cc74fa14d810eb3a532c5c3e6de81 | refs/heads/master | 2020-07-04T13:03:08.134205 | 2015-12-25T14:40:20 | 2015-12-25T14:40:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | __author__ = 'Daoyuan'
from BaseSolution import *
class CombinationSumII(BaseSolution):
    """LeetCode 40 "Combination Sum II".

    Find all unique combinations of ``candidates`` (each element usable
    at most once) whose values sum to ``target``.
    """

    def __init__(self):
        BaseSolution.__init__(self)
        self.push_test(
            params = ([10,1,2,7,6,1,5], 8,),
            expects = [
                [1, 7],
                [1, 2, 5],
                [2, 6],
                [1, 1, 6],
            ],
            expect_unordered = True
        )
        self.push_test(
            params = ([4,3,2,1,1], 5,),
            expects = [
                [1, 4],
                [1, 1, 3],
                [2, 3],
            ],
            expect_unordered = True
        )
        self.push_test(
            params = ([2],5),
            expects = []
        )

    def solution(self, candidates, target):
        """Return the list of unique non-descending combinations summing to target."""
        nums = sorted(candidates)
        return list(self.combine(nums, target))

    def combine(self, nums, target):
        """Yield combinations drawn from the sorted list ``nums`` that sum to target.

        Fix: the original used ``last = -1`` as a "no previous value"
        sentinel, which would wrongly skip a leading candidate equal to
        -1; comparing against the previous index has no such blind spot.
        """
        for i, num in enumerate(nums):
            # skip repeated values at the same recursion depth so each
            # combination is emitted exactly once
            if i > 0 and num == nums[i - 1]:
                continue
            if num > target:
                # nums is sorted ascending: no later candidate can fit
                return
            if num == target:
                yield [num]
            else:
                # use num, then solve the smaller problem on the tail
                for tail in self.combine(nums[i + 1:], target - num):
                    yield [num] + tail
"this@caunion.me"
] | this@caunion.me |
4e13646e0695fb15fe65d9cba62592a5336a05f7 | 401fc99cefe615f8ebefb6dd9c2b043c506f5bd0 | /tests/units/test_helpers.py | 6604d3b7ffe74c4a297010609d9e2ed3d39c7a8e | [
"MIT"
] | permissive | atviriduomenys/spinta | 0f85496860ebbcecfccd8dde2bf219564ee66baa | 1fac5b6b75ec65188d815078fd135bc05d49b31c | refs/heads/master | 2023-09-02T13:22:58.411937 | 2023-08-18T12:59:17 | 2023-08-18T12:59:17 | 168,724,854 | 12 | 4 | MIT | 2023-09-14T13:29:39 | 2019-02-01T16:16:11 | Python | UTF-8 | Python | false | false | 1,069 | py | import pytest
from spinta.units.helpers import is_si_unit
from spinta.units.helpers import is_time_unit
@pytest.mark.parametrize('unit', [
    '1D',
    'D',
    'Y',
    '3M',
    '12H',
])
def test_valid_time_unit(unit: str):
    """is_time_unit must accept each of these duration codes."""
    assert is_time_unit(unit)
@pytest.mark.parametrize('unit', [
    'D1',
    'd',
    'YY',
    '',
    ' D',
    'D ',
])
def test_invalid_time_unit(unit: str):
    """is_time_unit must reject malformed codes (wrong order, case,
    empty string, stray whitespace)."""
    assert not is_time_unit(unit)
@pytest.mark.parametrize('unit', [
    'm',
    '1m',
    '10m',
    'm^2',
    'm²',
    'km¹⁰',
    'kg⋅m²⋅s⁻³⋅A⁻¹',
    'kg*m^2*s^-3⋅A^-1',
    '8kg⋅m²⋅s⁻³⋅A⁻¹',
    'mg/l',
    'g/m^2',
    'mg/m^3',
    'mm',
    'U/m^2',
    'U/m^3',
    '%',
    'ha',
    'min',
    'h',
    'bar',
    'U',
    '10^6s',
    '10⁶s',
    'μ/m³',
    'yr',
    '3mo',
    'yr 2mo 4wk',
    '°C',
    '°',
])
def test_valid_unit(unit: str):
    """is_si_unit must accept these SI unit expressions, in both the
    ASCII (^, *) and Unicode (superscript, ⋅) notations."""
    assert is_si_unit(unit)
@pytest.mark.parametrize('unit', [
    'D',
    '1D',
    'meter',
])
def test_invalid_si_unit(unit: str):
    """is_si_unit must reject time-unit codes and spelled-out names."""
    assert not is_si_unit(unit)
| [
"sirexas@gmail.com"
] | sirexas@gmail.com |
e69a3b71d01b4f76b8c9c0a1d9ffdb9bc82b442b | 38258a7dd9acbfb7adf72983015de68a948a4826 | /B_10000~/B_10871.py | e6b311cfbec7336a975db6800540699eb27bee56 | [] | no_license | kangsm0903/Algorithm | 13a7fe5729039a1d0ce91a574c4755a8a92fb02b | 7d713d1c9e2e4dc30141d4f409ac1430a357065b | refs/heads/master | 2022-10-04T00:33:49.247977 | 2022-09-26T12:51:16 | 2022-09-26T12:51:16 | 219,265,010 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | # # 11/15 10871번
N,X = input().split()
N = int(N)
X = int(X)
A = list(map(int,input().split()))
B = []
for i in range(0, N):
if (A[i] < X) :
print(A[i], end=' ') | [
"kangsm0903@naver.com"
] | kangsm0903@naver.com |
c7f221ee2ca5d98b9105f235c3746a617815877c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03168/s894090351.py | 90c251f91b9e6ad5b7d11301831dc58de6c7936d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | n = int(input())
arr = list(map(float,input().split()))
dp=[]
for i in range(n+1):
dp.append([0]*(n+1))
dp[1][0]=(1-arr[0])
dp[1][1]=arr[0]
for i in range(2,n+1):
for j in range(0,i+1):
dp[i][j]= dp[i-1][j-1]*arr[i-1] + dp[i-1][j]*(1-arr[i-1])
ans=0
for i in range(n//2+1,n+1):
ans+=dp[n][i]
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fab48185848246473828e0ee5d08a35b394b0c4a | 65329299fca8dcf2e204132624d9b0f8f8f39af7 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/__init__.py | fe57a995d41dbed8e0c81d59672f858b10a83a59 | [
"Apache-2.0"
] | permissive | darylturner/napalm-yang | bf30420e22d8926efdc0705165ed0441545cdacf | b14946b884ad2019b896ee151285900c89653f44 | refs/heads/master | 2021-05-14T12:17:37.424659 | 2017-11-17T07:32:49 | 2017-11-17T07:32:49 | 116,404,171 | 0 | 0 | null | 2018-01-05T16:21:37 | 2018-01-05T16:21:36 | null | UTF-8 | Python | false | false | 15,094 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import config
import state
class prefix_limit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/l3vpn-ipv6-multicast/prefix-limit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__config','__state',)
_yang_name = 'prefix-limit'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'bgp', u'global', u'afi-safis', u'afi-safi', u'l3vpn-ipv6-multicast', u'prefix-limit']
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config (container)
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
})
self.__config = t
if hasattr(self, '_set'):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/state (container)
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
  def _unset_state(self):
    # Reset 'state' to a fresh, default-constructed container wrapper.
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

  # Expose the generated getters/setters as ordinary attributes; pyangbind
  # uses __builtin__.property explicitly to avoid shadowing YANG leaf names.
  config = __builtin__.property(_get_config, _set_config)
  state = __builtin__.property(_get_state, _set_state)

  # Registry of this container's child elements, consumed by the copy
  # constructor and serialization helpers.
  _pyangbind_elements = {'config': config, 'state': state, }
import config
import state
# NOTE(review): auto-generated pyangbind binding class — do not hand-edit
# behavior here; regenerate from the YANG model instead.
class prefix_limit(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/l3vpn-ipv6-multicast/prefix-limit. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: Configure the maximum number of prefixes that will be
  accepted from a peer
  """
  # __slots__ keeps per-instance memory small; '__config'/'__state' are the
  # name-mangled storage slots for the two child containers.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__config','__state',)

  _yang_name = 'prefix-limit'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False

    self._extmethods = False
    # Child containers start as default-constructed YANGDynClass wrappers.
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

    load = kwargs.pop("load", None)
    # Copy-constructor path: a single positional argument must expose every
    # element in _pyangbind_elements; changed elements are copied across.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          # Skip elements still at their default value.
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # Build the instance path from the parent chain when attached; otherwise
    # fall back to the static schema path of this container.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'bgp', u'global', u'afi-safis', u'afi-safi', u'l3vpn-ipv6-multicast', u'prefix-limit']

  def _get_config(self):
    """
    Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config (container)

    YANG Description: Configuration parameters relating to the prefix
    limit for the AFI-SAFI
    """
    return self.__config

  def _set_config(self, v, load=False):
    """
    Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/config (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_config() directly.

    YANG Description: Configuration parameters relating to the prefix
    limit for the AFI-SAFI
    """
    # Unwrap an already-wrapped value before re-validating it below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """config must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
        })

    self.__config = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_config(self):
    # Reset 'config' to a fresh default container.
    self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)


  def _get_state(self):
    """
    Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/state (container)

    YANG Description: State information relating to the prefix-limit for the
    AFI-SAFI
    """
    return self.__state

  def _set_state(self, v, load=False):
    """
    Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/l3vpn_ipv6_multicast/prefix_limit/state (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_state() directly.

    YANG Description: State information relating to the prefix-limit for the
    AFI-SAFI
    """
    # Unwrap an already-wrapped value before re-validating it below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """state must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
        })

    self.__state = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_state(self):
    # Reset 'state' to a fresh default container.
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

  # Public attribute access routes through the generated getters/setters.
  config = __builtin__.property(_get_config, _set_config)
  state = __builtin__.property(_get_state, _set_state)

  # Child-element registry used by the copy constructor and serializers.
  _pyangbind_elements = {'config': config, 'state': state, }
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
1607857a4861051a4af79b0e93cb41a76d4659c2 | 311e9f909ec8c63c40a6b09d70006da4f2e0a7d5 | /tfx/utils/model_paths/tf_serving_flavor_test.py | 7c1ad2f866b85cae9556b377d17a85091bfcd24f | [
"Apache-2.0"
] | permissive | 18jeffreyma/tfx | 793fbc6c0597d88d16ac551bae9eddfd18ff1542 | ff6917997340401570d05a4d3ebd6e8ab5760495 | refs/heads/master | 2022-12-15T16:18:15.578839 | 2020-08-31T20:34:05 | 2020-08-31T20:34:56 | 274,276,728 | 3 | 0 | Apache-2.0 | 2020-09-16T18:58:02 | 2020-06-23T01:08:19 | Python | UTF-8 | Python | false | false | 2,855 | py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.model_paths.tf_serving_flavor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tfx.utils.model_paths import tf_serving_flavor as tfs_flavor
class TFServingFlavorTest(tf.test.TestCase):
  """Tests make_model_path/parse_model_path for the TF Serving path flavor."""

  def testRoundTrip(self):
    # parse and make are mutual inverses.
    made = tfs_flavor.make_model_path('/foo/bar', 'my-model', 123)
    self.assertEqual(tfs_flavor.parse_model_path(made),
                     ('/foo/bar', 'my-model', 123))
    parsed = tfs_flavor.parse_model_path('/foo/bar/my-model/123')
    self.assertEqual(tfs_flavor.make_model_path(*parsed),
                     '/foo/bar/my-model/123')

  def testMakeModelPath(self):
    # Each (base path, expected full path) pair covers one filesystem scheme.
    cases = [
        ('/foo/bar', '/foo/bar/my-model/123'),
        ('s3://bucket-name/foo/bar', 's3://bucket-name/foo/bar/my-model/123'),
        ('gs://bucket-name/foo/bar', 'gs://bucket-name/foo/bar/my-model/123'),
    ]
    for base_path, expected in cases:
      self.assertEqual(
          tfs_flavor.make_model_path(
              model_base_path=base_path, model_name='my-model', version=123),
          expected)

  def testParseModelPath(self):
    # Each (model path, expected parse) pair covers one filesystem scheme.
    cases = [
        ('/foo/bar/my-model/123', ('/foo/bar', 'my-model', 123)),
        ('s3://bucket-name/foo/bar/my-model/123',
         ('s3://bucket-name/foo/bar', 'my-model', 123)),
        ('gs://bucket-name/foo/bar/my-model/123',
         ('gs://bucket-name/foo/bar', 'my-model', 123)),
    ]
    for model_path, expected in cases:
      self.assertEqual(tfs_flavor.parse_model_path(model_path), expected)

  def testParseModelPath_Fail(self):
    # Malformed paths must raise ValueError.
    for bad_path in ('too-short', '/foo/bar/my-model/not-an-int-version'):
      with self.assertRaises(ValueError):
        tfs_flavor.parse_model_path(bad_path)
    # A mismatched expected model name must also raise.
    with self.assertRaises(ValueError):
      tfs_flavor.parse_model_path('/foo/bar/other-model/123',
                                  expected_model_name='my-model')
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.