blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
59602afbce466d3a9113e0c7e330db6597cd02fa | cb242b1fdf3889d4df347f3102daf6584a0c40a4 | /threeandthrees/words.py | d562ed188fda92eba7607308a68f44f8b4960f5d | [
"MIT"
] | permissive | bwarren2/threeandthrees | d927711f0927a8e3493cd201ffdd8d930e5586f2 | 2a09a398ab332c27e2e7722e612fa18318b50e60 | refs/heads/master | 2023-05-25T23:28:11.706181 | 2021-04-30T15:12:45 | 2021-04-30T15:12:45 | 68,949,353 | 0 | 0 | MIT | 2021-04-30T15:12:46 | 2016-09-22T18:18:05 | Python | UTF-8 | Python | false | false | 2,546 | py | from collections import defaultdict, OrderedDict
import random
from colorama import init, Fore
import os
import re
init(autoreset=True)
safe_pattern = re.compile('^[a-z]{9}$')
def extract_words():
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path + '/american-english.txt', 'r') as f:
raw_data = f.read().split('\n')
data = list(filter(is_clean, raw_data))
return data
def is_clean(word):
return re.search(safe_pattern, word) is not None
def extract_cores(wordlist):
coremap = defaultdict(list)
for word in wordlist:
coremap[word[3:6]].append(word)
return coremap
all_words = extract_words()
coremap = extract_cores(all_words)
class Wordmonger(object):
def __init__(self, all_words, coremap):
self.words = all_words
self.coremap = coremap
self.challenge = OrderedDict()
def answer_count(self, candidate):
value = self.coremap.get(candidate, None)
if value is None:
return 0
else:
return len(value)
def answers(self, candidate):
return self.coremap.get(candidate, None)
def generate(self):
key = random.choice(list(self.coremap.keys()))
return key
# return self.coremap[key]
def check(self, arg):
return arg in self.coremap[arg[3:6]]
def show_challenge(self):
for idx, (key, value) in enumerate(self.challenge.iteritems(), 1):
if value is not None:
print(
"{idx}:\t {color}{word}".format(
**{
'idx': idx, 'word': value, 'color': Fore.GREEN
}
)
)
else:
print(
"{idx}:\t ___{core}___".format(
**{'idx': idx, 'core': key}
)
)
def formulate_challenge(self, n=10):
self.challenge = OrderedDict()
while n > 0:
new_core = random.choice(list(self.coremap.keys()))
if new_core not in list(self.challenge.keys()):
self.challenge[new_core] = None
n -= 1
def claim(self, answer):
key = answer[3:6]
if (
answer in self.coremap[key]
and key in list(self.challenge.keys())
):
self.challenge[key] = answer
return True
else:
return False
monger = Wordmonger(all_words, coremap)
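

# --- Hedged usage sketch (not part of the original module) ---
# Intended flow: build a ten-core challenge, then claim nine-letter words
# whose middle three letters (word[3:6]) match an open core. The word
# below is illustrative and only scores if its core was actually drawn.
if __name__ == '__main__':
    monger.formulate_challenge(n=10)
    monger.show_challenge()
    if monger.claim('cataloged'):  # core 'alo'
        monger.show_challenge()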
| [
"bwarren2@gmail.com"
] | bwarren2@gmail.com |
7f21a3c4b4eab2603971a2c036ccf0062bc692a0 | 92d5c15b92356de9f66d2d4738f3c6f00ef2796f | /alembic/versions/11a00705ac61_added_a_bunch_of_gra.py | 125da279586118ae1e213bc3e51a31aadf58a062 | [] | no_license | colinmorris/moz-graphs | 2f88472b7ad23ee0c63977c2151ac102af475769 | f412c0564fb210327436da0468f78932bd21dca0 | refs/heads/master | 2016-09-06T04:36:39.322822 | 2013-07-27T22:00:14 | 2013-07-27T22:00:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | """added a bunch of graph vars for assignee
Revision ID: 11a00705ac61
Revises: 48044ce97c4f
Create Date: 2013-04-08 10:21:12.247290
"""
# revision identifiers, used by Alembic.
revision = '11a00705ac61'
down_revision = '48044ce97c4f'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('bugmonths', sa.Column('assignee_constraint_prior_month', sa.Float(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('bugmonths', 'assignee_constraint_prior_month')
### end Alembic commands ###
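
# --- Hedged usage note (standard Alembic CLI) ---
#   alembic upgrade head     # applies upgrade() above
#   alembic downgrade -1     # reverts one revision via downgrade()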
| [
"colin.morris2@gmail.com"
] | colin.morris2@gmail.com |
32c3db21179f71d7d3b81b90f71abe18b6ebcc50 | a757953b0fab1c4d05ce6b70b68b411df42bb887 | /raredecay/run_config/config.py | bf95bbd8a144a333eb7c63e6c9faeb67b38b1b7f | [
"Apache-2.0"
] | permissive | efueger/raredecay | 4e6ae0cff4bde925e2985335793dc7a138a5c772 | 4a92742016b2aea27e5156fee168d69d2c0361d0 | refs/heads/master | 2021-01-01T20:37:16.970625 | 2017-07-31T14:48:35 | 2017-07-31T14:48:35 | 98,898,305 | 0 | 0 | null | 2017-07-31T14:31:54 | 2017-07-31T14:31:54 | null | UTF-8 | Python | false | false | 1,379 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 16 13:44:43 2016
The configuration file for external operations.
@author: Jonas Eschle "Mayou36"
"""
RUN_NAME = 'Classifier optimization'
run_message = str("This could be your advertisement" +
" ")
OUTPUT_CFG = dict(
run_name=RUN_NAME,
output_path=None,
del_existing_folders=False,
output_folders=dict(
log="log",
plots="plots",
results="results",
config="config"
)
)
save_fig_cfg = dict(
file_format=['png', 'pdf'],
to_pickle=True,
dpi=150,
figsize=(2,10)
)
# ==============================================================================
# LOGGER CONFIGURATION BEGIN
# ==============================================================================
logger_cfg = dict(
logging_mode='both', # define where the logger is written to
# take 'both', 'file', 'console' or 'no'
log_level_file='debug',
# specifies the level to be logged to the file
log_level_console='warning', # 'warning',
# specify the level to be logged to the console
overwrite_file=True,
# specifies whether it should overwrite the log file each time
# or instead make a new one each run
log_file_name='logfile_',
# the beginning ofthe name of the logfile, like 'project1'
log_file_dir=None # will be set automatically
)
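

# --- Hedged consumer sketch (illustrative; not part of the package) ---
# save_fig_cfg presumably feeds matplotlib when figures are written out:
# for fmt in save_fig_cfg['file_format']:
#     fig.savefig('output.' + fmt, dpi=save_fig_cfg['dpi'])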
| [
"mayou36@jonas.eschle.com"
] | mayou36@jonas.eschle.com |
84384e7ac129c854281286bde8c8fa39109edf50 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/409/usersdata/308/79040/submittedfiles/av1_programa1.py | cf9b07d9090afe0bfb3912b3b86c0d8a9fbf8726 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | # -*- coding: utf-8 -*-
# Reading the value from the user
x = int(input('Informe o valor: '))
# Testing whether the value is even
if (x%2==0):
print('PAR')
else:
print('IMPAR')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d25ac17866c59602f3a74c473525baa8b1525ecc | 42186fa6507999ce60d334a0f04d9ae2127579cd | /shanghai/crypto/aessss/aes.py | 28f4167b8afbc6cd5a1d96fa31eac1158ba77de1 | [] | no_license | Imtinmin/CTF_Challenge | ef8b62b3a4a1741d814d989f795a243257ff6f2b | ea276596f9effdbe0cf9ef4457e2e676e652bb74 | refs/heads/master | 2022-12-21T12:40:40.625562 | 2020-04-30T03:27:56 | 2020-04-30T03:27:56 | 158,999,004 | 18 | 3 | null | 2022-12-10T04:34:27 | 2018-11-25T04:53:04 | PHP | UTF-8 | Python | false | false | 6,458 | py | # -*- coding:utf-8 -*-
import random
import sys
import string
from hashlib import sha256
import SocketServer
from Crypto.Cipher import AES
from secret import FLAG, IV, KEY
class Task(SocketServer.BaseRequestHandler):
def proof_of_work(self):
proof = ''.join(
[random.choice(string.ascii_letters+string.digits) for _ in xrange(20)])
# print proof
digest = sha256(proof).hexdigest()
self.request.send("sha256(XXXX+%s) == %s\n" % (proof[4:], digest))
self.request.send('Give me XXXX:')
x = self.request.recv(10)
x = x.strip()
if len(x) != 4 or sha256(x+proof[4:]).hexdigest() != digest:
return False
return True
def pad(self, s):
s += (256 - len(s)) * chr(256 - len(s))
ret = ['\x00' for _ in range(256)]
for index, pos in enumerate(self.s_box):
ret[pos] = s[index]
return ''.join(ret)
def unpad(self, s):
ret = ['\x00' for _ in range(256)]
for index, pos in enumerate(self.invs_box):
ret[pos] = s[index]
return ''.join(ret[0:-ord(ret[-1])])
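    # --- Hedged note on the scheme above ---
    # pad() right-pads to 256 bytes PKCS#7-style, then scatters the bytes
    # through s_box used as a fixed permutation of positions; unpad()
    # inverts that permutation with invs_box, so unpad(pad(m)) == m for
    # any message shorter than 256 bytes.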
s_box = [
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
]
invs_box = [
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
]
def encrypt(self, msg):
cipher = AES.new(KEY, AES.MODE_CBC, IV)
return cipher.encrypt(msg).encode('hex')
def handle(self):
if not self.proof_of_work():
return
self.request.settimeout(15)
req = self.request
flag_len = len(FLAG)
assert(flag_len == 33)
self.flag = self.pad(FLAG)
assert(len(self.flag) == 256)
while True:
req.sendall(
'Welcome to AES(WXH) encrypt system.\n1. get encrypted flag.\n2. pad flag.\n3.Do some encrypt.\nYour choice:')
cmd = req.recv(2).strip()
try:
cmd = int(cmd)
except ValueError:
cmd = 0
if cmd == 1:
enc = self.encrypt(self.flag)
req.sendall('Here is the encrypted flag: 0x%s\n' % enc)
elif cmd == 2:
req.sendall('Pad me something:')
self.flag = self.unpad(self.flag)[
:flag_len] + req.recv(1024).strip()
assert(len(self.flag) <= 256)
self.flag = self.pad(self.flag)
req.sendall('Done.\n')
elif cmd == 3:
req.sendall('What do you want to encrypt:')
msg = self.pad(req.recv(1024).strip())
assert(len(msg) <= 256)
enc = self.encrypt(msg)
req.sendall('Here is the encrypted message: 0x%s\n' % enc)
else:
req.sendall('Do not lose heart~ !% Once WXH AK IOI 2019 can Solved! WXH is the first in the tianxia!')
req.close()
return
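

# --- Hedged client-side sketch (not part of the challenge server) ---
# Brute-forces the 4-character proof-of-work prefix: the server reveals
# proof[4:] and sha256(proof), so at most 62**4 candidates need hashing.
# `solve_pow` is a hypothetical helper; `string` and `sha256` are already
# imported at the top of this file.
def solve_pow(suffix, digest):
    import itertools
    alphabet = string.ascii_letters + string.digits
    for cand in itertools.product(alphabet, repeat=4):
        prefix = ''.join(cand)
        if sha256(prefix + suffix).hexdigest() == digest:
            return prefix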
class ThreadedServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == "__main__":
HOST, PORT = '0.0.0.0', 23333
print 'Run in port:23333'
server = ThreadedServer((HOST, PORT), Task)
server.allow_reuse_address = True
server.serve_forever()
| [
"954093370@qq.com"
] | 954093370@qq.com |
d882a075eccf5c70c14258c6c98aa38b84b83009 | e37fa62da82ae60561e59027f8626facc9728b4a | /learn_spyder/downloads/Chapter2/solutions/Exercise2-4.py | 8d0998a34134a66f4b644de906051a217539109b | [] | no_license | nhuntwalker/mystuff | ea05551ae17cfc0a6e4286a0dd97fe85f39bfe89 | bf2e79a1213fea1249ce93ef28d0caeffc710eb1 | refs/heads/master | 2021-01-19T01:04:22.215725 | 2016-06-23T00:22:22 | 2016-06-23T00:22:22 | 13,390,985 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | import ctypes as ct
# libc = ct.CDLL('libc.so.6') # in Linux/MacOS
libc = ct.cdll.msvcrt # in Windows
for c in range(128):
print(c, ' is a ctrl char' if libc.iscntrl(c) else 'is not a ctrl char')
| [
"nhuntwalker@gmail.com"
] | nhuntwalker@gmail.com |
1ce4e956fe58872f2719ab4f3c67b8c279caf0a8 | 8cf5c91fa744f49b40264061d4fd510ea761cf8f | /build/lib/dragonn/visualize_util.py | f672496759111da32eecdc588fa5a4607cc0eb20 | [
"MIT"
] | permissive | AvantiShri/dragonn | 0f38371ac7734099279f3b3e204565d9a663102f | aeb9674f39b71d07ff62d2c3745bef4a2e55b95f | refs/heads/master | 2020-04-23T18:01:34.394772 | 2019-02-18T06:07:40 | 2019-02-18T06:07:40 | 171,352,688 | 0 | 0 | MIT | 2019-02-18T20:37:59 | 2019-02-18T20:37:59 | null | UTF-8 | Python | false | false | 5,953 | py | # Adapted from Keras source code
# License: https://github.com/fchollet/keras/blob/master/LICENSE
import itertools
from keras.layers.containers import Graph, Sequential
from keras.layers.core import Merge
try:
# pydot-ng is a fork of pydot that is better maintained
import pydot_ng as pydot
except ImportError:
# fall back on pydot if necessary
import pydot
if not pydot.find_graphviz():
raise RuntimeError("Failed to import pydot. You must install pydot"
" and graphviz for `pydotprint` to work.")
def layer_typename(layer):
return type(layer).__module__ + "." + type(layer).__name__
def get_layer_to_name(model):
"""Returns a dict mapping layer to their name in the model"""
if not isinstance(model, Graph):
return {}
else:
node_to_name = itertools.chain(
model.nodes.items(), model.inputs.items(), model.outputs.items()
)
return {v: k for k, v in node_to_name}
class ModelToDot(object):
"""
This is a helper class which visits a keras model (Sequential or Graph) and
returns a pydot.Graph representation.
This is implemented as a class because we need to maintain various states.
Use it as ```ModelToDot()(model)```
Keras models can have an arbitrary number of inputs and outputs. A given
layer can have multiple inputs but has a single output. We therefore
explore the model by starting at its output and crawling "up" the tree.
"""
def _pydot_node_for_layer(self, layer, label):
"""
Returns the pydot.Node corresponding to the given layer.
`label` specify the name of the layer (only used if the layer isn't yet
associated with a pydot.Node)
"""
# Check if this already exists (will be the case for nodes that
# serve as input to more than one layer)
if layer in self.layer_to_pydotnode:
node = self.layer_to_pydotnode[layer]
else:
layer_id = 'layer%d' % self.idgen
self.idgen += 1
label = label + " (" + layer_typename(layer) + ")"
if self.show_shape:
# Build the label that will actually contain a table with the
# input/output
outputlabels = str(layer.output_shape)
if hasattr(layer, 'input_shape'):
inputlabels = str(layer.input_shape)
elif hasattr(layer, 'input_shapes'):
inputlabels = ', '.join(
[str(ishape) for ishape in layer.input_shapes])
else:
inputlabels = ''
label = "%s\n|{input:|output:}|{{%s}|{%s}}" % (
label, inputlabels, outputlabels)
node = pydot.Node(layer_id, label=label)
self.g.add_node(node)
self.layer_to_pydotnode[layer] = node
return node
def _process_layer(self, layer, layer_to_name=None, connect_to=None):
"""
Process a layer, adding its node to the graph and creating edges to its
outputs.
`connect_to` specify where the output of the current layer will be
connected
`layer_to_name` is a dict mapping layer to their name in the Graph
model. Should be {} when processing a Sequential model
"""
# The layer can be a container layer, in which case we can recurse
is_graph = isinstance(layer, Graph)
is_seq = isinstance(layer, Sequential)
if self.recursive and (is_graph or is_seq):
# We got a container layer, recursively transform it
if is_graph:
child_layers = layer.outputs.values()
else:
child_layers = [layer.layers[-1]]
for l in child_layers:
self._process_layer(l, layer_to_name=get_layer_to_name(layer),
connect_to=connect_to)
else:
# This is a simple layer.
label = layer_to_name.get(layer, '')
layer_node = self._pydot_node_for_layer(layer, label=label)
if connect_to is not None:
self.g.add_edge(pydot.Edge(layer_node, connect_to))
# Proceed upwards to the parent(s). Only Merge layers have more
# than one parent
if isinstance(layer, Merge): # Merge layer
for l in layer.layers:
self._process_layer(l, layer_to_name,
connect_to=layer_node)
elif hasattr(layer, 'previous') and layer.previous is not None:
self._process_layer(layer.previous, layer_to_name,
connect_to=layer_node)
def __call__(self, model, recursive=True, show_shape=False,
connect_to=None):
self.idgen = 0
# Maps keras layer to the pydot.Node representing them
self.layer_to_pydotnode = {}
self.recursive = recursive
self.show_shape = show_shape
self.g = pydot.Dot()
self.g.set('rankdir', 'TB')
self.g.set('concentrate', True)
self.g.set_node_defaults(shape='record')
if hasattr(model, 'outputs'):
# Graph
for name, l in model.outputs.items():
self._process_layer(l, get_layer_to_name(model),
connect_to=connect_to)
else:
# Sequential container
self._process_layer(model.layers[-1], {}, connect_to=connect_to)
return self.g
def to_graph(model, **kwargs):
"""
`recursive` controls whether we recursively explore container layers
`show_shape` controls whether the shape is shown in the graph
"""
return ModelToDot()(model, **kwargs)
def plot(model, to_file='model.png', **kwargs):
graph = to_graph(model, **kwargs)
graph.write_png(to_file)
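

# --- Hedged usage sketch ---
# `model` is any Keras 1.x Sequential or Graph instance (illustrative);
# show_shape/recursive are forwarded to ModelToDot via to_graph above.
# plot(model, to_file='model.png', show_shape=True, recursive=True)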
| [
"annashcherbina@gmail.com"
] | annashcherbina@gmail.com |
4eeccaf83120bc0894347b8916c466c18737500d | 4ecbc07cdc980f899510e0db2971ba754c474670 | /timm/models/layers/mlp.py | 05d076527cfb6f15bcf5f2830fa36777abbc5a1e | [
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-4.0",
"Apache-2.0"
] | permissive | shrikumaran/pytorch-image-models | 8c74ec7d705b6b2fb223d519afdd61f33c108cec | 6d8272e92c3d5f13a9fdd91dfe1eb7fae6784589 | refs/heads/master | 2023-06-16T06:41:30.088230 | 2021-07-08T18:23:55 | 2021-07-08T18:51:12 | 384,009,847 | 0 | 0 | Apache-2.0 | 2021-07-08T05:22:35 | 2021-07-08T05:22:34 | null | UTF-8 | Python | false | false | 3,774 | py | """ MLP module w/ dropout and configurable activation layer
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GluMlp(nn.Module):
""" MLP w/ GLU style gating
See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
assert hidden_features % 2 == 0
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features // 2, out_features)
self.drop = nn.Dropout(drop)
def init_weights(self):
# override init of fc1 w/ gate portion set to weight near zero, bias=1
fc1_mid = self.fc1.bias.shape[0] // 2
nn.init.ones_(self.fc1.bias[fc1_mid:])
nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6)
def forward(self, x):
x = self.fc1(x)
x, gates = x.chunk(2, dim=-1)
x = x * self.act(gates)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GatedMlp(nn.Module):
""" MLP as used in gMLP
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
gate_layer=None, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
if gate_layer is not None:
assert hidden_features % 2 == 0
self.gate = gate_layer(hidden_features)
hidden_features = hidden_features // 2 # FIXME base reduction on gate property?
else:
self.gate = nn.Identity()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.gate(x)
x = self.fc2(x)
x = self.drop(x)
return x
class ConvMlp(nn.Module):
""" MLP using 1x1 convs that keeps spatial dims
"""
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.norm(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
return x
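

# --- Hedged usage sketch (not part of the original module) ---
# Token-wise MLP expanding 64 -> 256 -> 64 features, as used inside a
# transformer block; batch and sequence sizes below are illustrative.
# import torch
# mlp = Mlp(in_features=64, hidden_features=256, drop=0.1)
# out = mlp(torch.randn(8, 197, 64))  # -> torch.Size([8, 197, 64])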
| [
"rwightman@gmail.com"
] | rwightman@gmail.com |
a23aaeb1fe329a362d00beace17f570d5ab087b3 | 5cb3b0b88c1baa2fae9562ac4cad5f84d65221e1 | /w7/demo/demo/core/models.py | b936ac6f63740a7a568da59d6fbd5aca09523171 | [] | no_license | bobur554396/BFDjango2020Spring | aa7ad9a595b247100f876e36585368af078d862e | e7ef04be2cf4d2506c2212ea4509a106e12d4dd4 | refs/heads/master | 2020-12-15T07:07:16.214284 | 2020-04-13T19:16:28 | 2020-04-13T19:16:28 | 235,028,587 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,162 | py | from django.db import models
from rest_framework import serializers
class Publisher(models.Model):
"""Publisher class"""
MALE = 1
FEMALE = 2
GENDER = (
(MALE, 'male'),
(FEMALE, 'female'),
)
name = models.CharField(max_length=300, unique=True)
city = models.CharField(max_length=300)
gender = models.PositiveSmallIntegerField(choices=GENDER, default=MALE)
objects = models.Manager()
class Meta:
verbose_name = 'Publisher'
verbose_name_plural = 'Publishers'
# unique_together = ('name', 'city')
# ordering = ('name',)
# db_table = 'publishers_table'
def __str__(self):
return self.name
    def save(self, *args, **kwargs):
        # Delegate to the default save; a bare `pass` here would silently
        # prevent Publisher rows from ever being persisted.
        super(Publisher, self).save(*args, **kwargs)
class Author(models.Model):
name = models.CharField(max_length=300)
email = models.CharField(max_length=300)
rating = models.IntegerField(default=0)
# creator = models.ForeignKey(MainUser)
def __str__(self):
return self.name
def set_new_rating(self, value):
self.rating = value
self.save()
# @property
def books_count(self):
pass
# return self.books.count()
# print(a.books_count)
# class PublishedBook(models.Manager):
# def get_queryset(self):
# return self.filter(is_published=True)
#
# def filter_by_name(self, name_pattern):
# return self.filter(name__contains=name_pattern)
#
#
# class NotPublishedBook(models.Manager):
# def get_queryset(self):
# return self.filter(is_published=False)
def valid_num_pages(value):
    # The original test `10 >= value >= 5000` can never be true, so every
    # value raised; the intended page range is presumably 10..5000.
    if not (10 <= value <= 5000):
        raise serializers.ValidationError('invalid num of pages')
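

# --- Hedged example (assuming the intended 10..5000 page range) ---
# valid_num_pages(3)    # raises serializers.ValidationError
# valid_num_pages(300)  # returns None silently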
class Book(models.Model):
name = models.CharField(max_length=300)
price = models.FloatField(default=0)
num_pages = models.IntegerField(default=0,
validators=[valid_num_pages])
is_published = models.BooleanField(default=False)
author = models.ForeignKey(Author,
on_delete=models.CASCADE,
related_name='books')
publisher = models.ForeignKey(Publisher,
on_delete=models.CASCADE,
related_name='books')
objects = models.Manager()
# published_books = PublishedBook()
# not_published_books = NotPublishedBook()
@property
def price_round(self):
return round(self.price, 3)
@classmethod
def top_ten(cls):
return cls.objects.all()[:10]
@staticmethod
def cmp_books(book1, book2):
return book1.price > book2.price
# b1 = Book()
# print(b1.price_round)
#
# b2 = Book()
#
# ret = Book.cmp_books(b1, b2)
class Tag(models.Model):
name = models.CharField(max_length=200)
class BookTag(models.Model):
tag = models.ForeignKey(Tag, on_delete=models.CASCADE,
related_name='books')
book = models.ForeignKey(Book, on_delete=models.CASCADE,
related_name='tags')
# t = Tag()
# t.books.all()
#
# b = Book()
# for book_tag in b.tags.all():
# print(book_tag.tag)
#
| [
"bobur.muhsimbaev@gmail.com"
] | bobur.muhsimbaev@gmail.com |
a28d8b93e2c943b416dc0e882ce5ceeaff0889f8 | 551ef0567aca428a535775d3949f5d9670c0d29c | /abc/212/c/main.py | 9351e78f01dac0c8505cf070b9bc58365ee2fbc7 | [] | no_license | komo-fr/AtCoder | 7451a9402466ce8d487d0c521128732061c647df | c916889294cb12f21e74254de43b3e17e1b354bc | refs/heads/master | 2023-07-22T07:05:52.955188 | 2023-03-01T14:22:16 | 2023-03-01T14:22:16 | 213,109,943 | 0 | 0 | null | 2023-07-06T22:01:28 | 2019-10-06T04:44:49 | Python | UTF-8 | Python | false | false | 529 | py | #!/usr/bin/env python3
N, M = list(map(int, input().split()))
a_list = list(map(int, input().split()))
b_list = list(map(int, input().split()))
ab_list = []
for a in a_list:
ab_list.append((a, "a"))
for b in b_list:
ab_list.append((b, "b"))
ab_list = sorted(ab_list)
a = None
b = None
min_y = float("inf")
for x in ab_list:
if x[1] == "a":
a = x[0]
if x[1] == "b":
b = x[0]
if a is not None and b is not None:
y = abs(a - b)
min_y = min([y, min_y])
ans = min_y
print(ans)
| [
"komo.mdrms@gmail.com"
] | komo.mdrms@gmail.com |
4c0b163a7460d2fd4bc039cf7ea4f217d04db9cf | 47ce68e1ff970318fd31ac43405d0e1fa3594bf6 | /Models/biGAN/lowerDimBiganXEntropy.py | c888aeec7669d02cd851651a907718f7717dec9c | [
"BSD-3-Clause"
] | permissive | Midoriii/Anomaly_Detection_Diploma | 7196da379f8aefbd4546ca23e8303d1829e059fb | 11145e3e5210a4e45a33d98b138213edb7bc5d3d | refs/heads/master | 2023-03-25T20:42:56.961210 | 2021-03-14T01:13:39 | 2021-03-14T01:13:39 | 261,205,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,537 | py | '''
Copyright (c) 2021, Štěpán Beneš
Basic BiGAN net, using cross-entropy as the loss, built for 192x192 inputs
'''
import numpy as np
from Models.biGAN.BaseBiganModel import BaseBiganModel
from Models.Losses.custom_losses import wasserstein_loss
from Models.biGAN.weightclip_constraint import WeightClip
from keras.layers import Input, Reshape, Dense, Flatten, concatenate
from keras.layers import UpSampling2D, Conv2D, MaxPooling2D, BatchNormalization, Dropout, LeakyReLU
from keras.models import Model
from keras.optimizers import RMSprop, Adam, SGD
class lowerDimBiganXEntropy(BaseBiganModel):
def __init__(self, input_shape, latent_dim=24, lr=0.0005, w_clip=0.01, batch_size=4):
super().__init__(input_shape, latent_dim, lr, w_clip, batch_size)
self.name = "lowerDimBiganXEntropy"
g_optimizer = Adam(lr=self.lr, beta_1=0.5)
d_optimizer = SGD(lr=self.lr)
self.disc_labels_real = np.zeros((self.batch_size, 1))
self.genc_labels_real = np.zeros((self.batch_size, 1))
self.genc_labels_fake = np.ones((self.batch_size, 1))
self.disc_labels_fake = np.ones((self.batch_size, 1))
self.d = self.build_discriminator()
self.d.compile(optimizer=d_optimizer, loss='binary_crossentropy', metrics=['accuracy'])
self.g = self.build_generator()
self.e = self.build_encoder()
# The Discriminator part in GE model won't be trainable - GANs take turns.
# Since the Discrimiantor itself has been previously compiled, this won't affect it.
self.d.trainable = False
self.ge = self.build_ge_enc()
self.ge.compile(optimizer=g_optimizer, loss=['binary_crossentropy', 'binary_crossentropy'])
return
def build_generator(self):
z_input = Input(shape=[self.latent_dim])
x = Dense(6*6*32)(z_input)
x = Reshape([6, 6, 32])(x)
# 6 -> 12
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = UpSampling2D((2, 2))(x)
# 12 -> 24
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = UpSampling2D((2, 2))(x)
# 24 -> 48
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = UpSampling2D((2, 2))(x)
# 48 -> 96
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = UpSampling2D((2, 2))(x)
# 96 -> 192
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(1, (3, 3), activation='tanh', padding='same')(x)
return Model(inputs=z_input, outputs=x)
def build_encoder(self):
img_input = Input(shape=[self.input_shape, self.input_shape, 1])
# 192 -> 96
x = Conv2D(32, (3, 3), padding='same')(img_input)
x = LeakyReLU(0.1)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
# 96 -> 48
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
# 48 -> 24
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
# 24 -> 12
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
# 12 -> 6
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Flatten()(x)
x = Dense(256)(x)
x = LeakyReLU(0.1)(x)
x = Dense(self.latent_dim)(x)
return Model(inputs=img_input, outputs=x)
def build_discriminator(self):
img_input = Input(shape=[self.input_shape, self.input_shape, 1])
z_input = Input(shape=[self.latent_dim])
# Latent
l = Dense(256)(z_input)
l = LeakyReLU(0.1)(l)
l = Dense(256)(l)
l = LeakyReLU(0.1)(l)
# Image
x = Conv2D(64, (3, 3), padding='same')(img_input)
x = BatchNormalization()(x)
x = LeakyReLU(0.1)(x)
x = Dropout(rate=self.dropout)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(64, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = LeakyReLU(0.1)(x)
x = Dropout(rate=self.dropout)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(128, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = LeakyReLU(0.1)(x)
x = Dropout(rate=self.dropout)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(128, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = LeakyReLU(0.1)(x)
x = Dropout(rate=self.dropout)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
# Joint
x = Flatten()(x)
x = concatenate([x, l])
x = Dense(256)(x)
x = LeakyReLU(0.1)(x)
x = Dense(1, activation='sigmoid')(x)
return Model(inputs=[img_input, z_input], outputs=x)
def build_ge_enc(self):
img_input = Input(shape=[self.input_shape, self.input_shape, 1])
z_input = Input(shape=[self.latent_dim])
fake_imgs = self.g(z_input)
critic_fake = self.d([fake_imgs, z_input])
fake_z = self.e(img_input)
critic_real = self.d([img_input, fake_z])
return Model(inputs=[img_input, z_input], outputs=[critic_real, critic_fake])
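

# --- Hedged training-step sketch (not part of the original class) ---
# One adversarial round under the usual BiGAN recipe with these labels
# (real -> 0, fake -> 1): the discriminator trains on (imgs, E(imgs)) vs
# (G(z), z), then G and E update through the frozen discriminator with
# flipped targets. `model` and `imgs` are illustrative names; imgs is a
# batch shaped (batch_size, 192, 192, 1) scaled to the tanh range.
# z = np.random.normal(size=(model.batch_size, model.latent_dim))
# model.d.train_on_batch([imgs, model.e.predict(imgs)], model.disc_labels_real)
# model.d.train_on_batch([model.g.predict(z), z], model.disc_labels_fake)
# model.ge.train_on_batch(
#     [imgs, z], [model.genc_labels_fake, model.genc_labels_real])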
| [
"stephen.Team24@gmail.com"
] | stephen.Team24@gmail.com |
4f6c3c3b84254e921f2c0c5e943bbdf9507428ac | 54277288865f738e44d7be1d6b41b19c63af267e | /configs/vcop/pretraining/r3d_18_ucf101.py | eee5d0eb75ac9265d0ceb4f2ff2d8593597c7c29 | [] | no_license | scenarios/SR-SVRL | 7b41d29e16cff3020f333efc28a624d85bba4537 | 26e89ecb29355635b10a355f2f16f1b5db9c4e9b | refs/heads/master | 2023-02-26T06:16:13.314491 | 2021-01-30T16:30:57 | 2021-01-30T16:30:57 | 307,295,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | _base_ = ['./default_runtime.py']
work_dir = './output/vcop/pretraining/r3d_18_ucf101'
model = dict(
type='VCOP',
backbone=dict(
type='R3D',
depth=18,
num_stages=4,
stem=dict(
temporal_kernel_size=3,
temporal_stride=1,
in_channels=3,
with_pool=False,
),
down_sampling=[False, True, True, True],
channel_multiplier=1.0,
bottleneck_multiplier=1.0,
with_bn=True,
pretrained=None,
),
vcop_head=dict(
in_channels=512,
tuple_len=3,
hidden_channels=512,
dropout_ratio=0.25
)
)
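
# --- Hedged usage sketch (assuming mmcv-style config loading) ---
# from mmcv import Config
# cfg = Config.fromfile('configs/vcop/pretraining/r3d_18_ucf101.py')
# print(cfg.model['type'], cfg.work_dir)  # -> VCOP ./output/vcop/...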
| [
"zyz0205@hotmail.com"
] | zyz0205@hotmail.com |
0df64cbead81c2a07c3c63de293eecf1dcd41184 | 875bb84440094ce058a2ec25a661a7da6bb2e129 | /algo_py/boj/bj1212.py | 3005d1075ae35471fa8e65763598520f538478f1 | [] | no_license | shg9411/algo | 150e4291a7ba15990f17ca043ae8ab59db2bf97b | 8e19c83b1dbc0ffde60d3a3b226c4e6cbbe89a7d | refs/heads/master | 2023-06-22T00:24:08.970372 | 2021-07-20T06:07:29 | 2021-07-20T06:07:29 | 221,694,017 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | print(bin(int('0o'+input().rstrip(),8))[2:]) | [
"shg9411@naver.com"
] | shg9411@naver.com |
1d75984677b077ce6e550ab347cff03ec6cfd208 | 02e2bae82882e9d16b00de32ffbb65b51010903c | /statement/models.py | 6558c6e70f924e858e576a94f51883682d69461d | [] | no_license | jack2150/rivers | 5bf9f11d0fa48aede825329bb8a0fc539ef43a11 | 2aa8bc18e8626a1a838053886a49a2b198c6d152 | refs/heads/master | 2021-01-17T13:13:02.839904 | 2018-11-09T08:48:42 | 2018-11-09T08:48:42 | 35,507,795 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 39,875 | py | import logging
from importlib import import_module
from datetime import datetime
from django.db import models
from django.db.models import Q
import pandas as pd
from pandas.tseries.offsets import Day
from base.ufunc import remove_comma
logger = logging.getLogger('views')
class StatementName(models.Model):
"""
Name for all real/test statement
"""
path = models.CharField(max_length=20)
name = models.CharField(max_length=50)
short_name = models.CharField(max_length=20, default='', blank=True)
cash_type = models.CharField(
max_length=20, choices=(('real_money', 'Real Money'), ('paper_money', 'Paper money')),
help_text='Statement cash type'
)
description = models.TextField()
capital = models.DecimalField(max_digits=10, decimal_places=2, default=0.0)
start = models.DateField()
stop = models.DateField(null=True, blank=True)
def __unicode__(self):
return '{name}'.format(name=self.short_name)
class Statement(models.Model):
"""
Account Summary
Net Liquidating Value,"$49,141.69"
Stock Buying Power,"$36,435.34"
Option Buying Power,"$36,435.34"
Commissions/Fees YTD,$159.08
Futures Commissions YTD,$0.00 <-- skip futures
"""
statement_name = models.ForeignKey(StatementName)
date = models.DateField()
net_liquid = models.DecimalField(max_digits=20, decimal_places=2)
stock_bp = models.DecimalField(max_digits=20, decimal_places=2)
option_bp = models.DecimalField(max_digits=20, decimal_places=2)
commission_ytd = models.DecimalField(max_digits=20, decimal_places=2)
csv_data = models.TextField(blank=True)
# unique data
unique_together = (('statement_name', 'date'),)
def load_csv(self, lines):
"""
        Parse the summary lines of the account csv and populate this statement
        :param lines: list of str
:return: Statement
"""
values = list()
for line in [x.replace('$', '') for x in lines]:
if '"' in line:
item = map(lambda y: y.replace(',', ''), line.split('"')[1::2])[0]
else:
item = line.split(',')[1]
if '(' in item[0] and ')' in item[-1]:
item = '-%s' % item[1:-1]
values.append(item)
self.net_liquid = values[0] if values[0] != 'N/A' else 0.0
self.stock_bp = values[1] if values[1] != 'N/A' else 0.0
self.option_bp = values[2] if values[2] != 'N/A' else 0.0
self.commission_ytd = values[3]
return self
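    # --- Hedged example (values mirror the class docstring sample) ---
    # s = Statement().load_csv(['Net Liquidating Value,"$49,141.69"',
    #                           'Stock Buying Power,"$36,435.34"',
    #                           'Option Buying Power,"$36,435.34"',
    #                           'Commissions/Fees YTD,$159.08'])
    # s.net_liquid  # -> '49141.69' (a string until the model is saved)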
def to_hdf(self):
"""
:return: DataFrame
"""
return pd.DataFrame(
data=[[self.net_liquid, self.stock_bp, self.option_bp, self.commission_ytd]],
index=[self.date],
columns=['Net Liquid', 'Stock BP', 'Option BP', 'Commission YTD']
)
def __unicode__(self):
return '{name} {date}'.format(name=self.statement_name.short_name, date=self.date)
def reset_controller(self):
"""
        After a statement is created, reset the controller instance
"""
self.controller = self.Controller(self)
class Controller(object):
def __init__(self, statement):
self.statement = statement
self.acc_trades = self.statement.accounttrade_set
self.open_pos = Position.objects.filter(status='OPEN')
""":type: QuerySet"""
self.holding_equity = self.statement.holdingequity_set.all()
self.holding_option = self.statement.holdingoption_set.all()
def position_trades(self):
"""
Loop every trades, then open/close position
"""
logger.info('POS: Loop each account trades and open/close position')
times = sorted(set([t.time for t in self.acc_trades.all()]))
opens = []
for time in times:
for symbol in set([t.symbol for t in self.acc_trades.filter(time=time)]):
opens.append((time, symbol))
for time, symbol in opens:
trades = self.acc_trades.filter(
Q(time=time) & Q(symbol=symbol)
)
pos_effect = list(set([t.pos_effect for t in trades]))[0]
if pos_effect == 'TO OPEN':
position = self.position_open(symbol, time, trades)
elif pos_effect == 'TO CLOSE':
position = self.position_close(symbol, time, trades)
else:
raise ValueError('Invalid pos effect on trade "%s"' % pos_effect)
# add acc trades
for trade in trades:
position.accounttrade_set.add(trade)
# check got holding but no position
for equity in self.holding_equity:
# skip if bonus share or split
if ':' in equity.description:
continue
pos = self.open_pos.filter(symbol=equity.symbol)
if not pos.exists():
                    print '<%s> %s has equity, no open position' % (
                        equity.symbol, self.statement.date
                    )
                    #raise LookupError('<%s> %s has equity, no open position' % (
                    #    equity.symbol, self.statement.date
                    #))
for option in self.holding_option:
pos = self.open_pos.filter(symbol=option.symbol)
if not pos.exists():
                    print '<%s> %s has options, no open position' % (
                        option.symbol, self.statement.date
                    )
                    #raise LookupError('<%s> %s has options, no open position' % (
                    #    option.symbol, self.statement.date
                    #))
def position_open(self, symbol, time, trades):
"""
Replace position trades with more functions
:param symbol: str
:param time: DateTime
:param trades: list of AccountTrade
:return: Position
"""
logger.info('POS: %s open, date: %s' % (symbol.upper(), self.statement.date))
if self.open_pos.filter(symbol=symbol).exists():
position = self.open_pos.get(symbol=symbol)
# different strategy
if position.spread != Position().set_open(self.statement, trades).spread:
logger.info('POS: %s different strategy found, set custom' % symbol)
position.set_custom()
position.save()
# same strategy but different expire date
if position.name != 'STOCK':
# check expire date
expire_date0 = sorted(list(set([
p['exp'] for p in position.accounttrade_set.filter(
pos_effect='TO OPEN'
).values('exp')
])))
expire_date1 = sorted(list(set([
t['exp'] for t in trades.filter().values('exp')
])))
if expire_date0 != expire_date1:
logger.info('POS: %s different expire found, set custom' % symbol)
position.set_custom()
position.save()
# check strike
strikes0 = [
float(p['strike']) for p in position.accounttrade_set.filter(
pos_effect='TO OPEN'
).values('strike')
]
strikes1 = [
float(t['strike']) for t in trades.filter().values('strike')
]
if set(strikes0) != set(strikes1):
position.set_custom()
else:
# create new pos if no existing pos
position = Position()
position.set_open(self.statement, trades)
position.save()
position.create_stages(trades)
self.add_relations(symbol=symbol, time=time)
return position
def position_close(self, symbol, time, trades):
"""
Close position for all condition
:param symbol: str
:param time: datetime
:param trades: list
:return: Position
"""
logger.info('POS: %s close, date: %s' % (symbol.upper(), self.statement.date))
opens = self.open_pos.filter(symbol=symbol)
if opens.exists():
if opens.count() > 1:
raise LookupError('<%s> more than one open position' % symbol)
position = self.open_pos.get(symbol=symbol)
self.add_relations(symbol=symbol, time=time)
equity = self.holding_equity.filter(symbol=symbol)
options = self.holding_option.filter(symbol=symbol)
if equity.exists() and options.exists():
                    logger.info('<%s> equity and options still exist, partial close' % symbol)
elif equity.exists() and not options.exists():
logger.info('<%s> equity still exists, partial close' % symbol)
elif not equity.exists() and options.exists():
                    logger.info('<%s> options still exist, partial close' % symbol)
else:
logger.info('<%s> no more holding, set close' % symbol)
position.set_close(self.statement, time)
position.save()
else:
equity = self.holding_equity.filter(symbol=symbol)
options = self.holding_option.filter(symbol=symbol)
if equity.exists() or options.exists():
raise ValueError('Position <{symbol}> not found for closing {date}'.format(
symbol=symbol, date=self.statement.date
))
else:
                    # symbol was already closed earlier; fetch the latest closed position
                    position = Position.objects.filter(symbol=symbol).latest('stop')
                    logger.info('<%s> position already closed' % symbol)
return position
def position_expires(self):
"""
Set expire position
:return: None
"""
logger.info('POS: check any position is expired')
for position in Position.objects.filter(status='OPEN'):
equity = self.statement.holdingequity_set.filter(symbol=position.symbol)
option = self.statement.holdingoption_set.filter(symbol=position.symbol)
if not equity.exists() and not option.exists():
if position.name != 'STOCK':
position.set_expire(self.statement.date)
position.save()
def add_relations(self, symbol=None, time=None):
"""
Set related objects (profit loss, equity, options)
:param symbol: str
:param time: datetime
"""
if symbol:
logger.info('POS: %s add relation, time: %s' % (symbol.upper(), time))
positions = [Position.objects.get(Q(symbol=symbol) & Q(status='OPEN'))]
else:
logger.info('POS: add relation for all open position symbols')
positions = self.open_pos
for position in positions:
f1 = (Q(symbol=position.symbol) & Q(position__isnull=True))
f2 = f1 & (Q(time__lte=time) if time else
Q(time__in=position.accounttrade_set.all().values('time')))
for account_order in self.statement.accountorder_set.filter(f2):
position.accountorder_set.add(account_order)
for holding_equity in self.statement.holdingequity_set.filter(f1):
position.holdingequity_set.add(holding_equity)
for holding_option in self.statement.holdingoption_set.filter(f1):
position.holdingoption_set.add(holding_option)
for profit_loss in self.statement.profitloss_set.filter(f1):
position.profitloss_set.add(profit_loss)
for cash_balance in self.statement.cashbalance_set.filter(position__isnull=True):
values = cash_balance.description.split(' ')[:5]
if position.symbol in values and time == cash_balance.time:
position.cashbalance_set.add(cash_balance)
def __init__(self, *args, **kwargs):
models.Model.__init__(self, *args, **kwargs)
self.controller = Statement.Controller(self)
class Position(models.Model):
statement = models.ForeignKey(Statement)
symbol = models.CharField(max_length=20)
name = models.CharField(max_length=100)
spread = models.CharField(max_length=100)
status = models.CharField(max_length=6, default='OPEN')
start = models.DateTimeField()
stop = models.DateTimeField(null=True, blank=True, default=None)
def set_open(self, statement, trades):
"""
Open a new position and identify its strategy
:param statement: Statement
:param trades: QuerySet
:return: Position
"""
self.status = 'OPEN'
# set symbol
self.statement = statement
self.symbol = trades[0].symbol
# set start date
self.start = datetime.strptime('%s %s' % (
trades[0].statement.date, trades[0].time
), '%Y-%m-%d %H:%M:%S')
# set name
spreads = list(set([trade.spread for trade in trades]))
if len(spreads) > 1:
            raise ValueError('Different trade orders for a single symbol in the account statement.')
        # `spreads` holds exactly one name here, so test its value directly
        if spreads[0] in ('FUTURE', 'FOREX'):
raise ValueError('Future or forex account trades.')
else:
self.name = spreads[0] # for stock, single option or covered position
# set spread
self.spread = 'CUSTOM'
if self.name == 'STOCK':
self.spread = '{side}_STOCK'.format(
side='LONG' if trades[0].qty > 0 else 'SHORT'
)
elif self.name == 'SINGLE': # single option
self.spread = '{side}_{contract}'.format(
side='LONG' if trades[0].side == 'BUY' else 'NAKED',
contract=trades[0].contract
)
elif self.name == 'COVERED':
# print trades
# print [t.contract for t in trades]
# print trades.filter(contract='COVERED')
stock_order = trades.filter(
Q(spread='COVERED') & Q(contract__in=('STOCK', 'ETF'))
).first()
option_order = trades.filter(
Q(spread='COVERED') & (Q(contract='CALL') | Q(contract='PUT'))
).first()
if stock_order.side == 'BUY':
if option_order.side == 'BUY' and option_order.contract == 'PUT':
self.spread = 'PROTECTIVE_PUT'
elif option_order.side == 'SELL' and option_order.contract == 'CALL':
self.spread = 'COVERED_CALL'
elif stock_order.side == 'SELL':
if option_order.side == 'BUY' and option_order.contract == 'CALL':
self.spread = 'PROTECTIVE_CALL'
elif option_order.side == 'SELL' and option_order.contract == 'PUT':
self.spread = 'COVERED_PUT'
elif self.name == 'CUSTOM':
self.spread = 'CUSTOM'
else:
self.spread = '{side}_{contract}{spread}'.format(
side='LONG' if trades[0].net_price > 0 else 'SHORT',
contract=(trades[0].contract + '_'
if len(list(set([t.contract for t in trades]))) == 1
else ''),
spread=spreads[0]
)
logger.info('POS: open %s with %s strategy' % (self.symbol.upper(), self.spread))
return self
def create_stages(self, trades):
"""
Create stage using position trades
:param trades: list
:return: None
"""
logger.info('POS: %s %s create stages' % (self.symbol.upper(), self.spread))
if self.name and self.spread and self.name != 'CUSTOM':
try:
stage_module = import_module(
'statement.position.stages.{name}'.format(name=self.name.lower())
)
stage_cls = getattr(
stage_module,
'Stage{spread}'.format(
spread=''.join([x.lower().capitalize() for x in self.spread.split('_')])
)
)
stages = stage_cls(trades).create()
for stage in stages:
logger.info('POS: stage: %s created' % stage)
self.positionstage_set.add(stage)
except (AttributeError, ImportError):
                logger.info('%s.%s not yet implemented' % (self.name, self.spread))
pass
elif self.name == 'CUSTOM':
logger.info('<%s> custom not yet implemented' % self.symbol)
else:
raise ValueError('Please set name and spread before run create_stages.')
def set_close(self, statement, time):
"""
Close current position
:param statement: Statement
:param time: str
:return: Position
"""
if self.id:
self.status = 'CLOSE'
self.stop = datetime.strptime('%s %s' % (statement.date, time), '%Y-%m-%d %H:%M:%S')
else:
raise ValueError('Please close a existing positions.')
def set_expire(self, date):
"""
Set position expire
:param date: DateTime
:return: Position
"""
logger.info('POS: %s position set expired, no trade' % self.symbol.upper())
if self.id:
self.status = 'EXPIRE'
self.stop = date
else:
raise ValueError('Please set expire a existing position')
return self
def set_exercise(self, date):
"""
Set position expire
:param date: DateTime
:return: Position
"""
if self.id:
self.status = 'EXERCISE'
self.stop = date
else:
raise ValueError('Please set expire a existing position')
return self
def set_custom(self):
"""
Set position is custom
:rtype : Position
"""
self.name = 'CUSTOM'
self.spread = 'CUSTOM'
return self
def make_conditions(self):
"""
Get all stage and make condition
:return: list of str
"""
conditions = list()
position_stages = self.positionstage_set.order_by('price').all()
if position_stages.count() == 0:
return []
# make all stage into a list
operators = list()
for s in position_stages:
operators += [(s.price, '<', s.lt_stage, s.lt_amount),
(s.price, '==', s.e_stage, s.e_amount),
(s.price, '>', s.gt_stage, s.gt_amount)]
# make a list of same stage
stages = list()
last = 0
for key, (s0, s1) in enumerate(zip(operators[:-1], operators[1:])):
if s0[2] != s1[2]:
stages.append(operators[last:key + 1])
last = key + 1
else:
stages.append(operators[last:len(operators)])
for stage_list in stages:
condition0 = list()
amounts = list()
for price in sorted(set([s[0] for s in stage_list])):
condition1 = list()
for stage in [s for s in stage_list if s[0] == price]:
condition1.append('{x} {operator} {price}'.format(
x='{x}',
operator=stage[1],
price=stage[0],
))
amounts.append(stage[3])
condition0.append(' or '.join(condition1))
else:
if all([a == amounts[0] for a in amounts]):
stage_operators = {
'MAX_PROFIT': '{y} == {amount}',
'PROFIT': '{amount} < {y}',
'EVEN': '{y} == {amount}',
'LOSS': '{y} < {amount}',
'MAX_LOSS': '{y} == {amount}',
}
amount = stage_operators[stage_list[0][2]].format(
y='{y}', amount=amounts[0]
)
else:
stage_operators = {
'PROFIT': '{amount0} < {y} < {amount1}',
'LOSS': '{amount0} < {y} < {amount1}',
}
amount = stage_operators[stage_list[0][2]].format(
y='{y}', amount0=amounts[0], amount1=amounts[1],
)
conditions.append([stage_list[0][2], ' and '.join(condition0), amount])
else:
for condition in conditions:
if 'or' in condition[1]:
if '<' in condition[1] and '==' in condition[1]:
condition[1] = '{x} <= {price}'.format(
x='{x}', price=condition[1].split()[-1]
)
elif '>' in condition[1] and '==' in condition[1]:
condition[1] = '{price} <= {x}'.format(
x='{x}', price=condition[1].split()[-1]
)
elif 'and' in condition[1]:
if '{x} >' in condition[1] and '{x} <' in condition[1]:
price0 = condition[1][condition[1].index('>') + 1:].split()[0]
price1 = condition[1][condition[1].index('<') + 1:].split()[0]
condition[1] = '{price0} < {x} < {price1}'.format(
x='{x}', price0=price0, price1=price1
)
elif '>' in condition[1]:
x, operator, price = condition[1].split()
condition[1] = '{price} {operator} {x}'.format(
price=price, operator=operator.replace('>', '<'), x='{x}'
)
# logger.info('POS: %s stages generated %s conditions' % (self.symbol.upper(), len(conditions)))
return conditions
def current_stage(self, price):
"""
Get current stage using price
:param price: float
:return: str
"""
# logger.info('POS: check current stage of condition for a price')
result = None
for stage, condition, amount in self.make_conditions():
if eval(condition.format(x=price)):
result = stage
break
return result
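    # --- Hedged example (single illustrative stage) ---
    # With one PositionStage saved at price 55 (lt_stage='MAX_LOSS',
    # e_stage='EVEN', gt_stage='PROFIT'), make_conditions() yields rows
    # like ['PROFIT', '55 < {x}', ...] and current_stage(57.5) evaluates
    # them in order to return 'PROFIT'.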
def __unicode__(self):
return '{symbol} {spread} {status} {date}'.format(
symbol=self.symbol,
spread=self.spread,
status=self.status,
date=(self.stop if self.stop else self.start).strftime('%Y-%m-%d')
)
class PositionStage(models.Model):
"""
A position price stage when current price reach a certain level
"""
price = models.DecimalField(max_digits=10, decimal_places=2)
gt_stage = models.CharField(max_length=20)
gt_amount = models.DecimalField(null=True, max_digits=10, decimal_places=2)
e_stage = models.CharField(max_length=20)
e_amount = models.DecimalField(null=True, max_digits=10, decimal_places=2)
lt_stage = models.CharField(max_length=20)
lt_amount = models.DecimalField(null=True, max_digits=10, decimal_places=2)
position = models.ForeignKey(Position, null=True)
def check_status(self, price):
"""
Check current price is in which position stage
:param price: float
:return: str
"""
logger.info('POS: check position stage for a price')
result = None
for operator, status in (('>', 'gt_stage'), ('==', 'e_stage'), ('<', 'lt_stage')):
formula = '{check_price} {operator} {stage_price}'.format(
check_price=price,
operator=operator,
stage_price=self.price
)
if eval(formula):
result = getattr(self, status)
break
return result
def to_hdf(self):
"""
:return: DataFrame
"""
logger.info('POS: Convert position stages into dataframe')
return pd.DataFrame(
data=[[self.price, self.gt_stage, self.gt_amount, self.e_stage, self.e_amount,
self.lt_stage, self.lt_amount]],
index=[self.id],
columns=['Price', 'P>C', 'P>C $', 'P=C', 'P=C $', 'P<C', 'P<C $']
)
@staticmethod
def format_amount(x):
"""
Format float into string
:param x: float/str
:return: str
"""
return ('' if x is None else (
' {:+.2f}'.format(float(x)) if x else ' 0.00'
))
def __unicode__(self):
"""
:return: str
"""
return 'p < {p} is {lts}{lta}, p == {p} is {es}{ea}, p > {p} is {gts}{gta}'.format(
p=self.price,
lts=self.lt_stage,
lta=self.format_amount(self.lt_amount),
es=self.e_stage,
ea=self.format_amount(self.e_amount),
gts=self.gt_stage,
gta=self.format_amount(self.gt_amount),
)
class CashBalance(models.Model):
"""
DATE,TIME,TYPE,REF #,DESCRIPTION,FEES,COMMISSIONS,AMOUNT,BALANCE
1/29/15,01:00:00,BAL,,Cash balance at the start of business day 29.01 CST,,,,"50,000.00"
1/29/15,14:35:49,LIQ,472251902,Cash liquidation,,,"75,000.00","125,000.00"
"""
time = models.TimeField()
name = models.CharField(max_length=20)
ref_no = models.BigIntegerField(null=True, blank=True)
description = models.CharField(max_length=200, null=True, blank=True)
fee = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
commission = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
amount = models.DecimalField(max_digits=20, decimal_places=2, null=True, blank=True)
balance = models.DecimalField(max_digits=20, decimal_places=2)
statement = models.ForeignKey(Statement, null=True)
position = models.ForeignKey(Position, null=True)
def load_csv(self, line):
"""
Format cash_balance csv line data, save it and return object
:param line: str
:return: CashBalance
"""
line = remove_comma(line)
values = map(
lambda x: 0 if x == '' else x, line.split(',')
)
self.time = values[1]
self.name = values[2]
self.ref_no = values[3]
self.description = values[4]
self.fee = values[5]
self.commission = values[6]
self.amount = values[7]
self.balance = values[8] if values[8] != 'N/A' else 0.0
return self
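    # --- Hedged example (mirrors the class docstring sample) ---
    # line = '1/29/15,14:35:49,LIQ,472251902,Cash liquidation,,,"75,000.00","125,000.00"'
    # cb = CashBalance().load_csv(line)
    # cb.amount, cb.balance  # -> ('75000.00', '125000.00'), assuming
    # remove_comma() strips the thousands separators inside the quotes.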
def to_hdf(self):
"""
:return: DataFrame
"""
return pd.DataFrame(
data=[[self.time, self.name, self.ref_no, self.description,
self.fee, self.commission, self.amount, self.balance]],
index=[self.statement.date],
columns=['Time', 'Name', 'Ref #', 'Description', 'Fees',
'Commissions', 'Amount', 'Balance']
)
def __unicode__(self):
return self.description
class AccountOrder(models.Model):
"""
Notes,,Time Placed,Spread,Side,Qty,Pos Effect,Symbol,Exp,Strike,Type,PRICE,,TIF,Status
,,1/29/15 14:45:15,VERTICAL,SELL,-2,TO OPEN,EBAY,MAR 15,55,CALL,.73,LMT,DAY,CANCELED
,,,,BUY,+2,TO OPEN,EBAY,MAR 15,57.5,CALL,,,,
"""
time = models.TimeField()
spread = models.CharField(max_length=50)
side = models.CharField(max_length=4)
qty = models.CharField(max_length=100)
pos_effect = models.CharField(max_length=50)
symbol = models.CharField(max_length=20)
exp = models.CharField(max_length=50, null=True, blank=True)
strike = models.DecimalField(max_digits=20, decimal_places=2, null=True, blank=True)
contract = models.CharField(max_length=10, null=True, blank=True)
price = models.CharField(max_length=100)
order = models.CharField(max_length=20)
tif = models.CharField(max_length=20)
status = models.CharField(max_length=500)
statement = models.ForeignKey(Statement, null=True)
position = models.ForeignKey(Position, null=True)
def load_csv(self, line):
"""
Format account_order csv line data, save it and return object
:param line: str
:return: AccountOrder
"""
line = remove_comma(line)
values = line.split(',')
self.time = values[2].split(' ')[1]
self.spread = values[3]
self.side = values[4]
self.qty = values[5]
self.pos_effect = values[6]
self.symbol = values[7]
self.exp = values[8] if values[8] else ''
self.strike = values[9] if values[9] else 0.0
self.contract = values[10]
self.price = values[11] if values[11] else 0.0
self.order = values[12]
self.tif = values[13]
self.status = values[14]
return self
def to_hdf(self):
"""
:return: DataFrame
"""
return pd.DataFrame(
data=[[self.time, self.spread, self.side, self.qty, self.pos_effect,
self.symbol, self.exp, self.strike if self.strike else '',
self.contract, self.price, self.order, self.tif, self.status]],
index=[self.statement.date],
columns=['Time Placed', 'Spread', 'Side', 'Qty', 'Pos Effect', 'Symbol',
'Exp', 'Strike', 'Type', 'PRICE', 'Order', 'TIF', 'Status']
)
def __unicode__(self):
return '{side} {qty} {symbol} {contract} {price} {order}'.format(
side=self.side,
qty=self.qty,
symbol=self.symbol,
contract=self.spread if self.spread == 'STOCK' else '{contract} {exp} {strike}'.format(
contract=self.contract,
exp=self.exp,
strike=self.strike
),
price=self.price,
order=self.order
)
class AccountTrade(models.Model):
"""
,Exec Time,Spread,Side,Qty,Pos Effect,Symbol,Exp,Strike,Type,Price,Net Price,Order Type
,1/29/15 15:43:06,VERTICAL,SELL,-2,TO OPEN,EBAY,MAR 15,55,CALL,1.33,.80,LMT
,,,BUY,+2,TO OPEN,EBAY,MAR 15,57.5,CALL,.53,CREDIT,
"""
time = models.TimeField()
spread = models.CharField(max_length=50)
side = models.CharField(max_length=4)
qty = models.IntegerField()
pos_effect = models.CharField(max_length=50)
symbol = models.CharField(max_length=20)
exp = models.CharField(max_length=50, null=True, blank=True)
strike = models.DecimalField(max_digits=20, decimal_places=2, null=True, blank=True)
contract = models.CharField(max_length=10, null=True, blank=True)
price = models.DecimalField(max_digits=20, decimal_places=2)
net_price = models.DecimalField(max_digits=20, decimal_places=2)
order_type = models.CharField(max_length=20)
statement = models.ForeignKey(Statement, null=True)
position = models.ForeignKey(Position, null=True)
def load_csv(self, line):
"""
Format account_trade csv line data, save it and return object
:param line: str
:return: AccountTrade
"""
line = remove_comma(line)
values = line.split(',')
self.time = values[1].split(' ')[1]
self.spread = values[2]
self.side = values[3]
self.qty = values[4]
self.pos_effect = values[5]
self.symbol = values[6]
self.exp = values[7]
self.strike = values[8] if values[8] else 0
self.contract = values[9]
self.price = values[10]
self.net_price = values[11]
self.order_type = values[12]
return self
def to_hdf(self):
"""
:return: DataFrame
"""
return pd.DataFrame(
data=[[self.time, self.spread, self.side, self.qty, self.pos_effect,
self.symbol, self.exp, self.strike if self.strike else '',
self.contract, self.price, self.net_price, self.order_type]],
index=[self.statement.date],
columns=['Exec Time', 'Spread', 'Side', 'Qty', 'Pos Effect', 'Symbol',
'Exp', 'Strike', 'Type', 'Price', 'Net Price', 'Order Type']
)
def __unicode__(self):
return '{side} {qty} {symbol} {contract} {price} {pos_effect}'.format(
side=self.side,
qty=self.qty,
symbol=self.symbol,
contract=self.spread if self.spread == 'STOCK' else '{contract} {exp} {strike}'.format(
contract=self.contract,
exp=self.exp,
strike=self.strike
),
price=self.price,
pos_effect=self.pos_effect
)
class HoldingEquity(models.Model):
"""
Symbol,Description,Qty,Trade Price,Close Price,Close Value
XOM,EXXON MOBIL CORPORATION COM,-100,87.05,87.58,"($8,758.00)"
"""
symbol = models.CharField(max_length=20)
description = models.CharField(max_length=200)
qty = models.IntegerField()
trade_price = models.DecimalField(max_digits=20, decimal_places=2)
close_price = models.DecimalField(max_digits=20, decimal_places=2)
close_value = models.DecimalField(max_digits=20, decimal_places=2)
statement = models.ForeignKey(Statement, null=True)
position = models.ForeignKey(Position, null=True)
def load_csv(self, line):
"""
Format holding_equity csv line data, save it and return object
:param line: str
:return: HoldingEquity
"""
line = remove_comma(line)
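        # Strip '$' signs and convert accounting-style '(...)' values to negative numbers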
values = [
('-' + x[1:-1] if x[0] == '(' and x[-1] == ')' else x)
for x in [x.replace('$', '') for x in line.split(',')]
]
self.symbol = values[0]
self.description = values[1]
self.qty = values[2]
self.trade_price = values[3]
self.close_price = 0.0 if values[4] == 'N/A' else values[4]
self.close_value = values[5]
return self
def to_hdf(self):
"""
:return: DataFrame
"""
return pd.DataFrame(
data=[[self.symbol, self.description, self.qty,
self.trade_price, self.close_price, self.close_value]],
index=[self.statement.date],
columns=['Symbol', 'Description', 'Qty', 'Trade Price',
'Close Price', 'Close Value']
)
def __unicode__(self):
return '{qty} {symbol} {close_price}'.format(
symbol=self.symbol,
qty=self.qty,
close_price=self.close_price
)
class HoldingOption(models.Model):
"""
Symbol,Option Code,Exp,Strike,Type,Qty,Trade Price,Close Price,Close Value
TSLA,TSLA150320C205,MAR 15,205,CALL,+1,13.95,14.075,"$1,407.50"
"""
symbol = models.CharField(max_length=20)
option_code = models.CharField(max_length=200)
exp = models.CharField(max_length=50)
strike = models.DecimalField(max_digits=20, decimal_places=2)
contract = models.CharField(max_length=10)
qty = models.IntegerField()
trade_price = models.DecimalField(max_digits=20, decimal_places=2)
close_price = models.DecimalField(max_digits=20, decimal_places=2)
close_value = models.DecimalField(max_digits=20, decimal_places=2)
statement = models.ForeignKey(Statement, null=True)
position = models.ForeignKey(Position, null=True)
def load_csv(self, line):
"""
Format holding_option csv line data, save it and return object
:param line: str
        :return: HoldingOption
"""
line = remove_comma(line)
values = [
('-' + x[1:-1] if x[0] == '(' and x[-1] == ')' else x)
for x in [x.replace('$', '') for x in line.split(',')]
]
self.symbol = values[0]
self.option_code = values[1]
self.exp = values[2]
self.strike = values[3]
self.contract = values[4]
self.qty = values[5]
self.trade_price = values[6]
self.close_price = values[7]
self.close_value = values[8]
return self
def to_hdf(self):
"""
:return: DataFrame
"""
return pd.DataFrame(
data=[[self.symbol, self.option_code, self.exp, self.strike, self.contract,
self.qty, self.trade_price, self.close_price, self.close_value]],
index=[self.statement.date],
columns=['Symbol', 'Option Code', 'Exp', 'Strike', 'Type',
'Qty', 'Trade Price', 'Close Price', 'Close Value']
)
def __unicode__(self):
return '{qty} {symbol} {exp} {strike} {contract}'.format(
qty=self.qty,
symbol=self.symbol,
exp=self.exp,
strike=self.strike,
contract=self.contract
)
class ProfitLoss(models.Model):
"""
Symbol,Description,P/L Open,P/L %,P/L Day,P/L YTD,P/L Diff,Margin Req,Close Value
XOM,EXXON MOBIL CORPORATION COM,($28.00),-0.31%,($28.00),($28.00),$0.00,"$1,313.70","($8,995.00)"
"""
symbol = models.CharField(max_length=20)
description = models.CharField(max_length=200)
pl_open = models.DecimalField(max_digits=20, decimal_places=2)
pl_pct = models.DecimalField(max_digits=20, decimal_places=2)
pl_day = models.DecimalField(max_digits=20, decimal_places=2)
pl_ytd = models.DecimalField(max_digits=20, decimal_places=2)
margin_req = models.DecimalField(max_digits=20, decimal_places=2)
close_value = models.DecimalField(max_digits=20, decimal_places=2)
statement = models.ForeignKey(Statement, null=True)
position = models.ForeignKey(Position, null=True)
def load_csv(self, line):
"""
Format profit_loss csv line data, save it and return object
:param line: str
:return: ProfitLoss
"""
line = remove_comma(line)
values = [
('-' + x[1:-1] if x[0] == '(' and x[-1] == ')' else x)
for x in [x.replace('$', '') for x in line.split(',')]
]
self.symbol = values[0]
self.description = values[1]
self.pl_open = values[2]
self.pl_pct = values[3][:-1]
self.pl_day = values[4]
self.pl_ytd = values[5]
        # values[6] is the 'P/L Diff' column, which this model does not store
        self.margin_req = values[7]
        self.close_value = values[8]
return self
def to_hdf(self):
"""
:return: DataFrame
"""
return pd.DataFrame(
data=[[self.symbol, self.description, self.pl_open, self.pl_pct, self.pl_day,
self.pl_ytd, self.margin_req, self.close_value]],
index=[self.statement.date],
columns=['Symbol', 'Description', 'P/L Open', 'P/L %', 'P/L Day',
'P/L YTD', 'Margin Req', 'Close Value']
)
def __unicode__(self):
return '{symbol} {pl_open}'.format(
symbol=self.symbol,
pl_open=self.pl_open
)
| [
"bvc100x@gmail.com"
] | bvc100x@gmail.com |
0199017464728830e0f1cc7a81ab6727cbc32e81 | f8ffa8ff257266df3de9d20d95b291e393f88434 | /Python - advanced/zajecia08/zadanie_domowe/wc.py | 8f7ba321ebbab792e39244c1e46ea04d4aff5b79 | [] | no_license | janiszewskibartlomiej/Python_Code_Me_Gda | c0583c068ef08b6130398ddf93c3a3d1a843b487 | 7568de2a9acf80bab1429bb55bafd89daad9b729 | refs/heads/master | 2020-03-30T05:06:26.757033 | 2020-03-02T08:53:28 | 2020-03-02T08:53:28 | 150,781,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # Bartłomiej Janiszewski
from wc_funkcje import ustaw_logger, wc
import log
import datetime
if __name__ == '__main__':
start = datetime.datetime.now()
nazwa_pliku = 'zen.txt'
wybrany_szablon = 'pelny'
poziom_logowania = log.DEBUG
ustaw_logger(poziom_logowania)
wynik = wc(nazwa_pliku, wybrany_szablon)
print(wynik)
czas_wykonania = datetime.datetime.now() - start
    log.debug(f'program execution time: {czas_wykonania}')
| [
"janiszewski.bartlomiej@gmail.com"
] | janiszewski.bartlomiej@gmail.com |
7480bda28d7c6e29cbb2bd96889c9340d1a9e221 | 80e3dfac67631cef70da3dc015d4557d46d41801 | /src/grid/changemapanimations.py | 59382ea322cc21c033792db21c18f8d56677383d | [] | no_license | snizzo/lucrezia | 7b6d648f11581ddd21acd301df5426d77b6b21dd | fb41e5ba4435d48d702c58aa9402c10ab12a51b9 | refs/heads/master | 2023-06-22T13:14:06.634116 | 2023-06-18T22:42:14 | 2023-06-18T22:42:14 | 32,765,327 | 8 | 4 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | #panda3d
from panda3d.core import NodePath, LPoint2i
from direct.showbase.DirectObject import DirectObject
from direct.interval.IntervalGlobal import *
#internals
from utils.toggle import Toggle
from utils.once import Once
from objects.grass import Grass
from objects.light import Light
from tile import Tile
from character import Character
from utils.fadeout import FadeOut
import os, sys
'''
This class abstracts the 2D grid commonly used in 2D games
for use with Panda3D.
INTERNAL TILESET EXAMPLE GRAPH:
^
|
|
y |
|
|
O------------>
x
'''
class ChangeMapAnimations(DirectObject):
'''
Autogenerates empty tileset at start
'''
def __init__(self):
pass
| [
"happy.snizzo@gmail.com"
] | happy.snizzo@gmail.com |
a0b703f87a403de60bd497d01dc11969567edd6c | 2eb8e3606a8df45d432fdf56ee9aa24942304526 | /rocketgram/api/shipping_option.py | 55db89dd5aca13265cd5374ea45adc78d30833c9 | [
"MIT"
] | permissive | KulZlaK/rocketgram | 22848293980ba44dd9fb63db28f34be36c437c84 | 09587deecffcd7ccc9529f4d9e51221888870f23 | refs/heads/master | 2022-07-27T23:25:51.254444 | 2020-05-15T21:36:57 | 2020-05-15T21:36:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # Copyright (C) 2015-2020 by Vd.
# This file is part of Rocketgram, the modern Telegram bot framework.
# Rocketgram is released under the MIT License (see LICENSE).
from dataclasses import dataclass
from typing import List
from .labeled_price import LabeledPrice
@dataclass(frozen=True)
class ShippingOption:
"""\
Represents ShippingOption object:
https://core.telegram.org/bots/api#shippingoption
"""
id: str
title: str
prices: List[LabeledPrice]
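# A minimal construction sketch (assumes LabeledPrice mirrors the Telegram
# Bot API fields `label` and `amount`, with `amount` in minor currency units):
#   option = ShippingOption(
#       id='express',
#       title='Express delivery',
#       prices=[LabeledPrice(label='Shipping', amount=1500)],
#   )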
| [
"vd@"
] | vd@ |
4fcaed0256103e3eb8ace9827d79a215ae909c24 | 3dfb4ee39555b30e6e0c6fcdbef371864e69f694 | /google-cloud-sdk/.install/.backup/lib/surface/preview/app/__init__.py | 8250cdb4a7ea1553778dd1cecb732f0c19282aa6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | MD-Anderson-Bioinformatics/NG-CHM_Galaxy | 41d1566d5e60416e13e023182ca4351304381a51 | dcf4886d4ec06b13282143ef795c5f0ff20ffee3 | refs/heads/master | 2021-06-02T21:04:12.194964 | 2021-04-29T14:45:32 | 2021-04-29T14:45:32 | 130,249,632 | 0 | 1 | null | 2020-07-24T18:35:21 | 2018-04-19T17:25:33 | Python | UTF-8 | Python | false | false | 2,988 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud app group."""
import sys
from googlecloudsdk.calliope import base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import platforms
class UnsupportedPythonVersionError(exceptions.Error):
pass
# TODO(b/24169312): remove
CHANGE_WARNING = """\
The `gcloud preview app` surface is rapidly improving. Look out for
changing flags and new commands before the transition out of the `preview`
component. These changes will be documented in the Cloud SDK release notes
<https://goo.gl/X8apDJ> and via deprecation notices for changing commands.
If you would like to avoid changing behavior, please pin to a fixed version of
the Google Cloud SDK as described under the "Alternative Methods" section of the
Cloud SDK web site: <https://cloud.google.com/sdk/#alternative>.
"""
@base.Beta
class Appengine(base.Group):
"""Manage your App Engine app.
This set of commands allows you to deploy your app, manage your existing
deployments, and also run your app locally. These commands replace their
equivalents in the appcfg tool.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To run your app locally in the development application server, run:
$ dev_appserver.py DEPLOYABLES
To create a new deployment of one or more modules, run:
$ {command} deploy DEPLOYABLES
To list your existing deployments, run:
$ {command} modules list
To generate config files for your source directory:
$ {command} gen-config
""",
}
def Filter(self, unused_context, unused_args):
# TODO(b/24169312): remove
if not properties.VALUES.app.suppress_change_warning.GetBool():
log.warn(CHANGE_WARNING)
properties.PersistProperty(properties.VALUES.app.suppress_change_warning,
'true')
if not platforms.PythonVersion().IsSupported():
raise UnsupportedPythonVersionError(
('Python 2.7 or greater is required for App Engine commands in '
'gcloud.\n\n'
'Your Python location: [{0}]\n\n'
'Please set the CLOUDSDK_PYTHON environment variable to point to a '
'supported version in order to use this command.'
).format(sys.executable))
| [
"rbrown@insilico.us.com"
] | rbrown@insilico.us.com |
aa3f4f7235ffe090bfb7628336f9b3504774ab15 | 4a4579254118db40fb008439d18ad8c573e8fc1a | /build/jsk_common_msgs/jsk_gui_msgs/cmake/jsk_gui_msgs-genmsg-context.py | f56cef5c14333e672ea233f015bc2fa7f8ca52be | [] | no_license | amilearning/AD_mpc_ws | 86ff6ef9e61c6cc5aae6e12f20c2c875b1930d41 | 1fc2d385f281e00c16aff688948f7296e02cbd3a | refs/heads/master | 2023-06-24T13:54:59.759921 | 2021-07-16T01:08:52 | 2021-07-16T01:08:52 | 386,465,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/Action.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/MagneticField.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/Touch.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/AndroidSensor.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/Gravity.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/MultiTouch.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/TouchEvent.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/DeviceSensor.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/Tablet.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/VoiceMessage.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/SlackMessage.msg"
services_str = "/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/srv/Query.srv;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/srv/YesNo.srv"
pkg_name = "jsk_gui_msgs"
dependencies_str = "std_msgs;sensor_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "jsk_gui_msgs;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;sensor_msgs;/opt/ros/melodic/share/sensor_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"hojin.projects@gmail.com"
] | hojin.projects@gmail.com |
5ea65368b06c233a66fb293222f5e0e86ade3e0a | f81c9451768a52bc22c6a2abe87c25615ea8b3e6 | /汇总文件/jobboleproject/jobboleproject/spiders/crawlJobbole.py | b191639c8f7518c6e3da765558ff142ab763b436 | [] | no_license | hanfang302/crawlercollect | 07cb7fb5faf85018e82f48b0209bd86dc5c52f8f | 8f7b286df8bf0a344c3656bda5c7fb96cee640dc | refs/heads/master | 2020-03-22T05:27:07.928855 | 2018-07-03T10:26:07 | 2018-07-03T10:26:07 | 139,566,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
# Inherits from CrawlSpider, which itself inherits from Spider
class CrawljobboleSpider(CrawlSpider):
    # Spider name
name = 'crawlJobbole'
    # Allowed domains: only URLs under these domains are crawled; others are ignored automatically
allowed_domains = ['blog.jobbole.com']
    # A starting URL, as with a regular Spider
start_urls = ['http://blog.jobbole.com/']
    #
    # LinkExtractor is a class for extracting links:
    # restrict_xpaths: limits extraction to the page regions matched by the given XPath
    # allow: URLs matching the regular expression are extracted; if empty, everything matches
    # deny: URLs matching the regular expression are excluded
    # callback: the callback function invoked for each extracted response
rules = (
Rule(LinkExtractor(allow=r'.*?/item/bcjdb',deny=r'.*?/notitem/slcnd',restrict_xpaths='//div[@class=a]'), callback='parse_item', follow=True),
)
def parse_item(self, response):
i = {}
#i['domain_id'] = response.xpath('//input[@id="sid"]/@value').extract()
#i['name'] = response.xpath('//div[@id="name"]').extract()
#i['description'] = response.xpath('//div[@id="description"]').extract()
return i
| [
"hanfang123@aliyun.com"
] | hanfang123@aliyun.com |
1321de452caf0060f6d2cf2523a3f418c5ce49c9 | 4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97 | /sols/different_ways_to_add_parentheses.py | bd10c6c02ef9f79a33afab5e46cb95900b8ae084 | [] | no_license | hayeonk/leetcode | 5136824838eb17ed2e4b7004301ba5bb1037082f | 6485f8f9b5aa198e96fbb800b058d9283a28e4e2 | refs/heads/master | 2020-04-28T03:37:16.800519 | 2019-06-01T14:34:45 | 2019-06-01T14:34:45 | 174,943,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | class Solution(object):
def diffWaysToCompute(self, exp):
def calculate(num1, num2, sign):
if sign == "+":
return num1 + num2
elif sign == "-":
return num1 - num2
else:
return num1 * num2
ans = []
for i in xrange(len(exp)):
if not exp[i].isdigit():
left = self.diffWaysToCompute(exp[:i])
right = self.diffWaysToCompute(exp[i+1:])
for n1 in left:
for n2 in right:
ans.append(calculate(n1, n2, exp[i]))
if not ans:
return [int(exp)]
        return ans
# Quick check (a sketch; the Solution class above follows the LeetCode harness).
# "2-1-1" can parse as (2-1)-1 = 0 or as 2-(1-1) = 2:
if __name__ == "__main__":
    print(Solution().diffWaysToCompute("2-1-1"))  # [2, 0] | [
"31617695+hayeonk@users.noreply.github.com"
] | 31617695+hayeonk@users.noreply.github.com |
679e412a82c2340b28045e6cdb388a19bfb5b799 | d628948e86841ae3efc93eba2e321dd58fe33b07 | /bookmanager/settings.py | 14f4a01b82c72de936cfd48b51e0a9eebea7e085 | [] | no_license | shd0812/django_demo | 8986dde23c2fd8ae4a46f8a938c9c0924200d4b2 | 832c028171795bf6feabc39d313bcad8cfbe5b94 | refs/heads/master | 2022-12-05T18:57:11.213951 | 2020-08-24T08:15:50 | 2020-08-24T08:15:50 | 289,303,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,332 | py | """
Django settings for bookmanager project.
Generated by 'django-admin startproject' using Django 1.11.28.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's6e4p%v(%&)$d(s%$l&&inwkn8)4)%&kqc-w+yssvg^)g)fe41'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'appone.apps.ApponeConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmanager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookmanager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'bookmanager',
'HOST': '122.51.192.201',
'PORT': 3306,
'USER': 'root',
'PASSWORD': '123'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
| [
"759275499@qq.com"
] | 759275499@qq.com |
bff8f74051ac91f5994980953d38279ba111917d | e9b9626ffce09bf011803a33b4780d8dcc6657e8 | /class9/collateral/put_file.py | aae2559cec9e5615c13bc1c06a49886309059ceb | [
"Apache-2.0"
] | permissive | zh0u0liver/netmiko_course | f7a91eb3f543e2a609172280f13b6bc8c4fbe1b9 | 31943e4f6f66dbfe523d62d5a2f03285802a8c56 | refs/heads/master | 2023-08-04T05:41:32.745403 | 2021-09-14T02:18:44 | 2021-09-14T02:18:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | import os
from getpass import getpass
from netmiko import ConnectHandler, file_transfer
# Code so automated tests will run properly
password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
# Need a privilege15 account (no enable call)
cisco3 = {
"device_type": "cisco_ios",
"host": "cisco3.lasthop.io",
"username": "pyclass",
"password": password,
}
# Secure copy server must be enabled on the device ('ip scp server enable')
source_file = "test2.txt"
dest_file = "test2.txt"
direction = "put"
file_system = "flash:"
ssh_conn = ConnectHandler(**cisco3)
transfer_dict = file_transfer(
ssh_conn,
source_file=source_file,
dest_file=dest_file,
file_system=file_system,
direction=direction,
overwrite_file=True, # default "will not overwrite"
inline_transfer=True,
)
ssh_conn.disconnect()
print(transfer_dict)
| [
"ktbyers@twb-tech.com"
] | ktbyers@twb-tech.com |
098d0a2a8b1145a3df6d306fd83f6c68df598e98 | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /cv/distiller/CWD/pytorch/mmrazor/configs/nas/mmcls/onceforall/ofa_mobilenet_supernet_32xb64_in1k.py | 341f4bda969cdd7625e1da7e3e5ff0c36e6fee57 | [
"Apache-2.0"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 1,671 | py | _base_ = [
'mmcls::_base_/default_runtime.py',
'mmrazor::_base_/settings/imagenet_bs2048_ofa.py',
'mmrazor::_base_/nas_backbones/ofa_mobilenetv3_supernet.py',
]
supernet = dict(
_scope_='mmrazor',
type='SearchableImageClassifier',
data_preprocessor=_base_.data_preprocessor,
backbone=_base_.nas_backbone,
neck=dict(type='mmcls.GlobalAveragePooling'),
head=dict(
type='DynamicLinearClsHead',
num_classes=1000,
in_channels=1280,
loss=dict(
type='mmcls.LabelSmoothLoss',
num_classes=1000,
label_smooth_val=0.1,
mode='original',
loss_weight=1.0),
topk=(1, 5)),
input_resizer_cfg=_base_.input_resizer_cfg,
connect_head=dict(connect_with_backbone='backbone.last_mutable_channels'),
)
model = dict(
_scope_='mmrazor',
type='BigNAS',
drop_path_rate=0.2,
backbone_dropout_stages=[6, 7],
architecture=supernet,
distiller=dict(
type='ConfigurableDistiller',
teacher_recorders=dict(
fc=dict(type='ModuleOutputs', source='head.fc')),
student_recorders=dict(
fc=dict(type='ModuleOutputs', source='head.fc')),
distill_losses=dict(
loss_kl=dict(type='KLDivergence', tau=1, loss_weight=1)),
loss_forward_mappings=dict(
loss_kl=dict(
preds_S=dict(recorder='fc', from_student=True),
preds_T=dict(recorder='fc', from_student=False)))),
mutators=dict(type='mmrazor.NasMutator'))
model_wrapper_cfg = dict(
type='mmrazor.BigNASDDP',
broadcast_buffers=False,
find_unused_parameters=True)
| [
"mingjiang.li@iluvatar.ai"
] | mingjiang.li@iluvatar.ai |
fc255fe8fed8197264367180513e6fb8aebecba2 | 08b439af0eeccb93b41193b65c196b7ab2dbe773 | /award/urls.py | 1ae3b1aec217ee245e5c22ac908b89354d53b4e5 | [
"MIT"
] | permissive | EidAbdullahi/hilal | a80aa3f6b7ce2a3098f5dcada9bbb2fc52dc6ba9 | 1ccb6a1b5a5143164ced9b8a4c742997abbb6296 | refs/heads/master | 2023-05-01T14:28:46.837200 | 2021-05-05T12:02:48 | 2021-05-05T12:02:48 | 363,429,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | """award URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('wardapp.urls')),
path('tinymce/', include('tinymce.urls')),
path('accounts/', include('django_registration.backends.one_step.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/register/', views.LoginView.as_view(template_name='django_registration/registration_form.html')),
path('accounts/login/', views.LoginView.as_view(template_name='registration/login.html')),
path('accounts/logout/', views.logout_then_login, {"next_page": '/'}),
path('ratings/', include('star_ratings.urls', namespace='ratings')),
path('api-token-auth', obtain_auth_token),
]
| [
"eidabdullahi10@gmail.com"
] | eidabdullahi10@gmail.com |
515604f2abc3e0df4872c067064277872c874543 | 6f65ebe31650b73e9c5e77d598295eb1362702bd | /tools/Polygraphy/tests/logger/test_logger.py | 078390e71a50b887380cad96436dc31ad37a6be2 | [
"Apache-2.0",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause",
"MIT"
] | permissive | hierarchyJK/TensorRT | d9b5be9964e54af8b2789a6e98f393519956ed90 | c2668947ea9ba4c73eb1182c162101f09ff250fd | refs/heads/master | 2023-06-26T07:01:08.922681 | 2021-07-12T09:28:23 | 2021-07-13T20:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
from polygraphy.logger.logger import Logger
# We don't use the global logger here because we would have to reset the state each time.
class TestLogger(object):
def test_log_file(self):
logger = Logger()
with tempfile.NamedTemporaryFile("w+") as log_file:
logger.log_file = log_file.name
assert logger.log_file == log_file.name
logger.info("Hello")
log_file.seek(0)
assert log_file.read() == "[I] Hello\n"
| [
"rajeevsrao@users.noreply.github.com"
] | rajeevsrao@users.noreply.github.com |
81c26e3a0d8ed242955d3e50604d0bd97065e591 | 7fa4633ea229fc866cc99992bf9f891663d39ec6 | /common/experiment_manager/src/experiment_manager/msg/__init__.py | d4cd96b7b9dc0d1ccf630dd04ed765622acb87f2 | [] | no_license | onuryuruten/rossi-demo | f7aeccfea9228e13dbab85642426dce3de5ba925 | ab3ef04c40f66ec0883c26910c79cb0c72209051 | refs/heads/master | 2020-12-25T11:42:15.966155 | 2012-01-20T08:17:50 | 2012-01-20T08:17:50 | 3,162,374 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | from ._ExperimentState import *
| [
"mparlaktuna@gmail.com"
] | mparlaktuna@gmail.com |
5e8052af8629684bb42632d743efaf5d48119ba1 | 1b94aae63500b6ff94b0446d01c3c9bee385fad2 | /.history/chandori/account/views_20210824172332.py | 4c4c5078aeec6d0af4357fc59d04853d7bbcc83d | [] | no_license | miracle3070/chandori | 71389c2a9df76c242a5895c2c23d4394220f9c8e | b01d1eaa1d9c0d12d7abdc8f164039bcd9c42925 | refs/heads/master | 2023-08-18T11:46:11.303934 | 2021-09-28T19:23:22 | 2021-09-28T19:23:22 | 393,949,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserChangeForm
from django.utils import timezone
from .models import *
from .forms import CustomUserChangeForm
from django.contrib import messages
def edit(request):
    return render(request, 'edit.html')
def edit(request):
if request.method == 'POST':
user_change_form = CustomUserChangeForm(request.POST, instance = request.user)
if user_change_form.is_valid():
user_change_form.save()
            messages.success(request, 'Your profile has been updated.')
return render(request, 'accounting/templates/base.html')
else:
user_change_form = CustomUserChangeForm(instance = request.user)
    return render(request, 'edit.html', {'user_change_form': user_change_form})
def login_view(request):
error_msg = ""
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
if username == "" or password == "":
error_msg = "아이디 또는 비밀번호를 입력해주세요."
else:
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect("accounting:home")
else:
error_msg = "아이디 또는 비밀번호가 틀렸습니다."
return render(request, "login.html", {"error_msg" : error_msg})
def logout_view(request):
logout(request)
return redirect("accounting:home")
def signup_view(request):
error_msg = ""
if request.method == "POST":
password1 = request.POST["password1"]
password2 = request.POST["password2"]
if password1 == password2:
username = request.POST["username"]
nickname = request.POST["nickname"]
age = int(request.POST['age'])
job = request.POST['job']
income = int(request.POST['income'])
signup_date = timezone.now()
user = CustomUser.objects.create_user(
username = username,
password = password1,
nickname = nickname,
age = age,
job = job,
income = income,
signup_date = signup_date,
)
return redirect("account:login")
else:
error_msg = "비밀번호가 일치하지 않습니다."
return render(request, "signup.html", {"error_msg" : error_msg})
| [
"62284729+ehddus980@users.noreply.github.com"
] | 62284729+ehddus980@users.noreply.github.com |
65ed5768bb3bdfc61f10d6cc6a59dfbb999c8d92 | 4a020c0a492d931f7da5c452c9569fba06703686 | /testing/web-platform/tests/webdriver/ecmascript/ecmascript_test.py | cf27c01c06a02063b1347f6673721aec1f88a454 | [
"LicenseRef-scancode-w3c-03-bsd-license",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rbernon/wine-gecko | 353173511a790127ffa2ad39d630b8a0dcbbf5bf | 550ad9eac229b769992f421ce9492ca46edabaa0 | refs/heads/master | 2023-08-06T21:25:26.836672 | 2020-11-30T12:47:56 | 2021-09-30T08:14:19 | 411,965,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
class EcmasScriptTest(base_test.WebDriverBaseTest):
def test_that_ecmascript_returns_document_title(self):
self.driver.get(self.webserver.where_is("ecmascript/res/ecmascript_test.html"))
        result = self.driver.execute_script("return document.title;")
        self.assertEqual("ecmascript test", result)
if __name__ == "__main__":
unittest.main()
| [
"rbernon@codeweavers.com"
] | rbernon@codeweavers.com |
010582cd02fe383b3413bf2b655b36b3af22c368 | 7ba54b83de814cd34f0058e797cf3d6313057147 | /mmdetection/configs/xray/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_2x_nih_chestx_det.py | f3478d0c54101f9e68bb9062d6b3357c0452e8b7 | [] | no_license | TerryGriffin/COMP5300.AdvancedDeepLearning | 083344a939e99fe7e2119225e023ab8aebdda04e | 9521c3327ba6d8344711cd3e404e627af9ffc936 | refs/heads/master | 2023-02-01T10:06:00.355905 | 2020-12-16T02:05:12 | 2020-12-16T02:05:12 | 321,839,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | _base_ = './faster_rcnn_r50_fpn_2x_nih_chestx_det.py'
model = dict(
pretrained='open-mmlab://resnext101_32x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| [
"tgriffin62@yahoo.com"
] | tgriffin62@yahoo.com |
f6b37a366b054c1e431aefa17fb9943058075c76 | 0b0f4c15abb143a4b24b3bb01b9a6184df6867bf | /hello_requests/yes_or_no.py | f8b0ac04aff02b0e85c9cd9e2d08c68386c67414 | [] | no_license | saida93522/hello_requests_python | efe15130cbb327a78d3ef1aa8d6e43fdaf339709 | 76dc533dea9e1b1413d04b3fc20c52987139b780 | refs/heads/master | 2023-05-02T20:43:30.906319 | 2020-10-16T20:00:32 | 2020-10-16T20:00:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | import requests
import logging
class APIError(Exception):
pass
def yes_or_no():
try:
response = requests.get('https://yesno.wtf/api/')
response.raise_for_status()
except Exception as e:
logging.exception(e)
raise APIError('Error connecting to API')
try:
data = response.json()
except Exception as e:
logging.exception(e)
raise APIError('Data returned is not JSON') from e
try:
answer = data['answer']
except Exception as e:
logging.exception(e)
raise APIError('JSON does not contain expected data') from e
return answer
def main():
try:
answer = yes_or_no()
print(answer)
except APIError as e:
message, = e.args
print(message)
if __name__ == '__main__':
main() | [
"10088152+claraj@users.noreply.github.com"
] | 10088152+claraj@users.noreply.github.com |
4504613d10c713611621a028b61bb44666f0a9f3 | 716abd9e5ba4b72b72cc5f724a6cc0a6ad4390d1 | /6-Operators of Python/30-Identity-and-Membership-operators.py | 1c0385bd6405585f5684b04ece645772b30e7ea3 | [] | no_license | devopstasks/PythonScripting | ac45edd72dc134ec3539b962f02dfc866f365ecf | 48bc37733ae6b3be4e2d64909ffe0962b6908518 | refs/heads/master | 2023-03-29T11:18:01.329452 | 2021-04-07T03:25:20 | 2021-04-07T03:25:20 | 350,388,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | '''
===================================
Identity operators compare whether two operands are the same object; a common use is comparing the results of type(), as in the examples below.
There are 2 types of Identity operators
- is
- is not
===================================
x=6
type(x) -> <class 'int'>
y="hi"
type(y) -> <class 'str'>
type(x) is type(y) -> False
type(x) is not type(y) -> True
'''
'''
====================================
Membership operators test whether a value occurs in a sequence or collection.
There are 2 types of Membership operators
- in
- not in
====================================
x=[2,3,5,9]
3 in x -> True
4 in x -> False
'''
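# A small runnable demo of the examples above; the printed values match the
# expected results shown in the docstrings:
x = 6
y = "hi"
print(type(x) is type(y))      # False
print(type(x) is not type(y))  # True
nums = [2, 3, 5, 9]
print(3 in nums)               # True
print(4 not in nums)           # True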
| [
"rpadhan2015@gmail.com"
] | rpadhan2015@gmail.com |
b7346b389ff30c566e8bc6651ae60b10c67ebf2b | 0e3462bd127c2072b34ac4885c034bde58ac8063 | /c_sharp_code_rewritten_in_python/transformer_command_line_interface.py | 7e6cb49274f86f384976fe5a54132f56ddee81b0 | [] | no_license | SNeicer/py_regex_translator_core | 608b3b6e5d4aff1496a910d38276562f25013cb7 | ea45e1eb556285f20f702ae6bd697aebd6f0efe6 | refs/heads/master | 2023-02-12T04:49:03.063797 | 2021-01-08T14:06:15 | 2021-01-08T14:06:15 | 326,164,216 | 0 | 0 | null | 2021-01-02T11:26:50 | 2021-01-02T11:04:11 | Python | UTF-8 | Python | false | false | 331 | py | from c_sharp_code_rewritten_in_python import interfaces
class TransformerCLI:
def __init__(self, transformer: interfaces.IFileTransformer):
self._transformer = transformer
def run(self, *args):
sourcePath = args[0]
targetPath = args[1]
self._transformer.transform(sourcePath, targetPath)
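# Usage sketch (CopyTransformer is a hypothetical IFileTransformer
# implementation, not part of this file):
#   cli = TransformerCLI(CopyTransformer())
#   cli.run('source.txt', 'target.txt')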
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
8eab42ec4e999316d34dc5b305592808d36f835a | 9dc8c299ee7d4a225002127cc03b4253c8a721fd | /libs/simulator/livepush_acl_topic_simulator.py | 823dabe3f0baee30b5bbd03a10492cff39593daf | [] | no_license | namesuqi/strategy_corgi | 5df5d8c89bdf7a7c465c438048be20ef16120f4f | 557b8f8eabf034c2a57c25e6bc581858dd4f1b6e | refs/heads/master | 2020-03-07T04:00:18.313901 | 2018-03-29T07:50:50 | 2018-03-29T07:50:50 | 127,253,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | #!/usr/bin/python
# coding=utf-8
# livepush_acl report simulator
from libs.simulator.topic_simulator import *
from config import push_review_duration
from libs.module.live_push import *
# live_push reports to the livepush_acl topic
class LivePushAclTopicSimulator(TopicSimulator):
def __init__(self, log):
super(LivePushAclTopicSimulator, self).__init__()
self.log = log
self.table_name = Live_Push
self.topic = TOPIC_PUSH_ACL
self.review_duration = push_review_duration
def create_topic_data(self, result, **kwargs):
return {"topic": self.topic,
"timestamp": int(time.time() * 1000),
"event": result.event,
"livepush_ip": result.ip,
"reason": result.reason
}
| [
"suqi_name@163.com"
] | suqi_name@163.com |
a22c702811b62c295c71fa1fb4f4aff77ee8108e | 9188d0d7ce9fc5fadf4d2593741894e1448f9326 | /indico/vendor/django_mail/backends/locmem.py | 4e3f2ecda085f3995cb3fbe9698f0f8bd4f91571 | [
"MIT"
] | permissive | vaclavstepan/indico | b411410416acdfa50b0d374f89ec8208de00fb2f | 8ca1ac4d4a958f22f24580a790b3cb015570bdfb | refs/heads/master | 2023-07-21T04:42:03.031131 | 2021-09-01T09:54:17 | 2021-09-01T09:54:17 | 385,897,420 | 0 | 0 | MIT | 2021-07-16T13:07:32 | 2021-07-14T10:16:57 | null | UTF-8 | Python | false | false | 1,481 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
# The code in here is taken almost verbatim from `django.core.mail.backends.locmem`,
# which is licensed under the three-clause BSD license and is originally
# available on the following URL:
# https://github.com/django/django/blob/stable/3.1.x/django/core/mail/backends/locmem.py
# Credits of the original code go to the Django Software Foundation
# and their contributors.
"""
Backend for test environment.
"""
from indico.vendor import django_mail
from .base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
"""
An email backend for use during test sessions.
The test connection stores email messages in a dummy outbox,
rather than sending them out on the wire.
The dummy outbox is accessible through the outbox instance attribute.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(django_mail, 'outbox'):
django_mail.outbox = []
def send_messages(self, messages):
"""Redirect messages to the dummy outbox"""
msg_count = 0
for message in messages: # .message() triggers header validation
message.message()
django_mail.outbox.append(message)
msg_count += 1
return msg_count
| [
"adrian.moennich@cern.ch"
] | adrian.moennich@cern.ch |
192f5be2ea74ff5dd2215cd23fc641a35c5f9e09 | 3715df2c833919376a3ee44de8fc64d1c2abe8ce | /AutomlCore/build/lib/algorithms/classification/naive_bayes_complement.py | f057f2f40891211ae4bcc2e4da46ea7a74226a34 | [] | no_license | mindis/dachshund | 6c07f8eb6b9f75f66c74ec3748dfa42a31cff4d5 | 2f1b3e5866e06424c700f3a106051fe69bcb18bc | refs/heads/master | 2022-12-09T05:17:00.812348 | 2020-09-03T05:25:09 | 2020-09-03T05:25:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import numpy as np
from sklearn.naive_bayes import ComplementNB
from sklearn.metrics import accuracy_score
from hyperopt import hp
from utils import definitions
from .. import model, model_classification
class ComplementNBClassifier(model.Model, model_classification.ModelClassification):
def __init__(self, _project_name):
super().__init__(_project_name)
self.model_name = 'NaiveBayesComplement'
self.params_list = {}
def getHyperParameterSpace(self):
return {
'alpha': hp.uniform('alpha', 0, 100),
'norm': [False, True],
}
def getModel(self, _params):
return ComplementNB(
alpha= _params['alpha'],
norm= _params['norm'],
)
def trainModel(self, x, y, _params):
self.model = self.getModel(_params)
self.model.fit(x, y)
self.saveModel()
def getPredictResult(self, x):
return self.model.predict(x)
def getPredictProbaResult(self, x):
return self.model.predict_proba(x)
| [
"aoba0203@naver.com"
] | aoba0203@naver.com |
f4b07d056b6b0304d6a97622d9ff3ea596b95948 | 61e98b0302a43ab685be4c255b4ecf2979db55b6 | /sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/input/func_e0604.py | d077f31adbfa8f55fc8501b1293f76262c0e2dfc | [
"Apache-2.0",
"BSD-3-Clause",
"EPL-2.0",
"CDDL-1.0",
"WTFPL",
"GPL-2.0-only",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"Classpath-exception-2.0"
] | permissive | dzenyu/kafka | 5631c05a6de6e288baeb8955bdddf2ff60ec2a0e | d69a24bce8d108f43376271f89ecc3b81c7b6622 | refs/heads/master | 2021-07-16T12:31:09.623509 | 2021-06-28T18:22:16 | 2021-06-28T18:22:16 | 198,724,535 | 0 | 0 | Apache-2.0 | 2019-07-24T23:51:47 | 2019-07-24T23:51:46 | null | UTF-8 | Python | false | false | 206 | py | """Test for invalid objects in a module's __all__ variable.
"""
# pylint: disable=R0903,R0201,W0612
__revision__ = 0
def some_function():
"""Just a function."""
pass
__all__ = [some_function]
| [
"alex.barreto@databricks.com"
] | alex.barreto@databricks.com |
c7cee1ce7b74766902666791db0f8f0747fb6459 | aabe7008e0eb77617f1a76cddb98e4b17fd5ce27 | /examples/model_compress/pruning/v2/movement_pruning_glue.py | a8365d9834075161641b7946bb41b2c961cfe190 | [
"MIT"
] | permissive | penghouwen/nni | a09a374a81be46fe246c425275585d5fe79404af | 2e6a2fd2df0d5700cb028b25156bb535a3fc227a | refs/heads/master | 2021-12-21T14:02:32.228973 | 2021-12-13T16:54:39 | 2021-12-13T16:54:39 | 435,926,123 | 1 | 0 | MIT | 2021-12-07T15:09:36 | 2021-12-07T15:09:35 | null | UTF-8 | Python | false | false | 4,715 | py | import functools
from tqdm import tqdm
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from datasets import load_metric, load_dataset
from transformers import (
BertForSequenceClassification,
BertTokenizerFast,
DataCollatorWithPadding,
set_seed
)
from nni.algorithms.compression.v2.pytorch.pruning import MovementPruner
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
gradient_accumulation_steps = 16
# a fake criterion because huggingface output already has loss
def criterion(input, target):
return input.loss
def trainer(model, optimizer, criterion, train_dataloader):
model.train()
counter = 0
for batch in tqdm(train_dataloader):
counter += 1
batch.to(device)
optimizer.zero_grad()
outputs = model(**batch)
# pruner may wrap the criterion, for example, loss = origin_loss + norm(weight), so call criterion to get loss here
loss = criterion(outputs, None)
loss = loss / gradient_accumulation_steps
loss.backward()
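        # Step the optimizer only once every gradient_accumulation_steps batches,
        # emulating a larger effective batch size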
if counter % gradient_accumulation_steps == 0 or counter == len(train_dataloader):
optimizer.step()
if counter % 16000 == 0:
print('Step {}: {}'.format(counter // gradient_accumulation_steps, evaluator(model, metric, is_regression, validate_dataloader)))
def evaluator(model, metric, is_regression, eval_dataloader):
model.eval()
for batch in tqdm(eval_dataloader):
batch.to(device)
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
metric.add_batch(
predictions=predictions,
references=batch["labels"],
)
return metric.compute()
if __name__ == '__main__':
task_name = 'mnli'
is_regression = False
num_labels = 1 if is_regression else (3 if task_name == 'mnli' else 2)
train_batch_size = 8
eval_batch_size = 8
set_seed(1024)
tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')
sentence1_key, sentence2_key = task_to_keys[task_name]
# used to preprocess the raw data
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=False, max_length=128, truncation=True)
if "label" in examples:
# In all cases, rename the column to labels because the model will expect that.
result["labels"] = examples["label"]
return result
raw_datasets = load_dataset('glue', task_name, cache_dir='./data')
processed_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names)
train_dataset = processed_datasets['train']
validate_dataset = processed_datasets['validation_matched' if task_name == "mnli" else 'validation']
data_collator = DataCollatorWithPadding(tokenizer)
train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=train_batch_size)
validate_dataloader = DataLoader(validate_dataset, collate_fn=data_collator, batch_size=eval_batch_size)
metric = load_metric("glue", task_name)
model = BertForSequenceClassification.from_pretrained('bert-base-cased', num_labels=num_labels).to(device)
print('Initial: {}'.format(evaluator(model, metric, is_regression, validate_dataloader)))
config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder'], 'sparsity': 0.9}]
p_trainer = functools.partial(trainer, train_dataloader=train_dataloader)
optimizer = Adam(model.parameters(), lr=2e-5)
pruner = MovementPruner(model, config_list, p_trainer, optimizer, criterion, training_epochs=10,
warm_up_step=3000, cool_down_beginning_step=27000)
_, masks = pruner.compress()
pruner.show_pruned_weights()
print('Final: {}'.format(evaluator(model, metric, is_regression, validate_dataloader)))
optimizer = Adam(model.parameters(), lr=2e-5)
trainer(model, optimizer, criterion, train_dataloader)
print('After 1 epoch finetuning: {}'.format(evaluator(model, metric, is_regression, validate_dataloader)))
| [
"noreply@github.com"
] | penghouwen.noreply@github.com |
ed8b7dbd7403201e805b8dbacb2d97cfdf5005eb | 0f59e486ea9d7c96b8c3f7f92bf063fc8389f1e8 | /vivisect/analysis/amd64/__init__.py | 2b48787dc8ec2faa3d32624e5f1ca487682d33a5 | [
"Apache-2.0"
] | permissive | vivisect/vivisect | ac259918b6281d9431c32a0b2307c61f9cab0dec | b07e161cc28b19fdda0d047eefafed22c5b00f15 | refs/heads/master | 2023-08-25T09:02:00.526532 | 2023-07-26T03:07:07 | 2023-07-26T03:07:07 | 26,651,759 | 833 | 181 | Apache-2.0 | 2023-09-07T03:43:53 | 2014-11-14T18:28:47 | Python | UTF-8 | Python | false | false | 34 | py | '''
Amd64 Analysis Modules
'''
| [
"invisigoth@kenshoto.com"
] | invisigoth@kenshoto.com |
2adf288db73ef957c82ad3b82c56653c52cf1dfb | 53e58c213232e02250e64f48b97403ca86cd02f9 | /16/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisQCD_HT200to300.py | ca9031e1be70ac3888d31407f97e120bf09f4de2 | [] | no_license | xdlyu/fullRunII_ntuple_102X | 32e79c3bbc704cfaa00c67ab5124d40627fdacaf | d420b83eb9626a8ff1c79af5d34779cb805d57d8 | refs/heads/master | 2020-12-23T15:39:35.938678 | 2020-05-01T14:41:38 | 2020-05-01T14:41:38 | 237,192,426 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = 'QCD_HT200to300'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.sendExternalFolder = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/QCD_HT200to300_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext1-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
name = 'WWW'
steam_dir = 'xulyu'
#config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'QCD_HT200to300'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"XXX@cern.ch"
] | XXX@cern.ch |
1ca9421c30d507ffd0c20ef335be2cd7e57b5697 | 347a6aac6fc40edab03d75a53e89053aeeb8fd72 | /quizzes/Quiz7.py | f9e8bdc18bfc69f7deba22dcd76922245d4bc853 | [] | no_license | jwilke/cs373 | 0d5de4676c13e83c8b9dbcab66140be53cebeaf9 | 7923f3710eaa76d38d8261d6dc596f2bfaf12a8e | refs/heads/master | 2021-01-16T00:23:35.227487 | 2012-07-05T22:53:14 | 2012-07-05T22:53:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | #!/usr/bin/env python
"""
CS373: Quiz #7 (9 pts)
"""
""" ----------------------------------------------------------------------
1. In the paper, "A Bug and a Crash" about the Ariane 5, what was the
software bug?
(2 pts)
the conversion of a 64-bit floating-point number to a 16-bit signed integer
"""
""" ----------------------------------------------------------------------
2. In the paper, "Mariner 1", what was the software bug?
(1 pt)
the omission of a hyphen
"""
""" ----------------------------------------------------------------------
3. What is the output of the following program?
(2 pts)
True
False
"""
a = [2, 3, 4]
b = a
b += [5]
print a is b
a = (2, 3, 4)
b = a
b += (5,)
print a is b
""" ----------------------------------------------------------------------
4. What semantic difference is there between Java's conditional expression
and Python's? Why?
(4 pts)
Java's then and else clause must be of the same type
Java is typed and the compiler must be able to determine the type of the
entire conditional expression
"""
| [
"downing@cs.utexas.edu"
] | downing@cs.utexas.edu |
352b804747cc226f09e7a42316e6262c0a63a77b | 15581a76b36eab6062e71d4e5641cdfaf768b697 | /LeetCode_30days_challenge/2020/August/Design HashSet.py | 8ac9ddd111bec7159b93c31f2ee92c9ef039863d | [] | no_license | MarianDanaila/Competitive-Programming | dd61298cc02ca3556ebc3394e8d635b57f58b4d2 | 3c5a662e931a5aa1934fba74b249bce65a5d75e2 | refs/heads/master | 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | class MyHashSet:
def __init__(self):
self.numBuckets = 15000
self.buckets = [[] for _ in range(self.numBuckets)]
def hash_function(self, key):
return key % self.numBuckets
def add(self, key):
i = self.hash_function(key)
if not key in self.buckets[i]:
self.buckets[i].append(key)
def remove(self, key):
i = self.hash_function(key)
if key in self.buckets[i]:
self.buckets[i].remove(key)
def contains(self, key):
i = self.hash_function(key)
if key in self.buckets[i]:
return True
else:
return False
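# Usage sketch (follows the LeetCode 705 MyHashSet interface):
if __name__ == "__main__":
    hash_set = MyHashSet()
    hash_set.add(1)
    hash_set.add(2)
    print(hash_set.contains(1))  # True
    print(hash_set.contains(3))  # False
    hash_set.remove(2)
    print(hash_set.contains(2))  # False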
| [
"mariandanaila01@gmail.com"
] | mariandanaila01@gmail.com |
94675a9a4c3a35afe8b4a98198dd51c0877c1b33 | 0ad3f130560a342cceb47889ab7ceac6d8442834 | /ltdmason/s3upload.py | 0d89112f890c34645e75833dfcb596932d59229c | [
"MIT"
] | permissive | AvdN/ltd-mason | dfd4cf4df19170e9422282b986ef95a06d566e51 | b14940037010b1399bd66888c4caef3dd350d76c | refs/heads/master | 2021-01-24T06:44:00.270568 | 2017-03-13T17:44:27 | 2017-03-13T17:44:27 | 93,316,933 | 0 | 0 | null | 2017-06-04T13:58:02 | 2017-06-04T13:58:02 | null | UTF-8 | Python | false | false | 13,865 | py | """S3 upload/sync utilities."""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from builtins import * # NOQA
from future.standard_library import install_aliases
install_aliases() # NOQA
import os
import logging
import mimetypes
import boto3
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def upload(bucket_name, path_prefix, source_dir,
upload_dir_redirect_objects=True,
surrogate_key=None, acl=None,
cache_control_max_age=31536000,
aws_access_key_id=None, aws_secret_access_key=None,
aws_profile=None):
"""Upload built documentation to S3.
This function places the contents of the Sphinx HTML build directory
into the ``/path_prefix/`` directory of an *existing* S3 bucket.
Existing files on S3 are overwritten; files that no longer exist in the
``source_dir`` are deleted from S3.
S3 credentials are assumed to be stored in a place where boto3 can read
    them, such as :file:`~/.aws/credentials`. `aws_profile` allows you
    to select which AWS credentials profile you wish to use from
    :file:`~/.aws/credentials`.
See http://boto3.readthedocs.org/en/latest/guide/quickstart.html.
Parameters
----------
bucket_name : str
Name of the S3 bucket where documentation is uploaded.
path_prefix : str
The root directory in the bucket where documentation is stored.
source_dir : str
Path of the Sphinx HTML build directory on the local file system.
The contents of this directory are uploaded into the ``/path_prefix/``
directory of the S3 bucket.
upload_dir_redirect_objects : bool, optional
A feature flag to enable uploading objects to S3 for every directory.
        These objects carry an ``x-amz-meta-dir-redirect=true`` HTTP
        header that tells Fastly to issue a 301 redirect from the directory
        object to the '/index.html' in that directory.
surrogate_key : str, optional
The surrogate key to insert in the header of all objects
in the ``x-amz-meta-surrogate-key`` field. This key is used to purge
builds from the Fastly CDN when Editions change.
If `None` then no header will be set.
acl : str, optional
The pre-canned AWS access control list to apply to this upload.
        Defaults to `None` (no explicit ACL); pass ``'public-read'`` to
        allow files to be downloaded over HTTP by the public. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
for an overview of S3's pre-canned ACL lists. Note that ACL settings
are not validated locally.
cache_control_max_age : int, optional
Defaults to 31536000 seconds = 1 year.
aws_access_key_id : str, optional
The access key for your AWS account. Also set `aws_secret_access_key`.
aws_secret_access_key : str, optional
The secret key for your AWS account.
aws_profile : str, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of `aws_access_key_id` and `aws_secret_access_key` for file-based
credentials.
"""
log.debug('s3upload.upload({0}, {1}, {2})'.format(
bucket_name, path_prefix, source_dir))
session = boto3.session.Session(
profile_name=aws_profile,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
metadata = None
if surrogate_key is not None:
if metadata is None:
metadata = {}
metadata['surrogate-key'] = surrogate_key
if cache_control_max_age is not None:
cache_control = 'max-age={0:d}'.format(cache_control_max_age)
else:
cache_control = None
manager = ObjectManager(session, bucket_name, path_prefix)
for (rootdir, dirnames, filenames) in os.walk(source_dir):
# name of root directory on S3 bucket
bucket_root = os.path.relpath(rootdir, start=source_dir)
if bucket_root in ('.', '/'):
bucket_root = ''
# Delete bucket directories that no longer exist in source
bucket_dirnames = manager.list_dirnames_in_directory(bucket_root)
for bucket_dirname in bucket_dirnames:
if bucket_dirname not in dirnames:
log.debug(('Deleting bucket directory {0}'.format(
bucket_dirname)))
manager.delete_directory(bucket_dirname)
# Delete files that no longer exist in source
bucket_filenames = manager.list_filenames_in_directory(bucket_root)
for bucket_filename in bucket_filenames:
if bucket_filename not in filenames:
bucket_filename = os.path.join(bucket_root, bucket_filename)
log.debug('Deleting bucket file {0}'.format(bucket_filename))
manager.delete_file(bucket_filename)
# Upload files in directory
for filename in filenames:
local_path = os.path.join(rootdir, filename)
bucket_path = os.path.join(path_prefix, bucket_root, filename)
log.debug('Uploading to {0}'.format(bucket_path))
_upload_file(local_path, bucket_path, bucket,
metadata=metadata, acl=acl,
cache_control=cache_control)
# Upload a directory redirect object
if upload_dir_redirect_objects is True:
bucket_dir_path = os.path.join(path_prefix, bucket_root)
bucket_dir_path = bucket_dir_path.rstrip('/')
if metadata:
redirect_metadata = dict(metadata)
else:
redirect_metadata = {}
redirect_metadata['dir-redirect'] = 'true'
_upload_object(bucket_dir_path,
content='',
bucket=bucket,
metadata=redirect_metadata,
acl=acl,
cache_control=cache_control)
def _upload_file(local_path, bucket_path, bucket,
metadata=None, acl=None, cache_control=None):
"""Upload a file to the S3 bucket.
This function uses the mimetypes module to guess and then set the
Content-Type and Encoding-Type headers.
Parameters
----------
local_path : str
Full path to a file on the local file system.
bucket_path : str
Destination path (also known as the key name) of the file in the
S3 bucket.
bucket : `boto3` Bucket instance
S3 bucket.
metadata : dict, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : str, optional
A pre-canned access control list. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
cache_control : str, optional
The cache-control header value. For example, 'max-age=31536000'.
"""
extra_args = {}
if acl is not None:
extra_args['ACL'] = acl
if metadata is not None:
extra_args['Metadata'] = metadata
if cache_control is not None:
extra_args['CacheControl'] = cache_control
# guess_type returns None if it cannot detect a type
content_type, content_encoding = mimetypes.guess_type(local_path,
strict=False)
if content_type is not None:
extra_args['ContentType'] = content_type
log.debug(str(extra_args))
obj = bucket.Object(bucket_path)
# no return status from the upload_file api
obj.upload_file(local_path, ExtraArgs=extra_args)
def _upload_object(bucket_path, bucket, content='',
metadata=None, acl=None, cache_control=None):
"""Upload an arbitrary object to an S3 bucket.
Parameters
----------
bucket_path : str
Destination path (also known as the key name) of the file in the
S3 bucket.
    content : str or bytes, optional
        Object content.
bucket : `boto3` Bucket instance
S3 bucket.
metadata : dict, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : str, optional
A pre-canned access control list. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
cache_control : str, optional
The cache-control header value. For example, 'max-age=31536000'.
"""
args = {}
if metadata is not None:
args['Metadata'] = metadata
if acl is not None:
args['ACL'] = acl
if cache_control is not None:
args['CacheControl'] = cache_control
obj = bucket.Object(bucket_path)
obj.put(Body=content, **args)
class ObjectManager(object):
"""Manage objects existing in a bucket under a specific ``bucket_root``.
The ObjectManager maintains information about objects that exist in the
bucket, and can delete objects that no longer exist in the source.
Parameters
----------
session : :class:`boto3.session.Session`
A boto3 session instance provisioned with the correct identities.
bucket_name : str
Name of the S3 bucket.
bucket_root : str
        Name of the root directory in the bucket (e.g., the version slug)
        where documentation is stored.
"""
def __init__(self, session, bucket_name, bucket_root):
super().__init__()
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
self._session = session
self._bucket = bucket
self._bucket_root = bucket_root
# Strip trailing '/' from bucket_root for comparisons
if self._bucket_root.endswith('/'):
self._bucket_root = self._bucket_root.rstrip('/')
def list_filenames_in_directory(self, dirname):
"""List all file-type object names that exist at the root of this
bucket directory.
Parameters
----------
dirname : str
Directory name in the bucket relative to ``bucket_root/``.
Returns
-------
filenames : list
List of file names (`str`), relative to ``bucket_root/``, that
exist at the root of ``dirname``.
"""
prefix = self._create_prefix(dirname)
filenames = []
for obj in self._bucket.objects.filter(Prefix=prefix):
if obj.key.endswith('/'):
continue
obj_dirname = os.path.dirname(obj.key)
if obj_dirname == prefix:
# object is at root of directory
filenames.append(os.path.relpath(obj.key,
start=prefix))
return filenames
def list_dirnames_in_directory(self, dirname):
"""List all names of directories that exist at the root of this
bucket directory.
Note that *directories* don't exist in S3; rather directories are
inferred from path names.
Parameters
----------
dirname : str
Directory name in the bucket relative to ``bucket_root``.
Returns
-------
dirnames : list
List of directory names (`str`), relative to ``bucket_root/``,
that exist at the root of ``dirname``.
"""
prefix = self._create_prefix(dirname)
dirnames = []
for obj in self._bucket.objects.filter(Prefix=prefix):
dirname = os.path.dirname(obj.key)
# if the object is a directory redirect, make it look like a dir
if dirname == '':
dirname = obj.key + '/'
rel_dirname = os.path.relpath(dirname, start=prefix)
dir_parts = rel_dirname.split('/')
if len(dir_parts) == 1:
dirnames.append(dir_parts[0])
dirnames = list(set(dirnames))
if '.' in dirnames:
dirnames.remove('.')
return dirnames
def _create_prefix(self, dirname):
if dirname in ('.', '/'):
dirname = ''
# Strips trailing slash from dir prefix for comparisons
# os.dirname() returns directory names without a trailing /
prefix = os.path.join(self._bucket_root, dirname)
if prefix.endswith('/'):
prefix = prefix.rstrip('/')
return prefix
def delete_file(self, filename):
"""Delete a file from the bucket.
Parameters
----------
filename : str
Name of the file, relative to ``bucket_root/``.
"""
key = os.path.join(self._bucket_root, filename)
objects = list(self._bucket.objects.filter(Prefix=key))
for obj in objects:
obj.delete()
def delete_directory(self, dirname):
"""Delete a directory (and contents) from the bucket.
Parameters
----------
dirname : str
Name of the directory, relative to ``bucket_root/``.
"""
key = os.path.join(self._bucket_root, dirname)
if not key.endswith('/'):
key += '/'
delete_keys = {'Objects': []}
key_objects = [{'Key': obj.key}
for obj in self._bucket.objects.filter(Prefix=key)]
assert len(key_objects) > 0
delete_keys['Objects'] = key_objects
# based on http://stackoverflow.com/a/34888103
s3 = self._session.resource('s3')
r = s3.meta.client.delete_objects(Bucket=self._bucket.name,
Delete=delete_keys)
log.debug(r)
if 'Errors' in r:
raise S3Error('S3 could not delete {0}'.format(key))
class S3Error(Exception):
"""General errors in S3 API usage."""
pass
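# --- Editor's addition: hedged usage sketch. The bucket name, prefix,
# source directory, surrogate key, and AWS profile below are illustrative
# placeholders, not values taken from this repository.
if __name__ == "__main__":
    upload(bucket_name="example-docs-bucket",
           path_prefix="builds/draft",
           source_dir="/tmp/html",
           surrogate_key="build-123",
           acl="public-read",
           aws_profile="default")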
| [
"jsick@lsst.org"
] | jsick@lsst.org |
af8a2ebd0fe7f5f06e5aeccc636bf8704d77ef8a | ccf6945c819e25a3eae92103116b7d1063e21871 | /chess_piece_detection/app/other_models/alexnet.py | 6da26d5ea58921656765650c08f6ac477852e5d1 | [] | no_license | ace-racer/Extending-Board-Games-using-deep-learning | 67355eabec91ed8afb1150e4c2d6bb0068e74910 | 7171b14c80be35b0ee882322c318307779379e9a | refs/heads/master | 2020-04-10T05:32:18.705335 | 2019-04-22T11:42:47 | 2019-04-22T11:42:47 | 160,830,611 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,261 | py | # Import necessary components to build LeNet
# Reference: https://github.com/eweill/keras-deepcv/blob/master/models/classification/alexnet.py
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras.optimizers import SGD, RMSprop, Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
# Other imports
import numpy as np
import os
# custom imports
import appconfigs
import modelconfigs
import constants
import utils
def train_alexnet_model(model_configs, train_model=True, num_samples=None):
print("Alexnet model...")
X_train, y_train = utils.get_required_data_with_labels_for_CNN(appconfigs.location_of_train_data, num_samples)
X_test, y_test = utils.get_required_data_with_labels_for_CNN(appconfigs.location_of_test_data, num_samples)
# Initialize model
alexnet = Sequential()
# Layer 1
alexnet.add(Conv2D(96, (11, 11), input_shape=(200, 200, 3),
padding='same', kernel_regularizer=l2(0.)))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 2
alexnet.add(Conv2D(256, (5, 5), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 3
alexnet.add(ZeroPadding2D((1, 1)))
alexnet.add(Conv2D(512, (3, 3), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 4
alexnet.add(ZeroPadding2D((1, 1)))
alexnet.add(Conv2D(1024, (3, 3), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
# Layer 5
alexnet.add(ZeroPadding2D((1, 1)))
alexnet.add(Conv2D(1024, (3, 3), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 6
alexnet.add(Flatten())
alexnet.add(Dense(3072))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(Dropout(0.5))
# Layer 7
alexnet.add(Dense(4096))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(Dropout(0.5))
# Layer 8
alexnet.add(Dense(constants.num_output_classes))
alexnet.add(BatchNormalization())
alexnet.add(Activation('softmax'))
batch_size = model_configs["batch_size"][0]
# number of training epochs
nb_epoch = model_configs["epochs"][0]
if train_model:
filepath = os.path.join(appconfigs.model_folder_location, model_configs["model_weights_file_name"][0])
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
save_best_only=True,
mode='max')
earlystop = EarlyStopping(monitor='val_acc', min_delta=0.001, patience=10,
verbose=1, mode='max')
tensorboard = TensorBoard(log_dir=appconfigs.tensorboard_logs_folder_location, histogram_freq=0, write_graph=True, write_images=True)
callbacks_list = [checkpoint, earlystop, tensorboard]
adam = Adam(lr=model_configs["lr"][0], beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
alexnet.compile(loss='sparse_categorical_crossentropy',
optimizer=adam,
metrics=['accuracy'])
hist = alexnet.fit(X_train, y_train, shuffle=True, batch_size=batch_size,
epochs=nb_epoch, verbose=1,
validation_data=(X_test, y_test), callbacks=callbacks_list)
return hist, alexnet, X_test, y_test
else:
adam = Adam(lr=model_configs["lr"][0], beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
alexnet.compile(loss='sparse_categorical_crossentropy',
optimizer=adam,
metrics=['accuracy'])
return None, alexnet, X_test, y_test
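# --- Editor's addition: hedged usage sketch. The config keys mirror the
# ones read by train_alexnet_model() above; the values are placeholders.
if __name__ == "__main__":
    configs = {
        "batch_size": [32],
        "epochs": [50],
        "lr": [1e-4],
        "model_weights_file_name": ["alexnet_weights.h5"],
    }
    hist, model, X_test, y_test = train_alexnet_model(configs, train_model=True)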
| [
"anuragchatterjee92@gmail.com"
] | anuragchatterjee92@gmail.com |
3415d6c90827899f16903882ab9e54d437fd5b09 | 34eb0a65adb4290d6224075a2700d432d5649de6 | /tachyon/common/restclient.py | 1d6c499cbfc98be41954791218186029b21669a2 | [
"BSD-3-Clause"
] | permissive | Vuader/tachyon_common | ae950e13ac14df590cc76b3c7f98c26434104217 | a675d13a251aeda16dba7a416354872ee41509e6 | refs/heads/master | 2021-01-19T09:51:55.904274 | 2017-02-15T11:50:00 | 2017-02-15T11:50:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,684 | py | # Tachyon OSS Framework
#
# Copyright (c) 2016-2017, see Authors.txt
# All rights reserved.
#
# LICENSE: (BSD3-Clause)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import thread
import json
import tachyon.ui
try:
# python 3
from io import BytesIO
except ImportError:
# python 2
from StringIO import StringIO as BytesIO
try:
# python 3
from urllib.parse import urlencode
except ImportError:
# python 2
from urllib import urlencode
import nfw
log = logging.getLogger(__name__)
sessions = {}
class RestClient(nfw.RestClient):
def __init__(self, url, username=None, password=None, domain=None):
global sessions
self.thread_id = thread.get_ident()
if self.thread_id not in sessions:
sessions[self.thread_id] = {}
self.session = sessions[self.thread_id]
self.url = url
if url in self.session:
self.username = self.session[url]['username']
self.password = self.session[url]['password']
self.domain = self.session[url]['domain']
self.tachyon_headers = self.session[url]['headers']
super(RestClient, self).__init__()
else:
self.session[url] = {}
self.session[url]['username'] = username
self.session[url]['password'] = password
self.session[url]['domain'] = domain
self.session[url]['headers'] = {}
self.username = username
self.password = password
self.domain = domain
super(RestClient, self).__init__()
self.tachyon_headers = self.session[url]['headers']
if username is not None:
            self.authenticate(username, password, domain)
def authenticate(self, username, password, domain):
url = self.url
auth_url = "%s/login" % (url,)
if 'token' in self.tachyon_headers:
del self.tachyon_headers['token']
self.tachyon_headers['X-Domain'] = domain
data = {}
data['username'] = username
data['password'] = password
data['expire'] = 1
server_headers, result = self.execute("POST", auth_url,
data, self.tachyon_headers)
if 'token' in result:
self.token = result['token']
self.tachyon_headers['X-Auth-Token'] = self.token
else:
raise tachyon.ui.exceptions.Authentication("Could not connect/authenticate")
self.session[url]['headers'] = self.tachyon_headers
return result
def token(self, token, domain, tenant):
log.error("TOKEN %s" % (token,))
url = self.url
auth_url = "%s/login" % (url,)
self.tachyon_headers['X-Tenant'] = tenant
self.tachyon_headers['X-Domain'] = domain
self.tachyon_headers['X-Auth-Token'] = token
server_headers, result = self.execute("GET", auth_url,
None, self.tachyon_headers)
if 'token' in result:
self.token = token
else:
raise tachyon.ui.exceptions.Authentication("Could not connect/authenticate")
self.session[url]['headers'] = self.tachyon_headers
return result
    def domain(self, domain):
        self.tachyon_headers['X-Domain'] = domain
        self.session[self.url]['headers'] = self.tachyon_headers
    def tenant(self, tenant):
        self.tachyon_headers['X-Tenant'] = tenant
        self.session[self.url]['headers'] = self.tachyon_headers
def execute(self, request, url, obj=None, headers=None):
if obj is not None:
data = json.dumps(obj)
else:
data = None
if self.url not in url:
url = "%s/%s" % (self.url, url)
if headers is None:
headers = self.tachyon_headers
else:
headers.update(self.tachyon_headers)
server_headers, response = super(RestClient, self).execute(request, url, data, headers)
if response is not None:
response = json.loads(response)
return [server_headers, response]
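# --- Editor's addition: hedged usage sketch. The URL, credentials, and
# endpoint are placeholders; execute() returns [server_headers, json].
if __name__ == "__main__":
    client = RestClient("https://api.example.com", username="admin",
                        password="secret", domain="default")
    headers, result = client.execute("GET", "users")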
| [
"christiaan.rademan@gmail.com"
] | christiaan.rademan@gmail.com |
4d8d26efe3b7019edb01e9d8bda2c443a49b9e38 | 3b504a983f1807ae7c5af51078bfab8c187fc82d | /client/adapters/IStatsAdapter.py | 09fc72b393427f042a93a4eee9e8594f486462b9 | [] | no_license | SEA-group/wowp_scripts | 7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58 | 2fe54a44df34f2dcaa6860a23b835dcd8dd21402 | refs/heads/master | 2021-09-07T23:10:13.706605 | 2018-03-02T17:23:48 | 2018-03-02T17:23:48 | 117,280,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,718 | py | # Embedded file name: scripts/client/adapters/IStatsAdapter.py
from locale import getpreferredencoding
from time import localtime, strftime
import Settings
import db.DBLogic
from HelperFunctions import wowpRound
from Helpers.i18n import localizeAchievements, getFormattedTime, localizeAirplaneLong, localizeTimeIntervalHM, localizeTimeIntervalMS, localizeTooltips, separateThousandths
from adapters.DefaultAdapter import DefaultAdapter
from consts import PLANE_TYPE, PLANE_TYPE_NAME
NATION_FLAG_TEMPLATE = 'icons/shop/flagAchiev{0}.dds'
class IPlaneStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
stats = ob['stats']
lastGameTime = stats['lastGameTime']
ob['lastGameTime'] = getFormattedTime(lastGameTime, Settings.g_instance.scriptConfig.timeFormated['dmYHM']) if lastGameTime > 0 else ''
ob['row'] = stats
ob['stats'] = _convertPlaneStats(stats)
return ob
class ISummaryStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
stats = ob['stats']
lastGameTime = stats['lastGameTime']
ob['lastGameTime'] = getFormattedTime(lastGameTime, Settings.g_instance.scriptConfig.timeFormated['dmYHM']) if lastGameTime > 0 else ''
ob['flighttime'] = stats['flighttime']
ob['flighttimeStr'] = localizeTimeIntervalHM(stats['flighttime'])
ob['createdAt'] = strftime(Settings.g_instance.scriptConfig.timeFormated['dmYHM'], localtime(float(ob['stats']['createdAt']))).decode(getpreferredencoding())
ob['stats'] = _convertSummeryStats(stats)
return ob
class IShortPlaneStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
ob = super(IShortPlaneStatsAdapter, self).__call__(account, ob, **kw)
ob['flighttimeStr'] = localizeTimeIntervalHM(ob['flighttime'])
return ob
class IShortPlaneDescription(DefaultAdapter):
def __call__(self, account, ob, **kw):
ob = super(IShortPlaneDescription, self).__call__(account, ob, **kw)
planeID = kw['idTypeList'][0][0]
planeData = db.DBLogic.g_instance.getAircraftData(planeID).airplane
ob['planeName'] = localizeAirplaneLong(planeData.name)
ob['level'] = planeData.level
ob['icoPath'] = planeData.iconPath
ob['flagPath'] = NATION_FLAG_TEMPLATE.format(planeData.country)
ob['nationID'] = db.DBLogic.g_instance.getNationIDbyAircraftID(planeID)
ob['planeID'] = planeID
return ob
class IPlayerSummaryStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
stats = ob['stats']
lastGameTime = stats['lastGameTime']
ob['lastGameTime'] = getFormattedTime(lastGameTime, Settings.g_instance.scriptConfig.timeFormated['dmYHM']) if lastGameTime > 0 else ''
ob['flighttime'] = stats['flighttime']
ob['flighttimeStr'] = localizeTimeIntervalHM(stats['flighttime'])
ob['createdAt'] = strftime(Settings.g_instance.scriptConfig.timeFormated['dmYHM'], localtime(float(stats['createdAt']))).decode(getpreferredencoding()) if stats['createdAt'] > 0 else ''
ob['stats'] = _convertSummeryStats(stats)
return ob
class IPlayerShortPlaneStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
ob = super(IPlayerShortPlaneStatsAdapter, self).__call__(account, ob, **kw)
ob['flighttimeStr'] = localizeTimeIntervalHM(ob['flighttime'])
return ob
class IPlayerShortPlaneDescriptionAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
ob = super(IPlayerShortPlaneDescriptionAdapter, self).__call__(account, ob, **kw)
import db.DBLogic
planeData = db.DBLogic.g_instance.getAircraftData(kw['idTypeList'][1][0]).airplane
ob['planeName'] = localizeAirplaneLong(planeData.name)
ob['level'] = planeData.level
ob['icoPath'] = planeData.iconPath
ob['flagPath'] = NATION_FLAG_TEMPLATE.format(planeData.country)
ob['nationID'] = db.DBLogic.g_instance.getNationIDbyAircraftID(kw['idTypeList'][1][0])
ob['planeID'] = kw['idTypeList'][1][0]
return ob
class IPlayerPlaneStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
stats = ob['stats']
lastGameTime = stats['lastGameTime']
ob['lastGameTime'] = getFormattedTime(lastGameTime, Settings.g_instance.scriptConfig.timeFormated['dmYHM']) if lastGameTime > 0 else ''
ob['stats'] = _convertPlaneStats(stats)
return ob
def _percent(total, partial, precision = 0):
if total > 0:
return wowpRound(partial * 100.0 / total, precision)
return 0
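# Editor's note (worked example): _percent(200, 50) yields 25 and
# _percent(3, 1, 2) yields roughly 33.33, assuming wowpRound behaves
# like the built-in round(); a zero total always yields 0.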
def _statRecord(name = None, value = None, percentValue = None, title = None, tooltip = None):
return dict(name=localizeAchievements(name), value=separateThousandths(value) if isinstance(value, int) else value, percentValue=percentValue, title=localizeAchievements(title) if title else None, tooltip=localizeTooltips(tooltip) if tooltip else None)
def ftostr(val):
return format(val, '.2f')
def _convertCommonStats(stats):
return [_statRecord(None, None, None, 'ACHIEVEMENTS_BETTLE_EFFICIENCY'),
_statRecord('ACHIEVEMENTS_AIRCRAFTS_DESTROYED_2-0', stats['pKill']),
_statRecord('ACHIEVEMENTS_WINS_IN_GROUP_2-0', stats['pAssist']),
_statRecord('ACHIEVEMENTS_DEFENDER-BOMBER_DESTROYED', stats['dKill'] + stats['bKill']),
_statRecord('ACHIEVEMENTS_DEFENDER-BOMBER_DESTROYED_ASSISTS', stats['dAssist'] + stats['bAssist']),
_statRecord('ACHIEVEMENTS_AIR_TARGETS_DESTROYED_AVERAGE_PER_FLIGHT', int(round((stats['pKill'] + stats['dKill'] + stats['bKill']) / float(stats['flights']) if stats['flights'] else 0))),
_statRecord('ACHIEVEMENTS_GROUND_OBJECTS_DESTROYED', stats['gKill']),
_statRecord('ACHIEVEMENTS_GROUND_OBJECTS_DESTROYED_ASSIST', stats['gAssist']),
_statRecord('ACHIEVEMENTS_GROUND_OBJECTS_DESTROYED_AVERAGE_PER_FLIGHT', int(round(stats['gKill'] / float(stats['flights']) if stats['flights'] else 0))),
_statRecord('ACHIEVEMENTS_PARTICIPATION_IN_CAP_SECTOR', stats['zoneCapture']),
_statRecord('ACHIEVEMENTS_AIRCRAFTS_DESTROED_IN_DEF_SECTOR', stats['pKillDefZone']),
_statRecord(None, None, None, 'ACHIEVEMENTS_HEROIC_DEEDS'),
_statRecord('ACHIEVEMENTS_MAX_AIRCRAFTS_DESTROED_PER_BATTLE', stats['pKillMax']),
_statRecord('ACHIEVEMENTS_MAX_DEFENDER-BOMBER_DESTROED_PER_BATTLE', stats['dbKillMax']),
_statRecord('ACHIEVEMENTS_MAX_GROUND_OBJECT_DESTROED_PER_BATTLE', stats['gKillMax']),
_statRecord('ACHIEVEMENTS_MAX_DAMAGE_AIR_TARGETS_PER_BATTLE', int(round(stats['atDamageMax']))),
_statRecord('ACHIEVEMENTS_MAX_DAMAGE_AIRCRAFT_PER_BATTLE', int(round(stats['pDamageMax']))),
_statRecord('ACHIEVEMENTS_MAX_DAMAGE_DEFENDER-BOMBER_PER_BATTLE', int(round(stats['dbDamageMax']))),
_statRecord('ACHIEVEMENTS_MAX_DAMAGE_GROUND_OBJECT_PER_BATTLE', int(round(stats['gDamageMax']))),
_statRecord('ACHIEVEMENTS_MAX_AIRCRAFT_DESTROED_IN_DEF_SECTOR', stats['pKillDefZoneMax'])]
def _convertSummeryStats(stats):
records = [_statRecord('ACHIEVEMENTS_TOTAL_BATTLES', stats['battles']),
_statRecord('ACHIEVEMENTS_WINS', stats['wins'], _percent(stats['battles'], stats['wins'], 2)),
_statRecord('ACHIEVEMENTS_DEFEATS', stats['losses'], _percent(stats['battles'], stats['losses'], 2)),
_statRecord('ACHIEVEMENTS_DRAWS', stats['draws'], _percent(stats['battles'], stats['draws'], 2)),
_statRecord('ACHIEVEMENTS_STAT_IN_NEW_MODE_CONQUEST', localizeTimeIntervalHM(stats['flighttime']))]
records.extend(_convertCommonStats(stats))
records.append(_statRecord(None, None, None, 'ACHIEVEMENTS_XP_AND_BATTLESCORE'))
records.append(_statRecord('ACHIEVEMENTS_AVG_XP_PER_BATTLE', int(stats['baseXPAvg'])))
records.append(_statRecord('ACHIEVEMENTS_MAX_XP_FOR_BATTLE', stats['baseXPMax']))
records.append(_statRecord('ACHIEVEMENTS_AVERAGE_BATTLE_POINTS_PER_BATTLE', int(stats['bScoreAvg'])))
records.append(_statRecord('ACHIEVEMENTS_MAX_BATTLE_POINTS_PER_BATTLE', stats['bScoreMax']))
idx = 16
coeff = 20
def _addEfficiency(plType):
flights = stats['flightsByPlType'].get(plType, 0)
return _statRecord('ACHIEVEMENTS_CLASS_EFFICIENCY_%s' % PLANE_TYPE.toStr(plType), '{}%'.format(round(stats['ranksByPlType'].get(plType, 0) / float(flights) * coeff, 1) if flights else 0.0))
records.insert(idx, _addEfficiency(PLANE_TYPE.BOMBER))
records.insert(idx, _addEfficiency(PLANE_TYPE.ASSAULT))
records.insert(idx, _addEfficiency(PLANE_TYPE.HFIGHTER))
records.insert(idx, _addEfficiency(PLANE_TYPE.NAVY))
records.insert(idx, _addEfficiency(PLANE_TYPE.FIGHTER))
records.insert(idx, _statRecord(None, None, None, 'ACHIEVEMENTS_CLASS_EFFICIENCY'))
return records
def _convertPlaneStats(stats):
records = [_statRecord('ACHIEVEMENTS_TOTAL_BATTLES', stats['battles']),
_statRecord('ACHIEVEMENTS_TOTAL_FLIGHT', stats['flights']),
_statRecord('ACHIEVEMENTS_AVERAGE_DURATION_FLIGHT', localizeTimeIntervalMS(stats['flighttimeAvg'])),
_statRecord('ACHIEVEMENTS_ALL_DURATION_ON_PLANES', localizeTimeIntervalHM(stats['flighttime']))]
records.extend(_convertCommonStats(stats))
records.append(_statRecord(None, None, None, 'ACHIEVEMENTS_XP_AND_BATTLESCORE'))
records.append(_statRecord('ACHIEVEMENTS_AVERAGE_EXP_PER_FLIGHT', int(stats['baseXPAvg'])))
records.append(_statRecord('ACHIEVEMENTS_MAX_XP_FOR_BATTLE', stats['baseXPMax']))
records.append(_statRecord('ACHIEVEMENTS_AVERAGE_BATTLE_POINTS_PER_FLIGHT', int(stats['bScoreAvg'])))
records.append(_statRecord('ACHIEVEMENTS_MAX_BATTLE_POINTS_PER_BATTLE', stats['bScoreMax']))
return records | [
"55k@outlook.com"
] | 55k@outlook.com |
12f85dfe3b746c02305c2dd7cc147b806129fb82 | 391d648132c1a05e7da575205eef89a7208a892a | /scripts/playground/phase.py | 0fe2427793d433ebdd4a2e65cbfcf3a84a814b9a | [] | no_license | michaelbateman/DemographicInference | c3ceaf69f8b554f3973473607d6b5201cca423f9 | b1e2529b1ce0710f82d2867d08588ae4a6c72bb2 | refs/heads/master | 2021-01-10T06:28:43.698581 | 2015-10-06T18:25:45 | 2015-10-06T18:25:45 | 43,770,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | import subprocess
in_file = 'ag1000g.phase1.AR2.3L.PASS.vcf.gz'
region_size = 5000000
num_regions = 50e6 / region_size
print num_regions
print int(num_regions)
for k in range(0,int(num_regions)):
i = int(num_regions) -1 - k
left = i * region_size
right = (i+1) * region_size
window = str(left) + '-' + str(right)
out_file = 'ag1000g.phase1.AR2.3L.PASS.' + window + '.vcf'
call_string = 'time ../bcftools/bcftools view -o ' + out_file + ' ' + in_file + ' -r 3L:' + window
print call_string
print 'Now creating the file: ', out_file
print '.....'
subprocess.call(call_string, shell=True)
call_string = '../pbwtCurrent/pbwt/pbwt -readVcfGT ' + out_file + ' -writeAll temp_file_name'
print call_string
print 'Now preparing site file...'
subprocess.call(call_string, shell=True)
# The 1530 just below is the number of haplotypes in 765 samples
# Should change in different situation
phased_name = 'ag1000g.phase1.AR2.3L.PASS.' + window + '.phased.vcf'
call_string = '../pbwtCurrent/pbwt/pbwt -readVcfGT ' + out_file + ' -phase 1530 -readAll temp_file_name -writeVcf ' + phased_name
print call_string
print 'Now phasing...'
subprocess.call(call_string, shell=True)
call_string = 'time gzip ' + phased_name
print call_string
subprocess.call(call_string, shell=True)
call_string = 'rm ' + out_file
print call_string
subprocess.call(call_string, shell=True)
print 'Progress: %d out of %d regions complete.' %(k+1, num_regions)
print call_string | [
"bbeeefcake@gmail.com"
] | bbeeefcake@gmail.com |
19180fe87d05b23f4942f5c69caa88c9fb25a645 | 8e38169331b6fdec8cb61e2a8ed564023e2cba9a | /telluricpy/__init__.py | 5a1309d60edfe354a8ebb966a91bde80ceca426f | [
"MIT"
] | permissive | OpenGeoVis/telluricpy | 20b4dd0f77e9a60c3dd61dbfac2668c3994ffdc3 | b5cd5ac9e373137c33b2ecc98d4dfed4d0784699 | refs/heads/master | 2020-03-19T13:31:31.479146 | 2018-06-29T07:11:24 | 2018-06-29T07:11:24 | 136,583,691 | 0 | 0 | MIT | 2018-06-29T06:50:30 | 2018-06-08T07:32:21 | Python | UTF-8 | Python | false | false | 102 | py | from . import dataFiles
from . import modelOperations
from . import vtkTools
from . import modelTools
| [
"banesullivan@gmail.com"
] | banesullivan@gmail.com |
c3adcbeba8fc8166b6429a87de5ab17b4187ccfd | aabe7008e0eb77617f1a76cddb98e4b17fd5ce27 | /nni/experiment/rest.py | bdacc7c215ac759fdb551e7d4fa3d6e296e45fd1 | [
"MIT"
] | permissive | penghouwen/nni | a09a374a81be46fe246c425275585d5fe79404af | 2e6a2fd2df0d5700cb028b25156bb535a3fc227a | refs/heads/master | 2021-12-21T14:02:32.228973 | 2021-12-13T16:54:39 | 2021-12-13T16:54:39 | 435,926,123 | 1 | 0 | MIT | 2021-12-07T15:09:36 | 2021-12-07T15:09:35 | null | UTF-8 | Python | false | false | 1,156 | py | import logging
from typing import Any, Optional
import requests
_logger = logging.getLogger(__name__)
url_template = 'http://localhost:{}/api/v1/nni{}'
timeout = 20
def request(method: str, port: Optional[int], api: str, data: Any = None) -> Any:
if port is None:
raise RuntimeError('Experiment is not running')
url = url_template.format(port, api)
if data is None:
resp = requests.request(method, url, timeout=timeout)
else:
resp = requests.request(method, url, json=data, timeout=timeout)
if not resp.ok:
_logger.error('rest request %s %s failed: %s %s', method.upper(), url, resp.status_code, resp.text)
resp.raise_for_status()
if method.lower() in ['get', 'post'] and len(resp.content) > 0:
return resp.json()
def get(port: Optional[int], api: str) -> Any:
return request('get', port, api)
def post(port: Optional[int], api: str, data: Any) -> Any:
return request('post', port, api, data)
def put(port: Optional[int], api: str, data: Any) -> None:
request('put', port, api, data)
def delete(port: Optional[int], api: str) -> None:
request('delete', port, api)
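# --- Editor's addition: hedged usage sketch against a locally running
# NNI REST server; the port and API paths below are placeholders.
if __name__ == '__main__':
    print(get(8080, '/check-status'))
    put(8080, '/experiment', {'key': 'value'})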
| [
"noreply@github.com"
] | penghouwen.noreply@github.com |
e6bd6f44f4b8d52a1fe03affd4b5296e02733784 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03435/s544670590.py | b7a33f056509c825aa6f270f9dacfc4421f64bb9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | c1= list(map(int, input().split()))
c2 = list(map(int, input().split()))
c3 = list(map(int, input().split()))
a1 = 0
a2 = c2[0] - c1[0]
a3 = c3[0] - c1[0]
b1 = c1[0]
b2 = c1[1]
b3 = c1[2]
if c1[0] == a1 + b1 and c1[1] == a1 + b2 and c1[2] == a1 + b3 and c2[0] == a2 + b1 and c2[1] == a2 + b2 and c2[2] == a2 + b3 and c3[0] == a3 + b1 and c3[1] == a3 + b2 and c3[2] == a3 + b3:
print('Yes')
else:
print('No') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2983a873d801ab0ebf4602895cbc7188cd6c679e | f02bf676e5fea7a94814b5afaf46e762f1781489 | /src/resource.py | 8ca43fe2a5ce19f8cf911be5902db91c21a1af32 | [] | no_license | PASTAplus/pastaplus_adapter | b514a0274eada7543fc742fbd7757b13c0e58940 | 369688ea41bf87dd27e50eb89376ce894dc828b1 | refs/heads/master | 2020-04-02T01:51:52.549760 | 2017-08-12T19:47:07 | 2017-08-12T19:47:07 | 83,488,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,851 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""":Mod: resource
:Synopsis:
:Author:
servilla
:Created:
3/10/17
"""
import logging
import hashlib
import xml.etree.ElementTree as ET
import requests
import d1_common.resource_map
import d1_common.types.exceptions
import d1_common.types.generated.dataoneTypes_v1 as dataoneTypes_v_1
import d1_common.types.generated.dataoneTypes_v2_0 as dataoneTypes_v2_0
import adapter_exceptions
import adapter_utilities
import properties
from sys_meta import SysMeta
logger = logging.getLogger('resource')
class ResourceBase(object):
def __init__(self, url=None, owner=None):
logger.info('Building resource: {r}'.format(r=url))
self._acl = None
self._checksum_value = None
self._checksum_algorithm = None
self._d1_sys_meta = None
self._file_name = None
self._format_identifier = None
self._identifier = url
self._object = None
self._owner = owner
self._predecessor = None
self._replication_policy = None
self._rights_holder = properties.DEFAULT_RIGHTS_HOLDER
self._size = None
self._url = url
self._vendor_specific_header = None
def _get_checksum_value(self, path, replacement):
"""
Set the checksum value and algorithm for the given resource
:param path: PASTA resource path fragment
:param replacement: Modified path fragment for checksum value
:return: None
"""
url = self._url.replace(path, replacement)
r = adapter_utilities.requests_get_url_wrapper(url=url)
if r is not None:
return r.text.strip()
def _get_acl(self, path, replacement):
"""
Return the EML access control list of principals and permissions
:param path: PASTA resource path fragment
:param replacement: Modified path fragment for PASTA EML ACL
:param owner: Data package principal owner
:return: Access control list
"""
auth = (properties.GMN_USER, properties.GMN_PASSWD)
eml_acl = None
url = self._url.replace(path, replacement)
r = adapter_utilities.requests_get_url_wrapper(url=url, auth=auth)
if r is not None:
eml_acl = r.text.strip()
acl = []
if eml_acl is not None:
tree = ET.ElementTree(ET.fromstring(eml_acl))
for allow_rule in tree.iter('allow'):
principal = allow_rule.find('./principal')
permission = allow_rule.find('./permission')
acl.append(
{'principal': principal.text,
'permission': permission.text})
if self._owner is not None:
acl.append({'principal': self._owner,
'permission': 'changePermission'})
return acl
@property
def acl(self):
return self._acl
@acl.setter
def acl(self, a):
self._acl = a
def get_d1_sys_meta(self):
"""
Return a D1 system metadata object for the given resource as pyxb
object.
:return: D1 system metadata as a pyxb object
"""
sm = SysMeta()
sm.access_policy = self._acl
sm.checksum_algorithm = self._checksum_algorithm
sm.checksum_value = self._checksum_value
sm.format_identifier = self._format_identifier
sm.identifier = self._identifier
sm.replication_policy = self._replication_policy
sm.rights_holder = self._rights_holder
sm.size = self._size
return sm.d1_sys_meta()
@property
def identifier(self):
return self._identifier
@property
def object(self):
return self._object
@property
def owner(self):
return self._owner
@property
def url(self):
return self._url
@property
def vendor_specific_header(self):
return self._vendor_specific_header
class ResourceMetadata(ResourceBase):
def __init__(self, url=None, owner=None):
super(ResourceMetadata,self).__init__(url, owner)
self._acl = self._get_acl('/metadata/eml/', '/metadata/acl/eml/')
self._checksum_value = \
self._get_checksum_value('/metadata/eml/', '/metadata/checksum/eml/')
self._checksum_algorithm = properties.CHECKSUM_ALGORITHM
self._format_identifier = self._get_format_id()
self._size = self._get_size()
self._vendor_specific_header = {'VENDOR-GMN-REMOTE-URL': url}
def _get_format_id(self):
d1_formats = adapter_utilities.get_d1_formats()
format_id = None
url = self._url.replace('/metadata/eml/', '/metadata/format/eml/')
r = adapter_utilities.requests_get_url_wrapper(url=url)
if r is not None:
eml_version = r.text.strip()
if eml_version in d1_formats:
format_id = d1_formats[eml_version].formatId
return format_id
def _get_size(self):
size = None
r = adapter_utilities.requests_get_url_wrapper(url=self._url)
if r is not None:
size = int(r.headers['Content-Length'])
return size
@property
def predecessor(self):
return self._predecessor
@predecessor.setter
def predecessor(self, pred):
identifier = properties.PASTA_BASE_URL + 'metadata/eml/' + \
pred.replace('.', '/')
self._predecessor = identifier
class ResourceReport(ResourceBase):
def __init__(self, url=None, owner=None):
super(ResourceReport,self).__init__(url, owner)
self._acl = self._get_acl('/report/eml/', '/report/acl/eml/')
self._checksum_value = \
self._get_checksum_value('/report/eml/', '/report/checksum/eml/')
self._checksum_algorithm = properties.CHECKSUM_ALGORITHM
self._format_identifier = 'text/xml'
self._size = self._get_size()
self._vendor_specific_header = {'VENDOR-GMN-REMOTE-URL': url}
def _get_size(self):
size = None
r = adapter_utilities.requests_get_url_wrapper(url=self._url)
if r is not None:
size = int(r.headers['Content-Length'])
return size
class ResourceData(ResourceBase):
def __init__(self, url=None, owner=None):
super(ResourceData,self).__init__(url, owner)
self._acl = self._get_acl('/data/eml/', '/data/acl/eml/')
self._checksum_value = \
self._get_checksum_value('/data/eml/', '/data/checksum/eml/')
self._checksum_algorithm = properties.CHECKSUM_ALGORITHM
self._format_identifier = self._get_format_id()
self._size = self._get_size()
self._vendor_specific_header = {'VENDOR-GMN-REMOTE-URL': url}
def _get_format_id(self):
d1_formats = adapter_utilities.get_d1_formats()
format_id = None
try:
r = requests.head(self._url, allow_redirects=True)
if r.status_code == requests.codes.ok:
content_type = r.headers['Content-Type']
if content_type in d1_formats:
format_id = d1_formats[content_type].formatId
else:
format_id = 'application/octet-stream'
except Exception as e:
logger.error(e)
return format_id
def _get_size(self):
size = None
url = self._url.replace('/data/eml/', '/data/size/eml/')
r = adapter_utilities.requests_get_url_wrapper(url=url)
if r is not None:
size = int(r.text.strip())
return size
class ResourceOre(ResourceBase):
def __init__(self, doi=None, owner=None, resources=None):
super(ResourceOre,self).__init__(doi, owner)
ore_xml = _build_ore(pid=doi, resources=resources)
self._checksum_algorithm = 'SHA-1'
self._checksum_value = hashlib.sha1(ore_xml).hexdigest()
self._format_identifier = 'http://www.openarchives.org/ore/terms'
self._object = ore_xml
self._resources = None
self._size = len(ore_xml)
@property
def predecessor(self):
return self._predecessor
@predecessor.setter
def predecessor(self, doi):
self._predecessor = doi
def _build_ore(pid=None, resources=None):
data = []
data.append(resources[properties.METADATA].identifier)
data.append(resources[properties.REPORT].identifier)
for data_resource in resources[properties.DATA]:
data.append(data_resource.identifier)
ore = d1_common.resource_map.ResourceMap(base_url=properties.D1_BASE_URL)
ore.oreInitialize(pid=pid)
ore.addMetadataDocument(pid=resources[properties.METADATA].identifier)
ore.addDataDocuments(scidata_pid_list=data, scimeta_pid=resources[properties.METADATA].identifier)
return ore.serialize()
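# --- Editor's addition: hedged usage sketch (kept as comments because the
# URLs and principals are placeholders). The resources dict must use the
# keys read by _build_ore() above.
# resources = {properties.METADATA: ResourceMetadata(url=metadata_url, owner=owner),
#              properties.REPORT: ResourceReport(url=report_url, owner=owner),
#              properties.DATA: [ResourceData(url=data_url, owner=owner)]}
# ore = ResourceOre(doi='doi:10.5072/FK2/EXAMPLE', owner=owner, resources=resources)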
| [
"mark.servilla@gmail.com"
] | mark.servilla@gmail.com |
2be58aa42be5d9593ef75e0651cfefb8cbdd0f51 | 3b66632458e2463db62a800f9a0cf9e13c71a47e | /examples/template_tfe_multiple_optimizers/edflow.py | d0c88a39f566f2597d3f743824684633f16c3834 | [
"MIT"
] | permissive | pesser/edflow | eddb6d9341b861670946c157363933e9add52288 | 317cb1b61bf810a68004788d08418a5352653264 | refs/heads/dev | 2022-12-09T05:19:35.850173 | 2020-07-21T16:29:15 | 2020-07-21T16:29:15 | 146,750,121 | 27 | 15 | MIT | 2022-12-07T20:55:50 | 2018-08-30T12:59:11 | Python | UTF-8 | Python | false | false | 7,162 | py | import functools
import tensorflow as tf
tf.enable_eager_execution()
import tensorflow.keras as tfk
import numpy as np
from edflow import TemplateIterator, get_logger
class FullLatentDistribution(object):
# TODO: write some comment on where this comes from
def __init__(self, parameters, dim, stochastic=True):
self.parameters = parameters
self.dim = dim
self.stochastic = stochastic
ps = self.parameters.shape.as_list()
if len(ps) != 2:
self.expand_dims = True
self.parameters = tf.reshape(self.parameters, (ps[0], ps[3]))
ps = self.parameters.shape.as_list()
else:
self.expand_dims = False
assert len(ps) == 2
self.batch_size = ps[0]
event_dim = self.dim
n_L_parameters = (event_dim * (event_dim + 1)) // 2
size_splits = [event_dim, n_L_parameters]
self.mean, self.L = tf.split(self.parameters, size_splits, axis=1)
# L is Cholesky parameterization
self.L = tf.contrib.distributions.fill_triangular(self.L)
# make sure diagonal entries are positive by parameterizing them
# logarithmically
diag_L = tf.linalg.diag_part(self.L)
self.log_diag_L = diag_L # keep for later computation of logdet
diag_L = tf.exp(diag_L)
# scale down then set diags
row_weights = np.array([np.sqrt(i + 1) for i in range(event_dim)])
row_weights = np.reshape(row_weights, [1, event_dim, 1])
self.L = self.L / row_weights
self.L = tf.linalg.set_diag(self.L, diag_L)
self.Sigma = tf.matmul(self.L, self.L, transpose_b=True) # L times L^t
ms = self.mean.shape.as_list()
self.event_axes = list(range(1, len(ms)))
self.event_shape = ms[1:]
assert len(self.event_shape) == 1, self.event_shape
@staticmethod
def n_parameters(dim):
return dim + (dim * (dim + 1)) // 2
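    # Editor's note (worked example): for dim = 8 the distribution needs
    # 8 mean values plus 8 * 9 // 2 = 36 Cholesky entries, so
    # n_parameters(8) == 44.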
def sample(self, noise_level=1.0):
if not self.stochastic:
out = self.mean
else:
eps = noise_level * tf.random_normal([self.batch_size, self.dim, 1])
eps = tf.matmul(self.L, eps)
eps = tf.squeeze(eps, axis=-1)
out = self.mean + eps
if self.expand_dims:
out = tf.expand_dims(out, axis=1)
out = tf.expand_dims(out, axis=1)
return out
def kl(self, other=None):
if other is not None:
            raise NotImplementedError("Only KL to standard normal is implemented.")
delta = tf.square(self.mean)
diag_covar = tf.reduce_sum(tf.square(self.L), axis=2)
logdet = 2.0 * self.log_diag_L
kl = 0.5 * tf.reduce_sum(
diag_covar - 1.0 + delta - logdet, axis=self.event_axes
)
kl = tf.reduce_mean(kl)
return kl
class Model(tfk.Model):
def __init__(self, config):
super().__init__()
self.z_dim = config["z_dim"]
self.n_z_params = FullLatentDistribution.n_parameters(self.z_dim)
self.lr = config["lr"]
self.encode = tfk.Sequential(
[
tfk.layers.Dense(
1000,
kernel_initializer="he_uniform",
bias_initializer="random_uniform",
),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(
500,
kernel_initializer="he_uniform",
bias_initializer="random_uniform",
),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(
300,
kernel_initializer="he_uniform",
bias_initializer="random_uniform",
),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(
self.n_z_params,
kernel_initializer="he_uniform",
bias_initializer="random_uniform",
),
]
)
self.decode = tfk.Sequential(
[
tfk.layers.Dense(300, kernel_initializer="he_uniform"),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(500, kernel_initializer="he_uniform"),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(1000, kernel_initializer="he_uniform"),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(784, kernel_initializer="he_uniform"),
tfk.layers.Activation(tf.nn.tanh),
]
)
input_shape = (config["batch_size"], 28 ** 2)
self.build(input_shape)
self.submodels = {"decoder": self.decode, "encoder": self.encode}
def call(self, x):
x = tf.reshape(x, (-1, 28 ** 2))
posterior_params = self.encode(x)
posterior_distr = FullLatentDistribution(posterior_params, self.z_dim)
posterior_sample = posterior_distr.sample()
rec = self.decode(posterior_sample)
rec = tf.reshape(rec, (-1, 28, 28, 1))
output = {"x": x, "posterior_distr": posterior_distr, "rec": rec}
return output
class Iterator(TemplateIterator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# loss and optimizers
self.optimizers = {
submodel_name: tf.compat.v1.train.AdamOptimizer(learning_rate=self.model.lr)
for submodel_name, submodel in self.model.submodels.items()
}
# to save and restore
self.tfcheckpoint = tf.train.Checkpoint(model=self.model, **self.optimizers)
def save(self, checkpoint_path):
self.tfcheckpoint.write(checkpoint_path)
def restore(self, checkpoint_path):
self.tfcheckpoint.restore(checkpoint_path)
def step_op(self, model, **kwargs):
# get inputs
losses = {}
inputs = kwargs["image"]
# compute loss
with tf.GradientTape(persistent=True) as tape:
outputs = model(inputs)
loss = tf.reduce_mean(
tf.reduce_sum(tf.square(inputs - outputs["rec"]), axis=(1, 2, 3))
)
            loss_kl = outputs["posterior_distr"].kl()
losses["encoder"] = loss + loss_kl
losses["decoder"] = loss
def train_op():
for loss_name, loss in losses.items():
optimizer = self.optimizers[loss_name]
submodel = self.model.submodels[loss_name]
params = submodel.trainable_variables
grads = tape.gradient(loss, params)
optimizer.apply_gradients(zip(grads, params))
image_logs = {"rec": np.array(outputs["rec"]), "x": np.array(inputs)}
scalar_logs = {"loss_rec": loss, "loss_kl": loss_kl}
def log_op():
return {
"images": image_logs,
"scalars": scalar_logs,
}
def eval_op():
eval_outputs = {}
eval_outputs.update(image_logs)
return eval_outputs
return {"train_op": train_op, "log_op": log_op, "eval_op": eval_op}
| [
"supermario94123@gmail.com"
] | supermario94123@gmail.com |
1ce01c3e5eafef0398c727f2132d92cef69b14ab | 2b93a5f46980e475375e796de139ed46a53809a6 | /Functions/Calculator.py | 0095b099701cd2e5bbf751a81ce9b7acc2e6e00c | [] | no_license | ravi4all/PythonMay_11 | c9996cb0a2741a5a022c74129aa21c1f4b495aba | f0f3fb5e99a67e704df2a109a7af3d8d18010f9d | refs/heads/master | 2020-03-16T21:28:06.549963 | 2018-05-30T05:34:50 | 2018-05-30T05:34:50 | 133,000,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | def add(x,y):
z = x + y
print("Addition is",z)
def sub(x,y):
z = x - y
print("Subtraction is",z)
def div(x,y):
z = x / y
print("Division is",z)
def mul(x,y):
z = x * y
print("Multiplication is",z)
# Menu Driven Programs
print("""
1. Add
2. Sub
3. Div
4. Mul
""")
user_choice = input("Enter your choice : ")
num_1 = int(input("Enter first number : "))
num_2 = int(input("Enter second number : "))
todo = {
"1" : add,
"2" : sub,
"3" : div,
"4" : mul
}
func = todo.get(user_choice)
# dict.get returns None for a choice outside the menu, so guard first
if func is None:
    raise SystemExit("Invalid menu choice: " + user_choice)
func(num_1, num_2) | [
"noreply@github.com"
] | ravi4all.noreply@github.com |
1b168c8660752d0007441aec85f837fc3f33b6f2 | ca831a9dc9d6dc0b2cedc4d998b26600439b5f10 | /python/numpy/q4_np_concatenate.py | d47d611fb6d9b69f1a21103ef45d97b89b76e8e9 | [
"MIT"
] | permissive | mxdzi/hackerrank | c2579f4351fba5af1dec21a49485e043421c2dd8 | c8da62ac39a0c24f535eded74c102a9c0ccd7708 | refs/heads/master | 2022-12-26T20:10:36.948961 | 2022-12-08T18:27:51 | 2022-12-08T18:27:51 | 225,469,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | import numpy
def main():
N, M, P = map(int, input().split())
array1 = numpy.array([input().split() for _ in range(N)], int)
array2 = numpy.array([input().split() for _ in range(M)], int)
print(numpy.concatenate((array1, array2)))
if __name__ == "__main__":
main()
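# --- Editor's addition: equivalent call with hardcoded arrays instead of
# stdin, concatenating a 2x2 and a 1x2 array along axis 0:
# >>> numpy.concatenate((numpy.array([[1, 2], [3, 4]]), numpy.array([[5, 6]])))
# array([[1, 2],
#        [3, 4],
#        [5, 6]])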
| [
"michal@dziadowicz.it"
] | michal@dziadowicz.it |
7ba0744fc18c12981135e6b474843ce4bdb643e4 | 14bca3c05f5d8de455c16ec19ac7782653da97b2 | /lib/requests_oauthlib/oauth2_auth.py | d8712fd46c49685f885010eb48c0e21094a5eea6 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hovu96/splunk_as_a_service_app | 167f50012c8993879afbeb88a1f2ba962cdf12ea | 9da46cd4f45603c5c4f63ddce5b607fa25ca89de | refs/heads/master | 2020-06-19T08:35:21.103208 | 2020-06-16T19:07:00 | 2020-06-16T19:07:00 | 196,641,210 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,578 | py | from __future__ import unicode_literals
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import is_secure_transport
from requests.auth import AuthBase
class OAuth2(AuthBase):
"""Adds proof of authorization (OAuth2 token) to the request."""
def __init__(self, client_id=None, client=None, token=None):
"""Construct a new OAuth 2 authorization object.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param token: Token dictionary, must include access_token
and token_type.
"""
self._client = client or WebApplicationClient(client_id, token=token)
if token:
for k, v in token.items():
setattr(self._client, k, v)
def __call__(self, r):
"""Append an OAuth 2 token to the request.
Note that currently HTTPS is required for all requests. There may be
a token type that allows for plain HTTP in the future and then this
should be updated to allow plain HTTP on a white list basis.
"""
if not is_secure_transport(r.url):
raise InsecureTransportError()
r.url, r.headers, r.body = self._client.add_token(r.url,
http_method=r.method, body=r.body, headers=r.headers)
return r
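# --- Editor's addition: hedged usage sketch (kept as comments; the token
# and URL are placeholders). HTTPS is required by the transport check above.
# import requests
# token = {'access_token': 'abc', 'token_type': 'Bearer'}
# resp = requests.get('https://api.example.com/me',
#                     auth=OAuth2(client_id='my-client', token=token))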
| [
"robert.fujara@gmail.com"
] | robert.fujara@gmail.com |
4cc5fa1c5d42c3b5b3744dc1eb24a06ed4c8e10c | 95495baeb47fd40b9a7ecb372b79d3847aa7a139 | /test/test_siurl_list.py | 094c867212270e09508ac2b3c62b72a81fd28872 | [] | no_license | pt1988/fmc-api | b1d8ff110e12c13aa94d737f3fae9174578b019c | 075f229585fcf9bd9486600200ff9efea5371912 | refs/heads/main | 2023-01-07T09:22:07.685524 | 2020-10-30T03:21:24 | 2020-10-30T03:21:24 | 308,226,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | # coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: tac@cisco.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.siurl_list import SIURLList # noqa: E501
from swagger_client.rest import ApiException
class TestSIURLList(unittest.TestCase):
"""SIURLList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSIURLList(self):
"""Test SIURLList"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.siurl_list.SIURLList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"pt1988@gmail.com"
] | pt1988@gmail.com |
4b11dd4cd2213194d38521b0f83f8f3b572200c8 | d79c152d072edd6631e22f886c8beaafe45aab04 | /nicolock/users/migrations/0004_user_user_type.py | 92f9060fd70d6a9a282d30ca0b5d03f2722b99e2 | [] | no_license | kabroncelli/Nicolock | 764364de8aa146721b2678c14be808a452d7a363 | 4c4343a9117b7eba8cf1daf7241de549b9a1be3b | refs/heads/master | 2020-03-11T11:02:43.074373 | 2018-04-18T17:38:33 | 2018-04-18T17:38:33 | 129,959,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-06 23:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_companyprofile'),
]
operations = [
migrations.AddField(
model_name='user',
name='user_type',
field=models.CharField(choices=[('homeowner', 'Homeowner'), ('contractor', 'Contractor')], default='contractor', max_length=12, verbose_name='user type'),
preserve_default=False,
),
]
| [
"brennen@lightningkite.com"
] | brennen@lightningkite.com |
51b4d394824411e8488ff400df0a553116936ee9 | 9d3b8d5f45e5407f3275542cf5792fd2510abfe4 | /Chapter8-Practice/test_8.3.3.py | be0013a975a93a720f793d9350d06c6bec324a9c | [] | no_license | Beautyi/PythonPractice | 375767583870d894801013b775c493bbd3c36ebc | 9104006998a109dcab0848d5540fb963b20f5b02 | refs/heads/master | 2020-04-23T09:58:50.065403 | 2019-04-08T02:55:52 | 2019-04-08T02:55:52 | 171,088,504 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #返回字典
def build_person(first_name, last_name):
"""返回一个字典,包含一个人的信息"""
person = {'first': first_name, 'last': last_name}
return person
musician = build_person('jimi', 'hendrix')
print(musician)
def build_person(first_name, last_name, age=''):
    """Return a dictionary of information about a person."""
person = {'first': first_name, 'last': last_name, 'age': age}
return person
musician = build_person('jimi', 'hendrix', '27')
print(musician)
def build_person(first_name, last_name, age=''):
    """Return a dictionary of information about a person."""
person = {'first': first_name, 'last': last_name, 'age': age}
if age:
person['age'] = age
return person
musician = build_person('jimi', 'hendrix', age=27)
print(musician)
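# Editor's addition: omitting the optional age leaves it as the falsy
# default '', so the if-branch above is skipped.
musician = build_person('jimi', 'hendrix')
print(musician)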
| [
"1210112866@qq.com"
] | 1210112866@qq.com |
262f3516ed9b97ec15c6b602731188fef2efd36d | aee7a6cca6a2674f044d7a1cacf7c72d7438b8b1 | /cup_skills/stats/average_rewardtest_score.py | c332ccef793a04daac6ef2eea3f62649d5182885 | [] | no_license | lagrassa/rl-erase | efd302526504c1157fa5810e886caccba8570f1b | 0df5c8ce4835c4641a2303d11095e9c27307f754 | refs/heads/master | 2021-05-13T13:36:12.901945 | 2019-08-01T02:13:15 | 2019-08-01T02:13:15 | 116,709,555 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | 16.363636363636363,46.36363636363636,28.18181818181818,20.909090909090907,18.181818181818183,38.18181818181819,52.72727272727272,31.818181818181817,2.727272727272727,30.909090909090907,17.272727272727273,17.272727272727273,49.09090909090909,11.818181818181818,50.0,27.27272727272727,46.36363636363636,31.818181818181817,16.363636363636363,1.8181818181818181,38.18181818181819,51.81818181818182,41.81818181818181,19.090909090909093,30.909090909090907,23.636363636363637,52.72727272727272,11.818181818181818,16.363636363636363,27.27272727272727,30.909090909090907,46.36363636363636,13.636363636363635,50.90909090909091,44.54545454545455,50.90909090909091,26.36363636363636,30.0,52.72727272727272,49.09090909090909,11.818181818181818,20.909090909090907,49.09090909090909,49.09090909090909,14.545454545454545,30.909090909090907,51.81818181818182,16.363636363636363,50.90909090909091,44.54545454545455,27.27272727272727,50.90909090909091,7.2727272727272725,22.727272727272727,24.545454545454547,46.36363636363636,32.72727272727273,32.72727272727273,52.72727272727272,23.636363636363637,48.18181818181818,18.181818181818183,51.81818181818182,30.909090909090907,25.454545454545453,47.27272727272727,44.54545454545455,28.18181818181818,16.363636363636363,12.727272727272727,23.636363636363637,51.81818181818182,30.909090909090907,23.636363636363637,50.0,10.0,36.36363636363637,48.18181818181818,42.72727272727273,16.363636363636363,32.72727272727273,40.0,11.818181818181818,48.18181818181818,12.727272727272727,33.63636363636363,50.90909090909091,21.818181818181817,29.09090909090909,49.09090909090909,18.181818181818183,50.0,47.27272727272727,25.454545454545453,16.363636363636363,30.0,38.18181818181819,16.363636363636363,24.545454545454547,11.818181818181818,20.0,22.727272727272727,49.09090909090909,30.909090909090907,52.72727272727272,48.18181818181818,20.909090909090907,35.45454545454545,27.27272727272727,36.36363636363637,42.72727272727273,24.545454545454547,25.454545454545453,45.45454545454545,47.27272727272727,27.27272727272727,27.27272727272727,48.18181818181818,31.818181818181817,32.72727272727273, | [
"lagrassa@mit.edu"
] | lagrassa@mit.edu |
20407bfda932d7e6b053febedd6a5e1883e14e76 | b3c070597742904f963f44414e9195511770520b | /venv/lib/python3.8/site-packages/openapi_client/models/first_last_name_origined_out.py | 2563afe3a3c76fa871e515944e62259fec07abc6 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | akshitgoyal/NLP-Research-Project | 7d98cf0bccd8fdfcc13a23e5f17fcc703aa4b565 | 6adf80cb7fa3737f88faf73a6e818da495b95ab4 | refs/heads/master | 2022-12-11T05:51:08.601512 | 2020-09-03T18:05:56 | 2020-09-03T18:05:56 | 270,881,124 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,677 | py | # coding: utf-8
"""
NamSor API v2
    NamSor API v2 : endpoints to process personal names (gender, cultural origin or ethnicity) in all alphabets or languages. Use GET methods for small tests, but prefer POST methods for higher throughput (batch processing of up to 100 names at a time). Need something you can't find here? We have many more features coming soon. Let us know, we'll do our best to add it! # noqa: E501
OpenAPI spec version: 2.0.10
Contact: contact@namsor.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class FirstLastNameOriginedOut(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'first_name': 'str',
'last_name': 'str',
'country_origin': 'str',
'country_origin_alt': 'str',
'countries_origin_top': 'list[str]',
'score': 'float',
'region_origin': 'str',
'top_region_origin': 'str',
'sub_region_origin': 'str',
'probability_calibrated': 'float',
'probability_alt_calibrated': 'float'
}
attribute_map = {
'id': 'id',
'first_name': 'firstName',
'last_name': 'lastName',
'country_origin': 'countryOrigin',
'country_origin_alt': 'countryOriginAlt',
'countries_origin_top': 'countriesOriginTop',
'score': 'score',
'region_origin': 'regionOrigin',
'top_region_origin': 'topRegionOrigin',
'sub_region_origin': 'subRegionOrigin',
'probability_calibrated': 'probabilityCalibrated',
'probability_alt_calibrated': 'probabilityAltCalibrated'
}
def __init__(self, id=None, first_name=None, last_name=None, country_origin=None, country_origin_alt=None, countries_origin_top=None, score=None, region_origin=None, top_region_origin=None, sub_region_origin=None, probability_calibrated=None, probability_alt_calibrated=None): # noqa: E501
"""FirstLastNameOriginedOut - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._first_name = None
self._last_name = None
self._country_origin = None
self._country_origin_alt = None
self._countries_origin_top = None
self._score = None
self._region_origin = None
self._top_region_origin = None
self._sub_region_origin = None
self._probability_calibrated = None
self._probability_alt_calibrated = None
self.discriminator = None
if id is not None:
self.id = id
if first_name is not None:
self.first_name = first_name
if last_name is not None:
self.last_name = last_name
if country_origin is not None:
self.country_origin = country_origin
if country_origin_alt is not None:
self.country_origin_alt = country_origin_alt
if countries_origin_top is not None:
self.countries_origin_top = countries_origin_top
if score is not None:
self.score = score
if region_origin is not None:
self.region_origin = region_origin
if top_region_origin is not None:
self.top_region_origin = top_region_origin
if sub_region_origin is not None:
self.sub_region_origin = sub_region_origin
if probability_calibrated is not None:
self.probability_calibrated = probability_calibrated
if probability_alt_calibrated is not None:
self.probability_alt_calibrated = probability_alt_calibrated
@property
def id(self):
"""Gets the id of this FirstLastNameOriginedOut. # noqa: E501
:return: The id of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this FirstLastNameOriginedOut.
:param id: The id of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._id = id
@property
def first_name(self):
"""Gets the first_name of this FirstLastNameOriginedOut. # noqa: E501
:return: The first_name of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""Sets the first_name of this FirstLastNameOriginedOut.
:param first_name: The first_name of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._first_name = first_name
@property
def last_name(self):
"""Gets the last_name of this FirstLastNameOriginedOut. # noqa: E501
:return: The last_name of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""Sets the last_name of this FirstLastNameOriginedOut.
:param last_name: The last_name of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._last_name = last_name
@property
def country_origin(self):
"""Gets the country_origin of this FirstLastNameOriginedOut. # noqa: E501
Most likely country of Origin # noqa: E501
:return: The country_origin of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._country_origin
@country_origin.setter
def country_origin(self, country_origin):
"""Sets the country_origin of this FirstLastNameOriginedOut.
Most likely country of Origin # noqa: E501
:param country_origin: The country_origin of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._country_origin = country_origin
@property
def country_origin_alt(self):
"""Gets the country_origin_alt of this FirstLastNameOriginedOut. # noqa: E501
Second best alternative : country of Origin # noqa: E501
:return: The country_origin_alt of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._country_origin_alt
@country_origin_alt.setter
def country_origin_alt(self, country_origin_alt):
"""Sets the country_origin_alt of this FirstLastNameOriginedOut.
Second best alternative : country of Origin # noqa: E501
:param country_origin_alt: The country_origin_alt of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._country_origin_alt = country_origin_alt
@property
def countries_origin_top(self):
"""Gets the countries_origin_top of this FirstLastNameOriginedOut. # noqa: E501
List countries of Origin (top 10) # noqa: E501
:return: The countries_origin_top of this FirstLastNameOriginedOut. # noqa: E501
:rtype: list[str]
"""
return self._countries_origin_top
@countries_origin_top.setter
def countries_origin_top(self, countries_origin_top):
"""Sets the countries_origin_top of this FirstLastNameOriginedOut.
List countries of Origin (top 10) # noqa: E501
:param countries_origin_top: The countries_origin_top of this FirstLastNameOriginedOut. # noqa: E501
:type: list[str]
"""
self._countries_origin_top = countries_origin_top
@property
def score(self):
"""Gets the score of this FirstLastNameOriginedOut. # noqa: E501
Compatibility to NamSor_v1 Origin score value # noqa: E501
:return: The score of this FirstLastNameOriginedOut. # noqa: E501
:rtype: float
"""
return self._score
@score.setter
def score(self, score):
"""Sets the score of this FirstLastNameOriginedOut.
Compatibility to NamSor_v1 Origin score value # noqa: E501
:param score: The score of this FirstLastNameOriginedOut. # noqa: E501
:type: float
"""
self._score = score
@property
def region_origin(self):
"""Gets the region_origin of this FirstLastNameOriginedOut. # noqa: E501
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:return: The region_origin of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._region_origin
@region_origin.setter
def region_origin(self, region_origin):
"""Sets the region_origin of this FirstLastNameOriginedOut.
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:param region_origin: The region_origin of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._region_origin = region_origin
@property
def top_region_origin(self):
"""Gets the top_region_origin of this FirstLastNameOriginedOut. # noqa: E501
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:return: The top_region_origin of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._top_region_origin
@top_region_origin.setter
def top_region_origin(self, top_region_origin):
"""Sets the top_region_origin of this FirstLastNameOriginedOut.
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:param top_region_origin: The top_region_origin of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._top_region_origin = top_region_origin
@property
def sub_region_origin(self):
"""Gets the sub_region_origin of this FirstLastNameOriginedOut. # noqa: E501
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:return: The sub_region_origin of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._sub_region_origin
@sub_region_origin.setter
def sub_region_origin(self, sub_region_origin):
"""Sets the sub_region_origin of this FirstLastNameOriginedOut.
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:param sub_region_origin: The sub_region_origin of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._sub_region_origin = sub_region_origin
@property
def probability_calibrated(self):
"""Gets the probability_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:return: The probability_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:rtype: float
"""
return self._probability_calibrated
@probability_calibrated.setter
def probability_calibrated(self, probability_calibrated):
"""Sets the probability_calibrated of this FirstLastNameOriginedOut.
:param probability_calibrated: The probability_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:type: float
"""
self._probability_calibrated = probability_calibrated
@property
def probability_alt_calibrated(self):
"""Gets the probability_alt_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:return: The probability_alt_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:rtype: float
"""
return self._probability_alt_calibrated
@probability_alt_calibrated.setter
def probability_alt_calibrated(self, probability_alt_calibrated):
"""Sets the probability_alt_calibrated of this FirstLastNameOriginedOut.
:param probability_alt_calibrated: The probability_alt_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:type: float
"""
self._probability_alt_calibrated = probability_alt_calibrated
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FirstLastNameOriginedOut):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
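# A minimal usage sketch (not part of the generated module); the attribute
# values below are hypothetical:
#
#     result = FirstLastNameOriginedOut(id='1', first_name='Keith',
#                                       last_name='Haring',
#                                       country_origin='US', score=0.93)
#     print(result.to_str())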
| [
"goyalakshit.ag@gmail.com"
] | goyalakshit.ag@gmail.com |
77bd1762c4aaac19096157edc60a32d1f6d81374 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/uribv6/db.py | b29be36943d31f0ffadcc44b5729663096da2e21 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 4,200 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Db(Mo):
"""
"""
meta = ClassMeta("cobra.model.uribv6.Db")
meta.moClassName = "uribv6Db"
meta.rnFormat = "db-%(type)s"
meta.category = MoCategory.REGULAR
meta.label = "Database"
meta.writeAccessMask = 0x8008421042001
meta.readAccessMask = 0x8008421042001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.uribv6.Route")
meta.childNamesAndRnPrefix.append(("cobra.model.uribv6.Route", "rt-"))
meta.parentClasses.add("cobra.model.uribv6.Dom")
meta.superClasses.add("cobra.model.l3.Db")
meta.superClasses.add("cobra.model.nw.Db")
meta.superClasses.add("cobra.model.nw.Conn")
meta.superClasses.add("cobra.model.rib.Db")
meta.superClasses.add("cobra.model.nw.GEp")
meta.superClasses.add("cobra.model.nw.Item")
meta.rnPrefixes = [
('db-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 16436, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 128)]
meta.props.add("name", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 17496, PropCategory.REGULAR)
prop.label = "Type"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.defaultValue = 1
prop.defaultValueStr = "rt"
prop._addConstant("adj", "adjacency-database", 3)
prop._addConstant("nh", "nexthop-database", 2)
prop._addConstant("rt", "route-database", 1)
meta.props.add("type", prop)
meta.namingProps.append(getattr(meta.props, "type"))
def __init__(self, parentMoOrDn, type, markDirty=True, **creationProps):
namingVals = [type]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
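# A minimal usage sketch (the parent DN below is hypothetical); the naming
# property "type" selects which of the three databases is modeled:
#
#     db = Db('sys/uribv6/dom-default', 'rt')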
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
3a5e3a9076882a87027c00689734bedef960925d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03425/s478084192.py | 7e20b760b92578dea372ba9ffdd4d4f5431cd5bc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from itertools import combinations
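# Count the ways to choose three people whose names begin with three distinct
# letters among M, A, R, C and H: tally the names per initial, then sum the
# products of the tallies over every 3-letter combination.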
N = int(input())
A = [0 for _ in range(5)]
for _ in range(N):
a = input().strip()
if a[0]=="M":
A[0] += 1
elif a[0]=="A":
A[1] += 1
elif a[0]=="R":
A[2] += 1
elif a[0]=="C":
A[3] += 1
elif a[0]=="H":
A[4] += 1
cnt = 0
for x in combinations(range(5),3):
cnt += A[x[0]]*A[x[1]]*A[x[2]]
print(cnt) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8e9f3948ab5c1e5a196edddb2fe11e19304dd0e5 | 1cd0c5706f5afcabccf28b59c15d306b114dd82a | /siteapi/migrations/0004_auto_20170220_2215.py | b448ea88ca4bcdc7a5f1e878f3326138726cde40 | [] | no_license | jacobbridges/scribbli | fb1ed8633fc8ebcd7d989fbab2e051612bdc07d2 | eb21ca9f5ee4c7caba5a25b76c6cdfe81af5d995 | refs/heads/master | 2021-01-12T10:32:38.466332 | 2018-01-27T19:48:39 | 2018-01-27T19:48:39 | 81,711,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-20 22:15
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('siteapi', '0003_auto_20170220_2213'),
]
operations = [
migrations.AlterField(
model_name='alphainvitation',
name='date_expires',
field=models.DateTimeField(default=datetime.datetime(2017, 2, 21, 22, 15, 51, 651428), verbose_name='date expires'),
),
migrations.AlterField(
model_name='alphainvitation',
name='unik',
field=models.CharField(max_length=36),
),
]
| [
"him@jacobandkate143.com"
] | him@jacobandkate143.com |
beb923b2521bb0f9e00e5a892115a68855650a54 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/servicebus/aaz/latest/servicebus/namespace/private_link_resource/_show.py | eae4030227ceaa104f1f40abb9b01954ee7cdcc9 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 6,208 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"servicebus namespace private-link-resource show",
)
class Show(AAZCommand):
"""List lists of resources that supports Privatelinks.
"""
_aaz_info = {
"version": "2022-10-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicebus/namespaces/{}/privatelinkresources", "2022-10-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.namespace_name = AAZStrArg(
options=["--namespace-name"],
help="The namespace name",
required=True,
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.PrivateLinkResourcesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class PrivateLinkResourcesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateLinkResources",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-10-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType()
properties = cls._schema_on_200.value.Element.properties
properties.group_id = AAZStrType(
serialized_name="groupId",
)
properties.required_members = AAZListType(
serialized_name="requiredMembers",
)
properties.required_zone_names = AAZListType(
serialized_name="requiredZoneNames",
)
required_members = cls._schema_on_200.value.Element.properties.required_members
required_members.Element = AAZStrType()
required_zone_names = cls._schema_on_200.value.Element.properties.required_zone_names
required_zone_names.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"]
| [
"noreply@github.com"
] | Azure.noreply@github.com |
84c9288217995f2610547ebe92e52a2f6e69d003 | d5ee3688c0df793a765aa7fca25253ef450b82e9 | /src/scs_mfr/opc_conf.py | a821e3faf7068277766ea4005206cbf8c299d8b0 | [
"MIT"
] | permissive | seoss/scs_mfr | 0e85146c57dfefd605967e7dd54c666bfefddf74 | 997dd2b57160df30ef8750abed7efa87831e4c66 | refs/heads/master | 2023-01-20T23:58:16.547082 | 2020-11-27T09:40:20 | 2020-11-27T09:40:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,256 | py | #!/usr/bin/env python3
"""
Created on 13 Jul 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
DESCRIPTION
The opc_conf utility is used to specify whether an Alphasense optical particle counter (OPC) is present and if so,
which model is attached. An option is also available to override the host's default SPI bus and SPI chip select
lines for the OPC.
The specification also includes the number of seconds between readings by the OPC monitor sub-process. The maximum
time between readings is 10 seconds, the minimum five. A 10 second period provides the highest precision, but sampling
at this rate may be subject to clipping in extremely polluted environments.
The --restart-on-zeroes flag can be used to test the OPC in some situations, by overriding the default behaviour,
which is to restart the OPC if repeated zero readings are presented.
Flags are included to add or remove data interpretation exegetes, together with the source of T / rH readings.
Use of these is under development.
Sampling is performed by the scs_dev/particulates_sampler utility. If an opc_conf.json document is not present, the
scs_dev/particulates_sampler utility terminates.
Note that the scs_dev/particulates_sampler process must be restarted for changes to take effect.
The Alphasense OPC-N2, OPC-N3, OPC-R1, and Sensirion SPS30 models are supported.
Alternate exegetes (data interpretation models) can be added or removed - available interpretations can be listed with
the --help flag.
SYNOPSIS
opc_conf.py [-n NAME] [{ [-m MODEL] [-s SAMPLE_PERIOD] [-z { 0 | 1 }] [-p { 0 | 1 }]
[-b BUS] [-a ADDRESS] [-i INFERENCE_UDS] [-e EXEGETE] [-r EXEGETE] | -d }] [-v]
EXAMPLES
./opc_conf.py -m N2 -b 0 -a 1 -e ISLin/Urban/N2/v1
./opc_conf.py -m S30 -b 1
DOCUMENT EXAMPLE
{"model": "N3", "sample-period": 10, "restart-on-zeroes": true, "power-saving": false,
"inf": "/home/scs/SCS/pipes/lambda-model-pmx-s1.uds", "exg": []}
FILES
~/SCS/conf/opc_conf.json
SEE ALSO
scs_dev/particulates_sampler
scs_mfr/opc_cleaning_interval
REFERENCES
https://github.com/south-coast-science/scs_core/blob/develop/src/scs_core/particulate/exegesis/exegete_catalogue.py
BUGS
The specification allows for a power saving mode - which enables the OPC to shut down between readings - but
this is not currently implemented.
"""
import sys
from scs_core.data.json import JSONify
from scs_dfe.particulate.opc_conf import OPCConf
from scs_host.sys.host import Host
from scs_mfr.cmd.cmd_opc_conf import CmdOPCConf
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
incompatibles = []
# ----------------------------------------------------------------------------------------------------------------
# cmd...
cmd = CmdOPCConf()
if not cmd.is_valid():
cmd.print_help(sys.stderr)
exit(2)
if cmd.verbose:
print("opc_conf: %s" % cmd, file=sys.stderr)
sys.stderr.flush()
# ----------------------------------------------------------------------------------------------------------------
# resources...
# OPCConf...
conf = OPCConf.load(Host, name=cmd.name)
# ----------------------------------------------------------------------------------------------------------------
# run...
if cmd.set():
if conf is None and not cmd.is_complete():
print("opc_conf: No configuration is stored - you must therefore set the required fields.",
file=sys.stderr)
cmd.print_help(sys.stderr)
exit(2)
model = cmd.model if cmd.model else conf.model
sample_period = cmd.sample_period if cmd.sample_period else conf.sample_period
restart_on_zeroes = cmd.restart_on_zeroes if cmd.restart_on_zeroes is not None else conf.restart_on_zeroes
power_saving = cmd.power_saving if cmd.power_saving is not None else conf.power_saving
if conf is None:
conf = OPCConf(None, 10, True, False, None, None, None, []) # permit None for bus and address settings
bus = conf.bus if cmd.bus is None else cmd.bus
address = conf.address if cmd.address is None else cmd.address
inference = conf.inference if cmd.inference is None else cmd.inference
conf = OPCConf(model, sample_period, restart_on_zeroes, power_saving,
bus, address, inference, conf.exegete_names)
if cmd.use_exegete:
conf.add_exegete(cmd.use_exegete)
if cmd.remove_exegete:
conf.discard_exegete(cmd.remove_exegete)
# compatibility check...
try:
incompatibles = conf.incompatible_exegetes()
except KeyError as ex:
print("opc_conf: The following exegete is not valid: %s." % ex, file=sys.stderr)
exit(1)
if incompatibles:
print("opc_conf: The following exegetes are not compatible with %s: %s." %
(conf.model, ', '.join(incompatibles)),
file=sys.stderr)
exit(1)
conf.save(Host)
elif cmd.delete:
conf.delete(Host, name=cmd.name)
conf = None
if conf:
print(JSONify.dumps(conf))
| [
"bruno.beloff@southcoastscience.com"
] | bruno.beloff@southcoastscience.com |
ad5ae115186a694489f6794a6279b0b75e037ee8 | 051c3ee44478265c4510530888335335ec9f7fdf | /ML_Applications/SVM/Mutants/code/SVM_rbf/DigitRecognitionApp_47.py | 2be1dd52e2ea4cb532d52028ef938535d52fe789 | [] | no_license | PinjiaHe/VerifyML | b581c016012c62d8439adfce0caef4f098b36d5e | 3bd7c49e45720c1cdfe0af4ac7dd35b201056e65 | refs/heads/master | 2020-03-25T19:40:39.996370 | 2018-01-30T08:58:58 | 2018-01-30T08:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | """
Created on Fri May 26 15:20:01 2017
#Digit Recognition for V & V
#Following note added by RR
Note:
1. The actual digits data from the http://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits is different than the one referred in this sklearn example
2. For more info, refer this link http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html and the above one.
3. The digits data referred by this Sklearn example can be downloaded from the following link.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/data/digits.csv.gz
"""
import matplotlib.pyplot as plt
from sklearn import datasets, svm, metrics
import numpy as np
import _pickle as cPickle
digits = np.loadtxt('digits_Train.csv', delimiter=',')
digits_images_flat = digits[:,:(-1)]
digits_images = digits_images_flat.view()
digits_images.shape = ((-1), 8, 8)
digits_target = digits[:,(-1)].astype(np.int)
digits_test = np.loadtxt('digits_Test.csv', delimiter=',')
digits_test_images_flat = digits_test[:,:(-1)]
digits_test_images = digits_test_images_flat.view()
digits_test_images.shape = ((-1), 8, 8)
digits_test_target = digits_test[:,(-1)].astype(np.int)
images_and_labels = list(zip(digits_images, digits_target))
n_samples = len(digits_images)
classifier = svm.SVC(gamma=0.001)
classifier.fit(digits_images_flat, digits_target)
expected = digits_test_target
predicted = classifier.predict(digits_test_images_flat)
print('Classification report for classifier %s:\n%s\n' % (
classifier, metrics.classification_report(expected, predicted)))
print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected, predicted))
print("accuracy:", metrics.accuracy_score(expected, predicted))
images_and_predictions = list(zip(digits_test_images, predicted))
np.savetxt('output.txt', classifier.decision_function(digits_test_images_flat))
outputData = {'data_array': metrics.confusion_matrix(expected, predicted)}
with open('output.pkl', 'wb') as outputFile:
cPickle.dump(outputData, outputFile)
with open('model.pkl', 'wb') as modelFile:
cPickle.dump(classifier, modelFile) | [
"anurag.bms@gmail.com"
] | anurag.bms@gmail.com |
e23bc12419592f4b9956c4150d64796a12d4900f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03329/s950503139.py | 06a2ed1899ce879a8061ac47bf453dca06be7b16 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | import sys
read = sys.stdin.read
readlines = sys.stdin.readlines
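# Minimum number of withdrawals needed to total n when each withdrawal is
# 1 yen or a power of 6 or 9: an unbounded coin-change DP. dp is seeded with
# dp[i] = i (1-yen steps only) and relaxed once per larger denomination; the
# table holds 2*n+1 entries so dp[j1+num] never goes out of range.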
def main():
n = int(input())
nums = []
n6 = 6
while n6 <= n:
nums.append(n6)
n6 = n6 * 6
n9 = 9
while n9 <= n:
nums.append(n9)
n9 = n9 * 9
nums.sort(reverse=True)
dp = [i for i in range(2 * n + 1)]
for num in nums:
for j1 in range(n + 1):
dp[j1+num] = min(dp[j1+num], dp[j1] + 1)
print(dp[n])
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6b31c5782ba2db81a6a2b0b105aa3a0552dcb4ad | 0e4519d3a94157a419e56576875aec1da906f578 | /Python_200Q/051_/Q059.py | 110df8a421ca7cff330c0a2f0054d5279bd7f11d | [] | no_license | ivorymood/TIL | 0de3b92861e345375e87d01654d1fddf940621cd | 1f09e8b1f4df7c205c68eefd9ab02d17a85d140a | refs/heads/master | 2021-02-23T17:30:50.406370 | 2020-10-02T06:43:25 | 2020-10-02T06:43:25 | 245,388,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | import time
count = 1
try:
while True:
print(count)
count += 1
time.sleep(0.5)
# The error raised when Ctrl + C is pressed
except KeyboardInterrupt:
    print('The program was interrupted by the user') | [
"ivorymood@gmail.com"
] | ivorymood@gmail.com |
6a2b5689eeaab249fbbcd16268bfeaf37add46d9 | a5a2abaf5c7a681ebea71b4034d7b12dbd750455 | /examens/migrations/0002_auto_20160210_0540.py | 4a24c8b6dac1ce2a87efbeacc069d098d652c98a | [
"BSD-3-Clause"
] | permissive | matinfo/dezede | e8be34a5b92f8e793a96396f7ec4ec880e7817ff | 829ba8c251a0301741460e6695438be52d04a2fc | refs/heads/master | 2020-03-15T11:25:56.786137 | 2018-04-23T13:47:20 | 2018-04-23T13:47:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,213 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
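# Each LEVELS_DATA entry pairs a level number with the primary keys of the
# libretto Source records attached to that level.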
LEVELS_DATA = (
(1, (6107, 10442, 10531)),
(2, (2253, 12468, 12469)),
(3, (10603, 8167, 10447)),
(4, (8280, 15117)),
(5, (3412, 14)),
(6, (2256,)),
)
LEVELS_HELPS = {
1: """
<p>
L’exercice consiste à transcrire un ensemble de sources
de manière diplomatique.
Il comporte six étapes à la difficulté croissante.
Chaque étape est validée lorsque le texte saisi
correspond exactement au texte contenu dans la source.
</p>
<p>
Pour ce type de transcription, il est important de respecter
le texte de la source : graphie fautive, style (capitales,
petites capitales, etc.), abréviations, ponctuation.
Deux exceptions sont admises :
</p>
<ul>
<li>
l’accentuation doit être rétablie suivant l’usage moderne
(y compris sur les majuscules) ;
</li>
<li>
la justification ne doit pas être respectée :
vous devez aligner le texte à gauche.
</li>
</ul>
""",
2: """
<p>
Mêmes règles que pour la première étape.
On insiste cette fois-ci sur le respect des styles
(capitales, petites capitales, italique, gras, exposant).
</p>
""",
3: """
<p>
Dans une transcription diplomatique, l’usage est de respecter
les graphies fautives. Dans ce cas, le mot erroné doit être suivi
de la locution latine « sic » en italique et entre crochets carrés.
Par exemple : « Beethowen [<em>sic</em>] ».
</p>
""",
4: """<p>Combinaison des règles précédentes.</p>""",
5: """
<p>
Combinaison des règles précédentes sur une transcription plus longue.
</p>
<p>
Certaines fautes apparentes pour un lecteur d’aujourd’hui sont en fait
des usages d’orthographe de l’époque.
Par exemple, on écrivait indifféremment « accents » ou « accens »
pour le pluriel d’« accent ».
</p>
<p>Conservez l’orthographe des noms propres.</p>
""",
6: """
<p>
Utilisez les outils de tableau de l’éditeur de texte
pour obtenir un tableau sans bordure.
Ne pas inclure les points servant de guides dans le tableau.
</p>
""",
}
def add_levels(apps, schema_editor):
Level = apps.get_model('examens.Level')
LevelSource = apps.get_model('examens.LevelSource')
Source = apps.get_model('libretto.Source')
level_sources = []
for level_number, source_ids in LEVELS_DATA:
level = Level.objects.create(
number=level_number, help_message=LEVELS_HELPS[level_number])
for pk in source_ids:
try:
source = Source.objects.get(pk=pk)
except Source.DoesNotExist:
continue
level_sources.append(LevelSource(level=level, source=source))
LevelSource.objects.bulk_create(level_sources)
class Migration(migrations.Migration):
dependencies = [
('examens', '0001_initial'),
]
operations = [
migrations.RunPython(add_levels),
]
| [
"bordage.bertrand@gmail.com"
] | bordage.bertrand@gmail.com |
b4631acdfaeba6632543932c6d6b336b5eb9fa7f | 2485f7d6e12daa2c29926a7c87e2ab18f951a107 | /pypilot/signalk.py | a49f9d16378428197f19f51b84213e2e9ee31e36 | [] | no_license | mielnicz/pypilot | add65367b9b1d2630bad463aa82ce6463e177147 | f0c9b2d2c8a1107a0f114ebe528af2beee7ad161 | refs/heads/master | 2023-07-13T15:58:08.306890 | 2020-03-16T06:24:02 | 2020-03-16T06:24:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,095 | py | #!/usr/bin/env python
#
# Copyright (C) 2020 Sean D'Epagnier
#
# This Program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
import time, socket, multiprocessing, os
from nonblockingpipe import NonBlockingPipe
import pyjson
from client import pypilotClient
from values import Property, RangeProperty
from sensors import source_priority
signalk_priority = source_priority['signalk']
radians = 3.141592653589793/180
meters_s = 0.5144456333854638
# provide bi-directional translation of these keys
signalk_table = {'wind': {('environment.wind.speedApparent', meters_s): 'speed',
('environment.wind.angleApparent', radians): 'direction'},
'gps': {('navigation.courseOverGroundTrue', radians): 'track',
('navigation.speedOverGround', meters_s): 'speed',
('navigation.position', 1): {'latitude': 'lat', 'longitude': 'lon'}},
'rudder': {('steering.rudderAngle', radians): 'angle'},
'apb': {('steering.autopilot.target.headingTrue', radians): 'track'},
'imu': {('navigation.headingMagnetic', radians): 'heading_lowpass',
('navigation.attitude', radians): {'pitch': 'pitch', 'roll': 'roll', 'yaw': 'heading_lowpass'}}}
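# Example of how one table entry is applied: an incoming Signal K delta for
# 'environment.wind.speedApparent' carries metres per second, so the receive
# path divides by meters_s to get wind.speed in knots, and the send path
# multiplies the same factor back in when publishing pypilot values.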
token_path = os.getenv('HOME') + '/.pypilot/signalk-token'
def debug(*args):
#print(*args)
pass
class signalk(object):
def __init__(self, sensors=False):
self.sensors = sensors
if not sensors: # only signalk process for testing
self.client = pypilotClient()
self.multiprocessing = False
else:
server = sensors.client.server
self.multiprocessing = server.multiprocessing
self.client = pypilotClient(server)
self.initialized = False
self.missingzeroconfwarned = False
self.signalk_access_url = False
self.last_access_request_time = 0
self.sensors_pipe, self.sensors_pipe_out = NonBlockingPipe('signalk pipe', self.multiprocessing)
if self.multiprocessing:
import multiprocessing
self.process = multiprocessing.Process(target=self.process, daemon=True)
self.process.start()
else:
self.process = False
def setup(self):
try:
f = open(token_path)
self.token = f.read()
            print('signalk ' + _('read token'), self.token)
f.close()
except Exception as e:
print('signalk ' + _('failed to read token'), token_path)
self.token = False
try:
from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf
except Exception as e:
if not self.missingzeroconfwarned:
print('signalk: ' + _('failed to') + ' import zeroconf, ' + _('autodetection not possible'))
                print(_('try') + ' pip3 install zeroconf ' + _('or') + ' apt install python3-zeroconf')
self.missingzeroconfwarned = True
time.sleep(20)
return
self.last_values = {}
self.last_sources = {}
self.signalk_last_msg_time = {}
# store certain values across parsing invocations to ensure
# all of the keys are filled with the latest data
self.last_values_keys = {}
for sensor in signalk_table:
for signalk_path_conversion, pypilot_path in signalk_table[sensor].items():
signalk_path, signalk_conversion = signalk_path_conversion
if type(pypilot_path) == type({}): # single path translates to multiple pypilot
self.last_values_keys[signalk_path] = {}
self.period = self.client.register(RangeProperty('signalk.period', .5, .1, 2, persistent=True))
self.uid = self.client.register(Property('signalk.uid', 'pypilot', persistent=True))
self.signalk_host_port = False
self.signalk_ws_url = False
self.ws = False
class Listener:
def __init__(self, signalk):
self.signalk = signalk
self.name_type = False
def remove_service(self, zeroconf, type, name):
print('signalk zeroconf ' + _('service removed'), name, type)
if self.name_type == (name, type):
self.signalk.signalk_host_port = False
self.signalk.disconnect_signalk()
print('signalk ' + _('server lost'))
def update_service(self, zeroconf, type, name):
self.add_service(zeroconf, type, name)
def add_service(self, zeroconf, type, name):
print('signalk zeroconf ' + _('service add'), name, type)
self.name_type = name, type
info = zeroconf.get_service_info(type, name)
if not info:
return
properties = {}
for name, value in info.properties.items():
properties[name.decode()] = value.decode()
if 'swname' in properties and properties['swname'] == 'signalk-server':
try:
host_port = socket.inet_ntoa(info.addresses[0]) + ':' + str(info.port)
except Exception as e:
host_port = socket.inet_ntoa(info.address) + ':' + str(info.port)
self.signalk.signalk_host_port = host_port
print('signalk ' + _('server found'), host_port)
zeroconf = Zeroconf()
listener = Listener(self)
browser = ServiceBrowser(zeroconf, "_http._tcp.local.", listener)
#zeroconf.close()
self.initialized = True
def probe_signalk(self):
print('signalk ' + _('probe') + '...', self.signalk_host_port)
try:
import requests
except Exception as e:
print('signalk ' + _('could not') + ' import requests', e)
print(_('try') + " 'sudo apt install python3-requests' " + _('or') + " 'pip3 install requests'")
time.sleep(50)
return
try:
r = requests.get('http://' + self.signalk_host_port + '/signalk')
contents = pyjson.loads(r.content)
self.signalk_ws_url = contents['endpoints']['v1']['signalk-ws'] + '?subscribe=none'
except Exception as e:
print(_('failed to retrieve/parse data from'), self.signalk_host_port, e)
time.sleep(5)
self.signalk_host_port = False
return
print('signalk ' + _('found'), self.signalk_ws_url)
def request_access(self):
import requests
if self.signalk_access_url:
dt = time.monotonic() - self.last_access_request_time
if dt < 10:
return
self.last_access_request_time = time.monotonic()
try:
r = requests.get(self.signalk_access_url)
contents = pyjson.loads(r.content)
print('signalk ' + _('see if token is ready'), self.signalk_access_url, contents)
if contents['state'] == 'COMPLETED':
if 'accessRequest' in contents:
access = contents['accessRequest']
if access['permission'] == 'APPROVED':
self.token = access['token']
print('signalk ' + _('received token'), self.token)
try:
f = open(token_path, 'w')
f.write(self.token)
f.close()
except Exception as e:
print('signalk ' + _('failed to store token'), token_path)
# if permission == DENIED should we try other servers??
self.signalk_access_url = False
except Exception as e:
print('signalk ' + _('error requesting access'), e)
self.signalk_access_url = False
return
try:
def random_number_string(n):
if n == 0:
return ''
import random
return str(int(random.random()*10)) + random_number_string(n-1)
if self.uid.value == 'pypilot':
self.uid.set('pypilot-' + random_number_string(11))
r = requests.post('http://' + self.signalk_host_port + '/signalk/v1/access/requests', data={"clientId":self.uid.value, "description": "pypilot"})
contents = pyjson.loads(r.content)
print('signalk post', contents)
if contents['statusCode'] == 202 or contents['statusCode'] == 400:
self.signalk_access_url = 'http://' + self.signalk_host_port + contents['href']
print('signalk ' + _('request access url'), self.signalk_access_url)
except Exception as e:
print('signalk ' + _('error requesting access'), e)
self.signalk_ws_url = False
def connect_signalk(self):
try:
from websocket import create_connection, WebSocketBadStatusException
except Exception as e:
print('signalk ' + _('cannot create connection:'), e)
print(_('try') + ' pip3 install websocket-client ' + _('or') + ' apt install python3-websocket')
self.signalk_host_port = False
return
self.subscribed = {}
for sensor in list(signalk_table):
self.subscribed[sensor] = False
self.subscriptions = [] # track signalk subscriptions
self.signalk_values = {}
self.keep_token = False
try:
self.ws = create_connection(self.signalk_ws_url, header={'Authorization': 'JWT ' + self.token})
self.ws.settimeout(0) # nonblocking
except WebSocketBadStatusException:
print('signalk ' + _('bad status, rejecting token'))
self.token = False
self.ws = False
except ConnectionRefusedError:
print('signalk ' + _('connection refused'))
#self.signalk_host_port = False
self.signalk_ws_url = False
time.sleep(5)
except Exception as e:
print('signalk ' + _('failed to connect'), e)
self.signalk_ws_url = False
time.sleep(5)
def process(self):
time.sleep(6) # let other stuff load
print('signalk process', os.getpid())
self.process = False
while True:
time.sleep(.1)
self.poll(1)
def poll(self, timeout=0):
if self.process:
msg = self.sensors_pipe_out.recv()
while msg:
sensor, data = msg
self.sensors.write(sensor, data, 'signalk')
msg = self.sensors_pipe_out.recv()
return
t0 = time.monotonic()
if not self.initialized:
self.setup()
return
self.client.poll(timeout)
if not self.signalk_host_port:
return # waiting for signalk to detect
t1 = time.monotonic()
if not self.signalk_ws_url:
self.probe_signalk()
return
t2 = time.monotonic()
if not self.token:
self.request_access()
return
t3 = time.monotonic()
if not self.ws:
self.connect_signalk()
if not self.ws:
return
print('signalk ' + _('connected to'), self.signalk_ws_url)
# setup pypilot watches
watches = ['imu.heading_lowpass', 'imu.roll', 'imu.pitch', 'timestamp']
for watch in watches:
self.client.watch(watch, self.period.value)
for sensor in signalk_table:
self.client.watch(sensor+'.source')
return
# at this point we have a connection
# read all messages from pypilot
while True:
msg = self.client.receive_single()
if not msg:
break
debug('signalk pypilot msg', msg)
name, value = msg
if name == 'timestamp':
self.send_signalk()
self.last_values = {}
if name.endswith('.source'):
# update sources
for sensor in signalk_table:
source_name = sensor + '.source'
if name == source_name:
self.update_sensor_source(sensor, value)
self.last_sources[name[:-7]] = value
else:
self.last_values[name] = value
t4 = time.monotonic()
while True:
try:
msg = self.ws.recv()
except Exception as e:
break
if not msg:
print('signalk server closed connection')
if not self.keep_token:
print('signalk invalidating token')
self.token = False
self.disconnect_signalk()
return
try:
self.receive_signalk(msg)
except Exception as e:
debug('failed to parse signalk', e)
return
self.keep_token = True # do not throw away token if we got valid data
t5 = time.monotonic()
# convert received signalk values into sensor inputs if possible
for sensor, sensor_table in signalk_table.items():
for source, values in self.signalk_values.items():
data = {}
for signalk_path_conversion, pypilot_path in sensor_table.items():
signalk_path, signalk_conversion = signalk_path_conversion
if signalk_path in values:
try:
value = values[signalk_path]
if type(pypilot_path) == type({}): # single path translates to multiple pypilot
for signalk_key, pypilot_key in pypilot_path.items():
data[pypilot_key] = value[signalk_key] / signalk_conversion
else:
data[pypilot_path] = value / signalk_conversion
except Exception as e:
print(_('Exception converting signalk->pypilot'), e, self.signalk_values)
break
elif signalk_conversion != 1: # don't require fields with conversion of 1
break # missing fields? skip input this iteration
else:
for signalk_path_conversion in sensor_table:
signalk_path, signalk_conversion = signalk_path_conversion
if signalk_path in values:
del values[signalk_path]
# all needed sensor data is found
data['device'] = source + 'signalk'
if self.sensors_pipe:
self.sensors_pipe.send([sensor, data])
else:
debug('signalk ' + _('received'), sensor, data)
break
#print('sigktimes', t1-t0, t2-t1, t3-t2, t4-t3, t5-t4)
def send_signalk(self):
# see if we can produce any signalk output from the data we have read
updates = []
for sensor in signalk_table:
if sensor != 'imu' and (not sensor in self.last_sources or\
source_priority[self.last_sources[sensor]]>=signalk_priority):
#debug('signalk skip send from priority', sensor)
continue
for signalk_path_conversion, pypilot_path in signalk_table[sensor].items():
signalk_path, signalk_conversion = signalk_path_conversion
if type(pypilot_path) == type({}): # single path translates to multiple pypilot
keys = self.last_values_keys[signalk_path]
# store keys we need for this signalk path in dictionary
for signalk_key, pypilot_key in pypilot_path.items():
key = sensor+'.'+pypilot_key
if key in self.last_values:
keys[key] = self.last_values[key]
# see if we have the keys needed
v = {}
for signalk_key, pypilot_key in pypilot_path.items():
key = sensor+'.'+pypilot_key
if not key in keys:
break
v[signalk_key] = keys[key]*signalk_conversion
else:
updates.append({'path': signalk_path, 'value': v})
self.last_values_keys[signalk_path] = {}
else:
key = sensor+'.'+pypilot_path
if key in self.last_values:
v = self.last_values[key]*signalk_conversion
updates.append({'path': signalk_path, 'value': v})
if updates:
# send signalk updates
msg = {'updates':[{'$source':'pypilot','values':updates}]}
debug('signalk updates', msg)
try:
self.ws.send(pyjson.dumps(msg)+'\n')
except Exception as e:
print('signalk ' + _('failed to send updates'), e)
self.disconnect_signalk()
def disconnect_signalk(self):
if self.ws:
self.ws.close()
self.ws = False
self.client.clear_watches() # don't need to receive pypilot data
def receive_signalk(self, msg):
try:
data = pyjson.loads(msg)
except:
if msg:
print('signalk ' + _('failed to parse msg:'), msg)
return
if 'updates' in data:
updates = data['updates']
for update in updates:
source = 'unknown'
if 'source' in update:
source = update['source']['talker']
elif '$source' in update:
source = update['$source']
if 'timestamp' in update:
timestamp = update['timestamp']
if not source in self.signalk_values:
self.signalk_values[source] = {}
if 'values' in update:
values = update['values']
elif 'meta' in update:
values = update['meta']
else:
debug('signalk message update contains no values or meta', update)
continue
for value in values:
path = value['path']
if path in self.signalk_last_msg_time:
if self.signalk_last_msg_time[path] == timestamp:
debug('signalk skip duplicate timestamp', source, path, timestamp)
continue
self.signalk_values[source][path] = value['value']
else:
debug('signalk skip initial message', source, path, timestamp)
self.signalk_last_msg_time[path] = timestamp
def update_sensor_source(self, sensor, source):
priority = source_priority[source]
watch = priority < signalk_priority # translate from pypilot -> signalk
if watch:
watch = self.period.value
for signalk_path_conversion, pypilot_path in signalk_table[sensor].items():
if type(pypilot_path) == type({}):
for signalk_key, pypilot_key in pypilot_path.items():
pypilot_path = sensor + '.' + pypilot_key
if pypilot_path in self.last_values:
del self.last_values[pypilot_path]
self.client.watch(pypilot_path, watch)
else:
# remove any last values from this sensor
pypilot_path = sensor + '.' + pypilot_path
if pypilot_path in self.last_values:
del self.last_values[pypilot_path]
self.client.watch(pypilot_path, watch)
subscribe = priority >= signalk_priority
# prevent duplicating subscriptions
if self.subscribed[sensor] == subscribe:
return
self.subscribed[sensor] = subscribe
if not subscribe:
#signalk can't unsubscribe by path!?!?!
subscription = {'context': '*', 'unsubscribe': [{'path': '*'}]}
debug('signalk unsubscribe', subscription)
try:
self.ws.send(pyjson.dumps(subscription)+'\n')
except Exception as e:
print('signalk failed to send', e)
self.disconnect_signalk()
return
signalk_sensor = signalk_table[sensor]
if subscribe: # translate from signalk -> pypilot
subscriptions = []
for signalk_path_conversion in signalk_sensor:
signalk_path, signalk_conversion = signalk_path_conversion
if signalk_path in self.signalk_last_msg_time:
del self.signalk_last_msg_time[signalk_path]
subscriptions.append({'path': signalk_path, 'minPeriod': self.period.value*1000, 'format': 'delta', 'policy': 'instant'})
self.subscriptions += subscriptions
else:
# remove this subscription and resend all subscriptions
debug('signalk remove subs', signalk_sensor, self.subscriptions)
subscriptions = []
for subscription in self.subscriptions:
for signalk_path_conversion in signalk_sensor:
signalk_path, signalk_conversion = signalk_path_conversion
if subscription['path'] == signalk_path:
break
else:
subscriptions.append(subscription)
self.subscriptions = subscriptions
self.signalk_last_msg_time = {}
subscription = {'context': 'vessels.self'}
subscription['subscribe'] = subscriptions
debug('signalk subscribe', subscription)
try:
self.ws.send(pyjson.dumps(subscription)+'\n')
except Exception as e:
print('signalk failed to send subscription', e)
self.disconnect_signalk()
def main():
sk = signalk()
while True:
sk.poll(1)
if __name__ == '__main__':
main()
| [
"seandepagnier@gmail.com"
] | seandepagnier@gmail.com |
e78b99366d88cbdb16defac1ca2282fdf9ecf490 | 82f7c00aa14c95032fb6e6ff1029823404246b83 | /apps/statistics/rstats.py | a55468334938caefa6725db99cec04117e861e29 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gillbates/NewsBlur | 621feaa090cdc2fe9dcfcae4af7de3f40b69ba00 | 0eb2ccf4ebe59ff27d6ed822cc406a427cf3bf6a | refs/heads/master | 2020-12-30T17:32:03.999893 | 2013-07-01T00:12:41 | 2013-07-01T00:12:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,566 | py | import redis
import datetime
import re
from collections import defaultdict
from django.conf import settings
class RStats:
STATS_TYPE = {
'page_load': 'PLT',
'feed_fetch': 'FFH',
}
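    # Keys are laid out as "<TYPE>:<minute epoch>:s" (hit count) and
    # "<TYPE>:<minute epoch>:a" (accumulated duration), each expiring two
    # days after its minute bucket.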
@classmethod
def stats_type(cls, name):
return cls.STATS_TYPE[name]
@classmethod
def add(cls, name, duration=None):
r = redis.Redis(connection_pool=settings.REDIS_STATISTICS_POOL)
pipe = r.pipeline()
minute = round_time(round_to=60)
key = "%s:%s" % (cls.stats_type(name), minute.strftime('%s'))
pipe.incr("%s:s" % key)
if duration:
pipe.incrbyfloat("%s:a" % key, duration)
pipe.expireat("%s:a" % key, (minute + datetime.timedelta(days=2)).strftime("%s"))
pipe.expireat("%s:s" % key, (minute + datetime.timedelta(days=2)).strftime("%s"))
pipe.execute()
@classmethod
def clean_path(cls, path):
if not path:
return
if path.startswith('/reader/feed/'):
path = '/reader/feed/'
elif path.startswith('/social/stories'):
path = '/social/stories/'
elif path.startswith('/reader/river_stories'):
path = '/reader/river_stories/'
elif path.startswith('/social/river_stories'):
path = '/social/river_stories/'
elif path.startswith('/reader/page/'):
path = '/reader/page/'
elif path.startswith('/api/check_share_on_site'):
path = '/api/check_share_on_site/'
return path
@classmethod
def count(cls, name, hours=24):
r = redis.Redis(connection_pool=settings.REDIS_STATISTICS_POOL)
stats_type = cls.stats_type(name)
now = datetime.datetime.now()
pipe = r.pipeline()
for minutes_ago in range(60*hours):
dt_min_ago = now - datetime.timedelta(minutes=minutes_ago)
minute = round_time(dt=dt_min_ago, round_to=60)
key = "%s:%s" % (stats_type, minute.strftime('%s'))
pipe.get("%s:s" % key)
values = pipe.execute()
total = sum(int(v) for v in values if v)
return total
@classmethod
def sample(cls, sample=1000, pool=None):
if not pool:
pool = settings.REDIS_STORY_HASH_POOL
r = redis.Redis(connection_pool=pool)
keys = set()
errors = set()
prefixes = defaultdict(set)
prefixes_ttls = defaultdict(lambda: defaultdict(int))
prefix_re = re.compile(r"(\w+):(.*)")
p = r.pipeline()
[p.randomkey() for _ in range(sample)]
keys = set(p.execute())
p = r.pipeline()
[p.ttl(key) for key in keys]
ttls = p.execute()
for k, key in enumerate(keys):
match = prefix_re.match(key)
if not match:
errors.add(key)
continue
prefix, rest = match.groups()
prefixes[prefix].add(rest)
ttl = ttls[k]
if ttl < 60*60: # 1 hour
prefixes_ttls[prefix]['1h'] += 1
elif ttl < 60*60*12:
prefixes_ttls[prefix]['12h'] += 1
elif ttl < 60*60*24:
prefixes_ttls[prefix]['1d'] += 1
elif ttl < 60*60*168:
prefixes_ttls[prefix]['1w'] += 1
elif ttl < 60*60*336:
prefixes_ttls[prefix]['2w'] += 1
else:
prefixes_ttls[prefix]['2w+'] += 1
keys_count = len(keys)
print " ---> %s total keys" % keys_count
for prefix, rest in prefixes.items():
total_expiring = sum([k for k in dict(prefixes_ttls[prefix]).values()])
print " ---> %4s: (%.4s%%) %s keys (%s expiring: %s)" % (prefix, 100. * (len(rest) / float(keys_count)), len(rest), total_expiring, dict(prefixes_ttls[prefix]))
print " ---> %s errors: %s" % (len(errors), errors)
def round_time(dt=None, round_to=60):
"""Round a datetime object to any time laps in seconds
dt : datetime.datetime object, default now.
round_to : Closest number of seconds to round to, default 1 minute.
Author: Thierry Husson 2012 - Use it as you want but don't blame me.
"""
    if dt is None: dt = datetime.datetime.now()
seconds = (dt - dt.min).seconds
rounding = (seconds+round_to/2) // round_to * round_to
return dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)
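# Example usage (illustrative, not part of the original module):
#   >>> round_time(datetime.datetime(2013, 7, 1, 0, 0, 42), round_to=60)
#   datetime.datetime(2013, 7, 1, 0, 1)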
| [
"samuel@ofbrooklyn.com"
] | samuel@ofbrooklyn.com |
8cc839ce2eb3f1d63aae16b0a5a37c6d5b8669aa | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/identity/azure-identity/tests/test_authn_client.py | 3dac71b924c722a3ccaee3a40a4d6d8bfffbdf1c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 9,973 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""These tests use the synchronous AuthnClient as a driver to test functionality
of the sans I/O AuthnClientBase shared with AsyncAuthnClient."""
import json
import time
try:
from unittest.mock import Mock, patch
except ImportError: # python < 3.3
from mock import Mock, patch # type: ignore
from azure.core.credentials import AccessToken
from azure.identity._authn_client import AuthnClient
from azure.identity._constants import EnvironmentVariables, DEFAULT_REFRESH_OFFSET, DEFAULT_TOKEN_REFRESH_RETRY_DELAY
import pytest
from six.moves.urllib_parse import urlparse
from helpers import mock_response
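# `mock_response` comes from the repo's local test helpers; a rough sketch of
# the assumed behavior (the real helper's signature may differ):
#     def mock_response(json_payload=None, status_code=200):
#         response = Mock(status_code=status_code, headers={})
#         response.text = lambda encoding=None: json.dumps(json_payload)
#         response.content_type = "application/json"
#         return response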
def test_deserialization_expires_integers():
now = 6
expires_in = 59 - now
expires_on = now + expires_in
access_token = "***"
expected_access_token = AccessToken(access_token, expires_on)
scope = "scope"
# response with expires_on only
token_payload = {"access_token": access_token, "expires_on": expires_on, "token_type": "Bearer", "resource": scope}
mock_send = Mock(return_value=mock_response(json_payload=token_payload))
token = AuthnClient(endpoint="http://foo", transport=Mock(send=mock_send)).request_token(scope)
assert token == expected_access_token
# response with expires_in as well
token_payload = {
"access_token": access_token,
"expires_in": expires_in,
"token_type": "Bearer",
"ext_expires_in": expires_in,
}
with patch(AuthnClient.__module__ + ".time.time") as mock_time:
mock_time.return_value = now
mock_send = Mock(return_value=mock_response(json_payload=token_payload))
token = AuthnClient(endpoint="http://foo", transport=Mock(send=mock_send)).request_token(scope)
assert token == expected_access_token
def test_deserialization_app_service_msi():
now = 6
expires_in = 59 - now
expires_on = now + expires_in
access_token = "***"
expected_access_token = AccessToken(access_token, expires_on)
scope = "scope"
# response with expires_on only and it's a datetime string (App Service MSI, Linux)
token_payload = {
"access_token": access_token,
"expires_on": "01/01/1970 00:00:{} +00:00".format(now + expires_in),
"token_type": "Bearer",
"resource": scope,
}
mock_send = Mock(return_value=mock_response(json_payload=token_payload))
token = AuthnClient(endpoint="http://foo", transport=Mock(send=mock_send)).request_token(scope)
assert token == expected_access_token
def test_deserialization_expires_strings():
now = 6
expires_in = 59 - now
expires_on = now + expires_in
access_token = "***"
expected_access_token = AccessToken(access_token, expires_on)
scope = "scope"
# response with string expires_in and expires_on (IMDS, Cloud Shell)
token_payload = {
"access_token": access_token,
"expires_in": str(expires_in),
"ext_expires_in": str(expires_in),
"expires_on": str(expires_on),
"token_type": "Bearer",
"resource": scope,
}
mock_send = Mock(return_value=mock_response(json_payload=token_payload))
token = AuthnClient(endpoint="http://foo", transport=Mock(send=mock_send)).request_token(scope)
assert token == expected_access_token
def test_caching_when_only_expires_in_set():
"""the cache should function when auth responses don't include an explicit expires_on"""
access_token = "token"
now = 42
expires_in = 1800
expires_on = now + expires_in
expected_token = AccessToken(access_token, expires_on)
mock_send = Mock(
return_value=mock_response(
json_payload={"access_token": access_token, "expires_in": expires_in, "token_type": "Bearer"}
)
)
client = AuthnClient(endpoint="http://foo", transport=Mock(send=mock_send))
with patch("azure.identity._authn_client.time.time") as mock_time:
mock_time.return_value = 42
token = client.request_token(["scope"])
assert token.token == expected_token.token
assert token.expires_on == expected_token.expires_on
cached_token = client.get_cached_token(["scope"])
assert cached_token == expected_token
def test_expires_in_strings():
expected_token = "token"
mock_send = Mock(
return_value=mock_response(
json_payload={
"access_token": expected_token,
"expires_in": "42",
"ext_expires_in": "42",
"token_type": "Bearer",
}
)
)
now = int(time.time())
with patch("azure.identity._authn_client.time.time") as mock_time:
mock_time.return_value = now
token = AuthnClient(endpoint="http://foo", transport=Mock(send=mock_send)).request_token("scope")
assert token.token == expected_token
assert token.expires_on == now + 42
def test_cache_expiry():
access_token = "token"
now = 42
expires_in = 1800
expires_on = now + expires_in
expected_token = AccessToken(access_token, expires_on)
token_payload = {"access_token": access_token, "expires_in": expires_in, "token_type": "Bearer"}
mock_send = Mock(return_value=mock_response(json_payload=token_payload))
client = AuthnClient(endpoint="http://foo", transport=Mock(send=mock_send))
with patch("azure.identity._authn_client.time.time") as mock_time:
# populate the cache with a valid token
mock_time.return_value = now
token = client.request_token("scope")
assert token.token == expected_token.token
assert token.expires_on == expected_token.expires_on
cached_token = client.get_cached_token("scope")
assert cached_token == expected_token
# advance time past the cached token's expires_on
mock_time.return_value = expires_on + 3600
cached_token = client.get_cached_token("scope")
assert not cached_token
# request a new token
new_token = "new token"
token_payload["access_token"] = new_token
token = client.request_token("scope")
assert token.token == new_token
# it should be cached
cached_token = client.get_cached_token("scope")
assert cached_token.token == new_token
def test_cache_scopes():
scope_a = "scope_a"
scope_b = "scope_b"
scope_ab = scope_a + " " + scope_b
expected_tokens = {
scope_a: {"access_token": scope_a, "expires_in": 1 << 31, "ext_expires_in": 1 << 31, "token_type": "Bearer"},
scope_b: {"access_token": scope_b, "expires_in": 1 << 31, "ext_expires_in": 1 << 31, "token_type": "Bearer"},
scope_ab: {"access_token": scope_ab, "expires_in": 1 << 31, "ext_expires_in": 1 << 31, "token_type": "Bearer"},
}
def mock_send(request, **kwargs):
token = expected_tokens[request.data["resource"]]
return mock_response(json_payload=token)
client = AuthnClient(endpoint="http://foo", transport=Mock(send=mock_send))
# if the cache has a token for a & b, it should hit for a, b, a & b
token = client.request_token([scope_a, scope_b], form_data={"resource": scope_ab})
assert token.token == scope_ab
for scope in (scope_a, scope_b):
assert client.get_cached_token([scope]).token == scope_ab
assert client.get_cached_token([scope_a, scope_b]).token == scope_ab
# if the cache has only tokens for a and b alone, a & b should miss
client = AuthnClient(endpoint="http://foo", transport=Mock(send=mock_send))
for scope in (scope_a, scope_b):
token = client.request_token([scope], form_data={"resource": scope})
assert token.token == scope
assert client.get_cached_token([scope]).token == scope
assert not client.get_cached_token([scope_a, scope_b])
@pytest.mark.parametrize("authority", ("localhost", "https://localhost"))
def test_request_url(authority):
tenant_id = "expected-tenant"
parsed_authority = urlparse(authority)
expected_netloc = parsed_authority.netloc or authority # "localhost" parses to netloc "", path "localhost"
def validate_url(url):
actual = urlparse(url)
assert actual.scheme == "https"
assert actual.netloc == expected_netloc
assert actual.path.startswith("/" + tenant_id)
def mock_send(request, **kwargs):
validate_url(request.url)
return mock_response(json_payload={"token_type": "Bearer", "expires_in": 42, "access_token": "***"})
client = AuthnClient(tenant=tenant_id, transport=Mock(send=mock_send), authority=authority)
client.request_token(("scope",))
request = client.get_refresh_token_grant_request({"secret": "***"}, "scope")
validate_url(request.url)
# authority can be configured via environment variable
with patch.dict("os.environ", {EnvironmentVariables.AZURE_AUTHORITY_HOST: authority}, clear=True):
client = AuthnClient(tenant=tenant_id, transport=Mock(send=mock_send))
client.request_token(("scope",))
request = client.get_refresh_token_grant_request({"secret": "***"}, "scope")
validate_url(request.url)
def test_should_refresh():
client = AuthnClient(endpoint="http://foo")
now = int(time.time())
# do not need refresh
token = AccessToken("token", now + DEFAULT_REFRESH_OFFSET + 1)
should_refresh = client.should_refresh(token)
assert not should_refresh
# need refresh
token = AccessToken("token", now + DEFAULT_REFRESH_OFFSET - 1)
should_refresh = client.should_refresh(token)
assert should_refresh
# not exceed cool down time, do not refresh
token = AccessToken("token", now + DEFAULT_REFRESH_OFFSET - 1)
client._last_refresh_time = now - DEFAULT_TOKEN_REFRESH_RETRY_DELAY + 1
should_refresh = client.should_refresh(token)
assert not should_refresh
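    # Summary of the behavior exercised above (derived from these asserts):
    # a token is refreshed only when it expires within DEFAULT_REFRESH_OFFSET
    # seconds AND the last refresh attempt happened more than
    # DEFAULT_TOKEN_REFRESH_RETRY_DELAY seconds ago.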
| [
"noreply@github.com"
] | yunhaoling.noreply@github.com |
3dd8f60009e8e64694d233e8d52287efdf741784 | 0e92ded4d7059ff5d1a1235e0e5016395546bf93 | /itests/util.py | e22e459564962ba1d599ace095915a343d557d82 | [
"MIT"
] | permissive | VijayanB/asgard-api | f351ee949e52d9c49bc8a6a044d567d3fb2ad115 | 5444d81be33bf4af3c9cf5a2185c16ff10357034 | refs/heads/master | 2022-04-13T18:42:40.762838 | 2020-03-18T19:55:44 | 2020-03-18T19:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,131 | py | import asyncio
import os
import random
import string
from collections import defaultdict
from typing import Any, Dict, List, Type, Set
import asyncworker
from aioelasticsearch import Elasticsearch
from aiohttp import web, ClientSession
from aiohttp.test_utils import TestClient, TestServer
from aiopg.sa import Engine
from asynctest import TestCase
from sqlalchemy import Table
from sqlalchemy.sql.ddl import CreateTable
import asgard.backends.users
from asgard.conf import settings
from asgard.db import _SessionMaker
from asgard.http.client import HttpClient
from asgard.models.account import AccountDB as Account
from asgard.models.user import UserDB as User
from asgard.models.user_has_account import UserHasAccount
class PgDataMocker:
def __init__(self, pool: Engine) -> None:
self.data: Dict[Table, List[Dict]] = defaultdict(list)
self.pool = pool
self.schema = "".join(random.choices(string.ascii_lowercase, k=10))
self._used_tables: List[Table] = []
self._table_names: Set[str] = set()
self._original_table_schemas: Dict[Table, str] = {}
def add_data(
self, model: Type, field_names: List[str], data: List[List[Any]]
):
if not data:
return
assert all(len(field_names) == len(row) for row in data)
if type(model) is not Table:
table = model.__table__
else:
table = model
if table.name not in self._table_names:
# ensure schema
self._original_table_schemas[table] = table.schema
table.schema = self.schema
self._used_tables.append(table)
self.data[table].extend((dict(zip(field_names, row)) for row in data))
self._table_names.add(table.name)
async def _create_schema(self):
await self._drop_schema_only()
async with self.pool.acquire() as conn:
await conn.execute(f"CREATE SCHEMA IF NOT EXISTS {self.schema}")
for table in self._used_tables:
await conn.execute(CreateTable(table))
async def create(self):
await self._create_schema()
commands = (
table.insert().values(self.data[table])
for table in self._used_tables
)
async with self.pool.acquire() as conn:
for command in commands:
await conn.execute(command)
async def _drop_schema_only(self):
async with self.pool.acquire() as conn:
await conn.execute(f"DROP SCHEMA IF EXISTS {self.schema} CASCADE")
async def drop(self):
await self._drop_schema_only()
for table, original_schema in self._original_table_schemas.items():
table.schema = original_schema
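# Hypothetical usage sketch for PgDataMocker (illustrative names, not part of
# the original module):
#     mocker = PgDataMocker(pool=engine)
#     mocker.add_data(User, ["id", "tx_name"], [[1, "Jane"]])
#     await mocker.create()   # builds a random throwaway schema, inserts rows
#     ...                     # run the test against the isolated schema
#     await mocker.drop()     # drops the schema, restores original table schemas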
ACCOUNT_INFRA_NAME: str = "Infra Team"
ACCOUNT_INFRA_ID: int = 11
ACCOUNT_INFRA_NAMESPACE: str = "infra"
ACCOUNT_INFRA_OWNER: str = "infra"
ACCOUNT_INFRA_DICT = {
"id": ACCOUNT_INFRA_ID,
"name": ACCOUNT_INFRA_NAME,
"namespace": ACCOUNT_INFRA_NAMESPACE,
"owner": ACCOUNT_INFRA_OWNER,
}
ACCOUNT_DEV_NAME: str = "Dev Team"
ACCOUNT_DEV_ID: int = 10
ACCOUNT_DEV_NAMESPACE: str = "dev"
ACCOUNT_DEV_OWNER: str = "dev"
ACCOUNT_DEV_DICT = {
"id": ACCOUNT_DEV_ID,
"name": ACCOUNT_DEV_NAME,
"namespace": ACCOUNT_DEV_NAMESPACE,
"owner": ACCOUNT_DEV_OWNER,
}
ACCOUNT_WITH_NO_USERS_ID = 12
ACCOUNT_WITH_NO_USERS_NAME = "Other Team"
ACCOUNT_WITH_NO_USERS_NAMESPACE = "other"
ACCOUNT_WITH_NO_USERS_OWNER = "other"
ACCOUNT_WITH_NO_USERS_DICT = {
"id": ACCOUNT_WITH_NO_USERS_ID,
"name": ACCOUNT_WITH_NO_USERS_NAME,
"namespace": ACCOUNT_WITH_NO_USERS_NAMESPACE,
"owner": ACCOUNT_WITH_NO_USERS_OWNER,
}
USER_WITH_ONE_ACCOUNT_ID = 22
USER_WITH_ONE_ACCOUNT_AUTH_KEY = "7259c8ce82294480b1be60835b74de0c"
USER_WITH_ONE_ACCOUNT_NAME = "User one account"
USER_WITH_ONE_ACCOUNT_EMAIL = "userone@server.com"
USER_WITH_ONE_ACCOUNT_DICT = {
"id": USER_WITH_ONE_ACCOUNT_ID,
"name": USER_WITH_ONE_ACCOUNT_NAME,
"email": USER_WITH_ONE_ACCOUNT_EMAIL,
}
USER_WITH_NO_ACCOUNTS_ID = 21
USER_WITH_NO_ACCOUNTS_AUTH_KEY = "7b4184bfe7d2349eb56bcfb9dc246cf8"
USER_WITH_NO_ACCOUNTS_NAME = "User with no accounts"
USER_WITH_NO_ACCOUNTS_EMAIL = "user-no-accounts@host.com"
USER_WITH_NO_ACCOUNTS_DICT = {
"id": USER_WITH_NO_ACCOUNTS_ID,
"name": USER_WITH_NO_ACCOUNTS_NAME,
"email": USER_WITH_NO_ACCOUNTS_EMAIL,
}
USER_WITH_MULTIPLE_ACCOUNTS_ID = 20
USER_WITH_MULTIPLE_ACCOUNTS_EMAIL = "john@host.com"
USER_WITH_MULTIPLE_ACCOUNTS_NAME = "John Doe"
USER_WITH_MULTIPLE_ACCOUNTS_AUTH_KEY = "69ed620926be4067a36402c3f7e9ddf0"
USER_WITH_MULTIPLE_ACCOUNTS_DICT = {
"id": USER_WITH_MULTIPLE_ACCOUNTS_ID,
"name": USER_WITH_MULTIPLE_ACCOUNTS_NAME,
"email": USER_WITH_MULTIPLE_ACCOUNTS_EMAIL,
}
_session = None
_pool = None
class BaseTestCase(TestCase):
use_default_loop = True
async def conn_pool(self):
global _session, _pool
if not _session:
_session = _SessionMaker(settings.DB_URL)
_pool = await _session.engine()
return _pool
async def setUp(self):
self.esclient = Elasticsearch([settings.STATS_API_URL])
self.pg_data_mocker = PgDataMocker(pool=await self.conn_pool())
self.users_fixture = [
[
USER_WITH_MULTIPLE_ACCOUNTS_ID,
USER_WITH_MULTIPLE_ACCOUNTS_NAME,
USER_WITH_MULTIPLE_ACCOUNTS_EMAIL,
USER_WITH_MULTIPLE_ACCOUNTS_AUTH_KEY,
],
[
USER_WITH_NO_ACCOUNTS_ID,
USER_WITH_NO_ACCOUNTS_NAME,
USER_WITH_NO_ACCOUNTS_EMAIL,
USER_WITH_NO_ACCOUNTS_AUTH_KEY,
],
[
USER_WITH_ONE_ACCOUNT_ID,
USER_WITH_ONE_ACCOUNT_NAME,
USER_WITH_ONE_ACCOUNT_EMAIL,
USER_WITH_ONE_ACCOUNT_AUTH_KEY,
],
]
self.pg_data_mocker.add_data(
User,
["id", "tx_name", "tx_email", "tx_authkey"],
self.users_fixture,
)
self.pg_data_mocker.add_data(
Account,
["id", "name", "namespace", "owner"],
[
[
ACCOUNT_DEV_ID,
ACCOUNT_DEV_NAME,
ACCOUNT_DEV_NAMESPACE,
ACCOUNT_DEV_OWNER,
],
[
ACCOUNT_INFRA_ID,
ACCOUNT_INFRA_NAME,
ACCOUNT_INFRA_NAMESPACE,
ACCOUNT_INFRA_OWNER,
],
[
ACCOUNT_WITH_NO_USERS_ID,
ACCOUNT_WITH_NO_USERS_NAME,
ACCOUNT_WITH_NO_USERS_NAMESPACE,
ACCOUNT_WITH_NO_USERS_OWNER,
],
],
)
self.pg_data_mocker.add_data(
UserHasAccount,
["id", "user_id", "account_id"],
[
[10, USER_WITH_MULTIPLE_ACCOUNTS_ID, ACCOUNT_DEV_ID],
[11, USER_WITH_MULTIPLE_ACCOUNTS_ID, ACCOUNT_INFRA_ID],
[12, USER_WITH_ONE_ACCOUNT_ID, ACCOUNT_DEV_ID],
], # John Doe, accounts: Dev Team, Infra Team
)
await self.pg_data_mocker.create()
async def tearDown(self):
await self.pg_data_mocker.drop()
if hasattr(self, "server"):
await self.server.close()
async def aiohttp_client(self, app: asyncworker.App) -> TestClient:
routes = app.routes_registry.http_routes
http_app = web.Application()
for route in routes:
for route_def in route.aiohttp_routes():
route_def.register(http_app.router)
self.server = TestServer(
http_app, port=int(os.getenv("TEST_ASYNCWORKER_HTTP_PORT") or 0)
)
client = TestClient(self.server)
await self.server.start_server()
return client
async def _load_app_stats_into_storage(
self, index_name, timestamp_to_use, datapoints
):
"""
Carrega no elasticsearch local os datapoints de estatísticas de apps
O `timestamp` usado em cada datapoint é o `timestamp_to_use`.
"""
for datapoint in datapoints:
datapoint["timestamp"] = timestamp_to_use.isoformat()
await self.esclient.index(
index=index_name, doc_type="stats", body=datapoint
)
        # give the local Elasticsearch instance time to index the freshly inserted data
await asyncio.sleep(3)
CHRONOS_BASE_URL = f"{settings.SCHEDULED_JOBS_SERVICE_ADDRESS}/v1/scheduler"
http_client = HttpClient()
async def _cleanup_chronos():
resp = await http_client.get(f"{CHRONOS_BASE_URL}/jobs")
all_jobs_json = await resp.json()
for job in all_jobs_json:
await http_client.delete(f"{CHRONOS_BASE_URL}/job/{job['name']}")
async def _load_jobs_into_chronos(*jobs_list):
await _cleanup_chronos()
for job in jobs_list:
await http_client.post(f"{CHRONOS_BASE_URL}/iso8601", json=job)
await asyncio.sleep(1)
| [
"daltonmatos@gmail.com"
] | daltonmatos@gmail.com |
1a635f7170026354750bc2dc5dfac90016d119ef | 096ccaca86872b03a137edf58221413073d770cb | /spiders/drizly_new.py | 948516a160d2bd3b74ee71a3d4f2f8a058cbaa65 | [] | no_license | DH-heima/webscrapping | f142962b50deed2628052dd7a48098a4afbcbada | 1dc8f81f45db0d4366391c3052c5ab36f4d4bc5d | refs/heads/master | 2022-02-02T23:26:22.520064 | 2019-06-13T13:38:10 | 2019-06-13T13:38:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,090 | py | import os
import os.path as op
from lxml import etree
parser = etree.HTMLParser(encoding='utf-8')
from time import sleep
from urllib.parse import urlsplit, parse_qs
import requests_cache
from validators import validate_raw_files, check_products_detection
from create_csvs import create_csvs
from ers import all_keywords_usa as keywords, fpath_namer, mh_brands, clean_url, shop_inventory_lw_csv
from matcher import BrandMatcher
from ers import COLLECTION_DATE, file_hash, img_path_namer, TEST_PAGES_FOLDER_PATH
from custom_browser import CustomDriver
from parse import parse
from ers import clean_xpathd_text
# Init variables and assets
shop_id = 'drizly'
root_url = 'https://www.drizly.com'
requests_cache.install_cache(fpath_namer(shop_id, 'requests_cache'))
country = 'USA'
searches, categories, products = {}, {}, {}
driver = CustomDriver(headless=True, download_images=True)
brm = BrandMatcher()
def getprice(pricestr):
if pricestr == '':
return pricestr
pricestr = pricestr.replace(',', '').strip()
print(pricestr)
price = parse('${dol:d}.{pence:d}', pricestr)
if price is None:
price = parse('{pence:d}p', pricestr)
return price.named['pence']
else:
return price.named['dol'] * 100 + price.named['pence']
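# Worked examples for getprice (illustrative, derived from the patterns above):
#   getprice('$12.99')  -> 12 * 100 + 99 = 1299 (price in cents)
#   getprice('99p')     -> 99
#   getprice('')        -> '' (passed through unchanged)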
def init_drizly(driver):
driver.get("https://drizly.com/beer/c2")
sleep(2)
driver.waitclick('//*[@class="AddressSelect"]')
driver.text_input('1557 5th Avenue, New York, NY, United States', '//*[@id="address_field"]')
sleep(1)
driver.text_input('', '//*[@id="address_field"]', enter=True)
sleep(3)
drizly_was_initialised = False
# ##################
# # CTG page xpathing #
# ##################
ctg_page_test_url = 'https://drizly.com/champagne/c196479'
exple_ctg_page_path = op.join(TEST_PAGES_FOLDER_PATH, shop_id, 'ctg_page_test.html') # TODO : store the file
os.makedirs(op.dirname(exple_ctg_page_path), exist_ok=True)
ctg, test_categories, test_products = '', {'': []}, {}
# driver.get(ctg_page_test_url)
# driver.save_page(exple_ctg_page_path, scroll_to_bottom=True)
def ctg_parsing(fpath, ctg, categories, products): # TODO : modify xpaths
tree = etree.parse(open(fpath, 'rb'), parser=parser)
for li in tree.xpath('//ul//li[contains(@class, "CatalogResults__CatalogListItem")]'):
if not li.xpath('.//a/@href'):
continue
produrl = li.xpath('.//a/@href')[0]
produrl = parse_qs(urlsplit(produrl).query)['url'][0] if 'url' in parse_qs(urlsplit(produrl).query) else produrl
products[produrl] = {
'pdct_name_on_eretailer': clean_xpathd_text(li.xpath('.//h3//text()')),
'volume': clean_xpathd_text(li.xpath('./zzz')),
'raw_price': clean_xpathd_text(li.xpath('.//*[contains(@class, "CatalogItem__CatalogItemDetails__Price")]//text()')).split(' - ')[-1],
'raw_promo_price': clean_xpathd_text(li.xpath('./zzzz')),
'pdct_img_main_url': "".join(li.xpath('.//div[contains(@class, "CatalogItemImage")]/@style')),
}
products[produrl]['brnd'] = brm.find_brand(products[produrl]['pdct_name_on_eretailer'])['brand']
print(products[produrl], produrl)
products[produrl]['price'] = getprice(products[produrl]['raw_price'])
products[produrl]['promo_price'] = getprice(products[produrl]['raw_promo_price'])
products[produrl]['pdct_img_main_url'] = clean_url(products[produrl]['pdct_img_main_url'], root_url)
print(products[produrl])
categories[ctg].append(produrl)
return categories, products
ctg_parsing(exple_ctg_page_path, ctg, test_categories, test_products)
###################
# # KW page xpathing #
###################
search_page_test_url = 'https://www.abcfws.com/searchresults?Ntt=champagne&No=0&Rdm=909&Nr=AND(NOT(product.productTypeId:4),NOT(product.productTypeId:9),NOT(record.type:Store))&N=&Nrpp=24'
exple_kw_page_path = op.join(TEST_PAGES_FOLDER_PATH, shop_id, 'kw_page_test.html') # TODO : store the file
os.makedirs(op.dirname(exple_ctg_page_path), exist_ok=True)
kw_test, test_searches, test_products = 'champagne', {"champagne": []}, {}
# driver.get(search_page_test_url.format(kw=kw_test))
# driver.save_page(exple_kw_page_path, scroll_to_bottom=True)
def kw_parsing(fpath, kw, searches, products): # TODO : modify xpaths
tree = etree.parse(open(fpath, 'rb'), parser=parser)
for li in tree.xpath('//ul//li[contains(@class, "CatalogResults__CatalogListItem")]'):
if not li.xpath('(.//a/@href)[1]'):
continue
produrl = li.xpath('(.//a/@href)[1]')[0]
produrl = parse_qs(urlsplit(produrl).query)['url'][0] if 'url' in parse_qs(urlsplit(produrl).query) else produrl
products[produrl] = {
'pdct_name_on_eretailer': clean_xpathd_text(li.xpath('.//h3//text()')),
'volume': clean_xpathd_text(li.xpath('./zzzzz')),
'raw_price': clean_xpathd_text(li.xpath('.//*[contains(@class, "CatalogItem__CatalogItemDetails__Price")]//text()')).split(' - ')[-1],
'raw_promo_price': clean_xpathd_text(li.xpath('./zzzzz')),
'pdct_img_main_url': "".join(li.xpath('.//div[contains(@class, "CatalogItemImage")]/@style')[:1]),
}
products[produrl]['brnd'] = brm.find_brand(products[produrl]['pdct_name_on_eretailer'])['brand']
print(products[produrl], produrl)
products[produrl]['price'] = getprice(products[produrl]['raw_price'])
products[produrl]['promo_price'] = getprice(products[produrl]['raw_promo_price'])
products[produrl]['pdct_img_main_url'] = clean_url(products[produrl]['pdct_img_main_url'], root_url)
print(products[produrl])
print(products[produrl])
searches[kw].append(produrl)
return searches, products
kw_parsing(exple_kw_page_path, kw_test, test_searches, test_products)
###################
# # PDCT page xpathing #
###################
exple_pdct_page_path = op.join(TEST_PAGES_FOLDER_PATH, shop_id, 'pdct_page_test.html') # TODO: store the file
# exple_pdct_page_path = "/code/mhers/cache/w_9/isetan/pdct/<クリュッグ>ロゼ ハーフサイズ-page0.html"
test_url, test_products = '', {'': {}}
def pdct_parsing(fpath, url, products): # TODO : modify xpaths
tree = etree.parse(open(fpath), parser=parser)
products[url].update({
# 'volume': clean_xpathd_text(tree.xpath('.//*[@class="item-info"]//tr//td//text()')[:3], unicodedata_normalize=True),
'pdct_img_main_url': clean_url(''.join(tree.xpath('//div[@class="ProductMeta__product-image"]//@data-default-image')[:1]), root_url),
'ctg_denom_txt': ' '.join(tree.xpath('//nav[contains(@class,"breadcrumbs")]//li//text()')),
})
return products
pdct_parsing(exple_pdct_page_path, test_url, test_products)
###################
# # CTG scrapping #
###################
urls_ctgs_dict = {
'champagne': 'https://drizly.com/champagne/c196479/page{page}',
'sparkling': 'https://drizly.com/wine/c3-c196553/page{page}',
'still_wines': 'https://drizly.com/white-wine/c8/page{page}',
'white_wine': 'https://drizly.com/white-wine/c8/page{page}',
    'red_wine': 'https://drizly.com/red-wine/c7/page{page}',  # NOTE: original reused the "white-wine" slug with c7; assumed copy-paste slip, since c8 above is already white wine
'whisky': 'https://drizly.com/scotch/c196927/page{page}',
'cognac': 'https://drizly.com/liquor/c4-c83/page{page}',
'vodka': 'https://drizly.com/vodka/c89/page{page}',
'gin': 'https://drizly.com/gin/c84/page{page}',
'tequila': 'https://drizly.com/tequila/c88/page{page}',
'rum': 'https://drizly.com/rum/c87/page{page}',
}
# Category Scraping - with selenium - multiple pages per category (click on next page)
for ctg, url in urls_ctgs_dict.items():
categories[ctg] = []
number_of_pdcts_in_ctg = 0
for p in range(100):
fpath = fpath_namer(shop_id, 'ctg', ctg, p)
if not op.exists(fpath):
if not drizly_was_initialised:
init_drizly(driver)
drizly_was_initialised = True
driver.get(url.format(page=p+1))
sleep(2)
driver.save_page(fpath, scroll_to_bottom=True)
categories, products = ctg_parsing(fpath, ctg, categories, products)
if len(set(categories[ctg])) == number_of_pdcts_in_ctg:
break
else:
number_of_pdcts_in_ctg = len(set(categories[ctg]))
print(ctg, url, p, len(categories[ctg]))
######################################
# # KW searches scrapping ############
######################################
# KW searches Scraping - with requests - one page per search
kw_search_url = "https://drizly.com/search?utf8=%E2%9C%93&q={kw}" # TODO : modify URL
for kw in keywords:
searches[kw] = []
number_of_pdcts_in_kw_search = 0
if not op.exists(fpath_namer(shop_id, 'search', kw, 0)):
driver.get(kw_search_url.format(kw=kw))
for p in range(1):
fpath = fpath_namer(shop_id, 'search', kw, p)
if not op.exists(fpath):
if not drizly_was_initialised:
init_drizly(driver)
drizly_was_initialised = True
sleep(2)
driver.smooth_scroll()
driver.save_page(fpath, scroll_to_bottom=True)
searches, products = kw_parsing(fpath, kw, searches, products)
print(kw, len(searches[kw]))
######################################
# # Product pages scraping ###########
######################################
# Download the pages - with selenium
for url in sorted(list(set(products))):
d = products[url]
if d['brnd'] in mh_brands:
print(d['pdct_name_on_eretailer'], d['volume'])
url_mod = clean_url(url, root_url=root_url)
fpath = fpath_namer(shop_id, 'pdct', d['pdct_name_on_eretailer'], 0)
if not op.exists(fpath):
driver.get(url_mod)
if not drizly_was_initialised:
init_drizly(driver)
drizly_was_initialised = True
sleep(2)
driver.save_page(fpath, scroll_to_bottom=True)
products = pdct_parsing(fpath, url, products)
print(products[url])
######################################
# # Download images ###########
######################################
# Download images
from ers import download_img
for url, pdt in products.items():
if 'pdct_img_main_url' in pdt and pdt['pdct_img_main_url'] and brm.find_brand(pdt['pdct_name_on_eretailer'])['brand'] in mh_brands:
print(pdt['pdct_name_on_eretailer'] + "." + pdt['pdct_img_main_url'].split('.')[-1])
orig_img_path = img_path_namer(shop_id, pdt['pdct_name_on_eretailer'])
img_path = download_img(pdt['pdct_img_main_url'], orig_img_path, shop_id=shop_id, decode_content=False, gzipped=False, debug=False)
if img_path:
products[url].update({'img_path': img_path, 'img_hash': file_hash(img_path)})
create_csvs(products, categories, searches, shop_id, fpath_namer(shop_id, 'raw_csv'), COLLECTION_DATE)
validate_raw_files(fpath_namer(shop_id, 'raw_csv'))
check_products_detection(shop_id, fpath_namer(shop_id, 'raw_csv'), shop_inventory_lw_csv)
driver.quit()
| [
"pierre.chevalier@epitech.eu"
] | pierre.chevalier@epitech.eu |
f6687c34620096ecab62771b37059ffc0b24c773 | 0e4f76fc3f9aae8b00a67806dffcfc6360e310d1 | /classes/ifcannotationfillareaoccurrence.py | 153502113434a6de9b77ae22bb820913bb7669aa | [] | no_license | HassanEmam/PyoIFC | e11f3b330d3e758aaae47ee9e70b2ae5b5847432 | c2ec4e017ce366b07686491dbe22827aec2711c0 | refs/heads/master | 2020-07-31T14:27:53.305768 | 2019-09-24T15:41:28 | 2019-09-24T15:41:28 | 210,630,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | # NOTE: IFCANNOTATIONOCCURRENCE is referenced but never imported in the
# original file; the import below is an assumed fix (module path is a guess
# based on the repo's /classes/ layout).
from ifcannotationoccurrence import IFCANNOTATIONOCCURRENCE
class IFCANNOTATIONFILLAREAOCCURRENCE(IFCANNOTATIONOCCURRENCE):
def __init__(self, FillStyleTarget, GlobalOrLocal):
self.FillStyleTarget = FillStyleTarget
self.GlobalOrLocal = GlobalOrLocal | [
"hassan.emam@hotmail.com"
] | hassan.emam@hotmail.com |
8bf9fe5983a368347046e3207f8b7633072051ca | 3546dd5dbcffc8509440c820faa7cf28080c5df7 | /python35/Lib/site-packages/numpy/fft/tests/test_fftpack.py | e46d78fadb1b219e6e2ee42a40dcf35b89a2e984 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LGPL-2.1-only"
] | permissive | Matchoc/python_env | 55ad609c8270cc6148eda22d37f36709d73b3652 | 859d84d1717a265a4085ad29706b12c19c62d36f | refs/heads/master | 2022-02-13T11:05:51.825544 | 2020-06-05T02:42:08 | 2020-06-05T02:42:08 | 75,793,921 | 0 | 1 | Apache-2.0 | 2018-12-14T07:30:28 | 2016-12-07T03:06:13 | Python | UTF-8 | Python | false | false | 6,218 | py | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.random import random
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
from numpy.testing import assert_array_equal
import threading
import sys
if sys.version_info[0] >= 3:
import queue
else:
import Queue as queue
def fft1(x):
L = len(x)
phase = -2j*np.pi*(np.arange(L)/float(L))
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x*np.exp(phase), axis=1)
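# Sanity check (illustrative): the direct DFT above should agree with numpy,
# e.g. np.allclose(fft1(np.arange(4.0)), np.fft.fft(np.arange(4.0))) is True.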
class TestFFTShift(TestCase):
def test_fft_n(self):
self.assertRaises(ValueError, np.fft.fft, [1, 2, 3], 0)
class TestFFT1D(TestCase):
def test_fft(self):
x = random(30) + 1j*random(30)
assert_array_almost_equal(fft1(x), np.fft.fft(x))
assert_array_almost_equal(fft1(x) / np.sqrt(30),
np.fft.fft(x, norm="ortho"))
def test_ifft(self):
x = random(30) + 1j*random(30)
assert_array_almost_equal(x, np.fft.ifft(np.fft.fft(x)))
assert_array_almost_equal(
x, np.fft.ifft(np.fft.fft(x, norm="ortho"), norm="ortho"))
def test_fft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_array_almost_equal(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
np.fft.fft2(x))
assert_array_almost_equal(np.fft.fft2(x) / np.sqrt(30 * 20),
np.fft.fft2(x, norm="ortho"))
def test_ifft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_array_almost_equal(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
np.fft.ifft2(x))
assert_array_almost_equal(np.fft.ifft2(x) * np.sqrt(30 * 20),
np.fft.ifft2(x, norm="ortho"))
def test_fftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_array_almost_equal(
np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
np.fft.fftn(x))
assert_array_almost_equal(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
np.fft.fftn(x, norm="ortho"))
def test_ifftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_array_almost_equal(
np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
np.fft.ifftn(x))
assert_array_almost_equal(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
np.fft.ifftn(x, norm="ortho"))
def test_rfft(self):
x = random(30)
assert_array_almost_equal(np.fft.fft(x)[:16], np.fft.rfft(x))
assert_array_almost_equal(np.fft.rfft(x) / np.sqrt(30),
np.fft.rfft(x, norm="ortho"))
def test_irfft(self):
x = random(30)
assert_array_almost_equal(x, np.fft.irfft(np.fft.rfft(x)))
assert_array_almost_equal(
x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"))
def test_rfft2(self):
x = random((30, 20))
assert_array_almost_equal(np.fft.fft2(x)[:, :11], np.fft.rfft2(x))
assert_array_almost_equal(np.fft.rfft2(x) / np.sqrt(30 * 20),
np.fft.rfft2(x, norm="ortho"))
def test_irfft2(self):
x = random((30, 20))
assert_array_almost_equal(x, np.fft.irfft2(np.fft.rfft2(x)))
assert_array_almost_equal(
x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"))
def test_rfftn(self):
x = random((30, 20, 10))
assert_array_almost_equal(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x))
assert_array_almost_equal(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
np.fft.rfftn(x, norm="ortho"))
def test_irfftn(self):
x = random((30, 20, 10))
assert_array_almost_equal(x, np.fft.irfftn(np.fft.rfftn(x)))
assert_array_almost_equal(
x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"))
def test_hfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_array_almost_equal(np.fft.fft(x), np.fft.hfft(x_herm))
assert_array_almost_equal(np.fft.hfft(x_herm) / np.sqrt(30),
np.fft.hfft(x_herm, norm="ortho"))
    def test_ihfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_array_almost_equal(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)))
assert_array_almost_equal(
x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"),
norm="ortho"))
class TestFFTThreadSafe(TestCase):
threads = 16
input_shape = (800, 200)
def _test_mtsame(self, func, *args):
def worker(args, q):
q.put(func(*args))
q = queue.Queue()
expected = func(*args)
# Spin off a bunch of threads to call the same function simultaneously
t = [threading.Thread(target=worker, args=(args, q))
for i in range(self.threads)]
[x.start() for x in t]
[x.join() for x in t]
# Make sure all threads returned the correct value
for i in range(self.threads):
assert_array_equal(q.get(timeout=5), expected,
'Function returned wrong value in multithreaded context')
def test_fft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.fft, a)
def test_ifft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.ifft, a)
def test_rfft(self):
a = np.ones(self.input_shape)
self._test_mtsame(np.fft.rfft, a)
def test_irfft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.irfft, a)
if __name__ == "__main__":
run_module_suite()
| [
"matchoc@hotmail.com"
] | matchoc@hotmail.com |
00ffa9623b835b0bf4f8cab421978522270fe77f | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/scripts/zd/ats_ZD_QingDao_ZD_CLI.py | 9a483fb34fa64b69afd278f3ba300ea8d274eb7a | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,930 | py | import sys
import libZD_TestSuite as testsuite
from RuckusAutoTest.common import lib_KwList as kwlist
def tcid(tcid):
return "TCID:32.%02d" % tcid
def defineTestConfiguration(target_station):
test_cfgs = []
test_name = 'ZD_CLI_Testing'
common_name = 'wlaninfo -V: show VAP info'
test_cfgs.append(({'testcase':'wlaninfo_v'}, test_name, common_name, tcid(1)))
common_name = 'wlaninfo -S: show station info'
test_cfgs.append(({'testcase':'wlaninfo_s', 'target_station':target_station}, test_name, common_name, tcid(2)))
common_name = 'wlaninfo -T: show timer'
test_cfgs.append(({'testcase':'wlaninfo_t'}, test_name, common_name, tcid(3)))
common_name = 'wlaninfo -C: show configured APs'
test_cfgs.append(({'testcase':'wlaninfo_c'}, test_name, common_name, tcid(4)))
common_name = 'wlaninfo -R: show Rogue devices'
test_cfgs.append(({'testcase':'wlaninfo_r'}, test_name, common_name, tcid(5)))
common_name = 'wlaninfo -W: show WLAN info'
test_cfgs.append(({'testcase':'wlaninfo_w'}, test_name, common_name, tcid(6)))
common_name = 'wlaninfo -U: show users info'
test_cfgs.append(({'testcase':'wlaninfo_u'}, test_name, common_name, tcid(8)))
common_name = 'wlaninfo -M: show Mesh entries'
test_cfgs.append(({'testcase':'wlaninfo_m'}, test_name, common_name, tcid(9)))
common_name = 'apmgrinfo -a: Display APs info'
test_cfgs.append(({'testcase':'apmgrinfo_a'}, test_name, common_name, tcid(11)))
common_name = 'apmgrinfo -p: ping APMgr'
test_cfgs.append(({'testcase':'apmgrinfo_p'}, test_name, common_name, tcid(13)))
common_name = 'ping: ping to a destination'
test_cfgs.append(({'testcase':'ping'}, test_name, common_name, tcid(15)))
common_name = 'stp: enable/disable STP'
test_cfgs.append(({'testcase':'stp'}, test_name, common_name, tcid(16)))
common_name = 'upnp: enable/disable UPNP'
test_cfgs.append(({'testcase':'upnp'}, test_name, common_name, tcid(17)))
common_name = 'wlaninfo -A: show all active APs'
test_cfgs.append(({'testcase':'wlaninfo_a'}, test_name, common_name, tcid(23)))
common_name = 'wlaninfo --system: show system parameters'
test_cfgs.append(({'testcase':'wlaninfo_system'}, test_name, common_name, tcid(24)))
common_name = 'wlaninfo --dos: show all DOS entries'
test_cfgs.append(({'testcase':'wlaninfo_dos'}, test_name, common_name, tcid(25)))
common_name = 'wlaninfo --web-auth: show all authorized clients'
test_cfgs.append(({'testcase':'wlaninfo_web_auth', 'target_station':target_station}, test_name, common_name, tcid(26)))
common_name = 'wlaninfo --all-dpsk: show all dynamic PSK'
test_cfgs.append(({'testcase':'wlaninfo_dpsk', 'target_station':target_station}, test_name, common_name, tcid(28)))
common_name = 'wlaninfo --dcert: show all dynamic certificate'
test_cfgs.append(({'testcase':'wlaninfo_dcert', 'target_station':target_station}, test_name, common_name, tcid(29)))
common_name = 'wlaninfo --acl: show all L2 ACL'
test_cfgs.append(({'testcase':'wlaninfo_acl'}, test_name, common_name, tcid(30)))
common_name = 'wlaninfo --role: show all role'
test_cfgs.append(({'testcase':'wlaninfo_role'}, test_name, common_name, tcid(31)))
common_name = 'wlaninfo --auth: show all Authentication servers'
test_cfgs.append(({'testcase':'wlaninfo_auth'}, test_name, common_name, tcid(32)))
common_name = 'wlaninfo --pmk-cache: show all PMK cache'
test_cfgs.append(({'testcase':'wlaninfo_pmk', 'target_station':target_station}, test_name, common_name, tcid(33)))
common_name = 'wlaninfo --mesh-ap: show Mesh APs'
test_cfgs.append(({'testcase':'wlaninfo_mesh_ap'}, test_name, common_name, tcid(34)))
common_name = 'wlaninfo --mesh-topology: show Mesh Topology'
test_cfgs.append(({'testcase':'wlaninfo_mesh_topology'}, test_name, common_name, tcid(35)))
common_name = 'wlaninfo --mesh-history: show Mesh History'
test_cfgs.append(({'testcase':'wlaninfo_mesh_history'}, test_name, common_name, tcid(36)))
common_name = 'wlaninfo --all-wlangroup: show all WLAN group'
test_cfgs.append(({'testcase':'wlaninfo_wlangroup'}, test_name, common_name, tcid(37)))
common_name = 'wlaninfo -all-apgroup: show all AP groups'
test_cfgs.append(({'testcase':'wlaninfo_apgroup'}, test_name, common_name, tcid(38)))
common_name = 'wlaninfo --all-disc-ap: show all disconnected APs'
test_cfgs.append(({'testcase':'wlaninfo_disc_ap'}, test_name, common_name, tcid(39)))
common_name = 'show ap: show all active APs'
test_cfgs.append(({'testcase': 'show_ap'}, test_name, common_name, tcid(41)))
common_name = 'show ap: show all stations'
test_cfgs.append(({'testcase': 'show_station', 'target_station': target_station},
test_name, common_name, tcid(42)))
return test_cfgs
def make_test_suite(**kwargs):
#tbi = getTestbed(**kwargs)
#tb_cfg = testsuite.getTestbedConfig(tbi)
tb = testsuite.getTestbed2(**kwargs)
tbcfg = testsuite.getTestbedConfig(tb)
ap_sym_dict = tbcfg['ap_sym_dict']
sta_ip_list = tbcfg['sta_ip_list']
target_station = testsuite.getTargetStation(sta_ip_list)
ts_name = 'ZD CLI'
ts = testsuite.get_testsuite(ts_name, 'ZD CLI')
test_cfgs = defineTestConfiguration(target_station)
test_order = 1
test_added = 0
for test_params, test_name, common_name, tcid in test_cfgs:
cname = "%s - %s" % (tcid, common_name)
if testsuite.addTestCase(ts, test_name, cname, test_params, test_order) > 0:
test_added += 1
test_order += 1
print "Add test case with test_name: %s\n\tcommon_name: %s" % (test_name, cname)
print "\n-- Summary: added %d test cases into test suite '%s'" % (test_added, ts.name)
if __name__ == "__main__":
_dict = kwlist.as_dict( sys.argv[1:] )
make_test_suite(**_dict)
| [
"tan@xx.com"
] | tan@xx.com |
774aad474bad9392a0edf5653a83c94f0f1b4b48 | fd55b40f4279c25c0d06dccd6bcd77b9a7dbff28 | /bivariate2/Udropout_kernels.py | 268deb2613b9ce61ee6d63aeff92071b77771fe2 | [] | no_license | albertfxwang/mypytools | 0e30420e2a570a3e18564fd28fc56eb55ecccbf9 | c6cdc8f1914cbc3a5b94dd25501782a3daf542f5 | refs/heads/master | 2020-04-10T19:06:54.193920 | 2015-04-20T01:36:37 | 2015-04-20T01:36:37 | 30,038,302 | 1 | 1 | null | 2015-01-29T19:55:09 | 2015-01-29T19:55:09 | null | UTF-8 | Python | false | false | 11,978 | py | #!/usr/bin/env python
import numpy as np
from transfunc import kernelgrid, kernel
from dropout_kernels import KernelFactory, DropoutKernelGrid
from dropout_selection import lbg_colorcrit as lcc
import os, sys
import udropsim_uflux as uu
import cPickle
from pygoods import Ftable
simcat_dir = '/Users/khuang/Dropbox/Research/bivariate/bivariate_fit/dropsim_catalogs'
kgrid_dir = '/Users/khuang/Dropbox/Research/bivariate/bivariate_fit/dropsim_kernels'
tfitsim_cat = {'udf': os.path.join(simcat_dir, 'run1_udf_130625.fits'),
'deep': os.path.join(simcat_dir, 'run2_tfitsim_130322.fits')}
tfitsim_cat['ers'] = tfitsim_cat['deep']
tfitsim_cat['wide'] = tfitsim_cat['deep']
arcmin_str = 2.9089e-4
area = {}
# area so far only includes GOODS-S
# will add new keys for GOODS-N? Like deep_N, wide_N, etc.
area['udf'] = 4.497 * arcmin_str**2 # = 3.805226134736999e-07
area['deep'] = 67.318 * arcmin_str**2 # = 5.696246674187799e-06
area['ers'] = 46.359 * arcmin_str**2 # = 3.924536093598e-06
area['wide'] = 50.867 * arcmin_str**2 # = 4.304212537150699e-06
area_unit = 'arcmin2'
catalogs = {}
## --------------- BELOW ARE PSF-MATCHED SIMULATION CATALOGS --------------- ##
catalogs['deep'] = os.path.join(simcat_dir, 'udropssim_run4m_deep_140704.fits')
catalogs['udf'] = os.path.join(simcat_dir, 'udropssim_run4m_udf_140801.fits')
catalogs['ers'] = os.path.join(simcat_dir, 'udropssim_run4m_ers_140721.fits')
catalogs['wide'] = os.path.join(simcat_dir, 'udropssim_run4m_wide_140721.fits')
catalogs_ubz = {}
catalogs_ubz['udf'] = '/Users/khuang/CANDELS/goodss/udrops_sim/udf/udrops_run5m_udf_140801.fits'
## ------------------------------------------------------------------------- ##
pixscale = 0.06 # the pixel scale of the images used in the simulations
umag_kpdf = {}
umag_kpdf['deep'] = os.path.join(kgrid_dir, 'uvimos_gds_mag_pdf_140310.p')
umag_kpdf['udf'] = os.path.join(kgrid_dir, 'uvimos_hudf_mag_pdf_140310.p')
umag_kpdf['ers'] = umag_kpdf['deep']
umag_kpdf['wide'] = umag_kpdf['deep']
umag_1sig_kpdf = {}
umag_1sig_kpdf['deep'] = os.path.join(kgrid_dir, 'uvimos_gds_mag_1sig_pdf_140310.p')
umag_1sig_kpdf['udf'] = os.path.join(kgrid_dir, 'uvimos_hudf_mag_1sig_pdf_140310.p')
umag_1sig_kpdf['ers'] = umag_1sig_kpdf['deep']
umag_1sig_kpdf['wide'] = umag_1sig_kpdf['deep']
# color-correction because I didn't run the sims with PSF-matched images
# however, the corrections are small... not sure how much difference it makes
# ISO_COLOR + color_corr ~= INPUT_COLOR
# the corrections are calculated from simulation catalogs and applies to HST
# colors only.
vmy_corr = {}
# vmy_corr['udf'] = 0.025
# vmy_corr['deep'] = 0.050
# vmy_corr['ers'] = 0.040
# vmy_corr['wide'] = 0.052
vmy_corr['udf'] = 0.
vmy_corr['deep'] = 0.
vmy_corr['ers'] = 0.
vmy_corr['wide'] = 0.
class UdropsKernelFactory(KernelFactory):
"""
For U-dropouts in GOODS-S (VIMOS U-band).
"""
# This is for UBVY selection. Write another class for UBY selection if
# necessary.
def __init__(self, catalog, field, droplabel="UBVY105", interpolate=True, interp_dz=0.02, n_repeat=5, expand=[0.0, 0.0], mag0=22.0, SN_lolim={'wfc3_f160w':5.0,'wfc3_f105w':5.0,'acs_f435w':3.0,'acs_f606w':5.0}, mag_in='m1500_in', re_in='re_in_arcsec', hstBands=['acs_f435w','acs_f606w','wfc3_f105w','wfc3_f160w'],uband='vimos_u'):
z0 = 3.5
KernelFactory.__init__(self, catalog, z0, 2.5, 4.5,
interpolate=interpolate, n_repeat=n_repeat,
expand=expand, mag_in=mag_in, re_in=re_in)
self.field = field
self.lcc = lcc.colorcrit()
self.SN_lolim = SN_lolim
print "droplabel: ", droplabel
self.lcc = self.lcc('uvimos', droplabel)
print "Eval_string:", self.lcc.eval
# if self.field.lower() == 'ers':
# if droplabel == None:
# droplabel = 'UBVY098'
# self.lcc = self.lcc('uvimos', droplabel)
# # self.bands=['acs_f435w','acs_f606w','wfc3_f098m','wfc3_f160w']
# else:
# if droplabel == None:
# droplabel = 'UBVY105'
# self.lcc = self.lcc('uvimos', droplabel)
# self.bands=['acs_f435w','acs_f606w','wfc3_f105w','wfc3_f160w']
self.bands = hstBands
self.uband = uband
self.area = area[field]
self.area_unit = area_unit
self.pixscale = pixscale
self.umag_kpdf = umag_kpdf[field]
self.umag_1sig_kpdf = umag_1sig_kpdf[field]
self._drawn_umag = False
self.mag0 = mag0
# the lower limit of input U-band magnitude from simulations
def testdrawMag(self):
"""
For tests.
"""
self.vimos_u_mag = self.c.u_mag_in
self.vimos_u_mag_1sig = self.c.u_mag_in
def drawUMag(self):
# Draw U-band magnitude from probability distribution functions calculated
# using TFIT simulation catalogs in VIMOS U-band.
# In princple, we draw an output U-band magnitude given U-band input
# magnitudes to simulate photometric errors. We do it this way because
# running enough simulations with TFIT is prohibitively expensive.
cu = Ftable(tfitsim_cat[self.field])
# mag0 = 22.0
umag_in = cu.uvimos_mag_in[cu.uvimos_fitquerr > 0.]
print "Repeat the drawing process %d times" % self.n_repeat
vimos_u_mag = uu.draw_umag(np.tile(self.c.u_mag_in, self.n_repeat),
self.umag_kpdf, mag0=self.mag0)
vimos_u_mag_1sig = uu.draw_ulimmag(self.n_repeat*len(self.c.u_mag_in),
self.umag_1sig_kpdf)
# If the drawn U-band 1-sigma limit mag is brighter than the drawn
# U-band magnitude, use the 1-sigma limit magnitude as the measured
# U-band magnitude.
self._drawn_umag = True
return np.minimum(vimos_u_mag, vimos_u_mag_1sig)
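        # Illustrative consequence of the np.minimum above (not original code):
        # if the drawn magnitude is 27.3 but the drawn 1-sigma limiting
        # magnitude is 26.8, the returned "measured" magnitude is 26.8, i.e.
        # the object is treated as at the U-band detection limit.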
def ColorSelection(self, vmy_corr=0.0, full_output=False):
# define color selection
# Also enforce S/N limits!
# Require S/N >= sigma_low in detected bands; also require detection
self.SNcrit = np.ones(len(self.c.d) * self.n_repeat, 'bool') & np.tile(self.c.detect, self.n_repeat)
if len(self.SN_lolim.keys()) > 0:
print "sn_lolim", self.SN_lolim
for b in self.SN_lolim.keys():
assert (b in self.bands)
SN_band = (getattr(self, b + '_sn') >= self.SN_lolim[b])
SN_band = np.tile(SN_band, self.n_repeat)
self.SNcrit = np.logical_and(self.SNcrit, SN_band)
## Second: perform color selection!
print "Do selections..."
umag = getattr(self, '%s_mag' % self.uband)
m1 = getattr(self, '%s_mag' % self.bands[0])
color1 = umag - m1
m2 = getattr(self, '%s_mag' % self.bands[1])
# m3 = getattr(self, '%s_mag' % self.bands[2])
if len(self.lcc.bands) == 3:
# color1 = m1 - m2
# color2 = m2 - m3
color2 = m1 - m2
else:
m3 = getattr(self, '%s_mag' % self.bands[2])
color2 = m2 - m3
# m4 = getattr(self, '%s_mag' % self.bands[3])
# color1 = self.vimos_u_mag-self.acs_f435w_mag
# color1 = m1 - m2
# color2 = m3 - m4
# if self.field.lower() == 'ers':
# color2 = self.acs_f606w_mag-self.wfc3_f098m_mag
# else:
# color2 = self.acs_f606w_mag-self.wfc3_f105w_mag
# applies color-correction
color2 = color2 + vmy_corr
self.lcc.select(color1, color2)
colorcrit = self.lcc.crit.copy()
self.lcc.crit = self.lcc.crit & self.SNcrit # Fold in S/N criteria
# isDropout = self.lcc.crit
print "Selections done."
print "Total number of objects in the catalog: %d" % len(self.c.d)
print "Total number of objects satisfying color cuts: %d" % (np.sum(colorcrit)/self.n_repeat)
print "Total number of objects satisfying S/N cuts: %d" % (np.sum(self.SNcrit)/self.n_repeat)
print "Total number selected as U-dropouts: %d" % (np.sum(self.lcc.crit)/(self.n_repeat))
if full_output:
return self.lcc.crit, color1, color2
else:
return self.lcc.crit
def SelectDropout(self, full_output=False):
"""
Main driver method that runs the whole procedure.
"""
if not self._drawn_umag:
for b in self.bands:
self.setMagnitudes(b)
# self.vimos_u_mag = self.drawUMag()
setattr(self, '%s_mag' % self.uband, self.drawUMag())
isDropout = self.ColorSelection(full_output=full_output)
return isDropout
class UdropsBoxcarKernelFactory(UdropsKernelFactory):
def SelectDropout(self):
"""
Return a boxcar P(z).
"""
z0 = 3.5
zlo = z0 - 0.5
zhi = z0 + 0.5
isDropout = np.where((self.c.redshift>=zlo) & (self.c.redshift<zhi),
True, False)
isDropout = isDropout & (self.c.detect)
return isDropout
class UdropoutKernelGrid(DropoutKernelGrid):
"""
The top-level class that a user should call.
"""
def __init__(self, field, catalogs=catalogs, droplabel="UBVY105", hstBands=['acs_f435w','acs_f606w','wfc3_f105w','wfc3_f160w'], kwargs_factory={}, kwargs_kgrid={}):
if field == 'udf':
kwargs_factory['mag0'] = 25.0
else:
kwargs_factory['mag0'] = 23.0
Factory = UdropsBoxcarKernelFactory(catalogs[field], field,
hstBands=hstBands)
super(UdropoutKernelGrid, self).__init__(Factory, **kwargs_kgrid)
class UdropoutBoxcarKernelGrid(DropoutKernelGrid):
"""
A boxcar dropout kernel grid (P(z) = 1 between z=3 and z=4) for testing.
"""
def __init__(self, field, n_repeat=1, hstBands=['acs_f435w','acs_f606w','wfc3_f105w','wfc3_f160w'], kwargs_factory={}, kwargs_kgrid={}):
# if field == 'udf':
# kwargs_factory['mag0'] = 25.0
# else:
# kwargs_factory['mag0'] = 23.0
Factory = UdropsBoxcarKernelFactory(catalogs[field], field, n_repeat=1,
droplabel='UBVY105',
interpolate=True,
hstBands=hstBands, **kwargs_factory)
print "Color criteria: ", Factory.lcc.criteria
# super(UdropoutKernelGrid, self).__init__(Factory, **kwargs_kgrid)
DropoutKernelGrid.__init__(self, Factory, **kwargs_kgrid)
def make_udrops_ubvy_kernel_grid(field, filename, droplabel="UBVY105",kwargs_factory={'n_repeat':20, 're_in':'re_in_arcsec'}, kwargs_kgrid={'ylimits':[-2.4, 1.0]}):
"""
A convient function to do everything.
The keyword arguments **kwargs are passed to UdropoutKernelGrid.__init__().
"""
if field == 'ers':
hstBands = ['acs_f435w','acs_f606w','wfc3_f098m','wfc3_f160w']
kwargs_factory['SN_lolim'] = {'wfc3_f160w':5.0,'wfc3_f098m':5.0,'acs_f435w':3.0,'acs_f606w':3.0}
else:
hstBands = ['acs_f435w','acs_f606w','wfc3_f105w','wfc3_f160w']
kwargs_factory['SN_lolim'] = {'wfc3_f160w':5.0,'wfc3_f105w':5.0,'acs_f606w':3.0,'acs_f435w':3.0}
# For ERS, need to also supply a custom SN_lolim argument.
Grid = UdropoutKernelGrid(field, droplabel=droplabel,
catalogs=catalogs, hstBands=hstBands,
kwargs_factory=kwargs_factory,
kwargs_kgrid=kwargs_kgrid)
f = open(filename, 'wb')
cPickle.dump(Grid, f, 2)
f.close()
def make_udrops_ubz_kernel_grid(field, filename, droplabel='W14',kwargs_factory={'n_repeat':20, 're_in':'re_in_arcsec'}, kwargs_kgrid={'ylimits':[-2.5,1.0]}):
hstBands = ['acs_f435w','acs_f850lp','wfc3_f160w']
kwargs_factory['SN_lolim'] = {'wfc3_f160w':5.0,'acs_f435w':3.0,'acs_f850lp':5.0}
Grid = UdropoutKernelGrid(field, droplabel=droplabel,
catalogs=catalogs_ubz, hstBands=hstBands,
kwargs_factory=kwargs_factory,
kwargs_kgrid=kwargs_kgrid)
f = open(filename, 'wb')
cPickle.dump(Grid, f, 2)
f.close()
| [
"astrokuang@gmail.com"
] | astrokuang@gmail.com |
12a2227ef7b5b606b94c018fd3c7fd65044f1ce6 | c6634a86b42477746df3fb37590ec40896344245 | /Ciclo 1/Lab ICC/Funciones/test.py | fce8f86be80129393576999720e64b92fdf43ce2 | [] | no_license | lordjuacs/ICC-Trabajos | 2163818fe52d3102c4258b1e8f3462092b55f8e1 | 68f4fc3ae1bd63d76fdbc0490300b9e4130e792e | refs/heads/master | 2022-04-11T06:58:00.367990 | 2020-03-29T19:40:43 | 2020-03-29T19:40:43 | 251,117,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | def test(c):
d = 3
print(c)
print(d)
a = 0
if a == 0:
b = 1
test(7)
print(a)
print(b)
print(c)  # NameError: c is local to test(), so it does not exist here
print(d)  # never reached; d is likewise local to test()
| [
"50754596+lordjuacs@users.noreply.github.com"
] | 50754596+lordjuacs@users.noreply.github.com |
a460eaab3d3a06de2e6ad1c493b1cd3337f33ec9 | 72185b74081ffc4d58b8de8081a5ccebed29113a | /tensorflow2/tf2cv/models/inceptionv4.py | 4e525c924823b7e05aca449852b73f97bba12e79 | [
"MIT"
] | permissive | ethan-jiang-1/imgclsmob | f575e5f53bf80a25d9ddf832178da22ca8425fc8 | ae4913400b64c064b3de7c9536b1b6c92f1e06e4 | refs/heads/master | 2023-04-21T06:07:05.835484 | 2021-05-16T10:28:32 | 2021-05-16T10:28:32 | 367,827,134 | 1 | 0 | MIT | 2021-05-16T08:42:49 | 2021-05-16T08:42:49 | null | UTF-8 | Python | false | false | 28,977 | py | """
InceptionV4 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionV4', 'inceptionv4']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, AvgPool2d, Conv2d, BatchNorm, SimpleSequential, Concurrent, flatten, is_channels_first,\
get_channel_axis
class InceptConv(nn.Layer):
"""
InceptionV4 specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
data_format="channels_last",
**kwargs):
super(InceptConv, self).__init__(**kwargs)
self.conv = Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=False,
data_format=data_format,
name="conv")
self.bn = BatchNorm(
momentum=0.1,
epsilon=1e-3,
data_format=data_format,
name="bn")
self.activ = nn.ReLU()
def call(self, x, training=None):
x = self.conv(x)
x = self.bn(x, training=training)
x = self.activ(x)
return x
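# Illustrative shape check (assumption, not part of the original module): with
# data_format="channels_last", InceptConv(3, 32, kernel_size=3, strides=2,
# padding=0) maps a (1, 299, 299, 3) tensor to (1, 149, 149, 32), since
# floor((299 - 3) / 2) + 1 = 149.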
def incept_conv1x1(in_channels,
out_channels,
data_format="channels_last",
**kwargs):
"""
1x1 version of the InceptionV4 specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return InceptConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=1,
padding=0,
data_format=data_format,
**kwargs)
def incept_conv3x3(in_channels,
out_channels,
strides,
padding=1,
data_format="channels_last",
**kwargs):
"""
3x3 version of the InceptionV4 specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 0
Padding value for convolution layer.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return InceptConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
data_format=data_format,
**kwargs)
class MaxPoolBranch(nn.Layer):
"""
InceptionV4 specific max pooling branch block.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
data_format="channels_last",
**kwargs):
super(MaxPoolBranch, self).__init__(**kwargs)
self.pool = MaxPool2d(
pool_size=3,
strides=2,
padding=0,
data_format=data_format,
name="pool")
def call(self, x, training=None):
x = self.pool(x)
return x
class AvgPoolBranch(nn.Layer):
"""
InceptionV4 specific average pooling branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
data_format="channels_last",
**kwargs):
super(AvgPoolBranch, self).__init__(**kwargs)
self.pool = AvgPool2d(
pool_size=3,
strides=1,
padding=1,
# count_include_pad=False,
data_format=data_format,
name="pool")
self.conv = incept_conv1x1(
in_channels=in_channels,
out_channels=out_channels,
data_format=data_format,
name="conv")
def call(self, x, training=None):
x = self.pool(x)
x = self.conv(x, training=training)
return x
class Conv1x1Branch(nn.Layer):
"""
InceptionV4 specific convolutional 1x1 branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
data_format="channels_last",
**kwargs):
super(Conv1x1Branch, self).__init__(**kwargs)
self.conv = incept_conv1x1(
in_channels=in_channels,
out_channels=out_channels,
data_format=data_format,
name="conv")
def call(self, x, training=None):
x = self.conv(x, training=training)
return x
class Conv3x3Branch(nn.Layer):
"""
InceptionV4 specific convolutional 3x3 branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
data_format="channels_last",
**kwargs):
super(Conv3x3Branch, self).__init__(**kwargs)
self.conv = incept_conv3x3(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
padding=0,
data_format=data_format,
name="conv")
def call(self, x, training=None):
x = self.conv(x, training=training)
return x
class ConvSeqBranch(nn.Layer):
"""
InceptionV4 specific convolutional sequence branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of tuple of int
List of numbers of output channels.
kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
List of convolution window sizes.
strides_list : list of tuple of int or tuple of tuple/list of 2 int
List of strides of the convolution.
padding_list : list of tuple of int or tuple of tuple/list of 2 int
List of padding values for convolution layers.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels_list,
kernel_size_list,
strides_list,
padding_list,
data_format="channels_last",
**kwargs):
super(ConvSeqBranch, self).__init__(**kwargs)
assert (len(out_channels_list) == len(kernel_size_list))
assert (len(out_channels_list) == len(strides_list))
assert (len(out_channels_list) == len(padding_list))
self.conv_list = SimpleSequential(name="conv_list")
for i, (out_channels, kernel_size, strides, padding) in enumerate(zip(
out_channels_list, kernel_size_list, strides_list, padding_list)):
self.conv_list.children.append(InceptConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
name="conv{}".format(i + 1)))
in_channels = out_channels
def call(self, x, training=None):
x = self.conv_list(x, training=training)
return x
class ConvSeq3x3Branch(nn.Layer):
"""
InceptionV4 specific convolutional sequence branch block with splitting by 3x3.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels_list : list of tuple of int
List of numbers of output channels for middle layers.
kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
List of convolution window sizes.
strides_list : list of tuple of int or tuple of tuple/list of 2 int
List of strides of the convolution.
padding_list : list of tuple of int or tuple of tuple/list of 2 int
List of padding values for convolution layers.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels_list,
kernel_size_list,
strides_list,
padding_list,
data_format="channels_last",
**kwargs):
super(ConvSeq3x3Branch, self).__init__(**kwargs)
self.data_format = data_format
self.conv_list = SimpleSequential(name="conv_list")
for i, (mid_channels, kernel_size, strides, padding) in enumerate(zip(
mid_channels_list, kernel_size_list, strides_list, padding_list)):
self.conv_list.children.append(InceptConv(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
name="conv{}".format(i + 1)))
in_channels = mid_channels
self.conv1x3 = InceptConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(1, 3),
strides=1,
padding=(0, 1),
data_format=data_format,
name="conv1x3")
self.conv3x1 = InceptConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 1),
strides=1,
padding=(1, 0),
data_format=data_format,
name="conv3x1")
def call(self, x, training=None):
x = self.conv_list(x, training=training)
y1 = self.conv1x3(x, training=training)
y2 = self.conv3x1(x, training=training)
x = tf.concat([y1, y2], axis=get_channel_axis(self.data_format))
return x
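# Derived note (added for clarity, not original code): conv1x3 and conv3x1 are
# concatenated along the channel axis, so this branch emits 2 * out_channels
# feature maps; e.g. out_channels=256 yields a 512-channel output tensor.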
class InceptionAUnit(nn.Layer):
"""
InceptionV4 type Inception-A unit.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
data_format="channels_last",
**kwargs):
super(InceptionAUnit, self).__init__(**kwargs)
in_channels = 384
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(Conv1x1Branch(
in_channels=in_channels,
out_channels=96,
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(64, 96),
kernel_size_list=(1, 3),
strides_list=(1, 1),
padding_list=(0, 1),
data_format=data_format,
name="branch2"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(64, 96, 96),
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 1),
padding_list=(0, 1, 1),
data_format=data_format,
name="branch3"))
self.branches.children.append(AvgPoolBranch(
in_channels=in_channels,
out_channels=96,
data_format=data_format,
name="branch4"))
def call(self, x, training=None):
x = self.branches(x, training=training)
return x
class ReductionAUnit(nn.Layer):
"""
InceptionV4 type Reduction-A unit.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
data_format="channels_last",
**kwargs):
super(ReductionAUnit, self).__init__(**kwargs)
in_channels = 384
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(384,),
kernel_size_list=(3,),
strides_list=(2,),
padding_list=(0,),
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 224, 256),
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 2),
padding_list=(0, 1, 0),
data_format=data_format,
name="branch2"))
self.branches.children.append(MaxPoolBranch(
data_format=data_format,
name="branch3"))
def call(self, x, training=None):
x = self.branches(x, training=training)
return x
class InceptionBUnit(nn.Layer):
"""
InceptionV4 type Inception-B unit.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
data_format="channels_last",
**kwargs):
super(InceptionBUnit, self).__init__(**kwargs)
in_channels = 1024
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(Conv1x1Branch(
in_channels=in_channels,
out_channels=384,
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 224, 256),
kernel_size_list=(1, (1, 7), (7, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 3), (3, 0)),
data_format=data_format,
name="branch2"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 192, 224, 224, 256),
kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)),
strides_list=(1, 1, 1, 1, 1),
padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
data_format=data_format,
name="branch3"))
self.branches.children.append(AvgPoolBranch(
in_channels=in_channels,
out_channels=128,
data_format=data_format,
name="branch4"))
def call(self, x, training=None):
x = self.branches(x, training=training)
return x
class ReductionBUnit(nn.Layer):
"""
InceptionV4 type Reduction-B unit.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
data_format="channels_last",
**kwargs):
super(ReductionBUnit, self).__init__(**kwargs)
in_channels = 1024
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 192),
kernel_size_list=(1, 3),
strides_list=(1, 2),
padding_list=(0, 0),
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(256, 256, 320, 320),
kernel_size_list=(1, (1, 7), (7, 1), 3),
strides_list=(1, 1, 1, 2),
padding_list=(0, (0, 3), (3, 0), 0),
data_format=data_format,
name="branch2"))
self.branches.children.append(MaxPoolBranch(
data_format=data_format,
name="branch3"))
def call(self, x, training=None):
x = self.branches(x, training=training)
return x
class InceptionCUnit(nn.Layer):
"""
InceptionV4 type Inception-C unit.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
data_format="channels_last",
**kwargs):
super(InceptionCUnit, self).__init__(**kwargs)
in_channels = 1536
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(Conv1x1Branch(
in_channels=in_channels,
out_channels=256,
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeq3x3Branch(
in_channels=in_channels,
out_channels=256,
mid_channels_list=(384,),
kernel_size_list=(1,),
strides_list=(1,),
padding_list=(0,),
data_format=data_format,
name="branch2"))
self.branches.children.append(ConvSeq3x3Branch(
in_channels=in_channels,
out_channels=256,
mid_channels_list=(384, 448, 512),
kernel_size_list=(1, (3, 1), (1, 3)),
strides_list=(1, 1, 1),
padding_list=(0, (1, 0), (0, 1)),
data_format=data_format,
name="branch3"))
self.branches.children.append(AvgPoolBranch(
in_channels=in_channels,
out_channels=256,
data_format=data_format,
name="branch4"))
def call(self, x, training=None):
x = self.branches(x, training=training)
return x
class InceptBlock3a(nn.Layer):
"""
InceptionV4 type Mixed-3a block.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
data_format="channels_last",
**kwargs):
super(InceptBlock3a, self).__init__(**kwargs)
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(MaxPoolBranch(
data_format=data_format,
name="branch1"))
self.branches.children.append(Conv3x3Branch(
in_channels=64,
out_channels=96,
data_format=data_format,
name="branch2"))
def call(self, x, training=None):
x = self.branches(x, training=training)
return x
class InceptBlock4a(nn.Layer):
"""
InceptionV4 type Mixed-4a block.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
data_format="channels_last",
**kwargs):
super(InceptBlock4a, self).__init__(**kwargs)
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(ConvSeqBranch(
in_channels=160,
out_channels_list=(64, 96),
kernel_size_list=(1, 3),
strides_list=(1, 1),
padding_list=(0, 0),
data_format=data_format,
name="branch1"))
self.branches.children.append(ConvSeqBranch(
in_channels=160,
out_channels_list=(64, 64, 64, 96),
kernel_size_list=(1, (1, 7), (7, 1), 3),
strides_list=(1, 1, 1, 1),
padding_list=(0, (0, 3), (3, 0), 0),
data_format=data_format,
name="branch2"))
def call(self, x, training=None):
x = self.branches(x, training=training)
return x
class InceptBlock5a(nn.Layer):
"""
InceptionV4 type Mixed-5a block.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
data_format="channels_last",
**kwargs):
super(InceptBlock5a, self).__init__(**kwargs)
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.children.append(Conv3x3Branch(
in_channels=192,
out_channels=192,
data_format=data_format,
name="branch1"))
self.branches.children.append(MaxPoolBranch(
data_format=data_format,
name="branch2"))
def call(self, x, training=None):
x = self.branches(x, training=training)
return x
class InceptInitBlock(nn.Layer):
"""
InceptionV4 specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
data_format="channels_last",
**kwargs):
super(InceptInitBlock, self).__init__(**kwargs)
self.conv1 = InceptConv(
in_channels=in_channels,
out_channels=32,
kernel_size=3,
strides=2,
padding=0,
data_format=data_format,
name="conv1")
self.conv2 = InceptConv(
in_channels=32,
out_channels=32,
kernel_size=3,
strides=1,
padding=0,
data_format=data_format,
name="conv2")
self.conv3 = InceptConv(
in_channels=32,
out_channels=64,
kernel_size=3,
strides=1,
padding=1,
data_format=data_format,
name="conv3")
self.block1 = InceptBlock3a(
data_format=data_format,
name="block1")
self.block2 = InceptBlock4a(
data_format=data_format,
name="block2")
self.block3 = InceptBlock5a(
data_format=data_format,
name="block3")
def call(self, x, training=None):
x = self.conv1(x, training=training)
x = self.conv2(x, training=training)
x = self.conv3(x, training=training)
x = self.block1(x, training=training)
x = self.block2(x, training=training)
x = self.block3(x, training=training)
return x
class InceptionV4(tf.keras.Model):
"""
InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
dropout_rate : float, default 0.0
Fraction of the input units to drop. Must be a number between 0 and 1.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (299, 299)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
dropout_rate=0.0,
in_channels=3,
in_size=(299, 299),
classes=1000,
data_format="channels_last",
**kwargs):
super(InceptionV4, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.data_format = data_format
layers = [4, 8, 4]
normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
reduction_units = [ReductionAUnit, ReductionBUnit]
self.features = SimpleSequential(name="features")
self.features.add(InceptInitBlock(
in_channels=in_channels,
data_format=data_format,
name="init_block"))
for i, layers_per_stage in enumerate(layers):
stage = SimpleSequential(name="stage{}".format(i + 1))
for j in range(layers_per_stage):
if (j == 0) and (i != 0):
unit = reduction_units[i - 1]
else:
unit = normal_units[i]
stage.add(unit(
data_format=data_format,
name="unit{}".format(j + 1)))
self.features.add(stage)
self.features.add(nn.AveragePooling2D(
pool_size=8,
strides=1,
data_format=data_format,
name="final_pool"))
self.output1 = SimpleSequential(name="output1")
if dropout_rate > 0.0:
self.output1.add(nn.Dropout(
rate=dropout_rate,
name="output1/dropout"))
self.output1.add(nn.Dense(
units=classes,
input_dim=1536,
name="output1/fc"))
def call(self, x, training=None):
x = self.features(x, training=training)
x = flatten(x, self.data_format)
x = self.output1(x)
return x
def get_inceptionv4(model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create InceptionV4 model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
net = InceptionV4(**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def inceptionv4(**kwargs):
"""
InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_inceptionv4(model_name="inceptionv4", **kwargs)
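# Minimal inference sketch (illustrative, not part of the original module;
# assumes a TF2 runtime with channels_last data):
#   net = inceptionv4(pretrained=False, data_format="channels_last")
#   x = tf.random.normal((1, 299, 299, 3))   # one 299x299 RGB image
#   logits = net(x, training=False)          # shape (1, 1000)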
def _test():
import numpy as np
import tensorflow.keras.backend as K
data_format = "channels_last"
pretrained = False
models = [
inceptionv4,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
batch = 14
x = tf.random.normal((batch, 3, 299, 299) if is_channels_first(data_format) else (batch, 299, 299, 3))
y = net(x)
assert (tuple(y.shape.as_list()) == (batch, 1000))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != inceptionv4 or weight_count == 42679816)
if __name__ == "__main__":
_test()
| [
"osemery@gmail.com"
] | osemery@gmail.com |
2c9f22569636a53ba7024b43c5fbc0137b68d6ad | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_337/ch6_2020_02_29_19_04_12_994734.py | 7bbc11c4c4ebea79ae6702f72b7f73c447ec34d4 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | def celcius_para_fahrenheit (x):
celcius_para_fahrenheit = 1.8*x + 32
return y
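# Quick sanity checks (illustrative): celcius_para_fahrenheit(0) -> 32.0 and
# celcius_para_fahrenheit(100) -> 212.0, matching the freezing/boiling points.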
| [
"you@example.com"
] | you@example.com |
7a7055c98db6e33109323e2cb20c2d9878f88586 | 28a2d58672a937c337b7310ed4c7d50219b176ce | /tests/test_search.py | ae98278b716f5a7645bbea496c41dd5e640c1bd3 | [
"MIT"
] | permissive | AnuradhaNaik/nyc-council-councilmatic | 9b7fc0fc255500222d96fc534a3638fd32d6ede7 | 94974de317e34dcb05165a7c23717960c400d942 | refs/heads/master | 2020-03-26T12:37:51.662574 | 2018-04-04T18:26:43 | 2018-04-04T18:26:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | from unittest.mock import MagicMock
import pytest
from haystack.query import SearchQuerySet
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator
# Different combinations of possible parameters
sorters = ['title', 'date', 'relevance', None]
ascenders = ['true', None]
queries = ['test', None]
@pytest.mark.django_db
@pytest.mark.parametrize('sort_by', sorters)
@pytest.mark.parametrize('ascending', ascenders)
@pytest.mark.parametrize('query', queries)
def test_search_params(sort_by, ascending, query, mocker):
# Use different query strings depending on the params
if sort_by or ascending or query:
query_string = '?'
if sort_by:
query_string += 'sort_by={sort_by}'.format(sort_by=sort_by)
if ascending:
query_string += '&ascending={ascending}'.format(ascending=ascending)
if query:
query_string += '&q={query}'.format(query=query)
else:
query_string = ''
# Mock the SearchQuerySet.order_by method so we can track how it's used
sqs = MagicMock(spec=SearchQuerySet)
empty_qs = sqs
order_func = sqs.facet().facet().facet().facet().facet().highlight().order_by
order_func.return_value = empty_qs
mocker.patch('nyc.views.SearchQuerySet', return_value=sqs)
# Also mock out the `extra_context` method of the search view, which
# will try to check to make sure Solr is running otherwise
mocker.patch('nyc.views.NYCCouncilmaticFacetedSearchView.extra_context', return_value={})
# The Paginator object gets mad if Solr doesn't return any actual results,
# so let's mock it out too
pag = MagicMock(spec=Paginator)
pag.validate_number.return_value = 0
mocker.patch('haystack.views.Paginator', return_value=pag)
client = Client()
search = client.get(reverse('search') + query_string)
assert search.status_code == 200
if sort_by and sort_by != 'relevance':
# Make sure ordering was applied
assert order_func.call_count == 1
# Look for the emphasized button on the page signalling that this
# ordering key has been selected
        button = '<strong>{sort_by}</strong>'.format(sort_by=sort_by.title())
assert button in search.content.decode('utf-8')
elif query or sort_by == 'relevance':
# When a query exists with no sort_by value, we default
# to ordering by `relevance` (hence `SearchQuerySet.order_by` will
# not get called)
order_func.assert_not_called()
else:
# When no query or sort_by values exist, we default to `date` ordering
assert order_func.call_count == 1
assert order_func.called_with('-last_action_date')
# Check that the ascending keyword got handled
if sort_by and sort_by != 'relevance': # Relevance doesn't display anything for ascending
if ascending:
assert 'fa-sort-amount-asc' in search.content.decode('utf-8')
else:
if sort_by == 'date':
# Descending is the default for Date
assert 'fa-sort-amount-desc' in search.content.decode('utf-8')
elif sort_by == 'title':
# Ascending is the default for Title
assert 'fa-sort-amount-asc' in search.content.decode('utf-8')
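# To run just this module (assumes pytest with the pytest-mock and pytest-django
# plugins installed and Django settings configured):
#   pytest tests/test_search.py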
| [
"jean@jeancochrane.com"
] | jean@jeancochrane.com |
f5cbb9807a06669bd8a22ce5ff10a73f9986acba | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/VBSjjlnu/scripts/plotting/DrawNuisancesAll.py | 1227c2b19c34b89db9d68cf7e0dc8e811857de0b | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 8,676 | py | #!/usr/bin/env python
#import json
import sys
import os
import ROOT
import optparse
if __name__ == '__main__':
print '''
--------------------------------------------------------------------------------------------------
__ \ \ | _)
| | __| _` | \ \ \ / \ | | | | __| _` | __ \ __| _ \ __|
| | | ( | \ \ \ / |\ | | | | \__ \ ( | | | ( __/ \__ \
____/ _| \__,_| \_/\_/ _| \_| \__,_| _| ____/ \__,_| _| _| \___| \___| ____/
--------------------------------------------------------------------------------------------------
'''
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage)
parser.add_option('--outputDirPlots' , dest='outputDirPlots' , help='output directory' , default='./')
parser.add_option('--inputFile' , dest='inputFile' , help='input file with histograms' , default='input.root')
parser.add_option('--nuisancesFile' , dest='nuisancesFile' , help='file with nuisances configurations' , default=None )
parser.add_option('--samplesFile' , dest='samplesFile' , help='file with samples' , default=None )
parser.add_option('--cutName' , dest='cutName' , help='cut names' , default=None )
parser.add_option('--splitStat' , dest='splitStat' , help='draw statistics one bin per plot' , default=None )
parser.add_option('--dryRun' , dest='dryRun' , help='allow a dry run only ' , default=None )
parser.add_option('--drawYields' , dest='drawYields' , help='draw yields of the plots ' , default='0' )
parser.add_option('--joinSubsamples' , dest='joinSubsamples' , help='Add the histograms of subsamples' , default='0' )
parser.add_option('--onlySample' , dest='onlySample' , help='Only plot the requested sample ' , default=None )
(opt, args) = parser.parse_args()
sys.argv.append( '-b' )
ROOT.gROOT.SetBatch()
print " inputFile = ", opt.inputFile
print " nuisancesFile = ", opt.nuisancesFile
print " samplesFile = ", opt.samplesFile
print " outputDirPlots = ", opt.outputDirPlots
print " cutName = ", opt.cutName
print " splitStat = ", opt.splitStat
print " dryRun = ", opt.dryRun
print " drawYields = ", opt.drawYields
print " joinSubsamples = ", opt.joinSubsamples
print " onlySample = ", opt.onlySample
os.system ("mkdir " + opt.outputDirPlots + "/")
samples = {}
if os.path.exists(opt.samplesFile) :
handle = open(opt.samplesFile,'r')
exec(handle)
handle.close()
if opt.joinSubsamples == '0':
subsamples = {}
for sampleName, sample in samples.items():
if "subsamples" in sample:
for subs in sample["subsamples"]:
subsamples[sampleName+"_"+subs] = {}
samples.update(subsamples)
nuisances = {}
if os.path.exists(opt.nuisancesFile) :
handle = open(opt.nuisancesFile,'r')
exec(handle)
handle.close()
ROOTinputFile = ROOT.TFile.Open( opt.inputFile, 'READ')
# for list_histos in ROOTinputFile.GetListOfKeys() :
# print " --> ", list_histos
texOutputFile = open( 'plot_' + opt.cutName + '.tex' ,"w")
texOutputFile.write('\n')
# loop over nuisances
for sampleName, sample in samples.iteritems():
if opt.onlySample and opt.onlySample not in sampleName: continue
nameNominal = 'histo_' + sampleName
nbins = 100
if nameNominal in ROOTinputFile.GetListOfKeys() :
histoNominal = ROOTinputFile.Get(nameNominal)
nbins = histoNominal.GetNbinsX()
print " nbins = ", nbins
texOutputFile.write('\n')
texOutputFile.write('\n')
texOutputFile.write('\\begin{figure*}[htbp] \n')
texOutputFile.write('\\centering \n')
counterNuisance = 0
for nuisanceName, nuisance in nuisances.iteritems():
#print " nuisanceName = ", nuisanceName
#print " nuisance = ", nuisance
if 'name' in nuisance.keys() :
if 'skipCMS' in nuisance and nuisance['skipCMS'] == 1:
entryName = nuisance['name']
else:
entryName = 'CMS_' + nuisance['name']
nameDown = 'histo_' + sampleName + '_' + entryName + 'Down'
nameUp = 'histo_' + sampleName + '_' + entryName + 'Up'
print " nameDown = ", nameDown
print " nameUp = ", nameUp
if nameDown in ROOTinputFile.GetListOfKeys() :
print ('root -b -q DrawNuisances.cxx\(\\\"' + opt.inputFile + '\\\",\\\"' + nameNominal + '\\\",\\\"' + nameUp + '\\\",\\\"' + nameDown + '\\\",\\\"' + opt.outputDirPlots + '\\\",\\\"' + opt.drawYields + '\\\"\) ')
if opt.dryRun == None :
os.system ('root -b -q DrawNuisances.cxx\(\\\"' + opt.inputFile + '\\\",\\\"' + nameNominal + '\\\",\\\"' + nameUp + '\\\",\\\"' + nameDown + '\\\",\\\"' + opt.outputDirPlots + '\\\",\\\"' + opt.drawYields + '\\\"\) ')
texOutputFile.write('\\includegraphics[width=0.09\\textwidth]{Figs/Nuisance/'+ opt.outputDirPlots + '/cc_' + nameUp +'.png}')
counterNuisance += 1
if counterNuisance >= 9 :
counterNuisance = 0
texOutputFile.write('\\\\')
texOutputFile.write('\n')
else :
if nuisanceName == 'stat' : # 'stat' has a separate treatment, it's the MC/data statistics
if 'samples' in nuisance.keys():
if sampleName in nuisance['samples'].keys() :
if opt.splitStat == None :
nameDown = 'histo_' + sampleName + '_CMS_' + opt.cutName + '_' + sampleName + '_ibin_'
nameUp = 'histo_' + sampleName + '_CMS_' + opt.cutName + '_' + sampleName + '_ibin_'
print ('root -b -q DrawNuisancesStat.cxx\(\\\"' + opt.inputFile + '\\\",\\\"' + nameNominal + '\\\",\\\"' + nameUp + '\\\",\\\"' + nameDown + '\\\",\\\"' + opt.outputDirPlots + '\\\",\\\"' + opt.drawYields + '\\\"\) ')
if opt.dryRun == None :
os.system ('root -b -q DrawNuisancesStat.cxx\(\\\"' + opt.inputFile + '\\\",\\\"' + nameNominal + '\\\",\\\"' + nameUp + '\\\",\\\"' + nameDown + '\\\",\\\"' + opt.outputDirPlots + '\\\",\\\"' + opt.drawYields + '\\\"\) ')
else :
for iBin in range(1, nbins): # max number of bins
print iBin
nameDown = 'histo_' + sampleName + '_CMS_' + opt.cutName + '_' + sampleName + '_ibin_' + str(iBin) + '_stat' + 'Down'
nameUp = 'histo_' + sampleName + '_CMS_' + opt.cutName + '_' + sampleName + '_ibin_' + str(iBin) + '_stat' + 'Up'
if nameDown in ROOTinputFile.GetListOfKeys() :
print ('root -b -q DrawNuisances.cxx\(\\\"' + opt.inputFile + '\\\",\\\"' + nameNominal + '\\\",\\\"' + nameUp + '\\\",\\\"' + nameDown + '\\\",\\\"' + opt.outputDirPlots + '\\\"\) ')
if opt.dryRun == None :
os.system ('root -b -q DrawNuisances.cxx\(\\\"' + opt.inputFile + '\\\",\\\"' + nameNominal + '\\\",\\\"' + nameUp + '\\\",\\\"' + nameDown + '\\\",\\\"' + opt.outputDirPlots + '\\\"\) ')
texOutputFile.write('\\includegraphics[width=0.10\\textwidth]{Figs/Nuisance/'+ opt.outputDirPlots + '/cc_' + nameUp +'.png}')
counterNuisance += 1
if counterNuisance >= 9 :
counterNuisance = 0
texOutputFile.write('\\\\')
texOutputFile.write('\n')
texOutputFile.write('\\\\ \n')
texOutputFile.write('\\caption{ \n')
texOutputFile.write(' Distributions for ' + (sampleName).replace('_', '-') + ' of nuisances effects for ' + (opt.cutName).replace('_', '-') + ' selections.\n')
texOutputFile.write('} \n')
texOutputFile.write('\\label{fig:' + sampleName + '_' + opt.cutName + '} \n')
texOutputFile.write('\\end{figure*} \n')
texOutputFile.write('\n')
texOutputFile.write('\n')
| [
"davide.valsecchi@cern.ch"
] | davide.valsecchi@cern.ch |
fa3c05b75bdac93c2ad02f1e4b234dbc2dc39fb2 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/9144ae424953dcd5cbab180fb68ad51108249353-<srccoms_extract>-bug.py | 70228bce66c7542f656b0ac20014a89b3610376c | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,891 | py | def srccoms_extract(srcfile, status_all, wlist):
'\n Given a source file ``srcfile``, this function will\n extract its API(doc comments) and run sample codes in the\n API.\n\n Args:\n srcfile(file): the source file\n status_all(dict): record all the sample code execution states.\n wlist(list): white list\n\n Returns:\n\n string: the length of __all__ list in srcfile versus the exact number of\n analysed API to make sure no API is missed in this srcfile and it\n is useful for statistic practices.\n '
srcc = srcfile.read()
srcfile.seek(0, 0)
srcls = srcfile.readlines()
allidx = srcc.find('__all__')
if (allidx != (- 1)):
alllist = []
if (srcfile.name.find('ops.py') != (- 1)):
for ai in range(0, len(srcls)):
if srcls[ai].startswith('__all__'):
lb = srcls[ai].find('[')
rb = srcls[ai].find(']')
if (lb == (- 1)):
continue
allele = srcls[ai][(lb + 1):rb].replace("'", '').replace(' ', '').replace('"', '')
alllist.append(allele)
if ('' in alllist):
alllist.remove('')
else:
alllist_b = (allidx + len('__all__'))
allstr = srcc[((alllist_b + srcc[alllist_b:].find('[')) + 1):(alllist_b + srcc[alllist_b:].find(']'))]
allstr = allstr.replace('\n', '').replace(' ', '').replace("'", '').replace('"', '')
alllist = allstr.split(',')
if ('' in alllist):
alllist.remove('')
api_alllist_count = len(alllist)
api_count = 0
handled = []
if (srcfile.name.find('ops.py') != (- 1)):
for i in range(0, len(srcls)):
if (srcls[i].find('__doc__') != (- 1)):
opname = srcls[i][:(srcls[i].find('__doc__') - 1)]
if (opname in wlist):
status_all[((srcfile.name + '/') + opname)] = [(- 2)]
continue
comstart = i
for j in range(i, len(srcls)):
if (srcls[j].find('"""') != (- 1)):
comstart = i
opcom = ''
for j in range((comstart + 1), len(srcls)):
opcom += srcls[j]
if (srcls[j].find('"""') != (- 1)):
break
status = sampcd_extract_and_run(opcom, opname, 'def', opname)
api_count += 1
status_all[((srcfile.name + '/') + opname)] = status
handled.append(opname)
for i in range(0, len(srcls)):
if srcls[i].startswith('def '):
f_header = srcls[i].replace(' ', '')
fn = f_header[len('def'):f_header.find('(')]
if (fn in handled):
continue
if (fn in alllist):
api_count += 1
if ((fn in wlist) or (((fn + '@') + srcfile.name) in wlist)):
status_all[((srcfile.name + '/') + fn)] = [(- 2)]
continue
fcombody = single_defcom_extract(i, srcls)
if (fcombody == ''):
print_header('def', fn)
print('WARNING: no comments in function ', fn, ', but it deserves.')
status_all[((srcfile.name + '/') + fn)] = [(- 1)]
print(status_all[((srcfile.name + '/') + fn)])
continue
else:
status = sampcd_extract_and_run(fcombody, fn, 'def', fn)
status_all[((srcfile.name + '/') + fn)] = status
if srcls[i].startswith('class '):
c_header = srcls[i].replace(' ', '')
cn = c_header[len('class'):c_header.find('(')]
if (cn in handled):
continue
if (cn in alllist):
api_count += 1
if ((cn in wlist) or (((cn + '@') + srcfile.name) in wlist)):
status_all[((srcfile.name + '/') + cn)] = [(- 2)]
continue
classcom = single_defcom_extract(i, srcls, True)
if (classcom != ''):
status = sampcd_extract_and_run(classcom, cn, 'class', cn)
status_all[((srcfile.name + '/') + cn)] = status
else:
print('WARNING: no comments in class itself ', cn, ', but it deserves.\n')
status_all[((srcfile.name + '/') + cn)] = [(- 1)]
print(status_all[((srcfile.name + '/') + cn)])
for x in range((i + 1), len(srcls)):
if (srcls[x].startswith('def ') or srcls[x].startswith('class ')):
break
else:
srcls[x] = srcls[x].replace('\t', ' ')
if srcls[x].startswith(' def '):
thisl = srcls[x]
indent = (len(thisl) - len(thisl.lstrip()))
mn = thisl[(indent + len('def ')):thisl.find('(')]
name = ((cn + '.') + mn)
if mn.startswith('_'):
continue
if ((name in wlist) or (((name + '@') + srcfile.name) in wlist)):
status_all[((srcfile.name + '/') + name)] = [(- 2)]
continue
thismethod = [thisl[indent:]]
for y in range((x + 1), len(srcls)):
srcls[y] = srcls[y].replace('\t', ' ')
if (srcls[y].startswith('def ') or srcls[y].startswith('class ')):
break
elif srcls[y].startswith(' def '):
break
else:
thismethod.append(srcls[y][indent:])
thismtdcom = single_defcom_extract(0, thismethod)
if (thismtdcom != ''):
status = sampcd_extract_and_run(thismtdcom, name, 'method', name)
status_all[((srcfile.name + '/') + name)] = status
return [((srcfile.name + ' all list length: ') + str(api_alllist_count)), ('analysed api count: ' + str(api_count))] | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
13253bbcb5a61616a6cb69054f8269129190e2ae | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/337/usersdata/280/100565/submittedfiles/diagonaldominante.py | f05a7e40031566c52ae540227f222395e0e02f05 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # -*- coding: utf-8 -*-
import numpy as np
n=int(input("Digite n: "))
matriz=np.empty([n,n])
diag=np.empty([1,n])
cont=0
for i in range(0,n,1):
for j in range(0,n,1):
matriz[i][j]=int(input("Insira um valor: "))
for i in range(0,n,1):
diag[0][i]=matriz[i][i]
print(matriz)
print(diag)
for i in range(0,n,1):
    # diagonal dominance: |a_ii| must exceed the sum of the other |a_ij| in row i
    if abs(diag[0][i]) > sum(abs(matriz[i])) - abs(diag[0][i]):
        cont=cont+1
if cont==n:
print("SIM")
else:
print("NAO")
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
5e65775bcde20459164350bf4eb7c0d6390187ca | aad164e4efe1d55cc189c35956bfd435b14a0f52 | /eve-8.21.494548/eve/client/script/ui/inflight/target.py | 668e77d143a15c403bcae9b92c51fe48a9155e12 | [] | no_license | Pluckyduck/eve | 61cc41fe8fd4dca4fbdcc4761a37bcfeb27ed84f | 9a277707ab1f162c6bd9618faf722c0be3ea93ad | refs/heads/master | 2020-12-28T23:35:29.992875 | 2013-05-06T14:24:33 | 2013-05-06T14:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,502 | py | #Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/eve/client/script/ui/inflight/target.py
import blue
import uthread
import uix
import uiutil
import util
import state
import base
import random
import _weakref
import uicls
import uiconst
import xtriui
import localization
import telemetry
import math
accuracyThreshold = 0.8
class Target(uicls.Container):
__guid__ = 'xtriui.Target'
__notifyevents__ = ['ProcessShipEffect',
'OnStateChange',
'OnJamStart',
'OnJamEnd',
'OnSlimItemChange',
'OnDroneStateChange2',
'OnDroneControlLost',
'OnStateSetupChance',
'OnSetPlayerStanding',
'OnItemNameChange',
'OnUIRefresh',
'OnFleetJoin_Local',
'OnFleetLeave_Local']
def init(self):
self.gaugesInited = 0
self.gaugesVisible = 0
self.lastDistance = None
self.sr.gaugeParent = None
self.sr.gauge_shield = None
self.sr.gauge_armor = None
self.sr.gauge_structure = None
self.sr.updateTimer = None
self.drones = {}
self.activeModules = {}
self.lastDataUsedForLabel = None
self.timers = {}
self.jammers = {}
self.timerNames = {'propulsion': localization.GetByLabel('UI/Inflight/ScramblingShort'),
'electronic': localization.GetByLabel('UI/Inflight/JammingShort'),
'unknown': localization.GetByLabel('UI/Inflight/MiscellaneousShort')}
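        # Descriptive note (added for clarity): ShowTimer is keyed by these ids,
        # so e.g. a 'propulsion' jam reported via OnJamStart is rendered with the
        # localized 'Scrambling' label while its countdown bar drains.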
def OnUIRefresh(self):
self.Flush()
self.init()
bp = sm.GetService('michelle').GetBallpark()
if bp is not None:
slimItem = bp.GetInvItem(self.id)
self.Startup(slimItem)
def Startup(self, slimItem):
sm.RegisterNotify(self)
obs = sm.GetService('target').IsObserving()
self.ball = _weakref.ref(sm.GetService('michelle').GetBall(slimItem.itemID))
self.slimItem = _weakref.ref(slimItem)
self.id = slimItem.itemID
self.itemID = slimItem.itemID
self.updatedamage = slimItem.categoryID != const.categoryAsteroid and slimItem.groupID != const.groupHarvestableCloud
iconPar = uicls.Container(parent=self, width=64, height=64, align=uiconst.TOPLEFT, state=uiconst.UI_DISABLED)
icon = uicls.Icon(parent=iconPar, align=uiconst.TOALL, typeID=slimItem.typeID, size=64)
self.sr.iconPar = iconPar
self.slimForFlag = slimItem
self.SetStandingIcon()
self.sr.hilite = uicls.Fill(parent=iconPar, color=(1.0, 1.0, 1.0, 0.125), state=uiconst.UI_HIDDEN)
self.sr.activeTarget = xtriui.ActiveTarget(parent=iconPar)
rot = uiutil.GetChild(self.sr.activeTarget, 'rotate')
sm.GetService('ui').Rotate(rot, 2.0, timeFunc=blue.os.GetSimTime)
sm.GetService('ui').BlinkSpriteA(rot.children[0], 1.0, 500.0, 0, timeFunc=blue.os.GetSimTime)
self.sr.diode = uicls.Fill(parent=self, color=(0.0, 1.0, 0.0, 1.0), align=uiconst.TOLEFT, width=6, state=uiconst.UI_HIDDEN)
self.sr.diode.state = uiconst.UI_HIDDEN
if not obs:
labelClass = uicls.EveLabelSmall
else:
labelClass = uicls.EveLabelMedium
self.sr.label = labelClass(text=' ', parent=self, left=[0, 74][obs], top=68, width=[96, 128][obs], state=uiconst.UI_DISABLED)
self.SetTargetLabel()
self.sr.assigned = uicls.Container(name='assigned', align=uiconst.TOPLEFT, parent=self, width=32, height=128, left=64)
self.sr.updateTimer = base.AutoTimer(random.randint(750, 1000), self.UpdateData)
self.UpdateData()
selected = sm.GetService('state').GetExclState(state.selected)
self.Select(selected == slimItem.itemID)
hilited = sm.GetService('state').GetExclState(state.mouseOver)
self.Hilite(hilited == slimItem.itemID)
activeTargetID = sm.GetService('target').GetActiveTargetID()
self.ActiveTarget(activeTargetID == slimItem.itemID)
drones = sm.GetService('michelle').GetDrones()
for key in drones:
droneState = drones[key]
if droneState.targetID == self.id:
self.drones[droneState.droneID] = droneState.typeID
self.UpdateDrones()
def OnItemNameChange(self, *args):
uthread.new(self.SetTargetLabel)
def SetTargetLabel(self):
obs = sm.GetService('target').IsObserving()
self.label = uix.GetSlimItemName(self.slimForFlag)
if self.slimForFlag.corpID:
self.label = localization.GetByLabel('UI/Inflight/Target/TargetLabelWithTicker', target=uix.GetSlimItemName(self.slimForFlag), ticker=cfg.corptickernames.Get(self.slimForFlag.corpID).tickerName)
if obs:
self.label = sm.GetService('bracket').DisplayName(self.slimForFlag, uix.GetSlimItemName(self.slimForFlag))
self.UpdateData()
def OnSetPlayerStanding(self, *args):
self.SetStandingIcon()
def OnStateSetupChance(self, *args):
self.SetStandingIcon()
def SetStandingIcon(self):
stateMgr = sm.GetService('state')
flag = stateMgr.CheckStates(self.slimForFlag, 'flag')
self.standingIcon = uix.SetStateFlagForFlag(self, flag, top=51, left=36, showHint=False)
def OnFleetJoin_Local(self, member, *args):
if session.charid == member.charID or self.slimForFlag.charID == member.charID:
self.SetStandingIcon()
def OnFleetLeave_Local(self, member, *args):
if session.charid == member.charID or self.slimForFlag.charID == member.charID:
self.SetStandingIcon()
def OnSlimItemChange(self, oldSlim, newSlim):
uthread.new(self._OnSlimItemChange, oldSlim, newSlim)
def _OnSlimItemChange(self, oldSlim, newSlim):
if self.itemID != oldSlim.itemID or self.destroyed:
return
self.itemID = newSlim.itemID
self.slimItem = _weakref.ref(newSlim)
if oldSlim.corpID != newSlim.corpID or oldSlim.charID != newSlim.charID:
self.label = uix.GetSlimItemName(newSlim)
self.UpdateData()
def OnStateChange(self, itemID, flag, true, *args):
if not self.destroyed:
uthread.new(self._OnStateChange, itemID, flag, true)
def _OnStateChange(self, itemID, flag, true):
if self.destroyed or self.itemID != itemID:
return
if flag == state.mouseOver:
self.Hilite(true)
elif flag == state.selected:
self.Select(true)
elif flag == state.activeTarget:
self.ActiveTarget(true)
def Hilite(self, state):
if self.sr.hilite:
self.sr.hilite.state = [uiconst.UI_HIDDEN, uiconst.UI_DISABLED][state]
def Select(self, state):
pass
def OnJamStart(self, sourceBallID, moduleID, targetBallID, jammingType, startTime, duration):
if jammingType not in self.jammers:
self.jammers[jammingType] = {}
self.jammers[jammingType][sourceBallID, moduleID, targetBallID] = (startTime, duration)
self.CheckJam()
def OnJamEnd(self, sourceBallID, moduleID, targetBallID, jammingType):
if self and not self.destroyed and hasattr(self, 'jammers'):
if jammingType in self.jammers:
id = (sourceBallID, moduleID, targetBallID)
if id in self.jammers[jammingType]:
del self.jammers[jammingType][id]
self.CheckJam()
def CheckJam(self):
jams = self.jammers.keys()
jams.sort()
for jammingType in jams:
jam = self.jammers[jammingType]
sortList = []
for id in jam.iterkeys():
sourceBallID, moduleID, targetBallID = id
if targetBallID == self.id:
startTime, duration = jam[id]
sortList.append((startTime + duration, (sourceBallID,
moduleID,
targetBallID,
jammingType,
startTime,
duration)))
if sortList:
sortList = uiutil.SortListOfTuples(sortList)
sourceBallID, moduleID, targetBallID, jammingType, startTime, duration = sortList[-1]
self.ShowTimer(jammingType, startTime, duration, self.timerNames.get(jammingType, '???'))
else:
self.KillTimer(jammingType)
@telemetry.ZONE_METHOD
def ShowTimer(self, timerID, startTime, duration, label):
check = self.GetTimer(timerID)
if check:
if check.endTime <= startTime + duration:
check.Close()
else:
return
timer = uicls.Container(name='%s' % timerID, parent=self.sr.gaugeParent, align=uiconst.TOTOP, height=7, padTop=5, padBottom=1)
timer.endTime = startTime + duration
timer.timerID = timerID
self.ArrangeGauges()
t1 = uicls.EveHeaderSmall(text=label, parent=timer, left=68, top=-1, state=uiconst.UI_DISABLED)
uicls.Frame(parent=timer, padding=-1, color=(1.0, 1.0, 1.0, 0.5))
t = uicls.EveLabelSmall(text='', parent=timer, left=5, align=uiconst.CENTERLEFT, state=uiconst.UI_NORMAL)
p = uicls.Fill(parent=timer, align=uiconst.TOLEFT)
timer.height = max(7, t.textheight - 3, t1.textheight - 3)
duration = float(duration)
        while not timer.destroyed:
now = blue.os.GetSimTime()
dt = blue.os.TimeDiffInMs(startTime, now)
timeLeft = (duration - dt) / 1000.0
timer.timeLeft = timeLeft
if timer.destroyed or dt > duration:
t.text = localization.GetByLabel('UI/Common/Done')
p.width = 0
break
t.text = localization.GetByLabel('UI/Inflight/Target/TimerDuration', timeLeft=timeLeft)
p.width = int(timer.displayWidth * ((duration - dt) / duration))
timer.height = max(7, t.textheight - 3)
blue.pyos.synchro.Yield()
blue.pyos.synchro.SleepWallclock(250)
if not timer.destroyed and not self.destroyed:
t.text = ''
blue.pyos.synchro.SleepWallclock(250)
if not timer.destroyed and not self.destroyed:
t.text = localization.GetByLabel('UI/Common/Done')
blue.pyos.synchro.SleepWallclock(250)
if not timer.destroyed and not self.destroyed:
t.text = ''
blue.pyos.synchro.SleepWallclock(250)
if not timer.destroyed and not self.destroyed:
t.text = localization.GetByLabel('UI/Common/Done')
blue.pyos.synchro.SleepWallclock(250)
if not timer.destroyed and not self.destroyed:
t.text = ''
timer.Close()
if not self.destroyed:
self.ArrangeGauges()
def KillTimer(self, timerID):
timer = self.GetTimer(timerID)
if timer:
timer.Close()
def GetTimer(self, timerID):
if not self.destroyed and hasattr(self, 'sr') and self.sr.Get('gaugeParent') and hasattr(self.sr.gaugeParent, 'children'):
for each in self.sr.gaugeParent.children:
if each.name == '%s' % timerID:
return each
else:
return None
def ArrangeGauges(self):
if self.gaugesInited:
totalGaugeHeight = sum([ each.height + each.padTop + each.padBottom for each in self.sr.gaugeParent.children if each.state != uiconst.UI_HIDDEN ])
self.sr.gaugeParent.height = totalGaugeHeight
self.sr.label.top = self.sr.gaugeParent.top + self.sr.gaugeParent.height + 5
def UpdateDamage(self):
bp = sm.GetService('michelle').GetBallpark()
if bp is None:
self.sr.updateTimer = None
return
dmg = bp.GetDamageState(self.itemID)
if dmg is not None:
self.SetDamageState(dmg)
def SetDamageState(self, state):
self.InitGauges()
visible = 0
for i, gauge in enumerate((self.sr.gauge_shield, self.sr.gauge_armor, self.sr.gauge_structure)):
if state[i] is None:
gauge.state = uiconst.UI_HIDDEN
else:
healthState = state[i]
damageBarWidth = gauge.displayWidth * (1.0 - healthState)
if healthState != 1.0:
damageBarWidth = max(1.0, damageBarWidth)
gauge.damageBar.renderObject.displayX = gauge.displayWidth - damageBarWidth
gauge.damageBar.renderObject.displayWidth = damageBarWidth
gauge.damageBar.renderObject.displayHeight = gauge.displayHeight
gauge.state = uiconst.UI_NORMAL
visible += 1
percLeft = max(healthState, 0) * 100
                    if percLeft >= 1.0:
                        percLeft = math.floor(percLeft)
self.SetHint(gauge, percLeft)
self.gaugesVisible = visible
self.ArrangeGauges()
def SetHint(self, gauge, percLeft):
if gauge.name == 'gauge_shield':
hintLabel = 'UI/Inflight/Target/GaugeShieldRemaining'
elif gauge.name == 'gauge_armor':
hintLabel = 'UI/Inflight/Target/GaugeArmorRemaining'
elif gauge.name == 'gauge_structure':
hintLabel = 'UI/Inflight/Target/GaugeStructureRemaining'
else:
return
gauge.hint = localization.GetByLabel(hintLabel, percentage=percLeft)
def InitGauges(self):
if self.gaugesInited:
self.sr.gaugeParent.state = uiconst.UI_NORMAL
return
obs = sm.GetService('target').IsObserving()
if obs:
par = uicls.Container(name='gauges', parent=self, align=uiconst.TOPLEFT, width=64, height=32, top=0, left=74, state=uiconst.UI_NORMAL)
else:
par = uicls.Container(name='gauges', parent=self, align=uiconst.TOPLEFT, width=66, height=32, top=66, left=0, state=uiconst.UI_NORMAL)
gauges = ['shield', 'armor', 'structure']
for gaugeName in gauges:
g = uicls.Container(name='gauge_%s' % gaugeName, parent=par, align=uiconst.TOTOP, height=7, padTop=5, padBottom=1)
g.damageBar = uicls.Fill(parent=g, align=uiconst.NOALIGN, color=(158 / 256.0,
11 / 256.0,
14 / 256.0,
1.0))
uicls.Frame(parent=g, color=(1.0, 1.0, 1.0, 0.5), padding=-1)
uicls.Fill(parent=g, padding=-1)
setattr(self.sr, 'gauge_%s' % gaugeName, g)
self.sr.gaugeParent = par
self.gaugesInited = 1
self.ArrangeGauges()
def GetShipID(self):
return self.itemID
def GetIcon(self, icon, typeID, size):
if not self.destroyed:
icon.LoadIconByTypeID(typeID)
icon.SetSize(size, size)
def _OnClose(self, *args):
sm.UnregisterNotify(self)
self.sr.updateTimer = None
def ProcessShipEffect(self, godmaStm, effectState):
slimItem = self.slimItem()
if slimItem and effectState.environment[3] == slimItem.itemID:
if effectState.start:
if self.GetWeapon(effectState.itemID):
return
moduleInfo = self.GetModuleInfo(effectState.itemID)
if moduleInfo:
self.AddWeapon(moduleInfo)
self.activeModules[effectState.itemID] = moduleInfo
else:
self.RemoveWeapon(effectState.itemID)
self.activeModules.pop(effectState.itemID, None)
def AddWeapon(self, moduleInfo):
if self is None or self.destroyed:
return
icon = uicls.Icon(parent=self.sr.assigned, align=uiconst.RELATIVE, width=16, height=16, state=uiconst.UI_HIDDEN, typeID=moduleInfo.typeID, size=32)
icon.sr.moduleID = moduleInfo.itemID
icon.OnClick = (self.ClickWeapon, icon)
icon.OnMouseEnter = (self.OnMouseEnterWeapon, icon)
icon.OnMouseExit = (self.OnMouseExitWeapon, icon)
icon.baseAlpha = 1.0
self.ArrangeWeapons()
def ClickWeapon(self, icon):
shipui = uicore.layer.shipui
if shipui:
module = shipui.GetModule(icon.sr.moduleID)
if module:
module.Click()
def OnMouseEnterWeapon(self, icon):
sm.GetService('bracket').ShowHairlinesForModule(icon.sr.moduleID, reverse=True)
def OnMouseExitWeapon(self, icon):
sm.GetService('bracket').StopShowingModuleRange(icon.sr.moduleID)
def IsEffectActivatible(self, effect):
return effect.isDefault and effect.effectName != 'online' and effect.effectCategory in (const.dgmEffActivation, const.dgmEffTarget)
def RemoveWeapon(self, moduleID):
icon = self.GetWeapon(moduleID)
if icon:
icon.Close()
self.ArrangeWeapons()
def ArrangeWeapons(self):
if self and not self.destroyed and self.sr.assigned:
size = [32, 16][len(self.sr.assigned.children) > 2]
left = 0
top = 0
for icon in self.sr.assigned.children:
icon.width = icon.height = size
icon.left = left
icon.top = top
top += size
if top == 64:
top = 0
left += size
icon.state = uiconst.UI_NORMAL
def GetWeapon(self, moduleID):
if self is None or self.destroyed:
return
if self.sr.assigned:
for each in self.sr.assigned.children:
if each.sr.moduleID == moduleID:
return each
def GetModuleInfo(self, moduleID):
ship = sm.GetService('godma').GetItem(eve.session.shipid)
if ship is None:
return
for module in ship.modules:
if module.itemID == moduleID:
return module
def OnClick(self, *args):
sm.GetService('state').SetState(self.itemID, state.selected, 1)
sm.GetService('state').SetState(self.itemID, state.activeTarget, 1)
sm.GetService('menu').TacticalItemClicked(self.itemID)
def GetMenu(self):
obs = sm.GetService('target').IsObserving()
m = []
if obs:
m += [(uiutil.MenuLabel('UI/Inflight/Target/ToggleTeam'), sm.GetService('target').ToggleTeam, (self.itemID,))]
m += [(uiutil.MenuLabel('UI/Inflight/Target/MoveUp'), sm.GetService('target').MoveUp, (self.itemID,))]
m += [(uiutil.MenuLabel('UI/Inflight/Target/MoveDown'), sm.GetService('target').MoveDown, (self.itemID,))]
m += sm.GetService('menu').CelestialMenu(self.itemID)
return m
def OnMouseHover(self, *args):
pass
def OnMouseDown(self, *args):
if args[0] != uiconst.MOUSELEFT or len(uicore.layer.target.children) <= 1:
return
horizontalAlign = settings.user.ui.Get('alignHorizontally', True)
rows, cols = sm.GetService('target').GetTargetsSize()
width = height = 0
left = top = None
for target in uicore.layer.target.children:
if isinstance(target, xtriui.Target):
tl, tt, tw, th = target.GetAbsolute()
width = max(int(cols * tw), width)
height = max(int(rows * th), height)
if left == None:
left = tl
else:
left = min(left, tl)
if top == None:
top = tt
else:
top = min(top, tt)
clipper = (left,
top,
left + width,
top + height)
uthread.new(self.DoRepositionDrag, clipper)
def OnMouseUp(self, *args):
if args[0] != uiconst.MOUSELEFT:
return
uicore.uilib.UnclipCursor()
def OnMouseEnter(self, *args):
sm.GetService('state').SetState(self.id, state.mouseOver, 1)
def OnMouseExit(self, *args):
sm.GetService('state').SetState(self.itemID, state.mouseOver, 0)
def DoRepositionDrag(self, cursorClipper):
blue.synchro.Sleep(200)
if uicore.uilib.leftbtn and uicore.uilib.mouseOver == self:
uicore.uilib.ClipCursor(*cursorClipper)
else:
return
origin = self.GetAbsolute()
xOffset = uicore.uilib.x - origin[0]
yOffset = uicore.uilib.y - origin[1]
horizontalAlign = settings.user.ui.Get('alignHorizontally', True)
repositionLine = uicls.Line(align=uiconst.TORIGHT, weight=4, color=(1, 1, 1, 0.5))
uiutil.Transplant(self, uicore.layer.abovemain)
targetSvc = sm.GetService('target')
targetSvc.ArrangeTargets()
while uicore.uilib.leftbtn:
self.SetAlign(uiconst.TOPLEFT)
self.left = uicore.uilib.x - xOffset
self.top = uicore.uilib.y - yOffset
(x, y), (toLeft, toTop) = targetSvc.GetOriginPosition(getDirection=1)
lessThanAll = True
for target in uicore.layer.target.children:
if isinstance(target, xtriui.Target):
tl, tt, tw, th = target.GetAbsolute()
if tl - 2 <= uicore.uilib.x <= tl + tw + 2 and tt - 2 <= uicore.uilib.y <= tt + th + 2:
if horizontalAlign:
repositionLine.padTop = repositionLine.padBottom = 32
if not toLeft:
repositionLine.SetAlign(uiconst.TOLEFT)
repositionLine.padLeft = -10
else:
repositionLine.SetAlign(uiconst.TORIGHT)
repositionLine.padLeft = 0
lessThanAll = False
break
else:
if not toTop:
repositionLine.SetAlign(uiconst.TOTOP)
repositionLine.padTop = 0
else:
repositionLine.SetAlign(uiconst.TOBOTTOM)
repositionLine.padTop = -10
lessThanAll = False
break
if lessThanAll:
if horizontalAlign:
repositionLine.padTop = repositionLine.padBottom = 32
if not toLeft:
repositionLine.SetAlign(uiconst.TORIGHT)
repositionLine.padLeft = 0
else:
repositionLine.SetAlign(uiconst.TOLEFT)
repositionLine.padLeft = -10
elif not toTop:
repositionLine.SetAlign(uiconst.TOBOTTOM)
repositionLine.padTop = -10
else:
repositionLine.SetAlign(uiconst.TOTOP)
repositionLine.padTop = 0
uiutil.Transplant(repositionLine, target)
blue.pyos.synchro.Yield()
uicore.uilib.UnclipCursor()
uiutil.Transplant(self, uicore.layer.target, idx=uicore.layer.target.children.index(repositionLine.parent) if not lessThanAll else None)
repositionLine.Close()
targetSvc.ArrangeTargets()
@telemetry.ZONE_METHOD
def UpdateData(self):
ball = self.ball()
if not ball:
return
obs = sm.GetService('target').IsObserving()
if not obs:
dist = ball.surfaceDist
distanceInMeters = int(dist)
dataUsedForLabel = (self.label, int(distanceInMeters))
if dataUsedForLabel != self.lastDataUsedForLabel:
newText = localization.GetByLabel('UI/Inflight/Target/DataLabel', label=self.label, distance=util.FmtDist(dist))
self.sr.label.text = newText
self.lastDataUsedForLabel = dataUsedForLabel
        elif self.sr.label.text != self.label:
self.sr.label.text = self.label
if self.updatedamage:
self.UpdateDamage()
def ActiveTarget(self, true):
if self.destroyed:
return
targetSvc = sm.GetService('target')
if true and not targetSvc.IsObserving():
self.sr.iconPar.width = self.sr.iconPar.height = 56
self.sr.iconPar.left = self.sr.iconPar.top = 5
if not targetSvc.disableSpinnyReticule:
self.sr.activeTarget.state = uiconst.UI_DISABLED
else:
self.sr.iconPar.width = self.sr.iconPar.height = 64
self.sr.iconPar.left = self.sr.iconPar.top = 1
self.sr.activeTarget.state = uiconst.UI_HIDDEN
def OnDroneStateChange2(self, itemID, oldActivityState, activityState):
michelle = sm.GetService('michelle')
droneState = michelle.GetDroneState(itemID)
if activityState in (const.entityCombat, const.entityEngage, const.entityMining):
if droneState.targetID == self.id:
self.drones[itemID] = droneState.typeID
elif self.drones.has_key(itemID):
del self.drones[itemID]
elif self.drones.has_key(itemID):
del self.drones[itemID]
self.UpdateDrones()
def OnDroneControlLost(self, droneID):
if self.drones.has_key(droneID):
del self.drones[droneID]
self.UpdateDrones()
def UpdateDrones(self):
if not self.drones:
self.RemoveWeapon('drones')
return
droneIcon = self.GetWeapon('drones')
if not droneIcon:
icon = uicls.Sprite(align=uiconst.RELATIVE, width=16, height=16, state=uiconst.UI_HIDDEN, texturePath='res:/UI/Texture/Icons/56_64_5.png', parent=self.sr.assigned)
icon.sr.moduleID = 'drones'
self.ArrangeWeapons()
self.UpdateDroneHint()
def UpdateDroneHint(self):
dronesByTypeID = {}
droneIcon = self.GetWeapon('drones')
for droneID, droneTypeID in self.drones.iteritems():
if not dronesByTypeID.has_key(droneTypeID):
dronesByTypeID[droneTypeID] = 0
dronesByTypeID[droneTypeID] += 1
hintLines = []
for droneTypeID, number in dronesByTypeID.iteritems():
hintLines.append(localization.GetByLabel('UI/Inflight/Target/DroneHintLine', drone=droneTypeID, count=number))
droneIcon.hint = localization.GetByLabel('UI/Inflight/Target/DroneHintLabel', droneHintLines='<br>'.join(hintLines)) | [
"ferox2552@gmail.com"
] | ferox2552@gmail.com |
8311b5e7ac60b4884e1f00810a0af2d768c7ca97 | 7a20895c1a5ebe80bfffd6072d2efaa27892b4df | /base_challenge.py | e99d3d082accabd4ebf9723af2b30944ee52694e | [] | no_license | rhlobo/hackerhank_challenges | 609cf35cff04ef8cf34322418d86896d4057d8b0 | 18d75200587d9b259a84bcbbff6cfcdd1c91d6a8 | refs/heads/master | 2018-12-29T14:13:44.606310 | 2014-11-27T16:13:00 | 2014-11-27T16:13:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | #!/usr/bin/env python
import tester
tester.configure()
'''
https://www.hackerrank.com/challenges/XXX
'''
import XXX
def solve(x):
pass
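# Illustrative only: a concrete challenge would replace the stub above, e.g.
#   def solve(a, b):
#       return (a + b) % (10**9 + 7)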
T = int(raw_input())
for _ in xrange(T):
args = [int(x) for x in raw_input().split()]
print solve(*args)
| [
"rhlobo+github@gmail.com"
] | rhlobo+github@gmail.com |
afa240d770a9b1b6bed14b5b46610250143b3006 | 28779d02314089e0a70a91cc1db875cd5024a395 | /tfx/orchestration/kubeflow/v2/kubeflow_v2_dag_runner.py | d5a9835313af31580c8b92a0124df1209ea97512 | [
"Apache-2.0"
] | permissive | voquangtuong/tfx | 4a2b41f6fd1238c8ae59f2080948d451b7668aba | ce2fd6fbde9845cd837c47089c3d6db2f87007b9 | refs/heads/master | 2023-04-10T21:25:06.404137 | 2021-04-20T03:13:12 | 2021-04-20T03:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,252 | py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""V2 Kubeflow DAG Runner."""
import datetime
import json
import os
from typing import Any, Dict, List, Optional, Text
from tfx import version
from tfx.dsl.io import fileio
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.kubeflow.v2 import pipeline_builder
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from tfx.utils import deprecation_utils
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
from google.protobuf import json_format
_KUBEFLOW_TFX_CMD = (
'python', '-m',
'tfx.orchestration.kubeflow.v2.container.kubeflow_v2_run_executor')
# Current schema version for the API proto.
_SCHEMA_VERSION = '2.0.0'
# Default TFX container image/commands to use in KubeflowV2DagRunner.
_KUBEFLOW_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(
version_utils.get_image_version())
def _get_current_time():
"""Gets the current timestamp."""
return datetime.datetime.now()
class KubeflowV2DagRunnerConfig(pipeline_config.PipelineConfig):
"""Runtime configuration specific to execution on Kubeflow pipelines."""
def __init__(self,
project_id: Text,
display_name: Optional[Text] = None,
default_image: Optional[Text] = None,
default_commands: Optional[List[Text]] = None,
**kwargs):
"""Constructs a Kubeflow V2 runner config.
Args:
project_id: GCP project ID to be used.
display_name: Optional human-readable pipeline name. Defaults to the
pipeline name passed into `KubeflowV2DagRunner.run()`.
default_image: The default TFX image to be used if not overridden by per
component specification.
default_commands: Optionally specifies the commands of the provided
container image. When not provided, the default `ENTRYPOINT` specified
in the docker image is used. Note: the commands here refers to the K8S
container command, which maps to Docker entrypoint field. If one
supplies command but no args are provided for the container, the
container will be invoked with the provided command, ignoring the
`ENTRYPOINT` and `CMD` defined in the Dockerfile. One can find more
details regarding the difference between K8S and Docker conventions at
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
**kwargs: Additional args passed to base PipelineConfig.
"""
super(KubeflowV2DagRunnerConfig, self).__init__(**kwargs)
self.project_id = project_id
self.display_name = display_name
self.default_image = default_image or _KUBEFLOW_TFX_IMAGE
if default_commands is None:
self.default_commands = _KUBEFLOW_TFX_CMD
else:
self.default_commands = default_commands
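# Sketch (author-added; the image name and command below are hypothetical):
# supplying default_commands replaces the container image's ENTRYPOINT, per
# the K8S semantics described in the docstring above, while leaving it None
# keeps the entrypoint baked into the image:
#
#   config = KubeflowV2DagRunnerConfig(
#       project_id='my-gcp-project',
#       default_image='gcr.io/my-project/tfx:custom',
#       default_commands=['python', '-m', 'my_custom_entrypoint'])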
class KubeflowV2DagRunner(tfx_runner.TfxRunner):
"""Kubeflow V2 pipeline runner.
Builds a pipeline job spec in json format based on TFX pipeline DSL object.
"""
def __init__(self,
config: KubeflowV2DagRunnerConfig,
output_dir: Optional[Text] = None,
output_filename: Optional[Text] = None):
"""Constructs an KubeflowV2DagRunner for compiling pipelines.
Args:
config: An KubeflowV2DagRunnerConfig object to specify runtime
configuration when running the pipeline in Kubeflow.
output_dir: An optional output directory into which to output the pipeline
definition files. Defaults to the current working directory.
output_filename: An optional output file name for the pipeline definition
file. The file output format will be a JSON-serialized PipelineJob pb
message. Defaults to 'pipeline.json'.
"""
if not isinstance(config, KubeflowV2DagRunnerConfig):
raise TypeError('config must be type of KubeflowV2DagRunnerConfig.')
super(KubeflowV2DagRunner, self).__init__()
self._config = config
self._output_dir = output_dir or os.getcwd()
self._output_filename = output_filename or 'pipeline.json'
def run(self,
pipeline: tfx_pipeline.Pipeline,
parameter_values: Optional[Dict[Text, Any]] = None,
write_out: Optional[bool] = True) -> Dict[Text, Any]:
"""Compiles a pipeline DSL object into pipeline file.
Args:
pipeline: TFX pipeline object.
parameter_values: mapping from runtime parameter names to its values.
write_out: set to True to actually write out the file to the place
designated by output_dir and output_filename. Otherwise return the
JSON-serialized pipeline job spec.
Returns:
Returns the JSON pipeline job spec.
Raises:
RuntimeError: if trying to write out to a place occupied by an existing
file.
"""
# TODO(b/166343606): Support user-provided labels.
# TODO(b/169095387): Deprecate .run() method in favor of the unified API
# client.
display_name = (
self._config.display_name or pipeline.pipeline_info.pipeline_name)
pipeline_spec = pipeline_builder.PipelineBuilder(
tfx_pipeline=pipeline,
default_image=self._config.default_image,
default_commands=self._config.default_commands).build()
pipeline_spec.sdk_version = 'tfx-{}'.format(version.__version__)
pipeline_spec.schema_version = _SCHEMA_VERSION
runtime_config = pipeline_builder.RuntimeConfigBuilder(
pipeline_info=pipeline.pipeline_info,
parameter_values=parameter_values).build()
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_RUNNER: 'kubeflow_v2'}):
result = pipeline_pb2.PipelineJob(
display_name=display_name or pipeline.pipeline_info.pipeline_name,
labels=telemetry_utils.get_labels_dict(),
runtime_config=runtime_config)
result.pipeline_spec.update(json_format.MessageToDict(pipeline_spec))
pipeline_json_dict = json_format.MessageToDict(result)
if write_out:
if fileio.exists(self._output_dir) and not fileio.isdir(self._output_dir):
raise RuntimeError('Output path: %s is pointed to a file.' %
self._output_dir)
if not fileio.exists(self._output_dir):
fileio.makedirs(self._output_dir)
with fileio.open(
os.path.join(self._output_dir, self._output_filename), 'wb') as f:
f.write(json.dumps(pipeline_json_dict, sort_keys=True))
return pipeline_json_dict
compile = deprecation_utils.deprecated_alias(
deprecated_name='compile', name='run', func_or_class=run)
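# Usage sketch (author-added, illustrative only; `my_pipeline` stands in for a
# tfx_pipeline.Pipeline assembled elsewhere and the project id is made up).
# The runner compiles the DSL into a JSON-serialized PipelineJob rather than
# executing it:
#
#   runner = KubeflowV2DagRunner(
#       config=KubeflowV2DagRunnerConfig(project_id='my-gcp-project'),
#       output_dir='/tmp/pipelines')
#   job_dict = runner.run(my_pipeline, parameter_values={'num_steps': 100},
#                         write_out=True)  # also writes /tmp/pipelines/pipeline.json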
| [
"tensorflow-extended-nonhuman@googlegroups.com"
] | tensorflow-extended-nonhuman@googlegroups.com |
8fffa06beb3240b52cbe74e56e6881e19d342e7d | cb81bfaa03921e6388ad6eb012051916d2c04772 | /jumpvisualiser/urls.py | bdd79bc10db3879acd4be1544ec95ae4c6a854d6 | [] | no_license | skyride/eve-jump-visualiser | 43d9c82da0922e04433f576e544b94a4ee35023b | 838053a8b50de337d52616c1e35c2cbb4a16c1d7 | refs/heads/master | 2018-06-27T23:11:18.332956 | 2018-05-30T18:18:04 | 2018-05-30T18:18:04 | 118,148,044 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | """jumpvisualiser URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| [
"adam.findlay@mercurytide.co.uk"
] | adam.findlay@mercurytide.co.uk |
dc391b93eb9bb7b4d45cbb1a5abe5ca9c9ab5285 | b8564a3aebbb85d24e3547cbd28f3654de1cfb6e | /darkworld/__init__.py | 69800e8fc9b722567ba06cf3199253e30e8f7b05 | [] | no_license | kwoolter/DarkWorld | 4a62949d1d37d1897fd1b2bb59dce244df403889 | fa11f53d4a27d7220229036a9708aa7f86e43504 | refs/heads/master | 2021-03-23T19:58:36.704359 | 2020-11-03T13:36:08 | 2020-11-03T13:36:08 | 247,479,909 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | from darkworld.controller import DWController
| [
"keith.woolterton@gmail.com"
] | keith.woolterton@gmail.com |
e071c6d32cf23a91804981e1c9fbd363874efec7 | d996edcd595c565c5725a16286ce8d338af67246 | /src/rl/qlearning/sarsa_agent.py | 6204f1b366a0ea9e6d541dbc86fc6eab2e9efaf8 | [] | no_license | preddy5/dltemplate | fbbfce7660c451495e255cf8d8437e4b4e207f9c | 77b04b767cbd4914e0a3d3609c645e475aabcc43 | refs/heads/master | 2020-04-28T19:37:04.893001 | 2019-03-13T13:35:04 | 2019-03-13T13:35:04 | 175,517,056 | 1 | 1 | null | 2019-03-13T23:59:40 | 2019-03-13T23:59:39 | null | UTF-8 | Python | false | false | 1,071 | py | import numpy as np
from rl.qlearning.qlearning_agent import QLearningAgent
class EVSarsaAgent(QLearningAgent):
"""
An agent that changes some of its q-learning functions to implement
Expected Value SARSA.
"""
def get_value(self, state):
"""
Returns V_pi for the current state under the epsilon-greedy policy:
    V_pi(s) = sum_{a_i} pi(a_i | s) * Q(s, a_i)
:param state: current environment state
:return: expected value of the state under the epsilon-greedy policy
"""
epsilon = self.epsilon
possible_actions = self.get_legal_actions(state)
n_actions = len(possible_actions)
if n_actions == 0:
return 0.
q_values = [self.get_q_value(state, action) for action in possible_actions]
best_action_idx = np.argmax(q_values)
expected_value = 0.
for i in range(n_actions):
if i == best_action_idx:
expected_value += (1 - epsilon + epsilon / n_actions) * q_values[i]
else:
expected_value += (epsilon / n_actions) * q_values[i]
return expected_value
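# Illustration (author-added; the numbers are hypothetical): a quick check of
# the epsilon-greedy expectation computed in get_value().  The greedy action
# gets probability 1 - eps + eps/n and every other action gets eps/n.
if __name__ == "__main__":
    eps = 0.1
    q = [1.0, 2.0, 5.0]
    n = len(q)
    best = max(range(n), key=lambda i: q[i])
    v = sum(((1 - eps + eps / n) if i == best else eps / n) * q[i] for i in range(n))
    print(v)  # ~4.77, slightly below max(q) == 5.0, as expected for eps > 0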
| [
"markmo@me.com"
] | markmo@me.com |
aaefa16d38f2d7ea1dc21425da2ace84fc7f1b87 | 058498e815b20950cc97033c2e4e55c732c3f909 | /tempest/lib/api_schema/response/compute/v2_1/floating_ips.py | 0c665905fe34dde28020cc2c8839f64852e8e61f | [
"Apache-2.0"
] | permissive | cisco-openstack/tempest | 49c56de4ee2422791fe5cd832083d7b6558c7d0d | 0bc47dbdd05b5d12d048c09800515c2bd03a16ce | refs/heads/proposed | 2021-01-22T00:11:00.113774 | 2020-06-26T09:32:55 | 2020-06-26T09:32:55 | 24,151,261 | 2 | 5 | Apache-2.0 | 2020-08-07T06:13:20 | 2014-09-17T15:46:17 | Python | UTF-8 | Python | false | false | 4,677 | py | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
common_floating_ip_info = {
'type': 'object',
'properties': {
# NOTE: Now the type of 'id' is integer, but
# here allows 'string' also because we will be
# able to change it to 'uuid' in the future.
'id': {'type': ['integer', 'string']},
'pool': {'type': ['string', 'null']},
'instance_id': {'type': ['string', 'null']},
'ip': parameter_types.ip_address,
'fixed_ip': parameter_types.ip_address
},
'additionalProperties': False,
'required': ['id', 'pool', 'instance_id',
'ip', 'fixed_ip'],
}
list_floating_ips = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ips': {
'type': 'array',
'items': common_floating_ip_info
},
},
'additionalProperties': False,
'required': ['floating_ips'],
}
}
create_get_floating_ip = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ip': common_floating_ip_info
},
'additionalProperties': False,
'required': ['floating_ip'],
}
}
list_floating_ip_pools = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ip_pools': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'name': {'type': 'string'}
},
'additionalProperties': False,
'required': ['name'],
}
}
},
'additionalProperties': False,
'required': ['floating_ip_pools'],
}
}
add_remove_floating_ip = {
'status_code': [202]
}
create_floating_ips_bulk = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ips_bulk_create': {
'type': 'object',
'properties': {
'interface': {'type': ['string', 'null']},
'ip_range': {'type': 'string'},
'pool': {'type': ['string', 'null']},
},
'additionalProperties': False,
'required': ['interface', 'ip_range', 'pool'],
}
},
'additionalProperties': False,
'required': ['floating_ips_bulk_create'],
}
}
delete_floating_ips_bulk = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ips_bulk_delete': {'type': 'string'}
},
'additionalProperties': False,
'required': ['floating_ips_bulk_delete'],
}
}
list_floating_ips_bulk = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ip_info': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'address': parameter_types.ip_address,
'instance_uuid': {'type': ['string', 'null']},
'interface': {'type': ['string', 'null']},
'pool': {'type': ['string', 'null']},
'project_id': {'type': ['string', 'null']},
'fixed_ip': parameter_types.ip_address
},
'additionalProperties': False,
# NOTE: fixed_ip is introduced after JUNO release,
# So it is not defined as 'required'.
'required': ['address', 'instance_uuid', 'interface',
'pool', 'project_id'],
}
}
},
'additionalProperties': False,
'required': ['floating_ip_info'],
}
}
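# Author-added illustration (not from the Tempest source): a body that the
# `create_get_floating_ip` schema above accepts; note that `id` may be an
# integer or a string, and `instance_id` is nullable:
#
#   {"floating_ip": {"id": 42, "pool": "public", "instance_id": None,
#                    "ip": "172.24.4.229", "fixed_ip": "10.0.0.3"}}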
| [
"mtreinish@kortar.org"
] | mtreinish@kortar.org |
a69a45a82ed766a21c7056d57f6569f2d180450f | fe03eab6477db3f7f3667eefe369dd89d457a58f | /Run2Demonstrator/milliqanScripts/timeCalibration/plotTripleCoincidence.py | f94e68768254ded82d8cb2888577b3914042c347 | [] | no_license | milliQan-sw/milliqanOffline | c64a56173797cd00b9243e4ca7196515081834e6 | 4df6cb4a16ea88feea82b99f9f8c356dbce863cc | refs/heads/master | 2023-08-23T13:39:17.513848 | 2023-08-17T20:16:20 | 2023-08-17T20:16:20 | 123,949,343 | 3 | 7 | null | 2023-09-13T06:59:02 | 2018-03-05T16:47:13 | Python | UTF-8 | Python | false | false | 3,871 | py | #!/usr/local/bin/python
import ROOT as r
import pickle
import os,sys
import pandas as pd
r.gROOT.SetBatch(True)
r.gStyle.SetOptFit(0)
# inputFile = r.TFile('recalibTree.root')
inputFile = r.TFile('/Users/mcitron/milliqanScripts/realTripleCoinc.root')
# inputFile = r.TFile('AllTripleCoincidenceNominalHVNov7_v2.root')
tempCanvas = r.TCanvas()
tempCanvas.cd()
oldtree = inputFile.Get('t')
bins = '(40,{0},{1})'.format(-30,20)
oldtree.Draw("MinIf$(time_module_calibrated,(((chan==8||chan==9)&&nPE>=200)||((chan==11||chan==10)&&nPE>=75)))-MinIf$(time_module_calibrated,(layer==1&&nPE>=200))>>noBeam"+bins,"MinIf$(time_module_calibrated,((chan==8||chan==9)&&nPE>=200)||((chan==11||chan==10)&&nPE>=75))>0&&MinIf$(time_module_calibrated,layer==1&&nPE>=200)>0&&!beam","")
histNoBeam = tempCanvas.GetListOfPrimitives()[0]
oldtree.Draw("MinIf$(time_module_calibrated,((chan==8||chan==9)&&nPE>=200)||((chan==11||chan==10)&&nPE>=75))-MinIf$(time_module_calibrated,(layer==1&&nPE>=200))>>beam"+bins,"MinIf$(time_module_calibrated,((chan==8||chan==9)&&nPE>=200)||((chan==11||chan==10)&&nPE>=75))>0&&MinIf$(time_module_calibrated,layer==1&&nPE>=200)>0&&beam","")
histBeam = tempCanvas.GetListOfPrimitives()[0]
histBeamBackgroundSubtract = histBeam.Clone('backgroundSubtract')
histNoBeamToSubtract = histNoBeam.Clone()
histNoBeamToSubtract.Scale(1.97)
histBeamBackgroundSubtract.Add(histNoBeamToSubtract,-1)
gNoBeam = r.TF1('gNoBeam','gaus',-30,20)
gBeam = r.TF1('gBeam','gaus',5,15)
histNoBeam.Fit(gNoBeam,"R")
print gNoBeam.GetProb()
histNoBeam.Draw()
tempCanvas.SaveAs("noBeam.pdf")
tempCanvas.Clear()
histBeamBackgroundSubtract.Fit(gBeam,"R")
print gBeam.GetProb()
histBeamBackgroundSubtract.Draw()
tempCanvas.SaveAs("backgroundSubtract.pdf")
tempCanvas.Clear()
doubleG = r.TF1("doubleG","gaus(0)+gaus(3)",-30,20)
doubleG.SetParameter(0,gNoBeam.GetParameter(0))
doubleG.SetParameter(1,gNoBeam.GetParameter(1))
doubleG.SetParameter(2,gNoBeam.GetParameter(2))
doubleG.SetParameter(3,gBeam.GetParameter(0))
doubleG.SetParameter(4,gBeam.GetParameter(1))
doubleG.SetParameter(5,gBeam.GetParameter(2))
histBeam.Fit(doubleG,"R")
gNoBeam.SetParameter(0,doubleG.GetParameter(0))
gNoBeam.SetParameter(1,doubleG.GetParameter(1))
gNoBeam.SetParameter(2,doubleG.GetParameter(2))
print doubleG.GetProb()
tempCanvas.Clear()
histBeam.Draw()
gNoBeam.SetLineColor(r.kBlue)
gNoBeam.Draw("same")
tempCanvas.SaveAs("mess.pdf")
# newFile = r.TFile('skimmed.root','recreate')
# jaeData = pd.read_csv('dataJae.txt',sep=' ')
# newtree = oldtree.CloneTree(0);
# threetimes = []
# for entry in oldtree:
# minTimeLayer1 = 9999
# minTimeLayer2 = 9999
# minTimeLayer3 = 9999
# for iT in range(len(entry.time_module_calibrated)):
# if entry.nPE[iT] >= 100:
# if entry.layer[iT] == 1:
# if entry.time_module_calibrated[iT] < minTimeLayer1:
# minTimeLayer1 = entry.time_module_calibrated[iT]
# elif entry.layer[iT] == 2:
# if entry.time_module_calibrated[iT] < minTimeLayer2:
# minTimeLayer2 = entry.time_module_calibrated[iT]
# elif entry.layer[iT] == 3:
# if entry.time_module_calibrated[iT] < minTimeLayer3:
# minTimeLayer3 = entry.time_module_calibrated[iT]
# if all(x != 9999 for x in [minTimeLayer1,minTimeLayer2,minTimeLayer3]):
# threetimes.append([minTimeLayer1,minTimeLayer2,minTimeLayer3])
# # if len(jaeData.loc[(jaeData["run"]==entry.run) & (jaeData["file"] == entry.file) & (jaeData["event"] == entry.event)]) != 0:
# # newtree.Fill()
# # if (entry.run in jaeData["run"]):
# # if (entry.file in jaeData[jaeData["run"] == entry.run]"file"]):
# # if (entry.event in jaeData["event"]):
# # newtree.Fill()
# pickle.dump(threetimes,open("threetimes.pkl","w"))
| [
"mcitron@cern.ch"
] | mcitron@cern.ch |
89a5ee9e158505e8dec2b66c3914cb369c48eb39 | 1e7ce1c56f3030aa6df1e928bab559f50c59bad5 | homepage/tests.py | c7d12c23566f23ea318652893201412b06dcf0c7 | [] | no_license | AIRob/WxRobot | f7fe37331c399a9d7fb467c7e913f10cc981f8eb | b27a48edb44694d4faa349d68d9b753fe4063276 | refs/heads/master | 2020-06-05T04:53:11.310909 | 2019-05-17T06:46:30 | 2019-05-17T06:46:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from django.test import TestCase
# Create your tests here.
| [
"1194681498@qq.com"
] | 1194681498@qq.com |
847e3c09351b69ab1c0163179a460ae8d4ab2c66 | 193283457d285d8ddcaa28c3d06dc0546820a224 | /virtual/bin/easy_install | c807322d4e51bf8e12a16ed26965966644e0050d | [] | no_license | ALKEMIA-CHARLES/connectpy | 39c3e3139b6ab4e8173d0c01b2bd59dce607999d | 1659a8ab7fa0183b1ecc6f307c8436540b6a64a8 | refs/heads/master | 2021-05-24T15:30:49.262152 | 2020-04-06T22:32:09 | 2020-04-06T22:32:09 | 253,630,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | #!/home/charles/Documents/moringa-school-projects/connect-4/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"charlesmtawaliJr@gmail.com"
] | charlesmtawaliJr@gmail.com | |
50351090dc204e3bf9d7ee545c28949f6f6d83bc | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_97/551.py | cf4c418fdeef20399f363ec5314f670826471da8 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | # -*- coding: utf-8 -*-
import sys
class Recycle:
def solve(self,low,high):
ans=0
for offset in range(high+1-low):
num = low+offset
num_str = list("%d"%num)
for i in range(len(num_str)-1):
modified_str_list = num_str[i+1:] + num_str[0:i+1]
if modified_str_list[0] == '0':
continue
else:
modified_str = "".join(modified_str_list)
modified_num = int(modified_str)
if num != modified_num and modified_num >= low and modified_num <= high:
ans+=1
#print ans_dict
return ans/2
recycle=Recycle()
f=open(sys.argv[1])
f2=open(sys.argv[2],'w')
lines=f.read().split('\n')
for idx in range(int(lines[0])):
params = [int(param) for param in (lines[idx+1]).split(' ')]
low=params.pop(0)
high=params.pop(0)
ans = recycle.solve(low,high)
f2.write("Case #%d: %d\n"%(idx+1,ans))
f2.close()
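# Author-added note: solve() counts each recycled pair twice, once from each
# member, hence the division by two before returning.  For example (per the
# problem's published sample, to the best of recollection) solve(10, 40) == 3,
# the pairs being (12, 21), (13, 31) and (23, 32).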
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8e3302d62e49391b32e25a39d6ddb24d0aa64cf2 | 4650ddcf27ddf908c1f38702fe4491df92232905 | /v1/my_site/models.py | ce740596944e1772fdef7c5b105e5c6e515dc0a1 | [] | no_license | volitilov/webface | 0e765232544496fcb1527175cc51e6287ed2797e | 7141c47fe9e86337183faea42be92663ef24afd9 | refs/heads/master | 2020-12-30T15:55:15.023542 | 2017-06-04T18:30:36 | 2017-06-04T18:30:36 | 91,187,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | from django.db import models
from django.utils import timezone
from django.conf import settings
class Category(models.Model):
class Meta:
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'
    title = models.CharField(max_length=30, verbose_name='Title', unique=True)
def __str__(self):
return self.title
class Mesage(models.Model):
class Meta():
        verbose_name = 'Message'
        verbose_name_plural = 'Messages'
    name = models.CharField(max_length=20, verbose_name='Name:')
    email = models.EmailField(verbose_name='Email:')
    website = models.URLField(verbose_name='Website:')
    text = models.TextField(verbose_name='Message:')
def __str__(self):
return self.name
class PortfolioItem(models.Model):
class Meta():
        verbose_name = 'Work'
        verbose_name_plural = 'Works'
    category = models.ManyToManyField(Category, verbose_name='Category(ies):')
    title = models.CharField(max_length=200, verbose_name='Title:')
    description = models.TextField(verbose_name='Description:')
    img_small = models.ImageField(upload_to='portfolio/%d-%m-%Y', verbose_name='Small image:')
    img_big = models.ImageField(upload_to='portfolio/%d-%m-%Y', verbose_name='Large image:')
    project_url = models.URLField(max_length=200, verbose_name='Project link:')
    download = models.URLField(max_length=300, verbose_name='Download link:')
    created_date = models.DateTimeField(default=timezone.now, verbose_name='Date created:')
    published_date = models.DateTimeField(blank=True, null=True, verbose_name='Date published:')
    likes = models.IntegerField(default=0, verbose_name='Like count:')
    previews = models.IntegerField(default=0, verbose_name='View count:')
def __str__(self):
return self.title
| [
"volitilov@gmail.com"
] | volitilov@gmail.com |
3392eb8b2392427e0ee3784a06465a7fbe30b026 | 679ce4b323f79b2425976201324c6c1f88b95199 | /Python/Resource/Themes/Version1/theme_publisher_2022-10-24.py | 0cc74626bf9151f49ebf4a49972e3d4beb9b570b | [] | no_license | abriggs914/Coding_Practice | ff690fb5f145a11f4da144f3882b37f473b10450 | 3afd7c59e0d90f0ef5f6203853e69f853312019b | refs/heads/master | 2023-08-31T04:04:58.048554 | 2023-08-29T13:23:29 | 2023-08-29T13:23:29 | 161,865,421 | 0 | 1 | null | 2022-10-27T08:35:29 | 2018-12-15T03:20:14 | Python | UTF-8 | Python | false | false | 37,481 | py | import os
import json
from colour_utility import *
from tkinter_utility import *
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
VERSION = \
"""
GUI for GUI theme design
Version.............1.01
Date..........2022-10-05
Author......Avery Briggs
"""
def VERSION_NUMBER():
return float(VERSION.split("\n")[2].split(".")[-2] + "." + VERSION.split("\n")[2].split(".")[-1])
def VERSION_DATE():
return VERSION.split("\n")[3].split(".")[-1]
def VERSION_AUTHOR():
return VERSION.split("\n")[4].split(".")[-1]
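# e.g. with the VERSION block above: VERSION_NUMBER() -> 1.01,
# VERSION_DATE() -> "2022-10-05", VERSION_AUTHOR() -> "Avery Briggs".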
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
class FontChooser(tkinter.Frame):
def __init__(self, master):
super().__init__(master)
self.status = tkinter.Variable(self)
self.fonts_list = list(tkinter.font.families())
self.fonts_list.sort()
self.font_sizes_list = list(map(str, [6, 8, 10, 12, 14, 16, 18, 20]))
self.font_weights_list = ["normal", "bold", "italic", "roman"]
# combo for font choice
# spinner for font size
# spinner for font weight
self.tv_label_font_name, \
self.label_font_name, \
self.tv_combo_font_name, \
self.combo_font_name, \
= combo_factory(
self,
tv_label="Font Name:",
kwargs_combo={
"values": self.fonts_list
}
)
self.tv_label_font_size = tkinter.StringVar(self, value="Font Size:")
self.label_font_size = tkinter.Label(self, textvariable=self.tv_label_font_size)
self.tv_font_size = tkinter.IntVar(self, value=12)
self.spin_font_size = tkinter.Spinbox(self, values=self.font_sizes_list, textvariable=self.tv_font_size)
self.tv_label_font_weight = tkinter.StringVar(self, value="Font Weight:")
self.label_font_weight = tkinter.Label(self, textvariable=self.tv_label_font_weight)
self.tv_font_weight = tkinter.StringVar(self)
self.spin_font_weight = tkinter.Spinbox(self, values=self.font_weights_list, textvariable=self.tv_font_weight)
self.tv_combo_font_name.trace_variable("w", self.update_name)
self.tv_font_size.trace_variable("w", self.update_size)
self.tv_font_weight.trace_variable("w", self.update_weight)
self.label_font_name.grid(row=0, column=0)
self.combo_font_name.grid(row=0, column=1)
self.label_font_size.grid(row=1, column=0)
self.spin_font_size.grid(row=1, column=1)
self.label_font_weight.grid(row=2, column=0)
self.spin_font_weight.grid(row=2, column=1)
def update_name(self, *args):
self.update_status()
def update_size(self, *args):
self.update_status()
def update_weight(self, *args):
self.update_status()
def update_status(self):
status = {
"name": self.tv_combo_font_name.get(),
"weight": self.tv_font_weight.get(),
"size": self.tv_font_size.get()
}
self.status.set(status)
class Theme:
def __init__(self):
self.name = tkinter.StringVar()
self.tv_text_box_object_back_colour = tkinter.StringVar()
self.tv_text_box_object_border_colour = tkinter.StringVar()
self.tv_text_box_text_font = tkinter.StringVar()
self.tv_text_box_text_fore_colour = tkinter.StringVar()
self.tv_label_object_back_colour = tkinter.StringVar()
self.tv_label_object_border_colour = tkinter.StringVar()
self.tv_label_text_font_name = tkinter.StringVar()
self.tv_label_text_fore_colour = tkinter.StringVar()
self.tv_list_box_object_back_colour = tkinter.StringVar()
self.tv_list_box_object_border_colour = tkinter.StringVar()
self.tv_list_box_text_font_name = tkinter.StringVar()
self.tv_list_box_text_fore_colour = tkinter.StringVar()
self.tv_combo_box_object_back_colour = tkinter.StringVar()
self.tv_combo_box_object_border_colour = tkinter.StringVar()
self.tv_combo_box_text_font_name = tkinter.StringVar()
self.tv_combo_box_text_fore_colour = tkinter.StringVar()
# self.tv_option_button_object_back_colour = tkinter.StringVar()
self.tv_option_button_object_border_colour = tkinter.StringVar()
# self.tv_option_button_text_font_name = tkinter.StringVar()
# self.tv_option_button_text_fore_colour = tkinter.StringVar()
self.tv_box_object_back_colour = tkinter.StringVar()
self.tv_box_object_border_colour = tkinter.StringVar()
self.tv_button_object_back_colour = tkinter.StringVar()
self.tv_button_object_border_colour = tkinter.StringVar()
self.tv_button_object_hover_colour = tkinter.StringVar()
self.tv_button_text_fore_colour = tkinter.StringVar()
self.tv_button_text_hover_colour = tkinter.StringVar()
self.tv_button_text_font_name = tkinter.StringVar()
def parse(self, data):
theme = Theme()
theme.name.set(data.get("Name", ""))
theme.tv_text_box_object_back_colour.set(data.get("TextBox", {}).get("object", {}).get("Back Color", ""))  # key spelled as in self.customizable; empty-string default for a StringVar
# theme.tv_text_box_object_border_colour = tkinter.StringVar()
# theme.tv_text_box_text_font = tkinter.StringVar()
# theme.tv_text_box_text_fore_colour = tkinter.StringVar()
#
# theme.tv_label_object_back_colour = tkinter.StringVar()
# theme.tv_label_object_border_colour = tkinter.StringVar()
# theme.tv_label_text_font_name = tkinter.StringVar()
# theme.tv_label_text_fore_colour = tkinter.StringVar()
#
# theme.tv_list_box_object_back_colour = tkinter.StringVar()
# theme.tv_list_box_object_border_colour = tkinter.StringVar()
# theme.tv_list_box_text_font_name = tkinter.StringVar()
# theme.tv_list_box_text_fore_colour = tkinter.StringVar()
#
# theme.tv_combo_box_object_back_colour = tkinter.StringVar()
# theme.tv_combo_box_object_border_colour = tkinter.StringVar()
# theme.tv_combo_box_text_font_name = tkinter.StringVar()
# theme.tv_combo_box_text_fore_colour = tkinter.StringVar()
#
# # self.tv_option_button_object_back_colour = tkinter.StringVar()
# theme.tv_option_button_object_border_colour = tkinter.StringVar()
# # self.tv_option_button_text_font_name = tkinter.StringVar()
# # self.tv_option_button_text_fore_colour = tkinter.StringVar()
#
# theme.tv_box_object_back_colour = tkinter.StringVar()
# theme.tv_box_object_border_colour = tkinter.StringVar()
#
# theme.tv_button_object_back_colour = tkinter.StringVar()
# theme.tv_button_object_border_colour = tkinter.StringVar()
# theme.tv_button_object_hover_colour = tkinter.StringVar()
# theme.tv_button_text_fore_colour = tkinter.StringVar()
# theme.tv_button_text_hover_colour = tkinter.StringVar()
# theme.tv_button_text_font_name = tkinter.StringVar()
return theme
def __repr__(self):
return "\n".join([
f"\t< THEME >",
f"{self.name.get()=}",
f"{self.tv_text_box_object_back_colour.get()=}",
f"{self.tv_text_box_object_border_colour.get()=}",
f"{self.tv_text_box_text_font.get()=}",
f"{self.tv_text_box_text_fore_colour.get()=}",
f"{self.tv_label_object_back_colour.get()=}",
f"{self.tv_label_object_border_colour.get()=}",
f"{self.tv_label_text_font_name.get()=}",
f"{self.tv_label_text_fore_colour.get()=}",
f"{self.tv_list_box_object_back_colour.get()=}",
f"{self.tv_list_box_object_border_colour.get()=}",
f"{self.tv_list_box_text_font_name.get()=}",
f"{self.tv_list_box_text_fore_colour.get()=}",
f"{self.tv_combo_box_object_back_colour.get()=}",
f"{self.tv_combo_box_object_border_colour.get()=}",
f"{self.tv_combo_box_text_font_name.get()=}",
f"{self.tv_combo_box_text_fore_colour.get()=}",
f"{self.tv_option_button_object_border_colour.get()=}",
f"{self.tv_box_object_back_colour.get()=}",
f"{self.tv_box_object_border_colour.get()=}",
f"{self.tv_button_object_back_colour.get()=}",
f"{self.tv_button_object_border_colour.get()=}",
f"{self.tv_button_object_hover_colour.get()=}",
f"{self.tv_button_text_fore_colour.get()=}",
f"{self.tv_button_text_hover_colour.get()=}",
f"{self.tv_button_text_font_name.get()=}"
])
class ThemePublisher(tkinter.Tk):
def __init__(
self,
theme_dir="./Themes/Version1"
):
super().__init__()
self.geometry(f"1000x600")
self.theme_dir = theme_dir
self.loaded_themes = []
self.theme = Theme()
self.working_theme = Theme()
self.theme_idx = tkinter.IntVar(self, value=0)
# object type: {access_property: tkinter_property}
# ttk.Combobox(b)
self.customizable = {
# "Form": {
# "object": {
# "Back Color": "fill"
# }
# },
"TextBox": {
"object": {
"Back Color": "background",
# "Border Color": "border"
},
"text": {
"Font Name": "font",
"Fore Color": "foreground"
}
},
"Label": {
"object": {
"Back Color": "background",
# "Border Color": "border",
},
"text": {
"Font Name": "font",
"Fore Color": "foreground"
}
},
"ListBox": {
"object": {
"Back Color": "background",
"Border Color": "border"
},
"text": {
"Font Name": "font",
"Fore Color": "foreground"
}
},
"ComboBox": {
"object": {
"Back Color": "background",
# "Border Color": "outline"
},
"text": {
"Font Name": "font",
"Fore Color": "foreground"
}
},
"OptionButton": {
"object": {
"Border Color": "outline"
}
},
"Box": {
"object": {
"Back Color": "background",
# "Border Color": "bd"
}
},
"Button": {
"object": {
"Back Color": "background",
# "Border Color": "border",
"Hover Color": "activebackground"
},
"text": {
"Fore Color": "foreground",
"Hover Fore Color": "activeforeground",
# "Font Size": "font"
}
}
}
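# Example of how a selection resolves: picking ("Button", "object",
# "Hover Color") in the three combos maps, via this dict, to the tkinter
# option name "activebackground", which rgb_update() then applies as
# widget.configure(activebackground=<chosen colour>).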
self.load_themes()
self.dirty_themes = tkinter.Variable(self, value=[False for i in range(len(self.loaded_themes) + 1)])
# tkinter.Frame(b)
# btn = tkinter.Button(self)
# btn.configure(activebackground=, activeforeground=, background=, borderwidth=, cursor=, disabledforeground=, font=,foreground=,highlightbackground=, highlightcolor=, justify=)
self.tv_btn_prev_theme, \
self.btn_prev_theme \
= button_factory(
self,
tv_btn="<<",
kwargs_btn={
"command": self.click_prev_theme
}
)
self.tv_btn_next_theme, \
self.btn_next_theme \
= button_factory(
self,
tv_btn=">>",
kwargs_btn={
"command": self.click_next_theme
}
)
self.tv_label_theme_name, \
self.label_theme_name, \
self.tv_entry_theme_name, \
self.entry_theme_name, \
= entry_factory(
self,
tv_label="Theme Name:"
)
self.tv_label_combo_choice_1, \
self.label_combo_choice_1, \
self.tv_combo_choice_1, \
self.combo_choice_1 \
= combo_factory(
self,
tv_label="Customize:",
tv_combo=tkinter.StringVar(name="tv_combo_1", value=""),
kwargs_combo={
"values": list(self.customizable)
}
)
self.tv_label_combo_choice_2, \
self.label_combo_choice_2, \
self.tv_combo_choice_2, \
self.combo_choice_2 \
= combo_factory(
self,
tv_label="Option:",
tv_combo=tkinter.StringVar(name="tv_combo_2", value=""),
kwargs_combo={
"state": "disabled"
}
)
self.tv_label_combo_choice_3, \
self.label_combo_choice_3, \
self.tv_combo_choice_3, \
self.combo_choice_3 \
= combo_factory(
self,
tv_label="Attribute:",
tv_combo=tkinter.StringVar(name="tv_combo_3", value=""),
kwargs_combo={
"state": "disabled"
}
)
self.tv_combo_choice_1.trace_variable("w", self.combo_update)
self.tv_combo_choice_2.trace_variable("w", self.combo_update)
self.tv_combo_choice_3.trace_variable("w", self.combo_update)
print(f"{self.tv_combo_choice_1.__dict__['_name']=}")
self.tv_dc_frame_fill = tkinter.StringVar(self, value="")
self.tv_dc_textbox_fill = tkinter.StringVar(self, value="")
self.tv_dc_textbox_background = tkinter.StringVar(self, value="")
self.demo_dat_names_list = [
"Darth Vader",
"Luke Skywalker",
"Obi-wan Kenobi",
"Leia Skywalker"
]
self.demo_dat_label_title = "This is a Demo Form"
self.demo_dat_entry_text = "This is some demo text. Type something here to see how it looks"
self.demo_dat_label_entry_text = "Demo textBox:"
self.demo_dat_btn_label = "Click Me!"
self.demo_dat_combo_label = "Demo ComboBox:"
self.demo_dat_label_list = "This is a demo List:"
self.demo_dat_list_list = [
("Alderaan", "2 Billion"),
("Tatooine", "200 Thousand"),
("Mustafar", "20 Thousand")
]
self.demo_form_frame = tkinter.Frame(self)
self.demo_form_sub_frame = tkinter.Frame(self.demo_form_frame, background=random_colour(rgb=False))
# self.demo_tv_label_title = tkinter.StringVar(self, value=self.demo_dat_label_title)
# self.demo_label_title = tkinter.Label(self.demo_form_sub_frame, textvariable=self.demo_tv_label_title)
self.demo_tv_label_title, self.demo_label_title = label_factory(self.demo_form_sub_frame,
tv_label=self.demo_dat_label_title)
self.demo_tv_label_entry, self.demo_label_entry, self.demo_tv_entry, self.demo_entry = entry_factory(
    self.demo_form_sub_frame, tv_label=self.demo_dat_label_entry_text, tv_entry=self.demo_dat_entry_text)
self.demo_tv_button, self.demo_button = button_factory(self.demo_form_sub_frame, tv_btn=self.demo_dat_btn_label,
kwargs_btn={"command": self.click_demo_btn})
self.demo_tv_label_combo, self.demo_label_combo, self.demo_tv_combo, self.demo_combo = combo_factory(
self.demo_form_sub_frame, tv_label=self.demo_dat_combo_label,
kwargs_combo={"values": self.demo_dat_names_list})
self.demo_tv_label_list, self.demo_label_list, self.demo_tv_list, self.demo_list = list_factory(
self.demo_form_sub_frame, tv_label=self.demo_dat_label_list, tv_list=self.demo_dat_list_list)
self.demo_radio_frame = tkinter.Frame(self.demo_form_sub_frame)
self.tv_demo_radio = tkinter.IntVar(self)
self.demo_radio_1 = tkinter.Radiobutton(self.demo_radio_frame, text="Popcorn", variable=self.tv_demo_radio,
value=1, command=self.demo_radio_update)
self.demo_radio_2 = tkinter.Radiobutton(self.demo_radio_frame, text="Hot Dog", variable=self.tv_demo_radio,
value=2, command=self.demo_radio_update)
self.demo_radio_3 = tkinter.Radiobutton(self.demo_radio_frame, text="Cotton Candy", variable=self.tv_demo_radio,
value=3, command=self.demo_radio_update)
self.tv_btn_publish_theme, self.btn_publish_theme = button_factory(self, tv_btn="publish", kwargs_btn={
"command": self.click_publish_theme})
self.btn_prev_theme.grid(row=0, column=0)
self.btn_next_theme.grid(row=0, column=1)
self.label_theme_name.grid(row=1, column=0)
self.entry_theme_name.grid(row=1, column=1)
self.label_combo_choice_1.grid(row=2, column=0)
self.combo_choice_1.grid(row=2, column=1)
self.label_combo_choice_2.grid(row=3, column=0)
self.combo_choice_2.grid(row=3, column=1)
self.label_combo_choice_3.grid(row=4, column=0)
self.combo_choice_3.grid(row=4, column=1)
self.rgb_slider = RGBSlider(self, show_result=True)
self.rgb_slider.colour.trace_variable("w", self.rgb_update)
self.rgb_slider.grid(row=5, column=0, columnspan=2)
self.font_chooser = FontChooser(self)
self.font_chooser.status.trace_variable("w", self.font_update)
self.font_chooser.grid(row=5, column=0, columnspan=2)
self.number_chooser = ttk.Scale(self, from_=0, to=100, orient=tkinter.HORIZONTAL)
self.number_chooser.grid(row=5, column=0, columnspan=2)
self.btn_publish_theme.grid(row=6, column=0)
self.demo_form_frame.grid(row=7, column=0)
self.demo_form_sub_frame.grid(row=0, column=0)
self.demo_label_title.grid(row=0, column=0)
self.demo_label_entry.grid(row=1, column=0)
self.demo_entry.grid(row=1, column=1)
self.demo_button.grid(row=2, column=0)
self.demo_label_list.grid(row=3, column=0)
self.demo_list.grid(row=3, column=1)
self.demo_radio_frame.grid(row=4, column=0)
self.demo_radio_1.grid(row=0, column=0)
self.demo_radio_2.grid(row=1, column=0)
self.demo_radio_3.grid(row=2, column=0)
self.combo_update(None, None, None)
def combo_choice_data(self):
return {
"object": self.tv_combo_choice_1.get(),
"option": self.tv_combo_choice_2.get(),
"attribute": self.tv_combo_choice_3.get()
}
def combo_update(self, var_name, index, mode, *rest):
print(f"combo_update")
message = ""
# print(f"{var_name=}, {index=}, {mode=}, {rest=}")
if var_name == "tv_combo_1":
self.tv_combo_choice_2.set("")
self.tv_combo_choice_3.set("")
if var_name == "tv_combo_2":
self.tv_combo_choice_3.set("")
combo_data = self.combo_choice_data()
# print(f"{len(combo_data)=}, {combo_data=}")
obj = combo_data["object"]
option = combo_data["option"]
attribute = combo_data["attribute"]
showing = (True, False, False)
if obj:
message += "A"
options = list(self.customizable[obj])
if option:
message += "B"
attributes = list(self.customizable[obj][option])
if attribute:
message += "C"
if attributes:
message += "D"
match attribute:
case "Border" | "bd":
# self.number_chooser.configure(state="normal")
showing = (False, True, False)
self.number_chooser.grid(row=5, column=0, columnspan=2)
self.rgb_slider.grid_forget()
case "Font Name":
# self.font_chooser.configure(state="normal")
showing = (False, False, True)
self.font_chooser.grid(row=5, column=0, columnspan=2)
self.rgb_slider.grid_forget()
case _: # colour
# self.font_chooser.configure(state="disabled")
# self.number_chooser.configure(state="disabled")
showing = (True, False, False)
# print(f"{options=}, {attribute=}")
# print(f"{attributes=}, {attribute=}")
else:
message += "E"
self.combo_choice_2.configure(state="disabled")
else:
message += "F"
self.combo_choice_3.configure(state="active", values=attributes)
else:
message += "G"
self.combo_choice_2.configure(state="active", values=options)
self.combo_choice_3.configure(state="disabled")
self.tv_combo_choice_3.set("")
else:
message += "H"
self.combo_choice_2.configure(state="disabled")
self.combo_choice_3.configure(state="disabled")
self.tv_combo_choice_2.set("")
self.tv_combo_choice_3.set("")
# print(f"{combo_data=}")
# print(f"{message=}")
# `showing` selects exactly one chooser: (rgb, number, font);
# grid the chosen widget and hide the other two.
if showing[0]:
    widget = self.rgb_slider
    self.number_chooser.grid_forget()
    self.font_chooser.grid_forget()
elif showing[1]:
    widget = self.number_chooser
    self.rgb_slider.grid_forget()
    self.font_chooser.grid_forget()
else:
    widget = self.font_chooser
    self.rgb_slider.grid_forget()
    self.number_chooser.grid_forget()
print(f"{widget=}, {showing=}")
widget.grid(row=5, column=0, columnspan=2)
def load_themes(self):
if not os.path.isdir(self.theme_dir):
os.mkdir(self.theme_dir)
return
loaded_themes = []
for file in os.listdir(self.theme_dir):
if file.endswith(".json") and file.startswith("TKTheme_"):
with open(f"{self.theme_dir}/{file}", "r") as f:
# loaded_themes.append(json.load(f))
theme = self.parse(json.load(f))
self.loaded_themes.append(theme)
print(f"LOADED THEME\n\n{theme}")
print(f"Loaded {len(self.loaded_themes)} themes on start.")
def click_demo_btn(self):
tkinter.messagebox.showinfo(title="Thanks", message="Thank You!")
def demo_radio_update(self):
print(f"got {self.tv_demo_radio.get()}")
def rgb_update(self, *args):
data = self.combo_choice_data()
c = self.rgb_slider.colour.get()
if all(data.values()):
print(f"updating demo\n\t{data=}\n\tcolour{self.rgb_slider.colour.get()}")
d1 = data["object"]
d2 = data["option"]
d3 = data["attribute"]
attr_name = {self.customizable[d1][d2][d3]: c}
match d1:
case "TextBox":
widgets = [self.demo_entry]
case "Label":
widgets = [
self.demo_label_title,
self.demo_label_entry,
self.demo_label_list,
self.demo_label_combo
]
case "ComboBox":
widgets = [self.demo_combo]
case "ListBox":
widgets = [self.demo_list]
case "Box":
widgets = [self.demo_form_sub_frame]
case "Button":
widgets = [self.demo_button]
case "OptionButton":
widgets = [
self.demo_radio_1,
self.demo_radio_2,
self.demo_radio_3
]
case _:
widgets = None
if widgets is not None:
print(f"ABOUT TO UPDATE WIDGETS\n{widgets=}\n{data=}\n{attr_name=}")
for widget in widgets:
widget.configure(**attr_name)
self.dirty_current_theme()
# for k1, v1 in self.customizable:
# if k1 == d1:
# for k2, v2 in v1.items():
# if k2 == d2:
# for k3, v3 in v2.items():
# if k3 == d3:
# attr_name = v3
def font_update(self, *args):
data = self.combo_choice_data()
f = eval(self.font_chooser.status.get())
f_name = f["name"]
f_weight = f["weight"]
f_size = f["size"]
f = (f_name, f_size, f_weight)
if all(data.values()):
print(f"updating demo\n\t{data=}\n\tcolour{self.rgb_slider.colour.get()}")
d1 = data["object"]
d2 = data["option"]
d3 = data["attribute"]
attr_name = {self.customizable[d1][d2][d3]: f}
match d1:
case "TextBox":
widgets = [self.demo_entry]
case "Label":
widgets = [
self.demo_label_title,
self.demo_label_entry,
self.demo_label_list,
self.demo_label_combo
]
case "ComboBox":
widgets = [self.demo_combo]
case "ListBox":
widgets = [self.demo_list]
case "Box":
widgets = [self.demo_form_sub_frame]
case "Button":
widgets = [self.demo_button]
case "OptionButton":
widgets = [
self.demo_radio_1,
self.demo_radio_2,
self.demo_radio_3
]
case _:
widgets = None
if widgets is not None:
print(f"ABOUT TO UPDATE WIDGETS\n{widgets=}\n{data=}\n{attr_name=}")
for widget in widgets:
widget.configure(**attr_name)
self.dirty_current_theme()
def click_prev_theme(self):
v = self.theme_idx.get()
print(f"click_prev_theme {v} -> {v - 1}")
if v > 0:
self.theme_idx.set(v - 1)
self.demo_current_theme()
else:
messagebox.showinfo(title="Theme Publisher", message="Cannot go back any further.")
def click_next_theme(self):
v = self.theme_idx.get()
print(f"click_next_theme {v} -> {v + 1}")
if v < len(self.loaded_themes):
self.theme_idx.set(v + 1)
self.demo_current_theme()
else:
messagebox.showinfo(title="Theme Publisher", message="Cannot go any farther forward.")
def demo_current_theme(self):
idx = self.theme_idx.get()
if idx < len(self.loaded_themes):
theme = self.loaded_themes[idx]
else:
theme = self.working_theme
print(f"{theme=}")
# Map each stored theme variable to the demo widgets it drives and the
# tkinter option it sets, then apply them in one pass.  Border-colour
# variables are skipped here: plain tk widgets expose no border-colour
# option for widget.configure() to target.
labels = [self.demo_label_title, self.demo_label_entry,
          self.demo_label_list, self.demo_label_combo]
bindings = [
    (theme.tv_text_box_object_back_colour, [self.demo_entry], "background"),
    (theme.tv_text_box_text_font, [self.demo_entry], "font"),
    (theme.tv_text_box_text_fore_colour, [self.demo_entry], "foreground"),
    (theme.tv_label_object_back_colour, labels, "background"),
    (theme.tv_label_text_font_name, labels, "font"),
    (theme.tv_label_text_fore_colour, labels, "foreground"),
    (theme.tv_list_box_object_back_colour, [self.demo_list], "background"),
    (theme.tv_list_box_text_font_name, [self.demo_list], "font"),
    (theme.tv_list_box_text_fore_colour, [self.demo_list], "foreground"),
    (theme.tv_combo_box_object_back_colour, [self.demo_combo], "background"),
    (theme.tv_combo_box_text_font_name, [self.demo_combo], "font"),
    (theme.tv_combo_box_text_fore_colour, [self.demo_combo], "foreground"),
    (theme.tv_box_object_back_colour, [self.demo_form_sub_frame], "background"),
    (theme.tv_button_object_back_colour, [self.demo_button], "background"),
    (theme.tv_button_object_hover_colour, [self.demo_button], "activebackground"),
    (theme.tv_button_text_fore_colour, [self.demo_button], "foreground"),
    (theme.tv_button_text_hover_colour, [self.demo_button], "activeforeground"),
    (theme.tv_button_text_font_name, [self.demo_button], "font"),
]
for tv, widgets, option in bindings:
    value = tv.get()
    if value:
        print(f"applying {option}={value!r}")
        for widget in widgets:
            widget.configure(**{option: value})
    else:
        print(f"no stored value for {option}; leaving widgets unchanged")
self.theme = theme
def dirty_current_theme(self):
dirty = list(self.dirty_themes.get())
dirty[self.theme_idx.get()] = True
self.dirty_themes.set(dirty)
def click_publish_theme(self):
print(f"publish")
def parse(self, f_in):
# theme = None
parsed_theme = {}
theme_keys_1 = set(self.customizable.keys())
print(f"{theme_keys_1=}")
theme_keys_2 = set(flatten([list(v.keys()) for v in self.customizable.values()]))
print(f"{theme_keys_2=}")
theme_keys_3 = set(flatten([[list(v2.keys()) for v2 in v1.values()] for v1 in self.customizable.values()]))
print(f"{theme_keys_3=}")
for k1, v1 in f_in.items():
if k1 in theme_keys_1:
for k2, v2 in v1.items():
if k2 in theme_keys_2:
for k3, v3 in v2.items():
if k3 in theme_keys_3:
print(f"{k1=}, {k2=}, {k3=}, {v3=}")
# parsed_theme.get(k1, {}).get(k2, {}).get(k3, {})
parsed_theme.setdefault(k1, {}).setdefault(k2, {}).setdefault(k3, {})
parsed_theme[k1][k2][k3] = v3
# \
# = v3
theme = Theme()
theme = theme.parse(parsed_theme)
print(f"theme={theme}")
return theme
if __name__ == '__main__':
ThemePublisher().mainloop()
| [
"abriggs1@unb.ca"
] | abriggs1@unb.ca |
f930b669bdff8766d9fe09ddcf376ca7cb482bdc | c68aea1de91b46ae684792123c61e84c44ea0266 | /code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/distutils/tests/test_config.py | a3844974f2ad2b3535ec7a830b7ad4b928887f54 | [
"Apache-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi",
"Python-2.0"
] | permissive | Winfredemalx54/algorithm-challenger-1 | 12e23bed89ca889701db1b17ac540ce62ce86d8e | 761c2c39e041fb155f853385998d5c6318a39913 | refs/heads/master | 2022-11-22T15:03:01.548605 | 2020-07-11T12:26:31 | 2020-07-11T12:26:31 | 297,955,141 | 3 | 0 | Apache-2.0 | 2020-09-23T11:58:19 | 2020-09-23T11:58:18 | null | UTF-8 | Python | false | false | 3,870 | py | """Tests for distutils.pypirc.pypirc."""
import sys
import os
import unittest
import tempfile
from distutils.core import PyPIRCCommand
from distutils.core import Distribution
from distutils.log import set_threshold
from distutils.log import WARN
from distutils.tests import support
from test.support import run_unittest
PYPIRC = """\
[distutils]
index-servers =
server1
server2
server3
[server1]
username:me
password:secret
[server2]
username:meagain
password: secret
realm:acme
repository:http://another.pypi/
[server3]
username:cbiggles
password:yh^%#rest-of-my-password
"""
PYPIRC_OLD = """\
[server-login]
username:tarek
password:secret
"""
WANTED = """\
[distutils]
index-servers =
pypi
[pypi]
username:tarek
password:xxx
"""
class BasePyPIRCCommandTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def setUp(self):
"""Patches the environment."""
super(BasePyPIRCCommandTestCase, self).setUp()
self.tmp_dir = self.mkdtemp()
os.environ['HOME'] = self.tmp_dir
self.rc = os.path.join(self.tmp_dir, '.pypirc')
self.dist = Distribution()
class command(PyPIRCCommand):
def __init__(self, dist):
PyPIRCCommand.__init__(self, dist)
def initialize_options(self):
pass
finalize_options = initialize_options
self._cmd = command
self.old_threshold = set_threshold(WARN)
def tearDown(self):
"""Removes the patch."""
set_threshold(self.old_threshold)
super(BasePyPIRCCommandTestCase, self).tearDown()
class PyPIRCCommandTestCase(BasePyPIRCCommandTestCase):
def test_server_registration(self):
# This test makes sure PyPIRCCommand knows how to:
# 1. handle several sections in .pypirc
# 2. handle the old format
# new format
self.write_file(self.rc, PYPIRC)
cmd = self._cmd(self.dist)
config = cmd._read_pypirc()
config = list(sorted(config.items()))
waited = [('password', 'secret'), ('realm', 'pypi'),
('repository', 'https://upload.pypi.org/legacy/'),
('server', 'server1'), ('username', 'me')]
self.assertEqual(config, waited)
# old format
self.write_file(self.rc, PYPIRC_OLD)
config = cmd._read_pypirc()
config = list(sorted(config.items()))
waited = [('password', 'secret'), ('realm', 'pypi'),
('repository', 'https://upload.pypi.org/legacy/'),
('server', 'server-login'), ('username', 'tarek')]
self.assertEqual(config, waited)
def test_server_empty_registration(self):
cmd = self._cmd(self.dist)
rc = cmd._get_rc_file()
self.assertFalse(os.path.exists(rc))
cmd._store_pypirc('tarek', 'xxx')
self.assertTrue(os.path.exists(rc))
f = open(rc)
try:
content = f.read()
self.assertEqual(content, WANTED)
finally:
f.close()
def test_config_interpolation(self):
# using the % character in .pypirc should not raise an error (#20120)
self.write_file(self.rc, PYPIRC)
cmd = self._cmd(self.dist)
cmd.repository = 'server3'
config = cmd._read_pypirc()
config = list(sorted(config.items()))
waited = [('password', 'yh^%#rest-of-my-password'), ('realm', 'pypi'),
('repository', 'https://upload.pypi.org/legacy/'),
('server', 'server3'), ('username', 'cbiggles')]
self.assertEqual(config, waited)
def test_suite():
return unittest.makeSuite(PyPIRCCommandTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| [
"bater.makhabel@gmail.com"
] | bater.makhabel@gmail.com |
f834375011f70f9856b73cfc2d76bbf75e4eda72 | 0b144606d44067d6160f1143a759fe7be1a67a93 | /spacy/lang/fa/lex_attrs.py | 99b8e27878bc52ac2dddbf7e67b24da9f088e1f1 | [
"MIT"
] | permissive | huaxz1986/spaCy | a64cba5eb0390068b48a8d4a49c6e8a8b0c22063 | c3c064ace40f4f310d2d220b54b4dc0325a1e3ba | refs/heads/master | 2023-06-09T19:20:46.409792 | 2023-06-02T12:29:52 | 2023-06-02T12:29:52 | 145,535,112 | 2 | 2 | MIT | 2021-07-09T08:11:47 | 2018-08-21T08:50:18 | Python | UTF-8 | Python | false | false | 1,386 | py | from ...attrs import LIKE_NUM
MIM = "م"
ZWNJ_O_MIM = "ام"
YE_NUN = "ین"
_num_words = set(
"""
صفر
یک
دو
سه
چهار
پنج
شش
شیش
هفت
هشت
نه
ده
یازده
دوازده
سیزده
چهارده
پانزده
پونزده
شانزده
شونزده
هفده
هجده
هیجده
نوزده
بیست
سی
چهل
پنجاه
شصت
هفتاد
هشتاد
نود
صد
یکصد
یکصد
دویست
سیصد
چهارصد
پانصد
پونصد
ششصد
شیشصد
هفتصد
هفصد
هشتصد
نهصد
هزار
میلیون
میلیارد
بیلیون
بیلیارد
تریلیون
تریلیارد
کوادریلیون
کادریلیارد
کوینتیلیون
""".split()
)
_ordinal_words = set(
"""
اول
سوم
سیام""".split()
)
_ordinal_words.update({num + MIM for num in _num_words})
_ordinal_words.update({num + ZWNJ_O_MIM for num in _num_words})
_ordinal_words.update({num + YE_NUN for num in _ordinal_words})
def like_num(text):
"""
check if text resembles a number
"""
text = (
text.replace(",", "")
.replace(".", "")
.replace("،", "")
.replace("٫", "")
.replace("/", "")
)
if text.isdigit():
return True
if text in _num_words:
return True
if text in _ordinal_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
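# Author-added examples of the behaviour (not from the spaCy source):
#   like_num("1,000")  -> True   (separators are stripped before isdigit())
#   like_num("بیست")   -> True   (cardinal listed in _num_words)
#   like_num("بیستم")  -> True   (ordinal formed by appending MIM)
#   like_num("سلام")   -> False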
| [
"noreply@github.com"
] | huaxz1986.noreply@github.com |