hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2fb9ec9da81834174846ea3dfb94ffcb9f80a646 | 38,161 | py | Python | recumpiler/mutators.py | Toasterstein/recumpiler | 390957cfaa8f60ffeb24adb43b91981dd445c6b9 | ["MIT"] | null | null | null | recumpiler/mutators.py | Toasterstein/recumpiler | 390957cfaa8f60ffeb24adb43b91981dd445c6b9 | ["MIT"] | 8 | 2021-03-23T21:54:29.000Z | 2021-03-30T23:17:06.000Z | recumpiler/mutators.py | Toasterstein/recumpiler | 390957cfaa8f60ffeb24adb43b91981dd445c6b9 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""garbage code to make garbage text"""
import random
import re
import string
import upsidedown
from functools import wraps
from math import ceil
from typing import List, Optional
from logging import getLogger
from timeit import default_timer as timer
import homoglyphs as hg
import inflect
import nltk
import numpy as np
import pronouncing
from better_profanity import profanity
from nltk.tokenize.treebank import TreebankWordDetokenizer
from textblob import TextBlob, Word, Sentence
from word2number import w2n
# TODO: issues with pyenchant
# import splitter
from recumpiler.cheap_emoji_alias import get_cheap_emoji_alias
# These are imported like this because the dependencies are complicated to install
# and require large files, which makes them awkward to test in CI for now.
# TODO: avoid this workaround in the future; build a better CI solution
try:
from recumpiler.emojijnet import get_gloveword_emoji
except:
get_gloveword_emoji = lambda word: None
try:
from recumpiler.mutators_deepmoji import get_sentiment_emoji
except:
get_sentiment_emoji = lambda sentence: None
from recumpiler.mutators_emoji_data import get_emoji_from_data
from recumpiler.mutators_emotlib import get_emoticon
from recumpiler.utils import (
load_simple_text_emojis,
load_action_verbs,
load_rp_pronouns,
init_emoji_database,
get_emoji_database,
load_text_face_emoji,
load_garbage_tokens,
decision,
TweetWordTokenizer,
)
inflect_engine = inflect.engine()
__log__ = getLogger(__name__)
def logged_mutator(f):
@wraps(f)
def wrapper(*args, **kwds):
start = timer()
output = f(*args, **kwds)
end = timer()
# TODO: issue hitting recursion limit
# __log__.info(
# {
# "message": "called mutator",
# "mutator": f.__name__,
# "args": args,
# "kwargs": kwds,
# "output": output,
# "exc_time": "{0:.15f}".format(end - start),
# }
# )
return output
return wrapper
# TODO: refactor this global garbage
num_to_word_probability = 0.3
word_to_num_probability = 0.3
common_misspellings_probability = 0.2
hard_owo_replace_probability = 0.2
bold_text_probability = 0.04
REEE_probability = 0.06
REEE_allcaps_probability = 0.3
add_random_rp_action = True
add_random_rp_mid_sentence_action_probability = 0.005
add_random_rp_end_sentence_action_probability = 0.02
more_verbs_probability_decay = 0.4
add_random_garbage = True
add_random_garbage_probability = 0.01
add_random_plurals = True
add_random_plurals_probability = 0.1
randomly_lemmatize = True
randomly_lemmatize_probability = 0.1
randomly_overemphasis_punctuation = True
randomly_overemphasis_punctuation_probability = 0.5
randomly_overemphasis_punctuation_max_fuck = 4
randomly_capitalize_word = True
randomly_capitalize_word_probability = 0.1
randomly_spongebob_word = True
randomly_spongebob_word_probability = 0.1
add_randomly_text_face_emoji = True
add_randomly_text_face_emoji_probability = 0.05
add_random_simple_text_emoji = True
add_random_simple_text_emoji_probability = 0.07
randomly_swap_char = True
randomly_swap_char_probability = 0.04
randomly_swap_char_swap_percent = 0.2
randomly_insert_char = True
randomly_insert_char_probability = 0.04
randomly_insert_char_insert_percent = 0.1
random_leet_speak = True
random_leet_speak_probability = 0.1
utf_8_char_swaps_probability = 0.1
random_censor_probability = 0.01
random_censor_percent = 0.25
censor_profanity_probability = 0.7
censor_profanity_percent = 0.25
random_synonym_probability = 0.5
random_ending_y_probability = 0.05
leet_speak_min_token_length = 5
adding_ending_ksksk_andioop_probability = 0.8
adding_ending_ksksk_save_the_turtles_probability = 0.3
ksksk_enlargement_probability = 0.7
owo_vs_ouo_bias = 0.5
add_extra_ed_probability = 0.05
split_compound_word_probability = 0.03
lazy_char_subbing_probability = 0.6
uck_to_ucc_swap_probability = 0.4
juwuice_swap_probability = 0.5
add_x3_if_token_has_rawr_probability = 0.2
me_2_meh_swap_probability = 0.5
me_2_meow_swap_probability = 0.5
hard_uwu_replace_probability = 0.3
sub_to_subby_swap_probability = 0.3
fucking_normies_addition = 0.3
get_rhymes_probability = 0.01
max_runon_rhymes = 3
homofiy_probability = 0.3
homofiy_percentage = 0.3
back_tick_text_probability = 0.05
space_gap_text_probability = 0.02
space_gap_text_min_gap_size = 1
space_gap_text_max_gap_size = 4
add_text_relevant_emoji_probability = 0.1
wrap_text_relevant_emoji_probability = 0.02
lr_to_w_swap_probability = 0.4
invert_word_probability = 0.04
upside_down_word_probability = 0.05
@logged_mutator
def num_to_word(token: str) -> str:
try:
return str(w2n.word_to_num(token))
except ValueError:
return token
@logged_mutator
def word_to_num(token: str) -> str:
try:
return inflect_engine.number_to_words(int(token))
except ValueError:
return token
@logged_mutator
def knotter(token: str) -> str:
token = re.sub(
r"(([^kK]|^)no+t)",
lambda match: f"kn{'o' * random.choice(range(1, 3))}t",
token,
flags=re.IGNORECASE,
)
return token
@logged_mutator
def homoify(token: str, homo_percent: float = 0.3):
    if len(token) <= 3:  # don't homoglyph-censor tokens this small
return token
swaps = int(ceil(len(token) * homo_percent))
indexes = random.choices(range(1, len(token)), k=swaps)
for i in indexes:
token = "".join(
[
token[w]
if w != i
else random.choice(hg.Homoglyphs().get_combinations(token[w]))
for w in range(len(token))
]
)
return token
@logged_mutator
def owoer(token: str) -> str:
# TODO: owo usually goes to owoo should suppress.
# TODO: does this still happen?
token = re.sub(
r"(ou)([^o]|$)",
lambda match: f"ouo{match.group(2) or ''}",
token,
flags=re.IGNORECASE,
)
token = re.sub(
r"(ow)([^o]|$)",
lambda match: f"owo{match.group(2) or ''}",
token,
flags=re.IGNORECASE,
)
token = re.sub(
r"(ov)([^o]|$)",
lambda match: f"ovo{match.group(2) or ''}",
token,
flags=re.IGNORECASE,
)
token = re.sub(r"(cor)", lambda match: f"cowor", token)
if (
"owo" not in token.lower()
and "ouo" not in token.lower()
and decision(hard_owo_replace_probability)
):
owo_str = "owo" if decision(owo_vs_ouo_bias) else "ouo"
token = re.sub(
r"(o+)",
lambda match: (owo_str * len(match.group(1))).replace("oo", "o"),
token,
flags=re.IGNORECASE,
count=random.choice(range(0, 2)),
)
# juice -> juwuice
if decision(juwuice_swap_probability):
token = re.sub(
r"u+(i?ce)",
lambda match: f"uwu{match.group(1)}",
token,
flags=re.IGNORECASE,
)
if "uwu" not in token.lower() and decision(hard_uwu_replace_probability):
uwu_str = "uwu"
token = re.sub(
r"u+", uwu_str, token, flags=re.IGNORECASE, count=random.choice(range(0, 2))
)
return token
@logged_mutator
def fuckyer(token: str) -> str:
extra_fun = ""
y_choice_1 = ("y" if decision(0.5) else "i") * random.choice(range(1, 5))
y_choice_2 = ("y" if decision(0.5) else "i") * random.choice(range(1, 5))
if decision(0.5):
extra_fun = f"w{'u' * random.choice(range(1, 5))}k{y_choice_2}"
token = re.sub(
r"([Ff])?uck(er|ing)?",
lambda match: f"{match.group(1) or ''}{'u' * random.choice(range(1,5))}k{y_choice_1}{match.group(2) or ''}"
+ " "
+ extra_fun,
token,
)
return token
@logged_mutator
def garbage(token: str) -> str:
# inserting gay
token = re.sub(r"([a-fh-zA-FH-Z])a+y+", lambda match: f"{match.group(1)}gay", token)
# hello -> hewwo
token = re.sub(r"([Hh])e+ll+o+?", lambda match: f"{match.group(1)}ewwo", token)
# er -> ur
if decision(0.4):
token = re.sub(
r"e+r+",
lambda match: f"u{'r' * ceil(np.random.rayleigh(1.2))}",
token,
flags=re.IGNORECASE,
)
# ello - >ewwo
if decision(0.4):
token = re.sub(
r"e+ll+o+?",
lambda match: f"ew{'w' * ceil(np.random.rayleigh(1.2))}o",
token,
flags=re.IGNORECASE,
) # 2-6ish
# cute -> koot
token = re.sub(
r"([Cc])u+te",
lambda match: f"{match.group(1)}oo{'o' * random.randint(0,5)}t",
token,
)
# ove -> wuv
if decision(0.7):
token = re.sub(r"(o+)ve", lambda match: f"w{'u' * len(match.group(1))}v", token)
# one -> wun
if decision(0.7):
token = re.sub(r"one", "wun", token, flags=re.IGNORECASE)
# as -> ass asss
if decision(0.5):
token = re.sub(
r"([aA])([sS])($|[^s])",
lambda match: f"{match.group(1)}{match.group(2) * random.randint(2,3)}t",
token,
)
# TODO: refactor (me -> meh|me -> meow) together?
# me -> meow
if decision(me_2_meow_swap_probability):
token = re.sub(
r"^me+$",
lambda match: f"m{'e' * random.randint(1,3)}{'o' * random.randint(1,3)}w",
token,
flags=re.IGNORECASE,
)
# me -> meh
if decision(me_2_meh_swap_probability):
token = re.sub(
r"^me+$",
lambda match: f"m{'e' * random.randint(1, 3)}h",
token,
flags=re.IGNORECASE,
)
# my -> mah, myah
if decision(0.5):
token = re.sub(
r"^my+$",
lambda match: f"m{'y' if decision(0.3) else ''}{'a' * random.randint(2, 3)}{'h' if decision(0.5) else ''}",
token,
)
# ion -> shun
if decision(0.5):
token = re.sub(r"ion$", "shun", token)
# .ome -> .um
if decision(0.5):
token = re.sub(r"([a-zA-Z])ome", lambda match: f"{match.group(1)}um", token)
# teh or da
if decision(0.5):
token = re.sub(r"^([Tt])he$", lambda match: f"{match.group(1)}eh", token)
else:
token = re.sub(
r"^([Tt])he$",
lambda match: f"{'D' if match.group(1) == 'T' else 'd'}a",
token,
)
# ing -> inn
if decision(0.5):
token = re.sub(
r"ing$",
f"in{'n' * random.randint(0,4) if decision(0.5) else 'in' * random.randint(0, 4)}",
token,
flags=re.IGNORECASE,
)
# ks -> ksksksk
if decision(ksksk_enlargement_probability):
token = re.sub(
r"[kK][sS]|[sS][kK]",
lambda match: f"{match.group(0) * random.randint(2,6)}",
token,
flags=re.IGNORECASE,
)
# uck -> ucc, uccci
if decision(uck_to_ucc_swap_probability):
token = re.sub(
r"u+c+k+",
lambda match: f"u{'c' * random.randint(2,6)}{'i' * random.randint(0,3)}",
token,
flags=re.IGNORECASE,
)
if decision(sub_to_subby_swap_probability):
token = re.sub(
r"s(u+)b",
lambda match: f"s{match.group(1)}bb{('y' if decision(0.5) else 'i') * random.randint(1, 2)}",
token,
flags=re.IGNORECASE,
)
# no -> nu+ nyu+
if decision(0.5):
token = re.sub(
"([nN])(o+)",
lambda match: f"{match.group(1)}{'y' if decision(0.5) else ''}{'u' * (len(match.group(2)) * random.randint(1, 6))}",
token,
flags=re.IGNORECASE,
)
return token
@logged_mutator
def reeeer(token: str) -> str:
if decision(REEE_probability):
token = re.sub(
r"([Rr])e*",
lambda match: f"{match.group(1)}e" + "e" * random.choice(range(1, 15)),
token,
)
if decision(REEE_allcaps_probability):
token = token.upper()
return token
def rawrer(token: str) -> str:
token = re.sub(r"ra([a-zA-Z])?", lambda match: f"rawr{match.group(1) or ''}", token)
token = re.sub(
r"ar([a-zA-Z])?", lambda match: f"arawr{match.group(1) or ''}", token
)
token = re.sub(r"([Rr])oar", lambda match: f"{match.group(1)}awr", token)
return token
@logged_mutator
def lr_to_w_swap(token: str) -> str:
token = re.sub(
r"([lL])",
lambda match: f"{('w' if decision(0.7) else 'wl') if match.group(1).islower() else ('W' if decision(0.7) else 'WL')}",
token,
)
token = re.sub(
r"([rR])",
lambda match: f"{('w' if decision(0.7) else 'wr') if match.group(1).islower() else ('W' if decision(0.7) else 'WR')}",
token,
)
return token
@logged_mutator
def jizzer(token: str) -> str:
token = re.sub(r"(.iz+)", "jizz", token)
return token
@logged_mutator
def cummer(token: str) -> str:
token = re.sub(r"(.ome|co+m|co+n{1,3})", "cum", token)
token = re.sub(r"(c.{0,2}u+m)", "cum", token)
token = re.sub(r"(cau|cou)", "cum", token)
token = re.sub(r"(cow)", "cum", token)
token = re.sub(r"(son|sun$)", "cum", token)
token = re.sub(r"([a-bd-zA-BD-Z])um", lambda match: f"{match.group(1)}cum", token)
token = re.sub(
r"([a-bd-zA-BD-Z])u(nn|mm)([yi])",
lambda match: f"{match.group(1)}cumm{match.group(3)}",
token,
)
token = re.sub(r"(cally)", "cummy", token)
return token
garbage_tokens = load_garbage_tokens()
@logged_mutator
def add_random_garbage_token():
return random.choice(garbage_tokens)
text_face_emojis = load_text_face_emoji()
@logged_mutator
def find_text_relevant_emoji(token: str) -> Optional[str]:
if (
len(token) < 4
    ):  # TODO: find better logic to avoid getting garbage or completely unrelated emojis
return
results = (
get_emoji_database()
.execute(
"""select Emoji from Emoji_Sentiment_Data where "Unicode name" LIKE ?""",
("%" + token.upper() + "%",),
)
.fetchall()
)
if results:
return random.choice(results)[0]
emoji_database = init_emoji_database()
simple_text_emojis = load_simple_text_emojis()
action_verbs = load_action_verbs()
rp_pronouns = load_rp_pronouns()
@logged_mutator
def get_random_text_face_emojis():
return random.choice(text_face_emojis)
@logged_mutator
def get_random_simple_text_emojis():
return random.choice(simple_text_emojis)
@logged_mutator
def generate_spongebob_text(token: str) -> str:
"""gEnErAtEs sPoNgEbOb mEmE TeXt"""
spongebob_text = ""
for i, char in enumerate(token):
if i % 2 == 0:
spongebob_text += char.lower()
else:
spongebob_text += char.upper()
return spongebob_text
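# Example (illustrative, not part of the original module): the loop above
# lower-cases characters at even indices and upper-cases the rest, e.g.
#   generate_spongebob_text("hello world")  ->  "hElLo wOrLd"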
@logged_mutator
def shuffle_str(token: str) -> str:
token_str_list = list(token)
random.shuffle(token_str_list)
return "".join(token_str_list)
@logged_mutator
def get_runon_of_rhymes(
token: str,
max_runon: int = 3,
allow_token_dupe: bool = False,
allow_rhyme_dupes: bool = False,
) -> List[str]:
# TODO: this is a complicated mess
selected_rhymes = []
rhymes = get_pronouncing_rhyme(token)
if not allow_token_dupe:
try:
rhymes.remove(token)
except ValueError:
pass
level = 4
while True:
rhymes += get_nltk_rymes(token, level)
if not allow_token_dupe:
try:
rhymes.remove(token)
except ValueError:
pass
if rhymes:
break
if level == 0 or len(rhymes) > max_runon:
break
level -= 1
if not allow_token_dupe:
try:
rhymes.remove(token)
except ValueError:
pass
if not allow_rhyme_dupes:
rhymes = list(sorted(list(set(rhymes))))
if rhymes:
selected_rhymes += random.choices(rhymes, k=min(len(rhymes), max_runon))
return selected_rhymes
@logged_mutator
def get_pronouncing_rhyme(token: str) -> List[str]:
return pronouncing.rhymes(token)
@logged_mutator
def get_nltk_rymes(token: str, level: int) -> List[str]:
# TODO: stub
def rhyme(inp, level: int):
"""
1 bad rhymes
2
4 good rhymes
"""
entries = nltk.corpus.cmudict.entries()
syllables = [(word, syl) for word, syl in entries if word == inp]
rhymes = []
for (word, syllable) in syllables:
rhymes += [
word for word, pron in entries if pron[-level:] == syllable[-level:]
]
return set(rhymes)
return list(rhyme(token, level))
@logged_mutator
def over_emphasise_punctuation(token: str, max_fuck: int = 4) -> str:
if token == "?":
token += "".join(
random.choices(
[
"1",
# "i",
"!",
"?",
# "I",
# "/",
# ".",
# "\\"
],
k=random.choice(range(0, max_fuck)),
)
)
token = shuffle_str(token)
if token == "!":
token += "".join(
random.choices(
[
"1",
# "i",
"!",
"?",
# "I",
# "/",
"|",
],
k=random.choice(range(0, max_fuck)),
)
)
token = shuffle_str(token)
if token == ".":
token += "".join(
random.choices([",", "."], k=random.choice(range(0, max_fuck)))
)
token = shuffle_str(token)
return token
@logged_mutator
def to_rp_text(token: str) -> str:
return f"*{token}*"
@logged_mutator
def get_random_action_verb():
return random.choice(action_verbs)
@logged_mutator
def get_random_rp_pronoun():
return random.choice(rp_pronouns)
@logged_mutator
def random_swap_char(token: str, swaps_percent: float = 0.2) -> str:
    if len(token) < 3:  # don't do this for small tokens as they become undecipherable
return token
swaps = int(ceil(len(token) * swaps_percent))
indexes = random.choices(range(len(token)), k=swaps)
for i in indexes:
token = "".join(
[
token[w] if w != i else random.choice(string.ascii_letters)
for w in range(len(token))
]
)
return token
@logged_mutator
def random_insert_char(token: str, insert_percent: float = 0.1) -> str:
swaps = int(ceil(len(token) * insert_percent))
indexes = random.choices(range(len(token)), k=swaps)
token_str_list = list(token)
for i in indexes:
token_str_list.insert(i, random.choice(string.ascii_letters))
token = "".join(token_str_list)
return token
@logged_mutator
def token_to_leet(token: str) -> str:
if len(token) < 5: # leet speaking small text has hard to read results
return token
leet_char_mapping = {
# "a": "4",
"a": "@",
"e": "3",
"8": "&",
"l": "1",
"o": "0",
"s": "5",
"i": "1",
}
getchar = (
lambda c: leet_char_mapping[c.lower()] if c.lower() in leet_char_mapping else c
)
return "".join(getchar(c) for c in token)
# TODO: lots of options maybe something learned?
@logged_mutator
def utf_8_char_swaps(token: str) -> str:
if decision(0.5):
token = re.sub(r"ae", "æ", token)
token = re.sub(r"AE", "Æ", token)
if decision(0.3):
token = re.sub(r"ea", "æ", token)
token = re.sub(r"EA", "Æ", token)
return token
# TODO: this is only for discord so we don't break tokenization
@logged_mutator
def recumpile_sentence(sentence: Sentence) -> List[str]:
new_tokens = []
# TODO: determine mood classifier for sentence and add respective emoji
sentiment_emoji = None
if decision(0.89):
sentiment_emoji = get_sentiment_emoji(sentence)
for token in sentence.tokenize(TweetWordTokenizer()):
        # TODO: this is only for discord so we don't break tokenization
if re.match(
r"@everyone|@here|<:[^:\s]+:[0-9]+>|<a:[^:\s]+:[0-9]+>|<(?:@!?\d+|:[A-Za-z0-9]+:)\w+>",
token,
):
new_tokens.append(token)
continue
emoji = None
alias_emoji = get_cheap_emoji_alias(token)
# TODO: refactor into its own mutator
if decision(0.9) and (
re.match("among", token, flags=re.IGNORECASE)
or re.match("amogus", token, flags=re.IGNORECASE)
or re.match(r"su+s", token, flags=re.IGNORECASE)
):
emoji = "ඞ"
emoticon = get_emoticon(token)
if alias_emoji:
if decision(0.1) or (len(str(token)) == 1 and decision(0.9)):
new_tokens.append(alias_emoji)
continue
else:
if decision(0.5):
new_tokens.append(alias_emoji)
if decision(0.5):
emoji = get_emoji_from_data(token)
if decision(0.3):
emoji = get_gloveword_emoji(token)
if emoji:
if decision(0.5):
new_tokens.append(emoji)
if decision(random_synonym_probability):
token = replace_with_random_synonym(token)
if decision(0.5) and profanity.contains_profanity(token):
token = token.upper()
if decision(censor_profanity_probability) and profanity.contains_profanity(
token
):
if decision(0.1):
token = custom_censoring(token, 1)
else:
token = custom_censoring(token, censor_profanity_percent)
elif decision(random_censor_probability):
token = custom_censoring(token, random_censor_percent)
if re.match("musk", token, flags=re.IGNORECASE):
add_husky = True
else:
add_husky = False
# processing
recumpiled_token = recumpile_token(token)
# post processing
new_tokens.append(recumpiled_token)
if emoji:
if decision(0.8):
new_tokens.append(emoji)
if alias_emoji:
if decision(0.8):
new_tokens.append(alias_emoji)
if emoticon:
if decision(0.8):
new_tokens.append(emoticon)
if add_husky:
new_tokens.append(recumpile_token("husky"))
if add_random_garbage and decision(add_random_garbage_probability):
new_tokens.append(recumpile_token(add_random_garbage_token()))
if add_randomly_text_face_emoji and decision(
add_randomly_text_face_emoji_probability
):
new_tokens.append(get_random_text_face_emojis())
if add_random_simple_text_emoji and decision(
# TODO: use textblob to determine mood of text and insert faces
# accordingly likely need to do this after reconstruction of the
# text blob and go through this sentence by sentence rather than
# word by word.
add_random_simple_text_emoji_probability
):
new_tokens.append(get_random_simple_text_emojis())
if add_random_rp_action and decision(
add_random_rp_mid_sentence_action_probability
):
new_tokens.append(get_random_rp_action_sentence())
if add_random_rp_action and decision(add_random_rp_end_sentence_action_probability):
new_tokens.append(get_random_rp_action_sentence())
if sentiment_emoji:
new_tokens.append(sentiment_emoji)
if decision(0.4):
for i in range(5):
if decision(0.3):
new_tokens.append(sentiment_emoji)
else:
break
return new_tokens
@logged_mutator
def add_ending_y(token: str) -> str:
return re.sub(r"([a-zA-Z]{4,}[^sy])", lambda match: f"{match.group(1)}y", token)
def remove_dupe_chars(text: str) -> str:
"""accept -> acept"""
text = re.sub(r"([a-zA-Z])\1+", r"\1", text)
return text
@logged_mutator
def recumpile_token(token: str) -> str:
# TODO: determine mood classifier for token and add respective emoji
if decision(split_compound_word_probability):
tokens = split_compound_word(token)
else:
tokens = [token]
# TODO: migrate fuck_token to maybe a generator?
fucked_tokens = []
for token in tokens:
relevant_emoji = None
if decision(add_text_relevant_emoji_probability):
relevant_emoji = find_text_relevant_emoji(
token
) # TODO: add ability to get multiple?
if relevant_emoji and decision(wrap_text_relevant_emoji_probability):
fucked_tokens.append(relevant_emoji)
if decision(0.1):
token = remove_dupe_chars(token)
if decision(lazy_char_subbing_probability):
token = lazy_char_subbing(token)
# TODO: this is a potential for unexpected behavior
if decision(word_to_num_probability):
token = word_to_num(token)
if decision(num_to_word_probability):
token = num_to_word(token)
if decision(lr_to_w_swap_probability):
token = lr_to_w_swap(token)
# TODO: this might be too much idk
if decision(invert_word_probability):
token = word_inverter(token)
if decision(upside_down_word_probability):
token = word_upside_downer(token)
elif decision(upside_down_word_probability):
token = word_upside_downer_preserve_char_order(token)
fucked_token = knotter(fuckyer(reeeer(rawrer(garbage(owoer(cummer(token)))))))
if decision(add_extra_ed_probability):
fucked_token = add_extra_ed(fucked_token)
if decision(random_ending_y_probability):
fucked_token = add_ending_y(fucked_token)
# TODO: likely making fu@k into k
# TODO: NOTE: indeed it is doing this fu@k
# >>>list(TextBlob("fu@k").words)
# ['fu', 'k']
if add_random_plurals and decision(add_random_plurals_probability):
fucked_token = Word(fucked_token).pluralize()
if randomly_lemmatize and decision(randomly_lemmatize_probability):
fucked_token = Word(fucked_token).lemmatize()
if randomly_capitalize_word and decision(randomly_capitalize_word_probability):
fucked_token = fucked_token.upper()
if randomly_spongebob_word and decision(randomly_spongebob_word_probability):
fucked_token = generate_spongebob_text(fucked_token)
if randomly_overemphasis_punctuation and decision(
randomly_overemphasis_punctuation_probability
):
fucked_token = over_emphasise_punctuation(
fucked_token, randomly_overemphasis_punctuation_max_fuck
)
if decision(common_misspellings_probability):
fucked_token = common_mispellings(fucked_token)
if randomly_swap_char and decision(randomly_swap_char_probability):
fucked_token = random_swap_char(
fucked_token, randomly_swap_char_swap_percent
)
if randomly_insert_char and decision(randomly_insert_char_probability):
fucked_token = random_insert_char(
fucked_token, randomly_insert_char_insert_percent
)
if decision(utf_8_char_swaps_probability):
fucked_token = utf_8_char_swaps(fucked_token)
if random_leet_speak and decision(random_leet_speak_probability):
fucked_token = token_to_leet(fucked_token)
if decision(common_misspellings_probability):
fucked_token = common_mispellings(fucked_token)
# TODO: likely also breaking the spacing between punctuation kittly 1!
# TODO: `fucked` went to `DS` investigate
# TODO: likely this is at fault
        if decision(homofiy_probability):
            # pass the configured percentage (not the probability) as the swap ratio
            fucked_token = homoify(fucked_token, homofiy_percentage)
fucked_tokens.append(fucked_token)
if decision(add_x3_if_token_has_rawr_probability) and (
"rawr" in fucked_token.lower()
):
fucked_tokens.append("X3" if decision(0.5) else "x3")
if decision(adding_ending_ksksk_andioop_probability) and (
fucked_token.lower().endswith("ksk")
or fucked_token.lower().endswith("sks")
or "ksksk" in fucked_token.lower()
or "sksks" in fucked_token.lower()
):
for i in range(random.randint(1, 2)):
fucked_tokens.append(recumpile_token("andioop"))
if decision(adding_ending_ksksk_save_the_turtles_probability) and (
fucked_token.lower().endswith("ksk")
or fucked_token.lower().endswith("sks")
or "ksksk" in fucked_token.lower()
or "sksks" in fucked_token.lower()
):
fucked_tokens.append(recumpile_text("save the turtles!"))
if decision(fucking_normies_addition) and "reee" in fucked_token.lower():
fucked_tokens.append(recumpile_text("fucking normies!"))
if decision(get_rhymes_probability):
for rhyme in get_runon_of_rhymes(token, max_runon=max_runon_rhymes):
fucked_rhyme = recumpile_token(rhyme)
fucked_tokens.append(fucked_rhyme)
if relevant_emoji:
fucked_tokens.append(relevant_emoji)
for i, fucked_token in enumerate(fucked_tokens):
if decision(space_gap_text_probability):
# TODO: this modification may be better placed elsewhere
fucked_token = space_gap_text(
fucked_token,
min_gap_size=space_gap_text_min_gap_size,
max_gap_size=space_gap_text_max_gap_size,
)
# TODO: discord format options
if decision(bold_text_probability):
fucked_token = bold_text(fucked_token)
elif decision(back_tick_text_probability):
fucked_token = back_tick_text(fucked_token)
fucked_tokens[i] = fucked_token
return " ".join(fucked_tokens)
@logged_mutator
def bold_text(token: str) -> str:
if not token.strip(
string.punctuation
    ):  # don't bold tokens that are all punctuation, as it breaks rejoining punctuation later (TODO: maybe an alternate fix?)
return token
return f"**{token.strip('*')}**"
@logged_mutator
def get_random_rp_action_sentence() -> str:
more_verbs = []
more_verbs_probability = 1
while True:
if decision(more_verbs_probability):
additional_verb = get_random_action_verb()
if decision(0.5): # TODO: config
additional_verb = Word(additional_verb).lemmatize()
additional_verb = recumpile_token(additional_verb)
additional_verb = Word(additional_verb).pluralize()
more_verbs.append(additional_verb)
else:
break
more_verbs_probability -= more_verbs_probability_decay
noun = get_random_rp_pronoun()
if decision(0.5): # TODO: config
noun = Word(noun).lemmatize()
# TODO: add boolean for enable
noun = recumpile_token(noun)
noun = Word(noun).pluralize()
return to_rp_text(f"{' and '.join(more_verbs)}{' ' if more_verbs else ''}{noun}")
@logged_mutator
def lazy_char_subbing(token: str) -> str:
"""e.g.you -> u are -> r"""
# TODO: better capital replacement
# you -> u, yuu
token = re.sub(
"^y+(o+)?u+$",
lambda match: f"u" if decision(0.5) else f"y{'u' * random.randint(1, 4)}",
token,
flags=re.IGNORECASE,
)
# are -> r, arrr
token = re.sub(
"^a+(r+)?e+$",
lambda match: f"r" if decision(0.5) else f"a{'r' * random.randint(1, 4)}",
token,
flags=re.IGNORECASE,
)
# with -> wif
token = re.sub(
"^wi+th+$",
lambda match: f"w{'i' * random.randint(1, 4)}{'f' * random.randint(1, 4)}",
token,
flags=re.IGNORECASE,
)
# what -> wat OR wut
if decision(0.5):
if decision(0.5):
token = re.sub(
"^wha+t$",
lambda match: f"w{random.choice(['a', 'u']) * random.randint(1, 4)}t",
token,
flags=re.IGNORECASE,
)
else:
token = re.sub(
"^wha+t$",
lambda match: f"wh{'u' * random.randint(1, 4)}t",
token,
flags=re.IGNORECASE,
)
# er -> ur
token = re.sub(
"(e+)r",
lambda match: f"{'u' * (len(match.group(1)) + random.randint(0, 3))}r",
token,
flags=re.IGNORECASE,
count=random.randint(0, 2),
)
# easy -> ez
token = re.sub(
"^ea+s+y+$",
lambda match: f"e{'z' * random.randint(1, 3)}",
token,
flags=re.IGNORECASE,
)
# to,too, -> 2
token = re.sub("to+$", lambda match: f"2", token, flags=re.IGNORECASE)
return token
# TODO: funny -> funni spells -> spellz
@logged_mutator
def common_mispellings(token: str) -> str:
# TODO: cleanup
token = re.sub(
r"([^\s])y$", lambda match: f"{match.group(1)}{'i'*random.randint(1,1)}", token
)
token = re.sub(
r"([^\s])Y$", lambda match: f"{match.group(1)}{'Y'*random.randint(1,2)}", token
)
token = re.sub(
r"([^\s])s$", lambda match: f"{match.group(1)}{'z'*random.randint(1,2)}", token
)
token = re.sub(
r"([^\s])S$", lambda match: f"{match.group(1)}{'Z'*random.randint(1,2)}", token
)
token = re.sub(
r"([^\s])z$", lambda match: f"{match.group(1)}{'s'*random.randint(1,2)}", token
)
token = re.sub(
r"([^\s])Z$", lambda match: f"{match.group(1)}{'S'*random.randint(1,2)}", token
)
token = re.sub(
r"([eE])([iI])", lambda match: f"{match.group(2)}{match.group(1)}", token
)
return token
@logged_mutator
def fix_punctuation_spacing(text: str) -> str:
    # TODO: this is a crude way to fix punctuation being incorrectly joined; should investigate
return re.sub(
r"([^\s]) ([!?.,]+)", lambda match: f"{match.group(1)}{match.group(2)}", text
)
@logged_mutator
def back_tick_text(token: str) -> str:
if not token.strip(
string.punctuation
    ):  # don't backtick tokens that are all punctuation, as it breaks rejoining punctuation later (TODO: maybe an alternate fix?)
return token
return f"`{token.strip('`')}`"
# TODO: issues with pyenchant quick
# patch to make this function do nothing for now
@logged_mutator
def split_compound_word(token: str) -> List[str]:
# tokens = splitter.split(str(token))
# if isinstance(tokens, list):
# return tokens
# return [tokens]
return [token]
@logged_mutator
def add_extra_ed(token: str) -> str:
return re.sub(
"([a-zA-Z]{2,})(d|ed)$",
lambda match: f"{match.group(1)}{'ed' * random.randint(1, 2)}",
token,
flags=re.IGNORECASE,
)
# TODO: garbage-y code duplication
@logged_mutator
def custom_censoring(swear_word: str, censor_percent: float = 0.25) -> str:
    if len(swear_word) <= 3:  # don't censor words this short
return swear_word
censor_word_list = list("@#$%*")
swaps = int(ceil(len(swear_word) * censor_percent))
indexes = list(range(0, len(swear_word)))
random.shuffle(indexes)
indexes = indexes[:swaps]
# avoid censoring the start or end of a string if we are not completely censoring the string
if not (len(indexes) == len(swear_word)):
try:
indexes.remove(0)
except ValueError:
pass
try:
indexes.remove(len(swear_word) - 1)
except ValueError:
pass
for i in indexes:
swear_word = "".join(
[
swear_word[w] if w != i else random.choice(censor_word_list)
for w in range(len(swear_word))
]
)
return swear_word
@logged_mutator
def space_gap_text(token: str, min_gap_size: int = 1, max_gap_size: int = 4) -> str:
gap_size = random.randint(min_gap_size, max_gap_size)
token_ends = " " * (gap_size + 1)
token = token_ends + (" " * gap_size).join(token) + token_ends
return token
@logged_mutator
def replace_with_random_synonym(token: str) -> str:
# TODO: fill in with all synonyms for lulz?
# TODO: download manual dictionary
return token
@logged_mutator
def word_inverter(token: str) -> str:
# Quases shitty word inverter attempt
word = list(token)
word.reverse()
reversed_word = ""
for i in word:
reversed_word += i
token = reversed_word
return token
@logged_mutator
def word_upside_downer(token: str) -> str:
# Quases upside down word transformer
token = upsidedown.transform(token)
return token
@logged_mutator
def word_upside_downer_preserve_char_order(token: str) -> str:
new_token = []
for char_ in token:
new_token.append(upsidedown.transform(char_))
return "".join(new_token)
@logged_mutator
def recumpile_line(text: str) -> str:
new_tokens = []
for sentence in TextBlob(text).sentences:
new_tokens += recumpile_sentence(sentence)
out_str = TreebankWordDetokenizer().detokenize(new_tokens)
out_str = fix_punctuation_spacing(out_str)
return out_str
@logged_mutator
def recumpile_text(text: str) -> str:
# TODO: go sentence by sentence token by token all for sentiment analysis
lines = []
for line in text.split("\n"):
lines.append(recumpile_line(line))
return "\n".join(lines)
| 28.757347 | 128 | 0.60153 |
51bb2b936faa55a1448859efc6ee5608ca7ee9cc | 1,551 | py | Python | jetsoncar_gazebo/scripts/test_commands.py | denisshustov/JetsonCar-Simulation | fb4c6ebcee48e9b9cffc7cd2e2794ec3186c5fe5 | ["MIT"] | 5 | 2018-08-03T10:08:28.000Z | 2020-09-16T19:45:17.000Z | jetsoncar_gazebo/scripts/test_commands.py | denisshustov/JetsonCar-Simulation | fb4c6ebcee48e9b9cffc7cd2e2794ec3186c5fe5 | ["MIT"] | 1 | 2020-03-23T18:16:59.000Z | 2020-10-18T10:44:50.000Z | jetsoncar_gazebo/scripts/test_commands.py | denisshustov/JetsonCar-Simulation | fb4c6ebcee48e9b9cffc7cd2e2794ec3186c5fe5 | ["MIT"] | 2 | 2018-06-01T02:18:43.000Z | 2019-11-17T22:01:19.000Z |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from std_msgs.msg import Float32
from std_msgs.msg import Float64
speed = 0.3
steering_angle = 0.5
def servo_commands():
rospy.init_node('servo_commands', anonymous=True)
pub_vel_left_front_wheel = rospy.Publisher('/jetsoncar/front_left_wheel_velocity_controller/command', Float64, queue_size=1)
pub_vel_right_front_wheel = rospy.Publisher('/jetsoncar/front_right_wheel_velocity_controller/command', Float64, queue_size=1)
#pub_vel_left_rear_wheel = rospy.Publisher('/jetsoncar/rear_left_wheel_velocity_controller/command', Float64, queue_size=1)
#pub_vel_right_rear_wheel = rospy.Publisher('/jetsoncar/rear_right_wheel_velocity_controller/command', Float64, queue_size=1)
pub_pos_left_steering_hinge = rospy.Publisher('/jetsoncar/front_left_hinge_position_controller/command', Float64, queue_size=1)
pub_pos_right_steering_hinge = rospy.Publisher('/jetsoncar/front_right_hinge_position_controller/command', Float64, queue_size=1)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
pub_vel_left_front_wheel.publish(speed)
pub_vel_right_front_wheel.publish(speed)
#pub_vel_left_rear_wheel.publish(speed)
#pub_vel_right_rear_wheel.publish(speed)
pub_pos_left_steering_hinge.publish(steering_angle)
pub_pos_right_steering_hinge.publish(steering_angle)
rate.sleep()
if __name__ == '__main__':
try:
servo_commands()
except rospy.ROSInterruptException:
pass
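# Example invocation (illustrative; assumes a sourced ROS workspace with the
# jetsoncar_gazebo package built, the script marked executable, and the wheel and
# hinge controllers running in Gazebo):
#   rosrun jetsoncar_gazebo test_commands.py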
| 39.769231 | 133 | 0.778852 |
0dd2eaf38f9f4b17e654fc652b3a1ca6f40ef658 | 1,744 | py | Python | tests/test_reduce_window.py | alexander-g/vkJAX | 8ef0105891ee41734293b2f1a8c247957018d7c2 | ["Unlicense"] | 5 | 2021-02-07T08:13:36.000Z | 2021-02-28T09:48:48.000Z | tests/test_reduce_window.py | alexander-g/vkJAX | 8ef0105891ee41734293b2f1a8c247957018d7c2 | ["Unlicense"] | 2 | 2021-05-30T16:48:21.000Z | 2021-06-02T04:50:02.000Z | tests/test_reduce_window.py | alexander-g/vkJAX | 8ef0105891ee41734293b2f1a8c247957018d7c2 | ["Unlicense"] | null | null | null |
import os
os.environ['CUDA_VISIBLE_DEVICES']=''
import vkjax
import jax, jax.numpy as jnp, numpy as np
import pytest
seed = np.random.randint(0, 1000000)
np.random.seed(seed)
#2d maxpooling
def reduce_window_max0(x): return jax.lax.reduce_window(x, -jnp.inf, jax.lax.max, (1,2,2,1), window_strides=(1,1,1,1), padding='VALID')
def reduce_window_max1(x): return jax.lax.reduce_window(x, -jnp.inf, jax.lax.max, (1,3,3,1), window_strides=(1,2,2,1), padding='SAME')
param_matrix = [
(reduce_window_max0, '2x2 no-pad', [np.random.random([11,100,111,5])] ),
(reduce_window_max1, '3x3 +pad', [np.random.random([77,10,99,17])] ),
]
@pytest.mark.parametrize("f,desc,args", param_matrix)
def test_reduce_window_matrix(f, desc, args):
print(f'==========TEST START: {desc}==========')
print(f'**********RANDOM SEED: {seed}*********')
args = jax.tree_map(jnp.asarray, args)
jaxpr = jax.make_jaxpr(f)(*args)
print(jaxpr)
vkfunc = vkjax.Function(f)
y = vkfunc(*args)
ytrue = f(*args)
#print(args[0].reshape(4,5).round(5))
print()
print(y.squeeze())
print()
print(ytrue.squeeze())
assert jax.tree_structure(y) == jax.tree_structure(ytrue)
assert np.all(jax.tree_leaves(jax.tree_multimap(lambda x,y: np.shape(x)==np.shape(y), y,ytrue)))
dtype = lambda x: np.asarray(x).dtype
assert np.all(jax.tree_leaves(jax.tree_multimap(lambda x,y: dtype(x)==dtype(y), y,ytrue)))
assert np.all(jax.tree_leaves(jax.tree_multimap(lambda x,y: np.allclose(x,y), y,ytrue)))
#assert np.all(jax.tree_leaves(jax.tree_multimap(lambda x,y: np.all(x==y), y,ytrue)))
print(f'==========TEST END: {desc}==========')
print()
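# Example invocation (illustrative; assumes jax, vkjax and pytest are installed):
#   pytest tests/test_reduce_window.py -v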
| 31.709091 | 135 | 0.627294 |
88e30d69ef46537da5f7d555c2d3c7c39af1e408 | 2,495 | py | Python | pipeline/run.py | aldengolab/ML-basics | 5db39794b4a39904334284b5ae33697169148062 | ["MIT"] | null | null | null | pipeline/run.py | aldengolab/ML-basics | 5db39794b4a39904334284b5ae33697169148062 | ["MIT"] | null | null | null | pipeline/run.py | aldengolab/ML-basics | 5db39794b4a39904334284b5ae33697169148062 | ["MIT"] | 1 | 2020-09-09T08:14:41.000Z | 2020-09-09T08:14:41.000Z |
'''
Runs the model loop for CivicScape.
'''
import csv
import argparse
import numpy as np
from model_loop import ModelLoop
from sparse_matrix import SparseMatrix
def pipeline(args):
'''
Runs the model loop.
If you wish to edit any of the parameters for the models, please edit the
model_loop.py file directly.
'''
train_data = SparseMatrix()
train_data.load_csv(args.train_filename)
y_train = train_data.get(args.label).todense()
X_train = train_data.get_all_except(args.label)
y_train[y_train>1] = 1 # Remove multiclass
y_train = np.array(np.reshape(y_train, y_train.shape[0]))[0] # Correct shape
test_data = SparseMatrix()
test_data.load_csv(args.test_filename)
y_test = test_data.get(args.label).todense()
X_test = test_data.get_all_except(args.label)
y_test[y_test>1] = 1 # Remove multiclass
y_test = np.array(np.reshape(y_test, y_test.shape[0]))[0] # Correct shape
loop = ModelLoop(X_train, X_test, y_train, y_test, args.models,
args.iterations, args.run_name,
args.thresholds, args.label, float(args.comparison),
args.project_folder)
loop.run()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Run a model loop.')
parser.add_argument('train_filename', type=str,
help='Location of training data csv file')
parser.add_argument('test_filename', type=str,
help='Location of test data csv file')
parser.add_argument('--label', type=str,
help='Label for outcome column', default = 'label')
parser.add_argument('--run_name', type=str,
help='Name of this run')
parser.add_argument('--iterations', type=int,
help='Number of iterations', default = 50)
parser.add_argument('--models', nargs='+',
help='Models to run', default = ['LR', 'RF', 'DT', 'SGD', 'SVM', 'AB', 'NN'])
parser.add_argument('--thresholds', nargs='+', type=float,
help='Thresholds', default = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4])
parser.add_argument('--project_folder', type=str, default = './',
help='Relative project folder for results')
args = parser.parse_args()
print "\n========= NEW MODEL LOOP RUN {} =========\n".format(args.run_name.upper())
for key in sorted(vars(args).keys()):
print "{}: {}".format(key, vars(args)[key])
pipeline(args)
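# Example invocation (illustrative; the csv file names are placeholders):
#   python run.py train.csv test.csv --label label --run_name demo --iterations 10
# Note: pipeline() also reads args.comparison, which this parser never defines, so
# a --comparison option (or default) would need to be added for the loop to run.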
| 40.241935 | 97 | 0.625651 |
8882e8fab1dd9bfdc1f7924dd6696e0298a81a0c | 2,303 | py | Python | Tests/GUI/DataND/test_is_overlay.py | TinaTabo/SciDataTool | a1a51b104248d3e6d07006f2c3f2806b4589624e | ["Apache-2.0"] | null | null | null | Tests/GUI/DataND/test_is_overlay.py | TinaTabo/SciDataTool | a1a51b104248d3e6d07006f2c3f2806b4589624e | ["Apache-2.0"] | null | null | null | Tests/GUI/DataND/test_is_overlay.py | TinaTabo/SciDataTool | a1a51b104248d3e6d07006f2c3f2806b4589624e | ["Apache-2.0"] | null | null | null |
import pytest
from PySide2 import QtWidgets
import sys
from numpy import ones
from SciDataTool import DataTime, DataLinspace
from numpy import pi
class TestGUI(object):
@classmethod
def setup_class(cls):
"""Run at the begining of every test to setup the gui"""
if not QtWidgets.QApplication.instance():
cls.app = QtWidgets.QApplication(sys.argv)
else:
cls.app = QtWidgets.QApplication.instance()
X = DataLinspace(name="time", unit="s", initial=0, final=10, number=11)
Y = DataLinspace(
name="angle",
unit="rad",
initial=0,
final=2 * pi,
number=21,
is_overlay=True,
)
field_2d = ones((11, 21))
for i in range(11):
field_2d[i] *= i
cls.Field = DataTime(
name="Airgap flux density",
symbol="B_r",
unit="T",
axes=[X, Y],
values=field_2d,
)
cls.UI = cls.Field.plot(is_show_fig=False, is_create_appli=False)
@pytest.mark.gui
def check_combobox(self):
"""Testing that the combobox is disabled if there is only one item"""
# As we only have one axis then the combobox is disabled
assert (
self.UI.w_plot_manager.w_axis_manager.w_axis_1.c_axis.isEnabled() == False
)
def check_axis_2(self):
"""Testing that the second WAxisSelector is hidden as the second axis has is_overlay = True"""
assert self.UI.w_plot_manager.w_axis_manager.w_axis_2.isHidden() == True
def check_slice_op(self):
"""Testing that the is_overlay axis generated a WSliceOperator widget"""
axis_slice_op = self.UI.w_plot_manager.w_axis_manager.w_slice_op[0].axis
overlay_axis = self.Field.get_axes()[1]
assert axis_slice_op.name == overlay_axis.name
if __name__ == "__main__":
a = TestGUI()
a.setup_class()
    # Testing that the combobox is disabled if there is only one item in it
a.check_combobox()
# Testing that axis 2 is hidden
a.check_axis_2()
# Testing that the second axis is a WSliceOperator widget
a.check_slice_op()
print("Done")
| 29.525641 | 103 | 0.597482 |
927bc31121c237085a829c9efa758b46cd340aff | 1,176 | py | Python | src/genie/libs/parser/iosxe/tests/ShowStackwiseVirtualLink/cli/equal/golden_output_expected.py | nielsvanhooy/genieparser | 9a1955749697a6777ca614f0af4d5f3a2c254ccd | ["Apache-2.0"] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowStackwiseVirtualLink/cli/equal/golden_output_expected.py | nielsvanhooy/genieparser | 9a1955749697a6777ca614f0af4d5f3a2c254ccd | ["Apache-2.0"] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowStackwiseVirtualLink/cli/equal/golden_output_expected.py | nielsvanhooy/genieparser | 9a1955749697a6777ca614f0af4d5f3a2c254ccd | ["Apache-2.0"] | null | null | null |
expected_output = {
'switch': {
1: {
'svl': {
1: {
'ports': {
'HundredGigE1/0/1': {
'link_status': 'U',
'protocol_status': 'P'},
'HundredGigE1/0/6': {
'link_status': 'U',
'protocol_status': 'P'
}
}
}
}
},
2: {
'svl': {
1: {
'ports': {
'HundredGigE2/0/1': {
'link_status': 'U',
'protocol_status': 'P'},
'HundredGigE2/0/6': {
'link_status': 'U',
'protocol_status': 'P'
}
}
}
}
}
}
}
| 33.6 | 58 | 0.177721 |
75ca47beb336251a44208cf5ac23458e3d6e27ee | 225 | py | Python | utils/logger.py | Nithin-Holla/meme_challenge | f4dc2079acb78ae30caaa31e112c4c210f93bf27 | ["MIT"] | 20 | 2020-11-18T18:10:07.000Z | 2022-02-17T17:53:02.000Z | utils/logger.py | Nithin-Holla/meme_challenge | f4dc2079acb78ae30caaa31e112c4c210f93bf27 | ["MIT"] | 1 | 2021-04-08T06:19:11.000Z | 2022-03-12T15:18:45.000Z | utils/logger.py | Nithin-Holla/meme_challenge | f4dc2079acb78ae30caaa31e112c4c210f93bf27 | ["MIT"] | 8 | 2020-12-11T20:39:02.000Z | 2022-03-12T15:19:05.000Z |
import logging
_LOG_FMT = '%(asctime)s : %(levelname)s - %(message)s'
_DATE_FMT = '%d/%m/%Y %I:%M:%S %p'
logging.basicConfig(format=_LOG_FMT, datefmt=_DATE_FMT, level=logging.INFO)
LOGGER = logging.getLogger('TrainLogger')
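# Typical usage elsewhere in the project (illustrative, not part of the original file):
#   from utils.logger import LOGGER
#   LOGGER.info('starting training run')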
| 37.5 | 75 | 0.711111 |
a5d15c4f6817637f9930bfecec9208ec30a92489 | 173 | py | Python | learn1/60.py | raghavi101/HackerRank | 48bf812f1b3c60b5201a95458be76ae9b7323a88 | ["MIT"] | null | null | null | learn1/60.py | raghavi101/HackerRank | 48bf812f1b3c60b5201a95458be76ae9b7323a88 | ["MIT"] | null | null | null | learn1/60.py | raghavi101/HackerRank | 48bf812f1b3c60b5201a95458be76ae9b7323a88 | ["MIT"] | null | null | null |
c = int(input())
for i in range(c):
n = input()
a = set(map(int, input().split()))
m = input()
b = set(map(int, input().split()))
print(a.issubset(b))
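# Worked example (illustrative, not from the original file): for a test case with
# A = {1, 2, 3, 5, 6} and B = {9, 8, 5, 6, 3, 2, 1, 4, 7}, a.issubset(b) is True,
# so "True" is printed for that case.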
| 17.3 | 38 | 0.508671 |
bfa30e409e6b4211a52346165f75981949ea3c1e | 94 | py | Python | boa3_test/test_sc/built_in_methods_test/CountTupleEmpty.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | ["Apache-2.0"] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/built_in_methods_test/CountTupleEmpty.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | ["Apache-2.0"] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/built_in_methods_test/CountTupleEmpty.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | ["Apache-2.0"] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z |
from boa3.builtin import public
@public
def main() -> int:
a = ()
return a.count(1)
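# Note (illustrative, not from the original file): counting any element of an
# empty tuple yields 0 in Python semantics, so main() is expected to return 0.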
| 11.75 | 31 | 0.606383 |
441c5411eab6bcb49efb6fa244c443e468285d30 | 2,245 | py | Python | reposynch/api.py | jvalderrama/python-syncrepos | 31ad7474be171e2476e0c5d9b4005af3974d58f6 | ["Apache-2.0"] | null | null | null | reposynch/api.py | jvalderrama/python-syncrepos | 31ad7474be171e2476e0c5d9b4005af3974d58f6 | ["Apache-2.0"] | null | null | null | reposynch/api.py | jvalderrama/python-syncrepos | 31ad7474be171e2476e0c5d9b4005af3974d58f6 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# Copyright 2016, Atos Spain SA. #
# Author: Jorge Edgar Valderrama Romero
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
# Copyright 2011 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, time
from daemon import Daemon
from bottle import route, run, template
@route('/hello/<name>')
def index(name):
return template('<b>Hello {{name}}</b>!', name=name)
"""
class DaemonSyncRepo(Daemon):
#run(host='localhost', port=8080)
def run(self):
while True:
time.sleep(1)
def init():
daemon = DaemonSyncRepo('/tmp/daemon-example.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
"""
def init():
run(host='localhost', port=8080)
if __name__ == "__main__":
init()
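# Example check once the script is running and bottle is serving on
# localhost:8080 (illustrative, not part of the original file):
#   curl http://localhost:8080/hello/world
#   -> <b>Hello world</b>!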
| 27.048193 | 78 | 0.653452 |
355a8f39200df324356f57b32f2b0874d4004eac | 8,236 | py | Python | deepcell/layers/padding_test.py | esgomezm/deepcell-tf | 6693c9ed7e76793561e6c2281437acaf3e4fa441 | ["Apache-2.0"] | 1 | 2020-06-24T23:04:14.000Z | 2020-06-24T23:04:14.000Z | deepcell/layers/padding_test.py | esgomezm/deepcell-tf | 6693c9ed7e76793561e6c2281437acaf3e4fa441 | ["Apache-2.0"] | null | null | null | deepcell/layers/padding_test.py | esgomezm/deepcell-tf | 6693c9ed7e76793561e6c2281437acaf3e4fa441 | ["Apache-2.0"] | 1 | 2020-06-24T23:04:26.000Z | 2020-06-24T23:04:26.000Z |
# Copyright 2016-2019 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for padding layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# from tensorflow.python import keras
# from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.platform import test
from deepcell.utils import testing_utils
from deepcell import layers
def _get_random_padding(dim):
R = lambda: np.random.randint(low=0, high=9)
return tuple([(R(), R()) for _ in range(dim)])
@keras_parameterized.run_all_keras_modes
class ReflectionPaddingTest(keras_parameterized.TestCase):
def test_reflection_padding_2d(self):
num_samples = 2
stack_size = 2
input_num_row = 4
input_num_col = 5
custom_objects = {'ReflectionPadding2D': layers.ReflectionPadding2D}
ins1 = np.ones((num_samples, input_num_row, input_num_col, stack_size))
ins2 = np.ones((num_samples, stack_size, input_num_row, input_num_col))
data_formats = ['channels_first', 'channels_last']
for data_format, inputs in zip(data_formats, [ins2, ins1]):
# basic test
testing_utils.layer_test(
layers.ReflectionPadding2D,
kwargs={'padding': (2, 2),
'data_format': data_format},
custom_objects=custom_objects,
input_shape=inputs.shape)
testing_utils.layer_test(
layers.ReflectionPadding2D,
kwargs={'padding': ((1, 2), (3, 4)),
'data_format': data_format},
custom_objects=custom_objects,
input_shape=inputs.shape)
# correctness test
# with self.cached_session():
# layer = layers.ReflectionPadding2D(
# padding=(2, 2), data_format=data_format)
# layer.build(inputs.shape)
# output = layer(keras.backend.variable(inputs))
# if context.executing_eagerly():
# np_output = output.numpy()
# else:
# np_output = keras.backend.eval(output)
# if data_format == 'channels_last':
# for offset in [0, 1, -1, -2]:
# np.testing.assert_allclose(np_output[:, offset, :, :], 0.)
# np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
# np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
# elif data_format == 'channels_first':
# for offset in [0, 1, -1, -2]:
# np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
# np.testing.assert_allclose(np_output[:, :, :, offset], 0.)
# np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
# layer = layers.ReflectionPadding2D(
# padding=((1, 2), (3, 4)), data_format=data_format)
# layer.build(inputs.shape)
# output = layer(keras.backend.variable(inputs))
# if context.executing_eagerly():
# np_output = output.numpy()
# else:
# np_output = keras.backend.eval(output)
# if data_format == 'channels_last':
# for top_offset in [0]:
# np.testing.assert_allclose(np_output[:, top_offset, :, :], 0.)
# for bottom_offset in [-1, -2]:
# np.testing.assert_allclose(np_output[:, bottom_offset, :, :], 0.)
# for left_offset in [0, 1, 2]:
# np.testing.assert_allclose(np_output[:, :, left_offset, :], 0.)
# for right_offset in [-1, -2, -3, -4]:
# np.testing.assert_allclose(np_output[:, :, right_offset, :], 0.)
# np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
# elif data_format == 'channels_first':
# for top_offset in [0]:
# np.testing.assert_allclose(np_output[:, :, top_offset, :], 0.)
# for bottom_offset in [-1, -2]:
# np.testing.assert_allclose(np_output[:, :, bottom_offset, :], 0.)
# for left_offset in [0, 1, 2]:
# np.testing.assert_allclose(np_output[:, :, :, left_offset], 0.)
# for right_offset in [-1, -2, -3, -4]:
# np.testing.assert_allclose(np_output[:, :, :, right_offset], 0.)
# np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
# test incorrect use
with self.assertRaises(ValueError):
layers.ReflectionPadding2D(padding=(1, 1, 1))
with self.assertRaises(ValueError):
layers.ReflectionPadding2D(padding=None)
def test_reflection_padding_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 4
input_len_dim2 = 5
input_len_dim3 = 3
custom_objects = {'ReflectionPadding3D': layers.ReflectionPadding3D}
inputs1 = np.ones((num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size))
inputs2 = np.ones((num_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3))
data_formats = ['channels_first', 'channels_last']
for data_format, inputs in zip(data_formats, [inputs2, inputs1]):
# basic test
testing_utils.layer_test(
layers.ReflectionPadding3D,
kwargs={'padding': (2, 2, 2),
'data_format': data_format},
custom_objects=custom_objects,
input_shape=inputs.shape)
# correctness test
# with self.cached_session():
# layer = layers.ReflectionPadding3D(padding=(2, 2, 2))
# layer.build(inputs.shape)
# output = layer(keras.backend.variable(inputs))
# if context.executing_eagerly():
# np_output = output.numpy()
# else:
# np_output = keras.backend.eval(output)
# for offset in [0, 1, -1, -2]:
# np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
# np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
# np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
# np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
# test incorrect use
with self.assertRaises(ValueError):
layers.ReflectionPadding3D(padding=(1, 1))
with self.assertRaises(ValueError):
layers.ReflectionPadding3D(padding=None)
if __name__ == '__main__':
test.main()
| 46.269663 | 91 | 0.572608 |
5bff793cbba4ceb98c0ceab98c889840734b533b | 118 | py | Python | mayan/apps/document_indexing/__init__.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | ["Apache-2.0"] | 1 | 2021-06-17T18:24:25.000Z | 2021-06-17T18:24:25.000Z | mayan/apps/document_indexing/__init__.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | ["Apache-2.0"] | 7 | 2020-06-06T00:01:04.000Z | 2022-01-13T01:47:17.000Z | mayan/apps/document_indexing/__init__.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | ["Apache-2.0"] | 1 | 2020-07-29T21:03:27.000Z | 2020-07-29T21:03:27.000Z |
from __future__ import unicode_literals
default_app_config = 'mayan.apps.document_indexing.apps.DocumentIndexingApp'
| 29.5 | 76 | 0.872881 |
8f32f0b76cfb9a382449ffe625e45c9e9fc359d4 | 4,868 | py | Python | tests/unit/alertapi30/test_alert.py | ahertz/pyowm | e7ccb5ec3f86bee0cbb9981070a62988a83a5b3c | ["MIT"] | 799 | 2015-01-03T12:07:57.000Z | 2022-03-31T03:59:53.000Z | tests/unit/alertapi30/test_alert.py | ahertz/pyowm | e7ccb5ec3f86bee0cbb9981070a62988a83a5b3c | ["MIT"] | 279 | 2015-02-12T16:11:43.000Z | 2022-02-14T21:49:03.000Z | tests/unit/alertapi30/test_alert.py | ahertz/pyowm | e7ccb5ec3f86bee0cbb9981070a62988a83a5b3c | ["MIT"] | 215 | 2015-01-06T19:07:11.000Z | 2022-02-14T21:39:33.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import unittest
import pyowm.commons.exceptions
from pyowm.alertapi30.alert import Alert, AlertChannel
from pyowm.alertapi30.condition import Condition
class TestAlert(unittest.TestCase):
def test_alert_fails_with_wrong_parameters(self):
self.assertRaises(AssertionError, Alert, None, 'trigger1', [dict(a=1, b=2), dict(c=3, d=4)],
dict(lon=53, lat=45))
self.assertRaises(AssertionError, Alert, 123, 'trigger1', [dict(a=1, b=2), dict(c=3, d=4)],
dict(lon=53, lat=45))
self.assertRaises(AssertionError, Alert, 'alert1', None, [dict(a=1, b=2), dict(c=3, d=4)],
dict(lon=53, lat=45))
self.assertRaises(AssertionError, Alert, 'alert1', 1234, [dict(a=1, b=2), dict(c=3, d=4)],
dict(lon=53, lat=45))
self.assertRaises(AssertionError, Alert, 'alert1', 'trigger', None,
dict(lon=53, lat=45))
self.assertRaises(AssertionError, Alert, 'alert1', 'trigger', 'wrong-value',
dict(lon=53, lat=45))
self.assertRaises(AssertionError, Alert, 'alert1', 'trigger', [dict(a=1, b=2), dict(c=3, d=4)], None)
self.assertRaises(AssertionError, Alert, 'alert1', 'trigger', [dict(a=1, b=2), dict(c=3, d=4)], 'wrong-value')
self.assertRaises(AssertionError, Alert, 'alert1', 'trigger', [dict(a=1, b=2), dict(c=3, d=4)],
dict(lon=53, lat=45), 'wrong-value')
def test_alert_last_updated_is_none(self):
alert = Alert('alert1', 'trigger1', [{
"current_value": 263.576,
"condition": Condition('humidity', 'LESS_THAN', 10)}],
{"lon": 37, "lat": 53})
self.assertIsNone(alert.last_update)
def test_from_dict(self):
the_dict = {
'_id': '5853dbe27416a400011b1b77',
'conditions': [{'_id': '5853dbe27416a400011b1b78',
'condition': {'amount': 273, 'expression': '$lt', 'name': 'temp'},
'current_value': {'max': 258.62, 'min': 258.62}}],
'coordinates': {'lat': '53', 'lon': '37'},
'date': '2016-12-17T00:00:00.000Z',
'last_update': '2016-12-16T11:19:46.352Z',
'triggerId': '5852816a9aaacb00153134a3'}
result = Alert.from_dict(the_dict)
self.assertIsInstance(result, Alert)
with self.assertRaises(pyowm.commons.exceptions.ParseAPIResponseError):
Alert.from_dict(None)
with self.assertRaises(pyowm.commons.exceptions.ParseAPIResponseError):
Alert.from_dict(dict(nonexistent='key'))
value_error_dict = copy.deepcopy(the_dict)
value_error_dict['last_update'] = 'not_valid_timestamp'
with self.assertRaises(pyowm.commons.exceptions.ParseAPIResponseError):
Alert.from_dict(value_error_dict)
def test_to_dict(self):
condition = Condition('humidity', 'LESS_THAN', 10)
instance = Alert('alert1', 'trigger1', [{
"current_value": 263.576,
"condition": condition.to_dict()}],
{"lon": 37, "lat": 53},
1481802090232)
result = instance.to_dict()
self.assertIsInstance(result, dict)
self.assertEqual('alert1', result['id'])
self.assertEqual('trigger1', result['trigger_id'])
self.assertEqual(1, len(result['met_conditions']))
mc = result['met_conditions'][0]
self.assertEqual(dict(current_value=263.576, condition=condition.to_dict()), mc)
self.assertEqual({"lon": 37, "lat": 53}, result['coordinates'])
self.assertEqual(1481802090232, result['last_update'])
def test_repr(self):
the_dict = {
'_id': '5853dbe27416a400011b1b77',
'conditions': [{'_id': '5853dbe27416a400011b1b78',
'condition': {'amount': 273, 'expression': '$lt', 'name': 'temp'},
'current_value': {'max': 258.62, 'min': 258.62}}],
'coordinates': {'lat': '53', 'lon': '37'},
'date': '2016-12-17T00:00:00.000Z',
'last_update': '2016-12-16T11:19:46.352Z',
'triggerId': '5852816a9aaacb00153134a3'}
instance = Alert.from_dict(the_dict)
print(instance)
class TestAlertChannel(unittest.TestCase):
def test_to_dict(self):
name = 'foobaz'
instance = AlertChannel(name)
self.assertEqual(dict(name=name), instance.to_dict())
def test_repr(self):
print(AlertChannel('foobaz'))
def test_alert_last_updated_is_none(self):
alert = Alert('alert1', 'trigger1', [{
"current_value": 263.576,
"condition": Condition('humidity', 'LESS_THAN', 10)}],
{"lon": 37, "lat": 53})
self.assertIsNone(alert.last_update)
| 43.079646
| 118
| 0.589359
|
4b4cbcf4b37d83ae8b594617c3ccebac9e383890
| 8,476
|
py
|
Python
|
tests/core/full_node/test_transactions.py
|
hulatang/skynet-blockchain
|
d7d6f7ec84731c13b9d6d307bb171cf0e266be82
|
[
"Apache-2.0"
] | 7
|
2021-09-07T02:14:15.000Z
|
2022-03-27T06:42:35.000Z
|
tests/core/full_node/test_transactions.py
|
hulatang/skynet-blockchain
|
d7d6f7ec84731c13b9d6d307bb171cf0e266be82
|
[
"Apache-2.0"
] | 1
|
2021-10-21T16:38:56.000Z
|
2021-11-15T13:03:15.000Z
|
tests/core/full_node/test_transactions.py
|
hulatang/skynet-blockchain
|
d7d6f7ec84731c13b9d6d307bb171cf0e266be82
|
[
"Apache-2.0"
] | 3
|
2021-10-21T07:17:40.000Z
|
2022-03-16T12:57:09.000Z
|
import asyncio
from secrets import token_bytes
from typing import Optional
import pytest
from skynet.consensus.block_record import BlockRecord
from skynet.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward, calculate_base_timelord_fee
from skynet.full_node.full_node_api import FullNodeAPI
from skynet.protocols import full_node_protocol
from skynet.simulator.simulator_protocol import FarmNewBlockProtocol
from skynet.types.peer_info import PeerInfo
from skynet.util.ints import uint16, uint32
from tests.setup_nodes import self_hostname, setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class TestTransactions:
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def three_nodes_two_wallets(self):
async for _ in setup_simulators_and_wallets(3, 2, {}):
yield _
@pytest.mark.asyncio
async def test_wallet_coinbase(self, wallet_node):
num_blocks = 5
full_nodes, wallets = wallet_node
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
for i in range(num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) + calculate_base_timelord_fee(uint32(i)) for i in range(1, num_blocks)]
)
# funds += calculate_base_farmer_reward(0)
await asyncio.sleep(2)
print(await wallet.get_confirmed_balance(), funds)
await time_out_assert(10, wallet.get_confirmed_balance, funds)
@pytest.mark.asyncio
async def test_tx_propagation(self, three_nodes_two_wallets):
num_blocks = 5
full_nodes, wallets = three_nodes_two_wallets
wallet_0, wallet_server_0 = wallets[0]
wallet_1, wallet_server_1 = wallets[1]
full_node_api_0 = full_nodes[0]
server_0 = full_node_api_0.server
full_node_api_1 = full_nodes[1]
server_1 = full_node_api_1.server
full_node_api_2 = full_nodes[2]
server_2 = full_node_api_2.server
ph = await wallet_0.wallet_state_manager.main_wallet.get_new_puzzlehash()
ph1 = await wallet_1.wallet_state_manager.main_wallet.get_new_puzzlehash()
#
        # wallet0 <-> server0 <-> server1 <-> server2 <-> wallet1
#
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
await server_0.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
await server_1.start_client(PeerInfo(self_hostname, uint16(server_2._port)), None)
await wallet_server_1.start_client(PeerInfo(self_hostname, uint16(server_2._port)), None)
for i in range(num_blocks):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) + calculate_base_timelord_fee(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(10, wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance, funds)
async def peak_height(fna: FullNodeAPI):
peak: Optional[BlockRecord] = fna.full_node.blockchain.get_peak()
if peak is None:
return -1
peak_height = peak.height
return peak_height
await time_out_assert(10, peak_height, num_blocks, full_node_api_1)
await time_out_assert(10, peak_height, num_blocks, full_node_api_2)
tx = await wallet_0.wallet_state_manager.main_wallet.generate_signed_transaction(10, ph1, 0)
await wallet_0.wallet_state_manager.main_wallet.push_transaction(tx)
await time_out_assert(
10,
full_node_api_0.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_1.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_2.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
# Farm another block
for i in range(1, 8):
await full_node_api_1.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) + calculate_base_timelord_fee(uint32(i))
for i in range(1, num_blocks + 1)
]
)
print(f"Funds: {funds}")
await time_out_assert(
10,
wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance,
(funds - 10),
)
await time_out_assert(15, wallet_1.wallet_state_manager.main_wallet.get_confirmed_balance, 10)
@pytest.mark.asyncio
async def test_mempool_tx_sync(self, three_nodes_two_wallets):
num_blocks = 5
full_nodes, wallets = three_nodes_two_wallets
wallet_0, wallet_server_0 = wallets[0]
full_node_api_0 = full_nodes[0]
server_0 = full_node_api_0.server
full_node_api_1 = full_nodes[1]
server_1 = full_node_api_1.server
full_node_api_2 = full_nodes[2]
server_2 = full_node_api_2.server
ph = await wallet_0.wallet_state_manager.main_wallet.get_new_puzzlehash()
        # wallet0 <-> server0 <-> server1
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
await server_0.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
for i in range(num_blocks):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(ph))
all_blocks = await full_node_api_0.get_all_full_blocks()
for block in all_blocks:
await full_node_api_2.full_node.respond_block(full_node_protocol.RespondBlock(block))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) + calculate_base_timelord_fee(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(10, wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance, funds)
tx = await wallet_0.wallet_state_manager.main_wallet.generate_signed_transaction(10, token_bytes(), 0)
await wallet_0.wallet_state_manager.main_wallet.push_transaction(tx)
await time_out_assert(
10,
full_node_api_0.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_1.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_2.full_node.mempool_manager.get_spendbundle,
None,
tx.name,
)
# make a final connection.
        # wallet0 <-> server0 <-> server1 <-> server2
await server_1.start_client(PeerInfo(self_hostname, uint16(server_2._port)), None)
await time_out_assert(
10,
full_node_api_0.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_1.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
await time_out_assert(
10,
full_node_api_2.full_node.mempool_manager.get_spendbundle,
tx.spend_bundle,
tx.name,
)
| 38.18018
| 159
| 0.675319
|
9784e74e824106d4bf86a7acbcc9e9336dc3f0dc
| 6,182
|
py
|
Python
|
train.py
|
zldzmfoq12/Tacotron
|
323c00c559327be14dc393f1eeefc6eb88e9f05b
|
[
"MIT"
] | 2
|
2020-01-12T06:11:21.000Z
|
2020-01-22T07:44:29.000Z
|
train.py
|
zldzmfoq12/Tacotron
|
323c00c559327be14dc393f1eeefc6eb88e9f05b
|
[
"MIT"
] | null | null | null |
train.py
|
zldzmfoq12/Tacotron
|
323c00c559327be14dc393f1eeefc6eb88e9f05b
|
[
"MIT"
] | null | null | null |
import argparse
from datetime import datetime
import math
import os
import subprocess
import time
import tensorflow as tf
import traceback
import sys
from datasets.datafeeder import DataFeeder
from hparams import hparams, hparams_debug_string
from models import create_model
from text import sequence_to_text
from util import audio, infolog, plot, ValueWindow
log = infolog.log
def get_git_commit():
subprocess.check_output(['git', 'diff-index', '--quiet', 'HEAD']) # Verify client is clean
commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()[:10]
log('Git commit: %s' % commit)
return commit
def add_stats(model):
with tf.variable_scope('stats') as scope:
tf.summary.histogram('linear_outputs', model.linear_outputs)
tf.summary.histogram('linear_targets', model.linear_targets)
tf.summary.histogram('mel_outputs', model.mel_outputs)
tf.summary.histogram('mel_targets', model.mel_targets)
tf.summary.scalar('loss_mel', model.mel_loss)
tf.summary.scalar('loss_linear', model.linear_loss)
tf.summary.scalar('learning_rate', model.learning_rate)
tf.summary.scalar('loss', model.loss)
gradient_norms = [tf.norm(grad) for grad in model.gradients]
tf.summary.histogram('gradient_norm', gradient_norms)
tf.summary.scalar('max_gradient_norm', tf.reduce_max(gradient_norms))
return tf.summary.merge_all()
def time_string():
return datetime.now().strftime('%Y-%m-%d %H:%M')
def train(log_dir, args):
commit = get_git_commit() if args.git else 'None'
checkpoint_path = os.path.join(log_dir, 'model.ckpt')
input_path = os.path.join(args.base_dir, args.input)
log('Checkpoint path: %s' % checkpoint_path)
log('Loading training data from: %s' % input_path)
log('Using model: %s' % args.model)
log(hparams_debug_string())
# Set up DataFeeder:
coord = tf.train.Coordinator()
with tf.variable_scope('datafeeder') as scope:
feeder = DataFeeder(coord, input_path, hparams)
# Set up model:
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.variable_scope('model') as scope:
model = create_model(args.model, hparams)
model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.linear_targets)
model.add_loss()
model.add_optimizer(global_step)
stats = add_stats(model)
# Bookkeeping:
step = 0
time_window = ValueWindow(100)
loss_window = ValueWindow(100)
saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=2)
# Train!
with tf.Session() as sess:
try:
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
sess.run(tf.global_variables_initializer())
if args.restore_step:
# Restore from a checkpoint if the user requested it.
restore_path = '%s-%d' % (checkpoint_path, args.restore_step)
saver.restore(sess, restore_path)
log('Resuming from checkpoint: %s at commit: %s' % (restore_path, commit), slack=True)
else:
log('Starting new training run at commit: %s' % commit, slack=True)
feeder.start_in_session(sess)
while not coord.should_stop():
start_time = time.time()
step, loss, opt = sess.run([global_step, model.loss, model.optimize])
time_window.append(time.time() - start_time)
loss_window.append(loss)
message = 'Step %-7d [%.03f sec/step, loss=%.05f, avg_loss=%.05f]' % (
step, time_window.average, loss, loss_window.average)
log(message, slack=(step % args.checkpoint_interval == 0))
if loss > 100 or math.isnan(loss):
log('Loss exploded to %.05f at step %d!' % (loss, step), slack=True)
raise Exception('Loss Exploded')
if step % args.summary_interval == 0:
log('Writing summary at step: %d' % step)
summary_writer.add_summary(sess.run(stats), step)
if step % args.checkpoint_interval == 0:
log('Saving checkpoint to: %s-%d' % (checkpoint_path, step))
saver.save(sess, checkpoint_path, global_step=step)
log('Saving audio and alignment...')
input_seq, spectrogram, alignment = sess.run([
model.inputs[0], model.linear_outputs[0], model.alignments[0]])
waveform = audio.inv_spectrogram(spectrogram.T)
audio.save_wav(waveform, os.path.join(log_dir, 'step-%d-audio.wav' % step))
plot.plot_alignment(alignment, os.path.join(log_dir, 'step-%d-align.png' % step),
info='%s, %s, %s, step=%d, loss=%.5f' % (
args.model, commit, time_string(), step, loss))
log('Input: %s' % sequence_to_text(input_seq))
except Exception as e:
log('Exiting due to exception: %s' % e, slack=True)
traceback.print_exc()
coord.request_stop(e)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--base_dir', default=os.path.expanduser('~/jeewoo/Tacotron/'))
parser.add_argument('--input', default='training/train.txt')
parser.add_argument('--model', default='tacotron')
parser.add_argument('--name', help='Name of the run. Used for logging. Defaults to model name.')
parser.add_argument('--hparams', default='',
help='Hyperparameter overrides as a comma-separated list of name=value pairs')
parser.add_argument('--restore_step', type=int, help='Global step to restore from checkpoint.')
parser.add_argument('--summary_interval', type=int, default=100,
help='Steps between running summary ops.')
parser.add_argument('--checkpoint_interval', type=int, default=1000,
help='Steps between writing checkpoints.')
parser.add_argument('--slack_url', help='Slack webhook URL to get periodic reports.')
parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
parser.add_argument('--git', action='store_true', help='If set, verify that the client is clean.')
parser.add_argument('--gpu', default='0')
args = parser.parse_args()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
run_name = args.name or args.model
log_dir = os.path.join(args.base_dir, 'logs-lstm--%s' % run_name)
os.makedirs(log_dir, exist_ok=True)
infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
hparams.parse(args.hparams)
train(log_dir, args)
if __name__ == '__main__':
main()
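# Example invocation (hedged; the paths and flag values below are illustrative only):
#   python train.py --base_dir ~/jeewoo/Tacotron/ --input training/train.txt \
#       --model tacotron --summary_interval 100 --checkpoint_interval 1000 --gpu 0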
| 38.397516
| 99
| 0.721773
|
cc1e88d4dd90c9fd53c4a04fe125e951168eedea
| 27,565
|
py
|
Python
|
eveIntel/dataprocessinginterface.py
|
Marclass/EveIntel
|
18d9bfef1b671ae189b413a807905e0895bc0224
|
[
"Apache-2.0"
] | null | null | null |
eveIntel/dataprocessinginterface.py
|
Marclass/EveIntel
|
18d9bfef1b671ae189b413a807905e0895bc0224
|
[
"Apache-2.0"
] | null | null | null |
eveIntel/dataprocessinginterface.py
|
Marclass/EveIntel
|
18d9bfef1b671ae189b413a807905e0895bc0224
|
[
"Apache-2.0"
] | null | null | null |
from eveIntel.evelinkinterface import evelinkinterface
from eveIntel.sqlinterface import sqlConnection
from tabulate import tabulate
from eveIntel.sdeinterface import sdeInterface
from eveIntel.Exceptions import *
from ascii_graph import Pyasciigraph
import time
from datetime import date
import datetime
from ast import literal_eval
import inspect
# Characters:   ]90000000, 98000000[   ## only applies to IDs created after the 64-bit migration
# Corporations: ]98000000, 99000000[   ## older entities may not fall
# Alliances:    ]99000000, 100000000[  ## within these ranges
class dataProcessingInterface():
def __init__(self):
self.eve = evelinkinterface()
self.sql = sqlConnection()
self.sql.connect()
#self.sql.resetReportCache()
self.sde = sdeInterface()
self.homeHeader=["System", "#Kills", "#Losses", "Kill dt avg",
"Loss dt avg", "Kill dt variance", "Loss dt variance",
"First Kill/Loss", "Last Kill/Loss", "Certainty"]
def genericReportWithErrorHandeling(self, validationReportPairs, args):
"""attempts to run given reports and returns value for first report that passes input validation
want to use to reduce redundant code for starting different reports
return report value or None on failure"""
self.isDBlocked()
for i in validationReportPairs:
validator = i[0]
report = i[1]
start = int(time.time())
if(validator(args)):
end = int(time.time())
                print(str(validator.__name__) +" took: "+str(end-start))
                #print(report)
                print(str(report.__name__)+" report at "+str(datetime.datetime.now()))
return report(args)
return None
def genReport(self, entity):
"""more compact genReport"""
print("generating home/Sol report for: "+str(entity))
pairs=[]
pairs.append((self.isChar, self.genCharReport))
pairs.append((self.isCorp, self.genCorpReport))
pairs.append((self.isAlliance, self.genAllianceReport))
pairs.append((self.isSystem, self.genSolReport))
entityID = self.sql.getEntityID(entity)
if(entityID is None):
self.entityNotFound(entity)
report = self.genericReportWithErrorHandeling(pairs, entityID)
if(report is None):
self.entityNotFound(entity)
return report
def genReportRaw(self, entity):
report = ""
start = int(time.time())
entityID = self.sql.getEntityID(entity)
end = int(time.time())
print("it took "+ str(end-start) +" seconds to look up: "+ str(entity))
#if(entityID is None):
#entityID = self.eve.resolveIDFromName(entity)
#print(entityID)
if(not isinstance(entityID, int)):
lastTOD = self.sql.sqlCommand("select max(timeofdeath) from kills")
if(lastTOD is None):
return "The DB appears to be locked. The previous day's kills are likely being processed, please wait a few minutes and try again."
if(len(lastTOD)>0):
lastTOD=lastTOD[0][0]
else:
return "The DB appears to be locked. The previous day's kills are likely being processed, please wait a few minutes and try again."
return "Entity: \""+ str(entity) +"\" has no kill/death history in w space as of "+str(lastTOD)
start = int(time.time())
if(self.isChar(entityID)):
end = int(time.time())
print("isX took: "+str(end-start)+"")
return self.genCharReportRaw(entityID)
elif(self.isCorp(entityID)):
end = int(time.time())
print("isX took: "+str(end-start)+"")
return self.genCorpReportRaw(entityID)
elif(self.isAlliance(entityID)):
end = int(time.time())
print("isX took: "+str(end-start)+"")
return self.genAllianceReportRaw(entityID)
elif(self.isSystem(entityID)):
end = int(time.time())
print("isX took: "+str(end-start)+"")
return self.genSolReportRaw(entityID)
else:
lastTOD = self.sql.sqlCommand("select max(timeofdeath) from kills")
if(len(lastTOD)>0):
lastTOD=lastTOD[0][0]
else:
return "The DB appears to be locked. The previous day's kills are likely being processed, please wait a few minutes and try again."
return "Entity: \""+ str(entity) +"\" has no kill/death history in w space as of "+str(lastTOD)
return report
def isChar(self, entityID):
if(int(entityID)>=90000000 and (int(entityID) <98000000)):
return True
#print("isChar checking db")
return len(self.sql.getCharacterByCCPID(entityID)) >0
#return False
def isCorp(self, entityID):
if(int(entityID)>=98000000 and (int(entityID) <99000000)):
return True
return len(self.sql.getCorpByCCPID(entityID)) >0
#return False
def isAlliance(self, entityID):
if(int(entityID)>=99000000 and (int(entityID) <100000000)):
return True
return len(self.sql.getAllianceByCCPID(entityID)) >0
#return False
def isSystem(self, entityID):
if(entityID==31000005 or (entityID >=31000007 and entityID <= 31002605)):
return True
return False
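    # Worked examples of the range checks above (IDs are illustrative, not real entities):
    #   isChar(91000001)     -> True   (within [90000000, 98000000))
    #   isCorp(98500000)     -> True   (within [98000000, 99000000))
    #   isAlliance(99500000) -> True   (within [99000000, 100000000))
    #   isSystem(31000042)   -> True   (w-space solar system range)
    # Pre-64-bit character/corporation IDs fall outside these ranges and are
    # resolved via the database lookups instead.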
def genCharReport(self, char):
key = "characterID"
#kills = self.sql.getKillsByCharacterID(char)
#losses = self.sql.getLossesByCharacterID(char)
start = int(time.time())
r=self.sql.getCachedReport(self.getHomeReportType(), char)
if(len(r)>0):
print("using cache")
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(char))
return self.findCharHome(char, key, [],[])
report =""
kills = self.sql.getKillsAndLossesByCharacter(char)
#print(type(kills))
#print(kills)
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(char))
if(type(kills) == str):
return kills
home = self.findCharHome(char, key, kills, [])
report = home
return report
def genCharReportRaw(self, char):
key = "characterID"
#kills = self.sql.getKillsByCharacterID(char)
#losses = self.sql.getLossesByCharacterID(char)
start = int(time.time())
r=self.sql.getCachedReport(self.getHomeReportType(), char)
if(len(r)>0):
print("using cache")
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(char))
return self.findCharHomeRaw(char, key, [],[])
report =""
kills = self.sql.getKillsAndLossesByCharacter(char)
#print(type(kills))
#print(kills)
end = int(time.time())
print("elapsed time was "+str(end - start) +" seconds to pull kills for corp: "+str(char))
if(type(kills) == str):
return kills
home = self.findCharHomeRaw(char, key, kills, [])
report = home
return report
def genCorpReport(self, corp):
key = "corporationID"
report =""
start = int(time.time())
r=self.sql.getCachedReport(self.getHomeReportType(), corp)
if(len(r)>0):
print("using cache")
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(corp))
return self.findCharHome(corp, key, [],[])
kills = self.sql.getKillsAndLossesByCorp(corp)
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(corp))
if(type(kills) == str):
return kills
start = int(time.time())
home = self.findCharHome(corp, key, kills, [])
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to process kills for corp: "+str(corp))
report = home
return report
def genCorpReportRaw(self, corp):
key = "corporationID"
report =""
start = int(time.time())
r=self.sql.getCachedReport(self.getHomeReportType(), corp)
if(len(r)>0):
print("using cache")
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(corp))
return self.findCharHomeRaw(corp, key, [],[])
kills = self.sql.getKillsAndLossesByCorp(corp)
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(corp))
if(type(kills) == str):
return kills
start = int(time.time())
home = self.findCharHomeRaw(corp, key, kills, [])
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to process kills for corp: "+str(corp))
report = home
return report
def genAllianceReport(self, alliance):
key = "allianceID"
report =""
start = int(time.time())
r=self.sql.getCachedReport(self.getHomeReportType(), alliance)
if(len(r)>0):
print("using cache")
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(alliance))
return self.findCharHome(alliance, key, [],[])
kills = self.sql.getKillsAndLossesByAlliance(alliance)
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(alliance))
if(type(kills) == str):
return kills
home = self.findCharHome(alliance, key, kills, [])
report = home
return report
def genAllianceReportRaw(self, alliance):
key = "allianceID"
report =""
start = int(time.time())
r=self.sql.getCachedReport(self.getHomeReportType(), alliance)
if(len(r)>0):
print("using cache")
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(alliance))
return self.findCharHomeRaw(alliance, key, [],[])
kills = self.sql.getKillsAndLossesByAlliance(alliance)
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to pull kills for corp: "+str(alliance))
if(type(kills) == str):
return kills
home = self.findCharHomeRaw(alliance, key, kills, [])
report = home
return report
def genLeadershipReport(self, entity):
#genericReportWithErrorHandeling
print("gnerating leadership report for: "+str(entity))
pairs=[]
pairs.append((self.isChar, self.charLeadershipReportFailureWrapper))
pairs.append((self.isCorp, self.genCorpLeadershipReport))
pairs.append((self.isAlliance, self.genAllianceLeadershipReport))
entityID = self.sql.getEntityID(entity)
if(entityID is None):
self.entityNotFound(entity)
report = self.genericReportWithErrorHandeling(pairs, entityID)
if(report is None):
self.entityNotFound(entity)
return report
def charLeadershipReportFailureWrapper(self, entity):
entity = self.sql.getCharacterNameByCCPID(entity)[0][0]
return self.invalidInputMsg(entity, "LeadershipReport")
def genSiegeReport(self):
print("generating siege report")
r=self.sql.getCachedReport(self.getSiegeReportType(), 0)
if(len(r)>0 and False):
print("using cache")
rows = literal_eval(r[0][0])
return tabulate(rows, headers = rhead)
sieges = self.sql.getSieges()
rhead = ["System", "Besieged", "Siege Date", "Siegers", "num Structures killed", "num Attackers"]
rows=[]
for i in sieges:
rows.append( (i[0],i[1],i[2],i[3],i[4], i[5]) )
self.sql.insertCachedReport(self.getSiegeReportType(), 0, str(rows))
response = tabulate(rows, headers = rhead)
return response
def genCorpLeadershipReport(self, corpID):
start = int(time.time())
r = self.genEntityLeadershipReport(self.sql.getLeadershipByCorp, corpID)
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to gen leadership for corp: "+str(corpID))
return r
def genAllianceLeadershipReport(self, allianceID):
start = int(time.time())
r = self.genEntityLeadershipReport(self.sql.getLeadershipByAlliance, allianceID)
end = int(time.time())
print("elapsed time was "+str(end-start) +" seconds to gen leadership for alliance: "+str(allianceID))
return r
def genEntityLeadershipReport(self, sqlCall, eID):
rhead=["Pilot", "KillCount", "PossibleKills", "Whore %", "NumFights", "Confidence"]
r=self.sql.getCachedReport(self.getLeadershipReportType(), eID)
if(len(r)>0):
print("using cache")
rows = literal_eval(r[0][0])
#print(rows)
#print(r)
return tabulate(rows, headers = rhead)
#rows=[]
rtable=""
players = sqlCall(eID)
sort = self.processLeadershipReport(players)
rows=[]
for i in sort:
rows.append((i[1],i[2],i[3],i[4],i[5],i[6]))
if(len(rows)==0):
raise DBLockedException()
self.sql.insertCachedReport(self.getLeadershipReportType(), eID, str(rows))
#print(str(type(rows) )+"\n"+str(rows))
response = tabulate(rows, headers = rhead)
return response
def processLeadershipReport(self, rows):
l=[]
for i in rows:
#calc confidence and append it to row then sort by confidence
killCount = i[2]
totalKills = i[3]
fightPercent =i[4]
fightNum = i[5]
#print(i)
confidence = fightPercent**2 * 2**(fightNum/3)
l.append(list(i))
l[-1].append(confidence)
l.sort(key = lambda x:x[-1], reverse = True)
return l[:15]
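    # Worked example of the confidence weighting above (values are illustrative):
    #   fightPercent=0.5, fightNum=6  -> 0.5**2 * 2**(6/3) = 0.25 * 4 = 1.0
    #   fightPercent=0.8, fightNum=9  -> 0.8**2 * 2**(9/3) = 0.64 * 8 = 5.12
    # i.e. pilots who appear on a large share of kills across many fights rank highest.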
def genHrsReport(self, entity):
#genericReportWithErrorHandeling
print("generating hrs report for: "+str(entity))
pairs=[]
pairs.append((self.isChar, self.genCharacterHrsReport))
pairs.append((self.isCorp, self.genCorpHrsReport))
pairs.append((self.isAlliance, self.genAllianceHrsReport))
entityID = self.sql.getEntityID(entity)
if(entityID is None):
self.entityNotFound(entity)
report = self.genericReportWithErrorHandeling(pairs, entityID)
if(report is None):
self.entityNotFound(entity)
return report
def genEntityHrsReport(self, hrFunction, eID):
r=self.sql.getCachedReport(self.getHrsReportType(), eID)
if(len(r)>0):
print("using cache")
rows = literal_eval(r[0][0])
else:
rows = hrFunction(eID)
#print(rows)
self.sql.insertCachedReport(self.getHrsReportType(), eID, str(rows))
graph = Pyasciigraph()
ascii = ""
for line in graph.graph('Activity by time of day (Eve time)', rows):
ascii = ascii+line+"\n"
return ascii
def genCorpHrsReport(self, corpID):
return self.genEntityHrsReport(self.sql.getHrsByCorp, corpID)
def genAllianceHrsReport(self, allianceID):
return self.genEntityHrsReport(self.sql.getHrsByAlliance, allianceID)
def genCharacterHrsReport(self, charID):
return self.genEntityHrsReport(self.sql.getHrsByCharacter, charID)
def genSolReport(self, sol, useCache=True):
#print("start genSolReport")
rhead =["corporation", "Kills+losses", "Days Represented","Confidence Rating", "Most recent kill/loss"]
rows = self.genSolReportRaw(sol)
#print(len(rows))
response = tabulate(rows, headers = rhead)
return response
def genSolReportRaw(self, sol):
key = "solarSystemID"
r=self.sql.getCachedReport(self.getSolReportType(), sol)
if(len(r)>0):
print("using cache")
return literal_eval(r[0][0])
kills = self.sql.getKillsAndLossesBySystem(sol)
#print("kills Len= "+str(len(kills)))
mails = self.processSolReportKills(kills)
rhead =["corporation", "Kills+losses", "Days Represented","Confidence Rating", "Most recent kill/loss"]
rtable=""
rows=[]
for i in mails:
rows.append((i[4],i[1],i[3],i[5],i[2]))
#response = "\r\n\r\n" + tabulate(rows, headers = rhead)
self.sql.insertCachedReport(self.getSolReportType(), sol, str(rows))
return rows
def processSolReportKills(self, kills):
#corp/AllianceID, kills+losses, last kill/loss, days represented(bugged by up to 2x off), name
#confidence = 2**(daycount/2) *killcount * max( -.1 * (avgdelta*10 -24)**2 +5, 1)
l=[]
for i in kills:
killCount=i[1]
lastKill=i[2]
daysRep=i[3]
#print(lastKill)
#lastKill=date(lastKill)
lastKill= datetime.datetime.strptime(lastKill.replace("-","").replace(" ","").replace(":",""),'%Y%m%d%H%M%S')
lastKill=lastKill.date()
now =datetime.datetime.now().date()
#print(now)
#now=date()
#print(type((now-lastKill).days))
#print(type(killCount))
#print(type(daysRep))
#print(2**(daysRep/2.0))
confidence= 2**(daysRep/2) * killCount * 1/max(1, 2**(((now-lastKill).days)/7) )
confidence=int(confidence)
#confidence=int(confidence*1000)
#confidence=confidence/1000.0
#print(confidence)
l.append(list(i))
#print(l[-1])
l[-1].append(confidence)
l[-1][3] = l[-1][3]/1 #divide days
l[-1][2] = l[-1][2][0:10] #remove time from datetime
#i.append(confidence)
l.sort(key = lambda x:x[-1], reverse = True)
for i in range(len(l)):
if(l[i][5]>=1000000):
l[i][5]="Inf"
if(False and len(l[i][4])<27):
l[i][4] = l[i][4] +"_"*(27- len(l[i][4])) #pad with underscores
l = l[0:min(len(kills),10)] #take top 10 systems
return l
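    # Worked example of the confidence score used above (illustrative numbers):
    #   daysRep=4, killCount=10, last kill 14 days ago:
    #       2**(4/2) * 10 * 1/max(1, 2**(14/7)) = 4 * 10 / 4 = 10
    #   daysRep=6, killCount=20, last kill today:
    #       2**(6/2) * 20 * 1/max(1, 2**(0/7))  = 8 * 20 / 1 = 160
    # Recent, sustained activity outweighs a single old burst of kills.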
def findCharHome(self, eID, key, kills, losses):
return self.findEntityHome(eID, key, kills+losses)
def findCharHomeRaw(self, eID, key, kills, losses):
return self.findEntityHomeRaw(eID, key, kills+losses)
def findCharPeakTime(self, eID, key, kills, losses):
return self.findEntityPeakTime(eID, key, kills, losses)
def findCharDoctrines(self, eID, key, kills, losses):
return self.findEntityDoctrines(eID, key, kills, losses)
def findCorpHome(self, eID, key, kills, losses):
return self.findEntityHome(eID, key, kills+losses)
def findCorpPeakTime(self, eID, key, kills, losses):
return self.findEntityPeakTime(eID, key, kills, losses)
def findCorpDoctrines(self, eID, key, kills, losses):
return self.findEntityDoctrines(eID, key, kills, losses)
def findAllianceHome(self, eID, key, kills, losses):
return self.findEntityHome(eID, key, kills+losses)
def findAlliancePeakTime(self, eID, key, kills, losses):
return self.findEntityPeakTime(eID, key, kills, losses)
def findAllianceDoctrines(self, eID, key, kills, losses):
return self.findEntityDoctrines(eID, key, kills, losses)
def findEntityHome(self, eID, key, kills):
#joint = kills+losses
response =""
#killHead = ["System", "NumKills+Losses", "DaysRepresented", "Avg Kill Delta(days)", "Confidence Rating", "Most recent kill/loss"]
killHead = ["System", "NumKills+Losses", "DaysRepresented", "Class", "Confidence Rating", "Most recent kill/loss"]
stats = self.findEntityHomeRaw(eID, key, kills)
response = tabulate(stats, headers = killHead)
return response
def findEntityHomeRaw(self, eID, key, kills):
#joint = kills+losses
r=self.sql.getCachedReport(self.getHomeReportType(), eID)
if(len(r)>0):
print("using cache")
return literal_eval(r[0][0])
response =""
killTable = ""
#killHead = ["System", "NumKills+Losses", "DaysRepresented", "Avg Kill Delta(days)", "Confidence Rating", "Most recent kill/loss"]
killHead = ["System", "NumKills+Losses", "DaysRepresented", "Class", "Confidence Rating", "Most recent kill/loss"]
killT = []
zkill = 0
system =1
time =2
systems ={}
for i in kills:
if(i[system] in systems):
#print(str(i[system])+" "+str(systems[i[system]]))
#wtf =
systems[i[system]].append(i)
else:
#print("adding "+ str(i[system]) +" "+str(i))
systems[i[system]] = [i]
stats =[]
#print(systems.keys())
for i in systems.keys():
#for j in systems[i]:
stats.append(self.processSystem(systems[i]))
stats.sort(key = lambda x:x[4], reverse = True) #sort by confidence rating
stats = stats[0:min(len(stats),15)] #take top 15 systems
response = stats
self.sql.insertCachedReport(self.getHomeReportType(), eID, str(response))
return response
def processSystem(self, system):
#print(system)
## if(len(system)>1):
## print(system)
## exit
sysID = system[0][1]
name = self.sql.getSolarNameBySolarID(sysID)[0][0]
killcount = len(system)
days={}
daycount = 0
dates = []
unix = datetime.datetime(1970,1,1)
for i in system:
day =i[2]
dates.append(datetime.datetime.strptime(day.replace("-","").replace(" ","").replace(":",""),'%Y%m%d%H%M%S'))
day =i[2].split(" ")[0]
if(day in days):
days[day] = days[day] +1
else:
days[day]=1
daycount = len(days)
#day=[]
dates.sort(reverse = True)
delta = 0
secondsInHr = 60*60
secondsInDay = 60*60*24
avgdelta = 0
if(len(dates)>2):
for i in range(1, len(dates)):
#delta = delta +(i-unix).total_seconds()/(secondsInDay)
delta = delta + (dates[i-1]-dates[i]).total_seconds()/(secondsInDay)
avgdelta = (delta +0.0)/len(dates)
if(len(dates)==2):
avgdelta = (dates[0]-dates[1]).total_seconds()/2/(secondsInDay)
avgdelta = float(int((avgdelta*1000))%1000)/1000
lastKill = str(dates[0].year)+"-"+str(dates[0].month)+"-"+str(dates[0].day)
now = datetime.datetime.now().date()
confidence = 2**(daycount/2) *killcount * 1/max(1, 2**(((now-dates[0].date()).days)/7) )
#max( -.1 * (avgdelta*10 -24)**2 +5, 1) *
sysType = self.getSysClassByID(sysID)
return (name, killcount, daycount, sysType, confidence, lastKill)
def getSysClassByID(self, sysID):
#thera =31000005
#c1 <=31000354
#c2 <=31000879
#c3 <=31001374
#c4 <=31001879
        #c5-6 index ranges are unreliable
#c5 <=31002366 and <=31002504 -C6
#c6 31002366 to 31002470 (inclusive)
#shattered >=31002505
if(sysID < 31000005):
return "K space"
if(sysID==31000005):
return "Thera"
if(sysID<=31000354):
return "C1"
if(sysID <=31000879):
return "C2"
if(sysID <=31001374):
return "C3"
if(sysID <= 31001879):
return "C4"
if((sysID >= 31002366 and sysID <=31002470) or (sysID ==31002487 or sysID ==31002489 or sysID ==31002492)):
return "C6 (buggy)"
if(sysID <=31002366 or sysID <=31002504):
return "C5 (buggy)"
return "Shattered"
def findEntityPeakTime(self, eID, key, kills, losses):
return "Peak Time not implemented"
def findEntityDoctrines(self, eID, key, kills, losses):
return "Doctrines not implemented"
def sortKillsBySystem(self, kills):
        # this should eventually move to a dedicated data-processing class
from operator import itemgetter
#returns (systemID, number of Kills) tuple
whSystems={}
for i in range(len(kills)):
sol=str(kills[i].solarSystemID)
if(sol in whSystems):
whSystems[sol] = whSystems[sol]+1
else:
whSystems[sol] = 1
sortedWH = sorted(whSystems.items(), key = itemgetter(1), reverse = True)
return sortedWH
def getHomeReportType(self):
return 1
def getSolReportType(self):
return 2
def getLeadershipReportType(self):
return 3
def getHrsReportType(self):
return 4
def getSiegeReportType(self):
return 5
def isDBlocked(self):
lastTOD = self.sql.sqlCommand("select max(timeofdeath) from kills")
if(lastTOD is None or len(lastTOD)<=0):
raise DBLockedException()
lastTOD = self.sql.sqlCommand("select max(kill) from attackers")
if(lastTOD is None or len(lastTOD)<=0):
raise DBLockedException()
def entityNotFound(self, entity):
lastTOD = self.sql.sqlCommand("select max(timeofdeath) from kills")
if(lastTOD is None or len(lastTOD)<=0):
raise DBLockedException()
raise EntityNotFoundException(entity, lastTOD[0][0])
def getFunctionName(self):
return str(inspect.currentframe().f_back.f_code.co_name)
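    # e.g. when called from inside genCorpReport this returns the string 'genCorpReport'.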
def invalidInputMsg(self, i, reportName):
return "Input: "+str(i)+" invalid for report: "+str(reportName)+"\nBe sure you are requesting a report with arguments that make sense in the context of the report.\nRequesting a list of FCs makes sense for a corp, but not a character"
if(False):
d = dataProcessingInterface()
r =d.genReport("Marclass")
print(r)
r= d.genReport("Pos Party")
print(r)
r= d.genReport("Low-Class")
print(r)
r= d.genReport("lazerhawks")
print(r)
r= d.genReport("adfadfadfadfadsfadfasdfasdfasdf")
print(r)
| 33.656899
| 242
| 0.573154
|
9aafb6d869f7e361586633a8a53c80a15d2ca3a0
| 496
|
py
|
Python
|
micropython_adf/examples/install_libs.py
|
zhangtemplar/esp-adf
|
dc4f652efe8a3b3638b08a798af6e13763fd95c8
|
[
"MIT-0"
] | 960
|
2018-04-03T15:13:46.000Z
|
2022-03-29T02:48:46.000Z
|
micropython_adf/examples/install_libs.py
|
zhangtemplar/esp-adf
|
dc4f652efe8a3b3638b08a798af6e13763fd95c8
|
[
"MIT-0"
] | 786
|
2018-04-08T10:25:08.000Z
|
2022-03-31T23:20:40.000Z
|
micropython_adf/examples/install_libs.py
|
zhangtemplar/esp-adf
|
dc4f652efe8a3b3638b08a798af6e13763fd95c8
|
[
"MIT-0"
] | 512
|
2018-04-05T18:16:52.000Z
|
2022-03-31T21:27:53.000Z
|
# install_libs.py - install libs to SDCard
#
# This code is in the Public Domain (or CC0 licensed, at your option.)
# Unless required by applicable law or agreed to in writing, this
# software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied.
#
import upip
def install():
upip.install('micropython-json', '/sdcard/lib')
upip.install('micropython-urequests', '/sdcard/lib')
upip.install('micropython-hmac', '/sdcard/lib')
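# Typical usage (hedged sketch): from the MicroPython REPL, with the SD card
# mounted at /sdcard and the board connected to a network:
#   >>> import install_libs
#   >>> install_libs.install()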
| 33.066667
| 70
| 0.727823
|
2ecf03e72203b4be82e5fac8fb7a596fcb9bb744
| 33
|
py
|
Python
|
examples/max/ex4.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
examples/max/ex4.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
examples/max/ex4.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
print(max([], default='Empty!'))
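# Without the default= keyword, max() on an empty iterable raises ValueError,
# so the call above prints 'Empty!' instead of failing.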
| 16.5
| 32
| 0.606061
|
10f499210b61d81dfaaee19722e1c855da963520
| 305
|
py
|
Python
|
server/src/py/config.py
|
NavneetSurana/TickerTape
|
d91700fde4324656799acce8576a92e93b721731
|
[
"MIT"
] | null | null | null |
server/src/py/config.py
|
NavneetSurana/TickerTape
|
d91700fde4324656799acce8576a92e93b721731
|
[
"MIT"
] | 2
|
2020-07-26T10:08:10.000Z
|
2020-08-01T16:57:41.000Z
|
server/src/py/config.py
|
NavneetSurana/TickerTape
|
d91700fde4324656799acce8576a92e93b721731
|
[
"MIT"
] | null | null | null |
import os
from dotenv import load_dotenv
load_dotenv()
dbName = os.getenv("DB_NAME_TT")
user = os.getenv("DB_USER")
password = os.getenv("DB_PASSWORD")
host = os.getenv("DB_HOST")
port = os.getenv("DB_PORT")
url = os.getenv("DB_URL")
backupPath = os.getenv("DB_BACKUP_PATH")
authDb = os.getenv("AUTH_DB")
| 25.416667
| 40
| 0.734426
|
5dd90bc0ef6bc108f9bba8c8616cdabab243096b
| 649
|
py
|
Python
|
build/lib/biblelib/__init__.py
|
ndcorc/pyble
|
910c8f7f559781c8d707b87e7f958ad08c8679a9
|
[
"MIT"
] | 5
|
2019-05-03T20:18:13.000Z
|
2021-03-08T04:34:55.000Z
|
build/lib/biblelib/__init__.py
|
ndcorc/pyble
|
910c8f7f559781c8d707b87e7f958ad08c8679a9
|
[
"MIT"
] | null | null | null |
build/lib/biblelib/__init__.py
|
ndcorc/pyble
|
910c8f7f559781c8d707b87e7f958ad08c8679a9
|
[
"MIT"
] | 1
|
2020-10-28T11:33:15.000Z
|
2020-10-28T11:33:15.000Z
|
"""Utilities for working with Bible data"""
from .books import Book
from .groups import BookGroup
from .core import makeBiblerefFromDTR
#from .pericopes
#from .biblia
__version__ = "0.5"
__title__ = "biblelib"
__description__ = "Manage Bible references and metadata"
__uri__ = "http://github.com/Faithlife/Biblelib"
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "Sean Boisen"
__email__ = "sean@logos.com"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2018 Faithlife Corporation"
# __all__ = ['BiblelibError', 'ReferenceValidationError',
# 'HUMAN_BIBLE_DATATYPES', 'BIBLE_DATATYPES', 'makeBiblerefFromDTR']
| 22.37931
| 79
| 0.733436
|
bb2d0eb74122ab305495d90cd5cef819c985e4c0
| 2,307
|
py
|
Python
|
src/echonn/sys/animator.py
|
larkwt96/honors-thesis
|
7e3a52c285c1fdaf4ae9659497154ba04e522f48
|
[
"MIT"
] | null | null | null |
src/echonn/sys/animator.py
|
larkwt96/honors-thesis
|
7e3a52c285c1fdaf4ae9659497154ba04e522f48
|
[
"MIT"
] | null | null | null |
src/echonn/sys/animator.py
|
larkwt96/honors-thesis
|
7e3a52c285c1fdaf4ae9659497154ba04e522f48
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation, rc
from scipy.constants import pi
from abc import ABC, abstractmethod
class Animator(ABC):
def __init__(self, trail_length=1, max_t=10):
"""
trail_length in time, not dist
max_t should be set by implementer
"""
self.anim = None
self.trail_length = trail_length
self.ms_per_frame = 50
self.max_t = max_t
self.nFrames = self.calc_frames(max_t)
@abstractmethod
def init_plot(self):
"""
This method is implemented and the line objects are returned in an array following the figure
"""
fig = plt.figure()
data, *_ = plt.plot([1, 2, 3])[0]
# fig, self.lines = (figure, [line1, line2, line3, ...])
return fig, [data]
@abstractmethod
def animator(self, framei):
# t, y = self.get_data(framei)
        # do something with self.lines
pass
def calc_frames(self, max_t):
return int(max_t * 1000 / self.ms_per_frame)
def render(self):
fig, self.lines = self.init_plot()
self.anim = animation.FuncAnimation(fig,
self.animator,
frames=self.nFrames,
interval=self.ms_per_frame,
blit=False)
def save(self, fname, ext='.gif'):
"""
        Save the animation in the format implied by ext; the default ('.gif')
        is recommended. Pass a different ext to override it.
"""
self.anim.save(fname+ext)
def render_ipynb(self):
rc('animation', html='jshtml')
rc('animation', embed_limit=50)
return self.anim
def get_data(self, frame_i, t, y):
start_time, end_time = self.get_data_t_span(frame_i)
mask = self.get_data_mask(start_time, end_time, t)
return t[mask], y[mask]
def get_data_t_span(self, frame_i):
end_time = frame_i * self.ms_per_frame / 1000
start_time = end_time - self.trail_length
return start_time, end_time
@staticmethod
def get_data_mask(start_time, end_time, t):
return np.where((start_time < t) & (t < end_time))
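# Minimal usage sketch (hedged): a hypothetical concrete subclass animating a
# single trajectory. The class and variable names below are illustrative and
# not part of this module's API.
class LineAnimator(Animator):
    def __init__(self, t, y):
        super().__init__(trail_length=1, max_t=float(t[-1]))
        self.t = np.asarray(t)
        self.y = np.asarray(y)

    def init_plot(self):
        # build the figure and the single line object the base class will drive
        fig = plt.figure()
        line, = plt.plot([], [])
        plt.xlim(self.t.min(), self.t.max())
        plt.ylim(self.y.min(), self.y.max())
        return fig, [line]

    def animator(self, framei):
        # show only the most recent `trail_length` seconds of the trajectory
        ts, ys = self.get_data(framei, self.t, self.y)
        self.lines[0].set_data(ts, ys)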
| 32.041667
| 101
| 0.579107
|
24d1cf2d42d8b1519b0efa24cad7d1f5a701781d
| 2,163
|
py
|
Python
|
controllers/water.py
|
nursix/rlp
|
87384b7523ca92834f84636663a38f4524cde06e
|
[
"MIT"
] | 2
|
2017-07-25T19:15:58.000Z
|
2018-10-09T22:57:41.000Z
|
controllers/water.py
|
nursix/rlpptm
|
e7b50b2fdf6277aed5f198ca10ad773c5ca0b947
|
[
"MIT"
] | null | null | null |
controllers/water.py
|
nursix/rlpptm
|
e7b50b2fdf6277aed5f198ca10ad773c5ca0b947
|
[
"MIT"
] | 1
|
2017-10-03T13:03:47.000Z
|
2017-10-03T13:03:47.000Z
|
# -*- coding: utf-8 -*-
"""
Water module
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
return s3db.cms_index(module)
# -----------------------------------------------------------------------------
def debris_basin():
""" Debris Basins, RESTful controller """
return crud_controller()
# -----------------------------------------------------------------------------
def gauge():
""" Flood Gauges, RESTful controller """
# Pre-processor
def prep(r):
if r.interactive:
pass
elif r.representation == "plain":
# Map Popups
r.table.image_url.readable = False
return True
s3.prep = prep
# Post-processor
def postp(r, output):
if r.interactive:
pass
elif r.representation == "plain":
# Map Popups
# use the Image URL
# @ToDo: The default photo not the 1st
image_url = r.record.image_url
if image_url:
output["item"].append(IMG(_src=image_url,
# @ToDo: capture the size on upload & have controller resize where-required on-download
_width=400,
_height=310))
return output
s3.postp = postp
return crud_controller()
# -----------------------------------------------------------------------------
def river():
""" Rivers, RESTful controller """
return crud_controller()
# -----------------------------------------------------------------------------
def zone():
""" RESTful CRUD controller """
return crud_controller()
# -----------------------------------------------------------------------------
def zone_type():
""" RESTful CRUD controller """
return crud_controller()
# END =========================================================================
| 28.090909
| 129
| 0.409154
|
3e66ac3e1fb665c24d455f2b4901c1a96238d076
| 1,332
|
py
|
Python
|
examples/django-waffle-project/settings.py
|
SpazioDati/django-waffle
|
a78b47a7e813f7602e69b8bb83f4a2a3e274a97b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/django-waffle-project/settings.py
|
SpazioDati/django-waffle
|
a78b47a7e813f7602e69b8bb83f4a2a3e274a97b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/django-waffle-project/settings.py
|
SpazioDati/django-waffle
|
a78b47a7e813f7602e69b8bb83f4a2a3e274a97b
|
[
"BSD-3-Clause"
] | null | null | null |
import os
# Make filepaths relative to settings.
ROOT = os.path.dirname(os.path.abspath(__file__))
path = lambda *a: os.path.join(ROOT, *a)
DEBUG = True
TEMPLATE_DEBUG = True
JINJA_CONFIG = {}
SITE_ID = 1
SECRET_KEY = 'foobar'
TEST_RUNNER = 'django_nose.runner.NoseTestSuiteRunner'
DATABASES = {
'default': {
'NAME': 'test.db',
'ENGINE': 'django.db.backends.sqlite3',
}
}
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django_nose',
'south',
'waffle',
'test_app',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'waffle.middleware.WaffleMiddleware',
)
ROOT_URLCONF = 'test_app.urls'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
)
WAFFLE_FLAG_DEFAULT = False
WAFFLE_FLAG_AUTOCREATE = False
WAFFLE_FLAG_DEFAULTS = {}
WAFFLE_SWITCH_DEFAULT = False
WAFFLE_SWITCH_AUTOCREATE = False
WAFFLE_SWITCH_DEFAULTS = {}
WAFFLE_SAMPLE_DEFAULT = False
WAFFLE_SAMPLE_AUTOCREATE = False
WAFFLE_SAMPLE_DEFAULTS = {}
WAFFLE_OVERRIDE = False
| 21.836066
| 62
| 0.726727
|
10c5128263862dd8ab11f1566c488807314b2e11
| 5,766
|
py
|
Python
|
codes/test_cross_validation.py
|
boti996/onlab-public
|
3ee399b9f40979a54236cd646cc7566a3639a03f
|
[
"MIT"
] | null | null | null |
codes/test_cross_validation.py
|
boti996/onlab-public
|
3ee399b9f40979a54236cd646cc7566a3639a03f
|
[
"MIT"
] | null | null | null |
codes/test_cross_validation.py
|
boti996/onlab-public
|
3ee399b9f40979a54236cd646cc7566a3639a03f
|
[
"MIT"
] | null | null | null |
import pickle
import numpy as np
import os
from keras.models import load_model
from sklearn.model_selection import train_test_split
import codes.my_helper as helper
def confusion_matrix(image_val, label_val, model):
# Get prediction from model
pred_val = (model.predict(image_val))
# print(pred_val.shape)
# TP: wh-wh, FN: wh-bl, TN: bl-bl , FP: bl-wh
tp = 0
fn = 0
tn = 0
fp = 0
no_images = label_val.shape[0]
rows = label_val.shape[1]
cols = label_val.shape[2]
for n in range(0, no_images):
for i in range(0, rows):
for j in range(0, cols):
label = label_val[n][i][j]
pred = pred_val[n][i][j]
# True: white
if label[1] != 0:
# Pred: white
if pred[1] > pred[0]:
tp += 1
# Pred: black
else:
fn += 1
# True: black
else:
# Pred: black
if pred[0] > pred[1]:
tn += 1
# Pred white
else:
fp += 1
div = rows * cols * no_images
print('TP:' + str(tp / div * 100) + '%')
print('FN:' + str(fn / div * 100) + '%')
print('FP:' + str(fp / div * 100) + '%')
print('TN:' + str(tn / div * 100) + '%')
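def confusion_matrix_vectorized(image_val, label_val, model):
    # Hedged sketch: a vectorized equivalent of confusion_matrix above, assuming
    # the same (N, H, W, 2) one-hot labels and two-channel predictions. The
    # function name is illustrative; it is not used elsewhere in this script.
    pred_val = model.predict(image_val)
    true_white = label_val[..., 1] != 0
    pred_white = pred_val[..., 1] > pred_val[..., 0]
    div = true_white.size
    tp = np.sum(true_white & pred_white)
    fn = np.sum(true_white & ~pred_white)
    fp = np.sum(~true_white & pred_white)
    tn = np.sum(~true_white & ~pred_white)
    print('TP:' + str(tp / div * 100) + '%')
    print('FN:' + str(fn / div * 100) + '%')
    print('FP:' + str(fp / div * 100) + '%')
    print('TN:' + str(tn / div * 100) + '%')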
def main():
# VALIDATION ON CAMVID DATASET
images = helper.read_images('../datas/images_camvid/image/')
size = (320, 256)
images = helper.resize_images(images, size)
images = np.array(images) / 255
classes = [[192, 0, 128]]
labels_path = "../datas/labels_transformed_camvid.p"
if os.path.exists(labels_path):
labels = pickle.load(open(labels_path, "rb"))
else:
labels = helper.read_images('../datas/images_camvid/label/')
labels = helper.clear_label_colors(labels)
labels = helper.resize_images(labels, size)
labels = np.array(labels)
labels = helper.rgb_to_classes(labels, classes)
pickle.dump(labels, open(labels_path, "wb"))
img_train, img_val, label_train, label_val = train_test_split(images, labels, test_size=0.15,
shuffle=True, random_state=helper.random_state)
print("CAMVID")
model_path = '../models/roma_full.0001.500.h5'
model = load_model(model_path)
batch_size = 16
score = model.evaluate(img_val, label_val, batch_size=batch_size)
print(model_path)
print(score)
confusion_matrix(img_val, label_val, model)
model_path = '../models/roma_full.001.500.h5'
model = load_model(model_path)
batch_size = 16
score = model.evaluate(img_val, label_val, batch_size=batch_size)
print(model_path)
print(score)
confusion_matrix(img_val, label_val, model)
model_path = '../models/roma_full.freeze.001.500.h5'
model = load_model(model_path)
batch_size = 16
score = model.evaluate(img_val, label_val, batch_size=batch_size)
print(model_path)
print(score)
confusion_matrix(img_val, label_val, model)
model_path = '../models/camvid_full.500.h5'
model = load_model(model_path)
batch_size = 16
score = model.evaluate(img_val, label_val, batch_size=batch_size)
print(model_path)
print(score)
confusion_matrix(img_val, label_val, model)
    # VALIDATION ON ROMA DATASET
# list (116,) of (1024, 1280, 3) nparrays
images = helper.read_images('../datas/images_roma/image/')
size = (320, 256)
images = helper.resize_images(images, size)
images = np.array(images) / 255
classes = [[255, 255, 255]]
labels_path = "../datas/labels_transformed_roma.p"
if os.path.exists(labels_path):
labels = pickle.load(open(labels_path, "rb"))
else:
labels = helper.read_images('../datas/images_roma/label/')
labels = helper.resize_images(labels, size)
labels = np.array(labels)
labels = helper.rgb_to_classes(labels, classes)
pickle.dump(labels, open(labels_path, "wb"))
img_train, img_val, label_train, label_val = train_test_split(images, labels, test_size=0.15,
shuffle=True, random_state=helper.random_state)
print("ROMA")
model_path = '../models/roma_full.0001.500.h5'
model = load_model(model_path)
batch_size = 16
score = model.evaluate(img_val, label_val, batch_size=batch_size)
print(model_path)
print(score)
confusion_matrix(img_val, label_val, model)
model_path = '../models/roma_full.001.500.h5'
model = load_model(model_path)
batch_size = 16
score = model.evaluate(img_val, label_val, batch_size=batch_size)
print(model_path)
print(score)
confusion_matrix(img_val, label_val, model)
model_path = '../models/roma_full.freeze.001.500.h5'
model = load_model(model_path)
batch_size = 16
score = model.evaluate(img_val, label_val, batch_size=batch_size)
print(model_path)
print(score)
confusion_matrix(img_val, label_val, model)
model_path = '../models/camvid_full.500.h5'
model = load_model(model_path)
batch_size = 16
score = model.evaluate(img_val, label_val, batch_size=batch_size)
print(model_path)
print(score)
confusion_matrix(img_val, label_val, model)
img_val = images
label_val = labels
print("ROMA FULL")
model_path = '../models/camvid_full.500.h5'
model = load_model(model_path)
batch_size = 16
score = model.evaluate(img_val, label_val, batch_size=batch_size)
print(model_path)
print(score)
confusion_matrix(img_val, label_val, model)
if __name__ == '__main__':
    main()
| 33.523256
| 113
| 0.614291
|
ec26261fbcf94c8b08c0ee0b653e98b51be16b53
| 3,549
|
py
|
Python
|
RequestParser.py
|
dsande30/COSC560-PA1
|
4125122b0ac705657f7edfce73058444617314c2
|
[
"MIT"
] | null | null | null |
RequestParser.py
|
dsande30/COSC560-PA1
|
4125122b0ac705657f7edfce73058444617314c2
|
[
"MIT"
] | null | null | null |
RequestParser.py
|
dsande30/COSC560-PA1
|
4125122b0ac705657f7edfce73058444617314c2
|
[
"MIT"
] | null | null | null |
"""
Basic HTTP server request parser.
Parses HTTP headers and determines GET/POST actions
Stores all header and content information in an object
"""
import logging
import sys
import io
import re
import os
from urllib.parse import unquote
class RequestParser:
"""Base object for parsing http headers."""
def __init__(self, request):
"""Store all REQUEST data in the object."""
self.error_code = 200
self.action = ''
self.version = 0
self.path = ''
self.header = {}
self.request = request
def parseRequest(self):
"""Parse given request."""
try:
# this try statement catches GET and FORM POST requests
str_request = io.StringIO(self.request.decode())
self.parseHeader(str_request, True)
except:
# the except statement catches multipart/form-data POST requests
self.parseMultiPart()
def parseMultiPart(self):
"""Parses multipart/form-data POST requests."""
request = self.request
# regex is used to separate header from content
        ct = re.compile(rb'(Content-Type: )(\S+)\r\n\r\n')
        name = re.compile(rb'(filename=)(")(.*\.*)(")')
tmp_ct = ct.search(request).groups()[1]
fname = name.search(request).groups()[2]
end_header = request.find('\r\n\r\n'.encode())
index = request.find(tmp_ct) + len(tmp_ct) + 4 # get to start of content
# content and header are now determined based on regex results
final_bound = 0
final_bound = request[index:].find('------WebKitForm'.encode())
if final_bound != 0:
self.header['Payload'] = request[index:index+final_bound]
else:
self.header['Payload'] = request[index:]
self.header['Mime-Type'] = tmp_ct.decode()
# populates the header dictionary
self.parseHeader(io.StringIO(request[:end_header].decode()), False)
# file name to be uploaded
self.path = './site/uploads/' + fname.decode()
def parseHeader(self, header, check_payload):
"""Parses the header of any given request."""
lines = header.readlines()
last = lines[-1]
# the header is converted to a string and parsed
# each key: val line is converted into a dict representation
for line in lines[1:]:
if line is last and check_payload:
self.header['Payload'] = line
else:
tmp = [x.strip() for x in line.split(':', 1)]
if len(tmp) == 2:
if 'multipart/form-data' in tmp[1] and "Payload" not in self.header:
self.parseMultiPart()
return 0
self.header[tmp[0]] = tmp[1]
self.checkData(lines[0].strip())
def checkData(self, line):
"""Get request acion, pathname, and HTTP version from first line of a request."""
split_line = line.split()
self.action = split_line[0]
if split_line[1] == '/':
self.path = os.path.normpath("site" + os.path.sep + "index.html")
logging.debug(os.path.abspath(self.path))
else:
if split_line[1] in ['/dir', '/uploads']:
self.path = split_line[1]
else:
self.path = unquote(os.path.abspath('.') + os.path.sep + "site" + split_line[1])
version = split_line[2].split('/')
self.version = version[1]
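# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Drives RequestParser with a hand-written GET request such as one read from a socket.
# The request bytes below are made up for the example.
if __name__ == "__main__":
    raw_request = b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n"
    parser = RequestParser(raw_request)
    parser.parseRequest()
    print(parser.action)   # "GET"
    print(parser.version)  # "1.1"
    print(parser.path)     # normalized path to site/index.html
    print(parser.header)   # parsed header fields plus a "Payload" entry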
| 34.794118 | 96 | 0.568611 |
d5fb0807d06236ce83f68474b5ecb3c23c854cc3 | 8,223 | py | Python | python/ovs/unixctl/server.py | salambashir/ovs | f09f2e73eed7ac5a37999e65eadc7d56036d9642 | ["Apache-2.0"] | 40 | 2020-06-16T03:44:12.000Z | 2022-03-29T05:54:27.000Z | python/ovs/unixctl/server.py | salambashir/ovs | f09f2e73eed7ac5a37999e65eadc7d56036d9642 | ["Apache-2.0"] | 44 | 2019-01-16T14:37:52.000Z | 2019-11-05T16:17:34.000Z | python/ovs/unixctl/server.py | salambashir/ovs | f09f2e73eed7ac5a37999e65eadc7d56036d9642 | ["Apache-2.0"] | 17 | 2020-06-22T02:37:30.000Z | 2022-03-02T16:09:02.000Z |
# Copyright (c) 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import os
import sys
import ovs.dirs
import ovs.jsonrpc
import ovs.stream
import ovs.unixctl
import ovs.util
import ovs.version
import ovs.vlog
import six
from six.moves import range
Message = ovs.jsonrpc.Message
vlog = ovs.vlog.Vlog("unixctl_server")
strtypes = six.string_types
class UnixctlConnection(object):
def __init__(self, rpc):
assert isinstance(rpc, ovs.jsonrpc.Connection)
self._rpc = rpc
self._request_id = None
def run(self):
self._rpc.run()
error = self._rpc.get_status()
if error or self._rpc.get_backlog():
return error
for _ in range(10):
if error or self._request_id:
break
error, msg = self._rpc.recv()
if msg:
if msg.type == Message.T_REQUEST:
self._process_command(msg)
else:
# XXX: rate-limit
vlog.warn("%s: received unexpected %s message"
% (self._rpc.name,
Message.type_to_string(msg.type)))
error = errno.EINVAL
if not error:
error = self._rpc.get_status()
return error
def reply(self, body):
self._reply_impl(True, body)
def reply_error(self, body):
self._reply_impl(False, body)
# Called only by unixctl classes.
def _close(self):
self._rpc.close()
self._request_id = None
def _wait(self, poller):
self._rpc.wait(poller)
if not self._rpc.get_backlog():
self._rpc.recv_wait(poller)
def _reply_impl(self, success, body):
assert isinstance(success, bool)
assert body is None or isinstance(body, strtypes)
assert self._request_id is not None
if body is None:
body = ""
if body and not body.endswith("\n"):
body += "\n"
if success:
reply = Message.create_reply(body, self._request_id)
else:
reply = Message.create_error(body, self._request_id)
self._rpc.send(reply)
self._request_id = None
def _process_command(self, request):
assert isinstance(request, ovs.jsonrpc.Message)
assert request.type == ovs.jsonrpc.Message.T_REQUEST
self._request_id = request.id
error = None
params = request.params
method = request.method
command = ovs.unixctl.commands.get(method)
if command is None:
error = '"%s" is not a valid command' % method
elif len(params) < command.min_args:
error = '"%s" command requires at least %d arguments' \
% (method, command.min_args)
elif len(params) > command.max_args:
error = '"%s" command takes at most %d arguments' \
% (method, command.max_args)
else:
for param in params:
if not isinstance(param, strtypes):
error = '"%s" command has non-string argument' % method
break
if error is None:
unicode_params = [six.text_type(p) for p in params]
command.callback(self, unicode_params, command.aux)
if error:
self.reply_error(error)
def _unixctl_version(conn, unused_argv, version):
assert isinstance(conn, UnixctlConnection)
version = "%s (Open vSwitch) %s" % (ovs.util.PROGRAM_NAME, version)
conn.reply(version)
class UnixctlServer(object):
def __init__(self, listener):
assert isinstance(listener, ovs.stream.PassiveStream)
self._listener = listener
self._conns = []
def run(self):
for _ in range(10):
error, stream = self._listener.accept()
if sys.platform == "win32" and error == errno.WSAEWOULDBLOCK:
# WSAEWOULDBLOCK would be the equivalent on Windows
# for EAGAIN on Unix.
error = errno.EAGAIN
if not error:
rpc = ovs.jsonrpc.Connection(stream)
self._conns.append(UnixctlConnection(rpc))
elif error == errno.EAGAIN:
break
else:
# XXX: rate-limit
vlog.warn("%s: accept failed: %s" % (self._listener.name,
os.strerror(error)))
for conn in copy.copy(self._conns):
error = conn.run()
if error and error != errno.EAGAIN:
conn._close()
self._conns.remove(conn)
def wait(self, poller):
self._listener.wait(poller)
for conn in self._conns:
conn._wait(poller)
def close(self):
for conn in self._conns:
conn._close()
self._conns = None
self._listener.close()
self._listener = None
@staticmethod
def create(path, version=None):
"""Creates a new UnixctlServer which listens on a unixctl socket
created at 'path'. If 'path' is None, the default path is chosen.
'version' contains the version of the server as reported by the unixctl
version command. If None, ovs.version.VERSION is used."""
assert path is None or isinstance(path, strtypes)
if path is not None:
path = "punix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path)
else:
if sys.platform == "win32":
path = "punix:%s/%s.ctl" % (ovs.dirs.RUNDIR,
ovs.util.PROGRAM_NAME)
else:
path = "punix:%s/%s.%d.ctl" % (ovs.dirs.RUNDIR,
ovs.util.PROGRAM_NAME,
os.getpid())
if version is None:
version = ovs.version.VERSION
error, listener = ovs.stream.PassiveStream.open(path)
if error:
ovs.util.ovs_error(error, "could not initialize control socket %s"
% path)
return error, None
ovs.unixctl.command_register("version", "", 0, 0, _unixctl_version,
version)
return 0, UnixctlServer(listener)
class UnixctlClient(object):
def __init__(self, conn):
assert isinstance(conn, ovs.jsonrpc.Connection)
self._conn = conn
def transact(self, command, argv):
assert isinstance(command, strtypes)
assert isinstance(argv, list)
for arg in argv:
assert isinstance(arg, strtypes)
request = Message.create_request(command, argv)
error, reply = self._conn.transact_block(request)
if error:
vlog.warn("error communicating with %s: %s"
% (self._conn.name, os.strerror(error)))
return error, None, None
if reply.error is not None:
return 0, str(reply.error), None
else:
assert reply.result is not None
return 0, None, str(reply.result)
def close(self):
self._conn.close()
self.conn = None
@staticmethod
def create(path):
assert isinstance(path, str)
unix = "unix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path)
error, stream = ovs.stream.Stream.open_block(
ovs.stream.Stream.open(unix))
if error:
vlog.warn("failed to connect to %s" % path)
return error, None
return 0, UnixctlClient(ovs.jsonrpc.Connection(stream))
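# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Registers a trivial "echo" command and polls the server in a loop. It assumes
# ovs.poller.Poller is available, as elsewhere in the ovs Python bindings.
def _echo_command(conn, argv, aux):
    conn.reply(" ".join(argv))
def _example_server_loop():
    import ovs.poller
    ovs.unixctl.command_register("echo", "[ARG...]", 0, 2, _echo_command, None)
    error, server = UnixctlServer.create(None)
    if error:
        raise RuntimeError("could not create unixctl server: %s" % os.strerror(error))
    while True:
        server.run()
        poller = ovs.poller.Poller()
        server.wait(poller)
        poller.block()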
| 31.505747 | 79 | 0.570473 |
502701e0b7074d09b90395700bf1761c9d053e2f | 1,434 | py | Python | src/templates/segmentation/tilling_bach.py | PeterJackNaylor/CellularHeatmaps | 52829685683b6f3315b62246a77cc2206326e2b3 | ["Apache-2.0"] | null | null | null | src/templates/segmentation/tilling_bach.py | PeterJackNaylor/CellularHeatmaps | 52829685683b6f3315b62246a77cc2206326e2b3 | ["Apache-2.0"] | 2 | 2022-01-13T03:57:02.000Z | 2022-03-12T01:01:45.000Z | src/templates/segmentation/tilling_bach.py | PeterJackNaylor/CellularHeatmaps | 52829685683b6f3315b62246a77cc2206326e2b3 | ["Apache-2.0"] | 1 | 2020-10-12T07:56:51.000Z | 2020-10-12T07:56:51.000Z |
#!/usr/bin/env python
"""
Input variables:
- sample: path of a tif WSI image.
- model: path of the tissue segmentation file.
Output files:
- {sample}_mask.npy
"""
import os
import numpy as np
from glob import glob
# from numpy.core.numeric import outer
from skimage.transform import resize
from skimage import io
from utils import load_model_v2, get_backbone_model # setup_data,
from validation import load_meta # is prepended in the makefile
# from useful_plot import coloring_bin, apply_mask_with_highlighted_borders
from utils import setup_data
def main():
sizex, sizey = int("${size_x}"), int("${size_y}")
# Load sample
files = glob("${sample}/*.tif")
slides = np.zeros((len(files), sizex, sizey, 3), dtype="float32")
for i, f in enumerate(files):
slides[i] = io.imread(f)
# Load segmentation_model
opt = type('', (), {})()
opt.meta = os.path.join("${model}", "meta.pkl")
opt.backbone, opt.model = get_backbone_model(os.path.join("${model}", "final_score.csv"))
opt.weights = os.path.join("${model}", "model_weights.h5")
opt.mean, opt.std = load_meta(opt.meta)
model = load_model_v2(opt.backbone, opt.model, opt.weights)
ds = setup_data(slides, opt.mean, opt.std, opt.backbone, batch_size=1, image_size=(sizex, sizey))
res = model.predict(ds)
np.savez("segmented_tiles.npz", tiles=res, raw=slides)
if __name__ == "__main__":
main()
| 28.68 | 101 | 0.682008 |
c6f671f6d60d74c7ab0cd8201c8efd9563bf81f6 | 642 | py | Python | org/cu/api/ApplicationService.py | liujiage/JobAppSystem | 10e18bc897d481e022f182059a7199eae770bac5 | ["Apache-2.0"] | 1 | 2022-01-11T14:25:39.000Z | 2022-01-11T14:25:39.000Z | org/cu/api/ApplicationService.py | liujiage/JobAppSystem | 10e18bc897d481e022f182059a7199eae770bac5 | ["Apache-2.0"] | null | null | null | org/cu/api/ApplicationService.py | liujiage/JobAppSystem | 10e18bc897d481e022f182059a7199eae770bac5 | ["Apache-2.0"] | null | null | null |
from abc import ABCMeta, abstractmethod
'''
@Author Liu JiaGe
@School Coventry University & PSB
@Date 01/02/21
@Handle Job application, parent class
'''
class ApplicationService(metaclass=ABCMeta):
# init page and resources
@abstractmethod
def load(self):
pass
# process apply
@abstractmethod
def submitApply(self):
pass
# check whether illegal
@abstractmethod
def checkApply(self):
pass
# save the data into database
@abstractmethod
def saveApply(self):
pass
# query data from database
@abstractmethod
def queryApply(self):
pass
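# --- Hedged sketch (added for illustration; not part of the original file) ---
# A minimal concrete subclass showing how the abstract workflow methods could be
# implemented and chained together; the in-memory "database" list is a stand-in.
class ConsoleApplicationService(ApplicationService):
    def load(self):
        self.application = {"job_id": 1, "applicant": "example"}
        self.database = []
    def submitApply(self):
        if self.checkApply():
            self.saveApply()
    def checkApply(self):
        return bool(self.application.get("applicant"))
    def saveApply(self):
        self.database.append(self.application)
    def queryApply(self):
        return list(self.database)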
| 18.342857 | 44 | 0.643302 |
d8d156b46f819dfccd0959fb715009681409a34b | 5,420 | py | Python | elliot/recommender/neural/ConvMF/convolutional_matrix_factorization.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | ["Apache-2.0"] | 175 | 2021-03-04T15:46:25.000Z | 2022-03-31T05:56:58.000Z | elliot/recommender/neural/ConvMF/convolutional_matrix_factorization.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | ["Apache-2.0"] | 15 | 2021-03-06T17:53:56.000Z | 2022-03-24T17:02:07.000Z | elliot/recommender/neural/ConvMF/convolutional_matrix_factorization.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | ["Apache-2.0"] | 39 | 2021-03-04T15:46:26.000Z | 2022-03-09T15:37:12.000Z |
"""
Module description:
"""
__version__ = '0.3.1'
__author__ = 'Felice Antonio Merra, Vito Walter Anelli, Claudio Pomo'
__email__ = 'felice.merra@poliba.it, vitowalter.anelli@poliba.it, claudio.pomo@poliba.it'
from ast import literal_eval as make_tuple
import numpy as np
from tqdm import tqdm
from elliot.dataset.samplers import pointwise_pos_neg_sampler as pws
from elliot.recommender.base_recommender_model import BaseRecommenderModel
from elliot.recommender.base_recommender_model import init_charger
from elliot.recommender.neural.ConvMF.convolutional_matrix_factorization_model import \
ConvMatrixFactorizationModel
from elliot.recommender.recommender_utils_mixin import RecMixin
class ConvMF(RecMixin, BaseRecommenderModel):
r"""
Convolutional Matrix Factorization for Document Context-Aware Recommendation
For further details, please refer to the `paper <https://dl.acm.org/doi/10.1145/2959100.2959165>`_
Args:
embedding_size: Embedding dimension
lr: Learning rate
l_w: Regularization coefficient
l_b: Regularization coefficient of bias
cnn_channels: List of channels
cnn_kernels: List of kernels
cnn_strides: List of strides
dropout_prob: Dropout probability applied on the convolutional layers
To include the recommendation model, add it to the config file adopting the following pattern:
.. code:: yaml
models:
ConvMF:
meta:
save_recs: True
epochs: 10
batch_size: 512
embedding_size: 100
lr: 0.001
l_w: 0.005
l_b: 0.0005
cnn_channels: (1, 32, 32)
cnn_kernels: (2,2)
cnn_strides: (2,2)
dropout_prob: 0
"""
@init_charger
def __init__(self, data, config, params, *args, **kwargs):
"""
Args:
data:
config:
params:
*args:
**kwargs:
"""
self._sampler = pws.Sampler(self._data.i_train_dict)
self._params_list = [
("_lr", "lr", "lr", 0.001, None, None),
("_embedding_size", "embedding_size", "embedding_size", 100, None, None),
("_cnn_channels", "cnn_channels", "cnn_channels", "(1, 32, 32)", lambda x: list(make_tuple(str(x))),
lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
("_cnn_kernels", "cnn_kernels", "cnn_kernels", "(2,2)", lambda x: list(make_tuple(str(x))),
lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
("_cnn_strides", "cnn_strides", "cnn_strides", "(2,2)", lambda x: list(make_tuple(str(x))),
lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
("_dropout_prob", "dropout_prob", "dropout_prob", 0, None, None),
("_l_w", "l_w", "l_w", 0.005, None, None),
("_l_b", "l_b", "l_b", 0.0005, None, None),
]
self.autoset_params()
if self._batch_size < 1:
self._batch_size = self._data.transactions
self._ratings = self._data.train_dict
self._sp_i_train = self._data.sp_i_train
self._i_items_set = list(range(self._num_items))
self._model = ConvMatrixFactorizationModel(self._num_users, self._num_items, self._embedding_size,
self._lr, self._cnn_channels, self._cnn_kernels,
self._cnn_strides, self._dropout_prob, self._l_w, self._l_b,
self._seed
)
@property
def name(self):
return "ConvMF" \
+ f"_{self.get_base_params_shortcut()}" \
+ f"_{self.get_params_shortcut()}"
def train(self):
if self._restore:
return self.restore_weights()
for it in self.iterate(self._epochs):
loss = 0
steps = 0
with tqdm(total=int(self._data.transactions // self._batch_size), disable=not self._verbose) as t:
for batch in self._sampler.step(self._data.transactions, self._batch_size):
steps += 1
loss += self._model.train_step(batch)
t.set_postfix({'loss': f'{loss.numpy() / steps:.5f}'})
t.update()
self.evaluate(it, loss.numpy()/(it + 1))
def get_recommendations(self, k: int = 100):
predictions_top_k_test = {}
predictions_top_k_val = {}
for index, offset in enumerate(range(0, self._num_users, self._batch_size)):
offset_stop = min(offset + self._batch_size, self._num_users)
predictions = self._model.get_recs(
(
np.repeat(np.array(list(range(offset, offset_stop)))[:, None], repeats=self._num_items, axis=1),
np.array([self._i_items_set for _ in range(offset, offset_stop)])
)
)
recs_val, recs_test = self.process_protocol(k, predictions, offset, offset_stop)
predictions_top_k_val.update(recs_val)
predictions_top_k_test.update(recs_test)
return predictions_top_k_val, predictions_top_k_test
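# --- Hedged illustration (added; not part of the original class) ---
# Shows how the tuple-valued hyperparameters in _params_list are parsed: the string form
# used in the YAML config is turned into a plain Python list via ast.literal_eval.
def _parse_channels_example():
    return list(make_tuple("(1, 32, 32)"))  # -> [1, 32, 32]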
| 39.275362 | 116 | 0.579889 |
029f54fbe79aaa766eef7d9a1131811b889081e3 | 352 | py | Python | radical_translations/core/migrations/0032_delete_instance.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | ["MIT"] | 3 | 2022-02-08T18:03:44.000Z | 2022-03-18T18:10:43.000Z | radical_translations/core/migrations/0032_delete_instance.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | ["MIT"] | 19 | 2020-05-11T15:36:35.000Z | 2022-02-08T11:26:40.000Z | radical_translations/core/migrations/0032_delete_instance.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | ["MIT"] | null | null | null |
# Generated by Django 2.2.10 on 2020-05-18 10:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0008_alter_field_classification_on_event'),
('core', '0031_delete_item'),
]
operations = [
migrations.DeleteModel(
name='Instance',
),
]
| 19.555556 | 63 | 0.616477 |
9de6cc631123807852edc7419ccf8f6d6b03320b | 4,098 | py | Python | code/routines/recorder.py | oliverruoff/PiBot_v4 | 9e724633e1b2972477b5d545e2b13e063a925689 | ["Apache-2.0"] | 5 | 2022-01-04T09:52:12.000Z | 2022-01-17T09:07:53.000Z | code/routines/recorder.py | oliverruoff/PiBot_v4 | 9e724633e1b2972477b5d545e2b13e063a925689 | ["Apache-2.0"] | 4 | 2021-12-21T08:02:48.000Z | 2022-02-09T14:54:49.000Z | code/routines/recorder.py | oliverruoff/PiBot_v4 | 9e724633e1b2972477b5d545e2b13e063a925689 | ["Apache-2.0"] | null | null | null |
import json
recorded_movement = []
def record(left_stepper, right_stepper):
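    # Descriptive note (added): interactive key map, inferred from the branches below.
    #   w / s : long straight moves in opposite directions (360 deg wheel turns)
    #   u / j : shorter straight moves (100 deg wheel turns)
    #   a / d : in-place turns in opposite directions (207 deg wheel turns)
    #   h / k : smaller in-place turns (57 deg wheel turns)
    #   r : reset the recording, m : save to saved_movement.json, l : load it,
    #   p : play back the recorded sequence, z : exit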
recorded_movement = []
input_ = ""
while True:
input_ = input("Move >> ")
if input_ == "w":
recorded_movement.append("w")
play_movement(left_stepper, right_stepper, "w")
elif input_ == "s":
recorded_movement.append("s")
play_movement(left_stepper, right_stepper, "s")
elif input_ == "a":
recorded_movement.append("a")
play_movement(left_stepper, right_stepper, "a")
elif input_ == "d":
recorded_movement.append("d")
play_movement(left_stepper, right_stepper, "d")
elif input_ == "u":
recorded_movement.append("u")
play_movement(left_stepper, right_stepper, "u")
elif input_ == "j":
recorded_movement.append("j")
play_movement(left_stepper, right_stepper, "j")
elif input_ == "h":
recorded_movement.append("h")
play_movement(left_stepper, right_stepper, "h")
elif input_ == "k":
recorded_movement.append("k")
play_movement(left_stepper, right_stepper, "k")
elif input_ == "r":
recorded_movement = []
print("Resetted recorded list.")
elif input_ == "m":
json_movement_list = json.dumps(recorded_movement)
with open("saved_movement.json", "w") as text_file:
text_file.write(json_movement_list)
print("Saved List:")
print(recorded_movement)
elif input_ == "l":
with open('saved_movement.json', 'r') as file:
recorded_movement = json.loads(file.read())
print("Loaded list:")
print(recorded_movement)
elif input_ == "p":
for movement in recorded_movement:
play_movement(left_stepper, right_stepper, movement)
elif input_ == "z":
exit()
def play_movement(left_stepper, right_stepper, input_):
if input_ == "w":
left_stepper.set_direction_clockwise(False)
right_stepper.set_direction_clockwise(True)
left_stepper.turn_stepper_angle(360, True, False, False)
right_stepper.turn_stepper_angle(360, False, False, False)
elif input_ == "s":
left_stepper.set_direction_clockwise(True)
right_stepper.set_direction_clockwise(False)
left_stepper.turn_stepper_angle(360, True, False, False)
right_stepper.turn_stepper_angle(360, False, False, False)
elif input_ == "a":
left_stepper.set_direction_clockwise(True)
right_stepper.set_direction_clockwise(True)
left_stepper.turn_stepper_angle(207, True)
right_stepper.turn_stepper_angle(207, False)
elif input_ == "d":
left_stepper.set_direction_clockwise(False)
right_stepper.set_direction_clockwise(False)
left_stepper.turn_stepper_angle(207, True)
right_stepper.turn_stepper_angle(207, False)
elif input_ == "u":
left_stepper.set_direction_clockwise(False)
right_stepper.set_direction_clockwise(True)
left_stepper.turn_stepper_angle(100, True, False, False)
right_stepper.turn_stepper_angle(100, False, False, False)
elif input_ == "j":
left_stepper.set_direction_clockwise(True)
right_stepper.set_direction_clockwise(False)
left_stepper.turn_stepper_angle(100, True, False, False)
right_stepper.turn_stepper_angle(100, False, False, False)
elif input_ == "h":
left_stepper.set_direction_clockwise(True)
right_stepper.set_direction_clockwise(True)
left_stepper.turn_stepper_angle(57, True)
right_stepper.turn_stepper_angle(57, False)
elif input_ == "k":
left_stepper.set_direction_clockwise(False)
right_stepper.set_direction_clockwise(False)
left_stepper.turn_stepper_angle(57, True)
right_stepper.turn_stepper_angle(57, False)
left_stepper.activate_stepper()
right_stepper.activate_stepper()
| 41.393939 | 68 | 0.644949 |
f82d189d0117c7b99e93dbac5049dff16d464736 | 2,147 | py | Python | wordMergeApp/GoogleOAuthService.py | jackson-zhipeng-chang/WordMerge | 2eb56ac5e95b231a5d9fb0ffe4bd3f5a8ff3b98b | ["MIT"] | null | null | null | wordMergeApp/GoogleOAuthService.py | jackson-zhipeng-chang/WordMerge | 2eb56ac5e95b231a5d9fb0ffe4bd3f5a8ff3b98b | ["MIT"] | 6 | 2021-04-08T21:58:55.000Z | 2022-02-10T14:54:08.000Z | wordMergeApp/GoogleOAuthService.py | jackson-zhipeng-chang/WordMerge | 2eb56ac5e95b231a5d9fb0ffe4bd3f5a8ff3b98b | ["MIT"] | null | null | null |
from __future__ import print_function
import re
from oauth2client.service_account import ServiceAccountCredentials
from oauth2client import file, client, tools
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from django.shortcuts import render
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, render,get_list_or_404
from .models import Group
import json
from django.http import HttpResponse, HttpResponseNotFound, Http404
from decouple import config
import webbrowser
SCOPES = ["https://www.googleapis.com/auth/documents", "https://www.googleapis.com/auth/drive"]
def init(userid):
    '''Initialization function: builds the Docs and Drive services for the given group id.
    '''
group = get_object_or_404(Group, id=userid)
creds = group.token
docService = build('docs', 'v1', credentials=creds, cache_discovery=False)
driveService = build('drive', 'v3', credentials=creds, cache_discovery=False)
return docService, driveService
def getToken(request):
if request.user.is_authenticated:
creds = None
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
credentials = json.loads(config("credentials"))
flow = InstalledAppFlow.from_client_config(credentials, SCOPES)
creds = flow.run_local_server(port=0)
user_email = request.user.email
user = User.objects.get(email=user_email)
if not Group.objects.filter(user=user).exists() :
group = Group.objects.create(displayName=user.username,user=user, token=creds)
group.save()
return render(request, 'home.html', {'uuid':request.get_host()+ '/convert/' +str(group.id)})
else:
group = Group.objects.get(user=user)
return render(request, 'home.html', {'uuid':request.get_host()+ '/convert/' +str(group.id)})
| 40.509434 | 104 | 0.706567 |
1676ced1dfdc291bccecddd410498d72dbb63a0d | 2,921 | py | Python | aws_kinesis_consumer/error/handler.py | thinow/aws-kinesis-consumer | 94aaa88bd91ff8e7dd8a6716083a95d303281a0f | ["MIT"] | 29 | 2021-02-19T18:35:20.000Z | 2022-03-16T13:15:25.000Z | aws_kinesis_consumer/error/handler.py | thinow/aws-kinesis-consumer | 94aaa88bd91ff8e7dd8a6716083a95d303281a0f | ["MIT"] | 1 | 2021-03-23T20:57:11.000Z | 2021-03-23T20:57:11.000Z | aws_kinesis_consumer/error/handler.py | thinow/aws-kinesis-consumer | 94aaa88bd91ff8e7dd8a6716083a95d303281a0f | ["MIT"] | null | null | null |
from typing import Optional
from botocore.exceptions import NoRegionError, NoCredentialsError, PartialCredentialsError, CredentialRetrievalError, \
ClientError
from aws_kinesis_consumer.configuration.configuration import Configuration
from aws_kinesis_consumer.ui.printer import Printer
class ErrorHandler:
def __init__(self, printer: Printer, configuration: Optional[Configuration] = None) -> None:
self.printer = printer
self.verbose = configuration.verbose if configuration else False
def handle(self, error: BaseException) -> None:
if self.verbose:
raise error
elif isinstance(error, KeyboardInterrupt):
# User intentionally interrupts the program. Ignore the exception and exit.
raise SystemExit(0)
elif isinstance(error, SystemExit):
raise SystemExit(error.code)
elif isinstance(error, NoRegionError):
self.printer.error('ERROR: AWS region has not been found.')
self.printer.error('Please pass the region using the --region argument. Example:')
self.printer.error('$ aws-kinesis-consumer --stream-name MyStream --region us-east-1')
raise SystemExit(1)
elif isinstance(error, (NoCredentialsError, PartialCredentialsError, CredentialRetrievalError)):
self.printer.error('ERROR: AWS credentials have not been found.')
self.printer.error('Please pass the credentials using the following environment variables :')
self.printer.error('AWS_ACCESS_KEY_ID')
self.printer.error('AWS_SECRET_ACCESS_KEY')
self.printer.error('AWS_SESSION_TOKEN (optional)')
raise SystemExit(1)
elif ErrorHandler.is_client_error_with_code(error, 'ExpiredTokenException'):
self.printer.error('ERROR: AWS session token has expired.')
self.printer.error('Please refresh the AWS credentials.')
raise SystemExit(1)
elif ErrorHandler.is_client_error_with_code(error, 'ResourceNotFoundException'):
self.printer.error('ERROR: the Kinesis Stream has not been found.')
self.printer.error(error.response.get('Error', {}).get('Message', 'Unknown'))
self.printer.error('Hint: verify the account id, the stream name, and the AWS region.')
raise SystemExit(1)
else:
self.printer.error(f'ERROR: the program stopped due to the following issue.')
self.printer.error(repr(error))
raise SystemExit(1)
@staticmethod
def is_client_error_with_code(error: BaseException, error_code: str) -> bool:
if not isinstance(error, ClientError):
return False
if 'Error' not in error.response:
return False
if 'Code' not in error.response['Error']:
return False
return error.response['Error']['Code'] == error_code
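# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Funnels a top-level exception through ErrorHandler so the CLI exits with a clean
# message. The stand-in printer below only mimics the error() method the handler calls;
# the package's real Printer may be constructed differently.
class _StdoutPrinter:
    def error(self, message: str) -> None:
        print(message)
def _example_handle_missing_region():
    handler = ErrorHandler(printer=_StdoutPrinter())
    try:
        raise NoRegionError()
    except BaseException as error:
        handler.handle(error)  # prints the hint and raises SystemExit(1)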
| 47.112903 | 119 | 0.675796 |
f21043ffe31bf9189f826f7767eb6aa748b6c8fa | 1,927 | py | Python | Models/_utils/time.py | schmocker/Pyjamas | 52a72d6e8b915f77a2194d4e7d53c46d0ec28c17 | ["MIT"] | 2 | 2018-05-31T15:02:08.000Z | 2018-07-11T11:02:44.000Z | Models/_utils/time.py | schmocker/Pyjamas | 52a72d6e8b915f77a2194d4e7d53c46d0ec28c17 | ["MIT"] | null | null | null | Models/_utils/time.py | schmocker/Pyjamas | 52a72d6e8b915f77a2194d4e7d53c46d0ec28c17 | ["MIT"] | null | null | null |
import time
import datetime
from pytz import timezone
import calendar
import pytz
# always use utc time from time.time between models
def utc_time2datetime(utc_time, tz=None):
utc_datetime = datetime.datetime.fromtimestamp(utc_time)
if tz is None:
tz_datetime = utc_datetime.astimezone(timezone('utc'))
else:
tz_datetime = utc_datetime.astimezone(tz)
return tz_datetime
def datetime2utc_time(datetime):
if datetime.tzinfo is None:
datetime = datetime.replace(tzinfo=timezone('utc'))
utc_datetime = datetime.astimezone(timezone('utc')).replace(tzinfo=None)
utc_timetuple = utc_datetime.timetuple()
utc_time = calendar.timegm(utc_timetuple) + datetime.microsecond / 1E6
return utc_time
# Example
def example():
print(f"All time zones: {pytz.all_timezones}")
print("\nExample time")
t = time.time()
print(f"utc_timetime: {t} -> " + time.strftime("%Y-%m-%d %H:%M:%S+00:00", time.gmtime(t)))
print("\nFrom time to upc datetime and back to time")
utc_datetime = utc_time2datetime(t)
print(f"utc_datetime: {utc_datetime}")
utc_time = datetime2utc_time(utc_datetime)
print(f"utc_timetime: {utc_time} -> " + time.strftime("%Y-%m-%d %H:%M:%S+00:00", time.gmtime(utc_time)))
print("\nFrom time to Europe/Brussels datetime and back to time")
local_datetime = utc_time2datetime(t, timezone('Europe/Brussels'))
print(f"loc_datetime: {local_datetime}")
utc_time2 = datetime2utc_time(local_datetime)
print(f"utc_timetime: {utc_time2} -> " + time.strftime("%Y-%m-%d %H:%M:%S+00:00", time.gmtime(utc_time2)))
print("\nFrom timestring > 2038 to datetime (only 64 bit)")
t_string = 'Fri Jun 22 10:55:01 2060'
t_tuple = time.strptime(t_string)
t = calendar.timegm(t_tuple)
dt = utc_time2datetime(t)
print(dt)
if __name__ == "__main__":
example()
| 33.807018 | 111 | 0.677218 |
d5bf974e33ebcc6c2591c752524119293ed89709 | 100,604 | py | Python | lib/python2.7/matplotlib/mlab.py | ashley8jain/IITD-complaint-system-web | 21a94601cba710f558d1689b87cfc391a1541c9f | ["BSD-3-Clause"] | 1 | 2017-01-25T00:38:48.000Z | 2017-01-25T00:38:48.000Z | lib/python2.7/matplotlib/mlab.py | ashley8jain/IITD-complaint-system-web | 21a94601cba710f558d1689b87cfc391a1541c9f | ["BSD-3-Clause"] | null | null | null | lib/python2.7/matplotlib/mlab.py | ashley8jain/IITD-complaint-system-web | 21a94601cba710f558d1689b87cfc391a1541c9f | ["BSD-3-Clause"] | null | null | null |
"""
Numerical python functions written for compatibility with MATLAB
commands with the same names.
MATLAB compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
    Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
    Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in MATLAB, but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a MATLAB function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
:meth:`contiguous_regions`
return the indices of the regions spanned by some logical mask
:meth:`cross_from_below`
return the indices where a 1D array crosses a threshold from below
:meth:`cross_from_above`
return the indices where a 1D array crosses a threshold from above
record array helper functions
-------------------------------
A collection of helper methods for numpyrecord arrays
.. _htmlonly:
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`recs_join`
a simple join of multiple recarrays using a single column as a key
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g. rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`save`
save ASCII file - use numpy.savetxt
"""
from __future__ import division
import csv, warnings, copy, os, operator
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
from matplotlib import docstring
def logspace(xmin,xmax,N):
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def window_none(x):
"No window function; simply return x"
return x
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis == 0 or axis is None or x.ndim <= 1:
return x - x.mean(axis)
ind = [slice(None)] * x.ndim
ind[axis] = np.newaxis
return x - x.mean(axis)[ind]
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
else:
y = x
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x)<NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y)<NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to//2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
Pxy = np.zeros((numFreqs, n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i]+NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i]+NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2.
Pxy /= (np.abs(windowVals)**2).sum()
# Also include scaling factors for one-sided densities and dividing by the
# sampling frequency, if desired. Scale everything, except the DC component
# and the NFFT/2 component:
Pxy[1:-1] *= scaling_factor
# MATLAB divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
Pxy /= Fs
t = 1./Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[numFreqs//2:] - Fs, freqs[:numFreqs//2]))
Pxy = np.concatenate((Pxy[numFreqs//2:, :], Pxy[:numFreqs//2, :]), 0)
return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(PSD=cbook.dedent("""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
    Must be even; a power of 2 is most efficient. The default value is 256.
This should *NOT* be used to get zero padding, or the scaling of the
result will be incorrect. Use *pad_to* for this instead.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
MATLAB, where the *detrend* parameter is a vector, in
matplotlib is it a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MATLAB compatibility.
"""))
@docstring.dedent_interpd
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The power spectral density by Welch's average periodogram method.
The vector *x* is divided into *NFFT* length blocks. Each block
is detrended by the function *detrend* and windowed by the function
*window*. *noverlap* gives the length of the overlap between blocks.
The absolute(fft(block))**2 of each segment are averaged to compute
*Pxx*, with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxx*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
return Pxx.real,freqs
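# Hedged usage sketch (added for illustration; not part of the original module): estimate
# the spectrum of a noisy 10 Hz sine sampled at 1 kHz and locate the dominant frequency.
# All parameter values are arbitrary.
def _psd_example():
    Fs = 1000.
    t = np.arange(0., 2., 1./Fs)
    x = np.sin(2*np.pi*10.*t) + 0.5*np.random.randn(len(t))
    Pxx, freqs = psd(x, NFFT=1024, Fs=Fs)
    return freqs[Pxx.argmax()]   # roughly 10 Hz, to within one frequency bin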
@docstring.dedent_interpd
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The cross power spectral density by Welch's average periodogram
method. The vectors *x* and *y* are divided into *NFFT* length
blocks. Each block is detrended by the function *detrend* and
windowed by the function *window*. *noverlap* gives the length
of the overlap between blocks. The product of the direct FFTs
of *x* and *y* are averaged over each segment to compute *Pxy*,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxy*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
Pxy = Pxy.mean(axis=1)
return Pxy, freqs
@docstring.dedent_interpd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
length segments and the PSD of each section is computed. The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencies is returned. If *x* is complex then the complete
spectrum is returned.
%(PSD)s
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert(NFFT > noverlap)
Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real #Needed since helper implements generically
return Pxx, freqs, t
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
@docstring.dedent_interpd
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(PSD)s
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect,
since the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x)<2*NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
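# Hedged usage sketch (added for illustration; not part of the original module): coherence
# between a signal and a noisy, delayed copy of itself. Values are arbitrary.
def _cohere_example():
    Fs = 1000.
    t = np.arange(0., 4., 1./Fs)
    x = np.sin(2*np.pi*25.*t) + np.random.randn(len(t))
    y = np.roll(x, 10) + 0.5*np.random.randn(len(t))
    Cxy, f = cohere(x, y, NFFT=256, Fs=Fs)
    return Cxy, f   # Cxy is close to 1 at the 25 Hz tone, somewhat lower elsewhere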
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
u"""
Call signature::
Cxy, Phase, freqs = cohere_pairs( X, ij, ...)
Compute the coherence and phase for all pairs *ij*, in *X*.
*X* is a *numSamples* * *numCols* array
*ij* is a list of tuples. Each tuple is a pair of indexes into
the columns of X for which you want to compute coherence. For
example, if *X* has 64 columns, and you want to compute all
nonredundant pairs, define *ij* as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i,j) )
*preferSpeedOverMemory* is an optional bool. Defaults to true. If
False, limits the caching by only making one, rather than two,
complex cache arrays. This is useful if memory becomes critical.
Even when *preferSpeedOverMemory* is False, :func:`cohere_pairs`
    will still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
memory than if *preferSpeedOverMemory* is True. In my tests with
a 43000,64 array over all nonredundant pairs,
*preferSpeedOverMemory* = True delivered a 33% performance boost
on a 1.7GHZ Athlon with 512MB RAM compared with
*preferSpeedOverMemory* = False. But both solutions were more
than 10x faster than naively crunching all possible pairs through
:func:`cohere`.
Returns::
(Cxy, Phase, freqs)
where:
- *Cxy*: dictionary of (*i*, *j*) tuples -> coherence vector for
      that pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``.
Number of dictionary keys is ``len(ij)``.
- *Phase*: dictionary of phases of the cross spectral density at
each frequency for each pair. Keys are (*i*, *j*).
- *freqs*: vector of frequencies, equal in length to either the
coherence or phase vectors for any (*i*, *j*) key.
Eg., to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If :math:`N` is the
number of pairs, this function is :math:`O(N)` for most of the
heavy lifting, whereas calling cohere for each pair is
:math:`O(N^2)`. However, because of the caching, it is also more
memory intensive, making 2 additional complex arrays with
approximately the same number of elements as *X*.
See :file:`test/cohere_pairs_test.py` in the src tree for an
example script that shows that this :func:`cohere_pairs` and
:func:`cohere` give the same results for a given pair.
.. seealso::
:func:`psd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
allColumns = set()
for i,j in ij:
allColumns.add(i); allColumns.add(j)
Ncols = len(allColumns)
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
# cache the FFT of every windowed, detrended NFFT length segement
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones(NFFT, X.dtype))
ind = range(0, numRows-NFFT+1, NFFT-noverlap)
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
normVal = np.linalg.norm(windowVals)**2
    for i, iCol in enumerate(allColumns):
        progressCallback(i/Ncols, 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
FFTConjSlices[iCol] = np.conjugate(Slices)
Pxx[iCol] = np.divide(np.mean(abs(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
if numSlices>1: Pxy = np.mean(Pxy, axis=0)
#Pxy = np.divide(Pxy, normVal)
Pxy /= normVal
#Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Cxy[i,j] = abs(Pxy)**2 / (Pxx[i]*Pxx[j])
Phase[i,j] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
def entropy(y, bins):
r"""
Return the entropy of the data in *y*.
.. math::
\sum p_i \log_2(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n,bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1]-bins[0]
    S = -1.0*np.sum(p*np.log(p)) + np.log(delta)
#S = -1.0*np.sum(p*log(p))
return S
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def levypdf(x, gamma, alpha):
    "Return the levy pdf evaluated at *x* for params *gamma*, *alpha*"
    N = len(x)
    if N%2 != 0:
        raise ValueError, 'x must be an even length array; try\n' + \
                          'x = np.linspace(minx, maxx, N), where N is even'
    dx = x[1]-x[0]
    f = 1/(N*dx)*np.arange(-N/2, N/2, dtype=np.float_)
    ind = np.concatenate([np.arange(N/2, N, dtype=int),
                          np.arange(0, N/2, dtype=int)])
    df = f[1]-f[0]
    cfl = np.exp(-gamma*np.absolute(2*np.pi*f)**alpha)
    px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)
    return np.take(px, ind)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0];
dn = (dif == -1).nonzero()[0];
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
def prepca(P, frac=0):
"""
WARNING: this function is deprecated -- please see class PCA instead
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the MATLAB
R13 Neural Network Toolbox but is not found in later versions;
its successor seems to be called "processpcs".
"""
warnings.warn('This function is deprecated -- see class PCA instead')
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
class PCA:
def __init__(self, a):
"""
compute the SVD of a and store data for PCA. Use project to
project the data onto a reduced set of dimensions
Inputs:
*a*: a numobservations x numdims array
Attrs:
*a* a centered unit sigma version of input a
*numrows*, *numcols*: the dimensions of a
*mu* : a numdims array of means of a
        *sigma* : a numdims array of standard deviation of a
*fracs* : the proportion of variance of each of the principal components
*Wt* : the weight vector for projecting a numdims point or array into PCA space
*Y* : a projected into PCA space
The factor loadings are in the Wt factor, ie the factor
loadings for the 1st principal component are given by Wt[0]
"""
n, m = a.shape
if n<m:
raise RuntimeError('we assume data in a is organized with numrows>numcols')
self.numrows, self.numcols = n, m
self.mu = a.mean(axis=0)
self.sigma = a.std(axis=0)
a = self.center(a)
self.a = a
U, s, Vh = np.linalg.svd(a, full_matrices=False)
Y = np.dot(Vh, a.T).T
vars = s**2/float(len(s))
self.fracs = vars/vars.sum()
self.Wt = Vh
self.Y = Y
def project(self, x, minfrac=0.):
        'project x onto the principal axes, dropping any axes where fraction of variance<minfrac'
x = np.asarray(x)
ndims = len(x.shape)
if (x.shape[-1]!=self.numcols):
raise ValueError('Expected an array with dims[-1]==%d'%self.numcols)
Y = np.dot(self.Wt, self.center(x).T).T
mask = self.fracs>=minfrac
if ndims==2:
Yreduced = Y[:,mask]
else:
Yreduced = Y[mask]
return Yreduced
def center(self, x):
'center the data using the mean and sigma from training set a'
return (x - self.mu)/self.sigma
@staticmethod
def _get_colinear():
c0 = np.array([
0.19294738, 0.6202667 , 0.45962655, 0.07608613, 0.135818 ,
0.83580842, 0.07218851, 0.48318321, 0.84472463, 0.18348462,
0.81585306, 0.96923926, 0.12835919, 0.35075355, 0.15807861,
0.837437 , 0.10824303, 0.1723387 , 0.43926494, 0.83705486])
c1 = np.array([
-1.17705601, -0.513883 , -0.26614584, 0.88067144, 1.00474954,
-1.1616545 , 0.0266109 , 0.38227157, 1.80489433, 0.21472396,
-1.41920399, -2.08158544, -0.10559009, 1.68999268, 0.34847107,
-0.4685737 , 1.23980423, -0.14638744, -0.35907697, 0.22442616])
c2 = c0 + 2*c1
c3 = -3*c0 + 4*c1
a = np.array([c3, c0, c1, c2]).T
return a
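# Hedged usage sketch (added for illustration; not part of the original module): run PCA
# on the built-in colinear test data and keep only axes explaining at least 1% of variance.
def _pca_example():
    a = PCA._get_colinear()            # 20 observations x 4 variables
    pca = PCA(a)
    reduced = pca.project(a, minfrac=0.01)
    return pca.fracs, reduced.shape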
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the largest value of *x* less than or equal to
the *p* percentage point in the sequence is returned.
"""
# This implementation derived from scipy.stats.scoreatpercentile
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a)*fraction
scalar = True
if cbook.iterable(p):
scalar = False
per = np.array(p)
values = np.array(x).ravel() # copy
values.sort()
idxs = per /100. * (values.shape[0] - 1)
ai = idxs.astype(np.int)
bi = ai + 1
frac = idxs % 1
# handle cases where attempting to interpolate past last index
cond = bi >= len(values)
if scalar:
if cond:
ai -= 1
bi -= 1
frac += 1
else:
ai[cond] -= 1
bi[cond] -= 1
frac[cond] += 1
return _interpolate(values[ai],values[bi],frac)
def prctile_rank(x, p):
"""
Return the rank for each element in *x*, return the rank
0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M,N))*0.
for i in range(int(M*N*frac)):
x = np.random.randint(0,M-1)
y = np.random.randint(0,N-1)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w,v);
if ( c1 <= 0 ):
return dist(p, s0);
c2 = np.dot(v,v)
if ( c2 <= c1 ):
return dist(p, s1);
b = c1 / c2
pb = s0 + b * v;
return dist(p, pb)
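# Sketch for dist_point_to_segment with arbitrary example coordinates:
# the point (1, 1) is at distance 1.0 from the segment (0, 0)-(2, 0).
def _dist_point_to_segment_example():
    return dist_point_to_segment((1.0, 1.0), (0.0, 0.0), (2.0, 0.0))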
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
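# Sketch for segments_intersect with arbitrary example coordinates: the two
# diagonals of the unit square cross, two parallel horizontal segments do not.
def _segments_intersect_example():
    crossing = segments_intersect(((0, 0), (1, 1)), ((0, 1), (1, 0)))  # True
    parallel = segments_intersect(((0, 0), (1, 0)), ((0, 1), (1, 1)))  # False
    return crossing, parallel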
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
def liaupunov(x, fprime):
"""
*x* is a very long trajectory from a map, and *fprime* returns the
derivative of *x*.
This function will be removed from matplotlib.
Returns :
.. math::
        \lambda = \\frac{1}{n}\\sum \\ln|f'(x_i)|
.. seealso::
Lyapunov Exponent
Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
`Wikipedia article on Lyapunov Exponent
<http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.
.. note::
What the function here calculates may not be what you really want;
*caveat emptor*.
It also seems that this function's name is badly misspelled.
"""
warnings.warn("This does not belong in matplotlib and will be removed", DeprecationWarning) # 2009/06/13
return np.mean(np.log(np.absolute(fprime(x))))
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming data arrive.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (eg
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xy = np.asarray([(x,y),])
self.dataLim.update_from_data_xy(xy, None)
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in self.callbackd.items():
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
Get the last *x*, *y* or *None*. *None* if no data set.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
        Update *dataLim* to reflect the data currently in the FIFO.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_from_data(x, y, True)
self.dataLim.update_numerix(x, y, True)
def movavg(x,n):
"""
Compute the len(*n*) moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
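# Sketch for movavg on an arbitrary 5-point ramp: the 'valid' convolution
# returns len(x) - n + 1 averaged points.
def _movavg_example():
    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    return movavg(x, 3)   # -> array([ 2.,  3.,  4.])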
def save(fname, X, fmt='%.18e',delimiter=' '):
"""
Save the data in *X* to file *fname* using *fmt* string to convert the
data to strings.
Deprecated. Use numpy.savetxt.
*fname* can be a filename or a file handle. If the filename ends
in '.gz', the file is automatically saved in compressed gzip
format. The :func:`load` function understands gzipped files
transparently.
Example usage::
save('test.out', X) # X is an array
save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays
save('test2.out', x) # x is 1D
save('test3.out', x, fmt='%1.4e') # use exponential notation
*delimiter* is used to separate the fields, eg. *delimiter* ','
for comma-separated values.
"""
warnings.warn("use numpy.savetxt", DeprecationWarning) # 2009/06/13
if cbook.is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname,'wb')
else:
fh = file(fname,'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
origShape = None
if X.ndim == 1:
origShape = X.shape
X.shape = len(X), 1
for row in X:
fh.write(delimiter.join([fmt%val for val in row]) + '\n')
if origShape is not None:
X.shape = origShape
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
usecols=None, unpack=False, dtype=np.float_):
"""
Load ASCII data from *fname* into an array and return the array.
Deprecated: use numpy.loadtxt.
The data must be regular, same number of values in every row
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'.
matfile data is not supported; for that, use :mod:`scipy.io.mio`
module.
Example usage::
X = load('test.dat') # data in two columns
t = X[:,0]
y = X[:,1]
Alternatively, you can do the same with "unpack"; see below::
X = load('test.dat') # a matrix of data
x = load('test.dat') # a single column of data
- *comments*: the character used to indicate the start of a comment
in the file
    - *delimiter* is a string-like character used to separate values
in the file. If *delimiter* is unspecified or *None*, any
whitespace string is a separator.
- *converters*, if not *None*, is a dictionary mapping column number to
a function that will convert that column to a float (or the optional
*dtype* if specified). Eg, if column 0 is a date string::
converters = {0:datestr2num}
- *skiprows* is the number of rows from the top to skip.
- *usecols*, if not *None*, is a sequence of integer column indexes to
extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
just the 2nd, 5th and 6th columns
- *unpack*, if *True*, will transpose the matrix allowing you to unpack
into named arguments on the left hand side::
t,y = load('test.dat', unpack=True) # for two column data
x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
- *dtype*: the array will have this dtype. default: ``numpy.float_``
.. seealso::
See :file:`examples/pylab_examples/load_converter.py` in the source tree
Exercises many of these options.
"""
warnings.warn("use numpy.loadtxt", DeprecationWarning) # 2009/06/13
if converters is None: converters = {}
fh = cbook.to_filehandle(fname)
X = []
if delimiter==' ':
# space splitting is a special case since x.split() is what
# you want, not x.split(' ')
def splitfunc(x):
return x.split()
else:
def splitfunc(x):
return x.split(delimiter)
converterseq = None
for i,line in enumerate(fh):
if i<skiprows: continue
line = line.split(comments, 1)[0].strip()
if not len(line): continue
if converterseq is None:
converterseq = [converters.get(j,float)
for j,val in enumerate(splitfunc(line))]
if usecols is not None:
vals = splitfunc(line)
row = [converterseq[j](vals[j]) for j in usecols]
else:
row = [converterseq[j](val)
for j,val in enumerate(splitfunc(line))]
thisLen = len(row)
X.append(row)
X = np.array(X, dtype)
r,c = X.shape
if r==1 or c==1:
X.shape = max(r,c),
if unpack: return X.transpose()
else: return X
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
        return np.exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(map(fn,*args))
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
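# Sketch comparing the flat-norm helpers on an arbitrary 2-vector: l1norm
# sums absolute values, l2norm is the Euclidean norm, and norm_flat with
# p='Infinity' returns the largest absolute entry.
def _norm_helpers_example():
    a = np.array([3.0, -4.0])
    return l1norm(a), l2norm(a), norm_flat(a, p='Infinity')   # 7.0, 5.0, 4.0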
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
    setting the keyword *closed* = 0; in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
      array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
if xfin == None:
xfin = xini + 0.0
xini = 0.0
if delta == None:
delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr (number, base = 2, padding = 0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if number < base: \
return (padding - 1) * chars [0] + chars [int (number)]
max_exponent = int (math.log (number)/math.log (base))
max_power = long (base) ** max_exponent
lead_digit = int (number/max_power)
return chars [lead_digit] + \
base_repr (number - max_power * lead_digit, base, \
max (padding - 1, max_exponent))
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1))
digits = map (operator.mod, shifts, max_length * [2])
    if not digits.count (1): return '0'
digits = digits [digits.index (1):]
return ''.join (map (repr, digits)).replace('L','')
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
    This is a _slow_ function, but it is guaranteed to return the correct
    integer value if the input is an exact integer power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
def isvector(X):
"""
Like the MATLAB function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
    meaning it has at most one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except for one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
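# Sketch for isvector with arbitrary example shapes: a (1, 5, 1) array has a
# single non-singleton axis and counts as a vector, a (2, 3) array does not.
def _isvector_example():
    return isvector(np.zeros((1, 5, 1))), isvector(np.zeros((2, 3)))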
### end fperez numutils code
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError, "number of arrays do not match number of names"
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = map(np.asarray, arrs)
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError, "dtypes must be None, a single dtype or a list"
newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
newrec = np.recarray(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return newrec
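# Sketch for rec_append_fields: append one computed column to a tiny record
# array (the field names and values are arbitrary examples).
def _rec_append_fields_example():
    r = np.rec.fromrecords([(1, 2.0), (2, 4.0)], names='id,price')
    return rec_append_fields(r, 'double_price', r.price * 2)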
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
Nr = len(rec)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.recarray(rec.shape, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return newrec
def rec_keep_fields(rec, names):
"""
Return a new numpy record array with only fields listed in names
"""
if cbook.is_string_like(names):
names = names.split(',')
arrays = []
for name in names:
arrays.append(rec[name])
return np.rec.fromarrays(arrays, names=names)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. eg ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
    The returned record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = rowd.keys()
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = zip(*stats)
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
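# Sketch for rec_groupby: group a tiny record array by 'product' and attach
# a count and a mean of 'sales' (names and numbers are arbitrary examples).
def _rec_groupby_example():
    r = np.rec.fromrecords(
        [('a', 1.0), ('a', 3.0), ('b', 5.0)], names='product,sales')
    stats = (('sales', len, 'numsales'), ('sales', np.mean, 'avgsale'))
    return rec_groupby(r, ('product',), stats)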
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
        dt2 = r2.dtype[name]
        assert dt2.kind == dt1.kind
        if dt1.itemsize > dt2.itemsize:
            return (name, dt1.descr[0][1])
        else:
            return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.recarray((common_len + left_len + right_len,), dtype=newdtype)
if defaults is not None:
for thiskey in defaults:
if thiskey not in newdtype.names:
warnings.warn('rec_join defaults key="%s" not in new dtype names "%s"'%(
thiskey, newdtype.names))
for name in newdtype.names:
dt = newdtype[name]
if dt.kind in ('f', 'i'):
newrec[name] = 0
    if jointype != 'inner' and defaults is not None: # fill in the defaults en masse
newrec_fields = newrec.dtype.fields.keys()
for k, v in defaults.items():
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return newrec
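# Sketch for rec_join: an inner join of two tiny record arrays on a shared
# 'id' key (names and values are arbitrary examples); only ids present in
# both arrays survive.
def _rec_join_example():
    r1 = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,price')
    r2 = np.rec.fromrecords([(2, 200), (3, 300)], names='id,volume')
    return rec_join('id', r1, r2, jointype='inner')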
def recs_join(key, name, recs, jointype='outer', missing=0., postfixes=None):
"""
Join a sequence of record arrays on single column key.
This function only joins a single column of the multiple record arrays
*key*
is the column name that acts as a key
*name*
is the name of the column that we want to join
*recs*
is a list of record arrays to join
*jointype*
is a string 'inner' or 'outer'
*missing*
is what any missing field is replaced by
*postfixes*
if not None, a len recs sequence of postfixes
returns a record array with columns [rowkey, name0, name1, ... namen-1].
or if postfixes [PF0, PF1, ..., PFN-1] are supplied,
[rowkey, namePF0, namePF1, ... namePFN-1].
Example::
r = recs_join("date", "close", recs=[r0, r1], missing=0.)
"""
results = []
aligned_iters = cbook.align_iterators(operator.attrgetter(key), *[iter(r) for r in recs])
def extract(r):
if r is None: return missing
else: return r[name]
if jointype == "outer":
for rowkey, row in aligned_iters:
results.append([rowkey] + map(extract, row))
elif jointype == "inner":
for rowkey, row in aligned_iters:
if None not in row: # throw out any Nones
results.append([rowkey] + map(extract, row))
if postfixes is None:
postfixes = ['%d'%i for i in range(len(recs))]
names = ",".join([key] + ["%s%s" % (name, postfix) for postfix in postfixes])
return np.rec.fromrecords(results, names=names)
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=False):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
- *converterd*: if not *None*, is a dictionary mapping column number or
munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g. '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
parsedate = dateutil.parser.parse
fh = cbook.to_filehandle(fname)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def next(self):
return self.fix(self.fh.next())
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
while 1:
# skip past any comments and consume one line of column header
row = reader.next()
if len(row) and row[0].startswith(comments):
continue
break
# iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row): continue
if row[0].startswith(comments): continue
# Ensure that the row returned always has the same nr of elements
row.extend([''] * (len(converters) - len(row)))
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
def __hash__(self):
"""
override the hash function of any of the formatters, so that we don't create duplicate excel format styles
"""
return hash(self.__class__)
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def __hash__(self):
return hash((self.__class__, self.precision, self.scale))
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def __hash__(self):
return hash((self.__class__, self.fmt))
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3, fields=None):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
*fields* : if not None, a list of field names to print. fields
can be a list of strings like ['field1', 'field2'] or a single
comma separated string like 'field1,field2'
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if fields is not None:
r = rec_keep_fields(r, fields)
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(map(len,map(str,column))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None, withheader=True):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
*withheader*: if withheader is False, do not write the attribute
names in the first row
for formatd type FormatFloat, we override the precision to store
full precision floats in the CSV file
.. seealso::
:func:`csv2rec`
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
if r.ndim != 1:
raise ValueError('rec2csv only operates on 1 dimensional recarrays')
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'wb', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
if withheader:
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
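# Sketch of a rec2csv / csv2rec round trip through a temporary file; the
# file name, field names and values are arbitrary examples.
def _rec2csv_roundtrip_example():
    import tempfile
    r = np.rec.fromrecords([(1, 2.5, 'a'), (2, 3.5, 'b')],
                           names='id,price,label')
    fname = tempfile.mktemp(suffix='.csv')
    rec2csv(r, fname)
    return csv2rec(fname)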
def griddata(x,y,z,xi,yi,interp='nn'):
"""
``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
surface at the points specified by (*xi*, *yi*) to produce
*zi*. *xi* and *yi* must describe a regular grid, can be either 1D
or 2D, but must be monotonically increasing.
A masked array is returned if any grid points are outside convex
hull defined by input data (no extrapolation is done).
If interp keyword is set to '`nn`' (default),
uses natural neighbor interpolation based on Delaunay
triangulation. By default, this algorithm is provided by the
:mod:`matplotlib.delaunay` package, written by Robert Kern. The
triangulation algorithm in this package is known to fail on some
nearly pathological cases. For this reason, a separate toolkit
    (:mod:`mpl_toolkits.natgrid`) has been created that provides a more
    robust algorithm for triangulation and interpolation. This
toolkit is based on the NCAR natgrid library, which contains code
that is not redistributable under a BSD-compatible license. When
installed, this function will use the :mod:`mpl_toolkits.natgrid`
algorithm, otherwise it will use the built-in
:mod:`matplotlib.delaunay` package.
If the interp keyword is set to '`linear`', then linear interpolation
is used instead of natural neighbor. In this case, the output grid
is assumed to be regular with a constant grid spacing in both the x and
y directions. For regular grids with nonconstant grid spacing, you
must use natural neighbor interpolation. Linear interpolation is only valid if
    :mod:`matplotlib.delaunay` package is used - :mod:`mpl_toolkits.natgrid`
only provides natural neighbor interpolation.
The natgrid matplotlib toolkit can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
"""
try:
from mpl_toolkits.natgrid import _natgrid, __version__
_use_natgrid = True
except ImportError:
import matplotlib.delaunay as delaunay
from matplotlib.delaunay import __version__
_use_natgrid = False
if not griddata._reported:
if _use_natgrid:
verbose.report('using natgrid version %s' % __version__)
else:
verbose.report('using delaunay version %s' % __version__)
griddata._reported = True
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if not len(x)==len(y)==len(z):
raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
if hasattr(z,'mask'):
# make sure mask is not a scalar boolean array.
if z.mask.ndim:
x = x.compress(z.mask == False)
y = y.compress(z.mask == False)
z = z.compressed()
if _use_natgrid: # use natgrid toolkit if available.
if interp != 'nn':
raise ValueError("only natural neighor interpolation"
" allowed when using natgrid toolkit in griddata.")
if xi.ndim == 2:
xi = xi[0,:]
yi = yi[:,0]
# override default natgrid internal parameters.
_natgrid.seti('ext',0)
_natgrid.setr('nul',np.nan)
# cast input arrays to doubles (this makes a copy)
x = x.astype(np.float)
y = y.astype(np.float)
z = z.astype(np.float)
xo = xi.astype(np.float)
yo = yi.astype(np.float)
if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
raise ValueError, 'output grid defined by xi,yi must be monotone increasing'
# allocate array for output (buffer will be overwritten by nagridd)
zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
_natgrid.natgridd(x,y,z,xo,yo,zo)
else: # use Robert Kern's delaunay package from scikits (default)
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if xi.ndim == 1:
xi,yi = np.meshgrid(xi,yi)
# triangulate data
tri = delaunay.Triangulation(x,y)
# interpolate data
if interp == 'nn':
interp = tri.nn_interpolator(z)
zo = interp(xi,yi)
elif interp == 'linear':
# make sure grid has constant dx, dy
dx = xi[0,1:]-xi[0,0:-1]
dy = yi[1:,0]-yi[0:-1,0]
epsx = np.finfo(xi.dtype).resolution
epsy = np.finfo(yi.dtype).resolution
if dx.max()-dx.min() > epsx or dy.max()-dy.min() > epsy:
raise ValueError("output grid must have constant spacing"
" when using interp='linear'")
interp = tri.linear_interpolator(z)
zo = interp[yi.min():yi.max():complex(0,yi.shape[0]),
xi.min():xi.max():complex(0,xi.shape[1])]
else:
raise ValueError("interp keyword must be one of"
" 'linear' (for linear interpolation) or 'nn'"
" (for natural neighbor interpolation). Default is 'nn'.")
# mask points on grid outside convex hull of input data.
if np.any(np.isnan(zo)):
zo = np.ma.masked_where(np.isnan(zo),zo)
return zo
griddata._reported = False
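# Sketch for griddata: interpolate scattered samples of z = x*y onto a small
# regular grid (sample count and grid resolution are arbitrary examples).
def _griddata_example():
    x = np.random.uniform(-2, 2, 100)
    y = np.random.uniform(-2, 2, 100)
    z = x * y
    xi = np.linspace(-2, 2, 25)
    yi = np.linspace(-2, 2, 25)
    return griddata(x, y, z, xi, yi, interp='nn')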
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of points
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
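# Sketch for slopes: estimate dy/dx of a sine curve and compare against the
# analytic cosine (the grid density is an arbitrary example); the interior
# error is small because the estimate is parabola-based.
def _slopes_example():
    x = np.linspace(0, 2 * np.pi, 50)
    y = np.sin(x)
    yp = slopes(x, y)
    return np.max(np.abs(yp[1:-1] - np.cos(x)[1:-1]))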
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were:
not an academic journal but once in a while something serious
and original comes in adding that this was
"apparently a real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
nx = ma
else:
nx = np
xs = nx.asarray(xs)
ys = nx.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*nx.ones(2*Nx)
y = nx.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
nx = ma
else:
nx = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*nx.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*nx.ones(Nx)
x = nx.concatenate( (x, x[::-1]) )
y = nx.concatenate( (yupper, ylower[::-1]) )
return x,y
def is_closed_polygon(X):
"""
Tests whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
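# Sketch for contiguous_regions on an arbitrary boolean mask: each returned
# (ind0, ind1) pair is a half-open run of consecutive True values.
def _contiguous_regions_example():
    mask = np.array([False, True, True, False, True])
    return contiguous_regions(mask)   # -> [(1, 3), (4, 5)]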
def cross_from_below(x, threshold):
"""
return the indices into *x* where *x* crosses some threshold from
below, eg the i's where::
x[i-1]<threshold and x[i]>=threshold
Example code::
import matplotlib.pyplot as plt
t = np.arange(0.0, 2.0, 0.1)
s = np.sin(2*np.pi*t)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, s, '-o')
ax.axhline(0.5)
ax.axhline(-0.5)
ind = cross_from_below(s, 0.5)
ax.vlines(t[ind], -1, 1)
ind = cross_from_above(s, -0.5)
ax.vlines(t[ind], -1, 1)
plt.show()
.. seealso::
:func:`cross_from_above` and :func:`contiguous_regions`
"""
x = np.asarray(x)
threshold = threshold
ind = np.nonzero( (x[:-1]<threshold) & (x[1:]>=threshold))[0]
if len(ind): return ind+1
else: return ind
def cross_from_above(x, threshold):
"""
return the indices into *x* where *x* crosses some threshold from
    above, eg the i's where::
x[i-1]>threshold and x[i]<=threshold
.. seealso::
:func:`cross_from_below` and :func:`contiguous_regions`
"""
x = np.asarray(x)
ind = np.nonzero( (x[:-1]>=threshold) & (x[1:]<threshold))[0]
if len(ind): return ind+1
else: return ind
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
    like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
    Computes the distance between a set of successive points in *N* dimensions,
    where *X* is an *M* x *N* array or matrix. The distances between
    successive rows are computed. Distance is the standard Euclidean
distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
    Computes the distance travelled along a polygonal curve in *N* dimensions,
    where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
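# Sketch for path_length (and distances_along_curve) on an arbitrary example
# polyline: three unit steps around the unit square give [0, 1, 2, 3].
def _path_length_example():
    X = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    return path_length(X)   # -> array([ 0.,  1.,  2.,  3.])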
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
| 31.626533 | 141 | 0.609359 |
4d7a14a01a1ef7a1a96184a023324e160776a21a | 13,390 | py | Python |
metaheuristics/others/HHOQL_SCP.py | m-arnao-molina/SCA-QL-SARSA | 65f859fce96bb8c11509238c2f7a5d8dd2ad042a | ["MIT"] | null | null | null |
metaheuristics/others/HHOQL_SCP.py | m-arnao-molina/SCA-QL-SARSA | 65f859fce96bb8c11509238c2f7a5d8dd2ad042a | ["MIT"] | null | null | null |
metaheuristics/others/HHOQL_SCP.py | m-arnao-molina/SCA-QL-SARSA | 65f859fce96bb8c11509238c2f7a5d8dd2ad042a | ["MIT"] | null | null | null |
# Utils
import sys
import os
import settings
from envs import env
import numpy as np
import time
from datetime import datetime
from pathlib import Path
import math
# SQL
import sqlalchemy as db
import psycopg2
import json
import pickle
import zlib
import database.Database as Database
# MH
from Problem.util import read_instance as Instance
from Problem import SCP as Problem
from Metrics import Diversidad as dv
# ML
from MachineLearning.QLearning import Q_Learning as QL
# RepairGPU
from Problem.util import SCPProblem
# Environment variable definitions
workdir = os.path.abspath(os.getcwd())
workdirInstance = workdir+env('DIR_INSTANCES')
connect = Database.Database()
transferFunction = ['V1', 'V2', 'V3', 'V4', 'S1', 'S2', 'S3', 'S4']
operatorBinarization = ['Standard','Complement','Elitist','Static','ElitistRoulette']
DS_actions = [tf + "," + ob for tf in transferFunction for ob in operatorBinarization]
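# Added note (comment only; not in the original script): DS_actions is the
# Cartesian product of the 8 transfer functions and 5 binarization operators,
# i.e. 40 action strings such as 'V1,Standard', 'V1,Complement', ...,
# 'S4,ElitistRoulette'. The Q-learning agent below selects one of these
# combined discretization schemes per iteration.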
def HHOQL_SCP(id,instance_file,instance_dir,population,maxIter,discretizacionScheme,ql_alpha,ql_gamma,repair,policy,rewardType,qlAlphaType):
instance_path = workdirInstance + instance_dir + instance_file
if not os.path.exists(instance_path):
        print(f'Instance not found: {instance_path}')
return False
instance = Instance.Read(instance_path)
problemaGPU = SCPProblem.SCPProblem(instance_path)
pondRestricciones = 1/np.sum(problemaGPU.instance.get_r(), axis=1)
matrizCobertura = np.array(instance.get_r())
vectorCostos = np.array(instance.get_c())
dim = len(vectorCostos)
pob = population
maxIter = maxIter
    DS = discretizacionScheme #[v1,Standard]
    #Diversity variables
    diversidades = []
    maxDiversidades = np.zeros(7) #size 7 because we compute 7 diversity measures
    PorcentajeExplor = []
    PorcentajeExplot = []
    state = []
    #Generate the initial population
    poblacion = np.random.uniform(low=0.0, high=1.0, size=(pob,dim))
    #matrixBin = np.zeros((pob,dim))
    matrixBin = np.random.randint(low=0, high=2, size = (pob,dim))
    fitness = np.zeros(pob)
    solutionsRanking = np.zeros(pob)
    # QLEARNING
    agente = QL(ql_gamma, DS_actions, 2, qlAlphaType, rewardType, maxIter, qlAlpha = ql_alpha)
    DS = agente.getAccion(0,policy)
    #Binarize and compute fitness
    matrixBin,fitness,solutionsRanking,numReparaciones = Problem.SCP(poblacion,matrixBin,solutionsRanking,vectorCostos,matrizCobertura,DS_actions[DS],repair,problemaGPU,pondRestricciones)
    #Compute diversity and state
    diversidades, maxDiversidades, PorcentajeExplor, PorcentajeExplot, state = dv.ObtenerDiversidadYEstado(matrixBin,maxDiversidades)
    #QLEARNING
    CurrentState = state[0] #The state is measured from the "DimensionalHussain" diversity
    #Fixed HHO parameters
    beta=1.5 #Scalar, as in the paper
    sigma=(math.gamma(1+beta)*math.sin(math.pi*beta/2)/(math.gamma((1+beta)/2)*beta*2**((beta-1)/2)))**(1/beta) #Scalar
    LB = -10 #Lower bound of the continuous values
    UB = 10 #Upper bound of the continuous values
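    # Added note (assumption; provenance is not stated in this file): beta and
    # sigma follow Mantegna's algorithm for Levy-flight step generation as used
    # in Harris Hawks Optimization, and the LF terms further below draw steps of
    # the form 0.01 * u * sigma / |v|**(1/beta), with u and v sampled here from
    # U(0, 1).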
inicio = datetime.now()
timerStartResult = time.time()
memory = []
for iter in range(0, maxIter):
#print(iter)
processTime = time.process_time()
timerStart = time.time()
#HHOQL
        E0 = np.random.uniform(low=-1.0,high=1.0,size=pob) #vector of size pob
        E = 2 * E0 * (1-(iter/maxIter)) #vector of size pob
        Eabs = np.abs(E)
        q = np.random.uniform(low=0.0,high=1.0,size=pob) #vector of size pob
        r = np.random.uniform(low=0.0,high=1.0,size=pob) #vector of size pob
        Xm = np.mean(poblacion,axis=0)
        bestRowAux = solutionsRanking[0]
        Best = poblacion[bestRowAux]
        BestBinary = matrixBin[bestRowAux]
        BestFitness = np.min(fitness)
        #print(f'Eabs: {Eabs}')
        #eq. 1.1
        indexCond1_1 = np.intersect1d(np.argwhere(Eabs>=1),np.argwhere(q>=0.5)) #indices of the solutions to which eq. 1.1 must be applied
#print("indexCond1_1")
if indexCond1_1.shape[0] != 0:
Xrand = poblacion[np.random.randint(low=0, high=pob, size=indexCond1_1.shape[0])] #Me entrega un conjunto de soluciones rand de tam indexCond1_1.shape[0] (osea los que cumplen la cond11)
poblacion[indexCond1_1] = Xrand - np.multiply(np.random.uniform(low= 0.0, high=1.0, size=indexCond1_1.shape[0]), np.abs(Xrand- (2* np.multiply(np.random.uniform(low= 0.0, high=1.0, size = indexCond1_1.shape[0]),poblacion[indexCond1_1].T).T)).T).T #Aplico la ecu 1.1 solamente a las que cumplen las condiciones np.argwhere(Eabs>=1),np.argwhere(q>=0.5)
        #eq. 1.2
        indexCond1_2 = np.intersect1d(np.argwhere(Eabs>=1),np.argwhere(q<0.5)) #indices of the solutions to which eq. 1.2 must be applied
        #print("indexCond1_2")
if indexCond1_2.shape[0] != 0:
array_Xm = np.zeros(poblacion[indexCond1_2].shape)
array_Xm = array_Xm + Xm
poblacion[indexCond1_2] = ((Best - array_Xm).T - np.multiply( np.random.uniform(low= 0.0, high=1.0, size = indexCond1_2.shape[0]), (LB + np.random.uniform(low= 0.0, high=1.0, size = indexCond1_2.shape[0]) * (UB-LB)) )).T
        #eq. 4
        indexCond4 = np.intersect1d(np.argwhere(Eabs>=0.5),np.argwhere(r>=0.5)) #indices of the solutions to which eq. 4 must be applied
        #print("indexCond4")
if indexCond4.shape[0] != 0:
array_Xm = np.zeros(poblacion[indexCond4].shape)
array_Xm = array_Xm + Xm
poblacion[indexCond4] = ((array_Xm - poblacion[indexCond4]) - np.multiply( E[indexCond4], np.abs(np.multiply( 2*(1-np.random.uniform(low= 0.0, high=1.0, size=indexCond4.shape[0])), array_Xm.T ).T - poblacion[indexCond4]).T).T)
        #eq. 10
        indexCond10 = np.intersect1d(np.argwhere(Eabs>=0.5),np.argwhere(r<0.5)) #indices of the solutions to which eq. 10 must be applied
if indexCond10.shape[0] != 0:
#print("indexCond10")
y10 = poblacion
            #eq. 7
Array_y10 = np.zeros(poblacion[indexCond10].shape)
Array_y10 = Array_y10 + y10[bestRowAux]
y10[indexCond10] = Array_y10- np.multiply( E[indexCond10], np.abs( np.multiply( 2*(1-np.random.uniform(low= 0.0, high=1.0, size=indexCond10.shape[0])), Array_y10.T ).T- Array_y10 ).T ).T
            #eq. 8
z10 = y10
S = np.random.uniform(low= 0.0, high=1.0, size=(y10[indexCond10].shape))
LF = np.divide((0.01 * np.random.uniform(low= 0.0, high=1.0, size=(y10[indexCond10].shape)) * sigma),np.power(np.abs(np.random.uniform(low= 0.0, high=1.0, size=(y10[indexCond10].shape))),(1/beta)))
z10[indexCond10] = y10[indexCond10] + np.multiply(LF,S)
            #evaluate the fitness of eq. 7 and eq. 8
Fy10 = solutionsRanking
Fy10[indexCond10] = Problem.SCP(y10[indexCond10],matrixBin[indexCond10],solutionsRanking[indexCond10],vectorCostos,matrizCobertura,DS_actions[DS],repair,problemaGPU,pondRestricciones)[1]
Fz10 = solutionsRanking
Fz10[indexCond10] = Problem.SCP(z10[indexCond10],matrixBin[indexCond10],solutionsRanking[indexCond10],vectorCostos,matrizCobertura,DS_actions[DS],repair,problemaGPU,pondRestricciones)[1]
            #eq. 10.1
            indexCond101 = np.intersect1d(indexCond10, np.argwhere(Fy10 < solutionsRanking)) #indices of the solutions to which eq. 10.1 must be applied
if indexCond101.shape[0] != 0:
#print("indexCond101")
poblacion[indexCond101] = y10[indexCond101]
            #eq. 10.2
            indexCond102 = np.intersect1d(indexCond10, np.argwhere(Fz10 < solutionsRanking)) #indices of the solutions to which eq. 10.2 must be applied
if indexCond102.shape[0] != 0:
#print("indexCond102")
poblacion[indexCond102] = z10[indexCond102]
        # eq. 6
        indexCond6 = np.intersect1d(np.argwhere(Eabs<0.5),np.argwhere(r>=0.5)) #indices of the solutions to which eq. 6 must be applied
if indexCond6.shape[0] != 0:
#print("indexCond6")
poblacion[indexCond6] = Best - np.multiply(E[indexCond6], np.abs(Best - poblacion[indexCond6] ).T ).T
        #eq. 11
        indexCond11 = np.intersect1d(np.argwhere(Eabs<0.5),np.argwhere(r<0.5)) #indices of the solutions to which eq. 11 must be applied
if indexCond11.shape[0] != 0:
#print("indexCond11")
            #eq. 12
y11 = poblacion
array_Xm = np.zeros(poblacion[indexCond11].shape)
array_Xm = array_Xm + Xm
y11[indexCond11] = y11[bestRowAux]- np.multiply(E[indexCond11], np.abs( np.multiply( 2*(1-np.random.uniform(low= 0.0, high=1.0, size=poblacion[indexCond11].shape)), y11[bestRowAux] )- array_Xm ).T ).T
            #eq. 13
z11 = y11
S = np.random.uniform(low= 0.0, high=1.0, size=(y11.shape))
LF = np.divide((0.01 * np.random.uniform(low= 0.0, high=1.0, size=(y11.shape)) * sigma),np.power(np.abs(np.random.uniform(low= 0.0, high=1.0, size=(y11.shape))),(1/beta)))
z11[indexCond11] = y11[indexCond11] + np.multiply(S[indexCond11],LF[[indexCond11]])
            #evaluate the fitness of eq. 12 and eq. 13
if solutionsRanking is None: solutionsRanking = np.ones(pob)*999999
Fy11 = solutionsRanking
Fy11[indexCond11] = Problem.SCP(y11[indexCond11],matrixBin[indexCond11],solutionsRanking[indexCond11],vectorCostos,matrizCobertura,DS_actions[DS],repair,problemaGPU,pondRestricciones)[1]
Fz11 = solutionsRanking
Fz11[indexCond11] = Problem.SCP(z11[indexCond11],matrixBin[indexCond11],solutionsRanking[indexCond11],vectorCostos,matrizCobertura,DS_actions[DS],repair,problemaGPU,pondRestricciones)[1]
            #eq. 11.1
            indexCond111 = np.intersect1d(indexCond11, np.argwhere(Fy11 < solutionsRanking)) #indices of the solutions to which eq. 11.1 must be applied
if indexCond111.shape[0] != 0:
poblacion[indexCond111] = y11[indexCond111]
            #eq. 11.2
            indexCond112 = np.intersect1d(indexCond11, np.argwhere(Fz11 < solutionsRanking)) #indices of the solutions to which eq. 11.2 must be applied
if indexCond112.shape[0] != 0:
poblacion[indexCond112] = z11[indexCond112]
        # Choose a discretization scheme from QL
        DS = agente.getAccion(CurrentState,policy)
        oldState = CurrentState #Keep the current state
        #Binarize and evaluate the fitness of every solution of iteration t
        matrixBin,fitness,solutionsRanking,numReparaciones = Problem.SCP(poblacion,matrixBin,solutionsRanking,vectorCostos,matrizCobertura,DS_actions[DS],repair,problemaGPU,pondRestricciones)
        #Keep the Best
        if fitness[bestRowAux] > BestFitness:
            fitness[bestRowAux] = BestFitness
            matrixBin[bestRowAux] = BestBinary
        #Compute diversity and state
        diversidades, maxDiversidades, PorcentajeExplor, PorcentajeExplot, state = dv.ObtenerDiversidadYEstado(matrixBin,maxDiversidades)
        BestFitnes = str(np.min(fitness)) # for JSON
        CurrentState = state[0]
        # Observe, reward/penalize and update the Q table
        agente.updateQtable(np.min(fitness), DS, CurrentState, oldState, iter)
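        # Added note (assumption about the QL helper; its internals are not
        # shown in this file): updateQtable presumably applies the standard
        # Q-learning rule Q(s,a) <- Q(s,a) + alpha*(r + gamma*max_a' Q(s',a') - Q(s,a)),
        # with the reward derived from the new best fitness and rewardType.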
walltimeEnd = np.round(time.time() - timerStart,6)
processTimeEnd = np.round(time.process_time()-processTime,6)
dataIter = {
"id_ejecucion": id,
"numero_iteracion":iter,
"fitness_mejor": BestFitnes,
"parametros_iteracion": json.dumps({
"fitness": BestFitnes,
"clockTime": walltimeEnd,
"processTime": processTimeEnd,
"DS":str(DS),
"Diversidades": str(diversidades),
"PorcentajeExplor": str(PorcentajeExplor)
#"PorcentajeExplot": str(PorcentajeExplot),
#"state": str(state)
})
}
memory.append(dataIter)
if iter % 100 == 0:
memory = connect.insertMemory(memory)
    # If anything is left in memory, insert it
    if(len(memory)>0):
        memory = connect.insertMemory(memory)
    #Update the resultado_ejecucion table, without mejor_solucion
memory2 = []
fin = datetime.now()
qtable = agente.getQtable()
dataResult = {
"id_ejecucion": id,
"fitness": BestFitnes,
"inicio": inicio,
"fin": fin,
"mejor_solucion": json.dumps(qtable.tolist())
}
memory2.append(dataResult)
dataResult = connect.insertMemoryBest(memory2)
# Update ejecucion
if not connect.endEjecucion(id,datetime.now(),'terminado'):
return False
return True
| 44.337748 | 362 | 0.642121 |
1a5d5f9047adfa32783a8cc094cb9e4ace20fdf6 | 1,025 | py | Python | dataMining/model-0-2-2.py | evandropp10/bitcoin_prediction | 3619deb175e3a38bd08e929429c9b7590e9d202d | ["MIT"] | null | null | null | dataMining/model-0-2-2.py | evandropp10/bitcoin_prediction | 3619deb175e3a38bd08e929429c9b7590e9d202d | ["MIT"] | null | null | null | dataMining/model-0-2-2.py | evandropp10/bitcoin_prediction | 3619deb175e3a38bd08e929429c9b7590e9d202d | ["MIT"] | null | null | null |
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
df = pd.read_csv('data_mining.csv')
#x = df[['bitcoin','bitcoin buy','bitcoin mining', 'bitcoin price', 'blockchain']]
x = df[['bitcoin', 'revenue', 'trade_volume', 'market_cap', 'value']]
y = df[['value_tomorrow']]
scale = MinMaxScaler()
x = scale.fit_transform(x)
y = scale.fit_transform(y)
pf = PolynomialFeatures(degree=2)
x_poly = pf.fit_transform(x)
lr = LinearRegression()
prediction = cross_val_predict(lr, x_poly, y, cv=22)
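# Added note (comment only; not in the original script): cross_val_predict
# returns out-of-fold predictions, i.e. each row is predicted by a model that
# never saw that row during fitting, so the MAE printed below is a
# cross-validated error estimate rather than a training error.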
y_test = scale.inverse_transform(y)
prediction = scale.inverse_transform(prediction)
#result = pd.DataFrame(columns=['Date', 'Test', 'Prediction'])
#
#result['Date'] = df['date']
#result['Test'] = y['value_tomorrow']
#result['Prediction'] = prediction
###-----
print('MAE:', metrics.mean_absolute_error(y_test,prediction))
| 23.295455 | 82 | 0.741463 |
825c71ad9f705e9f094ebea374354b35abfbee76 | 10,111 | py | Python | hubspot/crm/objects/feedback_submissions/models/batch_response_simple_public_object_with_errors.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | ["Apache-2.0"] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/crm/objects/feedback_submissions/models/batch_response_simple_public_object_with_errors.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | ["Apache-2.0"] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/crm/objects/feedback_submissions/models/batch_response_simple_public_object_with_errors.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | ["Apache-2.0"] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z |
# coding: utf-8
"""
Feedback Submissions
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.objects.feedback_submissions.configuration import Configuration
class BatchResponseSimplePublicObjectWithErrors(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"status": "str",
"results": "list[SimplePublicObject]",
"num_errors": "int",
"errors": "list[StandardError]",
"requested_at": "datetime",
"started_at": "datetime",
"completed_at": "datetime",
"links": "dict(str, str)",
}
attribute_map = {
"status": "status",
"results": "results",
"num_errors": "numErrors",
"errors": "errors",
"requested_at": "requestedAt",
"started_at": "startedAt",
"completed_at": "completedAt",
"links": "links",
}
def __init__(self, status=None, results=None, num_errors=None, errors=None, requested_at=None, started_at=None, completed_at=None, links=None, local_vars_configuration=None): # noqa: E501
"""BatchResponseSimplePublicObjectWithErrors - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._status = None
self._results = None
self._num_errors = None
self._errors = None
self._requested_at = None
self._started_at = None
self._completed_at = None
self._links = None
self.discriminator = None
self.status = status
self.results = results
if num_errors is not None:
self.num_errors = num_errors
if errors is not None:
self.errors = errors
if requested_at is not None:
self.requested_at = requested_at
self.started_at = started_at
self.completed_at = completed_at
if links is not None:
self.links = links
@property
def status(self):
"""Gets the status of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:return: The status of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this BatchResponseSimplePublicObjectWithErrors.
:param status: The status of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
allowed_values = ["PENDING", "PROCESSING", "CANCELED", "COMPLETE"] # noqa: E501
if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501
raise ValueError("Invalid value for `status` ({0}), must be one of {1}".format(status, allowed_values)) # noqa: E501
self._status = status
@property
def results(self):
"""Gets the results of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:return: The results of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:rtype: list[SimplePublicObject]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this BatchResponseSimplePublicObjectWithErrors.
:param results: The results of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:type: list[SimplePublicObject]
"""
if self.local_vars_configuration.client_side_validation and results is None: # noqa: E501
raise ValueError("Invalid value for `results`, must not be `None`") # noqa: E501
self._results = results
@property
def num_errors(self):
"""Gets the num_errors of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:return: The num_errors of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:rtype: int
"""
return self._num_errors
@num_errors.setter
def num_errors(self, num_errors):
"""Sets the num_errors of this BatchResponseSimplePublicObjectWithErrors.
:param num_errors: The num_errors of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:type: int
"""
self._num_errors = num_errors
@property
def errors(self):
"""Gets the errors of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:return: The errors of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:rtype: list[StandardError]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this BatchResponseSimplePublicObjectWithErrors.
:param errors: The errors of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:type: list[StandardError]
"""
self._errors = errors
@property
def requested_at(self):
"""Gets the requested_at of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:return: The requested_at of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:rtype: datetime
"""
return self._requested_at
@requested_at.setter
def requested_at(self, requested_at):
"""Sets the requested_at of this BatchResponseSimplePublicObjectWithErrors.
:param requested_at: The requested_at of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:type: datetime
"""
self._requested_at = requested_at
@property
def started_at(self):
"""Gets the started_at of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:return: The started_at of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:rtype: datetime
"""
return self._started_at
@started_at.setter
def started_at(self, started_at):
"""Sets the started_at of this BatchResponseSimplePublicObjectWithErrors.
:param started_at: The started_at of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and started_at is None: # noqa: E501
raise ValueError("Invalid value for `started_at`, must not be `None`") # noqa: E501
self._started_at = started_at
@property
def completed_at(self):
"""Gets the completed_at of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:return: The completed_at of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:rtype: datetime
"""
return self._completed_at
@completed_at.setter
def completed_at(self, completed_at):
"""Sets the completed_at of this BatchResponseSimplePublicObjectWithErrors.
:param completed_at: The completed_at of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and completed_at is None: # noqa: E501
raise ValueError("Invalid value for `completed_at`, must not be `None`") # noqa: E501
self._completed_at = completed_at
@property
def links(self):
"""Gets the links of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:return: The links of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:rtype: dict(str, str)
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this BatchResponseSimplePublicObjectWithErrors.
:param links: The links of this BatchResponseSimplePublicObjectWithErrors. # noqa: E501
:type: dict(str, str)
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchResponseSimplePublicObjectWithErrors):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, BatchResponseSimplePublicObjectWithErrors):
return True
return self.to_dict() != other.to_dict()
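# Illustrative usage sketch (added; not part of the generated file). With the
# default Configuration, client-side validation is enabled, so the required
# fields (status, results, started_at, completed_at) must not be None:
#
#   import datetime
#   from hubspot.crm.objects.feedback_submissions.models.batch_response_simple_public_object_with_errors import (
#       BatchResponseSimplePublicObjectWithErrors,
#   )
#   now = datetime.datetime.utcnow()
#   resp = BatchResponseSimplePublicObjectWithErrors(
#       status="COMPLETE", results=[], started_at=now, completed_at=now)
#   print(resp.to_dict())  # plain dict, suitable for json.dumps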
| 33.369637 | 192 | 0.650776 |
d2e58cd7cbf88cea1fe41ae6d29464433a5d6183 | 906 | py | Python | test/augmenter/word/test_stopwords.py | joaoantonioverdade/nlpaug | 137a3a60fe1ab2d8dfc51d21e160d32c10b2538c | ["MIT"] | null | null | null | test/augmenter/word/test_stopwords.py | joaoantonioverdade/nlpaug | 137a3a60fe1ab2d8dfc51d21e160d32c10b2538c | ["MIT"] | null | null | null | test/augmenter/word/test_stopwords.py | joaoantonioverdade/nlpaug | 137a3a60fe1ab2d8dfc51d21e160d32c10b2538c | ["MIT"] | null | null | null |
import unittest
import nlpaug.augmenter.word as naw
class TestStopWords(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.stopwords = ['a', 'an', 'the']
def test_delete(self):
text = 'The quick brown fox jumps over lazy dog'
self.assertLess(0, len(text))
aug = naw.StopWordsAug(stopwords=['fox'])
augmented_text = aug.augment(text)
self.assertNotEqual(text, augmented_text)
self.assertTrue('fox' not in augmented_text)
# Test case sensitive = False
aug = naw.StopWordsAug(stopwords=['the'], case_sensitive=False)
augmented_text = aug.augment(text)
self.assertNotEqual(text, augmented_text)
# Test case sensitive = True
aug = naw.StopWordsAug(stopwords=['the'], case_sensitive=True)
augmented_text = aug.augment(text)
self.assertEqual(text, augmented_text)
| 28.3125 | 71 | 0.655629 |
3144a18b99a2075d8ec957fd3c5f52d460d24b3e | 28,706 | py | Python | tools/LogCollector/source/omslinux_agentlog.py | superminiek/OMS-Agent-for-Linux | faacc094723779074cea8172a3b23eccf85307f2 | ["Apache-2.0"] | null | null | null | tools/LogCollector/source/omslinux_agentlog.py | superminiek/OMS-Agent-for-Linux | faacc094723779074cea8172a3b23eccf85307f2 | ["Apache-2.0"] | null | null | null | tools/LogCollector/source/omslinux_agentlog.py | superminiek/OMS-Agent-for-Linux | faacc094723779074cea8172a3b23eccf85307f2 | ["Apache-2.0"] | null | null | null |
'''
OMS Log Collector to collect logs and command line outputs for
troubleshooting OMS Linux Agent (Github, Extension & Container)
issues by support personnel
Authors, Reviewers & Contributors :
KR Kandavel Azure CAT PM
Keiko Harada OMS PM
Laura Galbraith OMS SE
Jim Britt Azure CAT PM
Gary Keong OMS Eng. Mgr.
Adrian Doyle CSS PM
Steve Chilcoat CSS Esc. Eng.
Date : 2017-07-20
Version : 2.3
'''
# coding: UTF-8
import os
import subprocess
import logging
import sys, getopt
import datetime
if "check_output" not in dir( subprocess ): # duck punch it in!
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
subprocess.check_output = check_output
'''
Get OMS container ID for running docker command inside container
'''
def getOMSAgentContainerID():
cmd='docker ps | grep -i microsoft/oms | grep -v grep'
out=execCommand(cmd)
strs=out.split(' ')
omsContainerID=strs[0]
return omsContainerID
'''
Get OMS container Name for running docker command inside container
'''
def getOMSAgentContainerName():
cmd='docker ps | grep -i microsoft/oms | grep -v grep'
out=execCommand(cmd)
strs=out.split(' ')
omsContainerName=strs[-1]
return omsContainerName
'''
Use docker command to collect OMS Linux Agent (omsagent container) logs
from container host
'''
def runDockerCommands(omsContainerID):
cmd='docker info'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker ps -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker inspect omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker logs omsagent 1>/tmp/omscontainer.log 2>&1'
out=execCommand2(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='cat /tmp/omscontainer.log'
out=execCommand(cmd)
writeLogOutput(str(out))
cmd='docker inspect omsagent | grep -I -A 4 label'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
return 0
'''
Use docker command to collect OMS Linux Agent (omsagent container) logs
from container hosting OMS Agent
'''
def runContainerCommands(omsContainerName):
cmd='docker exec omsagent df -k'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent ps -ef | grep -i oms | grep -v grep'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent ps -ef | grep -i omi | grep -v grep'
out=execCommand(cmd)
writeLogOutput(cmd)
writeLogOutput(out)
cmd='docker exec omsagent /opt/microsoft/omsagent/bin/omsadmin.sh -l > /tmp/oms.status'
writeLogCommand(cmd)
out=execCommand2(cmd)
cmd='cat /tmp/oms.status'
out=execCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent /opt/omi/bin/omicli ei root/cimv2 Container_ContainerStatistics'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent /opt/omi/bin/omicli ei root/cimv2 Container_ContainerInventory'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
return 0
'''
Use docker command to copy logs from container hosting OMS Agent
'''
def copyContainerFiles(omsContainerName, omsLinuxType):
cmd='docker exec omsagent find ' + '. ' + '/var/opt/microsoft/omsagent ' + '-name ' + 'omsagent.log'
file=execCommand(cmd)
cmd='docker cp omsagent:' + file[:len(file)-1] + ' /tmp/omslogs/container'
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='docker cp omsagent:' + '/var/opt/microsoft/omsconfig/omsconfig.log ' + '/tmp/omslogs/container'
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='docker cp omsagent:' + '/var/opt/microsoft/scx/log/scx.log ' + '/tmp/omslogs/container'
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='docker cp omsagent:' + '/etc/opt/microsoft/omsagent/* ' + '/tmp/omslogs/container/WSData'
out=execCommand(cmd)
writeLogCommand(cmd)
if(omsLinuxType == 'Ubuntu'):
cmd='docker cp omsagent:' + '/var/log/syslog /tmp/omslogs/container'
else:
cmd='docker cp omsagent:' + '/var/log/messages /tmp/omslogs/container'
out=execCommand(cmd)
writeLogCommand(cmd)
return 0
'''
Run extension (Azure Agent) specific commands
'''
def runExtensionCommands():
cmd='waagent -version'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
return 0
'''
Run common OS level commands needed for OMS agent troubleshooting
'''
def runCommonCommands():
cmd='df -k'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='ps -ef | grep -i oms | grep -v grep'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='ps -ef | grep -i omi | grep -v grep'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='ps aux --sort=-pcpu | head -10'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='ps aux --sort -rss | head -10'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='ps aux --sort -vsz | head -10'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='ps -e -o pid,ppid,user,etime,time,pcpu,nlwp,vsz,rss,pmem,args | grep -i omsagent | grep -v grep'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='/opt/microsoft/omsagent/bin/omsadmin.sh -l > /tmp/oms.status'
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='cat /tmp/oms.status'
out=execCommand(cmd)
writeLogOutput(out)
return 0
'''
Run Ubuntu OS specific commands needed for OMS agent troubleshooting
'''
def runUbuntuCommands(omsInstallType):
if(omsInstallType == 3):
cmd='docker exec omsagent uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent apt show omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent apt show omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
else:
cmd='uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='apt show omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='apt show omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
return 0
'''
Run CentOS specific commands needed for OMS agent troubleshooting
'''
def runCentOSCommands(omsInstallType):
if(omsInstallType == 3):
cmd='docker exec omsagent uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent rpm -qi omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent rpm -qi omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
else:
cmd='uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='rpm -qi omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='rpm -qi omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
return out
'''
Run Redhat OS specific commands needed for OMS agent troubleshooting
'''
def runRedhatCommands(omsInstallType):
if(omsInstallType == 3):
cmd='docker exec omsagent uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent rpm -qi omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent rpm -qi omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
else:
cmd='uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='rpm -qi omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='rpm -qi omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
return 0
'''
Run Oracle OS specific commands needed for OMS agent troubleshooting
'''
def runOracleCommands(omsInstallType):
if(omsInstallType == 3):
cmd='docker exec omsagent uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent rpm -qi omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent rpm -qi omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
else:
cmd='uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='rpm -qi omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='rpm -qi omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
return 0
'''
Run Suse OS specific commands needed for OMS agent troubleshooting
'''
def runSLESCommands(omsInstallType):
if(omsInstallType == 3):
cmd='docker exec omsagent uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent rpm -qi omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent rpm -qi omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
else:
cmd='uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='rpm -qi omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='rpm -qi omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
return 0
'''
Run Debian OS specific commands needed for OMS agent troubleshooting
'''
def runDebianCommands(omsInstallType):
if(omsInstallType == 3):
cmd='docker exec omsagent uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent apt show omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='docker exec omsagent apt show omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
else:
cmd='uname -a'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='apt show omsagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='apt show omsconfig'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
return 0
'''
Copy common logs for all 3 types of OMS agents into /tmp/omslogs
'''
def copyCommonFiles(omsLinuxType):
cmd='cp /var/opt/microsoft/omsagent/log/omsagent* /tmp/omslogs'
out=execCommand2(cmd)
writeLogCommand(cmd)
cmd='cp /var/opt/microsoft/omsconfig/omsconfig* /tmp/omslogs'
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='cp /var/opt/microsoft/scx/log/scx* /tmp/omslogs'
out=execCommand2(cmd)
writeLogCommand(cmd)
cmd='mkdir -p /tmp/omslogs/dscconfiguration'
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='cp -rf /etc/opt/omi/conf/omsconfig/configuration/* /tmp/omslogs/dscconfiguration'
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='mkdir -p /tmp/omslogs/WSData'
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='cp -rf /etc/opt/microsoft/omsagent/* /tmp/omslogs/WSData'
out=execCommand(cmd)
writeLogCommand(cmd)
if(omsLinuxType == 'Ubuntu'):
cmd='cp /var/log/syslog* /tmp/omslogs'
else:
cmd='cp /var/log/messages* /tmp/omslogs'
out=execCommand(cmd)
writeLogCommand(cmd)
return 0
'''
Copy OMS agent (Extension) specific logs into /tmp/omslogs
'''
def copyExtensionFiles():
cmd='cp /var/log/waagent.log /tmp/omslogs/vmagent'
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='cp -R /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux /tmp/omslogs/extension/log'
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='ls /var/lib/waagent | grep -i Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux-'
file=execCommand(cmd)
lfiles=file.split()
print(lfiles)
cmd='cp -R /var/lib/waagent/' + lfiles[0] + '/status ' + '/tmp/omslogs/extension/lib'
print(cmd)
out=execCommand(cmd)
writeLogCommand(cmd)
cmd='cp -R /var/lib/waagent/' + lfiles[0] + '/config ' + '/tmp/omslogs/extension/lib'
out=execCommand(cmd)
writeLogCommand(cmd)
return 0
'''
Remove temporary files under /tmp/omslogs once it is archived
'''
def removeTempFiles():
cmd='rm -R -rf /tmp/omslogs'
out=execCommand(cmd)
print(cmd)
print(out)
cmd='rm -rf /tmp/oms.status'
out=execCommand(cmd)
print(cmd)
print(out)
return 0
'''
Estimate disk space required for OMS agent (Github)
'''
def estCommonFileSize(omsLinuxType):
reqSize=0
folderName='/var/opt/microsoft/omsagent/log/'
reqSize+=getFolderSize(folderName)
folderName='/etc/opt/microsoft/omsagent/'
reqSize+=getFolderSize(folderName)
reqSize+=os.path.getsize('/var/opt/microsoft/omsconfig/omsconfig.log')
reqSize+=os.path.getsize('/var/opt/microsoft/scx/log/scx.log')
if(omsLinuxType == 'Ubuntu'):
reqSize+=os.path.getsize('/var/log/syslog')
else:
reqSize+=os.path.getsize('/var/log/messages')
return reqSize
'''
Estimate disk space required for OMS agent (Extension)
'''
def estExtensionFileSize(omsLinuxType):
reqSize=0
folderName='/var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux'
reqSize+=getFolderSize(folderName)
reqSize+=os.path.getsize('/var/log/waagent.log')
folderName='/var/lib/waagent/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux-*'
reqSize+=getFolderSize(folderName)
return reqSize
'''
Check if /tmp has adequate disk space to copy logs and command outputs
'''
def chkDiskFreeSpace(estSize, estExtSize, cmdSize):
tmpSpace = 0
arcSize = (estSize + estExtSize + cmdSize) * 0.1
totSize = (estSize + estExtSize + cmdSize) + arcSize
print '*' * 80
print "1. Disk space required to copy Common files in /tmp : ", int(estSize / 1024), 'KBytes'
print "2. Disk space required to copy Extension files in /tmp : ", int(estExtSize / 1024), 'KBytes'
print "3. Disk space required for command outputs in /tmp : ", int(cmdSize / 1024), 'KBytes'
print "4. Disk space required to archive files in /tmp : ", int(arcSize / 1024), 'KBytes'
print "5. Total disk space required in /tmp : ", int(totSize / 1024), 'KBytes'
print '*' * 80
print "Files created in step 1, 2 & 3 are temporary and deleted at the end"
print '*' * 80
stat= os.statvfs('/tmp')
# use f_bfree for superuser, or f_bavail if filesystem
# has reserved space for superuser
freeSpace=stat.f_bfree*stat.f_bsize
if(totSize < freeSpace):
print 'Enough space available in /tmp to store logs...'
print '*' * 80
else:
print 'Not enough free space available in /tmp to store logs...'
print '*' * 80
tmpSpace = 1
return tmpSpace
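# Added worked example (comment only; not in the original script): with
# estSize = 8 MB, estExtSize = 2 MB and cmdSize = 10 KB, arcSize is
# 0.1 * (8 MB + 2 MB + 10 KB) ~ 1 MB, so roughly 11 MB of free space must be
# available in /tmp before collection proceeds.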
'''
Checks if OMS Linux Agent install directory is present, if not then it recommends running
the OMS Linux Agent installation before collecting logs for troubleshooting
'''
def chkOMSAgentInstallStatus(omsInstallType):
omsInstallDir = [ "/var/opt/microsoft/omsagent",
"/var/opt/microsoft/omsconfig"
]
if(omsInstallType != 3):
for dir in omsInstallDir:
if(not os.path.exists(dir)):
return 1
return 0
'''
Check the type (Github, Extension, Container) of agent running in Linux machine
'''
def chkOMSAgentInstallType():
omsInstallType=0
cmd='ls /var/lib/waagent | grep -i Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux-'
file=execCommand2(cmd)
if(file == 1):
omsExtension = False
else:
file=execCommand(cmd)
lfiles=file.split()
path='/var/lib/waagent/' + lfiles[0]
print(path)
omsExtension=os.path.exists(path)
print(omsExtension)
if(omsExtension == True):
out="OMS Linux Agent is installed through VM Extension...\n"
omsInstallType=1
elif(omsExtension == False):
path='/var/opt/microsoft/omsagent'
omsAgent=os.path.exists(path)
if(omsAgent == True):
out="OMS Linux Agent installed with NO VM Extension (Github)...\n"
omsInstallType=2
elif(omsAgent == False):
cmd='which docker 1>/dev/null 2>&1'
out=execCommand2(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
if(out == 0):
cmd='docker ps | grep -i microsoft/oms | grep -v grep'
out=execCommand(cmd)
count=out.splitlines()
if(len(count) == 1):
out="Containerized OMS Linux Agent is installed...\n"
omsInstallType=3
else:
out="No OMS Linux Agent installed on this machine...\n"
omsInstallType=0
else:
out="No OMS Linux Agent installed on this machine...\n"
omsInstallType=0
writeLogOutput(out)
return omsInstallType
'''
Get size in bytes of a folder
'''
def getFolderSize(foldername):
fileSize=0
for root, dirs, files in os.walk(foldername):
fileSize=sum(os.path.getsize(os.path.join(root, name)) for name in files)
return fileSize
'''
Common logic to run any command and check/get its output for further use
'''
def execCommand(cmd):
try:
out = subprocess.check_output(cmd, shell=True)
return out
except subprocess.CalledProcessError as e:
print(e.returncode)
return (e.returncode)
'''
Common logic to run any command and check if it is success/failed
'''
def execCommand2(cmd):
try:
out = subprocess.call(cmd, shell=True)
return out
except subprocess.CalledProcessError as e:
print(e.returncode)
return (e.returncode)
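# Added note (comment only; not in the original script): execCommand returns
# the command's stdout via subprocess.check_output, while execCommand2 returns
# only the exit status via subprocess.call; callers use execCommand2 when the
# output is already redirected to a file, e.g.
# execCommand2('docker logs omsagent 1>/tmp/omscontainer.log 2>&1').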
'''
Common logic to save command outputs into /tmp/omslogs/omslinux.out
'''
def writeLogOutput(out):
if(type(out) != str): out=str(out)
outFile.write(out + '\n')
outFile.write('-' * 80)
outFile.write('\n')
return
'''
Common logic to save command itself into /tmp/omslogs/omslinux.out
'''
def writeLogCommand(cmd):
print(cmd)
outFile.write(cmd + '\n')
outFile.write('=' * 40)
outFile.write('\n')
return
'''
Compress all logs & command o/p files in a TAR ball for sending it to Support
'''
def compressOMSLog(source, target):
cmd='tar -cvzf ' + target + ' ' + source
out=execCommand(cmd)
print(cmd)
print(out)
return 0
'''
Logic to validate input arguments before collecting the logs
'''
def inpArgCheck(argv):
global srnum, comname
srnum = ''
comname = ''
try:
opts, args = getopt.getopt(argv, "hs:c:", ['srnum=', 'comname='])
except getopt.GetoptError:
print 'Usage: sudo python omsagentlog.py [-h] -s <SR Number> [-c <Company Name>]'
return 2
if(len(argv) == 0):
print 'Usage: sudo python omsagentlog.py [-h] -s <SR Number> [-c <Company Name>]'
return 1
for opt, arg in opts:
if (opt == '-h'):
print 'Usage: sudo python omsagentlog.py [-h] -s <SR Number> [-c <Company Name>]'
return 1
elif opt in ('-s', '--srnum'):
srnum = arg
elif opt in ('-c', '--comname'):
comname = arg
return 0
'''
Main() logic for log collection, calling the above functions
'''
ret=inpArgCheck(sys.argv[1:])
if(ret == 1 or ret == 2):
sys.exit(1)
print 'SR Number : ', srnum
print 'Company Name :', comname
global logger
outDir='/tmp/omslogs'
outFile=outDir + '/omslinux.out'
compressFile='/tmp/omslinuxagentlog' + '-' + srnum + '-' + str(datetime.datetime.utcnow().isoformat()) + '.tgz'
print(compressFile)
centRHOraPath='/etc/system-release'
ubuntuPath='/etc/lsb-release'
slesDebianPath='/etc/os-release'
fedoraPath='/etc/fedora-release'
try:
'''
Initialize routine to create necessary files and directories for storing logs & command o/p
'''
cmd='mkdir -p ' + outDir + '/ '
out=execCommand(cmd)
outFile = open(outFile, 'w')
writeLogOutput('SR Number : ' + srnum + ' Company Name : ' + comname)
curutctime=datetime.datetime.utcnow()
logtime='Log Collection Start Time (UTC) : %s' % (curutctime)
print(logtime)
writeLogOutput(logtime)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='hostname -f'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='python -V'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(sys.version)
'''
Logic to check what Linux distro is running in machine
'''
if (os.path.isfile(centRHOraPath)):
cmd='cat %s' % centRHOraPath
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
strs=out.split(' ')
linuxType=strs[0]
linuxVer=strs[3]
if(linuxType == 'Red'):
linuxType=strs[0] + strs[1]
linuxVer=strs[6]
elif (os.path.isfile(ubuntuPath)):
cmd='cat %s' % ubuntuPath
out=execCommand(cmd)
writeLogCommand(out)
writeLogOutput(out)
lines=out.split('\n')
strs=lines[0].split('=')
linuxType=strs[1]
elif (os.path.isfile(slesDebianPath)):
cmd='cat %s' % slesDebianPath
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
lines=out.split('\n')
strs=lines[0].split('=')
print(strs[1])
if (strs[1].find('SLES') != -1):
linuxType='SLES'
elif (strs[1].find('Debian') != -1):
linuxType='Debian'
else:
            msg = 'Unsupported Linux OS...Stopping OMS Log Collection...%s' % strs[1]
print(msg)
writeLogOutput(msg)
sys.exit()
else:
        msg = 'Unsupported Linux OS...Stopping OMS Log Collection...'
print(msg)
writeLogOutput(msg)
sys.exit(1)
'''
Logic to check which OMS Linux agent type is installed in machine
[0 - No Agent, Extension=1, Github=2, Container=3]
'''
writeLogOutput('Linux type installed is...%s' % linuxType)
omsInstallType=chkOMSAgentInstallType()
if(omsInstallType == 1):
cmd='mkdir -p ' + outDir + '/vmagent'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='mkdir -p ' + outDir + '/extension/log'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='mkdir -p ' + outDir + '/extension/lib'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
estSize=estCommonFileSize(linuxType)
estExtSize=estExtensionFileSize(linuxType)
cmdSize=10 * 1024
tmpSpace=chkDiskFreeSpace(estSize, estExtSize, cmdSize)
if(tmpSpace == 0):
copyCommonFiles(linuxType)
copyExtensionFiles()
runExtensionCommands()
else:
sys.exit(1)
elif(omsInstallType == 2):
estSize=estCommonFileSize(linuxType)
cmdSize=10 * 1024
tmpSpace=chkDiskFreeSpace(estSize, 0, cmdSize)
if(tmpSpace == 0):
copyCommonFiles(linuxType)
else:
sys.exit(1)
elif(omsInstallType == 3):
cmd='mkdir -p ' + outDir + '/container'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
cmd='mkdir -p ' + outDir + '/container/WSData'
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
omsContainerID=getOMSAgentContainerID()
omsContainerName=getOMSAgentContainerName()
estSize=estCommonFileSize(linuxType)
cmdSize=10 * 1024
tmpSpace=chkDiskFreeSpace(estSize, 0, cmdSize)
if(tmpSpace == 0):
runDockerCommands(omsContainerID)
copyContainerFiles(omsContainerName, linuxType)
runContainerCommands(omsContainerName)
else:
sys.exit(1)
else:
msg='No OMS Linux Agent installed on this machine...Stopping Log Collection...%s' % omsInstallType
print(msg)
writeLogOutput(msg)
sys.exit(1)
'''
Checks if OMS Linux Agent install directory is present, if not then it recommends
running the OMS Linux Agent installation before collecting logs for troubleshooting
'''
writeLogOutput('OMS Linux agent installed is (0 - No Agent 1 - Extension, 2 - GitHub, 3 - Container...%s' % omsInstallType)
omsInstallStatus=chkOMSAgentInstallStatus(omsInstallType)
if(omsInstallStatus != 0):
msg='OMS Linux Agent install directories under /var/opt/microsoft are missing...'
writeLogOutput(msg)
print '*' * 80
print 'OMS Linux Agent install directories are not present'
print 'please run OMS Linux Agent install script'
print 'For details on installing OMS Agent, please refer documentation'
print 'https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-agent-linux'
print '*' * 80
sys.exit(1)
else:
msg='OMS Linux Agent install directories under /var/opt/microsoft are present...'
writeLogOutput(msg)
'''
Call OS specific routines to run commands and save its o/p
to /tmp/omslogs/omslinux.out
'''
print 'Linux type installed is...%s' % linuxType
if(linuxType == 'CentOS'):
runCentOSCommands(omsInstallType)
elif(linuxType == 'RedHat'):
runRedhatCommands(omsInstallType)
elif(linuxType == 'Oracle'):
runOracleCommands(omsInstallType)
elif(linuxType == 'Ubuntu'):
runUbuntuCommands(omsInstallType)
elif(linuxType == 'SLES'):
runSLESCommands(omsInstallType)
elif(linuxType == 'Debian'):
runDebianCommands(omsInstallType)
else:
msg='Unsupported Linux OS...Stopping OMS Log Collection...%s' % linuxType
print(msg)
writeLogOutput(msg)
sys.exit(1)
'''
Run common OS commands after running omsagent specific commands
'''
if(omsInstallType == 1 or omsInstallType == 2):
runCommonCommands()
'''
Run DSC diagnostics commands
'''
cmd='chmod +x ./dscDiagnostics.sh'
out=execCommand(cmd)
cmd='bash ./dscDiagnostics.sh ' + outDir + '/dscdiagnostics-' + str(datetime.datetime.utcnow().isoformat())
out=execCommand(cmd)
writeLogCommand(cmd)
writeLogOutput(out)
'''
Logic to capture IOError or OSError in above logic
'''
except (IOError), e:
print(e)
logging.error('Could not save repo to repofile %s: %s' % (outFile, e))
sys.exit(2)
except (OSError), e:
print(e)
logging.error('Error occurred in OS command execution %s' % (e))
sys.exit(2)
except (Exception), e:
print(e)
logging.error('General Exception occurred %s' % (e))
sys.exit(2)
finally:
'''
Final logic to close o/p file and create tar ball for sending it to support
'''
outFile.close()
compressOMSLog(outDir, compressFile)
removeTempFiles()
print('OMS Linux Agent Log is archived in file : %s' % (compressFile))
sys.exit()
| 31.441402 | 127 | 0.641713 |
01437e197842b53dbff33bf10594d8e1b95771f1 | 1,100 | py | Python | tests/test_wallet_schema.py | ihor-nahuliak/task-23-jul-2019 | f32d3ef1df985f77998b5d296b524af99f82c3ef | ["MIT"] | null | null | null | tests/test_wallet_schema.py | ihor-nahuliak/task-23-jul-2019 | f32d3ef1df985f77998b5d296b524af99f82c3ef | ["MIT"] | null | null | null | tests/test_wallet_schema.py | ihor-nahuliak/task-23-jul-2019 | f32d3ef1df985f77998b5d296b524af99f82c3ef | ["MIT"] | null | null | null |
import unittest
import unittest.mock as mock
from uuid import UUID
from datetime import datetime
class TestCase(unittest.TestCase):
@mock.patch('app.utils.iso_datetime')
@mock.patch('uuid.uuid4')
def test_load_default(self, m_uuid4, m_iso_datetime):
m_uuid4.return_value = UUID('87654321876543218765432187654321')
m_iso_datetime.return_value = '2000-01-02T03:04:05+00:00'
from app.serializers.wallet_schema import WalletSchema
schema = WalletSchema(strict=True)
data, errors = schema.load({
'client_id': UUID('12345678123456781234567812345678'),
'currency': 'EUR',
})
self.assertDictEqual(errors, {})
self.assertDictEqual(data, {
'id': UUID('87654321876543218765432187654321'),
'is_enabled': True,
'created_at': datetime(2000, 1, 2, 3, 4, 5),
'updated_at': datetime(2000, 1, 2, 3, 4, 5),
'client_id': UUID('12345678123456781234567812345678'),
'currency': 'EUR',
})
if __name__ == '__main__':
unittest.main()
| 30.555556 | 71 | 0.632727 |
c2f65672595d5bb2eb1beb99ac89f899e0828231 | 27,862 | py | Python | src/shimmer_api/shimmer_class.py | tbazina/shimmer_ros | 1cb56a67dc6f7cdae6ac8539ddb0eccf9ce65e87 | ["MIT"] | null | null | null | src/shimmer_api/shimmer_class.py | tbazina/shimmer_ros | 1cb56a67dc6f7cdae6ac8539ddb0eccf9ce65e87 | ["MIT"] | null | null | null | src/shimmer_api/shimmer_class.py | tbazina/shimmer_ros | 1cb56a67dc6f7cdae6ac8539ddb0eccf9ce65e87 | ["MIT"] | null | null | null |
#!/usr/bin/python3
import rospy
import sys, struct, serial
from collections import defaultdict, deque
from datetime import datetime, timedelta
import time
from shimmer_ros.msg import Emg
class ShimmerCaptureEMG():
def __init__(self, port) -> None:
# Serial port
self.port = port
        # Packet Types
self.packet_type = {
'ACK_COMMAND_PROCESSED': 0xFF,
'GET_SAMPLING_RATE_COMMAND': 0x03,
'SET_SAMPLING_RATE_COMMAND': 0x05,
'INQUIRY_COMMAND': 0x01,
'GET_BUFFER_SIZE_COMMAND': 0x36,
'BUFFER_SIZE_RESPONSE': 0x35,
'SET_INFOMEM_COMMAND': 0x8C,
'GET_INFOMEM_COMMAND': 0x8E,
'GET_CHARGE_STATUS_LED_COMMAND': 0x32,
'CHARGE_STATUS_LED_RESPONSE': 0x31,
'SET_SENSORS_COMMAND': 0x08,
'SET_EXG_REGS_COMMAND': 0x61,
'GET_DAUGHTER_CARD_ID_COMMAND': 0x66,
'DAUGHTER_CARD_ID_RESPONSE': 0x65,
'GET_EXG_REGS_COMMAND': 0x63,
'EXG_REGS_RESPONSE': 0x62,
'GET_SHIMMERNAME_COMMAND': 0x7b,
'SHIMMERNAME_RESPONSE': 0x7a,
'START_STREAMING_COMMAND': 0x07,
'STOP_STREAMING_COMMAND': 0x20,
'DATA_PACKET': 0x00,
'SET_RWC_COMMAND': 0x8F,
'RWC_RESPONSE': 0x90,
'GET_RWC_COMMAND': 0x91,
'SET_CONFIGTIME_COMMAND': 0x85,
'CONFIGTIME_RESPONSE': 0x86,
'GET_CONFIGTIME_COMMAND': 0x87,
'SET_SHIMMERNAME_COMMAND': 0x79,
'SHIMMERNAME_RESPONSE': 0x7a,
'GET_SHIMMERNAME_COMMAND': 0x7b,
}
# EMG gain configuration bytes
self.emg_gain_config = defaultdict(
lambda: 0b110, # default 12
{
1: 0b001,
2: 0b010,
3: 0b011,
4: 0b100,
6: 0b000,
8: 0b101,
12: 0b110
}
)
# Default gain value
self.emg_gain_config_default = 12
# EMG multiplexer settings for both channels
self.emg_mux_config = defaultdict(
lambda: 0b1001, # Route INxP and INxN to channel x inputs
{
'normal': 0b0000, # Normal electrode input (default)
'shorted': 0b0001, # Input shorted (for offset measurements and power down channel)
'test': 0b0101, # Test signal
'measure_EMG': 0b1001, # Route INxP and INxN to channel x inputs
}
)
# EMG data rate configuration bytes
self.emg_data_rate_config = defaultdict(
lambda: 0x02, # 500 Hz
{
125: 0x00,
250: 0x01,
500: 0x02,
1000: 0x03,
2000: 0x04,
4000: 0x05,
}
)
# EMG Right-Leg Drive (Common-mode Rejection) settings
# RLD2N, RLD2P, RLD1N and RLD1P should be 0 for both chips
self.emg_rld_config = 0b0000
# PDB_RLD: RLD buffer power bit determines the RLD buffer power state
# 0 = powered down (default), 1 = enabled
# RLD_LOFF_SENSE: RLD lead-off sense function
# 0 = RLD lead-off sense is disabled (default)
# 1 = RLD lead-off sense is enabled
self.emg_pbd_rld_loff_sense_config = {
'on': 0b10,
'off': 0b00
}
# RLDREF_INT: RLDREF signal
# 0 = RLDREF signal fed externally
# 1 = RLDREF signal (AVDD – AVSS) / 2 generated internally (default)
self.emg_rldref_int_config = {
'external': 0,
'internal': 1
}
# INT_TEST: Test signal selection
# This bit determines whether the test signal is turned on or off.
# 0 = Off (default)
# 1 = On; amplitude = ±(VREFP – VREFN) / 2400
# TEST_FREQ: Test signal frequency
# This bit determines the test signal frequency.
# 0 = At dc (default)
# 1 = Square wave at 1 Hz
self.test_signal_config = {
'DC': 0b10,
'square': 0b11,
'off': 0b00
}
# EMG 24 bit sensor activation
self.emg_24bit = [0x18, 0x00, 0x00]
# Signal calibration parameters
self.adc_offset_ch1 = 0
self.adc_offset_ch2 = 0
self.adc_sensitivity = 2420 / (2**23 - 1)
# Dictionary to store possible values for charge status LED
self.charge_status_dict = defaultdict(
lambda: 'UNDEFINED', {
0: 'FULL',
1: 'MEDIUM',
2: 'LOW',
}
)
# Signal calibrated indicator
self.signal_calibrated = False
def open_port(self):
self.ser = serial.Serial(
port=self.port,
baudrate=115200,
timeout=5.0,
write_timeout=5.0,
)
rospy.loginfo(f'Port {self.port} opened: {self.ser.is_open}')
self.ser.reset_input_buffer()
return self.ser
def __enter__(self):
return self.open_port()
def close_port(self):
if self.ser.is_open:
self.ser.close()
rospy.loginfo(f'Port {self.port} opened: {self.ser.is_open}')
def __exit__(self, exception_type, exception_value, exception_traceback):
# Exception handling
self.close_port()
def wait_for_ack(self):
"""
Acknowledge the error-free receipt of transmitted data.
"""
if self.ser.is_open:
ddata = ""
ack = struct.pack(
'B', self.packet_type['ACK_COMMAND_PROCESSED']
)
while ddata != ack:
ddata = self.ser.read(1)
else:
raise serial.SerialException
def set_sampling_rate(self, sampling_rate):
"""Set Shimmer3 sampling rate to desired value.
Args:
sampling_rate (int): Sampling rate in Hz.
"""
try:
sampling_rate = int(sampling_rate)
if not(0.6<=sampling_rate<=1024):
raise ValueError
if self.ser.is_open:
# Calculate from Hz to ms
sampling_period = round(32768/sampling_rate)
# Send sampling rate command bitwise & and >> due to alignment issues
self.ser.write(struct.pack(
'BBB',
self.packet_type['SET_SAMPLING_RATE_COMMAND'],
(sampling_period&0xFF),
((sampling_period&0xFF00)>>8)
))
self.wait_for_ack()
# Read and print set sampling rate
self.get_sampling_rate(echo=True)
else:
raise serial.SerialException
except ValueError as e:
sys.exit(
'set_sampling_rate not an integer or between 1 and 1024 Hz'
'{}'.format(e)
)
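    # Added worked example (comment only; not in the original source): the
    # Shimmer clock runs at 32768 ticks/s, so a requested 512 Hz maps to
    # round(32768/512) = 64 ticks and is exact, while 1000 Hz maps to 33 ticks
    # and get_sampling_rate then reports 32768/33 ~ 992.97 Hz.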
def get_sampling_rate(self, echo=False):
"""Read and print sampling rate"""
if self.ser.is_open:
# Send sampling rate command
self.ser.write(struct.pack(
'B', self.packet_type['GET_SAMPLING_RATE_COMMAND']
))
self.wait_for_ack()
# Read incoming data 1 identifying byte, 2 for uint sampling rate
data = self.ser.read(size=3)
clock_wait = struct.unpack('H', data[1:3])[0]
self.sampling_rate = 32768/clock_wait
if echo:
rospy.loginfo(f'Shimmer sampling rate: {self.sampling_rate:.4f} Hz')
return self.sampling_rate
else:
raise serial.SerialException
def get_buffer_size(self, echo=False):
"""Read and print buffer size"""
if self.ser.is_open:
# Send get buffer size command
self.ser.write(struct.pack(
'B', self.packet_type['GET_BUFFER_SIZE_COMMAND']
))
self.wait_for_ack()
# Read incoming data 1 identifying byte, 2 for uint sampling rate
data = self.ser.read(size=2)
if data[0] == self.packet_type['BUFFER_SIZE_RESPONSE']:
self.buffer_size = data[1]
if echo:
rospy.loginfo(f'Buffer size: {self.buffer_size}')
return self.buffer_size
else:
                rospy.logerr('Did not receive BUFFER_SIZE_RESPONSE')
else:
raise serial.SerialException
def get_charge_status_led(self, echo=False):
"""Read and print charge status LED"""
if self.ser.is_open:
# send get charge status led command
self.ser.write(struct.pack(
'B', self.packet_type['GET_CHARGE_STATUS_LED_COMMAND']
))
self.wait_for_ack()
# Read incoming data 1 identifying byte, 1 charge status
data = self.ser.read(size=2)
if data[0] == self.packet_type['CHARGE_STATUS_LED_RESPONSE']:
charge_status_num = data[1]
self.charge_status = self.charge_status_dict[charge_status_num]
if echo:
rospy.loginfo(f'Charge status: {self.charge_status}')
return self.charge_status
else:
                rospy.logerr('Did not receive CHARGE_STATUS_LED_RESPONSE')
else:
raise serial.SerialException
def get_id_and_rev(self, echo=True):
"""Get the daughter card ID byte (SR number) and Revision number"""
if self.ser.is_open:
self.ser.write(struct.pack(
'BBB', self.packet_type['GET_DAUGHTER_CARD_ID_COMMAND'], 0x02, 0x00
))
self.wait_for_ack()
# Read incoming data byte 3 - serial number, byte 4 revision
data = self.ser.read(size=4)
if data[0] == self.packet_type['DAUGHTER_CARD_ID_RESPONSE']:
self.serial_number = data[2]
self.revision_number = data[3]
if echo:
rospy.loginfo(f'Device: SR{self.serial_number}-{self.revision_number}')
return (self.serial_number, self.revision_number)
else:
                rospy.logerr('Did not receive DAUGHTER_CARD_ID_RESPONSE')
else:
raise serial.SerialException
def get_shimmer_name(self, echo=True):
"""Get the Shimmer name"""
if self.ser.is_open:
self.ser.write(struct.pack(
'B', self.packet_type['GET_SHIMMERNAME_COMMAND']
))
self.wait_for_ack()
            # Read 2 bytes: 0 - response type, 1 - name length; then read the encoded name
data = self.ser.read(2)
if data[0] == self.packet_type['SHIMMERNAME_RESPONSE']:
# Read entire name length
data = self.ser.read(data[1])
self.shimmer_name = data.decode('ascii')
if echo:
rospy.loginfo(f'Device name: {self.shimmer_name}')
return self.shimmer_name
else:
                rospy.logerr('Did not receive SHIMMERNAME_RESPONSE')
else:
raise serial.SerialException
def set_shimmer_name(self, shimmer_name):
"""Set the Shimmer name to string with 11 characters"""
if len(shimmer_name) > 11:
            sys.exit('Desired Shimmer name too long (must be <= 11 characters)')
if self.ser.is_open:
self.ser.write(struct.pack(
'B'*(len(shimmer_name)+2),
self.packet_type['SET_SHIMMERNAME_COMMAND'],
len(shimmer_name),
*shimmer_name.encode('ascii')
))
self.wait_for_ack()
self.get_shimmer_name()
else:
raise serial.SerialException
def set_emg_gain(self, gain, echo=True):
"""Set gain to variable"""
# Check if valid gain input
if gain in self.emg_gain_config.keys():
self.emg_gain = gain
else:
self.emg_gain = self.emg_gain_config_default
if echo:
rospy.loginfo(f'EMG gain: {self.emg_gain}')
self.emg_gain_packet = self.emg_gain_config[self.emg_gain]
rospy.logdebug(f'{self.emg_gain_packet:03b}')
return self.emg_gain
def set_emg_data_rate(self, emg_data_rate, echo=True):
"""Set emg chip data rate (samples per second) to variable"""
# Check if valid data rate input
if emg_data_rate in self.emg_data_rate_config.keys():
self.emg_data_rate = emg_data_rate
else:
self.sampling_rate = self.get_sampling_rate()
# Set the data rate to first value higher than Shimmer sampling rate
self.emg_data_rate = min(
[i for i in self.emg_data_rate_config.keys() if i > self.sampling_rate]
)
if echo:
rospy.loginfo(f'EMG chip data rate: {self.emg_data_rate} Hz')
self.emg_data_rate_packet = self.emg_data_rate_config[self.emg_data_rate]
return self.emg_data_rate
def get_emg_registers(self, chip_num=0, echo=False):
"""Get byte values for all 10 registers for chip_num on EMG unit"""
if chip_num not in [0, 1]:
sys.exit('Wrong chip number specified. Must be 0 or 1')
if self.ser.is_open:
self.ser.write(struct.pack(
'B'*4, self.packet_type['GET_EXG_REGS_COMMAND'], chip_num, 0, 10
))
self.wait_for_ack()
# Read incoming data bytes (EXG_REGS_RESPONSE + number of bytes + 10 registers)
data = self.ser.read(size=12)
if data[0] == self.packet_type['EXG_REGS_RESPONSE']:
emg_regs = list(struct.unpack('B'*10, data[2:]))
# Store only chip1 registers
if chip_num == 0:
self.emg_regs = emg_regs
if echo:
rospy.logdebug(
f'EMG register settings for chip {chip_num+1}:\n'
f'\tCONFIG1: {emg_regs[0]:08b}\n'
f'\tCONFIG2: {emg_regs[1]:08b}\n'
f'\tLOFF: {emg_regs[2]:08b}\n'
f'\tCH1SET: {emg_regs[3]:08b}\n'
f'\tCH2SET: {emg_regs[4]:08b}\n'
f'\tRLD_SENS: {emg_regs[5]:08b}\n'
f'\tLOFF_SENS: {emg_regs[6]:08b}\n'
f'\tLOFF_STAT: {emg_regs[7]:08b}\n'
f'\tRESP1: {emg_regs[8]:08b}\n'
f'\tRESP2: {emg_regs[9]:08b}\n'
)
return emg_regs
else:
                rospy.logerr('Did not receive EXG_REGS_RESPONSE')
else:
raise serial.SerialException
def power_down_chip_2(self, echo=True):
"""
Chip 2 should be powered down for EMG signal acquisition.
Input multiplexer should be set to input shorted configuration.
Bit 7 in CH1SET and CH2SET bytes should be set to 1 (Channel x power-down).
"""
# Get EMG registers for chip 2
chip_num = 1
# self.get_emg_registers(chip_num=chip_num, echo=True)
self.chip2_emg_regs = [None]*10
# CONFIG1 byte - Data Rate
self.chip2_emg_regs[0] = self.emg_data_rate_packet
# CONFIG2 byte - Test signals
self.chip2_emg_regs[1] = (
0b10101000 | self.test_signal_config['off']
)
# LOFF byte
self.chip2_emg_regs[2] = 0b00010000
# CH1SET - Bit7 to 1, gain to default and mux to shorted
self.chip2_emg_regs[3] = (
0b1 << 7 | self.emg_gain_packet << 4 | self.emg_mux_config['shorted']
)
# CH2SET - Bit7 to 1, gain to default and mux to shorted
self.chip2_emg_regs[4] = (
0b1 << 7 | self.emg_gain_packet << 4 | self.emg_mux_config['shorted']
)
# RLD_SENS byte - all 8 bits to 0
self.chip2_emg_regs[5] = (
0b00 << 6 | self.emg_pbd_rld_loff_sense_config['off'] << 4 |
self.emg_rld_config
)
# LOFF_SENS and LOFF_STAT bytes
self.chip2_emg_regs[6], self.chip2_emg_regs[7] = 0b0, 0b0
# RESP1 byte
self.chip2_emg_regs[8] = (
0b10
)
# RESP 2 byte
self.chip2_emg_regs[9] = (
self.emg_rldref_int_config['external'] << 1 | 0b1
)
# rospy.loginfo('EMG register settings for chip {}:\n'.format(chip_num+1),
# '\tCONFIG1: {:08b}\n'.format(self.chip2_emg_regs[0]),
# '\tCONFIG2: {:08b}\n'.format(self.chip2_emg_regs[1]),
# '\tLOFF: {:08b}\n'.format(self.chip2_emg_regs[2]),
# '\tCH1SET: {:08b}\n'.format(self.chip2_emg_regs[3]),
# '\tCH2SET: {:08b}\n'.format(self.chip2_emg_regs[4]),
# '\tRLD_SENS: {:08b}\n'.format(self.chip2_emg_regs[5]),
# '\tLOFF_SENS: {:08b}\n'.format(self.chip2_emg_regs[6]),
# '\tLOFF_STAT: {:08b}\n'.format(self.chip2_emg_regs[7]),
# '\tRESP1: {:08b}\n'.format(self.chip2_emg_regs[8]),
# '\tRESP2: {:08b}\n'.format(self.chip2_emg_regs[9]),
# )
# Write configuration to chip 2 register
if self.ser.is_open:
# Send set registers command, chip to write to, starting byte,
# number of bytes to write and unpack a list with registers to write
self.ser.write(struct.pack(
'B'*14, self.packet_type['SET_EXG_REGS_COMMAND'], chip_num, 0, 10,
*self.chip2_emg_regs
))
self.wait_for_ack()
if echo:
rospy.loginfo(f'Chip {chip_num+1} powered down for EMG measurement!')
return self.chip2_emg_regs
else:
raise serial.SerialException
def activate_emg_sensors(self, echo=True):
"""Set the 24 bit EXG sensors"""
sensors = [self.packet_type['SET_SENSORS_COMMAND']] + self.emg_24bit
if self.ser.is_open:
# Send set sensors command and emg_24 bit bytes to activate sensor
self.ser.write(struct.pack('B'*4, *sensors))
self.wait_for_ack()
if echo:
rospy.loginfo('24 bit EMG sensor activated!')
else:
raise serial.SerialException
def set_emg_registers(self, test_signal=False, echo=True):
"""Set the EMG registers for chip 1 (EMG signal or test signal)"""
# Get EMG registers for chip 0
chip_num = 0
if test_signal:
# Set mux config and test signal byte
mux = 'test'
test = 'square'
else:
mux = 'measure_EMG'
test = 'off'
# Initialize list for EMG registers
self.chip1_emg_regs = [None]*10
# CONFIG1 byte - Data Rate
self.chip1_emg_regs[0] = self.emg_data_rate_packet
# CONFIG2 byte - Test signals
self.chip1_emg_regs[1] = (
0b10101000 | self.test_signal_config[test]
)
# LOFF byte
self.chip1_emg_regs[2] = 0b00010000
# CH1SET - Bit7 to 0, gain to defined and mux to test/measure_EMG
self.chip1_emg_regs[3] = (
0b0 << 7 | self.emg_gain_packet << 4 | self.emg_mux_config[mux]
)
# CH2SET - Bit7 to 0, gain to defined and mux to test/measure_EMG
self.chip1_emg_regs[4] = (
0b0 << 7 | self.emg_gain_packet << 4 | self.emg_mux_config[mux]
)
# RLD_SENS byte - PBD_RLD to on
self.chip1_emg_regs[5] = (
0b00 << 6 | self.emg_pbd_rld_loff_sense_config['on'] << 4 |
self.emg_rld_config
)
# LOFF_SENS and LOFF_STAT bytes
self.chip1_emg_regs[6], self.chip1_emg_regs[7] = 0b0, 0b0
# RESP1 byte
self.chip1_emg_regs[8] = (
0b10
)
# RESP 2 byte - RLDREF_INT to internal
self.chip1_emg_regs[9] = (
self.emg_rldref_int_config['internal'] << 1 | 0b1
)
# rospy.logdebug(
# f'EMG register settings for chip {chip_num+1}:\n'
# f'\tCONFIG1: {self.chip1_emg_regs[0]:08b}\n'
# f'\tCONFIG2: {self.chip1_emg_regs[1]:08b}\n'
# f'\tLOFF: {self.chip1_emg_regs[2]:08b}\n'
# f'\tCH1SET: {self.chip1_emg_regs[3]:08b}\n'
# f'\tCH2SET: {self.chip1_emg_regs[4]:08b}\n'
# f'\tRLD_SENS: {self.chip1_emg_regs[5]:08b}\n'
# f'\tLOFF_SENS: {self.chip1_emg_regs[6]:08b}\n'
# f'\tLOFF_STAT: {self.chip1_emg_regs[7]:08b}\n'
# f'\tRESP1: {self.chip1_emg_regs[8]:08b}\n'
# f'\tRESP2: {self.chip1_emg_regs[9]:08b}\n'
# )
# Write configuration to chip 1 register
if self.ser.is_open:
# Send set registers command, chip to write to, starting byte,
# number of bytes to write and unpack a list with registers to write
self.ser.write(struct.pack(
'B'*14, self.packet_type['SET_EXG_REGS_COMMAND'], chip_num, 0, 10,
*self.chip1_emg_regs
))
self.wait_for_ack()
if echo:
rospy.loginfo(f'Chip {chip_num+1} EMG register settings written!')
return self.chip1_emg_regs
else:
raise serial.SerialException
def start_streaming_EMG(self, publisher, test_signal=False, echo=True):
"""
        Stream the EMG signal from both channels to the given publisher:
        configure the chip 1 registers, then stream until node shutdown.
        Args:
            publisher: rospy publisher the Emg messages are sent to.
            test_signal (bool, optional): stream the built-in test signal instead
                of the acquired EMG signal. Defaults to False.
            echo (bool, optional): print info to console. Defaults to True.
"""
# Calibration constants and name
self.calibration_constant = self.adc_sensitivity / self.emg_gain
calibration_constant = self.calibration_constant
adc_offset_ch1 = self.adc_offset_ch1
adc_offset_ch2 = self.adc_offset_ch2
shimmer_name = self.shimmer_name
# Set the chip 1 configuration registers
self.set_emg_registers(test_signal=test_signal)
self.get_emg_registers(chip_num=0, echo=echo)
# Read incoming data
# 1 byte packet type, 3 bytes timestamp, 14 bytes EMG data
framesize = 18
# Firmware clock in 1/32768 sec
clock_step = 32768
# ROS message
emg_data = Emg()
emg_data.header.frame_id = shimmer_name
# Iterator
sig_iter = 0
# Send start streaming command
self.send_streaming_command('start', echo=True)
# Sleep for 0.5 sec before data acquisition to make sure clock ticks are
# sufficiently large
rospy.sleep(0.5)
try:
while not rospy.is_shutdown():
data = self.ser.read(size=framesize)
if data[0] == self.packet_type['DATA_PACKET']:
# Convert bytes to internal clock_ticks
clock_ticks = int.from_bytes(data[1:4], 'little')
# Resync with ROS time or handle clock_tick overflow
                    # after the 3-byte max uint (16777215)
# max time: 511.9999 sec or 8.5333 min
                    # (relies on OR short-circuiting so clock_ticks_ref is only read once it exists)
if sig_iter == 0 or clock_ticks <= clock_ticks_ref:
# Set starting time
time_start = rospy.Time.now()
# Set now clock ticks to zero reference
clock_ticks_ref = clock_ticks
# Convert to duration in secs and nsecs
duration = rospy.Duration.from_sec(
(clock_ticks - clock_ticks_ref) / clock_step
)
emg_data.header.stamp = time_start + duration
# Convert chip 1, channel 1 and 2 data to integer values
c1ch1 = int.from_bytes(data[5:8], byteorder='big', signed=True)
c1ch2 = int.from_bytes(data[8:11], byteorder='big', signed=True)
# Calibrate emg channels using constant and offset:
c1ch1 = c1ch1 * calibration_constant - adc_offset_ch1
c1ch2 = c1ch2 * calibration_constant - adc_offset_ch2
# Publish acquired data to topic
emg_data.emg_ch1 = c1ch1
emg_data.emg_ch2 = c1ch2
emg_data.header.seq = sig_iter
publisher.publish(emg_data)
# Increment ID
sig_iter += 1
else:
                    rospy.logerr(f'Did not receive DATA_PACKET: {data}')
except rospy.ROSInterruptException:
# Send stop streaming command
self.send_streaming_command('stop', echo=True)
# Necessary to wait a bit before flushing input
rospy.sleep(0.1)
self.ser.reset_input_buffer()
raise rospy.ROSInterruptException
# Send stop streaming command
self.send_streaming_command('stop', echo=True)
# Necessary to wait a bit before flushing input
rospy.sleep(0.1)
self.ser.reset_input_buffer()
return
def send_streaming_command(self, command='start', echo=False):
"""Send start streaming command to Shimmer3"""
if command == 'start':
command_type = 'START_STREAMING_COMMAND'
print_msg = 'Starting data stream!'
else:
command_type = 'STOP_STREAMING_COMMAND'
print_msg = 'Stopping data stream!'
if self.ser.is_open:
self.ser.write(struct.pack(
'B', self.packet_type[command_type]
))
self.wait_for_ack()
if echo: rospy.loginfo(print_msg)
else:
raise serial.SerialException
def calibrate_test_signal(self, duration=5, echo=True):
"""Calibrate ADC_offset using square test signal"""
# Calibration constant
self.calibration_constant = self.adc_sensitivity / self.emg_gain
calibration_constant = self.calibration_constant
if echo:
rospy.loginfo(f'Calibration constant: {self.calibration_constant:.6e}')
if duration <= 0:
return
# Length of signal should correspond to duration in seconds
signal_length = round(self.sampling_rate * duration)
# Set the chip 1 configuration registers to stream test signal
self.set_emg_registers(test_signal=True)
self.get_emg_registers(chip_num=0, echo=echo)
if echo:
rospy.loginfo(
f'Performing signal calibration ... please wait '
f'{duration} s!'
)
# Send start streaming command
self.send_streaming_command('start', echo=True)
# Read incoming data
# 1 byte packet type, 3 bytes timestamp, 14 bytes EMG data
framesize = 18
# Queue for faster data acquisition
c1ch1_q, c1ch2_q = deque(maxlen=int(1e6)), deque(maxlen=int(1e6))
try:
for _ in range(signal_length):
data = self.ser.read(size=framesize)
if data[0] == self.packet_type['DATA_PACKET']:
# Convert chip 1, channel 1 and 2 data to integer values
c1ch1 = int.from_bytes(data[5:8], byteorder='big', signed=True)
c1ch2 = int.from_bytes(data[8:11], byteorder='big', signed=True)
# Calibrate emg channels using constant and offset:
c1ch1 = c1ch1 * calibration_constant
c1ch2 = c1ch2 * calibration_constant
# Append results to deque
c1ch1_q.append(c1ch1)
c1ch2_q.append(c1ch2)
else:
                    rospy.logerr(f'Did not receive DATA_PACKET: {data}')
except rospy.ROSInterruptException:
# Send stop streaming command
self.send_streaming_command('stop', echo=echo)
# Necessary to wait a bit before flushing input
rospy.sleep(0.2)
self.ser.reset_input_buffer()
raise rospy.ROSInterruptException
# Send stop streaming command
self.send_streaming_command('stop', echo=echo)
# Necessary to wait a bit before flushing input
rospy.sleep(0.2)
self.ser.reset_input_buffer()
# Retain only values with amplitude near -1 and 1
c1ch1 = [i for i in c1ch1_q if abs(abs(i) - 1) < 0.1]
c1ch2 = [i for i in c1ch2_q if abs(abs(i) - 1) < 0.1]
# Keep same number of positive and negative values in list
pos_count = len([i for i in c1ch1 if i >= 0])
while pos_count != len(c1ch1) - pos_count:
c1ch1.pop()
pos_count = len([i for i in c1ch1 if i >= 0])
pos_count = len([i for i in c1ch2 if i >= 0])
while pos_count != len(c1ch2) - pos_count:
c1ch2.pop()
pos_count = len([i for i in c1ch2 if i >= 0])
# Calculate ADC offset as signal mean value
self.adc_offset_ch1 = sum(c1ch1) / len(c1ch1)
self.adc_offset_ch2 = sum(c1ch2) / len(c1ch2)
self.signal_calibrated = True
if echo:
rospy.loginfo('Calibration done!')
rospy.loginfo(f'ADC offset for Channel 1: {self.adc_offset_ch1:.6f}')
rospy.loginfo(f'ADC offset for Channel 2: {self.adc_offset_ch2:.6f}')
return
def synchronise_system_time(self, ntimes=3, echo=False):
"""Get real world clock from shimmer"""
# Firmware clock in 1/32768 sec
if echo: rospy.loginfo('Synchronising Shimmer time with system time!')
clock_step = 32768
rate = rospy.Rate(2) # 2 Hz
if self.ser.is_open:
for i in range(ntimes):
# Capture ROS time to timestamp
system_ts = rospy.Time.now()
self.ser.write(struct.pack(
'B', self.packet_type['GET_RWC_COMMAND']
))
self.wait_for_ack()
# Read 9 bytes - first response and 8 system time in clock time
data = self.ser.read(9)
if data[0] == self.packet_type['RWC_RESPONSE']:
# Convert bytes to internal clock_ticks
clock_ticks = int.from_bytes(data[1:], 'little')
# Convert to timestamp in secs
timestamp = clock_ticks / clock_step
dt = datetime.fromtimestamp(timestamp)
# Convert ROS time to datetime
system_dt = datetime.fromtimestamp(system_ts.to_sec())
system_timestamp = system_dt.timestamp()
if echo: rospy.loginfo(
f'Shimmer3 time: {dt},\t timestamp: {timestamp},\t'
)
if echo: rospy.loginfo(
f'System time: {system_dt},\t timestamp: {system_timestamp}\t'
)
# Sending set real world clock command
self.ser.write(struct.pack(
'B'*9,
self.packet_type['SET_RWC_COMMAND'],
*((round(rospy.Time.now().to_sec()*clock_step)).to_bytes(8, 'little'))
))
self.wait_for_ack()
# Wait till specific rate finished
rate.sleep()
else:
                    rospy.logerr('Did not receive RWC_RESPONSE!')
else:
raise serial.SerialException
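# ------------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original driver).
# The class name `ShimmerEMG`, the constructor arguments, the message import and
# the topic name below are assumptions -- substitute the class defined above and
# your own ROS message/topic. Only methods defined in this driver are called.
#
#     import rospy
#     rospy.init_node('shimmer_emg_node')
#     pub = rospy.Publisher('emg', Emg, queue_size=10)
#     shimmer = ShimmerEMG(port='/dev/rfcomm0')   # hypothetical constructor call
#     with shimmer:                               # __enter__ opens the serial port
#         shimmer.get_id_and_rev()
#         shimmer.get_shimmer_name()
#         shimmer.set_sampling_rate(512)
#         shimmer.set_emg_gain(12)                # falls back to default if invalid
#         shimmer.set_emg_data_rate(1000)         # falls back if not a valid rate
#         shimmer.power_down_chip_2()
#         shimmer.activate_emg_sensors()
#         shimmer.synchronise_system_time()
#         shimmer.calibrate_test_signal(duration=5)
#         shimmer.start_streaming_EMG(pub, test_signal=False)
# ------------------------------------------------------------------------------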
| 36.420915
| 91
| 0.637068
|
5e3e4d6b32e2e93db94e791685ab0d4c9eb9c4cb
| 3,238
|
py
|
Python
|
lib/surface/compute/copy_files.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/copy_files.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/copy_files.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:13:29.000Z
|
2020-07-24T20:13:29.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the command for copying files from and to virtual machines."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import scp_utils
DEPRECATION_WARNING = (
'`gcloud compute copy-files` is deprecated. Please use `gcloud compute '
'scp` instead. Note that `gcloud compute scp` does not have recursive '
'copy on by default. To turn on recursion, use the `--recurse` flag.')
@base.Deprecate(is_removed=False, warning=DEPRECATION_WARNING)
class CopyFiles(base.Command):
"""Copy files to and from Google Compute Engine virtual machines via scp."""
@staticmethod
def Args(parser):
"""Set up arguments for this command.
Args:
parser: An argparse.ArgumentParser.
"""
scp_utils.BaseScpHelper.Args(parser)
def Run(self, args):
"""See scp_utils.BaseScpCommand.Run."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
cua_holder = base_classes.ComputeUserAccountsApiHolder(self.ReleaseTrack())
scp_helper = scp_utils.BaseScpHelper()
return scp_helper.RunScp(holder, cua_holder, args, recursive=True,
release_track=self.ReleaseTrack())
# pylint:disable=line-too-long
CopyFiles.detailed_help = {
'DESCRIPTION':
"""\
*{command}* copies files between a virtual machine instance and your
local machine using the scp command. This command does not work for
Windows VMs.
To denote a remote file, prefix the file name with the virtual machine
instance name (e.g., _example-instance_:~/_FILE_). To denote a local
file, do not add a prefix to the file name (e.g., ~/_FILE_). For
example, to copy a remote directory to your local host, run:
$ {command} example-instance:~/REMOTE-DIR ~/LOCAL-DIR --zone us-central1-a
In the above example, `~/REMOTE-DIR` from `example-instance` is copied
into the ~/_LOCAL-DIR_ directory.
Conversely, files from your local computer can be copied to a virtual
machine:
$ {command} ~/LOCAL-FILE-1 ~/LOCAL-FILE-2 example-instance:~/REMOTE-DIR --zone us-central1-a
If a file contains a colon (``:''), you must specify it by either using
an absolute path or a path that begins with
``./''.
Under the covers, *scp(1)* or pscp (on Windows) is used to facilitate
the transfer.
When the destination is local, all sources must be the same virtual
machine instance. When the destination is remote, all sources must be
local.
"""
}
| 38.547619
| 102
| 0.698579
|
93eb30d1d6c8e7b5521e8e494b86e6e2fc420f59
| 709
|
py
|
Python
|
prove 2 - Numeriske metoder/Oppgave 5.py
|
theodorklauritzen/ProgMod-Innleveringer
|
c7f7d69906c483ed447f152a82c1f15c38e7ab9f
|
[
"MIT"
] | 1
|
2020-10-12T01:13:00.000Z
|
2020-10-12T01:13:00.000Z
|
prove 2 - Numeriske metoder/Oppgave 5.py
|
theodorklauritzen/ProgMod-Innleveringer
|
c7f7d69906c483ed447f152a82c1f15c38e7ab9f
|
[
"MIT"
] | null | null | null |
prove 2 - Numeriske metoder/Oppgave 5.py
|
theodorklauritzen/ProgMod-Innleveringer
|
c7f7d69906c483ed447f152a82c1f15c38e7ab9f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 09:21:34 2020
@author: xuper
"""
from pylab import *
k = 0.1 # Constant controlling how quickly the temperature changes
T_O = 21 # Ambient temperature (C)
T_0 = 75 # Initial temperature (C)
t_S = 60 # End time (min)
N = 10000 # Number of iterations
dt = (t_S) / N # Time step
# Arrays
tid = zeros(N) # Time (min)
T = zeros(N) # Temperature (C)
# Initial conditions
T[0] = T_0
# Euler's method
for i in range(N - 1):
tid[i + 1] = tid[i] + dt
T[i + 1] = T[i] - k * (T[i] - T_O) * dt
# Plot the result
plot(tid, T)
title("Temperaturen i kaffekoppen")
xlabel("Tid (min)")
ylabel("Temperatur (C)")
show()
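# Sanity check (added sketch, not part of the original assignment): Newton's law
# of cooling, dT/dt = -k*(T - T_O), has the exact solution
# T(t) = T_O + (T_0 - T_O)*exp(-k*t); the Euler approximation above should
# approach it as N grows.
T_exact = T_O + (T_0 - T_O) * exp(-k * tid)
print("Largest Euler error (C):", max(abs(T - T_exact)))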
| 19.162162
| 78
| 0.589563
|
847c9a97d58499ff39e071439e872e223f6603a2
| 940
|
py
|
Python
|
setup.py
|
heiparta/pylightxl
|
2ac7bcbeecd378313543e41afea38183d490cc2b
|
[
"MIT"
] | null | null | null |
setup.py
|
heiparta/pylightxl
|
2ac7bcbeecd378313543e41afea38183d490cc2b
|
[
"MIT"
] | null | null | null |
setup.py
|
heiparta/pylightxl
|
2ac7bcbeecd378313543e41afea38183d490cc2b
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pylightxl", # Replace with your own username
version="1.51",
author="Viktor Kis",
author_email="realpydpiper@gmail.com",
license="MIT",
description="A light weight excel read/writer for python27 and python3 with no dependencies",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/PydPiper/pylightxl",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=2.7',
)
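# Packaging note (added; not part of the original setup.py): with this
# configuration the package can typically be installed from a source checkout
# with `python -m pip install .`, or built into sdist/wheel archives with
# `python -m build` (or the older `python setup.py sdist bdist_wheel`).
# These are standard setuptools/pip commands, not project-specific tooling.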
| 34.814815
| 97
| 0.652128
|
97974360a454b581eb63bdfd2af2e2afa05596c7
| 37,226
|
py
|
Python
|
examples/speech_recognition/models/vggtransformer.py
|
fairseq-FT/fairseq
|
18725499144c1bba7c151b796ba774e59d36eaa9
|
[
"MIT"
] | 115
|
2021-08-25T14:58:12.000Z
|
2022-03-21T11:25:36.000Z
|
examples/speech_recognition/models/vggtransformer.py
|
fairseq-FT/fairseq
|
18725499144c1bba7c151b796ba774e59d36eaa9
|
[
"MIT"
] | 10
|
2021-11-14T12:28:48.000Z
|
2022-02-28T14:13:40.000Z
|
examples/speech_recognition/models/vggtransformer.py
|
fairseq-FT/fairseq
|
18725499144c1bba7c151b796ba774e59d36eaa9
|
[
"MIT"
] | 14
|
2021-05-17T06:55:01.000Z
|
2022-03-28T12:07:42.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LinearizedConvolution,
TransformerDecoderLayer,
TransformerEncoderLayer,
VGGBlock,
)
@register_model("asr_vggtransformer")
class VGGTransformerModel(FairseqEncoderDecoderModel):
"""
Transformers with convolutional context for ASR
https://arxiv.org/abs/1904.11660
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock:
[(out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
use_layer_norm), ...])
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help=""""
a tuple containing the configuration of the encoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]')
""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
        encoder output dimension; can be None. If specified, the transformer
        output is projected to this dimension""",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--tgt-embed-dim",
type=int,
metavar="N",
help="embedding dimension of the decoder target tokens",
)
parser.add_argument(
"--transformer-dec-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the decoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]
""",
)
parser.add_argument(
"--conv-dec-config",
type=str,
metavar="EXPR",
help="""
an array of tuples for the decoder 1-D convolution config
[(out_channels, conv_kernel_size, use_layer_norm), ...]""",
)
@classmethod
def build_encoder(cls, args, task):
return VGGTransformerEncoder(
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
)
@classmethod
def build_decoder(cls, args, task):
return TransformerDecoder(
dictionary=task.target_dictionary,
embed_dim=args.tgt_embed_dim,
transformer_config=eval(args.transformer_dec_config),
conv_config=eval(args.conv_dec_config),
encoder_output_dim=args.enc_output_dim,
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
base_architecture(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2
DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2
# 256: embedding dimension
# 4: number of heads
# 1024: FFN
# True: apply layerNorm before (dropout + residual) instead of after
# 0.2 (dropout): dropout after MultiheadAttention and second FC
# 0.2 (attention_dropout): dropout in MultiheadAttention
# 0.2 (relu_dropout): dropout after ReLu
DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2
DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2
# TODO: replace transformer encoder config from one liner
# to explicit args to get rid of this transformation
def prepare_transformer_encoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = input_dim
args.encoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.encoder_normalize_before = normalize_before
args.encoder_ffn_embed_dim = ffn_dim
return args
def prepare_transformer_decoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.decoder_embed_dim = input_dim
args.decoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.decoder_normalize_before = normalize_before
args.decoder_ffn_embed_dim = ffn_dim
return args
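# Illustration (added; not part of the original module): each per-layer tuple in
# the transformer config strings above unpacks directly into these helpers, e.g.
# the first default encoder layer (256, 4, 1024, True, 0.2, 0.2, 0.2) becomes an
# argparse.Namespace with encoder_embed_dim=256, encoder_attention_heads=4,
# encoder_ffn_embed_dim=1024, encoder_normalize_before=True and all three
# dropout values set to 0.2.
if __name__ == "__main__":
    _demo_args = prepare_transformer_encoder_params(*DEFAULT_ENC_TRANSFORMER_CONFIG[0])
    print(_demo_args.encoder_embed_dim, _demo_args.encoder_attention_heads,
          _demo_args.encoder_ffn_embed_dim)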
class VGGTransformerEncoder(FairseqEncoder):
"""VGG + Transformer encoder"""
def __init__(
self,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
"""constructor for VGGTransformerEncoder
Args:
- input_feat_per_channel: feature dim (not including stacked,
just base feature)
        - in_channel: # input channels (e.g., if 8 feature vectors are stacked
            together, this is 8)
- vggblock_config: configuration of vggblock, see comments on
DEFAULT_ENC_VGGBLOCK_CONFIG
- transformer_config: configuration of transformer layer, see comments
on DEFAULT_ENC_TRANSFORMER_CONFIG
- encoder_output_dim: final transformer output embedding dimension
- transformer_context: (left, right) if set, self-attention will be focused
on (t-left, t+right)
- transformer_sampling: an iterable of int, must match with
len(transformer_config), transformer_sampling[i] indicates sampling
            factor for i-th transformer layer, after multihead att and feedforward
part
"""
super().__init__(None)
self.num_vggblocks = 0
if vggblock_config is not None:
if not isinstance(vggblock_config, Iterable):
raise ValueError("vggblock_config is not iterable")
self.num_vggblocks = len(vggblock_config)
self.conv_layers = nn.ModuleList()
self.in_channels = in_channels
self.input_dim = input_feat_per_channel
self.pooling_kernel_sizes = []
if vggblock_config is not None:
for _, config in enumerate(vggblock_config):
(
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
layer_norm,
) = config
self.conv_layers.append(
VGGBlock(
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim=input_feat_per_channel,
layer_norm=layer_norm,
)
)
self.pooling_kernel_sizes.append(pooling_kernel_size)
in_channels = out_channels
input_feat_per_channel = self.conv_layers[-1].output_dim
transformer_input_dim = self.infer_conv_output_dim(
self.in_channels, self.input_dim
)
# transformer_input_dim is the output dimension of VGG part
self.validate_transformer_config(transformer_config)
self.transformer_context = self.parse_transformer_context(transformer_context)
self.transformer_sampling = self.parse_transformer_sampling(
transformer_sampling, len(transformer_config)
)
self.transformer_layers = nn.ModuleList()
if transformer_input_dim != transformer_config[0][0]:
self.transformer_layers.append(
Linear(transformer_input_dim, transformer_config[0][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.transformer_layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[i])
)
)
self.encoder_output_dim = encoder_output_dim
self.transformer_layers.extend(
[
Linear(transformer_config[-1][0], encoder_output_dim),
LayerNorm(encoder_output_dim),
]
)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
bsz, max_seq_len, _ = src_tokens.size()
x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
x = x.transpose(1, 2).contiguous()
# (B, C, T, feat)
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
bsz, _, output_seq_len, _ = x.size()
# (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat)
x = x.transpose(1, 2).transpose(0, 1)
x = x.contiguous().view(output_seq_len, bsz, -1)
input_lengths = src_lengths.clone()
for s in self.pooling_kernel_sizes:
input_lengths = (input_lengths.float() / s).ceil().long()
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor)
transformer_layer_idx = 0
for layer_idx in range(len(self.transformer_layers)):
if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer):
x = self.transformer_layers[layer_idx](
x, encoder_padding_mask, attn_mask
)
if self.transformer_sampling[transformer_layer_idx] != 1:
sampling_factor = self.transformer_sampling[transformer_layer_idx]
x, encoder_padding_mask, attn_mask = self.slice(
x, encoder_padding_mask, attn_mask, sampling_factor
)
transformer_layer_idx += 1
else:
x = self.transformer_layers[layer_idx](x)
        # encoder_padding_mask is a (T x B) tensor, its [t, b] elements indicate
# whether encoder_output[t, b] is valid or not (valid=0, invalid=1)
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": encoder_padding_mask.t()
if encoder_padding_mask is not None
else None,
# (B, T) --> (T, B)
}
def infer_conv_output_dim(self, in_channels, input_dim):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
for i, _ in enumerate(self.conv_layers):
x = self.conv_layers[i](x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
def validate_transformer_config(self, transformer_config):
for config in transformer_config:
input_dim, num_heads = config[:2]
if input_dim % num_heads != 0:
msg = (
"ERROR in transformer config {}: ".format(config)
+ "input dimension {} ".format(input_dim)
+ "not dividable by number of heads {}".format(num_heads)
)
raise ValueError(msg)
def parse_transformer_context(self, transformer_context):
"""
transformer_context can be the following:
- None; indicates no context is used, i.e.,
transformer can access full context
- a tuple/list of two int; indicates left and right context,
any number <0 indicates infinite context
* e.g., (5, 6) indicates that for query at x_t, transformer can
access [t-5, t+6] (inclusive)
* e.g., (-1, 6) indicates that for query at x_t, transformer can
access [0, t+6] (inclusive)
"""
if transformer_context is None:
return None
if not isinstance(transformer_context, Iterable):
raise ValueError("transformer context must be Iterable if it is not None")
if len(transformer_context) != 2:
raise ValueError("transformer context must have length 2")
left_context = transformer_context[0]
if left_context < 0:
left_context = None
right_context = transformer_context[1]
if right_context < 0:
right_context = None
if left_context is None and right_context is None:
return None
return (left_context, right_context)
def parse_transformer_sampling(self, transformer_sampling, num_layers):
"""
parsing transformer sampling configuration
Args:
- transformer_sampling, accepted input:
* None, indicating no sampling
* an Iterable with int (>0) as element
- num_layers, expected number of transformer layers, must match with
the length of transformer_sampling if it is not None
Returns:
- A tuple with length num_layers
"""
if transformer_sampling is None:
return (1,) * num_layers
if not isinstance(transformer_sampling, Iterable):
raise ValueError(
"transformer_sampling must be an iterable if it is not None"
)
if len(transformer_sampling) != num_layers:
raise ValueError(
"transformer_sampling {} does not match with the number "
"of layers {}".format(transformer_sampling, num_layers)
)
for layer, value in enumerate(transformer_sampling):
if not isinstance(value, int):
raise ValueError("Invalid value in transformer_sampling: ")
if value < 1:
raise ValueError(
"{} layer's subsampling is {}.".format(layer, value)
+ " This is not allowed! "
)
return transformer_sampling
def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
"""
embedding is a (T, B, D) tensor
padding_mask is a (B, T) tensor or None
attn_mask is a (T, T) tensor or None
"""
embedding = embedding[::sampling_factor, :, :]
if padding_mask is not None:
padding_mask = padding_mask[:, ::sampling_factor]
if attn_mask is not None:
attn_mask = attn_mask[::sampling_factor, ::sampling_factor]
return embedding, padding_mask, attn_mask
def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
"""
create attention mask according to sequence lengths and transformer
context
Args:
- input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is
the length of b-th sequence
- subsampling_factor: int
* Note that the left_context and right_context is specified in
the input frame-level while input to transformer may already
go through subsampling (e.g., the use of striding in vggblock)
we use subsampling_factor to scale the left/right context
Return:
- a (T, T) binary tensor or None, where T is max(input_lengths)
* if self.transformer_context is None, None
* if left_context is None,
* attn_mask[t, t + right_context + 1:] = 1
* others = 0
* if right_context is None,
* attn_mask[t, 0:t - left_context] = 1
* others = 0
            * otherwise (both contexts set),
* attn_mask[t, t - left_context: t + right_context + 1] = 0
* others = 1
"""
if self.transformer_context is None:
return None
maxT = torch.max(input_lengths).item()
attn_mask = torch.zeros(maxT, maxT)
left_context = self.transformer_context[0]
right_context = self.transformer_context[1]
if left_context is not None:
left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
if right_context is not None:
right_context = math.ceil(self.transformer_context[1] / subsampling_factor)
for t in range(maxT):
if left_context is not None:
st = 0
en = max(st, t - left_context)
attn_mask[t, st:en] = 1
if right_context is not None:
st = t + right_context + 1
st = min(st, maxT - 1)
attn_mask[t, st:] = 1
return attn_mask.to(input_lengths.device)
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs.
Default: ``False``
left_pad (bool, optional): whether the input is left-padded. Default:
``False``
"""
def __init__(
self,
dictionary,
embed_dim=512,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
conv_config=DEFAULT_DEC_CONV_CONFIG,
encoder_output_dim=512,
):
super().__init__(dictionary)
vocab_size = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx)
self.conv_layers = nn.ModuleList()
for i in range(len(conv_config)):
out_channels, kernel_size, layer_norm = conv_config[i]
if i == 0:
conv_layer = LinearizedConv1d(
embed_dim, out_channels, kernel_size, padding=kernel_size - 1
)
else:
conv_layer = LinearizedConv1d(
conv_config[i - 1][0],
out_channels,
kernel_size,
padding=kernel_size - 1,
)
self.conv_layers.append(conv_layer)
if layer_norm:
self.conv_layers.append(nn.LayerNorm(out_channels))
self.conv_layers.append(nn.ReLU())
self.layers = nn.ModuleList()
if conv_config[-1][0] != transformer_config[0][0]:
self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0]))
self.layers.append(
TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.layers.append(
TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[i])
)
)
self.fc_out = Linear(transformer_config[-1][0], vocab_size)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
target_padding_mask = (
(prev_output_tokens == self.padding_idx).to(prev_output_tokens.device)
if incremental_state is None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
# embed tokens
x = self.embed_tokens(prev_output_tokens)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
for layer in self.conv_layers:
if isinstance(layer, LinearizedConvolution):
x = layer(x, incremental_state)
else:
x = layer(x)
# B x T x C -> T x B x C
x = self._transpose_if_inference(x, incremental_state)
# decoder layers
for layer in self.layers:
if isinstance(layer, TransformerDecoderLayer):
x, *_ = layer(
x,
(encoder_out["encoder_out"] if encoder_out is not None else None),
(
encoder_out["encoder_padding_mask"].t()
if encoder_out["encoder_padding_mask"] is not None
else None
),
incremental_state,
self_attn_mask=(
self.buffered_future_mask(x)
if incremental_state is None
else None
),
self_attn_padding_mask=(
target_padding_mask if incremental_state is None else None
),
)
else:
x = layer(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.fc_out(x)
return x, None
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def _transpose_if_inference(self, x, incremental_state):
if incremental_state:
x = x.transpose(0, 1)
return x
@register_model("asr_vggtransformer_encoder")
class VGGTransformerEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock
            [(out_channels, conv_kernel_size, pooling_kernel_size, num_conv_layers), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the Transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ]""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="encoder output dimension, projecting the LSTM output",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--transformer-context",
type=str,
metavar="EXPR",
help="""
either None or a tuple of two ints, indicating left/right context a
transformer can have access to""",
)
parser.add_argument(
"--transformer-sampling",
type=str,
metavar="EXPR",
help="""
either None or a tuple of ints, indicating sampling factor in each layer""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
base_architecture_enconly(args)
encoder = VGGTransformerEncoderOnly(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
transformer_context=eval(args.transformer_context),
transformer_sampling=eval(args.transformer_sampling),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (T, B, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
# lprobs is a (T, B, D) tensor
        # we need to transpose to get a (B, T, D) tensor
lprobs = lprobs.transpose(0, 1).contiguous()
lprobs.batch_first = True
return lprobs
class VGGTransformerEncoderOnly(VGGTransformerEncoder):
def __init__(
self,
vocab_size,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
super().__init__(
input_feat_per_channel=input_feat_per_channel,
vggblock_config=vggblock_config,
transformer_config=transformer_config,
encoder_output_dim=encoder_output_dim,
in_channels=in_channels,
transformer_context=transformer_context,
transformer_sampling=transformer_sampling,
)
self.fc_out = Linear(self.encoder_output_dim, vocab_size)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
enc_out = super().forward(src_tokens, src_lengths)
x = self.fc_out(enc_out["encoder_out"])
# x = F.log_softmax(x, dim=-1)
        # Note: this line is not needed, because model.get_normalized_probs will call
# log_softmax
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B)
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
# nn.init.uniform_(m.weight, -0.1, 0.1)
# nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
# m.weight.data.uniform_(-0.1, 0.1)
# if bias:
# m.bias.data.uniform_(-0.1, 0.1)
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def LayerNorm(embedding_dim):
m = nn.LayerNorm(embedding_dim)
return m
# seq2seq models
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.transformer_dec_config = getattr(
args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG)
args.transformer_context = getattr(args, "transformer_context", "None")
@register_model_architecture("asr_vggtransformer", "vggtransformer_1")
def vggtransformer_1(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_2")
def vggtransformer_2(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_base")
def vggtransformer_base(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"
)
# Size estimations:
# Encoder:
# - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3 = 258K
# Transformer:
# - input dimension adapter: 2560 x 512 -> 1.31M
# - transformer_layers (x12) --> 37.74M
# * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
# * FFN weight: 512*2048*2 = 2.097M
# - output dimension adapter: 512 x 512 -> 0.26 M
# Decoder:
# - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
# - transformer_layer: (x6) --> 25.16M
# * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
# * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
# * FFN: 512*2048*2 = 2.097M
# Final FC:
# - FC: 512*5000 = 256K (assuming vocab size 5K)
# In total:
# ~65 M
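# Added arithmetic check of the per-layer figures above (weights only; biases
# and layer norms ignored):
# - encoder layer: 512*512*4 (attention) + 512*2048*2 (FFN) ~= 1.05M + 2.10M
#   ~= 3.15M, so 12 layers ~= 37.7M as stated
# - decoder layer: 2 * 512*512*4 (self- and encoder-attention) + 512*2048*2
#   ~= 2.10M + 2.10M ~= 4.19M, so 6 layers ~= 25.2M as stated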
# CTC models
def base_architecture_enconly(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.transformer_context = getattr(args, "transformer_context", "None")
args.transformer_sampling = getattr(args, "transformer_sampling", "None")
@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1")
def vggtransformer_enc_1(args):
# vggtransformer_1 is the same as vggtransformer_enc_big, except the number
# of layers is increased to 16
    # keep it here for backward compatibility purposes
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
| 36.496078
| 88
| 0.604524
|
44d71a9ff43fb09304caf7cd1362833bdf6222c0
| 5,855
|
py
|
Python
|
sphinx/source/conf.py
|
tonimaxx/unicorn-binance-websocket-api
|
a5ba60a5711a60ec9d1db7a2e0bd02306340476d
|
[
"MIT"
] | 1
|
2021-09-29T22:54:59.000Z
|
2021-09-29T22:54:59.000Z
|
sphinx/source/conf.py
|
tonimaxx/unicorn-binance-websocket-api
|
a5ba60a5711a60ec9d1db7a2e0bd02306340476d
|
[
"MIT"
] | null | null | null |
sphinx/source/conf.py
|
tonimaxx/unicorn-binance-websocket-api
|
a5ba60a5711a60ec9d1db7a2e0bd02306340476d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'unicorn-binance-websocket-api'
copyright = '2021, Oliver Zehentleitner'
author = 'Oliver Zehentleitner'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.34.0'
html_last_updated_fmt = "%b %d %Y at %H:%M (CET)"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.githubpages',
'recommonmark',
'sphinx_markdown_tables'
]
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'python_docs_theme_technopathy'
html_context = {'github_user_name': 'oliver-zehentleitner',
'github_repo_name': 'unicorn-binance-websocket-api',
'project_name': project}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.docs', 'relations.docs', 'sourcelink.docs',
# 'searchbox.docs']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'unicorn-binance-websocket-apidoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'unicorn-binance-websocket-api.tex', 'unicorn-binance-websocket-api Documentation',
'Oliver Zehentleitner', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'unicorn-binance-websocket-api', 'unicorn-binance-websocket-api Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'unicorn-binance-websocket-api', 'unicorn-binance-websocket-api Documentation',
author, 'unicorn-binance-websocket-api', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.docs']
# -- Extension configuration -------------------------------------------------
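# Hedged addendum (not part of the stock Sphinx template above): with this conf.py in the
# docs source directory, the HTML docs are typically built with
# `sphinx-build -b html . _build/html` (or `make html` if the default Makefile is present);
# the directory layout is an assumption inferred from the sys.path line above.
# A tiny sanity check that is harmless when Sphinx executes this file:
import re as _re
if not _re.match(r"^\d+\.\d+\.\d+", release):
    print("WARNING: unexpected release string: %r" % release)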
avg_line_length: 30.494792 | max_line_length: 100 | alphanum_fraction: 0.65585

hexsha: b2dad8a58f08c025226850d3acb70b751ae74d55 | size: 1,797 | ext: py | lang: Python
max_stars_repo_path: thelper/nn/sr/srcnn.py | max_stars_repo_name: crim-ca/thelper | max_stars_repo_head_hexsha: 1415144cf70e4492c2ef00f834e2b9a988064a76 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: thelper/nn/sr/srcnn.py | max_issues_repo_name: crim-ca/thelper | max_issues_repo_head_hexsha: 1415144cf70e4492c2ef00f834e2b9a988064a76 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: thelper/nn/sr/srcnn.py | max_forks_repo_name: crim-ca/thelper | max_forks_repo_head_hexsha: 1415144cf70e4492c2ef00f834e2b9a988064a76 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-02-17T14:14:46.000Z | max_forks_repo_forks_event_max_datetime: 2020-02-17T14:14:46.000Z
content:
import thelper.nn
class SRCNN(thelper.nn.Module):
"""Implements the SRCNN architecture.
See Dong et al., "Image Super-Resolution Using Deep Convolutional Networks" (2014) for more
information (https://arxiv.org/abs/1501.00092).
"""
def __init__(self, task, num_channels=1, base_filter=64, groups=1):
# note: must always forward args to base class to keep backup
super(SRCNN, self).__init__(task, num_channels=num_channels, base_filter=base_filter, groups=groups)
self.conv1 = thelper.nn.common.ConvBlock(num_channels, base_filter * groups, kernel_size=9,
stride=1, padding=0, activation="relu", norm=None, groups=groups)
self.conv2 = thelper.nn.common.ConvBlock(base_filter * groups, base_filter // 2 * groups, kernel_size=5,
stride=1, padding=0, activation="relu", norm=None, groups=groups)
self.conv3 = thelper.nn.common.ConvBlock((base_filter // 2) * groups, num_channels, kernel_size=5,
stride=1, padding=0, activation=None, norm=None, groups=groups)
self.set_task(task)
def forward(self, x):
x0 = x.view(x.shape[0] * x.shape[1], 1, x.shape[2], x.shape[3])
x0 = self.conv1(x0)
x0 = self.conv2(x0)
x0 = self.conv3(x0)
x0 = x0.view(x.shape[0], x.shape[1], x0.shape[2], x0.shape[3])
return x0
def weight_init(self):
for m in self.modules():
thelper.nn.common.weights_init_xavier(m)
def set_task(self, task):
if not isinstance(task, thelper.tasks.Regression):
raise AssertionError("SRCNN architecture only available for super res regression tasks")
self.task = task
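# Hedged usage sketch (assumes only PyTorch, not the rest of thelper; the helper name
# _forward_shape_demo is illustrative): it mirrors the reshaping trick in SRCNN.forward
# above -- batch and channel dimensions are folded so every channel is filtered as an
# independent single-channel image, and the unpadded convolutions (9x9, 5x5, 5x5)
# shrink the spatial size by 16 pixels in total.
def _forward_shape_demo():
    import torch
    import torch.nn.functional as F
    x = torch.randn(2, 3, 33, 33)                    # (batch, channels, H, W)
    x0 = x.view(x.shape[0] * x.shape[1], 1, 33, 33)  # (batch*channels, 1, H, W)
    x0 = F.conv2d(x0, torch.randn(1, 1, 9, 9))       # stand-in for conv1: 33 -> 25
    x0 = F.conv2d(x0, torch.randn(1, 1, 5, 5))       # stand-in for conv2: 25 -> 21
    x0 = F.conv2d(x0, torch.randn(1, 1, 5, 5))       # stand-in for conv3: 21 -> 17
    x0 = x0.view(x.shape[0], x.shape[1], x0.shape[2], x0.shape[3])
    return x0.shape                                  # torch.Size([2, 3, 17, 17])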
avg_line_length: 46.076923 | max_line_length: 114 | alphanum_fraction: 0.616027

hexsha: 1f8a8813d4dda5dfa6e244c5d56450e1e6254c2a | size: 25,357 | ext: py | lang: Python
max_stars_repo_path: main.py | max_stars_repo_name: ruanyyyyyyy/text2shape | max_stars_repo_head_hexsha: 276379df22ffdbe4cb54f30a88d65cf87bf53243 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: main.py | max_issues_repo_name: ruanyyyyyyy/text2shape | max_issues_repo_head_hexsha: 276379df22ffdbe4cb54f30a88d65cf87bf53243 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: main.py | max_forks_repo_name: ruanyyyyyyy/text2shape | max_forks_repo_head_hexsha: 276379df22ffdbe4cb54f30a88d65cf87bf53243 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import argparse
import numpy as np
import os
import pprint
import yaml
import pdb
# HACK: Get logger to print to stdout
import sys
sys.ps1 = '>>> ' # Make it "interactive"
import tensorflow as tf
from multiprocessing import Queue
from lib.config import cfg_from_file, cfg_from_list, cfg
from lib.data_process import make_data_processes, kill_processes
from lib.solver import Solver
from lib.solver_encoder import TextEncoderSolver, TextEncoderCosDistSolver, LBASolver
from lib.solver_gan import End2EndGANDebugSolver
from lib.solver_classifier import ClassifierSolver
from lib.cwgan import CWGAN
from lib.lba import LBA
from lib.classifier import Classifier
import lib.utils as utils
import models
del sys.ps1 # HACK: Get logger to print to stdout
def parse_args():
"""Parse the arguments.
"""
parser = argparse.ArgumentParser(
description='Main text2voxel train/test file.')
parser.add_argument('--cfg',
dest='cfg_files',
action='append',
help='optional config file',
default=None,
type=str)
parser.add_argument('--dont_save_voxels', dest='dont_save_voxels', action='store_true')
parser.add_argument('--lba_only', dest='lba_only', action='store_true')
parser.add_argument('--metric_learning_only', dest='metric_learning_only', action='store_true')
parser.add_argument('--non_inverted_loss', dest='non_inverted_loss', action='store_true')
parser.add_argument('--synth_embedding', dest='synth_embedding', action='store_true')
parser.add_argument('--all_tuples', dest='all_tuples', action='store_true')
parser.add_argument('--reed_classifier', dest='reed_classifier', action='store_true')
parser.add_argument('--val_split',
dest='split',
help='data split for validation/testing (train, val, test)',
default=None,
type=str)
parser.add_argument('--queue_capacity',
dest='queue_capacity',
help='size of queue',
default=None,
type=int)
parser.add_argument('--n_minibatch_test',
dest='n_minibatch_test',
help='number of minibatches to use for test phase',
default=None,
type=int)
parser.add_argument('--dataset', dest='dataset',
help='dataset',
default=None,
type=str)
parser.add_argument('--improved_wgan', dest='improved_wgan', action='store_true')
parser.add_argument('--debug', dest='is_debug', action='store_true')
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--tiny_dataset', dest='tiny_dataset',
help='use a tiny dataset (~5 examples)',
action='store_true')
parser.add_argument('--model',
dest='model',
help='name of the network model',
default=None,
type=str)
parser.add_argument('--text_encoder', dest='text_encoder',
help='train/test on text encoder',
action='store_true')
parser.add_argument('--classifier', dest='classifier',
help='train/test on classifier',
action='store_true')
parser.add_argument('--end2end', dest='end2end',
help='train/test using end2end model such as End2EndLBACWGAN',
action='store_true')
parser.add_argument('--shapenet_ct_classifier', dest='shapenet_ct_classifier',
help='chair/table classifier (sets up for classification)',
action='store_true')
parser.add_argument('--noise_size',
dest='noise_size',
help='dimension of the noise',
default=None,
type=int)
parser.add_argument('--noise_dist', dest='noise_dist',
help='noise distribution (uniform, gaussian)',
default=None,
type=str)
parser.add_argument('--validation', dest='validation',
help='run validation while training',
action='store_true')
parser.add_argument('--test', dest='test',
help='test mode',
action='store_true')
parser.add_argument('--test_npy', dest='test_npy',
help='test mode using npy files',
action='store_true')
parser.add_argument('--save_outputs', dest='save_outputs',
help='save the outputs to a file',
action='store_true')
parser.add_argument('--summary_freq',
dest='summary_freq',
help='summary frequency',
default=None,
type=int)
parser.add_argument('--optimizer',
dest='optimizer',
help='name of the optimizer',
default=None,
type=str)
parser.add_argument('--critic_optimizer',
dest='critic_optimizer',
help='name of the critic optimizer',
default=None,
type=str)
parser.add_argument('--batch_size',
dest='batch_size',
help='batch size',
default=None,
type=int)
parser.add_argument('--lba_mode',
dest='lba_mode',
help='LBA mode type (TST, STS, MM)',
default=None,
type=str)
parser.add_argument('--lba_test_mode',
dest='lba_test_mode',
help='LBA test mode (shape, text) - what to input during forward pass',
default=None,
type=str)
parser.add_argument('--visit_weight',
dest='visit_weight',
help='visit weight for lba models',
default=None,
type=float)
parser.add_argument('--lba_unnormalize', dest='lba_unnormalize', action='store_true')
parser.add_argument('--num_critic_steps',
dest='num_critic_steps',
help='number of critic steps per train step',
default=None,
type=int)
parser.add_argument('--intense_training_freq',
dest='intense_training_freq',
help='frequency of intense critic training',
default=None,
type=int)
parser.add_argument('--uniform_max',
dest='uniform_max',
help='absolute max for uniform distribution',
default=None,
type=float)
parser.add_argument('--match_loss_coeff',
dest='match_loss_coeff',
help='coefficient for real match loss',
default=None,
type=float)
parser.add_argument('--fake_match_loss_coeff',
dest='fake_match_loss_coeff',
help='coefficient for fake match loss',
default=None,
type=float)
parser.add_argument('--fake_mismatch_loss_coeff',
dest='fake_mismatch_loss_coeff',
help='coefficient for fake mismatch loss',
default=None,
type=float)
parser.add_argument('--gp_weight',
dest='gp_weight',
help='coefficient for gradient penalty',
default=None,
type=float)
parser.add_argument('--text2text_weight',
dest='text2text_weight',
help='coefficient for text2text loss',
default=None,
type=float)
parser.add_argument('--shape2shape_weight',
dest='shape2shape_weight',
help='coefficient for shape2shape loss',
default=None,
type=float)
parser.add_argument('--learning_rate',
dest='learning_rate',
help='learning rate',
default=None,
type=float)
parser.add_argument('--critic_lr_multiplier',
dest='critic_lr_multiplier',
help='critic learning rate multiplier',
default=None,
type=float)
parser.add_argument('--decay_steps',
dest='decay_steps',
help='decay steps',
default=None,
type=int)
parser.add_argument('--num_epochs',
dest='num_epochs',
help='number of epochs',
default=None,
type=int)
parser.add_argument('--augment_max',
dest='augment_max',
help='maximum augmentation perturbation out of 255',
default=None,
type=int)
parser.add_argument('--set',
dest='set_cfgs',
help='set config keys',
default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--ckpt_path', dest='ckpt_path',
help='Initialize network from checkpoint',
default=None)
parser.add_argument('--lba_ckpt_path', dest='lba_ckpt_path',
help='Initialize LBA component of end2endlbawgan network from checkpoint',
default=None)
parser.add_argument('--val_ckpt_path', dest='val_ckpt_path',
help='Initialize validation network from checkpoint',
default=None)
parser.add_argument('--log_path', dest='log_path', help='set log path',
default=None)
args = parser.parse_args()
return args
def modify_args(args):
"""Modify the default config based on the command line arguments.
"""
# modify default config if requested
if args.cfg_files is not None:
for cfg_file in args.cfg_files:
cfg_from_file(cfg_file)
randomize = args.randomize
if args.test: # Always randomize in test phase
randomize = True
if not randomize:
np.random.seed(cfg.CONST.RNG_SEED)
# NOTE: Unfortunately order matters here
if args.lba_only is True:
cfg_from_list(['LBA.COSINE_DIST', False])
if args.metric_learning_only is True:
cfg_from_list(['LBA.NO_LBA', True])
if args.non_inverted_loss is True:
cfg_from_list(['LBA.INVERTED_LOSS', False])
if args.dataset is not None:
cfg_from_list(['CONST.DATASET', args.dataset])
if args.lba_mode is not None:
cfg_from_list(['LBA.MODEL_TYPE', args.lba_mode])
if args.lba_test_mode is not None:
cfg_from_list(['LBA.TEST_MODE', args.lba_test_mode])
# cfg_from_list(['LBA.N_CAPTIONS_PER_MODEL', 1]) # NOTE: Important!
if args.shapenet_ct_classifier is True:
cfg_from_list(['CONST.SHAPENET_CT_CLASSIFIER', args.shapenet_ct_classifier])
if args.visit_weight is not None:
cfg_from_list(['LBA.VISIT_WEIGHT', args.visit_weight])
if args.lba_unnormalize is True:
cfg_from_list(['LBA.NORMALIZE', False])
if args.improved_wgan is True:
cfg_from_list(['CONST.IMPROVED_WGAN', args.improved_wgan])
if args.synth_embedding is True:
cfg_from_list(['CONST.SYNTH_EMBEDDING', args.synth_embedding])
if args.all_tuples is True:
cfg_from_list(['CONST.TEST_ALL_TUPLES', args.all_tuples])
if args.reed_classifier is True:
cfg_from_list(['CONST.REED_CLASSIFIER', args.reed_classifier])
if args.noise_dist is not None:
cfg_from_list(['GAN.NOISE_DIST', args.noise_dist])
if args.uniform_max is not None:
cfg_from_list(['GAN.NOISE_UNIF_ABS_MAX', args.uniform_max])
if args.num_critic_steps is not None:
cfg_from_list(['WGAN.NUM_CRITIC_STEPS', args.num_critic_steps])
if args.intense_training_freq is not None:
cfg_from_list(['WGAN.INTENSE_TRAINING_FREQ', args.intense_training_freq])
if args.match_loss_coeff is not None:
cfg_from_list(['WGAN.MATCH_LOSS_COEFF', args.match_loss_coeff])
if args.fake_match_loss_coeff is not None:
cfg_from_list(['WGAN.FAKE_MATCH_LOSS_COEFF', args.fake_match_loss_coeff])
if args.fake_mismatch_loss_coeff is not None:
cfg_from_list(['WGAN.FAKE_MISMATCH_LOSS_COEFF', args.fake_mismatch_loss_coeff])
if args.gp_weight is not None:
cfg_from_list(['WGAN.GP_COEFF', args.gp_weight])
if args.text2text_weight is not None:
cfg_from_list(['WGAN.TEXT2TEXT_WEIGHT', args.text2text_weight])
if args.shape2shape_weight is not None:
cfg_from_list(['WGAN.SHAPE2SHAPE_WEIGHT', args.shape2shape_weight])
if args.learning_rate is not None:
cfg_from_list(['TRAIN.LEARNING_RATE', args.learning_rate])
if args.critic_lr_multiplier is not None:
cfg_from_list(['GAN.D_LEARNING_RATE_MULTIPLIER', args.critic_lr_multiplier])
if args.decay_steps is not None:
cfg_from_list(['TRAIN.DECAY_STEPS', args.decay_steps])
if args.queue_capacity is not None:
cfg_from_list(['CONST.QUEUE_CAPACITY', args.queue_capacity])
if args.n_minibatch_test is not None:
cfg_from_list(['CONST.N_MINIBATCH_TEST', args.n_minibatch_test])
if args.noise_size is not None:
cfg_from_list(['GAN.NOISE_SIZE', args.noise_size])
if args.batch_size is not None:
cfg_from_list(['CONST.BATCH_SIZE', args.batch_size])
if args.summary_freq is not None:
cfg_from_list(['TRAIN.SUMMARY_FREQ', args.summary_freq])
if args.num_epochs is not None:
cfg_from_list(['TRAIN.NUM_EPOCHS', args.num_epochs])
if args.model is not None:
cfg_from_list(['NETWORK', args.model])
if args.optimizer is not None:
cfg_from_list(['TRAIN.OPTIMIZER', args.optimizer])
if args.critic_optimizer is not None:
cfg_from_list(['GAN.D_OPTIMIZER', args.critic_optimizer])
if args.ckpt_path is not None:
cfg_from_list(['DIR.CKPT_PATH', args.ckpt_path])
if args.lba_ckpt_path is not None:
cfg_from_list(['END2END.LBA_CKPT_PATH', args.lba_ckpt_path])
if args.val_ckpt_path is not None:
cfg_from_list(['DIR.VAL_CKPT_PATH', args.val_ckpt_path])
if args.log_path is not None:
cfg_from_list(['DIR.LOG_PATH', args.log_path])
if args.augment_max is not None:
cfg_from_list(['TRAIN.AUGMENT_MAX', args.augment_max])
if args.test:
cfg_from_list(['TRAIN.AUGMENT_MAX', 0])
cfg_from_list(['CONST.BATCH_SIZE', 1])
cfg_from_list(['LBA.N_CAPTIONS_PER_MODEL', 1]) # NOTE: Important!
cfg_from_list(['LBA.N_PRIMITIVE_SHAPES_PER_CATEGORY', 1]) # NOTE: Important!
if args.test_npy:
cfg_from_list(['CONST.BATCH_SIZE', 1])
# To overwrite default variables, put the set_cfgs after all argument initializations
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
def get_inputs_dict(args):
"""Gets the input dict for the current model and dataset.
"""
if cfg.CONST.DATASET == 'shapenet':
if (args.text_encoder is True) or (args.end2end is True) or (args.classifier is True):
            inputs_dict = utils.open_pickle(cfg.DIR.TRAIN_DATA_PATH)
val_inputs_dict = utils.open_pickle(cfg.DIR.VAL_DATA_PATH)
test_inputs_dict = utils.open_pickle(cfg.DIR.TEST_DATA_PATH)
else: # Learned embeddings
inputs_dict = utils.open_pickle(cfg.DIR.SHAPENET_METRIC_EMBEDDINGS_TRAIN)
val_inputs_dict = utils.open_pickle(cfg.DIR.SHAPENET_METRIC_EMBEDDINGS_VAL)
test_inputs_dict = utils.open_pickle(cfg.DIR.SHAPENET_METRIC_EMBEDDINGS_TEST)
elif cfg.CONST.DATASET == 'primitives':
if ((cfg.CONST.SYNTH_EMBEDDING is True) or (args.text_encoder is True) or
(args.classifier is True)):
if args.classifier and not cfg.CONST.REED_CLASSIFIER: # Train on all splits for classifier
tf.logging.info('Using all (train/val/test) splits for training')
inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_ALL_SPLITS_DATA_PATH)
else:
tf.logging.info('Using train split only for training')
inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_TRAIN_DATA_PATH)
val_inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_VAL_DATA_PATH)
test_inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_TEST_DATA_PATH)
else: # Learned embeddings
inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_METRIC_EMBEDDINGS_TRAIN)
val_inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_METRIC_EMBEDDINGS_VAL)
test_inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_METRIC_EMBEDDINGS_TEST)
else:
raise ValueError('Please use a valid dataset (shapenet, primitives).')
if args.tiny_dataset is True:
if ((cfg.CONST.DATASET == 'primitives' and cfg.CONST.SYNTH_EMBEDDING is True)
or (args.text_encoder is True)):
raise NotImplementedError('Tiny dataset not supported for synthetic embeddings.')
ds = 5 # New dataset size
if cfg.CONST.BATCH_SIZE > ds:
raise ValueError('Please use a smaller batch size than {}.'.format(ds))
inputs_dict = utils.change_dataset_size(inputs_dict, new_dataset_size=ds)
val_inputs_dict = utils.change_dataset_size(val_inputs_dict, new_dataset_size=ds)
test_inputs_dict = utils.change_dataset_size(test_inputs_dict, new_dataset_size=ds)
# Select the validation/test split
if args.split == 'train':
split_str = 'train'
val_inputs_dict = inputs_dict
elif (args.split == 'val') or (args.split is None):
split_str = 'val'
val_inputs_dict = val_inputs_dict
elif args.split == 'test':
split_str = 'test'
val_inputs_dict = test_inputs_dict
else:
raise ValueError('Please select a valid split (train, val, test).')
print('Validation/testing on {} split.'.format(split_str))
if (cfg.CONST.DATASET == 'shapenet') and (cfg.CONST.SHAPENET_CT_CLASSIFIER is True):
category_model_list, class_labels = Classifier.set_up_classification(inputs_dict)
val_category_model_list, val_class_labels = Classifier.set_up_classification(val_inputs_dict)
assert class_labels == val_class_labels
# Update inputs dicts
inputs_dict['category_model_list'] = category_model_list
inputs_dict['class_labels'] = class_labels
val_inputs_dict['category_model_list'] = val_category_model_list
val_inputs_dict['class_labels'] = val_class_labels
return inputs_dict, val_inputs_dict
def get_solver(g, net, args, is_training):
if isinstance(net, LBA):
solver = LBASolver(net, g, is_training)
elif args.text_encoder:
solver = TextEncoderSolver(net, g, is_training)
elif isinstance(net, Classifier):
solver = ClassifierSolver(net, g, is_training)
elif isinstance(net, CWGAN):
solver = End2EndGANDebugSolver(net, g, is_training)
else:
raise ValueError('Invalid network.')
return solver
def main():
"""Main text2voxel function.
"""
args = parse_args()
print('Called with args:')
print(args)
# pdb.set_trace()
if args.save_outputs is True and args.test is False:
raise ValueError('Can only save outputs when testing, not training.')
if args.validation:
assert not args.test
if args.test:
assert args.ckpt_path is not None
modify_args(args)
print('----------------- CONFIG -------------------')
pprint.pprint(cfg)
# Save yaml
os.makedirs(cfg.DIR.LOG_PATH, exist_ok=True)
with open(os.path.join(cfg.DIR.LOG_PATH, 'run_cfg.yaml'), 'w') as out_yaml:
yaml.dump(cfg, out_yaml, default_flow_style=False)
# set up logger
tf.logging.set_verbosity(tf.logging.INFO)
try:
with tf.Graph().as_default() as g: # create graph
# Load data
inputs_dict, val_inputs_dict = get_inputs_dict(args)
# Build network
is_training = not args.test
print('------------ BUILDING NETWORK -------------')
network_class = models.load_model(cfg.NETWORK)
net = network_class(inputs_dict, is_training)
# Prefetching data processes
#
# Create worker and data queue for data processing. For training data, use
# multiple processes to speed up the loading. For validation data, use 1
# since the queue will be popped every TRAIN.NUM_VALIDATION_ITERATIONS.
# set up data queue and start enqueue
np.random.seed(123)
data_process_class = models.get_data_process_pairs(cfg.NETWORK, is_training)
val_data_process_class = models.get_data_process_pairs(cfg.NETWORK, is_training=False)
if is_training:
global train_queue, train_processes
train_queue = Queue(cfg.CONST.QUEUE_CAPACITY)
train_processes = make_data_processes(data_process_class, train_queue, inputs_dict,
cfg.CONST.NUM_WORKERS, repeat=True)
if args.validation:
global val_queue, val_processes
val_queue = Queue(cfg.CONST.QUEUE_CAPACITY)
val_processes = make_data_processes(val_data_process_class, val_queue,
val_inputs_dict, 1, repeat=True)
else:
global test_queue, test_processes
test_inputs_dict = val_inputs_dict
test_queue = Queue(cfg.CONST.QUEUE_CAPACITY)
test_processes = make_data_processes(val_data_process_class, test_queue,
test_inputs_dict, 1, repeat=False)
# Create solver
solver = get_solver(g, net, args, is_training)
# Run solver
if is_training:
if args.validation:
if cfg.DIR.VAL_CKPT_PATH is not None:
assert train_processes[0].iters_per_epoch != 0
assert val_processes[0].iters_per_epoch != 0
solver.train(train_processes[0].iters_per_epoch, train_queue,
val_processes[0].iters_per_epoch, val_queue=val_queue,
val_inputs_dict=val_inputs_dict)
else:
if isinstance(net, LBA):
assert cfg.LBA.TEST_MODE is not None
assert cfg.LBA.TEST_MODE == 'shape'
assert train_processes[0].iters_per_epoch != 0
assert val_processes[0].iters_per_epoch != 0
solver.train(train_processes[0].iters_per_epoch, train_queue,
val_processes[0].iters_per_epoch, val_queue=val_queue,
val_inputs_dict=val_inputs_dict)
else:
assert train_processes[0].iters_per_epoch != 0
assert val_processes[0].iters_per_epoch != 0
solver.train(train_processes[0].iters_per_epoch, train_queue,
val_processes[0].iters_per_epoch, val_queue=val_queue)
else:
solver.train(train_processes[0].iters_per_epoch, train_queue)
else:
solver.test(test_processes[0], test_queue,
num_minibatches=cfg.CONST.N_MINIBATCH_TEST,
save_outputs=args.save_outputs)
finally:
# Clean up the processes and queues
        is_training = True  # modified by yuer
if is_training:
kill_processes(train_queue, train_processes)
if args.validation:
kill_processes(val_queue, val_processes)
else:
kill_processes(test_queue, test_processes)
if __name__ == '__main__':
main()
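# Hedged sketch (standard library only; not the project's lib.data_process code, and the
# _data_worker/_prefetch_demo names are illustrative): the prefetching pattern main()
# relies on, in miniature -- worker processes push minibatches into a bounded Queue while
# the consumer pops them, which is roughly what make_data_processes/kill_processes are
# assumed to manage above.
def _data_worker(queue, n_items):
    for i in range(n_items):
        queue.put({'minibatch': i})   # stand-in for a dict of voxels/captions/embeddings
    queue.put(None)                   # sentinel: tells the consumer to stop
def _prefetch_demo(capacity=4, n_items=10):
    from multiprocessing import Process
    queue = Queue(capacity)           # bounded, like cfg.CONST.QUEUE_CAPACITY
    worker = Process(target=_data_worker, args=(queue, n_items))
    worker.start()
    consumed = 0
    while queue.get() is not None:    # a train/test step would consume the batch here
        consumed += 1
    worker.join()
    return consumed                   # equals n_items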
avg_line_length: 46.612132 | max_line_length: 103 | alphanum_fraction: 0.588279

hexsha: 8ffa6f95b0550ae76cc4266b390c02600cbb1cd0 | size: 2,935 | ext: py | lang: Python
max_stars_repo_path: hocrox/layer/augmentation/flip/random_vertical_flip.py | max_stars_repo_name: imdeepmind/hocrox | max_stars_repo_head_hexsha: 271f7ee8f7b9c0fb5466f5b8d9acbc0a703b570f | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2021-12-09T19:20:23.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-26T13:22:50.000Z
max_issues_repo_path: hocrox/layer/augmentation/flip/random_vertical_flip.py | max_issues_repo_name: imdeepmind/hocrox | max_issues_repo_head_hexsha: 271f7ee8f7b9c0fb5466f5b8d9acbc0a703b570f | max_issues_repo_licenses: ["MIT"] | max_issues_count: 29 | max_issues_repo_issues_event_min_datetime: 2021-04-28T14:37:36.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-17T18:37:52.000Z
max_forks_repo_path: hocrox/layer/augmentation/flip/random_vertical_flip.py | max_forks_repo_name: imdeepmind/hocrox | max_forks_repo_head_hexsha: 271f7ee8f7b9c0fb5466f5b8d9acbc0a703b570f | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2022-03-07T10:55:36.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-07T10:55:36.000Z
content:
"""RandomVerticalFlip layer for Hocrox."""
import cv2
from hocrox.utils import Layer
class RandomVerticalFlip(Layer):
"""RandomVerticalFlip layer randomly flips an image vertically.
    Here is example code that uses the RandomVerticalFlip layer in a model.
```python
from hocrox.model import Model
from hocrox.layer.augmentation.flip import RandomVerticalFlip
from hocrox.layer import Read
# Initializing the model
model = Model()
# Adding model layers
model.add(Read(path="./img"))
model.add(RandomVerticalFlip(number_of_outputs=1))
# Printing the summary of the model
print(model.summary())
```
"""
def __init__(self, probability=1.0, number_of_outputs=1, name=None):
"""Init method for the RandomVerticalFlip layer.
Args:
            probability (float, optional): Probability rate for the layer; if the rate is 0.5, the layer is applied
                on 50% of images. Defaults to 1.0.
number_of_outputs (int, optional): Number of images to output. Defaults to 1.
name (str, optional): Name of the layer, if not provided then automatically generates a unique name for
the layer. Defaults to None.
Raises:
            ValueError: If the probability or number_of_outputs parameter is not valid
"""
if not isinstance(probability, float) or probability < 0.0 or probability > 1.0:
raise ValueError(f"The value {probability} for the argument probability is not valid")
if not isinstance(number_of_outputs, int) or number_of_outputs < 1:
raise ValueError(f"The value {number_of_outputs} for the argument number_of_outputs is not valid")
super().__init__(
name,
"random_vertical_flip",
self.STANDARD_SUPPORTED_LAYERS,
f"Probability: {probability}, Number of Outputs: {number_of_outputs}",
)
self.__number_of_outputs = number_of_outputs
self.__probability = probability
def _apply_layer(self, images, name=None):
"""Apply the transformation method to change the layer.
Args:
images (list[ndarray]): List of images to transform.
name (str, optional): Name of the image series, used for saving the images. Defaults to None.
Returns:
list[ndarray]: Return the transform images
"""
transformed_images = []
for image in images:
for _ in range(self.__number_of_outputs):
should_perform = self._get_probability(self.__probability)
if image is not None and len(image) != 0:
transformed_image = cv2.flip(image, 0) if should_perform else image
if transformed_image is not None and len(transformed_image) != 0:
transformed_images.append(transformed_image)
return transformed_images
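# Hedged sketch (assumes NumPy, which the surrounding image pipeline already works with;
# the _vertical_flip_demo name is illustrative): cv2.flip(image, 0) flips around the
# horizontal axis, i.e. a vertical flip, which is what _apply_layer performs above
# whenever the probability check passes.
def _vertical_flip_demo():
    import numpy as np
    img = np.arange(12, dtype=np.uint8).reshape(3, 4)
    flipped = cv2.flip(img, 0)
    return bool((flipped == img[::-1, :]).all())  # True: rows reversed, columns untouched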
avg_line_length: 36.234568 | max_line_length: 119 | alphanum_fraction: 0.650426

hexsha: 4308df5f37b667e16b78898157d372583cb6f71b | size: 6,447 | ext: py | lang: Python
max_stars_repo_path: selfdrive/car/car_helpers.py | max_stars_repo_name: dragonpilot/xx979xx-openpilot | max_stars_repo_head_hexsha: d50ba12b1876c267b17d2d8a4682cd1c0582463a | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-11-24T09:01:03.000Z | max_stars_repo_stars_event_max_datetime: 2021-11-24T09:01:03.000Z
max_issues_repo_path: selfdrive/car/car_helpers.py | max_issues_repo_name: dragonpilot/openpilot | max_issues_repo_head_hexsha: d50ba12b1876c267b17d2d8a4682cd1c0582463a | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: selfdrive/car/car_helpers.py | max_forks_repo_name: dragonpilot/openpilot | max_forks_repo_head_hexsha: d50ba12b1876c267b17d2d8a4682cd1c0582463a | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-07-28T12:52:39.000Z | max_forks_repo_forks_event_max_datetime: 2021-07-28T12:52:39.000Z
content:
import os
from common.params import Params
from common.basedir import BASEDIR
from selfdrive.version import comma_remote, tested_branch
from selfdrive.car.fingerprints import eliminate_incompatible_cars, all_known_cars
from selfdrive.car.vin import get_vin, VIN_UNKNOWN
from selfdrive.car.fw_versions import get_fw_versions, match_fw_to_car
from selfdrive.swaglog import cloudlog
import cereal.messaging as messaging
from selfdrive.car import gen_empty_fingerprint
from cereal import car, log
EventName = car.CarEvent.EventName
HwType = log.HealthData.HwType
def get_startup_event(car_recognized, controller_available):
if comma_remote and tested_branch:
event = EventName.startup
else:
event = EventName.startupMaster
if not car_recognized:
event = EventName.startupNoCar
elif car_recognized and not controller_available:
event = EventName.startupNoControl
return event
def load_interfaces(brand_names):
ret = {}
for brand_name in brand_names:
path = ('selfdrive.car.%s' % brand_name)
CarInterface = __import__(path + '.interface', fromlist=['CarInterface']).CarInterface
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carstate.py'):
CarState = __import__(path + '.carstate', fromlist=['CarState']).CarState
else:
CarState = None
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carcontroller.py'):
CarController = __import__(path + '.carcontroller', fromlist=['CarController']).CarController
else:
CarController = None
for model_name in brand_names[brand_name]:
ret[model_name] = (CarInterface, CarController, CarState)
return ret
def _get_interface_names():
# read all the folders in selfdrive/car and return a dict where:
  # - keys are all the car names that we have an interface for
  # - values are lists of specific car models for a given car
brand_names = {}
for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
try:
brand_name = car_folder.split('/')[-1]
model_names = __import__('selfdrive.car.%s.values' % brand_name, fromlist=['CAR']).CAR
model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")]
brand_names[brand_name] = model_names
except (ImportError, IOError):
pass
return brand_names
# imports from directory selfdrive/car/<name>/
interface_names = _get_interface_names()
interfaces = load_interfaces(interface_names)
def only_toyota_left(candidate_cars):
return all(("TOYOTA" in c or "LEXUS" in c) for c in candidate_cars) and len(candidate_cars) > 0
# **** for use live only ****
def fingerprint(logcan, sendcan, has_relay):
fixed_fingerprint = os.environ.get('FINGERPRINT', "")
skip_fw_query = os.environ.get('SKIP_FW_QUERY', False)
if has_relay and not fixed_fingerprint and not skip_fw_query:
    # VIN query only reliably works through OBDII
bus = 1
cached_params = Params().get("CarParamsCache")
if cached_params is not None:
cached_params = car.CarParams.from_bytes(cached_params)
if cached_params.carName == "mock":
cached_params = None
if cached_params is not None and len(cached_params.carFw) > 0 and cached_params.carVin is not VIN_UNKNOWN:
cloudlog.warning("Using cached CarParams")
vin = cached_params.carVin
car_fw = list(cached_params.carFw)
else:
cloudlog.warning("Getting VIN & FW versions")
_, vin = get_vin(logcan, sendcan, bus)
car_fw = get_fw_versions(logcan, sendcan, bus)
fw_candidates = match_fw_to_car(car_fw)
else:
vin = VIN_UNKNOWN
fw_candidates, car_fw = set(), []
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
finger = gen_empty_fingerprint()
candidate_cars = {i: all_known_cars() for i in [0]} # attempt fingerprint on bus 0 only
frame = 0
frame_fingerprint = 10 # 0.1s
car_fingerprint = None
done = False
while not done:
a = messaging.get_one_can(logcan)
for can in a.can:
# need to independently try to fingerprint both bus 0 and 1 to work
# for the combo black_panda and honda_bosch. Ignore extended messages
# and VIN query response.
# Include bus 2 for toyotas to disambiguate cars using camera messages
# (ideally should be done for all cars but we can't for Honda Bosch)
if can.src in range(0, 4):
finger[can.src][can.address] = len(can.dat)
for b in candidate_cars:
if (can.src == b or (only_toyota_left(candidate_cars[b]) and can.src == 2)) and \
can.address < 0x800 and can.address not in [0x7df, 0x7e0, 0x7e8]:
candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])
# if we only have one car choice and the time since we got our first
# message has elapsed, exit
for b in candidate_cars:
      # Toyota needs more time to fingerprint, since the DSU does not broadcast immediately
if only_toyota_left(candidate_cars[b]):
frame_fingerprint = 100 # 1s
if len(candidate_cars[b]) == 1:
if frame > frame_fingerprint:
# fingerprint done
car_fingerprint = candidate_cars[b][0]
# bail if no cars left or we've been waiting for more than 2s
failed = all(len(cc) == 0 for cc in candidate_cars.values()) or frame > 200
succeeded = car_fingerprint is not None
done = failed or succeeded
frame += 1
source = car.CarParams.FingerprintSource.can
# If FW query returns exactly 1 candidate, use it
if len(fw_candidates) == 1:
car_fingerprint = list(fw_candidates)[0]
source = car.CarParams.FingerprintSource.fw
if fixed_fingerprint:
car_fingerprint = fixed_fingerprint
source = car.CarParams.FingerprintSource.fixed
cloudlog.warning("fingerprinted %s", car_fingerprint)
return car_fingerprint, finger, vin, car_fw, source
def get_car(logcan, sendcan, has_relay=False):
candidate, fingerprints, vin, car_fw, source = fingerprint(logcan, sendcan, has_relay)
if candidate is None:
cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints)
candidate = "mock"
CarInterface, CarController, CarState = interfaces[candidate]
car_params = CarInterface.get_params(candidate, fingerprints, has_relay, car_fw)
car_params.carVin = vin
car_params.carFw = car_fw
car_params.fingerprintSource = source
return CarInterface(car_params, CarController, CarState), car_params
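# Hedged sketch (plain Python, not the real selfdrive.car.fingerprints logic; the _DEMO_*
# names are illustrative): the core idea of the fingerprinting loop above, simplified.
# Each candidate car has a known fingerprint mapping CAN address -> message length, and a
# candidate survives only while every observed (address, length) pair matches it;
# eliminate_incompatible_cars adds further caveats (multiple buses, ignored diagnostic
# addresses, etc.).
_DEMO_FINGERPRINTS = {
  "CAR_A": {0x100: 8, 0x200: 4},
  "CAR_B": {0x100: 8, 0x300: 6},
}
def _demo_eliminate(candidates, address, length):
  return [c for c in candidates if _DEMO_FINGERPRINTS[c].get(address) == length]
def _demo_fingerprint(observed):
  candidates = list(_DEMO_FINGERPRINTS)
  for address, length in observed:
    candidates = _demo_eliminate(candidates, address, length)
  return candidates
# _demo_fingerprint([(0x100, 8), (0x300, 6)]) -> ['CAR_B']: CAR_A is dropped because its
# fingerprint has no 0x300 message.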
avg_line_length: 36.01676 | max_line_length: 110 | alphanum_fraction: 0.716457

hexsha: 6841773a9da34999681ae5f4bd9d07fa43d0653b | size: 40,670 | ext: py | lang: Python
max_stars_repo_path: packages/python/plotly/plotly/graph_objs/layout/_shape.py | max_stars_repo_name: mastermind88/plotly.py | max_stars_repo_head_hexsha: efa70710df1af22958e1be080e105130042f1839 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: packages/python/plotly/plotly/graph_objs/layout/_shape.py | max_issues_repo_name: mastermind88/plotly.py | max_issues_repo_head_hexsha: efa70710df1af22958e1be080e105130042f1839 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: packages/python/plotly/plotly/graph_objs/layout/_shape.py | max_forks_repo_name: mastermind88/plotly.py | max_forks_repo_head_hexsha: efa70710df1af22958e1be080e105130042f1839 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Shape(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout"
_path_str = "layout.shape"
_valid_props = {
"editable",
"fillcolor",
"fillrule",
"layer",
"line",
"name",
"opacity",
"path",
"templateitemname",
"type",
"visible",
"x0",
"x1",
"xanchor",
"xref",
"xsizemode",
"y0",
"y1",
"yanchor",
"yref",
"ysizemode",
}
# editable
# --------
@property
def editable(self):
"""
Determines whether the shape could be activated for edit or
not. Has no effect when the older editable shapes mode is
enabled via `config.editable` or `config.edits.shapePosition`.
The 'editable' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["editable"]
@editable.setter
def editable(self, val):
self["editable"] = val
# fillcolor
# ---------
@property
def fillcolor(self):
"""
Sets the color filling the shape's interior. Only applies to
closed shapes.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
# fillrule
# --------
@property
def fillrule(self):
"""
Determines which regions of complex paths constitute the
interior. For more info please visit
https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
The 'fillrule' property is an enumeration that may be specified as:
- One of the following enumeration values:
['evenodd', 'nonzero']
Returns
-------
Any
"""
return self["fillrule"]
@fillrule.setter
def fillrule(self, val):
self["fillrule"] = val
# layer
# -----
@property
def layer(self):
"""
Specifies whether shapes are drawn below or above traces.
The 'layer' property is an enumeration that may be specified as:
- One of the following enumeration values:
['below', 'above']
Returns
-------
Any
"""
return self["layer"]
@layer.setter
def layer(self, val):
self["layer"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.shape.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
width
Sets the line width (in px).
Returns
-------
plotly.graph_objs.layout.shape.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the shape.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# path
# ----
@property
def path(self):
"""
For `type` "path" - a valid SVG path with the pixel values
replaced by data values in `xsizemode`/`ysizemode` being
"scaled" and taken unmodified as pixels relative to `xanchor`
and `yanchor` in case of "pixel" size mode. There are a few
restrictions / quirks only absolute instructions, not relative.
So the allowed segments are: M, L, H, V, Q, C, T, S, and Z arcs
(A) are not allowed because radius rx and ry are relative. In
the future we could consider supporting relative commands, but
we would have to decide on how to handle date and log axes.
Note that even as is, Q and C Bezier paths that are smooth on
linear axes may not be smooth on log, and vice versa. no
chained "polybezier" commands - specify the segment type for
each one. On category axes, values are numbers scaled to the
serial numbers of categories because using the categories
themselves there would be no way to describe fractional
positions On data axes: because space and T are both normal
components of path strings, we can't use either to separate
date from time parts. Therefore we'll use underscore for this
purpose: 2015-02-21_13:45:56.789
The 'path' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["path"]
@path.setter
def path(self, val):
self["path"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# type
# ----
@property
def type(self):
"""
Specifies the shape type to be drawn. If "line", a line is
drawn from (`x0`,`y0`) to (`x1`,`y1`) with respect to the axes'
sizing mode. If "circle", a circle is drawn from
((`x0`+`x1`)/2, (`y0`+`y1`)/2)) with radius (|(`x0`+`x1`)/2 -
`x0`|, |(`y0`+`y1`)/2 -`y0`)|) with respect to the axes' sizing
mode. If "rect", a rectangle is drawn linking (`x0`,`y0`),
(`x1`,`y0`), (`x1`,`y1`), (`x0`,`y1`), (`x0`,`y0`) with respect
to the axes' sizing mode. If "path", draw a custom SVG path
using `path`. with respect to the axes' sizing mode.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['circle', 'rect', 'path', 'line']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this shape is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# x0
# --
@property
def x0(self):
"""
Sets the shape's starting x position. See `type` and
`xsizemode` for more info.
The 'x0' property accepts values of any type
Returns
-------
Any
"""
return self["x0"]
@x0.setter
def x0(self, val):
self["x0"] = val
# x1
# --
@property
def x1(self):
"""
Sets the shape's end x position. See `type` and `xsizemode` for
more info.
The 'x1' property accepts values of any type
Returns
-------
Any
"""
return self["x1"]
@x1.setter
def x1(self, val):
self["x1"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Only relevant in conjunction with `xsizemode` set to "pixel".
Specifies the anchor point on the x axis to which `x0`, `x1`
and x coordinates within `path` are relative to. E.g. useful to
attach a pixel sized shape to a certain data value. No effect
when `xsizemode` not set to "pixel".
The 'xanchor' property accepts values of any type
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xref
# ----
@property
def xref(self):
"""
Sets the shape's x coordinate axis. If set to a x axis id (e.g.
"x" or "x2"), the `x` position refers to a x coordinate. If set
to "paper", the `x` position refers to the distance from the
left of the plotting area in normalized coordinates where 0 (1)
corresponds to the left (right). If set to a x axis ID followed
by "domain" (separated by a space), the position behaves like
for "paper", but refers to the distance in fractions of the
domain length from the left of the domain of that axis: e.g.,
*x2 domain* refers to the domain of the second x axis and a x
position of 0.5 refers to the point between the left and the
right of the domain of the second x axis. If the axis `type` is
"log", then you must take the log of your desired range. If the
axis `type` is "date", then you must convert the date to unix
time in milliseconds.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['paper']
- A string that matches one of the following regular expressions:
['^x([2-9]|[1-9][0-9]+)?( domain)?$']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
# xsizemode
# ---------
@property
def xsizemode(self):
"""
        Sets the shape's sizing mode along the x axis. If set to
"scaled", `x0`, `x1` and x coordinates within `path` refer to
data values on the x axis or a fraction of the plot area's
width (`xref` set to "paper"). If set to "pixel", `xanchor`
specifies the x position in terms of data or plot fraction but
`x0`, `x1` and x coordinates within `path` are pixels relative
to `xanchor`. This way, the shape can have a fixed width while
maintaining a position relative to data or plot fraction.
The 'xsizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['scaled', 'pixel']
Returns
-------
Any
"""
return self["xsizemode"]
@xsizemode.setter
def xsizemode(self, val):
self["xsizemode"] = val
# y0
# --
@property
def y0(self):
"""
Sets the shape's starting y position. See `type` and
`ysizemode` for more info.
The 'y0' property accepts values of any type
Returns
-------
Any
"""
return self["y0"]
@y0.setter
def y0(self, val):
self["y0"] = val
# y1
# --
@property
def y1(self):
"""
Sets the shape's end y position. See `type` and `ysizemode` for
more info.
The 'y1' property accepts values of any type
Returns
-------
Any
"""
return self["y1"]
@y1.setter
def y1(self, val):
self["y1"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Only relevant in conjunction with `ysizemode` set to "pixel".
Specifies the anchor point on the y axis to which `y0`, `y1`
and y coordinates within `path` are relative to. E.g. useful to
attach a pixel sized shape to a certain data value. No effect
when `ysizemode` not set to "pixel".
The 'yanchor' property accepts values of any type
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# yref
# ----
@property
def yref(self):
"""
        Sets the shape's y coordinate axis. If set to a y axis id
(e.g. "y" or "y2"), the `y` position refers to a y coordinate.
If set to "paper", the `y` position refers to the distance from
the bottom of the plotting area in normalized coordinates where
0 (1) corresponds to the bottom (top). If set to a y axis ID
followed by "domain" (separated by a space), the position
behaves like for "paper", but refers to the distance in
fractions of the domain length from the bottom of the domain of
that axis: e.g., *y2 domain* refers to the domain of the second
y axis and a y position of 0.5 refers to the point between the
bottom and the top of the domain of the second y axis.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['paper']
- A string that matches one of the following regular expressions:
['^y([2-9]|[1-9][0-9]+)?( domain)?$']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
# ysizemode
# ---------
@property
def ysizemode(self):
"""
        Sets the shape's sizing mode along the y axis. If set to
"scaled", `y0`, `y1` and y coordinates within `path` refer to
data values on the y axis or a fraction of the plot area's
height (`yref` set to "paper"). If set to "pixel", `yanchor`
specifies the y position in terms of data or plot fraction but
`y0`, `y1` and y coordinates within `path` are pixels relative
to `yanchor`. This way, the shape can have a fixed height while
maintaining a position relative to data or plot fraction.
The 'ysizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['scaled', 'pixel']
Returns
-------
Any
"""
return self["ysizemode"]
@ysizemode.setter
def ysizemode(self, val):
self["ysizemode"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
editable
Determines whether the shape could be activated for
edit or not. Has no effect when the older editable
shapes mode is enabled via `config.editable` or
`config.edits.shapePosition`.
fillcolor
Sets the color filling the shape's interior. Only
applies to closed shapes.
fillrule
Determines which regions of complex paths constitute
the interior. For more info please visit
https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
layer
Specifies whether shapes are drawn below or above
traces.
line
:class:`plotly.graph_objects.layout.shape.Line`
instance or dict with compatible properties
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the shape.
path
For `type` "path" - a valid SVG path with the pixel
values replaced by data values in
`xsizemode`/`ysizemode` being "scaled" and taken
unmodified as pixels relative to `xanchor` and
`yanchor` in case of "pixel" size mode. There are a few
restrictions / quirks only absolute instructions, not
relative. So the allowed segments are: M, L, H, V, Q,
C, T, S, and Z arcs (A) are not allowed because radius
rx and ry are relative. In the future we could consider
supporting relative commands, but we would have to
decide on how to handle date and log axes. Note that
even as is, Q and C Bezier paths that are smooth on
linear axes may not be smooth on log, and vice versa.
no chained "polybezier" commands - specify the segment
type for each one. On category axes, values are numbers
scaled to the serial numbers of categories because
using the categories themselves there would be no way
to describe fractional positions On data axes: because
space and T are both normal components of path strings,
we can't use either to separate date from time parts.
Therefore we'll use underscore for this purpose:
2015-02-21_13:45:56.789
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Specifies the shape type to be drawn. If "line", a line
is drawn from (`x0`,`y0`) to (`x1`,`y1`) with respect
to the axes' sizing mode. If "circle", a circle is
drawn from ((`x0`+`x1`)/2, (`y0`+`y1`)/2)) with radius
(|(`x0`+`x1`)/2 - `x0`|, |(`y0`+`y1`)/2 -`y0`)|) with
respect to the axes' sizing mode. If "rect", a
rectangle is drawn linking (`x0`,`y0`), (`x1`,`y0`),
(`x1`,`y1`), (`x0`,`y1`), (`x0`,`y0`) with respect to
the axes' sizing mode. If "path", draw a custom SVG
path using `path`. with respect to the axes' sizing
mode.
visible
Determines whether or not this shape is visible.
x0
Sets the shape's starting x position. See `type` and
`xsizemode` for more info.
x1
Sets the shape's end x position. See `type` and
`xsizemode` for more info.
xanchor
Only relevant in conjunction with `xsizemode` set to
"pixel". Specifies the anchor point on the x axis to
which `x0`, `x1` and x coordinates within `path` are
relative to. E.g. useful to attach a pixel sized shape
to a certain data value. No effect when `xsizemode` not
set to "pixel".
xref
Sets the shape's x coordinate axis. If set to a x axis
id (e.g. "x" or "x2"), the `x` position refers to a x
coordinate. If set to "paper", the `x` position refers
to the distance from the left of the plotting area in
normalized coordinates where 0 (1) corresponds to the
left (right). If set to a x axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the left of the
domain of that axis: e.g., *x2 domain* refers to the
domain of the second x axis and a x position of 0.5
refers to the point between the left and the right of
the domain of the second x axis. If the axis `type` is
"log", then you must take the log of your desired
range. If the axis `type` is "date", then you must
convert the date to unix time in milliseconds.
xsizemode
            Sets the shape's sizing mode along the x axis. If set
to "scaled", `x0`, `x1` and x coordinates within `path`
refer to data values on the x axis or a fraction of the
plot area's width (`xref` set to "paper"). If set to
"pixel", `xanchor` specifies the x position in terms of
data or plot fraction but `x0`, `x1` and x coordinates
within `path` are pixels relative to `xanchor`. This
way, the shape can have a fixed width while maintaining
a position relative to data or plot fraction.
y0
Sets the shape's starting y position. See `type` and
`ysizemode` for more info.
y1
Sets the shape's end y position. See `type` and
`ysizemode` for more info.
yanchor
Only relevant in conjunction with `ysizemode` set to
"pixel". Specifies the anchor point on the y axis to
which `y0`, `y1` and y coordinates within `path` are
relative to. E.g. useful to attach a pixel sized shape
to a certain data value. No effect when `ysizemode` not
set to "pixel".
yref
            Sets the shape's y coordinate axis. If set to a y
axis id (e.g. "y" or "y2"), the `y` position refers to
a y coordinate. If set to "paper", the `y` position
refers to the distance from the bottom of the plotting
area in normalized coordinates where 0 (1) corresponds
to the bottom (top). If set to a y axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the bottom of the
domain of that axis: e.g., *y2 domain* refers to the
domain of the second y axis and a y position of 0.5
refers to the point between the bottom and the top of
the domain of the second y axis.
ysizemode
            Sets the shape's sizing mode along the y axis. If set
to "scaled", `y0`, `y1` and y coordinates within `path`
refer to data values on the y axis or a fraction of the
plot area's height (`yref` set to "paper"). If set to
"pixel", `yanchor` specifies the y position in terms of
data or plot fraction but `y0`, `y1` and y coordinates
within `path` are pixels relative to `yanchor`. This
way, the shape can have a fixed height while
maintaining a position relative to data or plot
fraction.
"""
def __init__(
self,
arg=None,
editable=None,
fillcolor=None,
fillrule=None,
layer=None,
line=None,
name=None,
opacity=None,
path=None,
templateitemname=None,
type=None,
visible=None,
x0=None,
x1=None,
xanchor=None,
xref=None,
xsizemode=None,
y0=None,
y1=None,
yanchor=None,
yref=None,
ysizemode=None,
**kwargs,
):
"""
Construct a new Shape object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.Shape`
editable
Determines whether the shape could be activated for
edit or not. Has no effect when the older editable
shapes mode is enabled via `config.editable` or
`config.edits.shapePosition`.
fillcolor
Sets the color filling the shape's interior. Only
applies to closed shapes.
fillrule
Determines which regions of complex paths constitute
the interior. For more info please visit
https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
layer
Specifies whether shapes are drawn below or above
traces.
line
:class:`plotly.graph_objects.layout.shape.Line`
instance or dict with compatible properties
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the shape.
path
For `type` "path" - a valid SVG path with the pixel
values replaced by data values in
`xsizemode`/`ysizemode` being "scaled" and taken
unmodified as pixels relative to `xanchor` and
            `yanchor` in case of "pixel" size mode. There are a few
            restrictions / quirks: only absolute instructions, not
            relative. So the allowed segments are: M, L, H, V, Q,
            C, T, S, and Z; arcs (A) are not allowed because radius
            rx and ry are relative. In the future we could consider
            supporting relative commands, but we would have to
            decide on how to handle date and log axes. Note that
            even as is, Q and C Bezier paths that are smooth on
            linear axes may not be smooth on log, and vice versa.
            No chained "polybezier" commands - specify the segment
            type for each one. On category axes, values are numbers
            scaled to the serial numbers of categories because
            using the categories themselves there would be no way
            to describe fractional positions. On data axes: because
            space and T are both normal components of path strings,
            we can't use either to separate date from time parts.
            Therefore we'll use underscore for this purpose:
            2015-02-21_13:45:56.789
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Specifies the shape type to be drawn. If "line", a line
is drawn from (`x0`,`y0`) to (`x1`,`y1`) with respect
to the axes' sizing mode. If "circle", a circle is
            drawn from ((`x0`+`x1`)/2, (`y0`+`y1`)/2) with radius
            (|(`x0`+`x1`)/2 - `x0`|, |(`y0`+`y1`)/2 - `y0`|) with
respect to the axes' sizing mode. If "rect", a
rectangle is drawn linking (`x0`,`y0`), (`x1`,`y0`),
(`x1`,`y1`), (`x0`,`y1`), (`x0`,`y0`) with respect to
the axes' sizing mode. If "path", draw a custom SVG
            path using `path`, with respect to the axes' sizing
mode.
visible
Determines whether or not this shape is visible.
x0
Sets the shape's starting x position. See `type` and
`xsizemode` for more info.
x1
Sets the shape's end x position. See `type` and
`xsizemode` for more info.
xanchor
Only relevant in conjunction with `xsizemode` set to
"pixel". Specifies the anchor point on the x axis to
which `x0`, `x1` and x coordinates within `path` are
relative to. E.g. useful to attach a pixel sized shape
to a certain data value. No effect when `xsizemode` not
set to "pixel".
xref
            Sets the shape's x coordinate axis. If set to an x axis
            id (e.g. "x" or "x2"), the `x` position refers to an x
            coordinate. If set to "paper", the `x` position refers
            to the distance from the left of the plotting area in
            normalized coordinates where 0 (1) corresponds to the
            left (right). If set to an x axis ID followed by
            "domain" (separated by a space), the position behaves
            like for "paper", but refers to the distance in
            fractions of the domain length from the left of the
            domain of that axis: e.g., *x2 domain* refers to the
            domain of the second x axis and an x position of 0.5
refers to the point between the left and the right of
the domain of the second x axis. If the axis `type` is
"log", then you must take the log of your desired
range. If the axis `type` is "date", then you must
convert the date to unix time in milliseconds.
xsizemode
            Sets the shape's sizing mode along the x axis. If set
to "scaled", `x0`, `x1` and x coordinates within `path`
refer to data values on the x axis or a fraction of the
plot area's width (`xref` set to "paper"). If set to
"pixel", `xanchor` specifies the x position in terms of
data or plot fraction but `x0`, `x1` and x coordinates
within `path` are pixels relative to `xanchor`. This
way, the shape can have a fixed width while maintaining
a position relative to data or plot fraction.
y0
Sets the shape's starting y position. See `type` and
`ysizemode` for more info.
y1
Sets the shape's end y position. See `type` and
`ysizemode` for more info.
yanchor
Only relevant in conjunction with `ysizemode` set to
"pixel". Specifies the anchor point on the y axis to
which `y0`, `y1` and y coordinates within `path` are
relative to. E.g. useful to attach a pixel sized shape
to a certain data value. No effect when `ysizemode` not
set to "pixel".
yref
            Sets the shape's y coordinate axis. If set to a y
axis id (e.g. "y" or "y2"), the `y` position refers to
a y coordinate. If set to "paper", the `y` position
refers to the distance from the bottom of the plotting
area in normalized coordinates where 0 (1) corresponds
to the bottom (top). If set to a y axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the bottom of the
domain of that axis: e.g., *y2 domain* refers to the
domain of the second y axis and a y position of 0.5
refers to the point between the bottom and the top of
the domain of the second y axis.
ysizemode
            Sets the shape's sizing mode along the y axis. If set
to "scaled", `y0`, `y1` and y coordinates within `path`
refer to data values on the y axis or a fraction of the
plot area's height (`yref` set to "paper"). If set to
"pixel", `yanchor` specifies the y position in terms of
data or plot fraction but `y0`, `y1` and y coordinates
within `path` are pixels relative to `yanchor`. This
way, the shape can have a fixed height while
maintaining a position relative to data or plot
fraction.
Returns
-------
Shape
"""
super(Shape, self).__init__("shapes")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.Shape
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Shape`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("editable", None)
_v = editable if editable is not None else _v
if _v is not None:
self["editable"] = _v
_v = arg.pop("fillcolor", None)
_v = fillcolor if fillcolor is not None else _v
if _v is not None:
self["fillcolor"] = _v
_v = arg.pop("fillrule", None)
_v = fillrule if fillrule is not None else _v
if _v is not None:
self["fillrule"] = _v
_v = arg.pop("layer", None)
_v = layer if layer is not None else _v
if _v is not None:
self["layer"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("path", None)
_v = path if path is not None else _v
if _v is not None:
self["path"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x0", None)
_v = x0 if x0 is not None else _v
if _v is not None:
self["x0"] = _v
_v = arg.pop("x1", None)
_v = x1 if x1 is not None else _v
if _v is not None:
self["x1"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xref", None)
_v = xref if xref is not None else _v
if _v is not None:
self["xref"] = _v
_v = arg.pop("xsizemode", None)
_v = xsizemode if xsizemode is not None else _v
if _v is not None:
self["xsizemode"] = _v
_v = arg.pop("y0", None)
_v = y0 if y0 is not None else _v
if _v is not None:
self["y0"] = _v
_v = arg.pop("y1", None)
_v = y1 if y1 is not None else _v
if _v is not None:
self["y1"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("yref", None)
_v = yref if yref is not None else _v
if _v is not None:
self["yref"] = _v
_v = arg.pop("ysizemode", None)
_v = ysizemode if ysizemode is not None else _v
if _v is not None:
self["ysizemode"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
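# Illustrative sketch (not part of the generated module): property values for a
# typical rectangle shape as described in the constructor docstring above. The
# literal numbers and colors are assumptions chosen only for demonstration; in
# normal use such a dict is passed as `arg` (or as keyword arguments) to
# Shape(...) or to `fig.add_shape(...)`.
_example_shape_properties = dict(
    type="rect",
    xref="paper",  # x positions are fractions of the plotting-area width
    yref="y",      # y positions are data coordinates on the y axis
    x0=0.25,
    x1=0.75,
    y0=10,
    y1=20,
    fillcolor="LightSalmon",
    opacity=0.4,
    layer="below",
    line=dict(width=0),
)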
| 37.449355
| 84
| 0.565773
|
faa1d7c9814d4562705b13dce3c637acc7864072
| 1,650
|
py
|
Python
|
rpc/utils/knn.py
|
AAlben/img_retrieval_child_books
|
7618308e294c1e94552b8e0edbbe0ed65fc154ae
|
[
"MIT"
] | null | null | null |
rpc/utils/knn.py
|
AAlben/img_retrieval_child_books
|
7618308e294c1e94552b8e0edbbe0ed65fc154ae
|
[
"MIT"
] | null | null | null |
rpc/utils/knn.py
|
AAlben/img_retrieval_child_books
|
7618308e294c1e94552b8e0edbbe0ed65fc154ae
|
[
"MIT"
] | null | null | null |
import torch
from typing import Dict, Optional
class KNN(object):
"""
Similarity measure based on the euclidean distance.
Hyper-Params:
top_k (int): top_k nearest neighbors will be output in sorted order. If it is 0, all neighbors will be output.
"""
default_hyper_params = {
"top_k": 0,
}
    def __init__(self, hps: Optional[Dict] = None):
        """
        Args:
            hps (dict): hyper parameters in a dict (keys, values); if None,
                the class-level ``default_hyper_params`` are used.
        """
        super(KNN, self).__init__()
        self._hyper_params = hps if hps is not None else self.default_hyper_params
def _cal_dis(self, query_fea: torch.tensor, gallery_fea: torch.tensor) -> torch.tensor:
"""
Calculate the distance between query set features and gallery set features.
Args:
query_fea (torch.tensor): query set features.
gallery_fea (torch.tensor): gallery set features.
Returns:
dis (torch.tensor): the distance between query set features and gallery set features.
"""
query_fea = query_fea.transpose(1, 0)
inner_dot = gallery_fea.mm(query_fea)
dis = (gallery_fea ** 2).sum(dim=1, keepdim=True) + (query_fea ** 2).sum(dim=0, keepdim=True)
dis = dis - 2 * inner_dot
dis = dis.transpose(1, 0)
return dis
def __call__(self, query_fea: torch.tensor, gallery_fea: torch.tensor) -> (torch.tensor, torch.tensor):
dis = self._cal_dis(query_fea, gallery_fea)
sorted_index = torch.argsort(dis, dim=1)
if self._hyper_params["top_k"] != 0:
sorted_index = sorted_index[:, :self._hyper_params["top_k"]]
return dis, sorted_index
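# Illustrative sketch (not part of the original file): exercising the KNN class
# above on random features. The tensor sizes and the top_k value are
# assumptions made purely for demonstration.
if __name__ == "__main__":
    torch.manual_seed(0)
    query = torch.rand(4, 128)       # 4 query feature vectors
    gallery = torch.rand(100, 128)   # 100 gallery feature vectors
    knn = KNN(hps={"top_k": 5})
    dis, sorted_index = knn(query, gallery)
    # dis[i, j] is the squared euclidean distance ||q_i - g_j||^2, computed in
    # _cal_dis via the expansion ||g_j||^2 + ||q_i||^2 - 2 * <g_j, q_i>.
    print(dis.shape, sorted_index.shape)  # torch.Size([4, 100]) torch.Size([4, 5])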
| 34.375
| 118
| 0.618788
|
d7a641694da969cdb5ae360bd7935d4ab5e13e57
| 740
|
py
|
Python
|
perplex/manager.py
|
ofek/perplex
|
339284ac7c54500ebf15eadbeb721d079b61f017
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
perplex/manager.py
|
ofek/perplex
|
339284ac7c54500ebf15eadbeb721d079b61f017
|
[
"Apache-2.0",
"MIT"
] | 1
|
2019-02-04T03:55:43.000Z
|
2019-02-04T03:55:43.000Z
|
perplex/manager.py
|
ofek/perplex
|
339284ac7c54500ebf15eadbeb721d079b61f017
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from copy import deepcopy
from .compose import DockerCompose
from .config import get_instance_dir
from .gcloud import GCloud
from .gsutil import GSUtil
from .kubectl import Kubectl
class InstanceManager:
def __init__(self, config):
self.default_config = deepcopy(config)
self.instance_config = self.default_config.pop('instances').pop(config['instance'])
self.default_config.update(self.instance_config)
self.local_path = get_instance_dir(self.instance_config['name'])
self.gcloud = GCloud(self.default_config)
self.gsutil = GSUtil(self.default_config)
self.docker_compose = DockerCompose(self.default_config, self.local_path)
self.kompose = Kubectl(self.local_path)
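# Illustrative sketch (not part of the original module): the minimal config
# shape that InstanceManager.__init__ above expects. Every key and value other
# than 'instance', 'instances' and 'name' is an assumption for demonstration;
# the real GCloud/GSUtil/DockerCompose helpers will likely require more
# settings than shown here.
_example_config = {
    "instance": "dev",
    "instances": {
        "dev": {"name": "perplex-dev"},
    },
}
# InstanceManager(_example_config) would deep-copy the config, pop the
# 'instances' mapping, select the entry named by config['instance'], merge it
# into the remaining defaults, and wire up the gcloud/gsutil/docker-compose/
# kubectl helpers from the merged settings.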
| 33.636364
| 91
| 0.743243
|
e1aac323d21dd1570de79b463e7b464d3ca6de14
| 14,075
|
py
|
Python
|
bridge/reports/models.py
|
lutovna/klever
|
29c0e4fa60def241032a2ea2b81103d817994eef
|
[
"Apache-2.0"
] | 1
|
2021-01-09T08:44:37.000Z
|
2021-01-09T08:44:37.000Z
|
bridge/reports/models.py
|
lutovna/klever
|
29c0e4fa60def241032a2ea2b81103d817994eef
|
[
"Apache-2.0"
] | 3
|
2021-03-19T09:15:16.000Z
|
2021-09-22T19:24:40.000Z
|
bridge/reports/models.py
|
lutovna/klever
|
29c0e4fa60def241032a2ea2b81103d817994eef
|
[
"Apache-2.0"
] | 1
|
2020-05-22T15:53:39.000Z
|
2020-05-22T15:53:39.000Z
|
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import uuid
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import JSONField, ArrayField
from django.core.files import File
from django.db import models
from django.db.models.signals import post_delete
from django.utils.timezone import now
from mptt.models import MPTTModel, TreeForeignKey
from bridge.vars import COMPARE_VERDICT, REPORT_ARCHIVE
from bridge.utils import CheckArchiveError, WithFilesMixin, remove_instance_files
from users.models import User
from jobs.models import Job
from service.models import Decision
MAX_COMPONENT_LEN = 20
ORIGINAL_SOURCES_DIR = 'OriginalSources'
COVERAGE_STAT_COLOR = ['#f18fa6', '#f1c0b2', '#f9e19b', '#e4f495', '#acf1a8']
def get_component_path(instance, filename):
curr_date = now()
return os.path.join('Reports', instance.component, str(curr_date.year), str(curr_date.month), filename)
def get_coverage_arch_dir(instance, filename):
curr_date = now()
return os.path.join('Reports', instance.report.component, str(curr_date.year), str(curr_date.month), filename)
def get_coverage_dir(instance, filename):
return os.path.join('Reports', 'CoverageCache', 'CovArch-%s' % instance.archive_id, filename)
def get_attr_data_path(instance, filename):
return os.path.join('Reports', 'AttrData', 'Decision-%s' % str(instance.decision_id), filename)
class AttrBase(models.Model):
name = models.CharField(max_length=64, db_index=True)
value = models.CharField(max_length=255)
class Meta:
abstract = True
class Report(MPTTModel):
decision = models.ForeignKey(Decision, models.CASCADE)
parent = TreeForeignKey('self', models.CASCADE, null=True, related_name='children')
identifier = models.CharField(max_length=255, db_index=True)
cpu_time = models.BigIntegerField(null=True)
wall_time = models.BigIntegerField(null=True)
memory = models.BigIntegerField(null=True)
def __str__(self):
return self.identifier
class Meta:
db_table = 'report'
unique_together = [('decision', 'identifier')]
index_together = [('decision', 'identifier')]
class AttrFile(WithFilesMixin, models.Model):
decision = models.ForeignKey(Decision, models.CASCADE)
file = models.FileField(upload_to=get_attr_data_path)
class Meta:
db_table = 'report_attr_file'
class ReportAttr(AttrBase):
report = models.ForeignKey(Report, models.CASCADE, related_name='attrs')
compare = models.BooleanField(default=False)
associate = models.BooleanField(default=False)
data = models.ForeignKey(AttrFile, models.CASCADE, null=True)
class Meta:
db_table = 'report_attrs'
indexes = [models.Index(fields=['name', 'value'])]
class Computer(models.Model):
identifier = models.CharField(max_length=128, db_index=True)
display = models.CharField(max_length=512)
data = JSONField()
class Meta:
db_table = 'computer'
class OriginalSources(WithFilesMixin, models.Model):
identifier = models.CharField(max_length=128, unique=True, db_index=True)
archive = models.FileField(upload_to=ORIGINAL_SOURCES_DIR)
def add_archive(self, fp, save=False):
self.archive.save(REPORT_ARCHIVE['original_sources'], File(fp), save)
if not os.path.isfile(os.path.join(settings.MEDIA_ROOT, self.archive.name)):
raise CheckArchiveError('OriginalSources.archive was not saved')
class Meta:
db_table = 'report_original_sources'
ordering = ('identifier',)
class AdditionalSources(WithFilesMixin, models.Model):
decision = models.ForeignKey(Decision, models.CASCADE)
archive = models.FileField(upload_to='Sources/%Y/%m')
def add_archive(self, fp, save=False):
self.archive.save(REPORT_ARCHIVE['additional_sources'], File(fp), save)
if not os.path.isfile(os.path.join(settings.MEDIA_ROOT, self.archive.name)):
raise CheckArchiveError('AdditionalSources.archive was not saved')
class Meta:
db_table = 'report_additional_sources'
class ReportComponent(WithFilesMixin, Report):
computer = models.ForeignKey(Computer, models.CASCADE)
component = models.CharField(max_length=MAX_COMPONENT_LEN)
verification = models.BooleanField(default=False)
start_date = models.DateTimeField(default=now)
finish_date = models.DateTimeField(null=True)
data = JSONField(null=True, default=list)
log = models.FileField(upload_to=get_component_path, null=True)
verifier_files = models.FileField(upload_to=get_component_path, null=True)
# Sources for Verification reports
original_sources = models.ForeignKey(OriginalSources, models.PROTECT, null=True)
additional_sources = models.ForeignKey(AdditionalSources, models.CASCADE, null=True)
def add_log(self, fp, save=False):
self.log.save(REPORT_ARCHIVE['log'], File(fp), save)
if not os.path.isfile(os.path.join(settings.MEDIA_ROOT, self.log.name)):
raise CheckArchiveError('ReportComponent.log was not saved')
def add_verifier_files(self, fp, save=False):
self.verifier_files.save(REPORT_ARCHIVE['verifier_files'], File(fp), save)
if not os.path.isfile(os.path.join(settings.MEDIA_ROOT, self.verifier_files.name)):
raise CheckArchiveError('ReportComponent.verifier_files was not saved')
class Meta:
db_table = 'report_component'
class ReportComponentLeaf(models.Model):
report = models.ForeignKey(ReportComponent, models.CASCADE, related_name='leaves')
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class Meta:
db_table = 'cache_report_component_leaf'
class CoverageArchive(WithFilesMixin, models.Model):
report = models.ForeignKey(ReportComponent, models.CASCADE, related_name='coverages')
name = models.CharField(max_length=128, default='-')
identifier = models.CharField(max_length=128, default='')
archive = models.FileField(upload_to=get_coverage_arch_dir)
total = JSONField(null=True)
has_extra = models.BooleanField(default=False)
def add_coverage(self, fp, save=False):
self.archive.save(REPORT_ARCHIVE['coverage'], File(fp), save=save)
class Meta:
db_table = 'report_coverage_archive'
class CoverageStatistics(models.Model):
coverage = models.ForeignKey(CoverageArchive, models.CASCADE)
identifier = models.PositiveIntegerField()
parent = models.PositiveIntegerField(null=True)
is_leaf = models.BooleanField()
name = models.CharField(max_length=128)
path = models.TextField(null=True)
depth = models.PositiveIntegerField(default=0)
lines_covered = models.PositiveIntegerField(default=0)
lines_total = models.PositiveIntegerField(default=0)
funcs_covered = models.PositiveIntegerField(default=0)
funcs_total = models.PositiveIntegerField(default=0)
lines_covered_extra = models.PositiveIntegerField(default=0)
lines_total_extra = models.PositiveIntegerField(default=0)
funcs_covered_extra = models.PositiveIntegerField(default=0)
funcs_total_extra = models.PositiveIntegerField(default=0)
def calculate_color(self, div):
color_id = int(div * len(COVERAGE_STAT_COLOR))
if color_id >= len(COVERAGE_STAT_COLOR):
color_id = len(COVERAGE_STAT_COLOR) - 1
elif color_id < 0:
color_id = 0
return COVERAGE_STAT_COLOR[color_id]
@property
def lines_percentage(self):
if not self.lines_total:
return '-'
return '{}%'.format(round(100 * self.lines_covered / self.lines_total))
@property
def funcs_percentage(self):
if not self.funcs_total:
return '-'
return '{}%'.format(round(100 * self.funcs_covered / self.funcs_total))
@property
def lines_color(self):
if not self.lines_total:
return None
return self.calculate_color(self.lines_covered / self.lines_total)
@property
def funcs_color(self):
if not self.funcs_total:
return None
return self.calculate_color(self.funcs_covered / self.funcs_total)
@property
def lines_percentage_extra(self):
if not self.lines_total_extra:
return '-'
return '{}%'.format(round(100 * self.lines_covered_extra / self.lines_total_extra))
@property
def funcs_percentage_extra(self):
if not self.funcs_total_extra:
return '-'
return '{}%'.format(round(100 * self.funcs_covered_extra / self.funcs_total_extra))
@property
def lines_color_extra(self):
if not self.lines_total_extra:
return None
return self.calculate_color(self.lines_covered_extra / self.lines_total_extra)
@property
def funcs_color_extra(self):
if not self.funcs_total_extra:
return None
return self.calculate_color(self.funcs_covered_extra / self.funcs_total_extra)
@property
def indentation(self):
return ' ' * (self.depth - 1)
@property
def shown(self):
if not hasattr(self, '_shown'):
setattr(self, '_shown', False)
return getattr(self, '_shown')
@shown.setter
def shown(self, value):
setattr(self, '_shown', bool(value))
class Meta:
db_table = 'report_coverage_statistics'
class CoverageDataStatistics(models.Model):
coverage = models.ForeignKey(CoverageArchive, models.CASCADE)
name = models.CharField(max_length=255)
data = JSONField()
class Meta:
db_table = 'report_coverage_data_statistics'
class ReportUnsafe(WithFilesMixin, Report):
trace_id = models.UUIDField(unique=True, db_index=True, default=uuid.uuid4)
error_trace = models.FileField(upload_to='Unsafes/%Y/%m')
leaves = GenericRelation(ReportComponentLeaf, related_query_name='unsafes')
def add_trace(self, fp, save=False):
self.error_trace.save(REPORT_ARCHIVE['error_trace'], File(fp), save)
if not os.path.isfile(os.path.join(settings.MEDIA_ROOT, self.error_trace.name)):
raise CheckArchiveError('ReportUnsafe.error_trace was not saved')
class Meta:
db_table = 'report_unsafe'
class ReportSafe(Report):
leaves = GenericRelation(ReportComponentLeaf, related_query_name='safes')
class Meta:
db_table = 'report_safe'
class ReportUnknown(WithFilesMixin, Report):
component = models.CharField(max_length=MAX_COMPONENT_LEN)
problem_description = models.FileField(upload_to='Unknowns/%Y/%m')
leaves = GenericRelation(ReportComponentLeaf, related_query_name='unknowns')
def add_problem_desc(self, fp, save=False):
self.problem_description.save(REPORT_ARCHIVE['problem_description'], File(fp), save)
if not os.path.isfile(os.path.join(settings.MEDIA_ROOT, self.problem_description.name)):
raise CheckArchiveError('ReportUnknown.problem_description was not saved')
class Meta:
db_table = 'report_unknown'
class CompareDecisionsInfo(models.Model):
user = models.ForeignKey(User, models.CASCADE)
decision1 = models.ForeignKey(Decision, models.CASCADE, related_name='+')
decision2 = models.ForeignKey(Decision, models.CASCADE, related_name='+')
names = ArrayField(models.CharField(max_length=64))
class Meta:
db_table = 'cache_report_decisions_compare_info'
class ComparisonObject(models.Model):
info = models.ForeignKey(CompareDecisionsInfo, models.CASCADE)
values = ArrayField(models.CharField(max_length=255))
verdict1 = models.CharField(max_length=1, choices=COMPARE_VERDICT)
verdict2 = models.CharField(max_length=1, choices=COMPARE_VERDICT)
class Meta:
db_table = 'cache_report_comparison_object'
index_together = ["info", "verdict1", "verdict2"]
class ComparisonLink(models.Model):
comparison = models.ForeignKey(ComparisonObject, models.CASCADE, related_name='links')
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class Meta:
db_table = 'cache_report_comparison_link'
class DecisionCache(models.Model):
decision = models.ForeignKey(Decision, models.CASCADE)
component = models.CharField(max_length=MAX_COMPONENT_LEN)
cpu_time = models.BigIntegerField(default=0)
wall_time = models.BigIntegerField(default=0)
memory = models.BigIntegerField(default=0)
total = models.IntegerField(default=0)
finished = models.IntegerField(default=0)
class Meta:
db_table = 'cache_decision_data'
index_together = ['component', 'decision']
post_delete.connect(remove_instance_files, sender=AttrFile)
post_delete.connect(remove_instance_files, sender=OriginalSources)
post_delete.connect(remove_instance_files, sender=AdditionalSources)
post_delete.connect(remove_instance_files, sender=ReportComponent)
post_delete.connect(remove_instance_files, sender=ReportUnsafe)
post_delete.connect(remove_instance_files, sender=ReportUnknown)
post_delete.connect(remove_instance_files, sender=CoverageArchive)
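# Illustrative sketch (not part of the original module): how the helpers on
# CoverageStatistics above turn raw counters into display values. The numbers
# are assumptions for demonstration; the instance is never saved, so no
# database access is involved.
def _demo_coverage_display():
    stat = CoverageStatistics(lines_covered=37, lines_total=100, funcs_total=0)
    assert stat.lines_percentage == '37%'              # round(100 * 37 / 100)
    assert stat.lines_color == COVERAGE_STAT_COLOR[1]  # int(0.37 * 5) == 1
    assert stat.funcs_percentage == '-'                # no functions recorded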
| 36.463731
| 114
| 0.72952
|
0825856c7716801caa734ad731077dbadec3b833
| 1,546
|
py
|
Python
|
scripts/05_modules/motion_tracker/motiontracker_get_2dtrack_from_object_r18.py
|
mgoldshteyn/cinema4d_py_sdk_extended
|
b6c67f1dbae182c09ccbcc1df51f0e7ea4816074
|
[
"Apache-2.0"
] | null | null | null |
scripts/05_modules/motion_tracker/motiontracker_get_2dtrack_from_object_r18.py
|
mgoldshteyn/cinema4d_py_sdk_extended
|
b6c67f1dbae182c09ccbcc1df51f0e7ea4816074
|
[
"Apache-2.0"
] | null | null | null |
scripts/05_modules/motion_tracker/motiontracker_get_2dtrack_from_object_r18.py
|
mgoldshteyn/cinema4d_py_sdk_extended
|
b6c67f1dbae182c09ccbcc1df51f0e7ea4816074
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright: MAXON Computer GmbH
Description:
- Loops through all the tracks of the active Motion Tracker object.
Class/method highlighted:
- BaseObject.Get2dTrackData()
- Mt2dTrackData.GetTrackCount()
- Mt2dTrackData.GetTrackByIndex()
Compatible:
- Win / Mac
- R18, R19, R20, R21, S22
"""
import c4d
def main():
# Checks if there is an active object
if op is None:
raise ValueError("op is None, please select one object.")
# Checks if the selected object is a Motion Tracker Object
if not op.IsInstanceOf(c4d.Omotiontracker):
raise TypeError("op is not a c4d.Omotiontracker.")
# Retrieves tracking data
data = op.Get2dTrackData()
if data is None:
raise RuntimeError("Failed to retrieve the 2D track data.")
# Loops through all tracks
trackCount = data.GetTrackCount()
for trackIdx in range(trackCount):
track = data.GetTrackByIndex(trackIdx)
if track is None:
continue
# Checks track status
status = track.GetStatus()
statusStr = ""
if status == c4d.INVALID_TRACK:
statusStr = "invalid"
elif status == c4d.UNTRACKED:
statusStr = "untracked"
elif status == c4d.TRACKED_VALID:
statusStr = "valid"
elif status == c4d.TRACKED_STALE:
statusStr = "stale"
# Prints track information
print("Track #{0}: {1} is {2}".format(trackIdx, track.GetName(), statusStr))
if __name__ == '__main__':
main()
| 25.344262
| 84
| 0.628719
|
07a3e36dcd1ccd83866ceca4253c03e9f008c84f
| 1,408
|
py
|
Python
|
samples/CabinPressure/main.py
|
microsoft/bonsai-twin-builder
|
141c538dc8f439d2e4219cd60631bddae717a262
|
[
"MIT"
] | 2
|
2021-12-25T23:14:46.000Z
|
2022-02-23T00:11:14.000Z
|
samples/CabinPressure/main.py
|
microsoft/bonsai-twin-builder
|
141c538dc8f439d2e4219cd60631bddae717a262
|
[
"MIT"
] | null | null | null |
samples/CabinPressure/main.py
|
microsoft/bonsai-twin-builder
|
141c538dc8f439d2e4219cd60631bddae717a262
|
[
"MIT"
] | 1
|
2022-02-10T01:56:57.000Z
|
2022-02-10T01:56:57.000Z
|
"""
Runs the sample cabin pressure digital twin model as a Bonsai simulator.
Copyright 2021, Microsoft Corp.
"""
#!/usr/bin/env python3
import os
import sys
# Add parent directory containing the TwinBuilderConnector folder to path.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
# Add CabinPressureTwin directory containing twin_runtime to the path.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "CabinPressureTwin"))
print(f"path is {sys.path}")
from TwinBuilderConnector.RunSession import RunSession
if __name__ == '__main__':
twin_model_file = "./CabinPressureTwin/TwinModel.twin"
# Locate twin_model_file if it is relative path from this file.
CUR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
if not os.path.isabs(twin_model_file):
twin_model_file = os.path.join(CUR_DIR, *twin_model_file.split(os.sep))
if not os.path.isfile(twin_model_file):
print('File does not exist: {}'.format(twin_model_file))
sys.exit(1)
state_variable_names = ['PC', 'PCabin', 'ceiling_t', 'altitude_out', 'velocity_out']
action_variable_names = ['controlFlow', 'outflow']
action_variable_values = [(6.0-0.0)/2.0, (350.0-0.0)/2.0] # start at average action values
RunSession(twin_model_file, state_variable_names, action_variable_names, 5, action_variable_values)
| 40.228571
| 103
| 0.744318
|
4d91890d9e66fce0a88983512350a23b6c563081
| 4,040
|
py
|
Python
|
components/isceobj/InsarProc/runOrbit2sch.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,133
|
2022-01-07T21:24:57.000Z
|
2022-01-07T21:33:08.000Z
|
components/isceobj/InsarProc/runOrbit2sch.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 276
|
2019-02-10T07:18:28.000Z
|
2022-03-31T21:45:55.000Z
|
components/isceobj/InsarProc/runOrbit2sch.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 235
|
2019-02-10T05:00:53.000Z
|
2022-03-18T07:37:24.000Z
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: Brett George
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import logging
import stdproc
import isceobj
import copy
logger = logging.getLogger('isce.insar.runOrbit2sch')
def runOrbit2sch(self):
from isceobj.Catalog import recordInputsAndOutputs
import numpy
logger.info("Converting the orbit to SCH coordinates")
# Piyush
####We don't know the correct SCH heights yet.
####Not computing average peg height yet.
peg = self.insar.peg
pegHavg = self.insar.averageHeight
planet = self.insar.planet
# if self.pegSelect.upper() == 'REFERENCE':
# pegHavg = self.insar.getFirstAverageHeight()
# elif self.pegSelect.upper() == 'SECONDARY':
# pegHavg = self.insar.getSecondAverageHeight()
# elif self.pegSelect.upper() == 'AVERAGE':
# pegHavg = self.insar.averageHeight
# else:
# raise Exception('Unknown peg selection method: ', self.pegSelect)
referenceOrbit = self.insar.referenceOrbit
secondaryOrbit = self.insar.secondaryOrbit
objOrbit2sch1 = stdproc.createOrbit2sch(averageHeight=pegHavg)
objOrbit2sch1.stdWriter = self.stdWriter.set_file_tags("orbit2sch",
"log",
"err",
"log")
objOrbit2sch2 = stdproc.createOrbit2sch(averageHeight=pegHavg)
objOrbit2sch2.stdWriter = self.stdWriter
## loop over reference/secondary orbits
for obj, orb, tag, order in zip((objOrbit2sch1, objOrbit2sch2),
(self.insar.referenceOrbit, self.insar.secondaryOrbit),
('reference', 'secondary'),
('First', 'Second')):
obj(planet=planet, orbit=orb, peg=peg)
recordInputsAndOutputs(self.insar.procDoc, obj,
"runOrbit2sch." + tag,
logger,
"runOrbit2sch." + tag)
#equivalent to self.insar.referenceOrbit =
setattr(self.insar,'%sOrbit'%(tag), obj.orbit)
#Piyush
####The heights and the velocities need to be updated now.
(ttt, ppp, vvv, rrr) = obj.orbit._unpackOrbit()
#equivalent to self.insar.setFirstAverageHeight()
# SCH heights replacing the earlier llh heights
# getattr(self.insar,'set%sAverageHeight'%(order))(numpy.sum(numpy.array(ppp),axis=0)[2] /(1.0*len(ppp)))
#equivalent to self.insar.setFirstProcVelocity()
getattr(self.insar,'set%sProcVelocity'%(order))(vvv[len(vvv)//2][0])
return None
| 41.649485
| 113
| 0.62599
|
0ee45aef4db4eadc669f743b4430b601960b6acd
| 3,021
|
py
|
Python
|
homeassistant/components/switch/isy994.py
|
davidedmundson/home-assistant
|
cd02563552ffc28239fa17c79a5d9bc0013bd5ac
|
[
"MIT"
] | null | null | null |
homeassistant/components/switch/isy994.py
|
davidedmundson/home-assistant
|
cd02563552ffc28239fa17c79a5d9bc0013bd5ac
|
[
"MIT"
] | null | null | null |
homeassistant/components/switch/isy994.py
|
davidedmundson/home-assistant
|
cd02563552ffc28239fa17c79a5d9bc0013bd5ac
|
[
"MIT"
] | 1
|
2018-11-20T17:44:08.000Z
|
2018-11-20T17:44:08.000Z
|
"""
homeassistant.components.switch.isy994
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for ISY994 switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/isy994/
"""
import logging
from homeassistant.components.isy994 import (
HIDDEN_STRING, ISY, SENSOR_STRING, ISYDeviceABC)
from homeassistant.const import STATE_OFF, STATE_ON # STATE_OPEN, STATE_CLOSED
# The frontend doesn't seem to fully support the open and closed states yet.
# Once it does, the HA.doors programs should report open and closed instead of
# off and on. It appears that on should be open and off should be closed.
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the ISY994 platform. """
# pylint: disable=too-many-locals
logger = logging.getLogger(__name__)
devs = []
# verify connection
if ISY is None or not ISY.connected:
logger.error('A connection has not been made to the ISY controller.')
return False
# import not dimmable nodes and groups
for (path, node) in ISY.nodes:
if not node.dimmable and SENSOR_STRING not in node.name:
if HIDDEN_STRING in path:
node.name += HIDDEN_STRING
devs.append(ISYSwitchDevice(node))
# import ISY doors programs
for folder_name, states in (('HA.doors', [STATE_ON, STATE_OFF]),
('HA.switches', [STATE_ON, STATE_OFF])):
try:
folder = ISY.programs['My Programs'][folder_name]
except KeyError:
# HA.doors folder does not exist
pass
else:
for dtype, name, node_id in folder.children:
                if dtype == 'folder':
custom_switch = folder[node_id]
try:
actions = custom_switch['actions'].leaf
assert actions.dtype == 'program', 'Not a program'
node = custom_switch['status'].leaf
except (KeyError, AssertionError):
pass
else:
devs.append(ISYProgramDevice(name, node, actions,
states))
add_devices(devs)
class ISYSwitchDevice(ISYDeviceABC):
""" Represents as ISY light. """
_domain = 'switch'
_dtype = 'binary'
_states = [STATE_ON, STATE_OFF]
class ISYProgramDevice(ISYSwitchDevice):
""" Represents a door that can be manipulated. """
_domain = 'switch'
_dtype = 'binary'
def __init__(self, name, node, actions, states):
super().__init__(node)
self._states = states
self._name = name
self.action_node = actions
def turn_on(self, **kwargs):
""" Turns the device on/closes the device. """
self.action_node.runThen()
def turn_off(self, **kwargs):
""" Turns the device off/opens the device. """
self.action_node.runElse()
| 33.566667
| 79
| 0.600463
|
8cea50103c74f3eb4077190a9d7acbfd7c365907
| 2,854
|
py
|
Python
|
ooobuild/lo/io/x_active_data_control.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/io/x_active_data_control.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/io/x_active_data_control.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.io
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from .x_stream_listener import XStreamListener as XStreamListener_baf80bea
class XActiveDataControl(XInterface_8f010a43):
"""
makes it possible to control an active data source.
This interface should be supported by objects which implement XActiveDataSource or XActiveDataSink.
See Also:
`API XActiveDataControl <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1io_1_1XActiveDataControl.html>`_
"""
__ooo_ns__: str = 'com.sun.star.io'
__ooo_full_ns__: str = 'com.sun.star.io.XActiveDataControl'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.io.XActiveDataControl'
@abstractmethod
def addListener(self, aListener: 'XStreamListener_baf80bea') -> None:
"""
registers an object to receive events from this data source.
It is suggested to allow multiple registration of the same listener, thus for each time a listener is added, it has to be removed.
"""
@abstractmethod
def removeListener(self, aListener: 'XStreamListener_baf80bea') -> None:
"""
unregisters an object to receive events from this data source.
It is suggested to allow multiple registration of the same listener, thus for each time a listener is added, it has to be removed.
"""
@abstractmethod
def start(self) -> None:
"""
starts I/O.
Either XActiveDataControl.setInputStream() or XActiveDataControl.setOutputStream() must be called beforehand.
This method does not block the thread, so reading is generally not finished when the method returns.
"""
@abstractmethod
def terminate(self) -> None:
"""
does a weak abort.
It closes all connected resources and calls XInputStream.close() or XOutputStream.close() and fires the XStreamListener.terminated()-event.
"""
__all__ = ['XActiveDataControl']
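# Illustrative sketch (not part of the generated file): a minimal, purely
# in-memory object following the XActiveDataControl contract documented above.
# It is an assumption for demonstration, not a real UNO component; only the
# XStreamListener.terminated() notification is taken from the docstrings.
class _DemoActiveDataControl(XActiveDataControl):
    def __init__(self):
        self._listeners = []
        self._running = False
    def addListener(self, aListener):
        self._listeners.append(aListener)
    def removeListener(self, aListener):
        self._listeners.remove(aListener)
    def start(self):
        # a real implementation would start pumping data between the
        # connected streams here
        self._running = True
    def terminate(self):
        self._running = False
        for listener in self._listeners:
            listener.terminated()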
| 38.567568
| 147
| 0.713385
|
7b5c8e6635c51c7f5a7d6e6ca06f035dbf37d185
| 1,899
|
py
|
Python
|
tests/extras_mako_test.py
|
justindalzell/webapp2
|
90e444b4d643f3dccaaf6653e717161871c7cc05
|
[
"Apache-2.0"
] | 157
|
2016-04-08T21:10:41.000Z
|
2021-12-30T13:47:30.000Z
|
tests/extras_mako_test.py
|
justindalzell/webapp2
|
90e444b4d643f3dccaaf6653e717161871c7cc05
|
[
"Apache-2.0"
] | 57
|
2016-04-08T20:26:50.000Z
|
2021-03-20T05:47:28.000Z
|
tests/extras_mako_test.py
|
justindalzell/webapp2
|
90e444b4d643f3dccaaf6653e717161871c7cc05
|
[
"Apache-2.0"
] | 74
|
2016-04-11T02:08:53.000Z
|
2022-01-19T18:10:19.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2011 webapp2 AUTHORS.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import webapp2
from webapp2_extras import mako
current_dir = os.path.abspath(os.path.dirname(__file__))
template_path = os.path.join(current_dir, 'resources', 'mako_templates')
class TestMako(unittest.TestCase):
def test_render_template(self):
app = webapp2.WSGIApplication(config={
'webapp2_extras.mako': {
'template_path': template_path,
},
})
req = webapp2.Request.blank('/')
app.set_globals(app=app, request=req)
m = mako.Mako(app)
message = 'Hello, World!'
res = m.render_template('template1.html', message=message)
self.assertEqual(res, message + '\n')
def test_set_mako(self):
app = webapp2.WSGIApplication()
self.assertEqual(len(app.registry), 0)
mako.set_mako(mako.Mako(app), app=app)
self.assertEqual(len(app.registry), 1)
j = mako.get_mako(app=app)
self.assertTrue(isinstance(j, mako.Mako))
def test_get_mako(self):
app = webapp2.WSGIApplication()
self.assertEqual(len(app.registry), 0)
j = mako.get_mako(app=app)
self.assertEqual(len(app.registry), 1)
self.assertTrue(isinstance(j, mako.Mako))
if __name__ == '__main__':
unittest.main()
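# Illustrative note (not part of the original test module): for
# test_render_template above to return message + '\n', the template file
# 'resources/mako_templates/template1.html' presumably contains just the
# expression ${message} followed by a newline (an assumption; the template
# itself is not shown here).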
| 31.65
| 74
| 0.671406
|
abfd5f0a89b1902ea61b29cb1404826af9858e07
| 3,761
|
py
|
Python
|
gni_site/settings.py
|
pastorenue/gni-site
|
1b152e7b4f471d2f4cf3ba086153ccf2cd53d781
|
[
"BSD-2-Clause"
] | null | null | null |
gni_site/settings.py
|
pastorenue/gni-site
|
1b152e7b4f471d2f4cf3ba086153ccf2cd53d781
|
[
"BSD-2-Clause"
] | null | null | null |
gni_site/settings.py
|
pastorenue/gni-site
|
1b152e7b4f471d2f4cf3ba086153ccf2cd53d781
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Django settings for gni_site project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('GRIN_SECRET_KEY', 'ad6ev8!0t(9k*kq-7#o$&(2bi(3&b=$e@)@!nj05sl0q3cr$x5')
# SECURITY WARNING: don't run with debug turned on in production!
# LOCAL_DEBUG arrives as a string, so compare explicitly rather than relying on
# the truthiness of any non-empty value.
DEBUG = os.environ.get('LOCAL_DEBUG', 'False').lower() in ('true', '1')
ALLOWED_HOSTS = ['localhost', '159.65.100.136', '.grinnetinnovations.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'contacts',
'newsletter'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gni_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_DIR, 'templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gni_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'static'),
)
#Login Credentials and urls
LOGIN_URL = 'login' #reverse_lazy('login') ##VERY HARMFUL TO LOGINREQUIRED VIEWS
LOGIN_REDIRECT_URL = 'dashboard'
LOGOUT_REDIRECT_URL = '/'
| 27.452555
| 100
| 0.702739
|
5baeb01d0b23f081e1c4fe369b5eac9956135536
| 1,980
|
py
|
Python
|
rebench/tests/features/issue_57_binary_on_path_test.py
|
cmccandless/ReBench
|
0ec399a8c23a84022aa0271eb1caf30bdea8b2f6
|
[
"MIT"
] | null | null | null |
rebench/tests/features/issue_57_binary_on_path_test.py
|
cmccandless/ReBench
|
0ec399a8c23a84022aa0271eb1caf30bdea8b2f6
|
[
"MIT"
] | null | null | null |
rebench/tests/features/issue_57_binary_on_path_test.py
|
cmccandless/ReBench
|
0ec399a8c23a84022aa0271eb1caf30bdea8b2f6
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from ...configurator import Configurator, load_config
from ...executor import Executor
from ...persistence import DataStore
from ..rebench_test_case import ReBenchTestCase
class Issue57ExecutableOnPath(ReBenchTestCase):
def setUp(self):
super(Issue57ExecutableOnPath, self).setUp()
self._set_path(__file__)
def test_sleep_gives_results(self):
store = DataStore(self._ui)
cnf = Configurator(load_config(self._path + '/issue_57.conf'), store,
self._ui, data_file=self._tmp_file)
runs = list(cnf.get_runs())
runs = sorted(runs, key=lambda e: e.benchmark.name)
ex = Executor(runs, False, self._ui, False)
ex.execute()
self.assertEqual("Bench1", runs[0].benchmark.name)
self.assertEqual(10, runs[0].get_number_of_data_points())
| 45
| 78
| 0.727778
|
ddd46a6cd481595a0542ad191c829e955a5fa11a
| 659
|
py
|
Python
|
cross_test.py
|
nmondal/exit_poll_classification
|
1db66c4c922332a3f79154aa4d8454c50f1dfebd
|
[
"MIT"
] | 1
|
2020-12-12T11:08:33.000Z
|
2020-12-12T11:08:33.000Z
|
cross_test.py
|
nmondal/exit_poll_classification
|
1db66c4c922332a3f79154aa4d8454c50f1dfebd
|
[
"MIT"
] | null | null | null |
cross_test.py
|
nmondal/exit_poll_classification
|
1db66c4c922332a3f79154aa4d8454c50f1dfebd
|
[
"MIT"
] | null | null | null |
from exit_poll import verify_classification, weighted_score
INDEX_DIR = "./index_2"
LABEL_CONFIG = {
"label_column": "senti", "label_densities": {"4": 0.5, "0": 0.5},
"limit": 15
}
QUESTION_BANK_CSV = "./data_dir/Tweets.csv"
QUESTION_CSV_CONFIG = {"id_column": "tweet_id", "label_column": "airline_sentiment", "text_column": "text",
"label_mapping": {"positive": "4", "negative": "0"}}
def do_cross_testing():
verify_classification(QUESTION_BANK_CSV, QUESTION_CSV_CONFIG, INDEX_DIR,
LABEL_CONFIG, prediction_algorithm=weighted_score)
pass
if __name__ == '__main__':
do_cross_testing()
| 31.380952
| 107
| 0.669196
|
bea24f393565c86f4f560ed7ef0341512aee6f64
| 1,113
|
py
|
Python
|
bw2regional/errors.py
|
brightway-lca/brightway2-regional-copy
|
6aab66e76992dae89c48d60f13bf9c8baef17420
|
[
"BSD-3-Clause"
] | 1
|
2022-03-02T10:33:39.000Z
|
2022-03-02T10:33:39.000Z
|
bw2regional/errors.py
|
brightway-lca/brightway2-regional-copy
|
6aab66e76992dae89c48d60f13bf9c8baef17420
|
[
"BSD-3-Clause"
] | 3
|
2020-03-03T15:44:56.000Z
|
2021-07-21T13:34:29.000Z
|
bw2regional/errors.py
|
brightway-lca/brightway2-regional-copy
|
6aab66e76992dae89c48d60f13bf9c8baef17420
|
[
"BSD-3-Clause"
] | 1
|
2022-02-14T14:04:51.000Z
|
2022-02-14T14:04:51.000Z
|
class BW2RegionalizationError(Exception):
"""Base class for BW2 regionalization errors"""
pass
class UnprocessedDatabase(BW2RegionalizationError):
"""A ``Database`` object doesn't have a list of reference geocollections."""
pass
class SiteGenericMethod(BW2RegionalizationError):
"""This ``Method`` doesn't have links to ``geocollections``, making it site-generic."""
pass
class MissingIntersection(BW2RegionalizationError):
"""Missing an ``Intersection`` object and its data needed for regionalized LCA"""
pass
class GeocollectionsMismatch(BW2RegionalizationError):
pass
class MissingSpatialSourceData(BW2RegionalizationError):
pass
class TopologyError(BW2RegionalizationError):
"""Inventory includes locations for which no topology data is available"""
pass
class IncompleteSpatialDefinition(BW2RegionalizationError):
"""Given metadata is not enough to understand a spatial data source"""
pass
class WindowsPathCharacterLimit(BW2RegionalizationError):
"""Windows has an absolute limit of 255 characters in a filepath"""
pass
| 22.714286
| 91
| 0.753819
|
69a6dc2a6ab7c2faeced6b03674481f211fb4332
| 286
|
py
|
Python
|
eggs/numpy-1.9.1-py2.7-linux-x86_64.egg/numpy/core/multiarray.py
|
kruthikarshankar/bemoss_os
|
460a5a41b38240bb9f6dacc23d373ae1942259a8
|
[
"Unlicense"
] | 3
|
2018-11-25T01:09:55.000Z
|
2021-08-24T01:56:36.000Z
|
eggs/numpy-1.9.1-py2.7-linux-x86_64.egg/numpy/core/multiarray.py
|
kwarodom/bemoss_os_1.2
|
460a5a41b38240bb9f6dacc23d373ae1942259a8
|
[
"Unlicense"
] | null | null | null |
eggs/numpy-1.9.1-py2.7-linux-x86_64.egg/numpy/core/multiarray.py
|
kwarodom/bemoss_os_1.2
|
460a5a41b38240bb9f6dacc23d373ae1942259a8
|
[
"Unlicense"
] | 3
|
2018-11-09T03:38:09.000Z
|
2020-02-24T06:26:10.000Z
|
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, 'multiarray.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| 35.75
| 73
| 0.769231
|
d089290d047c146bd4d64f42afb41dbe32ba43cc
| 4,662
|
py
|
Python
|
tests/test_adaptors.py
|
loftwah/MONAI
|
37fb3e779121e6dc74127993df102fc91d9065f8
|
[
"Apache-2.0"
] | 1
|
2020-04-23T13:05:29.000Z
|
2020-04-23T13:05:29.000Z
|
tests/test_adaptors.py
|
tranduyquockhanh/MONAI
|
37fb3e779121e6dc74127993df102fc91d9065f8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_adaptors.py
|
tranduyquockhanh/MONAI
|
37fb3e779121e6dc74127993df102fc91d9065f8
|
[
"Apache-2.0"
] | 1
|
2021-09-20T12:10:01.000Z
|
2021-09-20T12:10:01.000Z
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import itertools
from monai.transforms.adaptors import adaptor, apply_alias, to_kwargs, FunctionSignature
class TestAdaptors(unittest.TestCase):
def test_function_signature(self):
def foo(image, label=None, *a, **kw):
pass
f = FunctionSignature(foo)
def test_single_in_single_out(self):
def foo(image):
return image * 2
it = itertools.product(
['image', ['image']],
[None, 'image', ['image'], {'image': 'image'}]
)
for i in it:
d = {'image': 2}
dres = adaptor(foo, i[0], i[1])(d)
self.assertEqual(dres['image'], 4)
d = {'image': 2}
dres = adaptor(foo, 'image')(d)
self.assertEqual(dres['image'], 4)
d = {'image': 2}
dres = adaptor(foo, 'image', 'image')(d)
self.assertEqual(dres['image'], 4)
d = {'image': 2}
dres = adaptor(foo, 'image', {'image': 'image'})(d)
self.assertEqual(dres['image'], 4)
d = {'img': 2}
dres = adaptor(foo, 'img', {'img': 'image'})(d)
self.assertEqual(dres['img'], 4)
d = {'img': 2}
dres = adaptor(foo, ['img'], {'img': 'image'})(d)
self.assertEqual(dres['img'], 4)
def test_multi_in_single_out(self):
def foo(image, label):
return image * label
it = itertools.product(
['image', ['image']],
[None, ['image', 'label'], {'image': 'image', 'label': 'label'}]
)
for i in it:
d = {'image': 2, 'label': 3}
dres = adaptor(foo, i[0], i[1])(d)
self.assertEqual(dres['image'], 6)
self.assertEqual(dres['label'], 3)
it = itertools.product(
['newimage', ['newimage']],
[None, ['image', 'label'], {'image': 'image', 'label': 'label'}]
)
for i in it:
d = {'image': 2, 'label': 3}
dres = adaptor(foo, i[0], i[1])(d)
self.assertEqual(dres['image'], 2)
self.assertEqual(dres['label'], 3)
self.assertEqual(dres['newimage'], 6)
it = itertools.product(
['img', ['img']],
[{'img': 'image', 'lbl': 'label'}]
)
for i in it:
d = {'img': 2, 'lbl': 3}
dres = adaptor(foo, i[0], i[1])(d)
self.assertEqual(dres['img'], 6)
self.assertEqual(dres['lbl'], 3)
def test_default_arg_single_out(self):
def foo(a, b=2):
return a * b
d = {'a': 5}
dres = adaptor(foo, 'c')(d)
self.assertEqual(dres['c'], 10)
d = {'b': 5}
with self.assertRaises(TypeError):
dres = adaptor(foo, 'c')(d)
def test_multi_out(self):
def foo(a, b):
return a * b, a / b
d = {'a': 3, 'b': 4}
dres = adaptor(foo, ['c', 'd'])(d)
self.assertEqual(dres['c'], 12)
self.assertEqual(dres['d'], 3 / 4)
def test_dict_out(self):
def foo(a):
return {'a': a * 2}
d = {'a': 2}
dres = adaptor(foo, {'a': 'a'})(d)
self.assertEqual(dres['a'], 4)
d = {'b': 2}
dres = adaptor(foo, {'a': 'b'}, {'b': 'a'})(d)
self.assertEqual(dres['b'], 4)
class TestApplyAlias(unittest.TestCase):
def test_apply_alias(self):
def foo(d):
d['x'] *= 2
return d
d = {'a': 1, 'b': 3}
result = apply_alias(foo, {'b': 'x'})(d)
self.assertDictEqual({'a': 1, 'b': 6}, result)
class TestToKwargs(unittest.TestCase):
def test_to_kwargs(self):
def foo(**kwargs):
results = {k: v * 2 for k, v in kwargs.items()}
return results
def compose_like(fn, data):
data = fn(data)
return data
d = {'a': 1, 'b': 2}
actual = compose_like(to_kwargs(foo), d)
self.assertDictEqual(actual, {'a': 2, 'b': 4})
with self.assertRaises(TypeError):
actual = compose_like(foo, d)
| 28.254545
| 88
| 0.512656
|
3905a40d58d8751fb6ab5e003aab404f68e5ccca
| 2,036
|
py
|
Python
|
include/PyBool_builder.py
|
tomaszjonak/PBL
|
738b95da52cd59dcacb0b9dc244ca1713b0264ac
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
include/PyBool_builder.py
|
tomaszjonak/PBL
|
738b95da52cd59dcacb0b9dc244ca1713b0264ac
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
include/PyBool_builder.py
|
tomaszjonak/PBL
|
738b95da52cd59dcacb0b9dc244ca1713b0264ac
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# Tyler Sorensen
# February 15, 2012
# University of Utah
# PyBool_builder.py
# The interface to build recursive style boolean expressions
# See README.txt for more information
def mk_const_expr(val):
"""
returns a constant expression of value VAL
VAL should be of type boolean
"""
return {"type": "const",
"value": val}
def mk_var_expr(name):
"""
returns a variable expression of name NAME
where NAME is a string
"""
return {"type": "var",
"name": (name, 0)}
def mk_neg_expr(expr):
"""
returns a negated expression where EXPR
is the expression to be negated
"""
return {"type": "neg",
"expr": expr}
def mk_and_expr(expr1, expr2):
"""
returns an and expression
of the form (EXPR1 /\ EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type": "and",
"expr1": expr1,
"expr2": expr2}
def mk_or_expr(expr1, expr2):
"""
returns an or expression
of the form (EXPR1 \/ EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type": "or",
"expr1": expr1,
"expr2": expr2}
# NOT NEEDED
def mk_paren_expr(expr):
return {"type": "paren",
"expr": expr}
def mk_impl_expr(expr1, expr2):
"""
    returns an implication expression
of the form (EXPR1 -> EXPR2)
where EXPR1 and EXPR2 are expressions
NOTE: Order of expr1 and expr2 matters here
"""
return {"type": "impl",
"expr1": expr1,
"expr2": expr2}
def mk_eqv_expr(expr1, expr2):
"""
    returns an equivalence expression
of the form (EXPR1 <=> EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type": "eqv",
"expr1": expr1,
"expr2": expr2}
def mk_xor_expr(expr1, expr2):
"""
    returns an xor expression
of the form (EXPR1 XOR EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type": "xor",
"expr1": expr1,
"expr2": expr2}
| 20.565657
| 60
| 0.575147
|
2fb735b74580e386bd66797240192ed2f2c9be64
| 673
|
py
|
Python
|
dueros/Constants.py
|
ayxue/BaiduSaxoOpenAPI
|
d042366bb33ebdc4471b29e167b01c4cb7cb298d
|
[
"Apache-2.0"
] | null | null | null |
dueros/Constants.py
|
ayxue/BaiduSaxoOpenAPI
|
d042366bb33ebdc4471b29e167b01c4cb7cb298d
|
[
"Apache-2.0"
] | null | null | null |
dueros/Constants.py
|
ayxue/BaiduSaxoOpenAPI
|
d042366bb33ebdc4471b29e167b01c4cb7cb298d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
# -*- encoding=utf-8 -*-
# description:
# author:jack
# create_time: 2018/4/15
"""
desc:pass
"""
class _Constants(object):
class ConstError(TypeError): pass
class ConstCaseError(ConstError): pass
def __setattr__(self, name, value):
if name in self.__dict__:
raise self.ConstError("can't change const %s" % name)
if not name.isupper():
raise self.ConstCaseError('const name "%s" is not all uppercase' % name)
self.__dict__[name] = value
constants = _Constants()
# Path where log files are stored
constants.LOG_PATH = './apps/log/dueros'
if __name__ == '__main__':
print(constants.LOG_PATH)
pass
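# Const-protection sketch (hedged): what the __setattr__ guard above enforces.
#
#   constants.TMP_DIR = './apps/tmp'    # new all-uppercase name: accepted
#   constants.LOG_PATH = './elsewhere'  # raises _Constants.ConstError
#   constants.tmp_dir = './apps/tmp'    # raises _Constants.ConstCaseError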
| 19.794118
| 84
| 0.643388
|
52a9a4e32f184f25a634f2a0229e843432ab2c66
| 1,751
|
py
|
Python
|
sapsan/core/abstrctions/experiment.py
|
MilesCranmer/Sapsan
|
4d21954baf196ede2d4dafc765aed98a0cfca21b
|
[
"BSD-3-Clause"
] | 11
|
2020-05-25T18:59:02.000Z
|
2021-11-30T15:27:43.000Z
|
sapsan/core/abstrctions/experiment.py
|
MilesCranmer/Sapsan
|
4d21954baf196ede2d4dafc765aed98a0cfca21b
|
[
"BSD-3-Clause"
] | 19
|
2020-04-17T05:54:09.000Z
|
2021-09-14T05:24:17.000Z
|
sapsan/core/abstrctions/experiment.py
|
MilesCranmer/Sapsan
|
4d21954baf196ede2d4dafc765aed98a0cfca21b
|
[
"BSD-3-Clause"
] | 3
|
2021-09-01T16:11:18.000Z
|
2021-09-06T07:39:06.000Z
|
from abc import ABC, abstractmethod
from typing import List
from sapsan.core.abstrctions.algorithm import Parameter, Metric, Artifact
from sapsan.core.abstrctions.tracking import TrackingBackend
from sapsan.core.tracking.logger import LoggingBackend
class Experiment(ABC):
"""
Base experiment class
"""
def __init__(self,
tracking_backend: TrackingBackend):
"""
@param tracking_backend: tracking backend
"""
self.tracking_backend = tracking_backend
def execute(self, *args, **kwargs):
result = self.run(*args, **kwargs)
self.tracking_backend.log_parameters(parameters=self.parameters)
self.tracking_backend.log_metrics(metrics=self.metrics)
self.tracking_backend.log_artifacts(artifacts=self.artifacts)
return result
@abstractmethod
def run(self, *args, **kwargs):
"""
        Single pass (run) of the experiment.
@return:
"""
pass
@abstractmethod
def test(self,
parameters: Parameter):
"""
Test/evaluation of experiment
@param parameters: parameters for test
@return:
"""
pass
@property
@abstractmethod
def parameters(self) -> List[Parameter]:
"""
List of parameters of algorithm
@return: list of parameters for algorithm
"""
pass
@property
@abstractmethod
def metrics(self) -> List[Metric]:
"""
List of metrics of algorithm
@return: list of metrics
"""
pass
@property
@abstractmethod
def artifacts(self) -> List[Artifact]:
"""
List of artifacts produced by algorithm
@return:
"""
pass
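# Minimal subclass sketch (hedged): implements only what the ABC above
# requires; every body is a stub. `DummyBackend()` in the commented lines is a
# hypothetical TrackingBackend stand-in, not part of sapsan.
class NoopExperiment(Experiment):
    def run(self, *args, **kwargs):
        return "done"

    def test(self, parameters):
        return None

    @property
    def parameters(self):
        return []

    @property
    def metrics(self):
        return []

    @property
    def artifacts(self):
        return []

# experiment = NoopExperiment(tracking_backend=DummyBackend())
# experiment.execute()  # run() first, then parameters/metrics/artifacts are logged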
| 25.014286
| 73
| 0.609366
|
b170439baf1c6dfcc165345842bf619f35389695
| 9,080
|
py
|
Python
|
instapy/clarifai_util.py
|
Rob-Rychs/InstaPy
|
3e71a6522a6e8e694cea59f914fe64d4bb995d7c
|
[
"MIT"
] | 1
|
2019-05-13T03:44:07.000Z
|
2019-05-13T03:44:07.000Z
|
instapy/clarifai_util.py
|
Rob-Rychs/InstaPy
|
3e71a6522a6e8e694cea59f914fe64d4bb995d7c
|
[
"MIT"
] | null | null | null |
instapy/clarifai_util.py
|
Rob-Rychs/InstaPy
|
3e71a6522a6e8e694cea59f914fe64d4bb995d7c
|
[
"MIT"
] | null | null | null |
"""Module which handles the clarifai api and checks
the image for invalid content"""
from clarifai.rest import ClarifaiApp
from clarifai.rest import Workflow
from selenium.common.exceptions import NoSuchElementException
def check_image(browser,
clarifai_api_key,
img_tags,
img_tags_skip_if_contain,
logger,
clarifai_models,
workflow,
probability,
full_match=False,
check_video=False,
proxy=None,
picture_url=None):
try:
"""Uses the link to the image to check for invalid content in the
image.
If a workflow has been selected, get list of tags from Clarifai API
by checking link against models included in the workflow. If a workflow
hasn't been provided, InstaPy will check images against given model(
s)"""
clarifai_api = ClarifaiApp(api_key=clarifai_api_key)
clarifai_tags = []
if proxy is not None:
clarifai_api.api.session.proxies = {'https': proxy}
# Set req image or video source URL to given one or get it from
# current page
if picture_url is None:
source_link = get_source_link(browser)
else:
source_link = [picture_url]
# No image in page
if not source_link:
return True, [], []
# Check image using workflow if provided. If no workflow,
# check image using model(s)
if workflow:
clarifai_workflow = Workflow(clarifai_api.api,
workflow_id=workflow[0])
# If source is video, checks keyframe against models as video
# inputs not supported when using workflows
if source_link[0].endswith('mp4'):
clarifai_response = clarifai_workflow.predict_by_url(
source_link[1])
else:
clarifai_response = clarifai_workflow.predict_by_url(
source_link[0])
for response in clarifai_response['results'][0]['outputs']:
results = get_clarifai_tags(response, probability)
clarifai_tags.extend(results)
else:
for model in clarifai_models:
clarifai_response = get_clarifai_response(
clarifai_api, model, source_link, check_video
)
results = get_clarifai_tags(
clarifai_response['outputs'][0], probability
)
clarifai_tags.extend(results)
logger.info(
'source_link {} got predicted result(s):\n{}'.format(
source_link, clarifai_tags
)
)
# Will not comment on an image if any of the tags in
# img_tags_skip_if_contain are matched
if given_tags_in_result(img_tags_skip_if_contain, clarifai_tags):
logger.info(
'Not Commenting, image contains concept(s): "{}".'.format(
', '.join(list(
set(clarifai_tags) & set(img_tags_skip_if_contain)))
)
)
return False, [], clarifai_tags
for (tags, should_comment, comments) in img_tags:
if should_comment and given_tags_in_result(tags, clarifai_tags,
full_match):
return True, comments, clarifai_tags
elif given_tags_in_result(tags, clarifai_tags, full_match):
logger.info(
'Not Commenting, image contains concept(s): "{}".'.format(
', '.join(list(set(clarifai_tags) & set(tags)))
)
)
return False, [], clarifai_tags
return True, [], clarifai_tags
except Exception as err:
logger.error('Image check error: {}'.format(err))
def given_tags_in_result(search_tags, clarifai_tags, full_match=False):
"""Checks the clarifai tags if it contains one (or all) search tags """
if full_match:
return all([tag in clarifai_tags for tag in search_tags])
else:
return any((tag in clarifai_tags for tag in search_tags))
def get_source_link(browser):
"""Checks to see if a post is an image. If so, returns list with image
source URL.
If a NoSuchElement exception occurs, checks post for video and returns
the source URLs
for both the video and the video's keyframe."""
source = []
try:
source.append(
browser.find_element_by_xpath(
'//img[@class="FFVAD"]').get_attribute('src')
)
except NoSuchElementException:
source.append(
browser.find_element_by_xpath(
'//video[@class="tWeCl"]').get_attribute(
'src'
)
)
source.append(
browser.find_element_by_xpath(
'//img[@class="_8jZFn"]').get_attribute('src')
)
return source
def get_clarifai_response(clarifai_api, clarifai_model, source_link,
check_video):
"""Compiles a list of tags from Clarifai using the chosen models.
First checks the value of each item in the models list against a
dictionary. If the model value provided does not match one of the
keys in the dictionary below, model value is used in
clarifai_api.models.get(). Useful for custom models."""
# List of models which support video inputs
video_models = ['apparel', 'food', 'general', 'nsfw', 'travel', 'wedding']
clarifai_models = {
'general': 'general-v1.3',
'nsfw': 'nsfw-v1.0',
'apparel': 'apparel',
'celebrity': 'celeb-v1.3',
'color': 'color',
'demographics': 'demographics',
'food': 'food-items-v1.0',
'landscape quality': 'Landscape Quality',
'logo': 'logo',
'moderation': 'moderation',
'portrait quality': 'Portrait Quality',
'textures': 'Textures & Patterns',
'travel': 'travel-v1.0',
'weddings': 'weddings-v1.0',
}
model = clarifai_api.models.get(
clarifai_models.get(clarifai_model.lower(), clarifai_model)
)
# Get response from Clarifai API
# If source is video, model accepts video inputs and check_video is
# True, analyze content of frames in video
if (
check_video
and source_link[0].endswith('mp4')
and clarifai_model.lower() in video_models
):
response = model.predict_by_url(source_link[0], is_video=True)
# If source is video but model does not accept video inputs or
# check_video is False, analyze content of keyframe
elif source_link[0].endswith('mp4'):
response = model.predict_by_url(source_link[1])
else:
response = model.predict_by_url(source_link[0])
return response
def get_clarifai_tags(clarifai_response, probability):
"""Get the response from the Clarifai API and return results filtered by
concepts with a confidence set by probability parameter (default 50%)"""
results = []
concepts = []
# Parse response for Color model
try:
concepts = [
{concept.get('w3c', {}).get('name').lower(): concept.get('value')}
for concept in clarifai_response['data']['colors']
]
except KeyError:
pass
# Parse response for Celebrity and Demographics models
try:
for value in clarifai_response['data']['regions']:
for face in value['data']['face'].values():
concepts.extend(
[
{concept.get('name').lower(): concept.get('value')}
for concept in face['concepts']
]
)
except KeyError:
pass
# Parse response for Logo model
try:
concepts = [
{concept.get('name').lower(): concept.get('value')}
for concept in
clarifai_response['data']['regions'][0]['data']['concepts']
]
except KeyError:
pass
# Parse response for General model and similarly structured responses
try:
concepts = [
{concept.get('name').lower(): concept.get('value')}
for concept in clarifai_response['data']['concepts']
]
except KeyError:
pass
# Parse response for Video input
try:
for frame in clarifai_response['data']['frames']:
concepts.extend(
[
{concept.get('name').lower(): concept.get('value')}
for concept in frame['data']['concepts']
]
)
except KeyError:
pass
# Filter concepts based on probability threshold
for concept in concepts:
if float([x for x in concept.values()][0]) > probability:
results.append(str([x for x in concept.keys()][0]))
return results
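# Usage sketch (hedged): given_tags_in_result is the only helper above that
# needs no Clarifai client, so it can be exercised directly.
if __name__ == "__main__":
    predicted = ['dog', 'grass', 'outdoors']
    print(given_tags_in_result(['dog', 'cat'], predicted))                   # True: any tag matches
    print(given_tags_in_result(['dog', 'cat'], predicted, full_match=True))  # False: 'cat' is missing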
| 35.748031
| 79
| 0.576982
|
2923b881bafe40434e063a6ada8fc2c220ba5884
| 8,015
|
py
|
Python
|
src/dev/riscv/HiFive.py
|
hyu-iot/gem5
|
aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5
|
[
"BSD-3-Clause"
] | 765
|
2015-01-14T16:17:04.000Z
|
2022-03-28T07:46:28.000Z
|
src/dev/riscv/HiFive.py
|
hyu-iot/gem5
|
aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5
|
[
"BSD-3-Clause"
] | 30
|
2015-01-01T21:49:38.000Z
|
2021-04-20T19:01:54.000Z
|
src/dev/riscv/HiFive.py
|
hyu-iot/gem5
|
aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5
|
[
"BSD-3-Clause"
] | 807
|
2015-01-06T09:55:38.000Z
|
2022-03-30T10:23:36.000Z
|
# Copyright (c) 2021 Huawei International
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.objects.Platform import Platform
from m5.objects.PMAChecker import PMAChecker
from m5.objects.Clint import Clint
from m5.objects.Plic import Plic
from m5.objects.RTC import RiscvRTC
from m5.objects.Uart import RiscvUart8250
from m5.objects.Terminal import Terminal
from m5.params import *
from m5.proxy import *
from m5.util.fdthelper import *
class HiFive(Platform):
"""HiFive Platform
Implementation:
This is the base class for SiFive's HiFive
board series. It contains the CLINT and PLIC
interrupt controllers, Uart and Disk.
Implementation details are based on SiFive
FU540-C000. https://sifive.cdn.prismic.io/
sifive/b5e7a29c-d3c2-44ea-85fb-acc1df282e2
1_FU540-C000-v1p3.pdf
Setup:
The following sections outline the required
setup for a RISC-V HiFive platform. See
configs/example/riscv/fs_linux.py for example.
Driving CLINT:
CLINT has an interrupt pin which increments
mtime. It can be connected to any interrupt
source pin which acts as the RTCCLK pin. An
abstract RTC wrapper called RiscvRTC can be
used.
Attaching PLIC devices:
PLIC handles external interrupts. Interrupt
PioDevices should inherit from PlicIntDevice
(PCI and DMA not yet implemented). It contains
a parameter interrupt_id which should be used
to call platform->postPciInt(id).
All PLIC interrupt devices should be returned
by _off_chip_devices(). Calling attachPlic sets
up the PLIC interrupt source count.
Uart:
The HiFive platform also has an uart_int_id.
This is because Uart8250 uses postConsoleInt
instead of postPciInt. In the future if a Uart
that inherits PlicIntDevice is implemented,
this can be removed.
Disk:
See fs_linux.py for setup example.
PMAChecker:
The PMAChecker will be attached to the MMU of
each CPU (which allows them to differ). See
fs_linux.py for setup example.
"""
type = 'HiFive'
cxx_header = "dev/riscv/hifive.hh"
cxx_class = 'gem5::HiFive'
# CLINT
clint = Param.Clint(Clint(pio_addr=0x2000000), "CLINT")
# PLIC
plic = Param.Plic(Plic(pio_addr=0xc000000), "PLIC")
# Uart
uart = RiscvUart8250(pio_addr=0x10000000)
# Int source ID to redirect console interrupts to
# Set to 0 if using a pci interrupt for Uart instead
uart_int_id = Param.Int(0xa, "PLIC Uart interrupt ID")
terminal = Terminal()
def _on_chip_devices(self):
"""Returns a list of on-chip peripherals
"""
return [
self.clint,
self.plic
]
def _off_chip_devices(self):
"""Returns a list of off-chip peripherals
"""
devices = [self.uart]
if hasattr(self, "disk"):
devices.append(self.disk)
return devices
def _on_chip_ranges(self):
"""Returns a list of on-chip peripherals
address range
"""
return [
AddrRange(dev.pio_addr, size=dev.pio_size)
for dev in self._on_chip_devices()
]
def _off_chip_ranges(self):
"""Returns a list of off-chip peripherals
address range
"""
return [
AddrRange(dev.pio_addr, size=dev.pio_size)
for dev in self._off_chip_devices()
]
def attachPlic(self):
"""Count number of PLIC interrupt sources
"""
plic_srcs = [self.uart_int_id]
for device in self._off_chip_devices():
if hasattr(device, "interrupt_id"):
plic_srcs.append(device.interrupt_id)
self.plic.n_src = max(plic_srcs) + 1
def attachOnChipIO(self, bus):
"""Attach on-chip IO devices, needs modification
to support DMA and PCI
"""
for device in self._on_chip_devices():
device.pio = bus.mem_side_ports
def attachOffChipIO(self, bus):
"""Attach off-chip IO devices, needs modification
to support DMA and PCI
"""
for device in self._off_chip_devices():
device.pio = bus.mem_side_ports
def setNumCores(self, num_cpu):
""" Sets the PLIC and CLINT to have the right number of threads and
contexts. Assumes that the cores have a single hardware thread.
"""
self.plic.n_contexts = num_cpu * 2
self.clint.num_threads = num_cpu
def generateDeviceTree(self, state):
cpus_node = FdtNode("cpus")
cpus_node.append(FdtPropertyWords("timebase-frequency", [10000000]))
yield cpus_node
node = FdtNode("soc")
local_state = FdtState(addr_cells=2, size_cells=2)
node.append(local_state.addrCellsProperty())
node.append(local_state.sizeCellsProperty())
node.append(FdtProperty("ranges"))
node.appendCompatible(["simple-bus"])
for subnode in self.recurseDeviceTree(local_state):
node.append(subnode)
yield node
# For generating devicetree
_cpu_count = 0
def annotateCpuDeviceNode(self, cpu, state):
cpu.append(FdtPropertyStrings('mmu-type', 'riscv,sv48'))
cpu.append(FdtPropertyStrings('status', 'okay'))
cpu.append(FdtPropertyStrings('riscv,isa', 'rv64imafdcsu'))
cpu.appendCompatible(["riscv"])
int_node = FdtNode("interrupt-controller")
int_state = FdtState(interrupt_cells=1)
int_node.append(int_state.interruptCellsProperty())
int_node.append(FdtProperty("interrupt-controller"))
int_node.appendCompatible("riscv,cpu-intc")
cpus = self.system.unproxy(self).cpu
phandle = int_state.phandle(cpus[self._cpu_count])
self._cpu_count += 1
int_node.append(FdtPropertyWords("phandle", [phandle]))
cpu.append(int_node)
| 37.106481
| 76
| 0.684467
|
dab5e1de4ed6a02d804820d5e182de989ed0e24d
| 4,474
|
py
|
Python
|
library/nsxt_upgrade_eula_accept_facts.py
|
lcamarda/nsxtlivefire-v2t
|
a95cbf25306aee6c115c510a48260b40b38f6be5
|
[
"BSD-2-Clause"
] | 6
|
2020-03-25T16:49:52.000Z
|
2020-04-11T16:01:35.000Z
|
library/nsxt_upgrade_eula_accept_facts.py
|
lcamarda/nsxtlivefire-v2t
|
a95cbf25306aee6c115c510a48260b40b38f6be5
|
[
"BSD-2-Clause"
] | 3
|
2020-03-26T19:30:15.000Z
|
2020-04-16T22:17:24.000Z
|
library/nsxt_upgrade_eula_accept_facts.py
|
lcamarda/nsxtlivefire-v2t
|
a95cbf25306aee6c115c510a48260b40b38f6be5
|
[
"BSD-2-Clause"
] | 2
|
2020-03-25T23:49:30.000Z
|
2020-03-26T21:52:23.000Z
|
#!/usr/bin/env python
#
# Copyright 2019 VMware, Inc.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsxt_upgrade_eula_accept_facts
short_description: 'Gets EULA acceptance status and contents'
description: "Returns EULA acceptance status and the contents."
version_added: '2.7'
author: 'Kommireddy Akhilesh'
options:
hostname:
description: 'Deployed NSX manager hostname.'
required: true
type: str
username:
description: 'The username to authenticate with the NSX manager.'
required: true
type: str
password:
description: 'The password to authenticate with the NSX manager.'
required: true
type: str
required_info:
choices:
- acceptance
- contents
description: "required_info can be either 'acceptance' or 'contents'.
'acceptance' returns the acceptance status of end user license agreement .
'contents' Return the content of end user license agreement in the specified format.
By default, it's pure string without line break. "
required: true
'''
EXAMPLES = '''
- name: Gets EULA acceptance status and contents
nsxt_upgrade_eula_accept_facts:
hostname: "10.192.167.137"
username: "admin"
password: "Admin!23Admin"
validate_certs: False
required_info: "acceptance"
'''
RETURN = '''# '''
import json, time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware_nsxt import vmware_argument_spec, request
from ansible.module_utils.common_utils import get_id_from_display_name_results
from ansible.module_utils._text import to_native
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(required_info=dict(required=True, type='str',
choices=['acceptance', 'contents']))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
mgr_hostname = module.params['hostname']
mgr_username = module.params['username']
mgr_password = module.params['password']
validate_certs = module.params['validate_certs']
required_info = module.params['required_info']
manager_url = 'https://{}/api/v1'.format(mgr_hostname)
if required_info == 'acceptance':
try:
(rc, resp) = request(manager_url + '/upgrade/eula/acceptance',
headers=dict(Accept='application/json'), url_username=mgr_username,
url_password=mgr_password, validate_certs=validate_certs,
ignore_errors=True)
except Exception as err:
module.fail_json(msg='Error accessing upgrade EULA acceptance '
'status. Error [%s]' % (to_native(err)))
module.exit_json(changed=False, **resp)
elif required_info == 'contents':
try:
(rc, resp) = request(manager_url + '/upgrade/eula/content',
headers=dict(Accept='application/json'), url_username=mgr_username,
url_password=mgr_password, validate_certs=validate_certs,
ignore_errors=True)
except Exception as err:
module.fail_json(msg='Error accessing upgrade EULA contents '
'status. Error [%s]' % (to_native(err)))
module.exit_json(changed=False, **resp)
else:
module.fail_json(msg='Invalid value passed for required_info.')
if __name__ == '__main__':
main()
| 40.306306
| 136
| 0.678587
|
b633d18cc9893e01bdca644136451bcad16ff0aa
| 772
|
py
|
Python
|
setup.py
|
WiscADSL/cuttlefs
|
8ddc684d4fc9167778bfe1cddfbbae8a3eabe15e
|
[
"MIT"
] | 11
|
2020-07-13T09:59:23.000Z
|
2022-01-20T21:17:36.000Z
|
setup.py
|
WiscADSL/cuttlefs
|
8ddc684d4fc9167778bfe1cddfbbae8a3eabe15e
|
[
"MIT"
] | null | null | null |
setup.py
|
WiscADSL/cuttlefs
|
8ddc684d4fc9167778bfe1cddfbbae8a3eabe15e
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="cuttlefs",
version="0.0.1",
author="Anthony Rebello",
author_email="arebello@wisc.edu",
description="Emulate file-system failure handling characteristics.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/WiscADSL/cuttlefs",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: POSIX :: Linux",
"Topic :: System :: Filesystems",
],
# TODO license
# TODO requirements / dependencies
entry_points={
'console_scripts': ['cuttlefs=cuttlefs.cli:main'],
}
)
| 28.592593
| 72
| 0.661917
|
bdea7ce4526a32e277eae1d26b11cdc14659e452
| 6,025
|
py
|
Python
|
idom/dialect.py
|
philippjfr/idom
|
c638ccacfa7ffcfeb3e1eb4f264ae58526d5cd16
|
[
"MIT"
] | 1
|
2022-02-11T17:48:58.000Z
|
2022-02-11T17:48:58.000Z
|
idom/dialect.py
|
philippjfr/idom
|
c638ccacfa7ffcfeb3e1eb4f264ae58526d5cd16
|
[
"MIT"
] | null | null | null |
idom/dialect.py
|
philippjfr/idom
|
c638ccacfa7ffcfeb3e1eb4f264ae58526d5cd16
|
[
"MIT"
] | 1
|
2022-02-11T17:48:59.000Z
|
2022-02-11T17:48:59.000Z
|
import ast
from typing import Any, List, Optional, Tuple, Union
import htm
from pyalect import Dialect, DialectError
class HtmlDialectTranspiler(Dialect, name="html"):
"""An HTML dialect transpiler for Python."""
def __init__(self, filename: Optional[str] = None):
self.filename: str = filename or "<dialect: html>"
def transform_src(self, source: str) -> str:
return source
def transform_ast(self, node: ast.AST) -> ast.AST:
new_node: ast.AST = HtmlDialectNodeTransformer(self.filename).visit(node)
return new_node
class HtmlDialectNodeTransformer(ast.NodeTransformer):
def __init__(self, filename: str):
super().__init__()
self.filename = filename
def visit_Call(self, node: ast.Call) -> Optional[ast.AST]:
if isinstance(node.func, ast.Name):
if node.func.id == "html":
if (
not node.keywords
and len(node.args) == 1
and isinstance(node.args[0], ast.JoinedStr)
):
try:
new_node = self._transform_string(node.args[0])
except htm.ParseError as error:
raise DialectError(str(error), self.filename, node.lineno)
return self.generic_visit(
ast.fix_missing_locations(ast.copy_location(new_node, node))
)
return node
def _transform_string(self, node: ast.JoinedStr) -> ast.Call:
htm_strings: List[str] = []
exp_nodes: List[ast.AST] = []
for inner_node in node.values:
if isinstance(inner_node, ast.Str):
htm_strings.append(inner_node.s)
elif isinstance(inner_node, ast.FormattedValue):
if len(htm_strings) == len(exp_nodes):
htm_strings.append("")
if inner_node.conversion != -1 or inner_node.format_spec:
exp_nodes.append(ast.JoinedStr([inner_node]))
else:
exp_nodes.append(inner_node.value)
call_stack = _HtmlCallStack()
for op_type, *data in htm.htm_parse(htm_strings):
getattr(self, f"_transform_htm_{op_type.lower()}")(
exp_nodes, call_stack, *data
)
return call_stack.finish()
def _transform_htm_open(
self,
exp_nodes: List[ast.AST],
call_stack: "_HtmlCallStack",
is_index: bool,
tag_or_index: Union[str, int],
) -> None:
if isinstance(tag_or_index, int):
call_stack.begin_child(exp_nodes[tag_or_index])
else:
call_stack.begin_child(ast.Str(tag_or_index))
def _transform_htm_close(
self, exp_nodes: List[ast.AST], call_stack: "_HtmlCallStack"
) -> None:
call_stack.end_child()
def _transform_htm_spread(
self, exp_nodes: List[ast.AST], call_stack: "_HtmlCallStack", _: Any, index: int
) -> None:
call_stack.add_attributes(None, exp_nodes[index])
def _transform_htm_prop_single(
self,
exp_nodes: List[ast.AST],
call_stack: "_HtmlCallStack",
attr: str,
is_index: bool,
value_or_index: Union[str, int],
) -> None:
if isinstance(value_or_index, bool):
const = ast.NameConstant(value_or_index)
call_stack.add_attributes(ast.Str(attr), const)
elif isinstance(value_or_index, int):
call_stack.add_attributes(ast.Str(attr), exp_nodes[value_or_index])
else:
call_stack.add_attributes(ast.Str(attr), ast.Str(value_or_index))
def _transform_htm_prop_multi(
self,
exp_nodes: List[ast.AST],
call_stack: "_HtmlCallStack",
attr: str,
items: Tuple[Tuple[bool, Union[str, int]]],
) -> None:
op_root = current_op = ast.BinOp(None, None, None)
for _, value_or_index in items:
if isinstance(value_or_index, str):
current_op.right = ast.BinOp(ast.Str(value_or_index), ast.Add(), None)
else:
current_op.right = ast.BinOp(exp_nodes[value_or_index], ast.Add(), None)
last_op = current_op
current_op = current_op.right
last_op.right = current_op.left
call_stack.add_attributes(ast.Str(attr), op_root.right)
def _transform_htm_child(
self,
exp_nodes: List[ast.AST],
call_stack: "_HtmlCallStack",
is_index: bool,
child_or_index: Union[str, int],
) -> None:
if isinstance(child_or_index, int):
call_stack.add_child(exp_nodes[child_or_index])
else:
call_stack.add_child(ast.Str(child_or_index))
class _HtmlCallStack:
def __init__(self) -> None:
self._root = self._new(ast.Str())
self._stack: List[ast.Call] = [self._root]
def begin_child(self, tag: ast.AST) -> None:
new = self._new(tag)
last = self._stack[-1]
children = last.args[2].elts # type: ignore
children.append(new)
self._stack.append(new)
def add_child(self, child: ast.AST) -> None:
current = self._stack[-1]
children = current.args[2].elts # type: ignore
children.append(child)
def add_attributes(self, key: Optional[ast.Str], value: ast.AST) -> None:
current = self._stack[-1]
attributes: ast.Dict = current.args[1] # type: ignore
attributes.keys.append(key)
attributes.values.append(value) # type: ignore
def end_child(self) -> None:
self._stack.pop(-1)
def finish(self) -> ast.Call:
root = self._root
self._root = self._new(ast.Str())
self._stack.clear()
return root.args[2].elts[0] # type: ignore
@staticmethod
def _new(tag: ast.AST) -> ast.Call:
args = [tag, ast.Dict([], []), ast.List([], ast.Load())]
return ast.Call(ast.Name("html", ast.Load()), args, [])
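# Transpiler sketch (hedged): parses a tiny source string and runs it through
# the transpiler defined above. This only inspects the rewritten AST; the
# `html` runtime callable would normally be supplied by idom. It also relies
# on the deprecated ast.Str node aliases, so it assumes a pre-3.12 Python.
if __name__ == "__main__":
    demo_src = 'layout = html(f"<div>{message}</div>")'
    transpiler = HtmlDialectTranspiler(filename="<demo>")
    demo_tree = transpiler.transform_ast(ast.parse(transpiler.transform_src(demo_src)))
    print(ast.dump(demo_tree))  # the html(f"...") call is now nested html(tag, attrs, children) calls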
| 35.441176
| 88
| 0.592033
|
24428ea7f89c71dd8dda7437f544402870d2be77
| 1,726
|
py
|
Python
|
crownstone_core/packets/microapp/MicroappInfoPacket.py
|
crownstone/crownstone-lib-python-core
|
e50eaae8680614c030df186aa06f13c9b20ac9ba
|
[
"MIT"
] | null | null | null |
crownstone_core/packets/microapp/MicroappInfoPacket.py
|
crownstone/crownstone-lib-python-core
|
e50eaae8680614c030df186aa06f13c9b20ac9ba
|
[
"MIT"
] | 3
|
2020-08-04T19:32:03.000Z
|
2021-10-06T09:07:08.000Z
|
crownstone_core/packets/microapp/MicroappInfoPacket.py
|
crownstone/crownstone-lib-python-core
|
e50eaae8680614c030df186aa06f13c9b20ac9ba
|
[
"MIT"
] | null | null | null |
import logging
from typing import List
from crownstone_core.packets.BasePacket import BasePacket
from crownstone_core.packets.microapp.MicroappSdkVersionPacket import MicroappSdkVersionPacket
from crownstone_core.packets.microapp.MicroappStatusPacket import MicroappStatusPacket
from crownstone_core.util.BufferReader import BufferReader
from crownstone_core.util.BufferWriter import BufferWriter
_LOGGER = logging.getLogger(__name__)
class MicroappInfoPacket(BasePacket):
def __init__(self, data=None):
self.protocol = 0
self.maxApps = 0
self.maxAppSize = 0
self.maxChunkSize = 0
self.maxRamUsage = 0
self.sdkVersion = MicroappSdkVersionPacket()
self.appsStatus: List[MicroappStatusPacket] = []
if data is not None:
self.deserialize(data)
def _deserialize(self, reader: BufferReader):
self.protocol = reader.getUInt8()
self.maxApps = reader.getUInt8()
self.maxAppSize = reader.getUInt16()
self.maxChunkSize = reader.getUInt16()
self.maxRamUsage = reader.getUInt16()
self.sdkVersion.deserialize(reader)
self.appsStatus = []
for i in range(0, self.maxApps):
statusPacket = MicroappStatusPacket()
statusPacket.deserialize(reader)
self.appsStatus.append(statusPacket)
def __str__(self):
appsStatusString = "["
for status in self.appsStatus:
appsStatusString += f"{status}, "
appsStatusString = appsStatusString[:-2] + "]"
return f"MicroappInfoPacket(" \
f"protocol={self.protocol}, " \
f"maxApps={self.maxApps}, " \
f"maxAppSize={self.maxAppSize}, " \
f"maxChunkSize={self.maxChunkSize}, " \
f"maxRamUsage={self.maxRamUsage}, " \
f"sdkVersion={self.sdkVersion}, " \
f"appsStatus={appsStatusString})"
| 32.566038
| 94
| 0.739861
|
5bd9d3b4daedfca76477f6bd5486960b61d952c6
| 576
|
py
|
Python
|
allauth/socialaccount/providers/naver/provider.py
|
s-tatus/django-allauth
|
25fe632acf12571ae2ac9e692e8890019d5a6e7b
|
[
"MIT"
] | 2
|
2016-05-24T21:13:32.000Z
|
2017-12-27T13:43:26.000Z
|
allauth/socialaccount/providers/naver/provider.py
|
s-tatus/django-allauth
|
25fe632acf12571ae2ac9e692e8890019d5a6e7b
|
[
"MIT"
] | 15
|
2020-06-05T19:26:26.000Z
|
2022-03-11T23:33:53.000Z
|
allauth/socialaccount/providers/naver/provider.py
|
s-tatus/django-allauth
|
25fe632acf12571ae2ac9e692e8890019d5a6e7b
|
[
"MIT"
] | 1
|
2021-07-24T12:47:00.000Z
|
2021-07-24T12:47:00.000Z
|
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class NaverAccount(ProviderAccount):
def get_avatar_url(self):
return self.account.extra_data.get('profile_image')
def to_str(self):
return self.account.extra_data.get('nickname', self.account.uid)
class NaverProvider(OAuth2Provider):
id = 'naver'
name = 'Naver'
account_class = NaverAccount
def extract_uid(self, data):
return str(data['id'])
provider_classes = [NaverProvider]
| 24
| 74
| 0.737847
|
88693900caa7b213698ec7c125f9506c07ef4608
| 1,907
|
py
|
Python
|
dingding_bot/api/bot.py
|
lpig/dingding_bot
|
3fbdd4280df38789b0b61abc7cd7efaae2fc0a38
|
[
"MIT"
] | null | null | null |
dingding_bot/api/bot.py
|
lpig/dingding_bot
|
3fbdd4280df38789b0b61abc7cd7efaae2fc0a38
|
[
"MIT"
] | null | null | null |
dingding_bot/api/bot.py
|
lpig/dingding_bot
|
3fbdd4280df38789b0b61abc7cd7efaae2fc0a38
|
[
"MIT"
] | null | null | null |
# encoding=utf-8
import requests
import logging
from .tools import json
logger = logging.getLogger(__name__)
API_URL = 'https://oapi.dingtalk.com/robot/'
class APIError(object):
def __init__(self, code, msg):
self.code = code
self.message = msg
class DingDingBot(object):
def __init__(self, access_token):
self.access_token = access_token
def _process_response(self, rsp):
data = {
"code": 0,
"msg": "success",
}
if rsp.status_code != 200:
data['code'] = rsp.status_code
data['msg'] = u'Http Error'
try:
content = rsp.json()
if 'errcode' in content and content['errcode'] != 0:
data['code'] = content.get('errcode', 9999)
data['msg'] = content.get('errmsg', u'')
except:
data['code'] = 9999
data['msg'] = u'Invalid Rsp'
return data
def _post(self, path, data, ctype='json'):
headers = {'Content-type': 'application/json'}
path = API_URL + path
if '?' in path:
path += '&access_token=' + self.access_token
else:
path += '?access_token=' + self.access_token
if ctype == 'json':
data = json.dumps(data, ensure_ascii=False).encode('utf-8')
rsp = requests.post(path, data=data, headers=headers, verify=True)
return self._process_response(rsp)
def send_text(self, text, at=None, is_all=False):
        if at and not isinstance(at, list):
raise TypeError(u'at must be a list!')
at = at if at else []
data = {
"msgtype": "text",
"text": {
"content": text,
},
"at": {
"atMobiles": at,
"isAtAll": is_all
}
}
        rsp = self._post('send', data)
        return rsp
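# Usage sketch (hedged): the token below is a placeholder and the webhook must
# already exist on the DingTalk side; _process_response() normalises the reply.
if __name__ == '__main__':
    bot = DingDingBot(access_token='your-access-token')
    result = bot.send_text('build finished', at=['13800000000'], is_all=False)
    print(result)  # e.g. {'code': 0, 'msg': 'success'}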
| 25.426667
| 74
| 0.516518
|
1751e5188fd4392b422e6d2cbefe4deedd899a93
| 1,665
|
py
|
Python
|
lib/bbox/box_transform.py
|
leonieganswindt/RetinaNet
|
1cf27bbb51d369d23f2fb6ec151cef2fe18b03e2
|
[
"MIT"
] | null | null | null |
lib/bbox/box_transform.py
|
leonieganswindt/RetinaNet
|
1cf27bbb51d369d23f2fb6ec151cef2fe18b03e2
|
[
"MIT"
] | null | null | null |
lib/bbox/box_transform.py
|
leonieganswindt/RetinaNet
|
1cf27bbb51d369d23f2fb6ec151cef2fe18b03e2
|
[
"MIT"
] | 1
|
2019-07-17T09:39:56.000Z
|
2019-07-17T09:39:56.000Z
|
"""
BBox transform
"""
import torch
def bbox_transform(boxes, gtboxes):
""" Bounding Box Transform
from groundtruth boxes and proposal boxes to deltas
Args:
        boxes: [N, 4] torch.Tensor (xywh)
        gtboxes: [N, 4] torch.Tensor (xyxy)
Return:
delta: [N, 4] torch.Tensor
"""
gt_w = gtboxes[:, 2] - gtboxes[:, 0] + 1
gt_h = gtboxes[:, 3] - gtboxes[:, 1] + 1
# center
gt_x = gtboxes[:, 0] + 0.5 * gt_w
gt_y = gtboxes[:, 1] + 0.5 * gt_h
# Anchors [x,y,w,h]
anchor_x = boxes[:, 0]
anchor_y = boxes[:, 1]
anchor_w = boxes[:, 2]
anchor_h = boxes[:, 3]
delta_x = (gt_x - anchor_x) / anchor_w
delta_y = (gt_y - anchor_y) / anchor_h
delta_w = torch.log(gt_w / anchor_w)
delta_h = torch.log(gt_h / anchor_h)
# [N, 4]
return torch.stack([delta_x, delta_y, delta_w, delta_h]).transpose(0, 1)
def bbox_transform_inv(boxes, delta):
""" Inverse Bounding Box Transform
from deltas and proposal boxes to predicted boxes
Args:
boxes: [N, 4] torch.Tensor (xywh)
        delta: [N, 4] torch.Tensor (dx, dy, dw, dh)
Return:
pred: [N, 4] torch.Tensor (xyxy)
"""
pred_boxes = torch.zeros_like(boxes)
pred_x = boxes[:, 0] + boxes[:, 2] * delta[:, 0]
pred_y = boxes[:, 1] + boxes[:, 3] * delta[:, 1]
pred_w = boxes[:, 2] * torch.exp(delta[:, 2])
pred_h = boxes[:, 3] * torch.exp(delta[:, 3])
pred_boxes[:, 0] = pred_x - 0.5 * pred_w
pred_boxes[:, 1] = pred_y - 0.5 * pred_h
pred_boxes[:, 2] = pred_x + 0.5 * pred_w
pred_boxes[:, 3] = pred_y + 0.5 * pred_h
return pred_boxes
if __name__ == '__main__':
pass
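# Round-trip sketch (hedged): encode a ground-truth corner box against a
# center-format anchor, then decode. Because of the "+1" width/height
# convention above, the decoded corners come back as (x1, y1, x2 + 1, y2 + 1).
if __name__ == '__main__':
    anchors_xywh = torch.tensor([[50.0, 50.0, 40.0, 40.0]])  # (cx, cy, w, h)
    gt_xyxy = torch.tensor([[40.0, 42.0, 70.0, 68.0]])       # (x1, y1, x2, y2)
    deltas = bbox_transform(anchors_xywh, gt_xyxy)
    decoded = bbox_transform_inv(anchors_xywh, deltas)
    print(deltas)   # (dx, dy, dw, dh)
    print(decoded)  # tensor([[40., 42., 71., 69.]])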
| 25.615385
| 76
| 0.570571
|
2fc99abbac74b3ce700d9e68c0a4ed8f323f5da6
| 7,328
|
py
|
Python
|
pyxstr2swift/xstr2swift.py
|
ocworld/pyxstr2swift
|
83220488ce11e9123c2b828e8c199d40582f46d9
|
[
"MIT"
] | 6
|
2018-08-21T01:25:23.000Z
|
2019-03-01T13:28:42.000Z
|
pyxstr2swift/xstr2swift.py
|
ocworld/pyxstr2swift
|
83220488ce11e9123c2b828e8c199d40582f46d9
|
[
"MIT"
] | 1
|
2018-08-20T11:35:57.000Z
|
2018-08-20T11:35:57.000Z
|
pyxstr2swift/xstr2swift.py
|
ocworld/pyxstr2swift
|
83220488ce11e9123c2b828e8c199d40582f46d9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import os
import io
def _get_keys_and_values_from_strings_file(strings_file_path):
"""
get keys_and_values from xcode strings file
:param strings_file_path: str. xcode strings file full path
:return:
"""
def _comment_remover(text):
import re
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return " " # note: a space and not an empty string
else:
return s
pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE
)
return re.sub(pattern, replacer, text)
with io.open(strings_file_path, mode='r', encoding='utf-8') as f:
contents = f.read()
lines = _comment_remover(contents).splitlines()
key_index = 0
value_index = 1
kv_list = list(filter(lambda kv: kv[key_index].strip() != "", [line.split('=') for line in lines]))
kv_dic = {kv[key_index].strip(): kv[value_index].strip('"; ') for kv in kv_list}
return kv_dic
def _write_keys_to_swift_file(kv_dic, out_file_path, tablename="", swift_struct_name="",
is_write_values_as_comment=False):
'''
write string keys to swift file.
:param kv_dic: dictionary for keys and values
:param out_file_path: a target swift file path including .swift extension
:param swift_struct_name: swift struct name in a target swift file path.
if "" outfile filename is used.
:return:
'''
from os.path import basename, splitext
default_struct_name = splitext(basename(out_file_path))[0]
struct_name = swift_struct_name if swift_struct_name != "" else default_struct_name
headlines = ["import Foundation", "", "struct %s {" % struct_name]
taillines = ["}", ""]
if is_write_values_as_comment:
bodylines = [" static let %s = NSLocalizedString(\"%s\", tableName: \"%s\", comment: \"\") // %s" % (
key, key, tablename, value) for key, value in kv_dic.items()]
else:
bodylines = [" static let %s = NSLocalizedString(\"%s\", tableName: \"%s\", comment: \"\")" % (
key, key, tablename) for key in kv_dic.keys()]
lines = headlines + bodylines + taillines
with io.open(out_file_path, mode='w+', encoding='utf-8') as f:
f.write(u'%s' % '\n'.join(lines))
def xstr2swift(strings_file_path, out_file_path,
swift_struct_name="",
overwrite_if_out_path_exist=True,
is_write_values_as_comment=False):
'''
Generating swift file from xcode strings file.
for example,
import Foundation
struct StringKeys {
static let key1 = NSLocalizedString("key1", tableName: "Localizable.strings", comment: "") // value
}
:param is_write_values_as_comment:
:param strings_file_path: str. xcode strings file full path
:param out_file_path: a target swift file path including .swift extension
:param swift_struct_name: swift struct name in a target swift file path.
if "" outfile filename is used.
:param overwrite_if_out_path_exist: overwrite if a outpath already exist.
:return:
'''
logging.info('xstr2swift: Try to convert (%s) to (%s)' % (strings_file_path, out_file_path))
if not os.path.exists(strings_file_path):
logging.error('xstr2swift: %s is not exist' % strings_file_path)
raise OSError(2) # ENOENT
if os.path.exists(out_file_path):
if overwrite_if_out_path_exist:
logging.info('xstr2swift: %s is removed' % out_file_path)
os.remove(out_file_path)
else:
logging.error('xstr2swift: %s is already exist' % out_file_path)
raise OSError(17) # EEXIST
logging.info('xstr2swift: try to get_keys_from_strings_file(%s)' % out_file_path)
try:
kv_dic = _get_keys_and_values_from_strings_file(strings_file_path)
except IOError as err:
logging.error('xstr2swift: failed to get_keys_from_strings_file %s with IOError (no: %d)(err: %s)' % (
strings_file_path, err.errno, err.message))
raise err
except OSError as err:
logging.error('xstr2swift: failed to get_keys_from_strings_file %s with OSError (no: %d)(err: %s)' % (
strings_file_path, err.errno, err.message))
raise err
except Exception as ex:
        logging.error('xstr2swift: failed to get_keys_from_strings_file %s with Exception (err: %s)' % (
            strings_file_path, ex))
raise ex
logging.info('xstr2swift: try to write_keys_to_swift_file(%s)' % out_file_path)
from os.path import basename, splitext
tablename = splitext(basename(strings_file_path))[0]
if swift_struct_name == "":
swift_struct_name = tablename
try:
_write_keys_to_swift_file(kv_dic, out_file_path, tablename, swift_struct_name, is_write_values_as_comment)
except OSError as err:
logging.error('xstr2swift: failed to write_keys_to_swift_file %s with os error (no: %d)(err: %s)' % (
strings_file_path, err.errno, err.message))
raise err
except Exception as ex:
        logging.error('xstr2swift: failed to write_keys_to_swift_file %s with exception (err: %s)' % (
            strings_file_path, ex))
raise ex
logging.info('xstr2swift: Success to convert (%s) to (%s)' % (strings_file_path, out_file_path))
def main():
import argparse
parser = argparse.ArgumentParser(description='pyxstr2swift needs arguments')
parser.add_argument('source', type=str, help='source: a strings file')
parser.add_argument('target', type=str, help='target: a swift file')
parser.add_argument('-st', '--structname', type=str, default="", help='structname: a struct name in a target file')
parser.add_argument('-f', '--force', action='store_true', help='force to write a target file if already exist')
parser.add_argument('-m', '--comment', action='store_true', help='values are added as comment')
parser.add_argument('-v', '--verbose', action='store_true', help='Display console output')
args = parser.parse_args()
is_forced = True if args.force else False
is_comment_value = True if args.comment else False
is_verbose = True if args.verbose else False
if is_verbose:
import sys
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger().addHandler(handler)
logging.info('source : %s' % args.source)
logging.info('target : %s' % args.target)
logging.info('structname : %s' % args.structname)
        logging.info('is_forced : %s' % ('True' if is_forced else 'False'))
        logging.info('is_verbose : %s' % ('True' if is_verbose else 'False'))
        logging.info('is_comment_value : %s' % ('True' if is_comment_value else 'False'))
xstr2swift(args.source, args.target, args.structname, is_forced, is_comment_value)
if __name__ == "__main__":
main()
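# Library-style usage sketch (hedged): the file names and the import path
# (pyxstr2swift.xstr2swift) are illustrative; the CLI entry point above does
# the same via argparse.
#
#   from pyxstr2swift.xstr2swift import xstr2swift
#   xstr2swift("Localizable.strings", "StringKeys.swift",
#              swift_struct_name="StringKeys",
#              overwrite_if_out_path_exist=True,
#              is_write_values_as_comment=True)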
| 38.772487
| 119
| 0.646698
|
a257f947f9d83091dd668f62bb9fa0c75a8eafcd
| 2,698
|
py
|
Python
|
src/get_test_results.py
|
williamdjones/deep_protein_binding
|
10b00835024702b6d0e73092c777fed267215ca7
|
[
"MIT"
] | null | null | null |
src/get_test_results.py
|
williamdjones/deep_protein_binding
|
10b00835024702b6d0e73092c777fed267215ca7
|
[
"MIT"
] | null | null | null |
src/get_test_results.py
|
williamdjones/deep_protein_binding
|
10b00835024702b6d0e73092c777fed267215ca7
|
[
"MIT"
] | null | null | null |
import os
import argparse
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, r2_score
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("--exp_dir", type=str, help="path to directory containing test results",
default="/scratch/wdjo224/deep_protein_binding/experiments")
parser.add_argument("--exp_name", type=str, help="name of the experiment to collect results", default="binding_debug")
parser.add_argument("--exp_type", type=str, help="indicate regression (reg) or classification (class)",
default="class")
parser.add_argument("--exp_epoch", type=int, help="which epoch to get results for", default=4)
args = parser.parse_args()
test_dict = {"path": [], "score": []}
test_list = []
print("reading test results...")
for root, dirs, files in tqdm(os.walk(args.exp_dir), total=len(os.listdir(args.exp_dir))):
if "test_results" in root and args.exp_name in root and "epoch{}".format(args.exp_epoch) in root:
process = root.split("/")[-1].split("_")[0]
test_df = pd.DataFrame({"idx": [], "pred": [], "true": [], "loss": []})
for file in os.listdir(root):
test_df = pd.concat([test_df, pd.read_csv(root + "/" + file, index_col=0)])
score = None
if args.exp_type == "class":
y_true = test_df.true.apply(lambda x: np.argmax(np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32)))
y_pred = test_df.pred.apply(lambda x: np.argmax(np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32)))
score = f1_score(y_pred=y_pred, y_true=y_true)
elif args.exp_type == "reg":
y_true = test_df.true.apply(lambda x: np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32))
y_pred = test_df.pred.apply(lambda x: np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32))
score = r2_score(y_pred=y_pred, y_true=y_true)
else:
raise Exception("not a valid output type")
test_list.append({"path": root, "score": score, "process": process})
print("finished reading. finding best result")
best_score = -9999999
best_idx = 0
for idx, test in tqdm(enumerate(test_list)):
if test["score"] > best_score:
best_score = test["score"]
best_idx = idx
best_test = test_list[best_idx]
print("best test results:\n score: {} \t process: {} \t path: {}".format(best_test["score"], best_test["process"],
best_test["path"]))
pd.DataFrame(test_list).sort_values(by="score", ascending=False).to_csv(
"/scratch/wdjo224/deep_protein_binding/"+args.exp_name+"_test_results.csv")
| 46.517241
| 118
| 0.636027
|
431ece344438970d683027785a62319198b67ebf
| 2,866
|
py
|
Python
|
spyne/test/model/test_include.py
|
infoxchange/spyne
|
60ed622b088c13f4f84c81f1f43302edbc7f6027
|
[
"BSD-3-Clause"
] | null | null | null |
spyne/test/model/test_include.py
|
infoxchange/spyne
|
60ed622b088c13f4f84c81f1f43302edbc7f6027
|
[
"BSD-3-Clause"
] | null | null | null |
spyne/test/model/test_include.py
|
infoxchange/spyne
|
60ed622b088c13f4f84c81f1f43302edbc7f6027
|
[
"BSD-3-Clause"
] | 3
|
2016-10-08T15:01:49.000Z
|
2018-05-24T03:14:24.000Z
|
#!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import unittest
try:
from urllib import quote_plus
except ImportError:
from urllib.parse import quote_plus
from lxml import etree
from spyne.model.complex import ComplexModel
from spyne.model.primitive import Integer
from spyne.model.primitive import String
from spyne.protocol.xml import XmlDocument
from spyne.protocol.soap.mime import _join_attachment
from spyne.const import xml_ns as ns
# Service Classes
class DownloadPartFileResult(ComplexModel):
ErrorCode = Integer
ErrorMessage = String
Data = String
# Tests
class TestInclude(unittest.TestCase):
def test_bytes_join_attachment(self):
        href_id = "http://tempuri.org/1/634133419330914808"
        payload = "ANJNSLJNDYBC SFDJNIREMX:CMKSAJN"
envelope = '''
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
<s:Body>
<DownloadPartFileResponse xmlns="http://tempuri.org/">
<DownloadPartFileResult xmlns:a="http://schemas.datacontract.org/2004/07/KlanApi.Common"
xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
<a:ErrorCode>0</a:ErrorCode>
<a:ErrorMessage i:nil="true"/>
<a:Data>
<xop:Include href="cid:%s" xmlns:xop="http://www.w3.org/2004/08/xop/include"/>
</a:Data>
</DownloadPartFileResult>
</DownloadPartFileResponse>
</s:Body>
</s:Envelope>
''' % quote_plus(href_id)
(joinedmsg, numreplaces) = _join_attachment(href_id, envelope, payload)
soaptree = etree.fromstring(joinedmsg)
body = soaptree.find("{%s}Body" % ns.soap11_env)
response = body.getchildren()[0]
result = response.getchildren()[0]
r = XmlDocument().from_element(None, DownloadPartFileResult, result)
self.assertEquals(payload, r.Data)
if __name__ == '__main__':
unittest.main()
| 37.710526
| 112
| 0.652128
|
47850a75918581c18595ed2dbfaebb0bfd2c8a02
| 503
|
py
|
Python
|
sandpit_app/webapp/security.py
|
mr-tim/sandpit
|
25ee43927b87b27936db191232aefc57026309d4
|
[
"MIT"
] | 2
|
2017-02-16T10:24:55.000Z
|
2018-08-14T09:58:17.000Z
|
sandpit_app/webapp/security.py
|
mr-tim/sandpit
|
25ee43927b87b27936db191232aefc57026309d4
|
[
"MIT"
] | null | null | null |
sandpit_app/webapp/security.py
|
mr-tim/sandpit
|
25ee43927b87b27936db191232aefc57026309d4
|
[
"MIT"
] | null | null | null |
from flask import abort, redirect
from functools import wraps
from core import current_user
def logged_in(f):
@wraps(f)
def wrapper(*args, **kwds):
if current_user == None:
return redirect('/login')
else:
return f(*args, **kwds)
return wrapper
def admin(f):
@wraps(f)
def wrapper(*args, **kwds):
if current_user == None:
return redirect('/login')
elif current_user.is_admin:
return f(*args, **kwds)
else:
abort(401)
return wrapper
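# Usage sketch (hedged): wires the decorators above into a hypothetical Flask
# app. The route decorator stays outermost so the wrapped view is registered.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)

    @app.route('/settings')
    @logged_in
    def settings():
        return 'settings page'

    @app.route('/admin')
    @admin
    def admin_panel():
        return 'admin only'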
| 20.958333
| 37
| 0.632207
|
1dbdec283d1bf9303c7da9274e99bdd3b02bf9f7
| 22
|
py
|
Python
|
Lambdata_veritaem/__init__.py
|
veritaem/Lambdata
|
f104c0a93e59676a6555e8dc05564928bf34f10b
|
[
"MIT"
] | null | null | null |
Lambdata_veritaem/__init__.py
|
veritaem/Lambdata
|
f104c0a93e59676a6555e8dc05564928bf34f10b
|
[
"MIT"
] | null | null | null |
Lambdata_veritaem/__init__.py
|
veritaem/Lambdata
|
f104c0a93e59676a6555e8dc05564928bf34f10b
|
[
"MIT"
] | 1
|
2019-02-14T16:09:24.000Z
|
2019-02-14T16:09:24.000Z
|
# VERSION: 0.0.1
| 7.333333
| 20
| 0.454545
|
e2c9ddbbc991b503308d2cbd4196f9ff1ec30848
| 278
|
py
|
Python
|
python/2.OOP/1Encapsulation/1.6.static_class.py
|
dunitian/BaseCode
|
4855ef4c6dd7c95d7239d2048832d8acfe26e084
|
[
"Apache-2.0"
] | 25
|
2018-06-13T08:13:44.000Z
|
2020-11-19T14:02:11.000Z
|
python/2.OOP/1Encapsulation/1.6.static_class.py
|
dunitian/BaseCode
|
4855ef4c6dd7c95d7239d2048832d8acfe26e084
|
[
"Apache-2.0"
] | null | null | null |
python/2.OOP/1Encapsulation/1.6.static_class.py
|
dunitian/BaseCode
|
4855ef4c6dd7c95d7239d2048832d8acfe26e084
|
[
"Apache-2.0"
] | 13
|
2018-06-13T08:13:38.000Z
|
2022-01-06T06:45:07.000Z
|
class Person(object):
    # age is a class attribute
age = 1
def __init__(self, name):
        # name is an instance attribute
self.name = name
def main():
    # ClassName.class_attribute
print(Person.age)
xiaoming = Person("小明")
    # instance.class_attribute
print(xiaoming.age)
if __name__ == '__main__':
main()
| 13.9
| 29
| 0.55036
|
65ec692dab3abb24a12cc34f797aafbc490b6b5f
| 5,123
|
py
|
Python
|
Dangerous/Golismero/tools/sqlmap/extra/mssqlsig/update.py
|
JeyZeta/Dangerous-
|
824ea6b571eda98bb855f176361e9b35dfda578e
|
[
"MIT"
] | null | null | null |
Dangerous/Golismero/tools/sqlmap/extra/mssqlsig/update.py
|
JeyZeta/Dangerous-
|
824ea6b571eda98bb855f176361e9b35dfda578e
|
[
"MIT"
] | null | null | null |
Dangerous/Golismero/tools/sqlmap/extra/mssqlsig/update.py
|
JeyZeta/Dangerous-
|
824ea6b571eda98bb855f176361e9b35dfda578e
|
[
"MIT"
] | 1
|
2018-07-04T18:35:16.000Z
|
2018-07-04T18:35:16.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import codecs
import os
import re
import urllib2
import urlparse
from xml.dom.minidom import Document
# Path to the XML file with signatures
MSSQL_XML = os.path.abspath("../../xml/banner/mssql.xml")
# Url to update Microsoft SQL Server XML versions file from
MSSQL_VERSIONS_URL = "http://www.sqlsecurity.com/FAQs/SQLServerVersionDatabase/tabid/63/Default.aspx"
def updateMSSQLXML():
if not os.path.exists(MSSQL_XML):
errMsg = "[ERROR] file '%s' does not exist. Please run the script from its parent directory" % MSSQL_XML
print errMsg
return
infoMsg = "[INFO] retrieving data from '%s'" % MSSQL_VERSIONS_URL
print infoMsg
try:
req = urllib2.Request(MSSQL_VERSIONS_URL)
f = urllib2.urlopen(req)
mssqlVersionsHtmlString = f.read()
f.close()
except urllib2.URLError:
__mssqlPath = urlparse.urlsplit(MSSQL_VERSIONS_URL)
__mssqlHostname = __mssqlPath[1]
warnMsg = "[WARNING] sqlmap was unable to connect to %s," % __mssqlHostname
warnMsg += " check your Internet connection and retry"
print warnMsg
return
releases = re.findall("class=\"BCC_DV_01DarkBlueTitle\">SQL Server\s(.+?)\sBuilds", mssqlVersionsHtmlString, re.I | re.M)
releasesCount = len(releases)
# Create the minidom document
doc = Document()
# Create the <root> base element
root = doc.createElement("root")
doc.appendChild(root)
for index in xrange(0, releasesCount):
release = releases[index]
# Skip Microsoft SQL Server 6.5 because the HTML
# table is in another format
if release == "6.5":
continue
# Create the <signatures> base element
signatures = doc.createElement("signatures")
signatures.setAttribute("release", release)
root.appendChild(signatures)
startIdx = mssqlVersionsHtmlString.index("SQL Server %s Builds" % releases[index])
if index == releasesCount - 1:
stopIdx = len(mssqlVersionsHtmlString)
else:
stopIdx = mssqlVersionsHtmlString.index("SQL Server %s Builds" % releases[index + 1])
mssqlVersionsReleaseString = mssqlVersionsHtmlString[startIdx:stopIdx]
servicepackVersion = re.findall("</td><td>[7\.0|2000|2005|2008|2008 R2]*(.*?)</td><td.*?([\d\.]+)</td>[\r]*\n", mssqlVersionsReleaseString, re.I | re.M)
for servicePack, version in servicepackVersion:
if servicePack.startswith(" "):
servicePack = servicePack[1:]
if "/" in servicePack:
servicePack = servicePack[:servicePack.index("/")]
if "(" in servicePack:
servicePack = servicePack[:servicePack.index("(")]
if "-" in servicePack:
servicePack = servicePack[:servicePack.index("-")]
if "*" in servicePack:
servicePack = servicePack[:servicePack.index("*")]
if servicePack.startswith("+"):
servicePack = "0%s" % servicePack
servicePack = servicePack.replace("\t", " ")
servicePack = servicePack.replace("No SP", "0")
servicePack = servicePack.replace("RTM", "0")
servicePack = servicePack.replace("TM", "0")
servicePack = servicePack.replace("SP", "")
servicePack = servicePack.replace("Service Pack", "")
servicePack = servicePack.replace("<a href=\"http:", "")
servicePack = servicePack.replace(" ", " ")
servicePack = servicePack.replace("+ ", "+")
servicePack = servicePack.replace(" +", "+")
if servicePack.endswith(" "):
servicePack = servicePack[:-1]
if servicePack and version:
# Create the main <card> element
signature = doc.createElement("signature")
signatures.appendChild(signature)
# Create a <version> element
versionElement = doc.createElement("version")
signature.appendChild(versionElement)
                # Give the <version> element some text
versionText = doc.createTextNode(version)
versionElement.appendChild(versionText)
# Create a <servicepack> element
servicepackElement = doc.createElement("servicepack")
signature.appendChild(servicepackElement)
                # Give the <servicepack> element some text
servicepackText = doc.createTextNode(servicePack)
servicepackElement.appendChild(servicepackText)
# Save our newly created XML to the signatures file
mssqlXml = codecs.open(MSSQL_XML, "w", "utf8")
doc.writexml(writer=mssqlXml, addindent=" ", newl="\n")
mssqlXml.close()
infoMsg = "[INFO] done. retrieved data parsed and saved into '%s'" % MSSQL_XML
print infoMsg
if __name__ == "__main__":
updateMSSQLXML()
| 37.123188
| 160
| 0.619559
|
d1cc7194d5a3fec6dc28a0016994345645003c02
| 3,423
|
py
|
Python
|
main/gui/Frames/ClassificationResultsFrame.py
|
MattScho/MLUI
|
c6e609a9a9cae4e428545e4ccbeecaf8ba452b82
|
[
"Apache-2.0"
] | 2
|
2018-10-09T00:39:32.000Z
|
2018-10-14T23:18:02.000Z
|
main/gui/Frames/ClassificationResultsFrame.py
|
MattScho/MLUI
|
c6e609a9a9cae4e428545e4ccbeecaf8ba452b82
|
[
"Apache-2.0"
] | null | null | null |
main/gui/Frames/ClassificationResultsFrame.py
|
MattScho/MLUI
|
c6e609a9a9cae4e428545e4ccbeecaf8ba452b82
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import Frame, Entry, Label,Button
from tkinter import filedialog
import pickle
from main.gui.Utilities.Settings import Settings
'''
Displays results of a classification algorithm
'''
class ClassificationAlgorithmResultsFrame(Frame):
def __init__(self, parent, res):
Frame.__init__(self, master=parent, bg=Settings.BACKGROUND_COLOR.value)
self.pack()
self.result = res
self.addHeader()
self.addParameters()
self.addStatistics()
self.addPickleBtn()
def addHeader(self):
algName = Label(master=self, fg=Settings.FONT_COLOR.value, bg=Settings.BACKGROUND_COLOR.value, font=Settings.REGULAR_FONT,text=self.result.get("Algorithm"))
algName.pack()
def addParameters(self):
paramsOutput = ""
paramsDict = self.result.get("Params")
for param in paramsDict.keys():
paramsOutput += param + " = " + str(paramsDict[param]) + "\n"
params = Label(master=self,font=Settings.REGULAR_FONT.value, fg=Settings.FONT_COLOR.value, bg=Settings.BACKGROUND_COLOR.value,
text="Parameters:\n" + paramsOutput
)
params.pack()
def addStatistics(self):
accuracyLabel = Label(master=self, fg=Settings.FONT_COLOR.value, bg=Settings.BACKGROUND_COLOR.value,font=Settings.REGULAR_FONT, text="Accuracy: " + str(self.result.get("Statistics").get("Accuracy")))
accuracyLabel.pack()
precLabel = Label(master=self, fg=Settings.FONT_COLOR.value, bg=Settings.BACKGROUND_COLOR.value,font=Settings.REGULAR_FONT, text="Precision: " + str(self.result.get("Statistics").get("Precision")))
precLabel.pack()
recLabel = Label(master=self, fg=Settings.FONT_COLOR.value, bg=Settings.BACKGROUND_COLOR.value,font=Settings.REGULAR_FONT, text="Recall: " + str(self.result.get("Statistics").get("Recall")))
recLabel.pack()
f1Label = Label(master=self, fg=Settings.FONT_COLOR.value, bg=Settings.BACKGROUND_COLOR.value,font=Settings.REGULAR_FONT, text="F1: " + str(self.result.get("Statistics").get("F1")))
f1Label.pack()
trainTimeLabel = Label(master=self, fg=Settings.FONT_COLOR.value, font=Settings.REGULAR_FONT.value, bg=Settings.BACKGROUND_COLOR.value, text="Training Time: " + str(self.result.get("Statistics").get("Fit Time")) + " seconds")
trainTimeLabel.pack()
def addPickleBtn(self):
pickleModelBtn = Button(self, text='Pickle Model', width= 15,font=Settings.REGULAR_FONT, bg=Settings.GOOD_BUTTON_COLOR.value, command=lambda : self.pickleModel())
pickleModelBtn.pack()
outputResultCSVBtn = Button(self, text="Results as CSV", width= 15,font=Settings.REGULAR_FONT, bg=Settings.GOOD_BUTTON_COLOR.value, command=lambda : self.pickleModel())
outputResultCSVBtn.pack()
if self.result.get("Algorithm") == "Perceptron":
outputWeightsToCSVBtn = Button(self, text="Weights as CSV", width= 15,font=Settings.REGULAR_FONT, bg=Settings.GOOD_BUTTON_COLOR.value, command=lambda : self.pickleModel())
outputWeightsToCSVBtn.pack()
def pickleModel(self):
fileToPickleTo = filedialog.asksaveasfile(mode='wb', defaultextension=".MLModel")
if fileToPickleTo != None:
pickle.dump(self.result.get("Model"), fileToPickleTo)
fileToPickleTo.close()
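# Hedged usage sketch (not part of the original module): shows the result
# dictionary shape this frame expects. The algorithm name, parameters and
# statistic values below are illustrative placeholders.
if __name__ == "__main__":
    from tkinter import Tk
    exampleResult = {
        "Algorithm": "Perceptron",
        "Params": {"max_iter": 1000, "eta0": 0.1},
        "Statistics": {
            "Accuracy": 0.91,
            "Precision": 0.90,
            "Recall": 0.89,
            "F1": 0.895,
            "Fit Time": 0.42,
        },
        "Model": None,  # would normally hold the fitted estimator
    }
    window = Tk()
    ClassificationAlgorithmResultsFrame(window, exampleResult)
    window.mainloop()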
| 51.089552
| 234
| 0.683903
|
9d369e9e542e063bebc52725f555be469d5875c9
| 7,018
|
py
|
Python
|
ml_studio/model_evaluation/optimization.py
|
john-james-ai/ml-studio
|
2230fcd6579d2291c761e559ec93b18ddd7a96e6
|
[
"BSD-3-Clause"
] | 1
|
2020-01-30T09:37:00.000Z
|
2020-01-30T09:37:00.000Z
|
ml_studio/model_evaluation/optimization.py
|
john-james-ai/ml-studio
|
2230fcd6579d2291c761e559ec93b18ddd7a96e6
|
[
"BSD-3-Clause"
] | 3
|
2019-12-05T19:37:59.000Z
|
2020-03-31T05:49:53.000Z
|
ml_studio/model_evaluation/optimization.py
|
john-james-ai/ml-studio
|
2230fcd6579d2291c761e559ec93b18ddd7a96e6
|
[
"BSD-3-Clause"
] | null | null | null |
# =========================================================================== #
# Project: ML Studio #
# Version: 0.1.14 #
# File: \optimization.py #
# Python Version: 3.8.0 #
# --------------- #
# Author: John James #
# Company: Decision Scients #
# Email: jjames@decisionscients.com #
# --------------- #
# Create Date: Tuesday December 3rd 2019, 5:00:16 pm #
# Last Modified: Tuesday December 3rd 2019, 5:10:27 pm #
# Modified By: John James (jjames@decisionscients.com) #
# --------------- #
# License: Modified BSD #
# Copyright (c) 2019 Decision Scients #
# =========================================================================== #
"""Optimization related functionality."""
import numpy as np
from sklearn.model_selection import KFold
from ml_studio.utils.data_manager import sampler
# --------------------------------------------------------------------------- #
class KFoldCV():
"""Performs KFold cross validation on a single estimator.
This is to analyze performance vis-a-vis training set sizes on a single
estimator.
Parameters
----------
model : a Scikit-Learn or an ML Studio estimator
A Scikit-Learn or ML Studio estimator.
sizes : array-like
List or nd.array containing the training set sizes to evaluate.
k : int Default = 5
The number of folds.
Attributes
----------
cv_results_ : dict
dictionary contains:
mean_train_scores : nd.array.
mean_test_scores : nd.array
std_train_scores : nd.array
std_test_scores : nd.array
mean_fit_time : nd.array
std_fit_time : nd.array
sets = list of dictionaries. One element per dataset size
train_scores : nd.array
test_scores : nd.array
fit_times : nd.array
"""
def __init__(self, model, sizes, k=5):
self.model = model
self.sizes = sizes
self.k = k
self.cv_results_ = {}
def fit(self, X, y):
"""Performs the cross-validation over varying training set sizes.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
        self : KFoldCV
"""
# Validate parameters
if not hasattr(self.sizes, "__len__"):
raise TypeError("sizes must be a list or a numpy array")
if not isinstance(self.k, int):
raise TypeError("k must be an integer greater than 1.")
k_fold = KFold(n_splits=self.k)
# Perform cross-validation over training set sizes
mean_epochs = []
mean_train_scores = []
mean_test_scores = []
mean_fit_times = []
mean_fit_times_norm = []
std_epochs = []
std_train_scores = []
std_test_scores = []
std_fit_times = []
std_fit_times_norm = []
training_sets = []
for s in self.sizes:
training_set = {}
total_epochs = []
train_scores = []
test_scores = []
fit_times = []
fit_times_norm = []
X_train, y_train = sampler(X,y, size=s, seed=50)
for train, test in k_fold.split(X_train, y_train):
training_set = {}
self.model.fit(X_train[train], y_train[train])
epochs = self.model.history.total_epochs
train_score = self.model.score(X_train[train], y_train[train])
test_score = self.model.score(X_train[test], y_train[test])
fit_time = self.model.history.duration
fit_time_norm = fit_time / epochs
total_epochs.append(epochs)
train_scores.append(train_score)
test_scores.append(test_score)
fit_times.append(fit_time)
fit_times_norm.append(fit_time_norm)
mean_total_epochs = np.mean(total_epochs)
mean_epochs.append(mean_total_epochs)
mean_train_score = np.mean(train_scores)
mean_train_scores.append(mean_train_score)
mean_test_score = np.mean(test_scores)
mean_test_scores.append(mean_test_score)
mean_fit_time = np.mean(fit_times)
mean_fit_times.append(mean_fit_time)
mean_fit_time_norm = np.mean(fit_times_norm)
mean_fit_times_norm.append(mean_fit_time_norm)
std_total_epochs = np.std(total_epochs)
std_epochs.append(std_total_epochs)
std_train_score = np.std(train_scores)
std_train_scores.append(std_train_score)
std_test_score = np.std(test_scores)
std_test_scores.append(std_test_score)
std_fit_time = np.std(fit_times)
std_fit_times.append(std_fit_time)
std_fit_time_norm = np.std(fit_times_norm)
std_fit_times_norm.append(std_fit_time_norm)
# Format attribute
training_set['size'] = s
training_set['epochs'] = total_epochs
training_set['train_scores'] = train_scores
training_set['test_scores'] = test_scores
training_set['fit_times'] = fit_times
training_set['fit_times_norm'] = fit_times_norm
training_sets.append(training_set)
self.cv_results_['mean_epochs'] = mean_epochs
self.cv_results_['mean_train_scores'] = mean_train_scores
self.cv_results_['mean_test_scores'] = mean_test_scores
self.cv_results_['mean_fit_times'] = mean_fit_times
self.cv_results_['mean_fit_times_norm'] = mean_fit_times_norm
self.cv_results_['std_epochs'] = std_epochs
self.cv_results_['std_train_scores'] = std_train_scores
self.cv_results_['std_test_scores'] = std_test_scores
self.cv_results_['std_fit_times'] = std_fit_times
self.cv_results_['std_fit_times_norm'] = std_fit_times_norm
self.cv_results_['training_sets'] = training_sets
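# --------------------------------------------------------------------------- #
# Hedged sketch (not part of the original module): a minimal stand-in
# estimator showing the interface KFoldCV relies on -- fit(), score() and a
# history object exposing total_epochs and duration. Everything below is an
# illustrative assumption, not an ML Studio estimator.
class _FakeHistory:
    total_epochs = 10       # pretend the model trained for 10 epochs
    duration = 0.5          # pretend fitting took half a second

class _FakeEstimator:
    def fit(self, X, y):
        self.history = _FakeHistory()
        return self

    def score(self, X, y):
        return 0.9          # constant score, purely illustrative

# Example wiring (X and y are application data; sampler() is used internally):
#   cv = KFoldCV(model=_FakeEstimator(), sizes=[50, 100], k=3)
#   cv.fit(X, y)
#   cv.cv_results_['mean_test_scores']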
| 38.349727
| 79
| 0.515247
|
cac08792de8041a071d2d89b76dc699938558162
| 503
|
py
|
Python
|
flask_philo_sqlalchemy/test.py
|
maigfrga/Flask-Philo-SQLAlchemy
|
71598bb603b8458a2cf9f7989f71d8f1c77fafb9
|
[
"MIT"
] | null | null | null |
flask_philo_sqlalchemy/test.py
|
maigfrga/Flask-Philo-SQLAlchemy
|
71598bb603b8458a2cf9f7989f71d8f1c77fafb9
|
[
"MIT"
] | 13
|
2018-11-02T15:12:20.000Z
|
2019-02-20T16:05:13.000Z
|
flask_philo_sqlalchemy/test.py
|
maigfrga/Flask-Philo-SQLAlchemy
|
71598bb603b8458a2cf9f7989f71d8f1c77fafb9
|
[
"MIT"
] | 3
|
2018-10-11T09:04:04.000Z
|
2018-12-19T13:14:23.000Z
|
from flask_philo_core.test import FlaskPhiloTestCase
from flask_philo_sqlalchemy.connection import create_pool
from flask_philo_sqlalchemy import cleandb, syncdb
class SQLAlchemyTestCase(FlaskPhiloTestCase):
def setup(self):
super(SQLAlchemyTestCase, self).setup()
with self.app.app_context():
self.pool = create_pool()
syncdb(pool=self.pool)
def teardown(self):
with self.app.app_context():
cleandb()
self.pool.close()
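# Hedged sketch (not part of the original module): how a concrete test case
# might build on SQLAlchemyTestCase. The test body is a placeholder and
# depends on the application's own models.
class ExampleModelTestCase(SQLAlchemyTestCase):
    def test_connection_pool_available(self):
        # setup() has already created the pool and synced the schema
        with self.app.app_context():
            assert self.pool is not None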
| 29.588235
| 57
| 0.689861
|
be70a79d59f0d9b9d5be834c7f6208ed06ac4c38
| 28,020
|
py
|
Python
|
app/lib/pushkin/pushkin/sender/nordifier/apns.py
|
krzyhook/pushkin-on-docker
|
05d192d0b4c753bcd41aba0a66394ae39dd78fc6
|
[
"MIT"
] | null | null | null |
app/lib/pushkin/pushkin/sender/nordifier/apns.py
|
krzyhook/pushkin-on-docker
|
05d192d0b4c753bcd41aba0a66394ae39dd78fc6
|
[
"MIT"
] | null | null | null |
app/lib/pushkin/pushkin/sender/nordifier/apns.py
|
krzyhook/pushkin-on-docker
|
05d192d0b4c753bcd41aba0a66394ae39dd78fc6
|
[
"MIT"
] | null | null | null |
# PyAPNs was developed by Simon Whitaker <simon@goosoftware.co.uk>
# Source available at https://github.com/simonwhitaker/PyAPNs
#
# PyAPNs is distributed under the terms of the MIT license.
#
# Copyright (c) 2011 Goo Software Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from binascii import a2b_hex, b2a_hex
from datetime import datetime
from socket import socket, timeout, AF_INET, SOCK_STREAM
from socket import error as socket_error
from struct import pack, unpack
import sys
import ssl
import select
import time
import collections, itertools
import logging
import threading
try:
from ssl import wrap_socket, SSLError
except ImportError:
from socket import ssl as wrap_socket, sslerror as SSLError
from _ssl import SSL_ERROR_WANT_READ, SSL_ERROR_WANT_WRITE
try:
import json
except ImportError:
import simplejson as json
_logger = logging.getLogger(__name__)
def set_logger(logger):
global _logger
_logger = logger
MAX_PAYLOAD_LENGTH = 2048
NOTIFICATION_COMMAND = 0
ENHANCED_NOTIFICATION_COMMAND = 1
NOTIFICATION_FORMAT = (
'!' # network big-endian
'B' # command
'H' # token length
'32s' # token
'H' # payload length
'%ds' # payload
)
ENHANCED_NOTIFICATION_FORMAT = (
'!' # network big-endian
'B' # command
'I' # identifier
'I' # expiry
'H' # token length
'32s' # token
'H' # payload length
'%ds' # payload
)
ERROR_RESPONSE_FORMAT = (
'!' # network big-endian
'B' # command
'B' # status
'I' # identifier
)
TOKEN_LENGTH = 32
ERROR_RESPONSE_LENGTH = 6
DELAY_RESEND_SEC = 0.0
SENT_BUFFER_QTY = 100000
WAIT_WRITE_TIMEOUT_SEC = 10
WAIT_READ_TIMEOUT_SEC = 10
WRITE_RETRY = 3
ER_STATUS = 'status'
ER_IDENTIFER = 'identifier'
class APNs(object):
"""A class representing an Apple Push Notification service connection"""
def __init__(self, use_sandbox=False, cert_file=None, key_file=None, enhanced=False, write_retries=WRITE_RETRY):
"""
Set use_sandbox to True to use the sandbox (test) APNs servers.
Default is False.
"""
super(APNs, self).__init__()
self.use_sandbox = use_sandbox
self.cert_file = cert_file
self.key_file = key_file
self._feedback_connection = None
self._gateway_connection = None
self.enhanced = enhanced
self.write_retries = write_retries
@staticmethod
def packed_uchar(num):
"""
Returns an unsigned char in packed form
"""
return pack('>B', num)
@staticmethod
def packed_ushort_big_endian(num):
"""
Returns an unsigned short in packed big-endian (network) form
"""
return pack('>H', num)
@staticmethod
def unpacked_ushort_big_endian(bytes):
"""
Returns an unsigned short from a packed big-endian (network) byte
array
"""
return unpack('>H', bytes)[0]
@staticmethod
def packed_uint_big_endian(num):
"""
Returns an unsigned int in packed big-endian (network) form
"""
return pack('>I', num)
@staticmethod
def unpacked_uint_big_endian(bytes):
"""
Returns an unsigned int from a packed big-endian (network) byte array
"""
return unpack('>I', bytes)[0]
@staticmethod
def unpacked_char_big_endian(bytes):
"""
Returns an unsigned char from a packed big-endian (network) byte array
"""
return unpack('c', bytes)[0]
@property
def feedback_server(self):
if not self._feedback_connection:
self._feedback_connection = FeedbackConnection(
use_sandbox=self.use_sandbox,
cert_file=self.cert_file,
key_file=self.key_file
)
return self._feedback_connection
@property
def gateway_server(self):
if not self._gateway_connection:
self._gateway_connection = GatewayConnection(
use_sandbox=self.use_sandbox,
cert_file=self.cert_file,
key_file=self.key_file,
enhanced=self.enhanced,
write_retries=self.write_retries
)
return self._gateway_connection
class APNsConnection(object):
"""
A generic connection class for communicating with the APNs
"""
def __init__(self, cert_file=None, key_file=None, timeout=None, enhanced=False):
super(APNsConnection, self).__init__()
self.cert_file = cert_file
self.key_file = key_file
self.timeout = timeout
self._socket = None
self._ssl = None
self.enhanced = enhanced
self.connection_alive = False
def _connect(self):
# Establish an SSL connection
_logger.debug("%s APNS connection establishing..." % self.__class__.__name__)
# Fallback for socket timeout.
for i in xrange(3):
try:
self._socket = socket(AF_INET, SOCK_STREAM)
self._socket.settimeout(self.timeout)
self._socket.connect((self.server, self.port))
break
except timeout:
pass
except:
raise
if self.enhanced:
self._last_activity_time = time.time()
self._socket.setblocking(False)
self._ssl = wrap_socket(self._socket, self.key_file, self.cert_file,
do_handshake_on_connect=False)
while True:
try:
self._ssl.do_handshake()
break
except ssl.SSLError, err:
if ssl.SSL_ERROR_WANT_READ == err.args[0]:
select.select([self._ssl], [], [])
elif ssl.SSL_ERROR_WANT_WRITE == err.args[0]:
select.select([], [self._ssl], [])
else:
raise
else:
# Fallback for 'SSLError: _ssl.c:489: The handshake operation timed out'
for i in xrange(3):
try:
self._ssl = wrap_socket(self._socket, self.key_file, self.cert_file)
break
except SSLError, ex:
if ex.args[0] == SSL_ERROR_WANT_READ:
sys.exc_clear()
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
sys.exc_clear()
else:
raise
self.connection_alive = True
_logger.debug("%s APNS connection established" % self.__class__.__name__)
def _disconnect(self):
if self.connection_alive:
if self._socket:
self._socket.close()
if self._ssl:
self._ssl.close()
self.connection_alive = False
_logger.debug(" %s APNS connection closed" % self.__class__.__name__)
def _connection(self):
if not self._ssl or not self.connection_alive:
self._connect()
return self._ssl
def read(self, n=None):
return self._connection().read(n)
def write(self, string):
if self.enhanced: # nonblocking socket
self._last_activity_time = time.time()
_, wlist, _ = select.select([], [self._connection()], [], WAIT_WRITE_TIMEOUT_SEC)
if len(wlist) > 0:
length = self._connection().sendall(string)
if length == 0:
_logger.debug("sent length: %d" % length) # DEBUG
else:
_logger.warning("write socket descriptor is not ready after " + str(WAIT_WRITE_TIMEOUT_SEC))
else: # blocking socket
return self._connection().write(string)
class PayloadAlert(object):
def __init__(self, body=None, action_loc_key=None, loc_key=None,
loc_args=None, launch_image=None):
super(PayloadAlert, self).__init__()
self.body = body
self.action_loc_key = action_loc_key
self.loc_key = loc_key
self.loc_args = loc_args
self.launch_image = launch_image
def dict(self):
d = {}
if self.body:
d['body'] = self.body
if self.action_loc_key:
d['action-loc-key'] = self.action_loc_key
if self.loc_key:
d['loc-key'] = self.loc_key
if self.loc_args:
d['loc-args'] = self.loc_args
if self.launch_image:
d['launch-image'] = self.launch_image
return d
class PayloadTooLargeError(Exception):
def __init__(self, payload_size):
super(PayloadTooLargeError, self).__init__()
self.payload_size = payload_size
class Payload(object):
"""A class representing an APNs message payload"""
def __init__(self, alert=None, badge=None, sound=None, category=None, custom={}, content_available=False):
super(Payload, self).__init__()
self.alert = alert
self.badge = badge
self.sound = sound
self.category = category
self.custom = custom
self.content_available = content_available
self._check_size()
def dict(self):
"""Returns the payload as a regular Python dictionary"""
d = {}
if self.alert:
# Alert can be either a string or a PayloadAlert
# object
if isinstance(self.alert, PayloadAlert):
d['alert'] = self.alert.dict()
else:
d['alert'] = self.alert
if self.sound:
d['sound'] = self.sound
if self.badge is not None:
d['badge'] = int(self.badge)
if self.category:
d['category'] = self.category
if self.content_available:
d.update({'content-available': 1})
d = {'aps': d}
d.update(self.custom)
return d
def json(self):
return json.dumps(self.dict(), separators=(',', ':'), ensure_ascii=False).encode('utf-8')
def _check_size(self):
payload_length = len(self.json())
if payload_length > MAX_PAYLOAD_LENGTH:
raise PayloadTooLargeError(payload_length)
def __repr__(self):
attrs = ("alert", "badge", "sound", "category", "custom")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s)" % (self.__class__.__name__, args)
class Frame(object):
"""A class representing an APNs message frame for multiple sending"""
def __init__(self):
self.frame_data = bytearray()
self.notification_data = list()
def get_frame(self):
return self.frame_data
def add_item(self, token_hex, payload, identifier, expiry, priority):
"""Add a notification message to the frame"""
item_len = 0
self.frame_data.extend('\2' + APNs.packed_uint_big_endian(item_len))
token_bin = a2b_hex(token_hex)
token_length_bin = APNs.packed_ushort_big_endian(len(token_bin))
token_item = '\1' + token_length_bin + token_bin
self.frame_data.extend(token_item)
item_len += len(token_item)
payload_json = payload.json()
payload_length_bin = APNs.packed_ushort_big_endian(len(payload_json))
payload_item = '\2' + payload_length_bin + payload_json
self.frame_data.extend(payload_item)
item_len += len(payload_item)
identifier_bin = APNs.packed_uint_big_endian(identifier)
identifier_length_bin = \
APNs.packed_ushort_big_endian(len(identifier_bin))
identifier_item = '\3' + identifier_length_bin + identifier_bin
self.frame_data.extend(identifier_item)
item_len += len(identifier_item)
expiry_bin = APNs.packed_uint_big_endian(expiry)
expiry_length_bin = APNs.packed_ushort_big_endian(len(expiry_bin))
expiry_item = '\4' + expiry_length_bin + expiry_bin
self.frame_data.extend(expiry_item)
item_len += len(expiry_item)
priority_bin = APNs.packed_uchar(priority)
priority_length_bin = APNs.packed_ushort_big_endian(len(priority_bin))
priority_item = '\5' + priority_length_bin + priority_bin
self.frame_data.extend(priority_item)
item_len += len(priority_item)
self.frame_data[-item_len - 4:-item_len] = APNs.packed_uint_big_endian(item_len)
self.notification_data.append(
{'token': token_hex, 'payload': payload, 'identifier': identifier, 'expiry': expiry, "priority": priority})
def get_notifications(self, gateway_connection):
notifications = list({'id': x['identifier'],
'message': gateway_connection._get_enhanced_notification(x['token'], x['payload'],
x['identifier'], x['expiry'])}
for x in self.notification_data)
return notifications
def get_notification_ids(self):
return list(x['identifier'] for x in self.notification_data)
def __str__(self):
"""Get the frame buffer"""
return str(self.frame_data)
class FeedbackConnection(APNsConnection):
"""
A class representing a connection to the APNs Feedback server
"""
def __init__(self, use_sandbox=False, **kwargs):
super(FeedbackConnection, self).__init__(**kwargs)
self.server = (
'feedback.push.apple.com',
'feedback.sandbox.push.apple.com')[use_sandbox]
self.port = 2196
def _chunks(self):
BUF_SIZE = 4096
while 1:
data = self.read(BUF_SIZE)
yield data
if not data:
break
def items(self):
"""
A generator that yields (token_hex, fail_time) pairs retrieved from
the APNs feedback server
"""
buff = ''
for chunk in self._chunks():
buff += chunk
# Quit if there's no more data to read
if not buff:
break
# Sanity check: after a socket read we should always have at least
# 6 bytes in the buffer
if len(buff) < 6:
break
while len(buff) > 6:
token_length = APNs.unpacked_ushort_big_endian(buff[4:6])
bytes_to_read = 6 + token_length
if len(buff) >= bytes_to_read:
fail_time_unix = APNs.unpacked_uint_big_endian(buff[0:4])
fail_time = datetime.utcfromtimestamp(fail_time_unix)
token = b2a_hex(buff[6:bytes_to_read])
yield (token, fail_time)
# Remove data for current token from buffer
buff = buff[bytes_to_read:]
else:
# break out of inner while loop - i.e. go and fetch
# some more data and append to buffer
break
class GatewayConnection(APNsConnection):
"""
A class that represents a connection to the APNs gateway server
"""
def __init__(self, write_retries, use_sandbox=False, **kwargs):
super(GatewayConnection, self).__init__(**kwargs)
self.server = (
'gateway.push.apple.com',
'gateway.sandbox.push.apple.com')[use_sandbox]
self.port = 2195
if self.enhanced == True: # start error-response monitoring thread
self._last_activity_time = time.time()
self._working = False
self._send_lock = threading.RLock()
self._error_response_handler_worker = None
self._response_listener = None
self._error_listener = None
self.write_retries = write_retries
self._sent_notifications = collections.deque(maxlen=SENT_BUFFER_QTY)
def _init_error_response_handler_worker(self):
self._send_lock = threading.RLock()
self._error_response_handler_worker = self.ErrorResponseHandlerWorker(apns_connection=self)
self._error_response_handler_worker.start()
_logger.debug("initialized error-response handler worker")
def _get_notification(self, token_hex, payload):
"""
Takes a token as a hex string and a payload as a Python dict and sends
the notification
"""
token_bin = a2b_hex(token_hex)
token_length_bin = APNs.packed_ushort_big_endian(len(token_bin))
payload_json = payload.json()
payload_length_bin = APNs.packed_ushort_big_endian(len(payload_json))
zero_byte = '\0'
if sys.version_info[0] != 2:
zero_byte = bytes(zero_byte, 'utf-8')
notification = (zero_byte + token_length_bin + token_bin
+ payload_length_bin + payload_json)
return notification
def _get_enhanced_notification(self, token_hex, payload, identifier, expiry):
"""
form notification data in an enhanced format
"""
token = a2b_hex(token_hex)
payload = payload.json()
fmt = ENHANCED_NOTIFICATION_FORMAT % len(payload)
notification = pack(fmt, ENHANCED_NOTIFICATION_COMMAND, identifier, expiry,
TOKEN_LENGTH, token, len(payload), payload)
return notification
def send_notification(self, token_hex, payload, identifier=0, expiry=0):
"""
in enhanced mode, send_notification may return error response from APNs if any
"""
if self.enhanced:
message = self._get_enhanced_notification(token_hex, payload, identifier, expiry)
notification = {'id': identifier, 'message': message}
self.send_data(message, [notification], [identifier])
else:
self.write(self._get_notification(token_hex, payload))
def send_data(self, data, notifications, notification_ids):
success = False
self._working = True
for i in xrange(self.write_retries):
try:
with self._send_lock:
self._last_activity_time = time.time()
self._make_sure_error_response_handler_worker_alive()
self.write(data)
self._sent_notifications += notifications
success = True
break
except socket_error as e:
delay = 10 + (i * 2)
_logger.exception("sending data to APNS failed: " + str(type(e)) + ": " + str(e) +
" in " + str(i + 1) + "th attempt, will wait " + str(delay) + " secs for next action")
time.sleep(delay) # wait potential error-response to be read
self._working = False
# if error listener exists, call it, and pass list of notifications ids that couldn't be sent
if not success and self._error_listener:
self._error_listener(notification_ids)
def _make_sure_error_response_handler_worker_alive(self):
if (not self._error_response_handler_worker
or not self._error_response_handler_worker.is_alive()):
self._init_error_response_handler_worker()
TIMEOUT_SEC = 10
for _ in xrange(TIMEOUT_SEC):
if self._error_response_handler_worker.is_alive():
_logger.debug("error response handler worker is running")
return
time.sleep(1)
_logger.warning("error response handler worker is not started after %s secs" % TIMEOUT_SEC)
def send_notification_multiple(self, frame):
data = str(frame.get_frame())
if self.enhanced:
self.send_data(data, frame.get_notifications(self), frame.get_notification_ids())
else:
self.write(data)
def register_response_listener(self, response_listener):
self._response_listener = response_listener
def register_error_listener(self, error_listener):
self._error_listener = error_listener
def force_close(self):
if self._error_response_handler_worker:
self._error_response_handler_worker.close()
def _is_idle_timeout(self):
TIMEOUT_IDLE = 30
return (time.time() - self._last_activity_time) >= TIMEOUT_IDLE
def is_sending_finished(self):
"""
Sending is finished if it's not working currently (not trying to send some data) and
it's idle for time - time given to ErrorResponseHandler to catch some error.
"""
TIMEOUT_IDLE = 3
is_idle = (time.time() - self._last_activity_time) >= TIMEOUT_IDLE
return not self._working and is_idle
class ErrorResponseHandlerWorker(threading.Thread):
def __init__(self, apns_connection):
threading.Thread.__init__(self, name=self.__class__.__name__)
self._apns_connection = apns_connection
self._close_signal = False
def close(self):
self._close_signal = True
def run(self):
while True:
if self._close_signal:
_logger.debug("received close thread signal")
break
if self._apns_connection._is_idle_timeout():
idled_time = (time.time() - self._apns_connection._last_activity_time)
_logger.debug("connection idle after %d secs" % idled_time)
break
if not self._apns_connection.connection_alive:
time.sleep(1)
continue
try:
rlist, _, _ = select.select([self._apns_connection._connection()], [], [], WAIT_READ_TIMEOUT_SEC)
if len(rlist) > 0: # there's some data from APNs
notifications_to_be_resent = []
with self._apns_connection._send_lock:
buff = self._apns_connection.read(ERROR_RESPONSE_LENGTH)
if len(buff) == ERROR_RESPONSE_LENGTH:
command, status, identifier = unpack(ERROR_RESPONSE_FORMAT, buff)
if 8 == command: # there is error response from APNS
error_response = (status, identifier)
if self._apns_connection._response_listener:
self._apns_connection._response_listener(
Util.convert_error_response_to_dict(error_response))
_logger.info("got error-response from APNS:" + str(error_response))
self._apns_connection._disconnect()
# self._resend_notifications_by_id(identifier)
fail_idx = Util.getListIndexFromID(self._apns_connection._sent_notifications,
identifier)
end_idx = len(self._apns_connection._sent_notifications)
notifications_to_be_resent = collections.deque(
itertools.islice(self._apns_connection._sent_notifications, (fail_idx + 1),
end_idx))
self._apns_connection._sent_notifications.clear()
if len(buff) == 0:
_logger.warning("read socket got 0 bytes data") # DEBUG
self._apns_connection._disconnect()
# Resending notifications one by one
for notif in notifications_to_be_resent:
self._apns_connection.send_data(notif['message'], [notif], [notif['id']])
except socket_error as e: # APNS close connection arbitrarily
_logger.exception(
"exception occur when reading APNS error-response: " + str(type(e)) + ": " + str(e)) # DEBUG
self._apns_connection._disconnect()
continue
time.sleep(0.1) # avoid crazy loop if something bad happened. e.g. using invalid certificate
self._apns_connection._disconnect()
_logger.debug("error-response handler worker closed") # DEBUG
def _resend_notifications_by_id(self, failed_identifier):
fail_idx = Util.getListIndexFromID(self._apns_connection._sent_notifications, failed_identifier)
# pop-out success notifications till failed one
self._resend_notification_by_range(fail_idx + 1, len(self._apns_connection._sent_notifications))
return
def _resend_notification_by_range(self, start_idx, end_idx):
self._apns_connection._sent_notifications = collections.deque(
itertools.islice(self._apns_connection._sent_notifications, start_idx, end_idx))
_logger.info("resending %s notifications to APNS" % len(self._apns_connection._sent_notifications)) # DEBUG
for sent_notification in self._apns_connection._sent_notifications:
_logger.debug("resending notification with id:" + str(sent_notification['id']) + " to APNS") # DEBUG
try:
self._apns_connection.write(sent_notification['message'])
except socket_error as e:
_logger.exception(
"resending notification with id:" + str(sent_notification['id']) + " failed: " + str(
type(e)) + ": " + str(e)) # DEBUG
break
time.sleep(DELAY_RESEND_SEC) # DEBUG
class Util(object):
@classmethod
def getListIndexFromID(this_class, the_list, identifier):
return next(index for (index, d) in enumerate(the_list)
if d['id'] == identifier)
@classmethod
def convert_error_response_to_dict(this_class, error_response_tuple):
return {ER_STATUS: error_response_tuple[0], ER_IDENTIFER: error_response_tuple[1]}
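# --------------------------------------------------------------------------- #
# Hedged usage sketch (not part of the original module): the typical PyAPNs
# flow. The certificate paths and the 64-character device token below are
# placeholders; running this for real requires valid APNs credentials.
#
# apns = APNs(use_sandbox=True, cert_file='cert.pem', key_file='key.pem')
# token_hex = 'b5bb9d8014a0f9b1d61e21e796d78dcc' * 2   # fake 64-char token
# payload = Payload(alert="Hello World!", sound="default", badge=1)
# apns.send_notification(token_hex, payload)
#
# # Several notifications in one frame:
# frame = Frame()
# frame.add_item(token_hex, payload, identifier=1,
#                expiry=int(time.time()) + 3600, priority=10)
# apns.gateway_server.send_notification_multiple(frame)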
| 38.595041
| 121
| 0.585011
|
346c861e588d4053249c93786c02cbac15821294
| 7,727
|
py
|
Python
|
tests/mockssh.py
|
bchess/mrjob
|
7415f57884f2ba4313ef9164e023174d5f36abae
|
[
"Apache-2.0"
] | null | null | null |
tests/mockssh.py
|
bchess/mrjob
|
7415f57884f2ba4313ef9164e023174d5f36abae
|
[
"Apache-2.0"
] | null | null | null |
tests/mockssh.py
|
bchess/mrjob
|
7415f57884f2ba4313ef9164e023174d5f36abae
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009-2011 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A mock version of the ssh binary that actually manipulates the
filesystem. This imitates only things that mrjob actually uses.
Relies on these environment variables:
MOCK_SSH_ROOTS -- specify directories for hosts in the form:
host1=/tmp/dir1:host2=/tmp/dir2
MOCK_SSH_VERIFY_KEY_FILE -- set to 'true' if the script should print an error
when the key file does not exist
This is designed to run as: python -m tests.mockssh <ssh args>
mrjob requires a single binary (no args) to stand in for ssh, so
use create_mock_hadoop_script() to write out a shell script that runs
mockssh.
"""
from __future__ import with_statement
import os
import pipes
import posixpath
import re
import stat
import sys
def create_mock_ssh_script(path):
"""Dump a wrapper script to the given file object that runs this
python script."""
# make this work even if $PATH or $PYTHONPATH changes
with open(path, 'w') as f:
f.write('#!/bin/sh\n')
f.write('%s %s "$@"\n' % (
pipes.quote(sys.executable),
pipes.quote(os.path.abspath(__file__))))
os.chmod(path, stat.S_IREAD | stat.S_IEXEC)
def path_for_host(host):
"""Get the filesystem path that the given host is being faked at"""
for kv_pair in os.environ['MOCK_SSH_ROOTS'].split(':'):
this_host, this_path = kv_pair.split('=')
if this_host == host:
return os.path.abspath(this_path)
raise KeyError('Host %s is not specified in $MOCK_SSH_ROOTS (%s)' %
(host, os.environ['MOCK_SSH_ROOTS']))
def rel_posix_to_rel_local(path):
"""Convert a POSIX path to the current system's format"""
return os.path.join(*path.split('/'))
def rel_posix_to_abs_local(host, path):
"""Convert a POSIX path to the current system's format and prepend the
tmp directory the host's files are in
"""
if path.startswith('/'):
path = path[1:]
root = path_for_host(host)
return os.path.join(root, *path.split('/'))
def mock_ssh_dir(host, path):
"""Create a directory at ``path`` relative to the temp directory for
``host``, where ``path`` is a POSIX path
"""
dest = rel_posix_to_abs_local(host, path)
if not os.path.exists(dest):
os.makedirs(dest)
def mock_ssh_file(host, path, contents):
"""Create a directory at ``path`` relative to the temp directory for
``host``, where ``path`` is a POSIX path.
Returns the path of the resulting file on the filesystem for sanity
checking.
"""
path = rel_posix_to_abs_local(host, path)
basename, name = os.path.split(path)
if not os.path.exists(basename):
os.makedirs(basename)
with open(path, 'w') as f:
f.write(contents)
return path
_SLAVE_ADDR_RE = re.compile(r'^(?P<master>.*?)!(?P<slave>.*?)=(?P<dir>.*)$')
def slave_addresses():
"""Get the addresses for slaves based on :envvar:`MOCK_SSH_ROOTS`"""
for kv_pair in os.environ['MOCK_SSH_ROOTS'].split(':'):
m = _SLAVE_ADDR_RE.match(kv_pair)
if m:
print m.group('slave')
_SCP_RE = re.compile(r'^.*"cat > (?P<filename>.*?)".*$')
def receive_poor_mans_scp(host, args):
"""Mock SSH behavior for :py:func:`~mrjob.ssh.poor_mans_scp()`"""
dest = _SCP_RE.match(args[0]).group('filename')
try:
with open(os.path.join(path_for_host(host), dest), 'w') as f:
f.writelines(sys.stdin)
except IOError:
print >> sys.stderr, 'No such file or directory:', dest
def ls(host, args):
"""Mock SSH behavior for :py:func:`~mrjob.ssh.ssh_ls()`"""
dest = args[1]
root = path_for_host(host)
local_dest = rel_posix_to_abs_local(host, dest)
prefix_length = len(path_for_host(host))
if not os.path.exists(local_dest):
print >> sys.stderr, 'No such file or directory:', local_dest
sys.exit(1)
if not os.path.isdir(local_dest):
print dest
for root, dirs, files in os.walk(local_dest):
components = root.split(os.sep)
new_root = posixpath.join(*components)
for filename in files:
print '/' + posixpath.join(new_root, filename)[prefix_length:]
def cat(host, args):
"""Mock SSH behavior for :py:func:`~mrjob.ssh.ssh_cat()`"""
local_dest = rel_posix_to_abs_local(host, args[1])
if not os.path.exists(local_dest):
print >> sys.stderr, 'No such file or directory:', local_dest
sys.exit(1)
with open(local_dest, 'r') as f:
print f.read()
def run(host, remote_args, slave_key_file=None):
"""Execute a command as a "host." Recursively call for slave if necessary.
"""
remote_arg_pos = 0
    # Get slave addresses (this is 'bash -c "hadoop dfsadmin ...')
if remote_args[0].startswith('bash -c "hadoop'):
slave_addresses()
return
# Accept stdin for a file transfer (this is 'bash -c "cat > ...')
if remote_args[0].startswith('bash -c "cat'):
receive_poor_mans_scp(host, remote_args)
return
# ls (this is 'find -type f ...')
if remote_args[0] == 'find':
ls(host, remote_args)
return
# cat (this is 'cat ...')
if remote_args[0] == 'cat':
cat(host, remote_args)
return
# Recursively call for slaves
if remote_args[0] == 'ssh':
# Actually check the existence of the key file on the master node
while not remote_args[remote_arg_pos] == '-i':
remote_arg_pos += 1
slave_key_file = remote_args[remote_arg_pos + 1]
if not os.path.exists(
os.path.join(path_for_host(host), slave_key_file)):
# This is word-for-word what SSH says.
            print >> sys.stderr, 'Warning: Identity file', \
                slave_key_file, 'not accessible: No such file or directory.'
print >> sys.stderr, 'Permission denied (publickey).'
sys.exit(1)
while not remote_args[remote_arg_pos].startswith('hadoop@'):
remote_arg_pos += 1
slave_host = host + '!%s' % remote_args[remote_arg_pos].split('@')[1]
# build bang path
run(slave_host,
remote_args[remote_arg_pos + 1:],
slave_key_file)
return
print >> sys.stderr, ("Command line not recognized: %s" %
' '.join(remote_args))
sys.exit(1)
def main():
args = sys.argv
# Find where the user's commands begin
arg_pos = 0
# skip to key file path
while args[arg_pos] != '-i':
arg_pos += 1
arg_pos += 1
# verify existence of key pair file if necessary
if os.environ.get('MOCK_SSH_VERIFY_KEY_FILE', 'false') == 'true' \
and not os.path.exists(args[arg_pos]):
        print >> sys.stderr, 'Warning: Identity file', \
            args[arg_pos], 'not accessible: No such file or directory.'
sys.exit(1)
# skip to host address
while not args[arg_pos].startswith('hadoop@'):
arg_pos += 1
host = args[arg_pos].split('@')[1]
# the rest are arguments are what to run on the remote machine
arg_pos += 1
run(host, args[arg_pos:])
if __name__ == '__main__':
main()
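# --------------------------------------------------------------------------- #
# Hedged usage sketch (not part of this module): how a test might point
# mockssh at a fake host. The directory, host name and key file below are
# placeholders.
#
# import subprocess, tempfile
# fake_root = tempfile.mkdtemp()
# os.environ['MOCK_SSH_ROOTS'] = 'testmaster=%s' % fake_root
# mock_ssh_file('testmaster', '/tmp/hello.txt', 'hi there\n')
# subprocess.call([sys.executable, '-m', 'tests.mockssh',
#                  '-i', 'key.pem', 'hadoop@testmaster',
#                  'cat', '/tmp/hello.txt'])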
| 31.032129
| 78
| 0.636728
|
5aa79bd3718e18c6be8fa997d3a65bc1423435fb
| 10,617
|
py
|
Python
|
pyswarms/base/base_single.py
|
claudin92/pyswarms_prac
|
2902ad96ca52f16f2de0304190207cb83995aaec
|
[
"MIT"
] | 1
|
2020-09-28T08:02:34.000Z
|
2020-09-28T08:02:34.000Z
|
pyswarms/base/base_single.py
|
claudin92/pyswarms_prac
|
2902ad96ca52f16f2de0304190207cb83995aaec
|
[
"MIT"
] | null | null | null |
pyswarms/base/base_single.py
|
claudin92/pyswarms_prac
|
2902ad96ca52f16f2de0304190207cb83995aaec
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
r"""
Base class for single-objective Particle Swarm Optimization
implementations.
All methods here are abstract and raise a :code:`NotImplementedError`
when not used. When defining your own swarm implementation,
create another class,
>>> class MySwarm(SwarmBase):
>>> def __init__(self):
>>> super(MySwarm, self).__init__()
and define all the necessary methods needed.
As a guide, check the global best and local best implementations in this
package.
.. note:: Regarding :code:`options`, it is highly recommended to
include parameters used in position and velocity updates as
keyword arguments. For parameters that affect the topology of
the swarm, it may be much better to have them as positional
arguments.
See Also
--------
:mod:`pyswarms.single.global_best`: global-best PSO implementation
:mod:`pyswarms.single.local_best`: local-best PSO implementation
"""
import os
import yaml
import logging
import numpy as np
import logging.config
from collections import namedtuple
# Import from package
from ..backend import create_swarm
class SwarmOptimizer(object):
def assertions(self):
"""Assertion method to check various inputs.
Raises
------
TypeError
When the :code:`bounds` is not of type tuple
IndexError
When the :code:`bounds` is not of size 2.
        When the arrays in :code:`bounds` are not of equal size.
When the shape of :code:`bounds` is not the same as :code:`dimensions`.
ValueError
When the value of :code:`bounds[1]` is less than
:code:`bounds[0]`.
"""
# Check setting of bounds
if self.bounds is not None:
if not isinstance(self.bounds, tuple):
raise TypeError('Parameter `bound` must be a tuple.')
if not len(self.bounds) == 2:
raise IndexError('Parameter `bound` must be of size 2.')
if not self.bounds[0].shape == self.bounds[1].shape:
raise IndexError('Arrays in `bound` must be of equal shapes')
if not self.bounds[0].shape[0] == self.bounds[1].shape[0] == \
self.dimensions:
raise IndexError('Parameter `bound` must be the same shape '
'as dimensions.')
if not (self.bounds[1] > self.bounds[0]).all():
raise ValueError('Values of `bounds[1]` must be greater than '
'`bounds[0]`.')
# Check clamp settings
if hasattr(self.velocity_clamp, '__iter__'):
if not len(self.velocity_clamp) == 2:
raise IndexError('Parameter `velocity_clamp` must be of '
'size 2')
if not self.velocity_clamp[0] < self.velocity_clamp[1]:
raise ValueError('Make sure that velocity_clamp is in the '
'form (min, max)')
# Check setting of center
if isinstance(self.center, (list, np.ndarray)):
if not len(self.center) == self.dimensions:
raise IndexError('Parameter `center` must be the same shape '
'as dimensions.')
if isinstance(self.center, np.ndarray) and self.center.ndim != 1:
raise ValueError('Parameter `center` must have a 1d array')
# Required keys in options argument
if not all(key in self.options for key in ('c1', 'c2', 'w')):
raise KeyError('Missing either c1, c2, or w in options')
def setup_logging(self, default_path='./config/logging.yaml',
default_level=logging.INFO, env_key='LOG_CFG'):
"""Setup logging configuration
Parameters
----------
default_path : str (default is `./config/logging.yaml`)
the path where the logging configuration is stored
default_level: logging.LEVEL (default is `logging.INFO`)
the default logging level
env_key : str
the environment key for accessing the setup
"""
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
def __init__(self, n_particles, dimensions, options, bounds=None,
velocity_clamp=None, center=1.0, ftol=-np.inf, init_pos=None):
"""Initializes the swarm.
Creates a Swarm class depending on the values initialized
Attributes
----------
n_particles : int
number of particles in the swarm.
dimensions : int
number of dimensions in the space.
options : dict with keys :code:`{'c1', 'c2', 'w'}`
a dictionary containing the parameters for the specific
optimization technique
* c1 : float
cognitive parameter
* c2 : float
social parameter
* w : float
inertia parameter
bounds : tuple of :code:`np.ndarray` (default is :code:`None`)
a tuple of size 2 where the first entry is the minimum bound
while the second entry is the maximum bound. Each array must
be of shape :code:`(dimensions,)`.
velocity_clamp : tuple (default is :code:`None`)
a tuple of size 2 where the first entry is the minimum velocity
and the second entry is the maximum velocity. It
sets the limits for velocity clamping.
center : list (default is :code:`None`)
an array of size :code:`dimensions`
ftol : float
relative error in objective_func(best_pos) acceptable for convergence
"""
self.setup_logging()
# Initialize primary swarm attributes
self.n_particles = n_particles
self.dimensions = dimensions
self.bounds = bounds
self.velocity_clamp = velocity_clamp
self.swarm_size = (n_particles, dimensions)
self.options = options
self.center = center
self.ftol = ftol
self.init_pos = init_pos
# Initialize named tuple for populating the history list
self.ToHistory = namedtuple('ToHistory',
['best_cost', 'mean_pbest_cost',
'mean_neighbor_cost', 'position',
'velocity'])
# Invoke assertions
self.assertions()
# Initialize resettable attributes
self.reset()
def _populate_history(self, hist):
"""Populates all history lists
The :code:`cost_history`, :code:`mean_pbest_history`, and
:code:`neighborhood_best` is expected to have a shape of
:code:`(iters,)`,on the other hand, the :code:`pos_history`
and :code:`velocity_history` are expected to have a shape of
:code:`(iters, n_particles, dimensions)`
Parameters
----------
hist : namedtuple
Must be of the same type as self.ToHistory
"""
self.cost_history.append(hist.best_cost)
self.mean_pbest_history.append(hist.mean_pbest_cost)
self.mean_neighbor_history.append(hist.mean_neighbor_cost)
self.pos_history.append(hist.position)
self.velocity_history.append(hist.velocity)
@property
def get_cost_history(self):
"""Get cost history"""
return np.array(self.cost_history)
@property
def get_mean_pbest_history(self):
"""Get mean personal best history"""
return np.array(self.mean_pbest_history)
@property
def get_mean_neighbor_history(self):
"""Get mean neighborhood cost history"""
return np.array(self.mean_neighbor_history)
@property
def get_pos_history(self):
"""Get position history"""
return np.array(self.pos_history)
@property
def get_velocity_history(self):
"""Get velocity history"""
return np.array(self.velocity_history)
def optimize(self, objective_func, iters, print_step=1, verbose=1):
"""Optimizes the swarm for a number of iterations.
Performs the optimization to evaluate the objective
function :code:`objective_func` for a number of iterations
:code:`iter.`
Parameters
----------
objective_func : function
objective function to be evaluated
iters : int
number of iterations
print_step : int (the default is 1)
amount of steps for printing into console.
verbose : int (the default is 1)
verbosity setting.
Raises
------
NotImplementedError
When this method is not implemented.
"""
raise NotImplementedError("SwarmOptimizer::optimize()")
def reset(self):
"""Resets the attributes of the optimizer.
        All variables/attributes that will be re-initialized when this
        method is called are defined here. Note that this method
can be called twice: (1) during initialization, and (2) when
this is called from an instance.
It is good practice to keep the number of resettable
attributes at a minimum. This is to prevent spamming the same
object instance with various swarm definitions.
Normally, swarm definitions are as atomic as possible, where
each type of swarm is contained in its own instance. Thus, the
following attributes are the only ones recommended to be
resettable:
* Swarm position matrix (self.pos)
* Velocity matrix (self.pos)
* Best scores and positions (gbest_cost, gbest_pos, etc.)
Otherwise, consider using positional arguments.
"""
# Initialize history lists
self.cost_history = []
self.mean_pbest_history = []
self.mean_neighbor_history = []
self.pos_history = []
self.velocity_history = []
# Initialize the swarm
self.swarm = create_swarm(n_particles=self.n_particles,
dimensions=self.dimensions,
bounds=self.bounds, center=self.center, init_pos=self.init_pos,
clamp=self.velocity_clamp, options=self.options)
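# --------------------------------------------------------------------------- #
# Hedged sketch (not part of the original module): the smallest possible
# concrete subclass. A real optimizer would update self.swarm on every
# iteration; this one only evaluates the initial positions, for illustration.
class MinimalSwarm(SwarmOptimizer):
    def optimize(self, objective_func, iters, print_step=1, verbose=1):
        costs = objective_func(self.swarm.position)   # shape (n_particles,)
        best = np.argmin(costs)
        return costs[best], self.swarm.position[best]

# Example wiring (the sphere function below is an arbitrary choice):
#   opt = MinimalSwarm(n_particles=10, dimensions=2,
#                      options={'c1': 0.5, 'c2': 0.3, 'w': 0.9})
#   best_cost, best_pos = opt.optimize(lambda x: (x ** 2).sum(axis=1), iters=1)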
| 37.648936
| 97
| 0.601206
|
f739df0d02c431262465c9f9cddbb2964b329c63
| 1,077
|
py
|
Python
|
twitoff/predict.py
|
EEdwardsA/Twitoff
|
e1c2613c233e81c5aa50fecb89e90c75b9bbdd01
|
[
"MIT"
] | null | null | null |
twitoff/predict.py
|
EEdwardsA/Twitoff
|
e1c2613c233e81c5aa50fecb89e90c75b9bbdd01
|
[
"MIT"
] | null | null | null |
twitoff/predict.py
|
EEdwardsA/Twitoff
|
e1c2613c233e81c5aa50fecb89e90c75b9bbdd01
|
[
"MIT"
] | null | null | null |
"""Prediction of Users based on tweet embeddings"""
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user0_name, user1_name, hypo_tweet_text):
"""
Determine and return which user is more likely to say a hypothetical tweet
Example run: predict_user('elonmusk', 'nasa', 'Tesla cars are rad')
returns 0 (user0_name) or 1 (user1_name)
"""
user0 = User.query.filter(User.name == user0_name).one()
# TODO: create try/except block
user1 = User.query.filter(User.name == user1_name).one()
user0_vects = np.array([tweet.vect for tweet in user0.tweets])
user1_vects = np.array([tweet.vect for tweet in user1.tweets])
vects = np.vstack([user0_vects, user1_vects])
labels = np.concatenate(
[np.zeros(len(user0.tweets)), np.ones(len(user1.tweets))])
hypo_tweet_vect = vectorize_tweet(hypo_tweet_text)
log_reg = LogisticRegression().fit(vects, labels)
return log_reg.predict(hypo_tweet_vect.reshape(1, -1))
| 35.9
| 78
| 0.714949
|
65726b94957de72f06a015f8348b8218916d6994
| 401
|
py
|
Python
|
myapp/SetUtf8.py
|
abars/illustbook
|
3e790a688c19205b7384cc5815ca76c23b88f09a
|
[
"MIT"
] | 3
|
2016-06-16T20:11:45.000Z
|
2022-01-27T04:23:09.000Z
|
myapp/SetUtf8.py
|
abars/illustbook
|
3e790a688c19205b7384cc5815ca76c23b88f09a
|
[
"MIT"
] | 1
|
2017-10-23T00:23:13.000Z
|
2017-10-23T00:23:13.000Z
|
myapp/SetUtf8.py
|
abars/illustbook
|
3e790a688c19205b7384cc5815ca76c23b88f09a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#---------------------------------------------------
#Set the character encoding to UTF-8
#copyright 2010-2012 ABARS all rights reserved.
#---------------------------------------------------
import sys
class SetUtf8:
@staticmethod
def set():
stdin = sys.stdin
stdout = sys.stdout
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdin = stdin
sys.stdout = stdout
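# Hedged usage sketch (not part of the original module, Python 2 only):
#   from myapp.SetUtf8 import SetUtf8
#   SetUtf8.set()   # implicit str/unicode conversions now default to UTF-8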
| 21.105263
| 52
| 0.513716
|
b1b9f0d140b078502871ce7de32d378c3653ced6
| 8,456
|
py
|
Python
|
pwnlib/rop/call.py
|
tkmikan/pwntools
|
1238fc359eb72313d3f82849b2effdb7063ab429
|
[
"MIT"
] | 8,966
|
2015-01-02T11:58:14.000Z
|
2022-03-31T21:19:56.000Z
|
pwnlib/rop/call.py
|
tkmikan/pwntools
|
1238fc359eb72313d3f82849b2effdb7063ab429
|
[
"MIT"
] | 1,401
|
2015-01-01T00:56:22.000Z
|
2022-03-31T16:19:53.000Z
|
pwnlib/rop/call.py
|
tkmikan/pwntools
|
1238fc359eb72313d3f82849b2effdb7063ab429
|
[
"MIT"
] | 1,844
|
2015-01-07T04:38:06.000Z
|
2022-03-30T03:54:46.000Z
|
# -*- coding: utf-8 -*-
"""Abstracting ROP calls
"""
from __future__ import division
from pwnlib.abi import ABI
from pwnlib.context import context
from pwnlib.util import packing
import six
from pwnlib.util.misc import python_2_bytes_compatible, align
class Unresolved(object):
"""
Encapsulates logic for deferring evaluation of a value used
in a ROP chain which is in some way self-referential.
For example, it may be necessary to point to arbitrary data
appended to the ROP chain, but whose address is not known until
the full ROP chain is complete (because the data is appended
after all of the gadgets).
"""
pass
class CurrentStackPointer(Unresolved):
"""
Unresolved argument which will be replaced with the address of itself.
"""
pass
class NextGadgetAddress(Unresolved):
"""
Unresolved argument which will be replaced with the address of the next
gadget on the stack.
This is useful for gadgets which set the stack pointer to an absolute
value, when we wish to continue "execution" of the ROP stack at the
next gadget. In particular, SROP needs this.
"""
pass
class StackAdjustment(Unresolved):
"""
Placeholder for a ROP gadget which will adjust the stack pointer such
that "execution" continues at the next ROP gadget.
This is necessary for ABIs which place arguments on the stack.
If no stack adjustment is necessary (e.g. a call with no stack-based
arguments), no data is emitted and the ROP will fall-through to the
next gadget.
"""
pass
@python_2_bytes_compatible
class AppendedArgument(Unresolved):
r"""
Encapsulates information about a pointer argument, and the data
which is pointed to, where the absolute address of the data must
be known, and the data can be appended to the ROP chain.
Examples:
>>> context.clear()
>>> context.arch = 'amd64'
>>> u = AppendedArgument([1,2,b'hello',3])
>>> len(u)
32
>>> u.resolve()
[1, 2, b'hello\x00$$', 3]
>>> u = AppendedArgument([1,2,[b'hello'],3])
>>> u.resolve()
[1, 2, 32, 3, b'hello\x00$$']
>>> u.resolve(10000)
[1, 2, 10032, 3, b'hello\x00$$']
>>> u.address = 20000
>>> u.resolve()
[1, 2, 20032, 3, b'hello\x00$$']
>>> u = AppendedArgument([[[[[[[[[b'pointers!']]]]]]]]], 1000)
>>> u.resolve()
[1008, 1016, 1024, 1032, 1040, 1048, 1056, 1064, b'pointers!\x00$$$$$$']
"""
#: Symbolic name of the value.
name = None
#: The values to be placed at a known location
#:
#: A list of any of the following types:
#: - int
#: - str
#: - UnresolvedArgument (allows nesting)
values = []
#: The size of the fully-resolved argument, in bytes
size = 0
#: Absolute address of the target data in memory.
#: When modified, updates recursively.
address = 0
def __init__(self, value, address = 0):
if not isinstance(value, (list, tuple)):
value = [value]
self.values = []
self.address = address
for v in value:
if isinstance(v, (list, tuple)):
self.size += context.bytes
else:
if isinstance(v, six.text_type):
v = packing._need_bytes(v)
try:
self.size += align(context.bytes, len(v))
except TypeError: # no 'len'
self.size += context.bytes
for v in value:
if isinstance(v, (list, tuple)):
arg = AppendedArgument(v, self.address + self.size)
self.size += arg.size
self.values.append(arg)
else:
self.values.append(v)
@property
def address(self):
return self._address
@address.setter
def address(self, value):
old = self._address
delta = value - old
for v in self.values:
if isinstance(v, Unresolved):
v.address += delta
self._address = value
_address = 0
def local(self, address):
original = self.address
class LocalAddress(object):
def __enter__(*a, **kw):
self.address = address
def __exit__(*a, **kw):
self.address = original
return LocalAddress()
def resolve(self, addr=None):
"""
Return a flat list of ``int`` or ``bytes`` objects which can be
passed to :func:`.flat`.
Arguments:
addr(int): Address at which the data starts in memory.
If :const:`None`, ``self.addr`` is used.
"""
if addr is None:
addr = self.address
with self.local(addr):
self.address = addr
rv = [None] * len(self.values)
for i, value in enumerate(self.values):
if isinstance(value, six.integer_types):
rv[i] = value
elif isinstance(value, six.text_type):
value = packing._need_bytes(value)
if isinstance(value, (bytes, bytearray)):
value += b'\x00'
while len(value) % context.bytes:
value += b'$'
rv[i] = value
elif isinstance(value, Unresolved):
rv[i] = value.address
rv.extend(value.resolve())
assert rv[i] is not None
return rv
def __len__(self):
return self.size
def __bytes__(self):
return packing.flat(self.resolve())
def __repr__(self):
if isinstance(self.address, six.integer_types):
return '%s(%r, %#x)' % (self.__class__.__name__, self.values, self.address)
else:
return '%s(%r, %r)' % (self.__class__.__name__, self.values, self.address)
class Call(object):
"""
Encapsulates ABI-agnostic information about a function call, which is
to be executed with ROP.
All non-integer arguments are assumed to be pointer arguments.
The raw data is placed at the end of the ROP chain, and the argument
is replaced with an exact pointer to the argument.
Example:
>>> Call('system', 0xdeadbeef, [1, 2, b'/bin/sh'])
Call('system', 0xdeadbeef, [1, 2, AppendedArgument([b'/bin/sh'], 0x0)])
"""
#: Pretty name of the call target, e.g. 'system'
name = None
#: Address of the call target
target = 0
#: Arguments to the call
args = []
def __init__(self, name, target, args, abi=None, before=()):
assert isinstance(name, (bytes, six.text_type))
# assert isinstance(target, six.integer_types)
assert isinstance(args, (list, tuple))
self.abi = abi or ABI.default()
self.name = name
self.target = target
self.args = list(args)
for i, arg in enumerate(args):
if not isinstance(arg, six.integer_types+(Unresolved,)):
self.args[i] = AppendedArgument(arg)
self.stack_arguments_before = before
def __repr__(self):
fmt = "%#x" if isinstance(self.target, six.integer_types) else "%r"
return '%s(%r, %s, %r)' % (self.__class__.__name__,
self.name,
fmt % self.target,
self.args)
@property
def register_arguments(self):
return dict(zip(self.abi.register_arguments, self.args))
@property
def stack_arguments(self):
return self.args[len(self.abi.register_arguments):]
@classmethod
def _special_repr(cls, x):
if isinstance(x, AppendedArgument):
x = x.values
if isinstance(x, list):
return list(map(cls._special_repr, x))
else:
return x
def __str__(self):
fmt = "%#x" if isinstance(self.target, six.integer_types) else "%r"
args = []
for arg in self.args:
args.append(self._special_repr(arg))
name = self.name or (fmt % self.target)
arg_str = []
for arg in args:
if isinstance(arg, six.integer_types) and arg > 0x100:
arg_str.append(hex(arg))
else:
arg_str.append(str(arg))
return '%s(%s)' % (name, ', '.join(arg_str))
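# Hedged sketch (not part of the original module): how the split between
# register_arguments and stack_arguments looks on amd64. Addresses and the
# exact output are illustrative only.
#
#   >>> context.clear(arch='amd64')
#   >>> call = Call('execve', 0xcafebabe, [b'/bin/sh', 0, 0])
#   >>> sorted(call.register_arguments)
#   ['rdi', 'rdx', 'rsi']
#   >>> call.stack_arguments
#   []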
| 30.2
| 87
| 0.566107
|
ebef34cbda89ca0f4489b45bf2b5a8785851df1f
| 1,642
|
py
|
Python
|
gateway/utils/testfinder.py
|
aceofwings/Cantactular
|
a6eb8d7128fd1388d3e75c1a8415123d1d5930e1
|
[
"MIT"
] | 3
|
2017-01-26T01:37:42.000Z
|
2018-07-22T02:42:52.000Z
|
gateway/utils/testfinder.py
|
aceofwings/Cantactular
|
a6eb8d7128fd1388d3e75c1a8415123d1d5930e1
|
[
"MIT"
] | 1
|
2017-07-07T18:02:20.000Z
|
2017-07-07T18:02:20.000Z
|
gateway/utils/testfinder.py
|
aceofwings/Evt-Gateway
|
a6eb8d7128fd1388d3e75c1a8415123d1d5930e1
|
[
"MIT"
] | null | null | null |
import os
import unittest
from gateway.utils.resourcelocator import ResourceLocator
from unittest import TestLoader
TEST_PATH = "tests"
verbosity = 1
test_loader = unittest.defaultTestLoader
def find_test_modules(file_pattern='test*.py'):
"""
Finds test modules within the test folder
"""
test_locator = ResourceLocator.get_locator(TEST_PATH)
test_suite = test_loader.discover(test_locator.ROOT_PATH, pattern=file_pattern)
return test_suite
def start_framework_test_sequence(test_classes=None):
    """
    start_framework_test_sequence - kicks off the framework test sequence.
    """
    # Assumed behaviour: the original line was an incomplete stub, so this
    # simply delegates to run_tests() below.
    return run_tests(test_classes)
def run_tests(test_classes=None):
"""
    run_tests - runs a test suite with the specified parameters
    :param test_classes: list of test class names to restrict the run to
    :return bool: False if no matching tests were found, True otherwise
"""
test_runner = unittest.TextTestRunner(verbosity=verbosity)
if test_classes is not None:
suite = load_test_from_classes(test_classes)
if not suite.countTestCases():
return False
else:
test_runner.run(suite)
return True
tests = find_test_modules()
test_runner.run(tests)
return True
def load_test_from_classes(class_names):
"""
load_test_from_classes - returns a suite with specified class_names
:param class_names: list of tests classnames to add to the suite
"""
test_suite = find_test_modules()
temp_ts = unittest.TestSuite()
for test in test_suite:
suite = test.__dict__['_tests']
if len(suite):
for case in suite:
if case.__dict__['_tests'][0].__class__.__name__ in class_names:
temp_ts.addTest(case)
return temp_ts
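# --- hedged usage sketch (not part of the original module) -------------------
# One way the helpers above might be driven from the command line; the CLI
# handling below is an assumption made only for illustration.
if __name__ == "__main__":
    import sys
    # e.g. `python -m gateway.utils.testfinder TestCanBus TestGateway`
    selected = sys.argv[1:] or None
    ok = run_tests(test_classes=selected)
    sys.exit(0 if ok else 1)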
| 29.854545
| 83
| 0.698538
|
aa3605dfa092541f9003ebdde50c0565380e63a9
| 15,154
|
py
|
Python
|
ibmsecurity/isam/base/network/felb/config.py
|
ibm-enio/ibmsecurity
|
81f989678642c3b6a49b2a3fbb5d9ca98804ef17
|
[
"Apache-2.0"
] | null | null | null |
ibmsecurity/isam/base/network/felb/config.py
|
ibm-enio/ibmsecurity
|
81f989678642c3b6a49b2a3fbb5d9ca98804ef17
|
[
"Apache-2.0"
] | null | null | null |
ibmsecurity/isam/base/network/felb/config.py
|
ibm-enio/ibmsecurity
|
81f989678642c3b6a49b2a3fbb5d9ca98804ef17
|
[
"Apache-2.0"
] | null | null | null |
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
module_uri = "/isam/felb"
requires_module = None
requires_version = None
def export(isamAppliance, check_mode=False, force=False):
"""
Exporting current FELB configuration with RESTful web service
"""
    return isamAppliance.invoke_get("Exporting FELB configuration", "{0}?export=true".format(module_uri),
                                    requires_modules=requires_module, requires_version=requires_version)
def imp_config(isamAppliance, file, check_mode=False, force=False):
"""
Importing FELB file
"""
change_required = _check_import(isamAppliance, file)
if force is True or change_required is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post("Importing Configuration", "{0}".format(module_uri),
{
"file": file
}, requires_version=requires_version,
requires_modules=requires_module)
else:
return isamAppliance.create_return_object(changed=False)
def replace(isamAppliance, enable, debug, ha_enable, is_primary, interface,
remote, port, health_check_interval, health_check_timeout, local, remote_address,
remote_port, remote_facility, ssl_enable, keyfile, services_enable, name, services_address,
services_port, netmask, services_interface, scheduler, services_health_check_interval, rise, fall,
layer_type, layer7_secure, layer7_ssl_label, layer7_cookie, attribute_name, attribute_value, server_id,
server_active, server_address, server_port, server_weight, server_secure=False, ssllabel=None,
check_mode=False, force=False):
"""
updates ssl configuration
"""
if force is True or _check(isamAppliance, enable, debug, ha_enable, is_primary, interface,
remote, port, health_check_interval, health_check_timeout, local, remote_address,
remote_port, remote_facility, ssl_enable, keyfile, services_enable, name,
services_address,
services_port, netmask, services_interface, scheduler, services_health_check_interval,
rise, fall,
layer_type, layer7_secure, layer7_ssl_label, layer7_cookie, attribute_name,
attribute_value, server_id,
                               server_active, server_address, server_port, server_weight,
                               server_secure=server_secure, ssllabel=ssllabel) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_put("Updating Configuration", "{0}".format(module_uri),
{
"enabled": enable,
"debug": debug,
"ha": {
"enabled": ha_enable,
"is_primary": is_primary,
"interface": interface,
"remote": remote,
"port": port,
"health_check_interval": health_check_interval,
"health_check_timeout": health_check_timeout
},
"logging": {
"local": local,
"remote_address": remote_address,
"remote_port": remote_port,
"remote_facility": remote_facility
},
"ssl": {
"enabled": ssl_enable,
"keyfile": keyfile
},
"services": [
{
"enabled": services_enable,
"name": name,
"address": services_address,
"port": services_port,
"netmask": netmask,
"interface": services_interface,
"scheduler": scheduler,
"health_check_interval": services_health_check_interval,
"rise": rise,
"fall": fall,
"layer": {
"type": layer_type,
"layer7_secure": layer7_secure,
"layer7_ssl_label": layer7_ssl_label,
"layer7_cookie": layer7_cookie
},
"attributes": [
{
"name": attribute_name,
"value": attribute_value
}
],
"servers": [
{
"id": server_id,
"active": server_active,
"address": server_address,
"port": server_port,
"weight": server_weight,
"secure": server_secure,
"ssllabel": ssllabel
}
]
}
],
})
def get(isamAppliance):
"""
Retrieves configuration in full
:param isamAppliance:
:return:
"""
return isamAppliance.invoke_get("Retrieving Configuration", module_uri)
def get_all(isamAppliance, check_mode=False, force=False):
"""
Retrieves configuration
:param isamAppliance:
:param check_mode:
:param force:
:return:
"""
return isamAppliance.invoke_get("Retrieving Configuration", "{0}/configuration".format(module_uri))
def update(isamAppliance, felb_id, value, check_mode=False, force=False):
"""
updates existing configuration
:param isamAppliance:
:param felb_id:
:param check_mode:
:param force:
:return:
"""
change_required = _check_update(isamAppliance, felb_id, value)
if force is True or change_required is True:
return isamAppliance.invoke_put("Updating configuration", "{0}/configuration/{1}".format(module_uri, felb_id),
{
"value": value
})
else:
return isamAppliance.create_return_object(changed=False)
def _check_import(isamAppliance, file):
"""
checks to see if file is already imported
"""
temp_obj = get(isamAppliance)
change_required = False
if temp_obj['file'] != file:
change_required = True
return change_required
def _check_update(isamappliance, felb_id, value):
"""
checks update for value passed
"""
change_required = False
temp_obj = isamappliance.invoke_get("Retrieving configuration", "{0}/configuration/{1}".format(module_uri, felb_id))
if temp_obj['value'] != value:
change_required = True
return change_required
def _check(isamAppliance, enable, debug, ha_enable, is_primary, interface,
remote, port, health_check_interval, health_check_timeout, local, remote_address,
remote_port, remote_facility, ssl_enable, keyfile, services_enable, name, services_address,
services_port, netmask, services_interface, scheduler, services_health_check_interval, rise, fall,
layer_type, layer7_secure, layer7_ssl_label, layer7_cookie, attribute_name, attribute_value, server_id,
server_active, server_address, server_port, server_weight, server_secure=False, ssllabel=None):
"""
Checks update in full
"""
check_obj = get(isamAppliance)
"""
json_data = {
"enable": enable,
"debug": debug,
"ha": {
"enable": ha_enable,
"is_primary": is_primary,
"interface": interface,
"remote": remote,
"port": port,
"health_check_interval": health_check_interval,
"health_check_timeout": health_check_timeout
},
"logging": {
"local": local,
"remote_address": remote_address,
"remote_port": remote_port,
"remote_facility": remote_facility
},
"ssl": {
"enable": ssl_enable,
"keyfile": keyfile
},
"services": [
{
"enable": services_enable,
"name": name,
"address": services_address,
"port": services_port,
"netmask": netmask,
"interface": services_interface,
"scheduler": scheduler,
"health_check_interval": services_health_check_interval,
"rise": rise,
"fall": fall,
"layer": {
"type": layer_type,
"layer7_secure": layer7_secure,
"layer7_ssl_label": layer7_ssl_label,
"layer7_cookie": layer7_cookie
},
"attributes": [
{
"name": attribute_name,
"value": attribute_value
}
],
"servers": [
{
"id": server_id,
"active": server_active,
"address": server_address,
"port": server_port,
"weight": server_weight,
"secure": server_secure,
"ssllabel": ssllabel
}
]
}
],
}
sort_check_obj = ibmsecurity.utilities.tools.json_sort(check_obj)
sort_json_data = ibmsecurity.utilities.tools.json_sort(json_data)
    print(sort_check_obj)
    print(sort_json_data)
"""
if check_obj['data']['debug'] != debug:
return True
if check_obj['data']['enabled'] != enable:
return True
if check_obj['data']['ha']['enabled'] != ha_enable:
return True
if check_obj['data']['ha']['health_check_interval'] != health_check_interval:
return True
if check_obj['data']['ha']['health_check_timeout'] != health_check_timeout:
return True
if check_obj['data']['ha']['interface'] != interface:
return True
if check_obj['data']['ha']['is_primary'] != is_primary:
return True
if check_obj['data']['ha']['port'] != port:
return True
if check_obj['data']['ha']['remote'] != remote:
return True
if check_obj['data']['logging']['local'] != local:
return True
if check_obj['data']['logging']['remote_address'] != remote_address:
return True
if check_obj['data']['logging']['remote_facility'] != remote_facility:
return True
if check_obj['data']['logging']['remote_port'] != remote_port:
return True
if check_obj['data']['services']['address'] != services_address:
return True
if check_obj['data']['services']['enabled'] != services_enable:
return True
if check_obj['data']['services']['fall'] != fall:
return True
if check_obj['data']['services']['health_check_interval'] != services_health_check_interval:
return True
if check_obj['data']['services']['interface'] != services_interface:
return True
if check_obj['data']['services']['name'] != name:
return True
if check_obj['data']['services']['netmask'] != netmask:
return True
if check_obj['data']['services']['port'] != services_port:
return True
if check_obj['data']['services']['rise'] != rise:
return True
if check_obj['data']['services']['scheduler'] != scheduler:
return True
if check_obj['data']['servers']['active'] != server_active:
return True
if check_obj['data']['servers']['address'] != server_address:
return True
if check_obj['data']['servers']['id'] != server_id:
return True
if check_obj['data']['servers']['port'] != server_port:
return True
if check_obj['data']['servers']['secure'] != server_secure:
return True
if check_obj['data']['servers']['ssllabel'] != ssllabel:
return True
if check_obj['data']['servers']['weight'] != server_weight:
return True
if check_obj['data']['servers']['layer']['type'] != layer_type:
return True
if check_obj['data']['servers']['layer']['layer7_cookie'] != layer7_cookie:
return True
if check_obj['data']['servers']['layer']['layer7_secure'] != layer7_secure:
return True
if check_obj['data']['servers']['layer']['layer7_ssl_label'] != layer7_ssl_label:
return True
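# --- hedged usage sketch (not part of the original module) -------------------
# Shows how the idempotent update() helper above might be called. `appliance`
# stands in for an ISAM appliance object exposing invoke_get/invoke_put/
# create_return_object, and the felb_id value below is a hypothetical key.
def example_enable_local_logging(appliance):
    # update() only issues a PUT when _check_update() reports a difference.
    return update(appliance, felb_id="logging.local", value=True)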
| 44.309942
| 120
| 0.472746
|
b4dc593bcd634307cee83e4120d8eb0bc538ce3c
| 26,401
|
py
|
Python
|
src/anomaly_toolbox/trainers/descargan.py
|
zurutech/anomaly-toolbox
|
ee772898b66b8be86cfa300334fb8cf7b826dc4d
|
[
"MIT"
] | 73
|
2021-09-13T14:35:16.000Z
|
2022-03-11T14:39:04.000Z
|
src/anomaly_toolbox/trainers/descargan.py
|
321Visual/anomaly-toolbox
|
ee772898b66b8be86cfa300334fb8cf7b826dc4d
|
[
"MIT"
] | 4
|
2021-09-14T13:41:55.000Z
|
2022-02-24T20:26:59.000Z
|
src/anomaly_toolbox/trainers/descargan.py
|
321Visual/anomaly-toolbox
|
ee772898b66b8be86cfa300334fb8cf7b826dc4d
|
[
"MIT"
] | 4
|
2021-09-14T08:45:17.000Z
|
2021-12-29T22:02:57.000Z
|
# Copyright 2021 Zuru Tech HK Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer for the DeScarGAN model."""
import json
from pathlib import Path
from typing import Dict, Set, Tuple, Union
import tensorflow as tf
import tensorflow.keras as k
from anomaly_toolbox.datasets.dataset import AnomalyDetectionDataset
from anomaly_toolbox.models.descargan import Discriminator, Generator
from anomaly_toolbox.trainers.trainer import Trainer
class DeScarGAN(Trainer):
"""DeScarGAN Trainer."""
def __init__(
self,
dataset: AnomalyDetectionDataset,
hps: Dict,
summary_writer: tf.summary.SummaryWriter,
log_dir: Path,
):
"""Initialize DeScarGAN Trainer."""
super().__init__(
dataset, hps=hps, summary_writer=summary_writer, log_dir=log_dir
)
# Data info
self._ill_label = dataset.anomalous_label
self._healthy_label = dataset.normal_label
# Models
self.generator = Generator(
ill_label=self._ill_label, n_channels=dataset.channels
)
self.discriminator = Discriminator(
ill_label=self._ill_label, n_channels=dataset.channels
)
# Optimizers
self.g_optimizer = k.optimizers.Adam(
learning_rate=hps["learning_rate"], beta_1=0.5, beta_2=0.999
)
self.d_optimizer = k.optimizers.Adam(
learning_rate=hps["learning_rate"], beta_1=0.5, beta_2=0.999
)
# Parameters
self._generator_training_steps = tf.constant(5, dtype=tf.int64)
self._g_lambda_identity = tf.constant(50.0)
self._g_lambda_reconstruction = tf.constant(50.0)
self._g_lambda_fake = tf.constant(1.0)
self._g_lambda_classification = tf.constant(1.0)
self._d_lambda_gradient_penalty = tf.constant(10.0)
self._d_lambda_fake = tf.constant(20.0)
self._d_lambda_real = tf.constant(20.0)
self._d_lambda_classification = tf.constant(5.0)
# Losses
self._classification_loss = k.losses.SparseCategoricalCrossentropy(
from_logits=True
)
self._reconstruction_error = k.losses.MeanSquaredError()
# Training Metrics
self.epoch_d_loss_avg = k.metrics.Mean(name="epoch_discriminator_loss")
self.epoch_g_loss_avg = k.metrics.Mean(name="epoch_generator_loss")
self.accuracy = k.metrics.BinaryAccuracy(name="binary_accuracy")
self.keras_metrics = {
metric.name: metric
for metric in [self.epoch_d_loss_avg, self.epoch_g_loss_avg, self.accuracy]
}
# Outside of the keras_metrics because it's used to compute the running
# mean and not as a metric
self._mean = k.metrics.Mean()
# Constants
self._zero = tf.constant(0.0)
self._zero_batch = tf.zeros((1, 1, 1, 1))
@staticmethod
def hyperparameters() -> Set[str]:
"""List of the hyperparameters name used by the trainer."""
return {"learning_rate"}
@staticmethod
def clip_by_norm_handle_none(grad, clip_norm):
"""
        tape.gradient returns None instead of a zero tensor when the
gradient would be a zero tensor and tf.clip_by_* does not support
a None value.
So just don't pass None to it and preserve None values.
Source: https://stackoverflow.com/a/39295309/2891324
"""
if grad is None:
return None
return tf.clip_by_norm(grad, clip_norm=clip_norm)
def _select_and_save(self, threshold: tf.Tensor):
current_accuracy = self.accuracy.result()
base_path = self._log_dir / "results" / "accuracy"
self.generator.save(
str(base_path / "generator"),
overwrite=True,
include_optimizer=False,
)
with open(base_path / "validation.json", "w") as fp:
json.dump(
{
"value": float(current_accuracy),
"threshold": float(threshold),
},
fp,
)
@tf.function
def train(
self,
epochs: int,
step_log_frequency: int = 100,
):
"""
Train the DeScarGAN generator and discriminator.
"""
step_log_frequency = tf.convert_to_tensor(step_log_frequency, dtype=tf.int64)
epochs = tf.convert_to_tensor(epochs, dtype=tf.int32)
best_accuracy = -1.0
for epoch in tf.range(epochs):
for batch in self._dataset.train:
# Perform the train step
d_loss, g_loss, reconstructions = self.train_step(batch)
# Update the losses metrics
self.epoch_d_loss_avg.update_state(d_loss)
self.epoch_g_loss_avg.update_state(g_loss)
# step -1 because in train_step self.d_optimizer.iterations has been incremented
if tf.math.equal(
tf.math.mod(self.d_optimizer.iterations - 1, step_log_frequency),
tf.constant(0, tf.int64),
):
x, y = batch
healthy_idx = tf.squeeze(tf.where(tf.equal(y, self._healthy_label)))
x_healthy = tf.gather(x, healthy_idx)
x_hat_healthy = tf.gather(reconstructions, healthy_idx)
if tf.equal(tf.rank(x_healthy), tf.constant(3)):
x_healthy = tf.expand_dims(x_healthy, axis=0)
x_hat_healthy = tf.expand_dims(x_hat_healthy, axis=0)
ill_idx = tf.squeeze(tf.where(tf.equal(y, self._ill_label)))
x_ill = tf.gather(x, ill_idx)
x_hat_ill = tf.gather(reconstructions, ill_idx)
if tf.equal(tf.rank(x_ill), tf.constant(3)):
x_ill = tf.expand_dims(x_ill, axis=0)
x_hat_ill = tf.expand_dims(x_hat_ill, axis=0)
with self._summary_writer.as_default():
tf.summary.scalar(
"d_loss", d_loss, step=self.d_optimizer.iterations
)
tf.summary.scalar(
"g_loss", g_loss, step=self.d_optimizer.iterations
)
tf.summary.image(
"healthy",
tf.concat(
[
x_healthy,
x_hat_healthy,
tf.abs(x_healthy - x_hat_healthy),
],
axis=2,
),
step=self.d_optimizer.iterations,
)
tf.summary.image(
"ill",
tf.concat(
[x_ill, x_hat_ill, tf.abs(x_ill - x_hat_ill)], axis=2
),
step=self.d_optimizer.iterations,
)
tf.print(
"[",
epoch,
"] step: ",
self.d_optimizer.iterations,
": d_loss: ",
d_loss,
", g_loss: ",
g_loss,
)
# Epoch end
# Global classification (not pixel level)
# 1. Find the reconstruction error on a sub-set of the validation set
# that WON'T be used during the score computation.
# Reason: https://stats.stackexchange.com/a/427468/91290
#
# "The error distribution on the training data is misleading since your
# training error distribution is not identical to test error distribution,
# due to inevitable over-fitting. Then, comparing training error
# distribution with future data is unjust."
#
# 2. Use the threshold to classify the validation set (positive and negative)
# 3. Compute the binary accuracy (we can use it since the dataset is perfectly balanced)
self._mean.reset_state()
for x, y in self._dataset.validation_normal:
self._mean.update_state(
tf.reduce_mean(
tf.math.abs(self.generator((x, y), training=False) - x)
)
)
threshold = self._mean.result()
tf.print(
"Reconstruction error on normal validation set: ",
threshold,
)
# reconstruction <= threshold, then is a normal data (label 0)
for x, y in self._dataset.test_normal.concatenate(
self._dataset.test_anomalous
):
self.accuracy.update_state(
y_true=y,
y_pred=tf.cast(
# reconstruction > threshold, then is anomalous (label 1 = cast(True))
# invoke the generator always with the normal label, since that's
# what we suppose to receive in input (and the threshold has been found
# using data that comes only from the normal distribution)
tf.math.greater(
tf.reduce_mean(
tf.math.abs(
self.generator(
(
x,
tf.ones(tf.shape(x)[0], dtype=tf.int32)
* self._dataset.normal_label,
),
training=False,
)
- x
),
axis=[1, 2, 3],
),
threshold,
),
tf.int32,
),
)
current_accuracy = self.accuracy.result()
tf.print("Binary accuracy on validation set: ", current_accuracy)
if best_accuracy < current_accuracy:
tf.py_function(self._select_and_save, [threshold], [])
best_accuracy = current_accuracy
with self._summary_writer.as_default():
tf.summary.scalar(
"accuracy", current_accuracy, step=self.d_optimizer.iterations
)
# Reset the metrics at the end of every epoch
self._reset_keras_metrics()
def gradient_penalty(
self, x: tf.Tensor, x_gen: tf.Tensor, labels: tf.Tensor
) -> tf.Tensor:
"""
Compute gradient penalty: L2(grad - 1)^2.
Args:
x: input batch
x_gen: generated images
labels: labels associated with x (and thus with x_gen)
Returns:
penalty on discriminator gradient
"""
epsilon = tf.random.uniform([tf.shape(x)[0], 1, 1, 1], 0.0, 1.0)
x_hat = epsilon * x + (1 - epsilon) * x_gen
labels = tf.cast(labels, tf.float32)
with tf.GradientTape() as tape:
tape.watch([x_hat, labels])
d_hat = self.discriminator([x_hat, labels], training=True)
gradients = tape.gradient(d_hat, x_hat)
ddx = tf.sqrt(tf.reduce_sum(gradients ** 2, axis=[1, 2]))
d_regularizer = tf.reduce_mean((ddx - 1.0) ** 2)
return d_regularizer
@tf.function
def train_step(
self,
inputs: Tuple[tf.Tensor, tf.Tensor],
):
"""
Single training step.
Args:
inputs: a tuple (x,y) containing the input samples (x) and their labels (y).
both x, and y, are batches.
If x is a batch of images the input shape is (batch_size, h, w, d).
The shape of y is always (batch_size,).
Returns:
d_loss, g_loss, x_hat, where
d_loss: discriminator loss
g_loss: generator loss
x_hat: a tensor with the same shape of inputs[0] containing the reconstructions.
"""
x, y = inputs
# Generate all the reconstruction for the current batch
# outside of the tape since we need this only for logging
x_hat = self.generator(inputs, training=True)
# All the gathering of the inputs can be done outside of the tape
# no need to track these operations for computing the gradient (save memory)
x_healthy = tf.gather(x, tf.squeeze(tf.where(tf.equal(y, self._healthy_label))))
if tf.equal(tf.rank(x_healthy), tf.constant(3)):
x_healthy = tf.expand_dims(x_healthy, axis=0)
x_ill = tf.gather(x, tf.squeeze(tf.where(tf.equal(y, self._ill_label))))
if tf.equal(tf.rank(x_ill), tf.constant(3)):
x_ill = tf.expand_dims(x_ill, axis=0)
# Count # healthy and # ill in batch
tot_healthy = tf.cast(tf.shape(x_healthy)[0], tf.float32)
tot_ill = tf.cast(tf.shape(x_ill)[0], tf.float32)
tot = tf.cast(tf.shape(x)[0], tf.float32)
percentage_healthy = tf.math.divide_no_nan(tot_healthy, tot)
percentage_ill = tf.math.divide_no_nan(tot_ill, tot)
# Scalar labels used in the losses
healthy_labels = tf.ones((tot_healthy,), dtype=tf.int32) * tf.squeeze(
self._healthy_label
)
ill_labels = tf.ones((tot_ill,), dtype=tf.int32) * tf.squeeze(self._ill_label)
# Train the discriminator
with tf.GradientTape(persistent=True) as tape:
# With real images - healthy
if tf.not_equal(percentage_healthy, self._zero):
(d_healthy, d_healthy_pred) = self.discriminator(
[x_healthy, healthy_labels],
training=True,
)
d_loss_real_healthy = -tf.reduce_mean(d_healthy) * percentage_healthy
d_loss_classification_healthy = (
self._classification_loss(
y_true=healthy_labels, y_pred=d_healthy_pred
)
* percentage_healthy
)
else:
d_loss_classification_healthy = self._zero
d_loss_real_healthy = self._zero
# With real images - ill
if tf.not_equal(percentage_ill, self._zero):
(d_ill, d_ill_pred) = self.discriminator(
[x_ill, ill_labels], training=True
)
d_loss_real_ill = -tf.reduce_mean(d_ill) * percentage_ill
d_loss_classification_ill = (
self._classification_loss(y_true=ill_labels, y_pred=d_ill_pred)
* percentage_ill
)
else:
d_loss_classification_ill = self._zero
d_loss_real_ill = self._zero
# Total loss on real images
d_loss_real = d_loss_real_ill + d_loss_real_healthy
d_loss_classification = (
d_loss_classification_ill + d_loss_classification_healthy
)
# Generate fake images:
# Add random noise to the input too
noise_variance = tf.constant(0.05)
if tf.not_equal(percentage_healthy, self._zero):
x_healthy_noisy = (
x_healthy
+ tf.random.uniform(tf.shape(x_healthy), dtype=tf.float32)
* noise_variance
)
x_fake_healthy = self.generator(
[x_healthy_noisy, healthy_labels], training=True
)
# Add noise to generated and real images - used for the losses
x_fake_healthy_noisy = (
x_fake_healthy
+ tf.random.uniform(tf.shape(x_fake_healthy), dtype=tf.float32)
* noise_variance
)
# Train with fake noisy images
(d_on_fake_healthy, _) = self.discriminator(
[x_fake_healthy_noisy, healthy_labels], training=True
)
                # Gradient penalty
                d_gradient_penalty_healthy = self.gradient_penalty(
                    x_healthy_noisy,
                    x_fake_healthy_noisy,
                    healthy_labels,
                )
else:
d_on_fake_healthy = self._zero
                d_gradient_penalty_healthy = self._zero
x_fake_healthy = self._zero
x_fake_healthy_noisy = self._zero
x_healthy_noisy = self._zero
if tf.not_equal(percentage_ill, self._zero):
x_ill_noisy = (
x_ill
+ tf.random.uniform(tf.shape(x_ill), dtype=tf.float32)
* noise_variance
)
x_fake_ill = self.generator([x_ill_noisy, ill_labels], training=True)
# Add noise to generated and real images - used for the losses
x_fake_ill_noisy = (
x_fake_ill
+ tf.random.uniform(tf.shape(x_fake_ill), dtype=tf.float32)
* noise_variance
)
# Train with fake noisy images
(d_on_fake_ill, _) = self.discriminator(
[x_fake_ill_noisy, ill_labels], training=True
)
# Gradient penalty
d_gradient_penalty_ill = self.gradient_penalty(
x_ill_noisy, x_fake_ill_noisy, ill_labels
)
else:
d_on_fake_ill = self._zero_batch
d_gradient_penalty_ill = self._zero
x_fake_ill = self._zero_batch
x_fake_ill_noisy = self._zero_batch
x_ill_noisy = self._zero_batch
d_loss_fake = (
tf.reduce_mean(d_on_fake_healthy) * percentage_healthy
+ tf.reduce_mean(d_on_fake_ill) * percentage_ill
)
# Gradient penalty to improve discriminator training stability
            d_loss_gp = d_gradient_penalty_healthy + d_gradient_penalty_ill
# Sum all the losses and compute the discriminator loss
d_loss = (
self._d_lambda_real * d_loss_real
+ self._d_lambda_fake * d_loss_fake
+ self._d_lambda_classification * d_loss_classification
+ self._d_lambda_gradient_penalty * d_loss_gp
)
            # Train the Generator every self._generator_training_steps steps performed by the discriminator
if tf.equal(
tf.math.mod(
self.d_optimizer.iterations, self._generator_training_steps
),
0,
):
# D output reduction is needed because the output is batch_size, w, h, D
if tf.not_equal(percentage_healthy, self._zero):
g_classification_loss_healthy = self._classification_loss(
y_true=healthy_labels,
y_pred=tf.reduce_mean(d_on_fake_healthy, axis=[2, 3]),
)
g_identity_loss_healthy = self._reconstruction_error(
y_true=x_healthy, y_pred=x_fake_healthy
)
g_reconstruction_loss_healthy = self._reconstruction_error(
y_true=x_healthy_noisy, y_pred=x_fake_healthy
)
else:
g_classification_loss_healthy = self._zero
g_identity_loss_healthy = self._zero
g_reconstruction_loss_healthy = self._zero
if tf.not_equal(percentage_ill, self._zero):
g_classification_loss_ill = self._classification_loss(
y_true=ill_labels,
y_pred=tf.reduce_mean(d_on_fake_ill, axis=[2, 3]),
)
g_identity_loss_ill = self._reconstruction_error(
y_true=x_ill, y_pred=x_fake_ill
)
g_reconstruction_loss_ill = self._reconstruction_error(
y_true=x_ill_noisy, y_pred=x_fake_ill
)
else:
g_classification_loss_ill = self._zero
g_identity_loss_ill = self._zero
g_reconstruction_loss_ill = self._zero
g_classification_loss = (
g_classification_loss_ill + g_classification_loss_healthy
)
# Adversarial loss
g_loss_fake = -tf.reduce_mean(d_on_fake_healthy) - tf.reduce_mean(
d_on_fake_ill
)
# Identity loss
g_identity_loss = g_identity_loss_ill + g_identity_loss_healthy
# Reconstruction loss
g_reconstruction_loss = (
g_reconstruction_loss_healthy + g_reconstruction_loss_ill
)
# Total generator loss
g_loss = (
self._g_lambda_fake * g_loss_fake
+ self._g_lambda_reconstruction * g_reconstruction_loss
+ self._g_lambda_identity * g_identity_loss
+ self._g_lambda_classification * g_classification_loss
)
else:
g_loss = self._zero
d_grads = tape.gradient(d_loss, self.discriminator.trainable_variables)
# Gradient clipping
d_grads = [self.clip_by_norm_handle_none(g, clip_norm=10) for g in d_grads]
self.d_optimizer.apply_gradients(
zip(d_grads, self.discriminator.trainable_variables)
)
if tf.equal(
tf.cast(
tf.math.mod(
self.d_optimizer.iterations - 1,
# -1 because at the previous line with d_opt.apply_gradients
# the counter increased
self._generator_training_steps,
),
tf.int32,
),
tf.constant(0, dtype=tf.int32),
):
g_grads = tape.gradient(g_loss, self.generator.trainable_variables)
# Gradient clipping
g_grads = [self.clip_by_norm_handle_none(g, clip_norm=10) for g in g_grads]
self.g_optimizer.apply_gradients(
zip(g_grads, self.generator.trainable_variables)
)
del tape
return d_loss, g_loss, x_hat
def test(self, base_path: Union[Path, None] = None):
"""Measure the performance (only measured metric is accuracy) on the
test set.
Args:
base_path: the path to use for loading the models. If None, the default is used.
"""
if not base_path:
base_path = self._log_dir / "results" / "accuracy"
# Load the best model to use as the model here
model_path = base_path / "generator"
generator = tf.keras.models.load_model(model_path)
generator.summary()
self.accuracy.reset_state()
# Get the threshold
accuracy_path = base_path / "validation.json"
with open(accuracy_path, "r") as fp:
data = json.load(fp)
threshold = data["threshold"]
# reconstruction <= threshold => normal data (label 0)
for x, y in self._dataset.test_normal.concatenate(self._dataset.test_anomalous):
self.accuracy.update_state(
y_true=y,
y_pred=tf.cast(
# reconstruction > threshold => anomalous (label 1 = cast(True))
# invoke the generator always with the normal label, since that's
# what we suppose to receive in input (and the threshold has been found
# using data that comes only from the normal distribution)
tf.math.greater(
tf.reduce_mean(
tf.math.abs(
generator(
(
x,
tf.ones(tf.shape(x)[0], dtype=tf.int32)
* self._dataset.normal_label,
),
training=False,
)
- x
),
axis=[1, 2, 3],
),
threshold,
),
tf.int32,
),
)
current_accuracy = self.accuracy.result()
tf.print("Binary accuracy on test set: ", current_accuracy)
# Create the result
result_path = self._log_dir / "results" / "accuracy" / "test.json"
result_path.parent.mkdir(parents=True, exist_ok=True)
# Write the file
with open(result_path, "w") as fp:
json.dump(
{
"accuracy": {
"value": float(current_accuracy),
"threshold": float(threshold),
}
},
fp,
)
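# --- hedged illustration (not part of the original trainer) ------------------
# Standalone sketch of the WGAN-GP style penalty that gradient_penalty() above
# computes, with a plain callable `critic` standing in for the discriminator.
# Names and shapes here are assumptions made only for illustration.
import tensorflow as tf


def wgan_gp_penalty(critic, x_real, x_fake):
    """L2 penalty (||grad D(x_hat)|| - 1)^2 on random interpolates x_hat."""
    eps = tf.random.uniform([tf.shape(x_real)[0], 1, 1, 1], 0.0, 1.0)
    x_hat = eps * x_real + (1.0 - eps) * x_fake
    with tf.GradientTape() as tape:
        tape.watch(x_hat)
        d_hat = critic(x_hat)
    grads = tape.gradient(d_hat, x_hat)
    norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return tf.reduce_mean(tf.square(norm - 1.0))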
| 40.554531
| 100
| 0.526722
|
0c36cc9a797aefdcff1a70236e50c7aaca031163
| 2,633
|
py
|
Python
|
FEP/preliminary-work/example_FEP/expanded-ensemble/create_ee_mdp.py
|
vvoelz/covid-FAH-CPU
|
1b22f0ac046d37fdcbf7c2b1b476abd35eb162c5
|
[
"MIT"
] | 1
|
2020-04-16T05:10:33.000Z
|
2020-04-16T05:10:33.000Z
|
FEP/preliminary-work/example_FEP/expanded-ensemble/create_ee_mdp.py
|
vvoelz/covid-FAH-CPU
|
1b22f0ac046d37fdcbf7c2b1b476abd35eb162c5
|
[
"MIT"
] | 7
|
2020-03-16T16:14:28.000Z
|
2020-05-16T16:05:18.000Z
|
FEP/preliminary-work/example_FEP/expanded-ensemble/create_ee_mdp.py
|
vvoelz/covid-FAH-CPU
|
1b22f0ac046d37fdcbf7c2b1b476abd35eb162c5
|
[
"MIT"
] | null | null | null |
import os, sys, stat
import subprocess
from expanded import *
###############
## Let's see if grompp can build an expanded ensemble simulation
if (0):
os.system('gmx --version')
##################
usage = """
Usage: create_ee_mdp.py [grofile] [topfile] [ndxfile] [output mdpfile]
EXAMPLE
$ python create_ee_mdp.py RUN0/npt.gro RUN0/topol.top RUN0/index.ndx RUN0/prod.mdp
"""
if len(sys.argv) < 5:
print(usage)
sys.exit(1)
# parse the input arguments
prev_jobdir = sys.argv[1]
# Gather all the setup files in this dir
grofile = sys.argv[1]
topfile = sys.argv[2]
ndxfile = sys.argv[3]
mdpfile = sys.argv[4]
# To tether the ligand to the protein, we need to have prepared an index file with
# the following atom groups, for example:
"""
[ a1-Protein ]
678
[ a2-Ligand ]
1564
[ Restraint-Distance ]
678 1564
"""
# NOTE that for our tool chain to work smoothly, we should always use these DEFAULT
# atom group names
### To create the mdpfile, we need to know these default names, *and* the distance between the atoms
GMX_BIN = '/usr/local/gromacs/bin/gmx'
# write a temp "selection.dat"
fout = open('selection.dat', 'w')
fout.write('group "Restraint-Distance"\n')
fout.close()
distance_cmd = '{GMX_BIN} distance -f {grofile} -n {ndxfile} -sf selection.dat'.format(GMX_BIN=GMX_BIN, grofile=grofile, ndxfile=ndxfile)
output = subprocess.check_output(distance_cmd, shell=True).decode("utf-8")
output_lines = output.split('\n')
atom_distance = None
for line in output_lines:
if line.count('Average') > 0:
fields = line.split()
print('fields', fields)
atom_distance = float( fields[2] ) # ['Average', 'distance:', '0.387', 'nm']
print('atom_distance is', atom_distance, 'nm')
# use the expanded_ensemble_mdpfile() class to create an initial mdpfile
e = expanded_ensemble_mdpfile(pull_group1_name = 'a1-Protein',
pull_group2_name = 'a2-Ligand',
pull_coord1_init = atom_distance)
e.write_to_filename(mdpfile)
"""
#### make the first round of simulation
jobdir = 'out1'
if not os.path.exists(jobdir):
os.mkdir(jobdir)
# grompp to make a tpr
grompp_cmd = "gmx grompp -c {grofile} -f {mdpfile} -p {topfile} -n {ndxfile} -o {jobdir}/frame0.tpr -po {jobdir}/mdout.mdp -maxwarn 1".format(grofile=grofile, mdpfile=mdpfile, topfile=topfile, ndxfile=ndxfile, jobdir=jobdir)
print('>>', grompp_cmd)
os.system(grompp_cmd)
# write a runme
runme_file = os.path.join(jobdir, 'runme')
fout = open(runme_file, 'w')
fout.write('gmx mdrun -v -s frame0.tpr\n')
fout.close()
os.chmod(runme_file, stat.S_IRWXU)
print('Wrote:', runme_file)
"""
| 28.619565
| 224
| 0.687429
|
7c99714cb0fd8bd933c503d2533005298fb07e09
| 279
|
py
|
Python
|
Chapter 07/Chap07_Example7.108.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 07/Chap07_Example7.108.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 07/Chap07_Example7.108.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
mys1 = {1,2,3,4}
mys2 = {1,2,3,4,5,6}
print(mys2.issuperset(mys1)) # ISUP1
mys3 = {'a','b','c','d'}
mys4 = {'d','w','f','g'}
mys5 = {'a','b', 'c', 'd','v','w','x','z'}
print(mys3.issuperset(mys4)) # ISUP2
print(mys4.issuperset(mys5)) # ISUP3
print(mys5.issuperset(mys3)) # ISUP4
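# --- hedged note (not part of the original example) ---------------------------
# Expected output, by plain set semantics:
#   ISUP1 -> True   (every element of mys1 is in mys2)
#   ISUP2 -> False  ('w', 'f' and 'g' are missing from mys3)
#   ISUP3 -> False  (mys5 has elements that are not in mys4)
#   ISUP4 -> True   (mys5 contains 'a', 'b', 'c' and 'd')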
| 27.9
| 42
| 0.566308
|