blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7c0653eb61fae0c91e47ce16d21e2ac393c24afa
|
67dd0d9d548c52b2a387d011b1c7f8055d84e7c7
|
/functions/preprocessing.py
|
efbdc9261b1c0a17fc8fce302f3b1cf2ca27c6a2
|
[] |
no_license
|
jasperhajonides/NLP_data_extraction
|
bbf68cde63e88833f7aac9d0f9d06b4318b01740
|
32a33ccfa51948e5a1dfc16954085e58f18ffa74
|
refs/heads/master
| 2023-08-21T23:23:39.879475
| 2021-10-14T14:14:50
| 2021-10-14T14:14:50
| 412,777,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,721
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# preprocessing.py
"""
Contains functions to obtain company information and to pre-process
text for metric extraction pipelines.
"""
import re
import spacy
import pandas as pd
from spacy.matcher import Matcher
from spacy.matcher import PhraseMatcher
from cleaning import parse_file,toy_clean,rep_char_search
nlp = spacy.load("en_core_web_sm")
matcher=Matcher(nlp.vocab)
def define_company_dictionary(path):
"""Returns dictionary of company information
Args:
path(str): Company's report path
Returns:
company_info (dict): Dictionary with
keys=['name', 'filename', 'type', 'year', 'clean_text']
"""
filename = path.split('/')[-1]
company_name = ' '.join(filename.split('_Annual_Report_')[0].split('_'))
year = filename.split('_Annual_Report_')[1].split('.')[0]
type_of_report = 'Annual Report'
pdf_dict = parse_file(path)
cleaned_text = toy_clean(pdf_dict, table_contents =True)
cleaned_text = rep_char_search(cleaned_text)
company_info_dict = {'name' : company_name,
'filename' : filename,
'type' : type_of_report,
'year' : year,
'clean_text': cleaned_text,
}
return company_info_dict
def select_pages(page_dict, metric):
"""Returns dictionary with selected pages of text containing
keywords or expressions for a given metric
Args:
page_dict (dict): Dictionary with keys=['page'] values=text_list
metric (str or list):
If metric is a string it loads in a predefined set of keywords.
Desired metric with possible values:
'n_employees',
'ceo_pay_ratio',
'ltifr',
'trifr',
'n_contractors',
'company_gender_diversity',
'board_gender_diversity',
'n_fatalities',
'company_ethnic_diversity',
'board_ethnic_diversity',
'international_diversity',
'healthcare',
'parental_care'
If metric is a list of strings, this list of keywords will then be
used to select relevant pages.
Returns:
selected_dict: Dictionary with keys=['page'] values=text_list
where the pages are the ones containing
keywords regarding a specific metric
"""
selected_dict ={}
if metric == 'n_employees':
keywords =['employ', 'team', 'workforce', 'colleague', 'staff', 'headcount']
elif metric == 'n_contractors':
keywords = ['contractor']
elif metric in ['company_gender_diversity','board_gender_diversity']:
keywords = ['female','divers','gender','male','women']
elif metric == 'n_fatalities':
keywords = ['fatal', 'died', 'death', 'mortal', 'casualt', 'dead']
elif metric in ['company_ethnic_diversity','board_ethnic_diversity']:
keywords = ['divers','representation','ethnic','bame']
elif metric == 'international_diversity':
keywords =['national','diversity','countr']
elif metric == 'healthcare':
keywords =['healthcare', 'health', 'life', 'medical']
elif metric == 'ceo_pay_ratio':
keywords = ['pay ratio', 'ratios','pay', 'ceo pay', 'remuneration']
elif metric == 'parental_care':
keywords = ["mother", 'maternity','paternity', 'family']
elif metric == 'ltifr':
keywords = ["accidents", "injury", "ltifr", "lost-time injury", "lost time injury"]
elif metric == 'trifr':
keywords = ["trir", "injury", 'trcfr', "trifr", 'triir', 'trif', "total recordable",
"total reportable"]
elif isinstance(metric, list):
        # If the keywords are defined externally, use metric as the list of keywords
keywords = metric
else:
raise ValueError(f'The metric {metric} is not implemented.')
# Now use the keywords to search the document and select pages.
page_list = []
for page,text_list in page_dict.items():
text = ' '.join(text_list).lower()
for keyword in keywords:
            if (re.search(keyword, text) is not None) and (page not in page_list):
page_list.append(page)
selected_dict={page:page_dict[page] for page in page_list}
return selected_dict
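# Illustrative usage sketch (hypothetical data, not part of the pipeline):
# passing a custom keyword list selects only the pages whose text matches it.
#   pages = {1: ["Our workforce grew."], 2: ["Revenue was up."]}
#   select_pages(pages, ['workforce'])   # -> {1: ["Our workforce grew."]}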
def run_nlp(page_dict):
"""Runs nlp pipe and to selected pages of text
dictionary.
Args:
page_dict (dict): Dictionary with keys=['page'] values=text_list
Returns:
nlp_dict (dict): Dictionary with
keys=['page'], value = nlp_text
"""
nlp_dict = {page:list(nlp.pipe(text_list)) for page, text_list in page_dict.items()}
return nlp_dict
def pattern_definition(metric):
"""
Define patterns to be used on SpaCy matchers for filtering pages in the
document for each of the desired metrics.
Args:
metric (str): Desired metric with possible values:
'n_employees',
'ceo_pay_ratio',
'ltifr',
'trifr',
'n_contractors',
'company_gender_diversity',
'board_gender_diversity',
'n_fatalities',
'company_ethnic_diversity',
'board_ethnic_diversity',
'international_diversity',
'healthcare',
'parental_care'
Returns:
patterns (list): Patterns to feed to a SpaCy Matcher or PhraseMatcher
object.
"""
if metric == 'n_employees':
employee_synonyms = ["employee",
"people",
"person",
"colleague",
"team",
"staff",
"full-time",
"fte"]
pattern1=[
{"LEMMA": {"IN": ["total", "average"]}},
{"OP":"?"},
{"OP":"?"},
{"LEMMA": "number", "OP":"?"},
{"LEMMA": "of", "OP":"+"},
{"LEMMA": {"IN": employee_synonyms}},
]
pattern2=[
{"LEMMA": {"IN": ["employ", "hire"]}},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"ENT_TYPE":"CARDINAL","OP":"+"},
{"LEMMA": {"IN": employee_synonyms}},
{"POS":"NOUN","OP":"?"}
]
pattern3 =[
{"LEMMA": {"IN": ["the", "our", "with", "have"]}},
{"POS": "ADP", "OP":"?"},
{"POS": "ADP", "OP":"?"},
{"ENT_TYPE":"CARDINAL","OP":"+"},
{"LEMMA": {"IN": employee_synonyms + ["headcount", "workforce"]}}
]
pattern4 =[
{"LEMMA": {"IN": ["total", "average"]}},
{"LEMMA": {"IN": employee_synonyms + ["headcount", "workforce"]}}
]
pattern5 = [
{"LEMMA":{"IN":["with","have"]}},
{"LEMMA": {"IN":["headcount","team", "workforce", "staff"]}},
{"LEMMA": "of"},
{"ENT_TYPE": "CARDINAL"}
]
pattern6 = [
{"LEMMA": "there"},
{"LEMMA": "be"},
{"ENT_TYPE":"CARDINAL"},
{"LEMMA": {"IN": employee_synonyms}}
]
pattern7 = [
{"ENT_TYPE": "CARDINAL"},
{"LEMMA": {"IN": employee_synonyms}},
{"LEMMA": "work"}
]
patterns = [pattern1, pattern2, pattern3, pattern4, pattern5, pattern6, pattern7]
elif metric == 'n_contractors':
pattern1 = [
{"ENT_TYPE": {"IN": ["CARDINAL", "PERCENT"]}},
{"LEMMA": "employee", "OP":"!"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"LEMMA": "contractor" }
]
pattern2 = [
{"LEMMA": "contractor" },
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"ENT_TYPE": {"IN": ["CARDINAL", "PERCENT"]}}
]
pattern3 = [
{"LEMMA": "number"},
{"LEMMA": "employee", "OP":"!"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"LEMMA": "contractor" }
]
patterns =[pattern1, pattern2, pattern3]
elif metric in ['company_gender_diversity','board_gender_diversity']:
pattern1=[
{"LOWER":"gender","OP":"+"},
{"LOWER":{"IN":["diversity","equality","balance","split","breakdown"]},"OP":"+"}
]
pattern2=[
{"LOWER":{"IN":["by","across"]},"OP":"+"},
{"LOWER":"gender","OP":"+"}
]
pattern3 = [
{"LIKE_NUM":True, "OP":"+"},
{"POS":"SYM", "OP":"*"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"LEMMA":{"IN":["female"]}}
]
patterns = [pattern1,pattern2,pattern3]
elif metric == 'n_fatalities':
pattern1=[
{"LEMMA": {"IN": ["tragically", "tragic", "sadly", "regret", " regrettably"]}},
{"OP":"*"},
{"LEMMA": "fatality"},
]
pattern2=[
{"ENT_TYPE":"CARDINAL","OP":"+"},
{"OP":"*"},
{"LEMMA": {"IN": ["fatality", "die"]}},
]
pattern3 =[
{"LEMMA": "number"},
{"OP":"*"},
{"LEMMA": {"IN": ["fatality", "die", "death", "casualty"]}}
]
pattern4 = [
{"LEMMA": "safety"},
{"OP":"*"},
{"LEMMA": {"IN": ["fatality", "die", "death", "casualty"]}}
]
patterns = [pattern1, pattern2, pattern3, pattern4]
elif metric in ["company_ethnic_diversity","board_ethnic_diversity"]:
pattern0 = [
{"LEMMA":{"IN":["ethnic","bame","non-white","ethnically","race","racial","minority"]}},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"LIKE_NUM":True, "OP":"+"},
{"POS":"SYM", "OP":"?"}
]
pattern1 = [
{"LIKE_NUM":True, "OP":"+"},
{"POS":"SYM", "OP":"*"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"LEMMA":{"IN":["ethnic","bame","non-white","ethnically","race","racial","minority"]}}
]
pattern2 = [
{"LOWER":"black"},
{"LOWER":"asian"},
{"LOWER":"and","OP":"?"},
{"LOWER":"minority"},
{"LEMMA":"ethnic"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"LIKE_NUM":True, "OP":"+"},
{"POS":"SYM", "OP":"?"}
]
pattern3 = [
{"LIKE_NUM":True, "OP":"+"},
{"POS":"SYM", "OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"LOWER":"black"},
{"LOWER":"asian"},
{"LOWER":"and","OP":"?"},
{"LOWER":"minority"},
{"LEMMA":"ethnic"}
]
pattern4 = [
{"LOWER":"of"},
{"LOWER":{"IN":["colour","color"]}},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"LIKE_NUM":True, "OP":"+"},
{"POS":"SYM", "OP":"*"}
]
pattern5 = [
{"LIKE_NUM":True, "OP":"+"},
{"POS":"SYM", "OP":"*"},
{"OP":"?"},
{"OP":"?"},
{"OP":"?"},
{"LOWER":"of"},
{"LOWER":{"IN":["colour","color"]}},
]
pattern6 = [
{"LOWER":"board","OP":"?"},
{"LEMMA": {"IN":["ethnic","ethnicity"]}},
{"OP":"?"},
{"OP":"?"},
{"TEXT": {"REGEX": "([Dd]iversity|)"}}
]
pattern7 = [
{"LOWER":"non","OP":"?"},
{"POS":"SYM", "OP":"?"},
{"LOWER":"bame"},
{"OP":"?"},
{"LIKE_NUM":True, "OP":"?"},
{"POS":"SYM", "OP":"?"}
]
pattern8 = [
{"LOWER":"parker"},
{"LOWER":"review"}
]
patterns = [pattern0, pattern1, pattern2, pattern3, pattern4,
pattern5, pattern6, pattern7, pattern8]
elif metric == 'international_diversity':
pattern1=[
{"ENT_TYPE":"CARDINAL","OP":"?"},
{"LEMMA": {"IN": ["nationality", "country"]}, "OP":"+"},
{"LEMMA": {"IN": ["represent", "serve", "employ"]}, "OP":"+"}
]
pattern2=[
{"LEMMA": {"IN": ["represent", "serve", "employ"]}, "OP":"+"},
{"ENT_TYPE":"CARDINAL","OP":"+"},
{"LEMMA": {"IN": ["nationality", "country"]}, "OP":"+"},
]
pattern3=[
{"LOWER":{"IN": ["by", "across"]},"OP":"+"},
{"LEMMA": {"IN": ["nationality"]},"OP":"+"}
]
pattern4=[
{"LOWER": "nationality","OP":"+"},
{"IS_PUNCT": True, "OP": "?"},
{"ENT_TYPE":{"IN": ["NORP", "GPE", "CARDINAL"]}, "OP":"+"}
]
pattern5=[
{"LOWER": {"IN": ["board", "employee", "executive", "non-executive",
"colleague", "workforce", "committee"]}, "OP":"+"},
{"OP":"*"},
{"LEMMA": {"IN": ["nationality"]}, "OP":"+"}
]
patterns = [pattern1,pattern2,pattern3,pattern4,pattern5]
elif metric == 'healthcare':
pattern1 = [
{"LOWER":{"IN":["medical","health","healthcare","health care",
"life"]}, "OP":"+"},
{"LOWER":{"IN":["insurance","benefits","scheme","assurance",
"cover","protection"]}, "OP":"+"},
{"LOWER":{"NOT_IN":["limited","business","no","co","ltd","risk",
"liabilites","company","products"]}, "OP":"+"}
]
patterns = [pattern1]
elif metric == 'parental_care':
phrases = ["mother","maternity", "paternity", "parental", "family care",
"family benefits"]
patterns = [nlp(text) for text in phrases]
elif metric == 'ltifr':
phrases = ['Lost-time injury frequency rate', 'Lost time injury frequency rate',
'Lost time injury and illness rate', 'LTIFR', 'LTIIR']
patterns = [nlp(text) for text in phrases]
elif metric == 'trifr':
phrases = ["TOTAL RECORDABLE INJURY FREQUENCY RATE",'TRIFR',
'Total Recordable Frequency Rate', 'TRFR', 'Total recordable injury frequency',
'TRIF','Total reportable injury (TRI) rate', 'Total recordable case frequency',
'TRCF','Total recordable case frequency rate', 'TRCFR',
'Total recordable injury and illness rate', 'TRIIR','Total Recordable Incident Rate',
'TRIR', 'Total recordable case rate', 'TRCR']
patterns = [nlp(text) for text in phrases]
elif metric == 'ceo_pay_ratio':
phrases = ["pay ratio",'pay ratios', "ceo pay", "chief executive pay",
"renumeration","pay for the ceo"]
patterns = [nlp.make_doc(text) for text in phrases]
else:
raise ValueError(f'The metric {metric} is not implemented.')
return patterns
def define_matcher(patterns, matcher_type='text'):
"""Returns word matcher for a given metric.
Args:
patterns (list):
... list of lists can be obtained using pattern_definition()
or can be defined manually.
matcher_type (str): Takes values
'text': defines normal Matcher instance with given patterns,
'phrase': defines PhraseMatcher instance with given patterns
Returns:
if matcher_type='text'
matcher (Matcher): Token Matcher object matching patterns
for the chosen metric
if matcher_type='phrase'
matcher (PhraseMatcher): PhraseMatcher object matching patterns
for the chosen metric
"""
if not isinstance(patterns, list):
raise ValueError('Input should be a list of spaCy patterns.')
if matcher_type == 'text':
matcher = Matcher(nlp.vocab)
elif matcher_type == 'phrase':
matcher = PhraseMatcher(nlp.vocab, attr='LOWER')
else:
raise ValueError(f'{matcher_type} is not a valid matcher type')
# loop over patterns and add them to the matcher
    for i, pattern in enumerate(patterns):
        matcher.add("ID_pattern{}".format(i), [pattern])
return matcher
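# Illustrative usage sketch (hypothetical sentence; assumes the module-level
# `nlp` pipeline defined above):
#   patterns = pattern_definition('n_contractors')
#   matcher = define_matcher(patterns, matcher_type='text')
#   doc = nlp("We employed 120 contractors last year.")
#   spans = [doc[start:end].text for _, start, end in matcher(doc)]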
def create_match_dataframe(path, metric):
"""Creates dataframe with possible matches of the desired metric.
Args:
path (str): Reports path
metric: desired metrics taking values between:
n_employees',
'ltifr',
...
matcher_type (str): Takes values
'text': defines normal Matcher instance with given patterns,
'phrase': defines PhraseMatcher instance with given patterns
Returns:
match_dataframe: Dataframe with columns=['string', 'page']
containing the matched sentences from a given metric
and respective page number
"""
company_info = define_company_dictionary(path)
selected_text = select_pages(company_info['clean_text'], metric)
nlp_text = run_nlp(selected_text)
# Define pattern
pattern = pattern_definition(metric)
# Look for metrics that use token matcher
if metric in ["n_employees", "n_contractors","n_fatalities",
"company_ethnic_diversity","board_ethnic_diversity",
"company_gender_diversity", "board_gender_diversity",
"international_diversity", "healthcare"]:
matcher_type="text"
# Look for metrics that use PhraseMatcher
elif metric in ["parental_care", "ltifr", 'trifr']:
matcher_type="phrase"
    # If the metric is unspecified, default to PhraseMatcher
else:
matcher_type="phrase"
matcher = define_matcher(pattern, matcher_type)
    # Run the matcher over the selected text and collect the matches into a
    # dataframe for the given metric.
# The next step would be to apply the binary classifier of the metric
# to the text contained in this dataframe.
filenames = []
names = []
years = []
match_pages = []
match_text = []
match_strings = []
entity_starts =[]
entity_ends=[]
for page, text_list in nlp_text.items():
for text in text_list:
match = matcher(text)
if len(match)>0:
match_id, start, end = match[0]
span = text[start:end]
filenames.append(company_info['filename'])
names.append(company_info['name'])
years.append(company_info['year'])
match_text.append(text.text)
match_strings.append(span.text)
match_pages.append(page)
entity_starts.append(start)
entity_ends.append(end)
match_dataframe=pd.DataFrame({'filename': filenames,
'name': names,
'year': years,
'text':match_text,
'match_string': match_strings,
'page':match_pages,
'start': entity_starts,
'end': entity_ends})
return match_dataframe
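# Illustrative call (hypothetical report path; assumes the
# '<Company>_Annual_Report_<year>.pdf' naming convention expected by
# define_company_dictionary):
#   df = create_match_dataframe('reports/Acme_Annual_Report_2020.pdf', 'n_employees')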
def predict_best_page(dfr):
""" This function finds the page with the best predictions
for the target variable.
input
-----
dfr: pd.DataFrame
containing rows of different phrases and columns for:
-page numbers of these phrases ('page')
-likelihood estimations ('likelihood')
returns
-----
selected_pages: list
        a binary list with one entry per sentence (row)
Jasper Hajonides 30082021
"""
    # Find the page with the best predictions based on likelihood
indices_dfr = dfr.loc[dfr['likelihood'] > 0.6, :].groupby('page').agg(
page_count = ("page", 'count')).join(dfr.groupby('page').agg(
likelihood=("likelihood", 'max')))
# sort by likelihood
indices_dfr = indices_dfr.reset_index().sort_values(by='likelihood',ascending=False).reset_index()
    # Count the ratio of occurrences per page
    indices_dfr['ratio_of_occurrences'] = indices_dfr['page_count']/indices_dfr['page_count'].sum()
    # Page with a good likelihood and the highest page count.
top_index = indices_dfr.loc[(indices_dfr['likelihood'] > .6) &
(indices_dfr['page_count'] == indices_dfr['page_count'].max()),
'index']
    # If multiple pages predict well, we take the first result.
if len(top_index) > 0:
        # Select the best page (likelihood > .6, highest match count) and flag its sentences.
dfr_filt = indices_dfr.loc[(indices_dfr['likelihood'] > .6) &
(indices_dfr['page_count'] == indices_dfr['page_count'].max())
].head(1).reset_index()
        convert = map(int, (dfr['page'] == int(dfr_filt['page'].iloc[0])) & (dfr['likelihood'] > .6))
selected_pages = list(convert)
else:
selected_pages = [0]*len(dfr)
return selected_pages
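# Illustrative example (hypothetical dataframe):
#   dfr = pd.DataFrame({'page': [3, 3, 7], 'likelihood': [0.9, 0.7, 0.2]})
#   predict_best_page(dfr)   # -> [1, 1, 0]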
def page_highest_occurance(dfr):
""" This function finds the page that is most frequently occuring in dataframe
input
-----
dfr: pd.DataFrame
containing rows of different phrases and columns for:
-page numbers of these phrases ('page')
returns
-----
selected_pages: list
        a binary list with one entry per sentence (row)
Jasper Hajonides 31082021
"""
    # Get the number of page occurrences
indices_dfr = dfr.groupby('page').agg(
page_count = ("page", 'count')).reset_index()
#page with highest page count.
top_index = indices_dfr.loc[(indices_dfr['page_count'] == indices_dfr['page_count'].max()),
'page'].reset_index()
    # If multiple pages tie for the highest count, keep them all.
page = list(top_index.loc[:,'page'])
    # Find phrases in the original dataframe that occur on the most frequent page
    dfr['highest_occurring_page'] = dfr['page'].isin(page)
    convert = map(int, dfr['highest_occurring_page'])
return list(convert)
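# Illustrative example (hypothetical dataframe):
#   dfr = pd.DataFrame({'page': [3, 3, 7]})
#   page_highest_occurance(dfr)   # -> [1, 1, 0]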
|
[
"38157436+jasperhajonides@users.noreply.github.com"
] |
38157436+jasperhajonides@users.noreply.github.com
|
e84f19b9c8b1db3788021d59bdc4e9e6c8cb1ff2
|
5ecb37b8af50db1dc56a67c66cf0a9760ac941b4
|
/shuffle-list.py
|
b6038784978b19c107d1fca9587dde12d3cfdbdb
|
[] |
no_license
|
vipul-royal/a9
|
9f3796d46693e1531edac95c54827f841787aedf
|
5b72a084394f4ebbc26696dac019517777b322c0
|
refs/heads/master
| 2022-11-11T12:15:37.597523
| 2020-07-01T08:35:37
| 2020-07-01T08:35:37
| 276,320,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63
|
py
|
import random
List=[1,2,3,4,5]
random.shuffle(List)
print(List)
|
[
"noreply@github.com"
] |
vipul-royal.noreply@github.com
|
1a88864659f29226f17cd86959864729da92d13f
|
597c4f48332251552a602122bb3d325bc43a9d7f
|
/chapter02_hard_to_think/02_make_n_array/05_access_with_index.py
|
425284847bd1098e7e4bbcc95facbdfb91070525
|
[] |
no_license
|
Kyeongrok/python_algorithm
|
46de1909befc7b17766a57090a7036886361fd06
|
f0cdc221d7908f26572ae67b5c95b12ade007ccd
|
refs/heads/master
| 2023-07-11T03:23:05.782478
| 2023-06-22T06:32:31
| 2023-06-22T06:32:31
| 147,303,654
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
string = "ABCDEDE"
array = []
for i in range(len(string)):
array.append(0)
print(array, len(array))
print("index 0:",array[0])
print("index 1:",array[1])
|
[
"oceanfog1@gmail.com"
] |
oceanfog1@gmail.com
|
cf72eebe1eb66416c14fca98d83ac5a50194f49d
|
1c76418fee90f80368f2f843007ebd6a37bfc01f
|
/RunSelections.py
|
c40316494b127efd3c152302c19fdb7029587208
|
[] |
no_license
|
SyntaxVoid/HighRedshiftGalaxyFinder
|
e5dfb244bbba53c310de9b7fe414990b04bcb3a0
|
83fad048e37d65a1a7c98727c0d4164c8e84922a
|
refs/heads/master
| 2021-01-20T21:59:06.211431
| 2015-11-19T04:24:58
| 2015-11-19T04:24:58
| 42,703,816
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,198
|
py
|
import time
from SelectionCriteria import *
from GLOBALS import *
from libs.jtools import write_table
def stats(drop,name,num_obj,lst):
print(" #{:5d} objects out of {:5d} met the {:4s} dropout criteria from {}".format(len(lst),num_obj,drop,name))
def run(catalog,destination,header,col_dict,rms):
print("\n" + "="*80)
print("="*25 + "Now applying Selection Criteria"+ "="*24)
print("="*80 + "\n")
t1 = time.time()
dir_str = destination + "{}/{}.cat"
rms_or_none = "RMS" if rms else "NONE"
out = [[],[],[],[],[],[],[],[]]
with open(catalog,'r') as cat_file:
cat_lines = cat_file.readlines()
temp = 0
for line in cat_lines:
if line[0] == "#":
temp += 1
continue
if b435_dropout(line,col_dict):
out[0].append(line.split())
if i775_dropout(line,col_dict):
out[1].append(line.split())
if v606_dropout(line,col_dict):
out[2].append(line.split())
if z4(line,col_dict):
out[3].append(line.split())
if z5(line,col_dict):
out[4].append(line.split())
if z6(line,col_dict):
out[5].append(line.split())
if z7(line,col_dict):
out[6].append(line.split())
if z8(line,col_dict):
out[7].append(line.split())
num_objects = len(cat_lines) - temp
for n,s in enumerate(SELECTIONS):
name = "Stark et. al." if "z" not in s else "Bouwen et. al."
stats(s,name,num_objects,out[n])
write_table(out[n],header,dir_str.format(rms_or_none,s))
t2 = time.time()
print("#Catalogs written to: " + destination + rms_or_none + "/")
print("#Time Elapsed: {:.2f} seconds".format(t2-t1))
print("\n" + ("\n" + "="*80)*3)
return out
if __name__ == '__main__':
run("masterNONE.cat","SelectedObjects/Mine/",header,MASTER_COL_DICT,False)
run("masterRMS.cat","SelectedObjects/Mine/",header,MASTER_COL_DICT,True)
run("Candels_Catalog/CANDELS.GOODSS.F160W.v1_1.photom.cat","SelectedObjects/Candels/",candels_header,MASTER_CANDELS_DICT,False)
|
[
"j.gresl12@gmail.com"
] |
j.gresl12@gmail.com
|
99e84d87c32a484c39ba9f6e7de023e400f63234
|
bceda83f0c4932d95c27f4c04909e6f921debb5d
|
/createModelUi_.py
|
2825cc13bdf23abc54587f8e99c7ad2891814f99
|
[] |
no_license
|
JayDesai007/temp
|
104e5a307ab2e2b0992d9b59c6132dea0cc8ce5e
|
c579e3e3ec6f4440496a86465d98ad94b000e579
|
refs/heads/master
| 2023-01-03T22:49:54.288173
| 2020-10-26T05:37:48
| 2020-10-26T05:37:48
| 283,546,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,655
|
py
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_crateModel(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(604, 498)
self.verticalLayout_3 = QtWidgets.QVBoxLayout(Form)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.modelNameLabel = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.modelNameLabel.setFont(font)
self.modelNameLabel.setObjectName("modelNameLabel")
self.horizontalLayout.addWidget(self.modelNameLabel)
self.modelNameLineEdit = QtWidgets.QLineEdit(Form)
self.modelNameLineEdit.setObjectName("modelNameLineEdit")
self.horizontalLayout.addWidget(self.modelNameLineEdit)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.imageNameLabel = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.imageNameLabel.setFont(font)
self.imageNameLabel.setObjectName("imageNameLabel")
self.horizontalLayout_4.addWidget(self.imageNameLabel)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.imageListWidget = QtWidgets.QListWidget(Form)
self.imageListWidget.setObjectName("imageListWidget")
self.verticalLayout.addWidget(self.imageListWidget)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.imageAddButton = QtWidgets.QPushButton(Form)
self.imageAddButton.setObjectName("imageAddButton")
self.horizontalLayout_2.addWidget(self.imageAddButton)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.imageDeleteButton = QtWidgets.QPushButton(Form)
self.imageDeleteButton.setObjectName("imageDeleteButton")
self.horizontalLayout_2.addWidget(self.imageDeleteButton)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_7.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.xmlNameLabel = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.xmlNameLabel.setFont(font)
self.xmlNameLabel.setObjectName("xmlNameLabel")
self.horizontalLayout_6.addWidget(self.xmlNameLabel)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem3)
self.verticalLayout_2.addLayout(self.horizontalLayout_6)
self.xmlListWidget = QtWidgets.QListWidget(Form)
self.xmlListWidget.setLineWidth(5)
self.xmlListWidget.setObjectName("xmlListWidget")
self.verticalLayout_2.addWidget(self.xmlListWidget)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.xmlAddButton = QtWidgets.QPushButton(Form)
self.xmlAddButton.setObjectName("xmlAddButton")
self.horizontalLayout_3.addWidget(self.xmlAddButton)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem4)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem5)
self.xmlDeleteButton = QtWidgets.QPushButton(Form)
self.xmlDeleteButton.setObjectName("xmlDeleteButton")
self.horizontalLayout_3.addWidget(self.xmlDeleteButton)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.horizontalLayout_7.addLayout(self.verticalLayout_2)
self.verticalLayout_3.addLayout(self.horizontalLayout_7)
self.line = QtWidgets.QFrame(Form)
self.line.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setUnderline(False)
font.setWeight(75)
font.setStrikeOut(False)
self.line.setFont(font)
self.line.setFrameShadow(QtWidgets.QFrame.Raised)
self.line.setLineWidth(5)
self.line.setMidLineWidth(5)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setObjectName("line")
self.verticalLayout_3.addWidget(self.line)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem6)
self.nextButton = QtWidgets.QPushButton(Form)
self.nextButton.setObjectName("nextButton")
self.horizontalLayout_5.addWidget(self.nextButton)
self.cancleButton = QtWidgets.QPushButton(Form)
self.cancleButton.setObjectName("cancleButton")
self.horizontalLayout_5.addWidget(self.cancleButton)
self.verticalLayout_3.addLayout(self.horizontalLayout_5)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.modelNameLabel.setText(_translate("Form", "Model Name :"))
self.imageNameLabel.setText(_translate("Form", "Image File :"))
self.imageAddButton.setText(_translate("Form", "Add"))
self.imageDeleteButton.setText(_translate("Form", "Delete"))
self.xmlNameLabel.setText(_translate("Form", "XML File:"))
self.xmlAddButton.setText(_translate("Form", "Add"))
self.xmlDeleteButton.setText(_translate("Form", "Delete"))
self.nextButton.setText(_translate("Form", "Next"))
        self.cancleButton.setText(_translate("Form", "Cancel"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
EVALUATETeam = QtWidgets.QWidget()
ui = Ui_crateModel()
ui.setupUi(EVALUATETeam)
EVALUATETeam.show()
sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
JayDesai007.noreply@github.com
|
7ed130e1399db9ad05d646f9c77379b76bb3a328
|
8ccccc791635a2765394e2c15657249b7bcd33c0
|
/__init__.py
|
2250fa2650613daae0811236fa7bdcc1ba8a6a86
|
[] |
no_license
|
dominic-dimico/notebook
|
a0942cfea021f6531a6af4179388c6566ca208ae
|
b57633b4ea9c6a6112eb806fc5a4d95e4615a1f1
|
refs/heads/master
| 2021-11-09T16:41:24.363825
| 2021-10-11T00:54:57
| 2021-10-11T00:54:57
| 166,492,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27
|
py
|
from notebook import notes
|
[
"dominic.dimico@gmail.com"
] |
dominic.dimico@gmail.com
|
48e4e5a6a3a110cf5e6d4e4a3fc9b4ee816b1c7d
|
4b5564821a413483e4c2bb007be4c5afb62e5822
|
/Others/codes/Unclassified_003 13. Roman to Integer.py
|
bd7c632c7f1b34fb2c59da93fb458289d32da45b
|
[] |
no_license
|
vabbybansal/Java
|
7225538066a4888fe9d688f81b895772e8422445
|
0b3afabc6c309cd230a7edfa220998983f5a9e45
|
refs/heads/master
| 2020-05-17T15:50:34.220343
| 2020-04-10T04:18:08
| 2020-04-10T04:18:08
| 183,803,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
# Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
#
# Symbol Value
# I 1
# V 5
# X 10
# L 50
# C 100
# D 500
# M 1000
# For example, two is written as II in Roman numeral, just two one's added together. Twelve is written as, XII, which is simply X + II. The number twenty seven is written as XXVII, which is XX + V + II.
#
# Roman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII. Instead, the number four is written as IV. Because the one is before the five we subtract it making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used:
#
# I can be placed before V (5) and X (10) to make 4 and 9.
# X can be placed before L (50) and C (100) to make 40 and 90.
# C can be placed before D (500) and M (1000) to make 400 and 900.
# Given a roman numeral, convert it to an integer. Input is guaranteed to be within the range from 1 to 3999.
class Solution(object):
def romanToInt(self, s):
"""
:type s: str
:rtype: int
"""
lookUp = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000,
'IV': 4,
'IX': 9,
'XL': 40,
'XC': 90,
'CD': 400,
'CM': 900
}
i = 0
num = 0
while i < len(s):
# check for two digits
if i+1 < len(s):
if s[i: i+2] in lookUp:
num += lookUp[s[i: i+2]]
i += 2
continue
num += lookUp[s[i]]
i += 1
return num
obj = Solution()
print(obj.romanToInt("MMMXLV"))  # 3045
|
[
"vaibhavb@uber.com"
] |
vaibhavb@uber.com
|
83f0d591575f60700000456b9573bf6b3ac98c1a
|
3a1ea6d22733096f2a0745d9d79a40ef68f27a3a
|
/StonksStreamlit/show_tickers.py
|
a868aed87889543b1910a71285218d3569ce3060
|
[] |
no_license
|
foorenxiang/stonks
|
9a055989dcad92cb9d457b1ed7901eb6386c4caa
|
6560f198db4d35c7ff3f8ebe488a46c0ce69cb1b
|
refs/heads/main
| 2023-05-31T15:31:19.108410
| 2021-05-02T06:08:14
| 2021-05-02T06:08:14
| 342,834,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
import yfinance as yf
import streamlit as st
class ShowTickers:
tickerSymbols = set()
@classmethod
def show_tickers(cls, tickerSymbols={}):
if not tickerSymbols:
st.write("### Please add ticker symbols to tickerSymbols.py to continue...")
cls.tickerSymbols = tickerSymbols
for tickerSymbol in cls.tickerSymbols:
tickerData = yf.Ticker(tickerSymbol)
tickerDF = tickerData.history(period="1y")
            if not (tickerDF.Close.empty or tickerDF.Volume.empty):
st.write(f"## {tickerSymbol}")
st.line_chart(tickerDF.Close)
st.write(f"{tickerSymbol} Closing Prices")
st.line_chart(tickerDF.Volume)
st.write(f"{tickerSymbol} Volume\n\n\n")
else:
st.write(f"{tickerSymbol} is not a valid symbol on Yahoo Finance!!")
|
[
"foorenxiang@gmail.com"
] |
foorenxiang@gmail.com
|
858e1b1a75af0e6a90e46ff1a6acd5c9f11aa181
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4446/codes/1670_2966.py
|
496840b47310b8dfdbcdc1f6ddf7374ba6acfa67
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
m=input("para mulheres: ")
p=float(input("valor do ingresso: "))
quant=int(input("quantidade de ingressos: "))
if m.upper()=="S":
print(round( p * quant , 2))
else:
print(tot)
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
521e068611c8bfa84d4b1f26b049b105d725a73c
|
0b1621d8590a89b1cd285ee0e73b65b1ba7a24a9
|
/aiohttp_pydantic/oas/view.py
|
4aab93f84ed4a49051a982b08ed272264b6f364f
|
[
"MIT"
] |
permissive
|
spinenkoia/aiohttp-pydantic
|
a572f3c4accb8ca855592f3b56114f4736b86ab0
|
7492af5acf20c4d325ec863b9987f6107a05abd7
|
refs/heads/main
| 2023-03-31T13:23:35.160988
| 2021-03-27T11:34:39
| 2021-03-27T11:45:19
| 354,247,925
| 0
| 0
|
MIT
| 2021-04-03T09:15:16
| 2021-04-03T09:15:15
| null |
UTF-8
|
Python
| false
| false
| 6,902
|
py
|
import typing
from inspect import getdoc
from itertools import count
from typing import List, Type
from aiohttp.web import Response, json_response
from aiohttp.web_app import Application
from pydantic import BaseModel
from aiohttp_pydantic.oas.struct import OpenApiSpec3, OperationObject, PathItem
from . import docstring_parser
from ..injectors import _parse_func_signature
from ..utils import is_pydantic_base_model
from ..view import PydanticView, is_pydantic_view
from .typing import is_status_code_type
def _handle_optional(type_):
"""
Returns the type wrapped in Optional or None.
>>> from typing import Optional
>>> _handle_optional(int)
>>> _handle_optional(Optional[str])
<class 'str'>
"""
if typing.get_origin(type_) is typing.Union:
args = typing.get_args(type_)
if len(args) == 2 and type(None) in args:
return next(iter(set(args) - {type(None)}))
return None
class _OASResponseBuilder:
"""
Parse the type annotated as returned by a function and
generate the OAS operation response.
"""
def __init__(self, oas: OpenApiSpec3, oas_operation, status_code_descriptions):
self._oas_operation = oas_operation
self._oas = oas
self._status_code_descriptions = status_code_descriptions
def _handle_pydantic_base_model(self, obj):
if is_pydantic_base_model(obj):
response_schema = obj.schema(
ref_template="#/components/schemas/{model}"
).copy()
if def_sub_schemas := response_schema.pop("definitions", None):
self._oas.components.schemas.update(def_sub_schemas)
return response_schema
return {}
def _handle_list(self, obj):
if typing.get_origin(obj) is list:
return {
"type": "array",
"items": self._handle_pydantic_base_model(typing.get_args(obj)[0]),
}
return self._handle_pydantic_base_model(obj)
def _handle_status_code_type(self, obj):
if is_status_code_type(typing.get_origin(obj)):
status_code = typing.get_origin(obj).__name__[1:]
self._oas_operation.responses[status_code].content = {
"application/json": {
"schema": self._handle_list(typing.get_args(obj)[0])
}
}
desc = self._status_code_descriptions.get(int(status_code))
if desc:
self._oas_operation.responses[status_code].description = desc
elif is_status_code_type(obj):
status_code = obj.__name__[1:]
self._oas_operation.responses[status_code].content = {}
desc = self._status_code_descriptions.get(int(status_code))
if desc:
self._oas_operation.responses[status_code].description = desc
def _handle_union(self, obj):
if typing.get_origin(obj) is typing.Union:
for arg in typing.get_args(obj):
self._handle_status_code_type(arg)
self._handle_status_code_type(obj)
def build(self, obj):
self._handle_union(obj)
def _add_http_method_to_oas(
oas: OpenApiSpec3, oas_path: PathItem, http_method: str, view: Type[PydanticView]
):
http_method = http_method.lower()
oas_operation: OperationObject = getattr(oas_path, http_method)
handler = getattr(view, http_method)
path_args, body_args, qs_args, header_args, defaults = _parse_func_signature(
handler
)
description = getdoc(handler)
if description:
oas_operation.description = docstring_parser.operation(description)
status_code_descriptions = docstring_parser.status_code(description)
else:
status_code_descriptions = {}
if body_args:
body_schema = (
next(iter(body_args.values()))
.schema(ref_template="#/components/schemas/{model}")
.copy()
)
if def_sub_schemas := body_schema.pop("definitions", None):
oas.components.schemas.update(def_sub_schemas)
oas_operation.request_body.content = {
"application/json": {"schema": body_schema}
}
indexes = count()
for args_location, args in (
("path", path_args.items()),
("query", qs_args.items()),
("header", header_args.items()),
):
for name, type_ in args:
i = next(indexes)
oas_operation.parameters[i].in_ = args_location
oas_operation.parameters[i].name = name
optional_type = _handle_optional(type_)
attrs = {"__annotations__": {"__root__": type_}}
if name in defaults:
attrs["__root__"] = defaults[name]
oas_operation.parameters[i].schema = type(name, (BaseModel,), attrs).schema(
ref_template="#/components/schemas/{model}"
)
oas_operation.parameters[i].required = optional_type is None
return_type = handler.__annotations__.get("return")
if return_type is not None:
_OASResponseBuilder(oas, oas_operation, status_code_descriptions).build(
return_type
)
def generate_oas(apps: List[Application]) -> dict:
"""
Generate and return Open Api Specification from PydanticView in application.
"""
oas = OpenApiSpec3()
for app in apps:
for resources in app.router.resources():
for resource_route in resources:
if not is_pydantic_view(resource_route.handler):
continue
view: Type[PydanticView] = resource_route.handler
info = resource_route.get_info()
path = oas.paths[info.get("path", info.get("formatter"))]
if resource_route.method == "*":
for method_name in view.allowed_methods:
_add_http_method_to_oas(oas, path, method_name, view)
else:
_add_http_method_to_oas(oas, path, resource_route.method, view)
return oas.spec
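# Illustrative usage sketch (hypothetical application and view):
#   app = Application()
#   app.router.add_view('/pets', PetView)   # PetView: a PydanticView subclass
#   spec = generate_oas([app])              # plain dict, ready to serialize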
async def get_oas(request):
"""
View to generate the Open Api Specification from PydanticView in application.
"""
apps = request.app["apps to expose"]
return json_response(generate_oas(apps))
async def oas_ui(request):
"""
View to serve the swagger-ui to read open api specification of application.
"""
template = request.app["index template"]
static_url = request.app.router["static"].url_for(filename="")
spec_url = request.app.router["spec"].url_for()
host = request.url.origin()
return Response(
text=template.render(
{
"openapi_spec_url": host.with_path(str(spec_url)),
"static_url": host.with_path(str(static_url)),
}
),
content_type="text/html",
charset="utf-8",
)
|
[
"vincent.maillol@gmail.com"
] |
vincent.maillol@gmail.com
|
fb78a211e54f0c7e3ca6b79b30d6779ae6515303
|
dcb417f91e23ffca0498d0c6fd03e4a3ddc9a41a
|
/manage.py
|
b67daa79fbd5d3573c20f88dd3dd7f464ecb247d
|
[] |
no_license
|
cdkd321/month_refresh
|
d75cde845b1c8b707980be2bf9d89b65d3ac230a
|
c92f2efd3f2a6d93b06f4ed9e70a10493826380a
|
refs/heads/master
| 2021-05-24T18:28:15.997555
| 2020-05-03T18:23:56
| 2020-05-03T18:23:56
| 253,698,847
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "month_refresh.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"zhangwenjie123"
] |
zhangwenjie123
|
9e2a90e7c5eaf59d6e68232aa46ab1b466154805
|
be5401cad765484d2971b160606ba89596364bb8
|
/util.py
|
ef8a29a742a0ee83587f470f90e8f9e700aa01d2
|
[] |
no_license
|
Jashpatel1/Collaborative-Pathfinder
|
60117b1aa7baa802c2bda94f8c15aa5563492a90
|
24a6f7772997b509b6fc8a2101d9b50cbfba6d25
|
refs/heads/main
| 2023-01-08T13:02:16.370373
| 2020-11-16T03:39:10
| 2020-11-16T03:39:10
| 305,340,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,417
|
py
|
import pdb
INVALID = -999
HARD_PLACE = -999
ANY_TIME = -999
SOMETIME = 25
tLIMIT = 25
TWAIT = 0
WAIT_FACTOR = 0.51
MAX_STEPS = 45
UNOCCUPIED = 0
IS_ROCK = -99
MOVE_SPEED = 1
MSG_BUFFER_SIZE = 3
FRAME_HEIGHT = 350
FRAME_WIDTH = 600
FRAME_MARGIN = 10
CELL_MARGIN = 5
MAX_AGENTS_IN_CELL = 1
class Actions(object):
RIGHT = 0
UP = 1
LEFT = 2
DOWN = 3
WAIT = 4
COLORS = ['white', 'green', 'blue', 'black',
'red', 'magenta', 'cyan', 'yellow']
def extract_fn(a):
# print 'a :', a, 'Extract :', a[:-1]
return a
# return a[1:]
def euclidean_dist(a, b):
return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5
def manhattan_dist(a, b):
return abs(a[0]-b[0]) + abs(a[1]-b[1])
def heapsort(l):
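    """Sort a list in ascending order using the PriorityQueue below.

    Illustrative doctest (added example; exercised by the __main__ block):
    >>> heapsort([3, 1, 2])
    [1, 2, 3]
    """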
q = PriorityQueue()
for (i, x) in enumerate(l):
q.update(i, x)
return [q.pop_smallest()[1] for x in l]
def _parent(i):
    # Index of the parent node in a binary heap (children are 2i+1 and 2i+2).
    return (i - 1) // 2
def _lchild(i):
return 2 * i + 1
def _rchild(i):
return 2 * i + 2
def _children(i):
return (_lchild(i), _rchild(i))
class PriorityQueue:
def __init__(self):
self._heap = []
self._keyindex = {}
self.tie_breaker = None
def __len__(self):
return len(self._heap)
def __contains__(self, key):
return key in self._keyindex
def _key(self, i):
"""
Returns the key value of the given node.
"""
return self._heap[i][0]
def _priority(self, i):
"""
Returns the priority of the given node.
"""
return self._heap[i][1]
def _swap(self, i, j):
"""
Swap the positions of two nodes and update the key index.
"""
(self._heap[i], self._heap[j]) = (self._heap[j], self._heap[i])
(self._keyindex[self._key(i)], self._keyindex[self._key(j)]) = (
self._keyindex[self._key(j)], self._keyindex[self._key(i)])
def _heapify_down(self, i):
"""
Solves heap violations starting at the given node, moving down the heap.
"""
children = [c for c in _children(i) if c < len(self._heap)]
# This is a leaf, so stop
if not children:
return
# Get the minimum child
min_child = min(children, key=self._priority)
# If there are two children with the same priority, we need to break the tie
if self.tie_breaker and len(children) == 2:
c0 = children[0]
c1 = children[1]
if self._priority(c0) == self._priority(c1):
min_child = c0 if self.tie_breaker(
self._key(c0), self._key(c1)) else c1
# Sort, if necessary
a = self._priority(i)
b = self._priority(min_child)
if a > b or (self.tie_breaker and a == b and not self.tie_breaker(self._key(i), self._key(min_child))):
# Swap with the minimum child and continue heapifying
self._swap(i, min_child)
self._heapify_down(min_child)
def _heapify_up(self, i):
"""
Solves heap violations starting at the given node, moving up the heap.
"""
# This is the top of the heap, so stop.
if i == 0:
return
parent = _parent(i)
a = self._priority(i)
b = self._priority(parent)
if a < b or (self.tie_breaker and a == b and self.tie_breaker(self._key(i), self._key(parent))):
self._swap(i, parent)
self._heapify_up(parent)
def peek_smallest(self):
"""
Returns a tuple containing the key with the smallest priority and its associated priority.
"""
return self._heap[0]
def pop_smallest(self):
"""
Removes the key with the smallest priority and returns a tuple containing the key and its associated priority
"""
# Swap the last node to the front
self._swap(0, len(self._heap) - 1)
# Remove the smallest from the list
(key, priority) = self._heap.pop()
del self._keyindex[key]
# Fix the heap
self._heapify_down(0)
return (key, priority)
def update(self, key, priority):
"""
update(key, priority)
If priority is lower than the associated priority of key, then change it to the new priority. If not, does nothing.
If key is not in the priority queue, add it.
Return True if a change was made, else False.
"""
if key in self._keyindex:
# Find key index in heap
i = self._keyindex[key]
# Make sure this lowers its priority
if priority > self._priority(i):
return False
# Fix the heap
self._heap[i] = (key, priority)
self._heapify_up(i)
return True
else:
self._heap.append((key, priority))
self._keyindex[key] = len(self._heap) - 1
self._heapify_up(len(self._heap) - 1)
return True
def is_empty(self):
"""
        Returns True if the queue is empty, else False.
"""
return len(self) == 0
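# Illustrative usage sketch (hypothetical keys and priorities):
#   q = PriorityQueue()
#   q.update('a', 5); q.update('b', 2); q.update('a', 1)   # lowers 'a' to 1
#   q.pop_smallest()   # -> ('a', 1)
#   q.pop_smallest()   # -> ('b', 2)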
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"patel.5@iitj.ac.in"
] |
patel.5@iitj.ac.in
|
90854df211652602912ca3d8916673a350afd52e
|
72765c1736a10b86be8583dbd694906aff467068
|
/Decorators/Decorators.py
|
30430da271cea99a7e115dee138898fc154452ec
|
[] |
no_license
|
taison2000/Python
|
05e3f3834501a4f5ef7a6260d8bf3d4ce41930f3
|
44079700c3db289f92792ea3ec5add6a523f8eae
|
refs/heads/master
| 2021-10-16T07:43:55.202012
| 2019-02-09T02:22:44
| 2019-02-09T02:22:44
| 103,322,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,311
|
py
|
#!C:\Python34\python.exe
#!/Python34/python
#!/usr/bin/python
# -*- coding: utf-8 -*-
## ----------------------------------------------------------------------------
"""
Decorator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A decorator is a function that takes one function as input and returns
another function.
Note:
-
"""
import os
import time
##-----------------------------------------------------------------------------
'''
decorator function
'''
def document_it( func ):
def new_function( *args, **kwargs ):
print('Running function: ', func.__name__)
print('Positional arguments: ', args)
print('Keyword arguments: ', kwargs)
result = func( *args, **kwargs )
print('Result: ', result)
return result
return new_function
#------------------------------------------------------------------------------
"""
Function to test decorator
"""
def add_ints( a, b ):
return a + b
##-----------------------------------------------------------------------------
## automatic decorator application
@document_it
def add_two( a, b ):
return a + b
@document_it
def add_two_kw( first=8, second=11):
return first + second
# -----------------------------------------------------------------------------
# Main program - This is the main function
# -----------------------------------------------------------------------------
def main():
## manual decorator assignment
cooler_add_ints = document_it( add_ints )
# Call the new function
print("Manual decorator ")
cooler_add_ints(12, 45)
print("Automatic decorator")
add_two(34, 78)
pass
# -----------------------------------------------------------------------------
# Code entry
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
##-----------------------------------------------------------------------------
"""
Resources:
- https://wiki.python.org/moin/Generators
"""
##-----------------------------------------------------------------------------
"""
Note:
- No ++ or --, use a+=1 or a-=1
- print ("Variable %d", %Val)
print ("Variable %d %d", % (Val1, Val2))
"""
'''
3 single quote string
'''
|
[
"noreply@github.com"
] |
taison2000.noreply@github.com
|
c57ad88b86dbdb3334726bdb816321673fbc3b4c
|
696a7250b8937e816bac365a909e669db2988017
|
/backend/UKS_Projekat/urls.py
|
7b1ff90c1ea4726a54035242b89befa5cfee0a0f
|
[] |
no_license
|
DejanS24/UKS_Project
|
543a640f5440210e16d370e38a9538bb0d264426
|
79292219b41daf97a07d1ccc3a60a106f7dd107f
|
refs/heads/master
| 2023-03-06T05:23:19.841104
| 2021-02-20T12:08:50
| 2021-02-20T12:08:50
| 286,275,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 808
|
py
|
"""UKS_Projekat URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('uks_app.urls')),
]
|
[
"dejans1224@gmail.com"
] |
dejans1224@gmail.com
|
0a31f92deebd0a0dd62788559138ee467f4f0339
|
000a4b227d970cdc6c8db192f4437698cb782721
|
/python/helpers/typeshed/stubs/stripe/stripe/api_resources/payment_method.pyi
|
da90f0e959feaaa22a20224cc8bd87f30de6df07
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
trinhanhngoc/intellij-community
|
2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d
|
1d4a962cfda308a73e0a7ef75186aaa4b15d1e17
|
refs/heads/master
| 2022-11-03T21:50:47.859675
| 2022-10-19T16:39:57
| 2022-10-19T23:25:35
| 205,765,945
| 1
| 0
|
Apache-2.0
| 2019-09-02T02:55:15
| 2019-09-02T02:55:15
| null |
UTF-8
|
Python
| false
| false
| 510
|
pyi
|
from typing import Any
from stripe.api_resources.abstract import (
CreateableAPIResource as CreateableAPIResource,
ListableAPIResource as ListableAPIResource,
UpdateableAPIResource as UpdateableAPIResource,
custom_method as custom_method,
)
class PaymentMethod(CreateableAPIResource, ListableAPIResource, UpdateableAPIResource):
OBJECT_NAME: str
def attach(self, idempotency_key: Any | None = ..., **params): ...
def detach(self, idempotency_key: Any | None = ..., **params): ...
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
465e9b543a4ca594180a5b5aacd3f04e972ddc16
|
7362f2722ffe1144bf69c416080437f93a82c035
|
/projects/donkeycar/donkeycar/templates/complete.py
|
93ec380ade145053cbd4c90d4243274c8ec83847
|
[
"MIT"
] |
permissive
|
Indianaat/Kart-MANI-IA
|
696b670bea355288be4e5f250bb8acdd625c3fa2
|
687a5f36cb2fc661efa00007fd80c48bb3c773ae
|
refs/heads/main
| 2023-06-02T15:39:31.128015
| 2021-06-12T11:34:30
| 2021-06-12T11:34:30
| 377,418,911
| 0
| 0
| null | 2021-06-16T08:05:23
| 2021-06-16T08:05:22
| null |
UTF-8
|
Python
| false
| false
| 30,870
|
py
|
#!/usr/bin/env python3
"""
Scripts to drive a donkey 2 car
Usage:
manage.py (drive) [--model=<model>] [--js] [--type=(linear|categorical)] [--camera=(single|stereo)] [--meta=<key:value> ...] [--myconfig=<filename>]
manage.py (train) [--tubs=tubs] (--model=<model>) [--type=(linear|inferred|tensorrt_linear|tflite_linear)]
Options:
-h --help Show this screen.
--js Use physical joystick.
-f --file=<file> A text file containing paths to tub files, one per line. Option may be used more than once.
    --meta=<key:value>   Key/Value strings describing a piece of meta data about this drive. Option may be used more than once.
--myconfig=filename Specify myconfig file to use.
[default: myconfig.py]
"""
import os
import time
import logging
from docopt import docopt
import donkeycar as dk
from donkeycar.parts.transform import TriggeredCallback, DelayedTrigger
from donkeycar.parts.tub_v2 import TubWriter
from donkeycar.parts.datastore import TubHandler
from donkeycar.parts.controller import LocalWebController, JoystickController, WebFpv
from donkeycar.parts.throttle_filter import ThrottleFilter
from donkeycar.parts.behavior import BehaviorPart
from donkeycar.parts.file_watcher import FileWatcher
from donkeycar.parts.launch import AiLaunch
from donkeycar.utils import *
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def drive(cfg, model_path=None, use_joystick=False, model_type=None,
camera_type='single', meta=[]):
"""
Construct a working robotic vehicle from many parts. Each part runs as a
    job in the Vehicle loop, calling either its run or run_threaded method
depending on the constructor flag `threaded`. All parts are updated one
after another at the framerate given in cfg.DRIVE_LOOP_HZ assuming each
part finishes processing in a timely manner. Parts may have named outputs
and inputs. The framework handles passing named outputs to parts
requesting the same named input.
"""
logger.info(f'PID: {os.getpid()}')
if cfg.DONKEY_GYM:
        # The simulator will use CUDA and we usually run out of resources
        # if this process also tries to use it, so disable CUDA for donkey_gym.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
if model_type is None:
if cfg.TRAIN_LOCALIZER:
model_type = "localizer"
elif cfg.TRAIN_BEHAVIORS:
model_type = "behavior"
else:
model_type = cfg.DEFAULT_MODEL_TYPE
#Initialize car
V = dk.vehicle.Vehicle()
#Initialize logging before anything else to allow console logging
if cfg.HAVE_CONSOLE_LOGGING:
logger.setLevel(logging.getLevelName(cfg.LOGGING_LEVEL))
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(cfg.LOGGING_FORMAT))
logger.addHandler(ch)
if cfg.HAVE_MQTT_TELEMETRY:
from donkeycar.parts.telemetry import MqttTelemetry
tel = MqttTelemetry(cfg)
if cfg.HAVE_ODOM:
if cfg.ENCODER_TYPE == "GPIO":
from donkeycar.parts.encoder import RotaryEncoder
enc = RotaryEncoder(mm_per_tick=0.306096, pin = cfg.ODOM_PIN, debug = cfg.ODOM_DEBUG)
V.add(enc, inputs=['throttle'], outputs=['enc/speed'], threaded=True)
elif cfg.ENCODER_TYPE == "arduino":
from donkeycar.parts.encoder import ArduinoEncoder
enc = ArduinoEncoder()
V.add(enc, outputs=['enc/speed'], threaded=True)
else:
print("No supported encoder found")
logger.info("cfg.CAMERA_TYPE %s"%cfg.CAMERA_TYPE)
if camera_type == "stereo":
if cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
else:
raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
V.add(camA, outputs=['cam/image_array_a'], threaded=True)
V.add(camB, outputs=['cam/image_array_b'], threaded=True)
from donkeycar.parts.image import StereoPair
V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
outputs=['cam/image_array'])
elif cfg.CAMERA_TYPE == "D435":
from donkeycar.parts.realsense435i import RealSense435i
cam = RealSense435i(
enable_rgb=cfg.REALSENSE_D435_RGB,
enable_depth=cfg.REALSENSE_D435_DEPTH,
enable_imu=cfg.REALSENSE_D435_IMU,
device_id=cfg.REALSENSE_D435_ID)
V.add(cam, inputs=[],
outputs=['cam/image_array', 'cam/depth_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
threaded=True)
else:
inputs = []
outputs = ['cam/image_array']
threaded = True
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
#rbx
cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, record_location=cfg.SIM_RECORD_LOCATION, record_gyroaccel=cfg.SIM_RECORD_GYROACCEL, record_velocity=cfg.SIM_RECORD_VELOCITY, record_lidar=cfg.SIM_RECORD_LIDAR, delay=cfg.SIM_ARTIFICIAL_LATENCY)
threaded = True
inputs = ['angle', 'throttle']
elif cfg.CAMERA_TYPE == "PICAM":
from donkeycar.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
elif cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CSIC":
from donkeycar.parts.camera import CSICamera
cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
elif cfg.CAMERA_TYPE == "V4L":
from donkeycar.parts.camera import V4LCamera
cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
elif cfg.CAMERA_TYPE == "MOCK":
from donkeycar.parts.camera import MockCamera
cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "IMAGE_LIST":
from donkeycar.parts.camera import ImageListCamera
cam = ImageListCamera(path_mask=cfg.PATH_MASK)
elif cfg.CAMERA_TYPE == "LEOPARD":
from donkeycar.parts.leopard_imaging import LICamera
cam = LICamera(width=cfg.IMAGE_W, height=cfg.IMAGE_H, fps=cfg.CAMERA_FRAMERATE)
else:
raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))
# add lidar
if cfg.USE_LIDAR:
from donkeycar.parts.lidar import RPLidar
if cfg.LIDAR_TYPE == 'RP':
print("adding RP lidar part")
lidar = RPLidar(lower_limit = cfg.LIDAR_LOWER_LIMIT, upper_limit = cfg.LIDAR_UPPER_LIMIT)
V.add(lidar, inputs=[],outputs=['lidar/dist_array'], threaded=True)
if cfg.LIDAR_TYPE == 'YD':
print("YD Lidar not yet supported")
# Donkey gym part will output position information if it is configured
if cfg.DONKEY_GYM:
if cfg.SIM_RECORD_LOCATION:
outputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
if cfg.SIM_RECORD_GYROACCEL:
outputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
if cfg.SIM_RECORD_VELOCITY:
outputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
if cfg.SIM_RECORD_LIDAR:
outputs += ['lidar/dist_array']
V.add(cam, inputs=inputs, outputs=outputs, threaded=threaded)
#This web controller will create a web server that is capable
#of managing steering, throttle, and modes, and more.
ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)
V.add(ctr,
inputs=['cam/image_array', 'tub/num_records'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
#modify max_throttle closer to 1.0 to have more power
#modify steering_scale lower than 1.0 to have less responsive steering
if cfg.CONTROLLER_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATController
ctr = RoboHATController(cfg)
elif "custom" == cfg.CONTROLLER_TYPE:
#
# custom controller created with `donkey createjs` command
#
from my_joystick import MyJoystickController
ctr = MyJoystickController(
throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE,
auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
else:
from donkeycar.parts.controller import get_js_controller
ctr = get_js_controller(cfg)
if cfg.USE_NETWORKED_JS:
from donkeycar.parts.controller import JoyStickSub
netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
V.add(netwkJs, threaded=True)
ctr.js = netwkJs
V.add(ctr,
inputs=['cam/image_array'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
#this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])
#See if we should even run the pilot module.
#This is only needed because the part run_condition only accepts boolean
class PilotCondition:
def run(self, mode):
if mode == 'user':
return False
else:
return True
V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
class LedConditionLogic:
def __init__(self, cfg):
self.cfg = cfg
def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
#returns a blink rate. 0 for off. -1 for on. positive for rate.
if track_loc is not None:
led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
return -1
if model_file_changed:
led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
return 0.1
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if recording_alert:
led.set_rgb(*recording_alert)
return self.cfg.REC_COUNT_ALERT_BLINK_RATE
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if behavior_state is not None and model_type == 'behavior':
r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
led.set_rgb(r, g, b)
return -1 #solid on
if recording:
return -1 #solid on
elif mode == 'user':
return 1
elif mode == 'local_angle':
return 0.5
elif mode == 'local':
return 0.1
return 0
if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
from donkeycar.parts.led_status import RGB_LED
led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
def get_record_alert_color(num_records):
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
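# Worked example (the RECORD_ALERT_COLOR_ARR value here is illustrative):
# with cfg.RECORD_ALERT_COLOR_ARR = [(0, (1, 1, 1)), (3000, (5, 5, 5))],
# 1000 records -> (1, 1, 1) and 3500 records -> (5, 5, 5)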
class RecordTracker:
def __init__(self):
self.last_num_rec_print = 0
self.dur_alert = 0
self.force_alert = 0
def run(self, num_records):
if num_records is None:
return 0
if self.last_num_rec_print != num_records or self.force_alert:
self.last_num_rec_print = num_records
if num_records % 10 == 0:
print("recorded", num_records, "records")
if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
self.force_alert = 0
if self.dur_alert > 0:
self.dur_alert -= 1
if self.dur_alert != 0:
return get_record_alert_color(num_records)
return 0
rec_tracker_part = RecordTracker()
V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])
if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
#then we are not using the circle button. hijack that to force a record count indication
def show_record_count_status():
rec_tracker_part.last_num_rec_print = 0
rec_tracker_part.force_alert = 1
ctr.set_button_down_trigger('circle', show_record_count_status)
#Sombrero
if cfg.HAVE_SOMBRERO:
from donkeycar.parts.sombrero import Sombrero
s = Sombrero()
#IMU
if cfg.HAVE_IMU:
from donkeycar.parts.imu import IMU
imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)
# Use the FPV preview, which will show the cropped image output, or the full frame.
if cfg.USE_FPV:
V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)
#Behavioral state
if cfg.TRAIN_BEHAVIORS:
bh = BehaviorPart(cfg.BEHAVIOR_LIST)
V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
try:
ctr.set_button_down_trigger('L1', bh.increment_state)
except Exception:
pass  # controller may not support this button trigger
inputs = ['cam/image_array', "behavior/one_hot_state_array"]
elif cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array']
elif cfg.HAVE_ODOM:
inputs = ['cam/image_array', 'enc/speed']
#IMU
elif model_type == "imu":
assert(cfg.HAVE_IMU)
inputs=['cam/image_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
else:
inputs=['cam/image_array']
def load_model(kl, model_path):
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
def load_weights(kl, weights_path):
start = time.time()
try:
print('loading model weights', weights_path)
kl.model.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
def load_model_json(kl, json_fnm):
start = time.time()
print('loading model json', json_fnm)
from tensorflow.python import keras
try:
with open(json_fnm, 'r') as handle:
contents = handle.read()
kl.model = keras.models.model_from_json(contents)
print('finished loading json in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print("ERR>> problems loading model json", json_fnm)
if model_path:
#When we have a model, first create an appropriate Keras part
kl = dk.utils.get_model_by_type(model_type, cfg)
model_reload_cb = None
if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
#when we have a whole-model file (.h5/.uff/.tflite/.pkl)
#load everything from the model file
load_model(kl, model_path)
def reload_model(filename):
load_model(kl, filename)
model_reload_cb = reload_model
elif '.json' in model_path:
#when we have a .json extension
#load the model from there and look for a matching
#.wts file with just weights
load_model_json(kl, model_path)
weights_path = model_path.replace('.json', '.weights')
load_weights(kl, weights_path)
def reload_weights(filename):
weights_path = filename.replace('.json', '.weights')
load_weights(kl, weights_path)
model_reload_cb = reload_weights
else:
print("ERR>> Unknown extension type on model file!!")
return
#this part will signal visual LED, if connected
V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])
#these parts will reload the model file, but only when ai is running so we don't interrupt user driving
V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
outputs=['pilot/angle', 'pilot/throttle']
if cfg.TRAIN_LOCALIZER:
outputs.append("pilot/loc")
#Run the pilot if the mode is not user.
V.add(kl, inputs=inputs,
outputs=outputs,
run_condition='run_pilot')
if cfg.STOP_SIGN_DETECTOR:
from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector
V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE, cfg.STOP_SIGN_SHOW_BOUNDING_BOX), inputs=['cam/image_array', 'pilot/throttle'], outputs=['pilot/throttle', 'cam/image_array'])
#Choose what inputs should change the car.
class DriveMode:
def run(self, mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle if pilot_angle else 0.0, user_throttle
else:
return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0
V.add(DriveMode(),
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
#to give the car a boost when starting ai mode in a race.
aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
V.add(aiLauncher,
inputs=['user/mode', 'throttle'],
outputs=['throttle'])
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)
class AiRunCondition:
'''
A bool part to let us know when ai is running.
'''
def run(self, mode):
if mode == "user":
return False
return True
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
#Ai Recording
class AiRecordingCondition:
'''
return True when ai mode, otherwise respect user mode recording flag
'''
def run(self, mode, recording):
if mode == 'user':
return recording
return True
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
#Drive train setup
if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
pass
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL_L298N":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, L298N_HBridge_DC_Motor
left_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_LEFT_FWD, cfg.HBRIDGE_L298N_PIN_LEFT_BWD, cfg.HBRIDGE_L298N_PIN_LEFT_EN)
right_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_RIGHT_FWD, cfg.HBRIDGE_L298N_PIN_RIGHT_BWD, cfg.HBRIDGE_L298N_PIN_RIGHT_EN)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
from donkeycar.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #really pin
#PWM pulse values should be in the range of 100 to 200
assert(cfg.STEERING_LEFT_PWM <= 200)
assert(cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'], threaded=True)
V.add(motor, inputs=["throttle"])
elif cfg.DRIVE_TRAIN_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATDriver
V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ, inverted=cfg.STEERING_PWM_INVERTED)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ, inverted=cfg.THROTTLE_PWM_INVERTED)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
# OLED setup
if cfg.USE_SSD1306_128_32:
from donkeycar.parts.oled import OLEDPart
auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM, auto_record_on_throttle=auto_record_on_throttle)
V.add(oled_part, inputs=['recording', 'tub/num_records', 'user/mode'], outputs=[], threaded=True)
#add tub to save data
if cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array', 'user/angle', 'user/throttle', 'user/mode']
types = ['image_array', 'nparray','float', 'float', 'str']
else:
inputs=['cam/image_array','user/angle', 'user/throttle', 'user/mode']
types=['image_array','float', 'float','str']
if cfg.HAVE_ODOM:
inputs += ['enc/speed']
types += ['float']
if cfg.TRAIN_BEHAVIORS:
inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
types += ['int', 'str', 'vector']
if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
inputs += ['cam/depth_array']
types += ['gray16_array']
if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
types +=['float', 'float', 'float',
'float', 'float', 'float']
# rbx
if cfg.DONKEY_GYM:
if cfg.SIM_RECORD_LOCATION:
inputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
types += ['float', 'float', 'float', 'float', 'float']
if cfg.SIM_RECORD_GYROACCEL:
inputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
types += ['float', 'float', 'float', 'float', 'float', 'float']
if cfg.SIM_RECORD_VELOCITY:
inputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
types += ['float', 'float', 'float']
if cfg.SIM_RECORD_LIDAR:
inputs += ['lidar/dist_array']
types += ['nparray']
if cfg.RECORD_DURING_AI:
inputs += ['pilot/angle', 'pilot/throttle']
types += ['float', 'float']
if cfg.HAVE_PERFMON:
from donkeycar.parts.perfmon import PerfMonitor
mon = PerfMonitor(cfg)
perfmon_outputs = ['perf/cpu', 'perf/mem', 'perf/freq']
inputs += perfmon_outputs
types += ['float', 'float', 'float']
V.add(mon, inputs=[], outputs=perfmon_outputs, threaded=True)
# do we want to store new records into own dir or append to existing
tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \
cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH
tub_writer = TubWriter(tub_path, inputs=inputs, types=types, metadata=meta)
V.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
# Telemetry (we add the same metrics added to the TubHandler)
if cfg.HAVE_MQTT_TELEMETRY:
telem_inputs, _ = tel.add_step_inputs(inputs, types)
V.add(tel, inputs=telem_inputs, outputs=["tub/queue_size"], threaded=True)
if cfg.PUB_CAMERA_IMAGES:
from donkeycar.parts.network import TCPServeValue
from donkeycar.parts.image import ImgArrToJpg
pub = TCPServeValue("camera")
V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
V.add(pub, inputs=['jpg/bin'])
if type(ctr) is LocalWebController:
if cfg.DONKEY_GYM:
print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT)
else:
print("You can now go to <your hostname.local>:%d to drive your car." % cfg.WEB_CONTROL_PORT)
elif isinstance(ctr, JoystickController):
print("You can now move your joystick to drive your car.")
ctr.set_tub(tub_writer.tub)
ctr.print_controls()
#run the vehicle
V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
args = docopt(__doc__)
cfg = dk.load_config(myconfig=args['--myconfig'])
if args['drive']:
model_type = args['--type']
camera_type = args['--camera']
drive(cfg, model_path=args['--model'], use_joystick=args['--js'],
model_type=model_type, camera_type=camera_type,
meta=args['--meta'])
elif args['train']:
print('Use python train.py instead.\n')
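# Usage sketch (file paths and meta values are illustrative; the accepted
# flags come from this script's docopt string at the top of the file):
#   python <this_script>.py drive --model models/mypilot.h5 --type linear --js
#   python <this_script>.py drive --camera stereo --meta location:park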
|
[
"manonloupio@gmail.com"
] |
manonloupio@gmail.com
|
c24159df004390b4a0ede19e88cad4a701faab16
|
c573a8da4a17cb3ed909f5a30b9334eb18cca6b2
|
/signalgen/examples/spec_test.py
|
fd5f2ea51deac143c3538e30fe334117742cf82e
|
[] |
no_license
|
AJSterner/apex
|
52a077ff6b486f8817928bd6760a2a31851ca8e0
|
b621c2b8e979e3da7cabd2db69c9f7ee17469422
|
refs/heads/master
| 2020-04-05T13:37:16.712357
| 2017-08-02T18:44:03
| 2017-08-02T18:44:03
| 94,922,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
from specanalyzer import EnetRandSFSP
DEFAULT_ADDR = "131.243.171.57"
spec = EnetRandSFSP(DEFAULT_ADDR, 18)
spec.reset()
# configure the measurement window and reference level, then take one sweep
spec.set_window(185.7, .03, -30)
spec.auto_ref_lvl()
spec.continuous_sweep(False)
spec.take_sweep()
|
[
"andrewjohnsterner@gmail.com"
] |
andrewjohnsterner@gmail.com
|
80510b9310d4487e0c029eb1688c6adc013496b9
|
1c4100c3d1814c2ee8761968c2e9b65666de9445
|
/Ip/Get_Proxies.py
|
cf345cbf8a503e3bab7fdb172a265d33ef1e79f3
|
[] |
no_license
|
Lucas-Wong/GeneralTools
|
defb8cd83f80eca7c03a6d2e25fc22292c8c8d80
|
9ef5e646da94f63bf232ff45d2493b1bfbef914f
|
refs/heads/master
| 2021-05-05T02:45:19.195496
| 2019-01-03T02:42:44
| 2019-01-03T02:42:44
| 119,784,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author = lucas.wang
@create_time = 2018-02-07
"""
# IP addresses are scraped from the Chinese high-anonymity proxy site http://www.xicidaili.com/nn/
# Scraping only the first page of IPs is enough for general use
from bs4 import BeautifulSoup
import requests
import random
class Get_proxies(object):
def get_ip_list(self, url, headers):
web_data = requests.get(url, headers=headers)
# print(web_data)
soup = BeautifulSoup(web_data.text, 'lxml')
# print(soup)
ips = soup.find_all('tr')
# print(ips)
ip_list = []
for i in range(1, len(ips)):
ip_info = ips[i]
# print(ip_info)
tds = ip_info.find_all('td')
# print(tds)
ip_list.append(tds[1].text + ':' + tds[2].text)
return ip_list
def get_random_ip(self, ip_list):
proxy_list = []
for ip in ip_list:
proxy_list.append('http://' + ip)
# print(proxy_list)
proxy_ip = random.choice(proxy_list)
proxies = {'http': proxy_ip, }
return proxies
if __name__ == '__main__':
url = 'http://www.xicidaili.com/nn/'
# headers = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
# }
headers = {
'Connection': 'Keep-Alive',
'Accept': 'text/html, application/xhtml+xml, */*',
'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
'User-Agent': 'Mozilla/5.0 (Linux; U; Android 6.0; zh-CN; MZ-m2 note Build/MRA58K) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/40.0.2214.89 MZBrowser/6.5.506 UWS/2.10.1.22 Mobile Safari/537.36'
}
getProxies = Get_proxies()
ip_list = getProxies.get_ip_list(url, headers=headers)
proxies = getProxies.get_random_ip(ip_list)
print(proxies)
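# A minimal sketch of using the returned proxies dict downstream; the target
# URL is an assumption for illustration:
#   resp = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=5)
#   print(resp.text)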
|
[
"lucas.wang@ariix.com"
] |
lucas.wang@ariix.com
|
c04a358d0715f2589ea0857a5e733acd3eb79b92
|
80694fba85a4096e9069d836ab71917d5f8b3540
|
/zerorpc_less2/test_middleware_before_after_exec.py
|
ca6162665c646e881043893844eb9f848972187f
|
[] |
no_license
|
gitletian/zerorpc_test
|
dad732fd3d9086bbf4200e2d6b790afb377bb685
|
7f11a62dee1ea71cf1ed743c7bc17a3397a806c8
|
refs/heads/master
| 2021-06-23T18:25:37.210949
| 2019-07-09T07:59:39
| 2019-07-09T07:59:39
| 147,283,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,186
|
py
|
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2015 François-Xavier Bourlet (bombela+zerorpc@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from builtins import range
import gevent
import zerorpc
endpoint = "tcp://0.0.0.0:4244"
TIME_FACTOR = 0.2
class EchoModule(object):
def __init__(self, trigger=None):
self.last_msg = None
self._trigger = trigger
def echo(self, msg):
self.last_msg = 'echo: ' + msg
if self._trigger:
self._trigger.set()
return self.last_msg
@zerorpc.stream
def echoes(self, msg):
self.last_msg = 'echo: ' + msg
for i in range(0, 3):
yield self.last_msg
class ServerBeforeExecMiddleware(object):
def __init__(self):
self.called = False
def server_before_exec(self, request_event):
assert request_event.name == "echo" or request_event.name == "echoes"
self.called = True
def test_hook_server_before_exec():
zero_ctx = zerorpc.Context()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client()
test_client.connect(endpoint)
# Test without a middleware
assert test_client.echo("test") == "echo: test"
# Test with a middleware
test_middleware = ServerBeforeExecMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
assert test_client.echo("test") == "echo: test"
assert test_middleware.called == True
test_server.stop()
test_server_task.join()
def test_hook_server_before_exec_puller():
zero_ctx = zerorpc.Context()
trigger = gevent.event.Event()
echo_module = EchoModule(trigger)
test_server = zerorpc.Puller(echo_module, context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Pusher()
test_client.connect(endpoint)
# Test without a middleware
test_client.echo("test")
trigger.wait(timeout=TIME_FACTOR * 2)
assert echo_module.last_msg == "echo: test"
trigger.clear()
# Test with a middleware
test_middleware = ServerBeforeExecMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
test_client.echo("test with a middleware")
trigger.wait(timeout=TIME_FACTOR * 2)
assert echo_module.last_msg == "echo: test with a middleware"
assert test_middleware.called == True
test_server.stop()
test_server_task.join()
def test_hook_server_before_exec_stream():
zero_ctx = zerorpc.Context()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client()
test_client.connect(endpoint)
# Test without a middleware
for echo in test_client.echoes("test"):
assert echo == "echo: test"
# Test with a middleware
test_middleware = ServerBeforeExecMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
it = test_client.echoes("test")
assert test_middleware.called == True
assert next(it) == "echo: test"
for echo in it:
assert echo == "echo: test"
test_server.stop()
test_server_task.join()
class ServerAfterExecMiddleware(object):
def __init__(self):
self.called = False
def server_after_exec(self, request_event, reply_event):
self.called = True
self.request_event_name = getattr(request_event, 'name', None)
self.reply_event_name = getattr(reply_event, 'name', None)
def test_hook_server_after_exec():
zero_ctx = zerorpc.Context()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client()
test_client.connect(endpoint)
# Test without a middleware
assert test_client.echo("test") == "echo: test"
# Test with a middleware
test_middleware = ServerAfterExecMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
assert test_client.echo("test") == "echo: test"
assert test_middleware.called == True
assert test_middleware.request_event_name == 'echo'
assert test_middleware.reply_event_name == 'OK'
test_server.stop()
test_server_task.join()
def test_hook_server_after_exec_puller():
zero_ctx = zerorpc.Context()
trigger = gevent.event.Event()
echo_module = EchoModule(trigger)
test_server = zerorpc.Puller(echo_module, context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Pusher()
test_client.connect(endpoint)
# Test without a middleware
test_client.echo("test")
trigger.wait(timeout=TIME_FACTOR * 2)
assert echo_module.last_msg == "echo: test"
trigger.clear()
# Test with a middleware
test_middleware = ServerAfterExecMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
test_client.echo("test with a middleware")
trigger.wait(timeout=TIME_FACTOR * 2)
assert echo_module.last_msg == "echo: test with a middleware"
assert test_middleware.called == True
assert test_middleware.request_event_name == 'echo'
assert test_middleware.reply_event_name is None
test_server.stop()
test_server_task.join()
def test_hook_server_after_exec_stream():
zero_ctx = zerorpc.Context()
test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client()
test_client.connect(endpoint)
# Test without a middleware
for echo in test_client.echoes("test"):
assert echo == "echo: test"
# Test with a middleware
test_middleware = ServerAfterExecMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
it = test_client.echoes("test")
assert next(it) == "echo: test"
assert test_middleware.called == False
for echo in it:
assert echo == "echo: test"
assert test_middleware.called == True
assert test_middleware.request_event_name == 'echoes'
assert test_middleware.reply_event_name == 'STREAM_DONE'
test_server.stop()
test_server_task.join()
class BrokenEchoModule(object):
def __init__(self, trigger=None):
self.last_msg = None
self._trigger = trigger
def echo(self, msg):
try:
self.last_msg = "Raise"
raise RuntimeError("BrokenEchoModule")
finally:
if self._trigger:
self._trigger.set()
@zerorpc.stream
def echoes(self, msg):
self.echo(msg)
def test_hook_server_after_exec_on_error():
zero_ctx = zerorpc.Context()
test_server = zerorpc.Server(BrokenEchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client()
test_client.connect(endpoint)
test_middleware = ServerAfterExecMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
try:
test_client.echo("test")
except zerorpc.RemoteError:
pass
assert test_middleware.called == False
test_server.stop()
test_server_task.join()
def test_hook_server_after_exec_on_error_puller():
zero_ctx = zerorpc.Context()
trigger = gevent.event.Event()
echo_module = BrokenEchoModule(trigger)
test_server = zerorpc.Puller(echo_module, context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Pusher()
test_client.connect(endpoint)
test_middleware = ServerAfterExecMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
try:
test_client.echo("test with a middleware")
trigger.wait(timeout=TIME_FACTOR * 2)
except zerorpc.RemoteError:
pass
assert echo_module.last_msg == "Raise"
assert test_middleware.called == False
test_server.stop()
test_server_task.join()
def test_hook_server_after_exec_on_error_stream():
zero_ctx = zerorpc.Context()
test_server = zerorpc.Server(BrokenEchoModule(), context=zero_ctx)
test_server.bind(endpoint)
test_server_task = gevent.spawn(test_server.run)
test_client = zerorpc.Client()
test_client.connect(endpoint)
test_middleware = ServerAfterExecMiddleware()
zero_ctx.register_middleware(test_middleware)
assert test_middleware.called == False
try:
test_client.echoes("test")
except zerorpc.RemoteError:
pass
assert test_middleware.called == False
test_server.stop()
test_server_task.join()
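# A minimal sketch (not part of the original test suite) of a middleware
# implementing both hooks exercised above; the class name and timing logic
# are illustrative assumptions, registered the same way the tests do:
#   zero_ctx.register_middleware(TracingMiddleware())
import time
class TracingMiddleware(object):
    def __init__(self):
        self._started = {}
    def server_before_exec(self, request_event):
        # remember when this request started executing
        self._started[id(request_event)] = time.time()
    def server_after_exec(self, request_event, reply_event):
        # report how long the request took once a reply is ready
        started = self._started.pop(id(request_event), None)
        if started is not None:
            print('%s took %.3fs' % (request_event.name, time.time() - started))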
|
[
"guoyuanpei12@sina.com"
] |
guoyuanpei12@sina.com
|
8075781d0d13baf55d9f6368cef05b1fb6478322
|
e8bee70da2f4dcbb74aaecf481d4677bcc18235c
|
/pull_data/pull_utils.py
|
78f6a2a5071b6637dd7eb77c5f23b9ac257c83b8
|
[] |
no_license
|
Udneowi/FacebookDataForGoodPreprocessing
|
1ba3e90b0f5a3191faa5a7b56106b12a2e66727c
|
9d4c3ab64e48ef595e229209ab3bf885b546f0a7
|
refs/heads/master
| 2022-09-11T14:41:14.572372
| 2020-05-31T09:23:21
| 2020-05-31T09:23:21
| 268,245,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,412
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from selenium.webdriver.common.keys import Keys
import time
import pandas as pd
import os
from selenium.webdriver import Chrome
from selenium.webdriver.chrome import webdriver as chrome_webdriver
import sys
import numpy as np
import glob
import shutil
import operator
import json
from tqdm import tqdm
import re
class data_updater:
def __init__(self, download_folder, outdir, path=None, headless=False, creds=False,driver_path=False):
# self.outdir = '../Facebook/'
self.outdir = outdir
with open(creds) as fp:
self.keys = json.load(fp)
self.download_folder = download_folder
self.headless = headless
self.driver_path = driver_path
self.data_types = ['Movement between Admin', 'Movement between Tiles', 'Facebook Population (Admin',
'Facebook Population (Tile']
self.load_data(path)
def load_data(self, path):
# Load data containing country facebook IDs or create an empty dataframe
if path:
self.data = pd.read_csv(path)
self.data = self.data.set_index('Index')
else:
df = pd.DataFrame(columns=self.data_types)
df.loc['folder'] = ['movement_admin', 'movement_tile', 'population_admin', 'population_tile']
df.index.names = ['Index']
self.data = df
def save_data(self, path_out):
# Save country dataframe
self.data.to_csv(path_out)
def login(self):
# Login to facebook
self.driver.get('https://www.facebook.com/login/?next=https%3A%2F%2Fwww.facebook.com%2Fgeoinsights-portal%2F')
self.driver.find_element_by_xpath('//*[@id="email"]').send_keys(self.keys[0])
self.driver.find_element_by_xpath('//*[@id="pass"]').send_keys(self.keys[1])
self.driver.find_element_by_xpath('//*[@id="loginbutton"]').click()
def add_countries(self, countries):
# Adds country IDs to the data
self.start_driver()
self.login()
for country in countries:
time.sleep(1)
ids = []
self.driver.get(
'https://www.facebook.com/login/?next=https%3A%2F%2Fwww.facebook.com%2Fgeoinsights-portal%2F')
ele = self.driver.find_element_by_xpath('//*[@id="js_3"]')
ele.send_keys(country)
time.sleep(1)
ele.send_keys(" Coronavirus Disease Prevention")
time.sleep(3)
ele.send_keys(Keys.DOWN)
while True:
text = self.driver.find_element_by_xpath('//span[contains(text(),"Search")]').text
if text == f'Search: "{country + " Coronavirus Disease Prevention"}"':
break
time.sleep(1)
ele.send_keys(Keys.ENTER)
time.sleep(3)
for dat_type in self.data_types:
elements = self.driver.find_elements_by_xpath(f'//div[contains(text(),"{dat_type}")]')
for ele in elements:
page_id = ele.find_element_by_tag_name('a').get_attribute('href').split('=')[-1]
out = self.open_and_check(f'https://www.facebook.com/geoinsights-portal/downloads/?id={page_id}',
country + " Corona")
if out:
break
ids.append(page_id)
self.data.loc[country] = ids
self.close_driver()
def download_countries(self, countries):
self.start_driver()
self.login()
time.sleep(3)
for country in countries:
print(f'{country}')
for i in self.data.loc[['folder', country]].items():
links, text = self.get_links(f'https://www.facebook.com/geoinsights-portal/downloads/?id={i[1][1]}')
print(f'Downloading {i[1][0]}')
self.download_links(links,text,f'{self.outdir}/{country}/{i[1][0]}',country)
print('')
self.driver.quit()
def download_id(self, id_web_link, country, folder_name):
self.start_driver()
self.login()
time.sleep(3)
print(f'Downloading files for {country}')
links, text = self.get_links(id_web_link)
print(f'Downloading {folder_name}')
self.download_links(links,text, f'{self.outdir}/{country}/{folder_name}', country)
print('')
self.driver.quit()
def get_links(self, path):
self.driver.get(path)
ele = self.driver.find_elements_by_tag_name('li')
links = [date.find_element_by_tag_name('a').get_attribute('href') for date in ele if len(date.text) > 0]
text = [date.text.replace('-','_').replace(' ','_') for date in ele if len(date.text) > 0]
return links, text
def download_links(self, links, text, outdir,country):
dates = [country + '_' + date + '.csv' for date in text]
self.try_mkdir_silent(f'{outdir}')
dl_links = np.array(links)[~np.isin(dates, os.listdir(f'{outdir}'))]
wait_time = 1
while len(dl_links) > 0:
for link in tqdm(dl_links):
self.driver.get(link)
time.sleep(wait_time)
wait_time += 10
self.move_most_recent_files(outdir, links,country)
dl_links = np.array(links)[~np.isin(dates, os.listdir(f'{outdir}'))]
def open_and_check(self, link, contains):
main_window = self.driver.current_window_handle
self.driver.execute_script(f'''window.open("{link}","_blank");''')
self.driver.switch_to.window(self.driver.window_handles[1])
ele = self.driver.find_elements_by_xpath(f'//*[contains(text(),"{contains}")]')
if len(ele)>0:
output = ele[0].text
else:
output = None
time.sleep(1)
self.driver.close()
self.driver.switch_to.window(main_window)
return output
######################### Moving files ##################################
def try_mkdir_silent(self,path):
# Silently making dir if it doesn't exist
try:
os.makedirs(path, exist_ok=True)
except OSError:
pass
def rename_and_move(self,old_fn: str, old_dir: str, new_fn: str, new_dir: str):
# Renaming and moving the files
os.rename(old_dir + '/' + old_fn, old_dir + '/' + new_fn)
shutil.move(old_dir + '/' + new_fn, new_dir + '/' + new_fn)
def get_new_file_name(self,file: str, country: str):
# Changing default names to COUNTRY_DATE.csv
regex = re.search(r'\d{4}[_-]\d{2}[_-]\d{2}([-_ +]\d{4})?', file).group()
date = re.sub("[^0-9a-zA-Z]+","_",regex)
return (country + '_' + date + '.csv')
def move_most_recent_files(self,outdir: str, urls: list, country: str):
'''
Get the most recent files from the download directory, rename them, and put them in the destination directory
'''
self.try_mkdir_silent(outdir)
csv_files = {}
# import pdb; pdb.set_trace()
for f in glob.glob(self.download_folder + '/*.csv'):
csv_files[f] = os.path.getctime(f)
sorted_files = [f[0] for f in sorted(csv_files.items(), key=operator.itemgetter(1), reverse=True)[:len(urls)]]
new_fns = [self.get_new_file_name(file,country) for file in sorted_files]
for i, sorted_file in enumerate(sorted_files):
self.rename_and_move(sorted_file.split('/')[-1].split('\\')[-1], self.download_folder, new_fns[i], outdir)
def remove_empty_files(self,start_dir):
for root, dirs, files in os.walk(start_dir):
for file in files:
file_path = os.path.join(root, file)
if (os.path.getsize(file_path) == 0) & (file_path.endswith('00.csv')):
os.remove(file_path)
######################### Initialize driver ##############################
def start_driver(self):
self.driver = self.get_driver(self.download_folder, headless=self.headless,
driver_path=self.driver_path + '/chromedriver')
def close_driver(self):
self.driver.quit()
def get_driver(self, download_location=None, headless=False, driver_path=None):
driver = self._get_chrome_driver(download_location, headless, driver_path)
driver.set_window_size(1400, 700)
return driver
def _get_chrome_driver(self, download_location, headless, driver_path):
chrome_options = chrome_webdriver.Options()
if download_location:
prefs = {'download.default_directory': download_location,
'download.prompt_for_download': False,
'download.directory_upgrade': True,
'safebrowsing.enabled': False,
'safebrowsing.disable_download_protection': True}
chrome_options.add_experimental_option('prefs', prefs)
if headless:
chrome_options.add_argument("--headless")
if sys.platform.startswith("win"):
driver_path += ".exe"
driver = Chrome(executable_path=driver_path, options=chrome_options)
if headless:
self.enable_download_in_headless_chrome(driver, download_location)
return driver
def enable_download_in_headless_chrome(self, driver, download_dir):
"""
there is currently a "feature" in chrome where
headless does not allow file download: https://bugs.chromium.org/p/chromium/issues/detail?id=696481
This method is a hacky work-around until chromedriver officially supports this.
Requires chrome version 62.0.3196.0 or above.
"""
# add missing support for chrome "send_command" to selenium webdriver
driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_dir}}
command_result = driver.execute("send_command", params)
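# A minimal usage sketch (all paths are illustrative assumptions; the creds
# JSON file is expected to hold a two-element list: [email, password]):
#   updater = data_updater(download_folder='/tmp/downloads',
#                          outdir='../Facebook',
#                          headless=True,
#                          creds='creds.json',
#                          driver_path='/usr/local/bin')
#   updater.add_countries(['Denmark'])
#   updater.download_countries(['Denmark'])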
|
[
"petem@heisenberg.imm.dtu.dk"
] |
petem@heisenberg.imm.dtu.dk
|
7bbb82742c7c11ef190a75dccd02818e8bd19e56
|
a51193ee48b0c358afb8af77ad37055ac358dd3b
|
/enderecos/api/serializers.py
|
85d25631450d1f9e0aa1874790b2559115be2d9f
|
[] |
no_license
|
nico-est/Pontos_Turisticos_Udemy
|
5343fc77318bfa716058e200a2538729fecaaea2
|
eaeae34d1688eea90e3029eaba04e6b5628b0928
|
refs/heads/main
| 2023-03-13T04:27:28.292146
| 2021-02-23T19:05:45
| 2021-02-23T19:05:45
| 338,313,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
from rest_framework.serializers import ModelSerializer
from enderecos.models import Endereco
class EnderecoSerializer(ModelSerializer):
class Meta:
model = Endereco
fields = '__all__'
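# Usage sketch (hypothetical view/shell code; the available fields depend on
# the Endereco model defined elsewhere):
#   serializer = EnderecoSerializer(Endereco.objects.first())
#   serializer.data  # dict of every model field, since fields = '__all__'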
|
[
"nicolas.estanislau@gmail.com"
] |
nicolas.estanislau@gmail.com
|
7cc2bcb3f739df76df023a3fac442bde1b4247b5
|
363d82529eef4a389639ebdbea734dca00ac7abd
|
/946.验证栈序列.py
|
a20efe7027935c09062b009b33ea30370cfcf12e
|
[] |
no_license
|
adsl305480885/leetcode-zhou
|
d9ac9ff391a68fb491bf3d3a5204778585fc9a40
|
79c96fc7f66ed0508351ed9300482a1619f39fb5
|
refs/heads/main
| 2023-05-03T12:26:05.218505
| 2021-05-21T10:39:30
| 2021-05-21T10:39:30
| 314,748,620
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
'''
Author: Zhou Hao
Date: 2021-03-11 09:46:23
LastEditors: Zhou Hao
LastEditTime: 2021-03-11 12:12:27
Description: file content
E-mail: 2294776770@qq.com
'''
#
# @lc app=leetcode.cn id=946 lang=python3
#
# [946] Validate Stack Sequences
#
# @lc code=start
from typing import List  # needed for the type hints below
class Solution:
# def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
# stack = []
# pop,push =0,0
# while pop < len(popped) :
# if push < len(pushed):
# stack.append(pushed[push])
# push += 1
# print(stack,popped[pop])
# while stack and stack[-1] == popped[pop]:
# stack.pop()
# if pop <len(popped)-1:
# pop+=1
# print(stack,popped[pop],'******')
# if push == len(pushed) and not stack:
# return True
# if push == len(pushed) and stack[-1] != popped[pop]:
# return False
# return True
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
# simulate the push/pop sequence with a stack
stack = []
pop = 0
for i in pushed:
stack.append(i)
# print(stack,popped[pop])
while stack and stack[-1] == popped[pop]:
# print(stack,popped[pop])
stack.pop()
# print(stack,'*****')
pop += 1
# print(stack,'\n')
return not stack
# @lc code=end
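# Worked examples (the two classic LeetCode 946 cases):
# Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1]) -> True
#   (push 1,2,3,4; pop 4; push 5; pop 5,3,2,1)
# Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2]) -> False
#   (after popping 4,3,5 the stack is [1, 2], so 1 cannot be popped before 2)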
|
[
"2294776770@qq.com"
] |
2294776770@qq.com
|
49d73ec735a98863a62138cc3f0c0a2fe49629c6
|
1de51338fbd214fe509a77f0ab71e6a9029896bd
|
/deep_disfluency/corpus/util.py
|
d29bf3630240c49f6071c0e3e9d0f73dd6557dd3
|
[
"MIT"
] |
permissive
|
askender/deep_disfluency
|
b46d71f829f819748caddec45daa865f74119ae2
|
bea8403ed954df8eadd3e2b9d98bb7c2b416a665
|
refs/heads/master
| 2020-03-31T13:09:09.287145
| 2018-10-09T12:00:40
| 2018-10-09T12:00:40
| 152,243,576
| 0
| 0
|
MIT
| 2018-10-09T12:01:22
| 2018-10-09T12:01:21
| null |
UTF-8
|
Python
| false
| false
| 46,134
|
py
|
# -*- coding: utf-8 -*-
import re
from collections import defaultdict
def add_word_continuation_tags(tags):
"""Returns list with continuation tags for each word:
<cc/> continues current dialogue act and the next word will also continue
<ct/> continues current dialogue act and is the last word of it
<tc/> starts this dialogue act tag and the next word continues it
<tt/> starts and ends dialogue act (single word dialogue act)
"""
tags = list(tags)
for i in range(0, len(tags)):
if i == 0:
tags[i] = tags[i] + "<t"
else:
tags[i] = tags[i] + "<c"
if i == len(tags) - 1:
tags[i] = tags[i] + "t/>"
else:
tags[i] = tags[i] + "c/>"
return tags
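# Worked example (derived from the logic above):
# add_word_continuation_tags(['<f/>', '<f/>', '<f/>'])
# -> ['<f/><tc/>', '<f/><cc/>', '<f/><ct/>']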
def get_tags(s, open_delim='<',
close_delim='/>'):
"""Iterator to spit out the xml style disfluency tags in a given string.
Keyword arguments:
s -- input string
"""
while True:
# Search for the next two delimiters in the source text
start = s.find(open_delim)
end = s.find(close_delim)
# We found a non-empty match
if -1 < start < end:
# Skip the length of the open delimiter
start += len(open_delim)
# Spit out the tag
yield open_delim + s[start:end].strip() + close_delim
# Truncate string to start from last match
s = s[end + len(close_delim):]
else:
return
def strip_disf_tags_from_easy_read(text):
"""List of strings (words or POS tags) without the disfluency markup
"""
words = []
for w in text.split(" "):
words.append(w[w.rfind(">") + 1:])
return words
def disf_tags_from_easy_read(text):
"""List of disfluency tags from the inline easy read marked up utterances
"""
tags = []
for w in text.split():
tags.append(w[:w.rfind(">") + 1])
return [tag.replace("_", " ") for tag in tags]
def easy_read_disf_format(words, tags):
"""Easy read style inline disfluency tagged string."""
final_tags = []
for i in range(0, len(words)):
final_tags.append("".join([tags[i].replace(" ", "_"), words[i]]))
return " ".join(final_tags)
def detection_corpus_format(uttRef, words, pos, tags, indices):
"""Replace blanks with fluent <f/> tags and outputs tag separated."""
for i in range(0, len(tags)):
if tags[i] == "":
tags[i] = "<f/>"
final_string = "\t".join(
[uttRef, indices.pop(0), words.pop(0), pos.pop(0), tags.pop(0)]) + "\n"
# debug output for the remaining columns
print("%d %d %d %d" % (len(indices), len(words), len(pos), len(tags)))
print(indices)
print(words)
print(pos)
print(tags)
for i in range(0, len(tags)):
final_string += "\t".join(["", indices[i],
words[i], pos[i], tags[i]]) + "\n"
return final_string.rstrip("\n")
def detection_corpus_format_from_easy_read(easyReadString):
"""Converts the easy read format to the detection corpus format"""
lines = [x.split(",") for x in easyReadString.split("\n")]
uttRef = lines[0][0]
wordstring = lines[0][1]
posstring = lines[1][1]
indexstring = lines[2][1]
tags = disf_tags_from_easy_read(wordstring)
words = strip_disf_tags_from_easy_read(wordstring)
pos = strip_disf_tags_from_easy_read(posstring)
indices = indexstring.split(" ")
return detection_corpus_format(uttRef, words, pos, tags, indices)
def easy_read_format_from_detection_corpus(detectionString):
"""The inverse function to detectionCorpusFormatStringFromEasyReadFormat.
Mainly for checking consistency at corpus creation time.
"""
lines = detectionString.rstrip("\n").split("\n")
uttref = lines[0].split("\t")[0]
lines[0] = lines[0].replace(uttref, "")
indices = [line.split("\t")[1] for line in lines]
words = [line.split("\t")[2] for line in lines]
pos = [line.split("\t")[3] for line in lines]
tags = [line.split("\t")[4].replace("<f/>", "") for line in lines]
final_string = uttref + "," + easy_read_disf_format(words, tags) + '\n'
final_string += "POS," + easy_read_disf_format(pos, tags) + "\n"
final_string += "REF," + " ".join(indices)
return final_string
def get_edit_terms_from_easy_read(text, postext):
"""Outputs tuples of each string of consecutive edit terms and their POS"""
words = strip_disf_tags_from_easy_read(text)
pos = strip_disf_tags_from_easy_read(postext)
tags = disf_tags_from_easy_read(text)
current_edit_term = ""
current_pos_edit_term = ""
# a list of tuples of (edit term strings, POS tags of that string)
edit_terms = []
for t in range(0, len(tags)):
tag = tags[t]
if "<e" in tag or "<i" in tag:
current_edit_term += words[t] + " "
current_pos_edit_term += pos[t] + " "
elif not current_edit_term == "": # we've built up a string, save it
edit_terms.append(
(current_edit_term.strip(), current_pos_edit_term.strip()))
current_edit_term = ""
current_pos_edit_term = ""
if not current_edit_term == "": # flush
edit_terms.append(
(current_edit_term.strip(), current_pos_edit_term.strip()))
return edit_terms
def verify_disfluency_tags(tags, normalize_ID=False):
"""Check that the repair tags sequence is valid.
Keyword arguments:
normalize_ID -- boolean, whether to convert the
repair ID numbers to be derivable from
their unique RPS position in the utterance.
"""
id_map = dict() # map between old ID and new ID
# in first pass get old and new IDs
for i in range(0, len(tags)):
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[i])
if rps:
id_map[rps[0][rps[0].find("=") + 2:-3]] = str(i)
# key: old repair ID, value, list [reparandum,interregnum,repair] all True
# when repair is all there
repairs = defaultdict(list)
for r in id_map.keys():
repairs[r] = [None, None, None] # three valued None<False<True
# print repairs
# second pass verify the validity of the tags and (optionally) modify the
# IDs
for i in range(0, len(tags)): # iterate over all tag strings
new_tags = []
if tags[i] == "":
all([repairs[ID][2] or repairs[ID] == [None, None, None]
for ID in repairs.keys(
)]), "Unresolved repairs at fluent tag\n\t" + str(repairs)
# iterate over all tags in this tag string
for tag in get_tags(tags[i]):
# print i, tag
if tag == "<e/>":
new_tags.append(tag)
continue
ID = tag[tag.find("=") + 2:-3]
if "<rms" in tag:
assert repairs[ID][0] is None, \
"reparandum started parsed more than once " + ID
assert repairs[ID][1] is None, \
"reparandum start again during interregnum phase " + ID
assert repairs[ID][2] is None, \
"reparandum start again during repair phase " + ID
repairs[ID][0] = False # set in progress
elif "<rm " in tag:
assert repairs[ID][0] is not None, \
"mid reparandum tag before reparandum start " + ID
assert repairs[ID][1] is None, \
"mid reparandum tag in an interregnum phase or beyond " + ID
assert repairs[ID][2] is None, \
"mid reparandum tag in a repair phase or beyond " + ID
elif "<i" in tag:
assert repairs[ID][0] is not None, \
"interregnum start before reparandum start " + ID
assert repairs[ID][2] is None, \
"interregnum in a repair phase " + ID
if repairs[ID][1] is None: # interregnum not reached yet
repairs[ID][0] = True # reparandum completed
repairs[ID][1] = False # interregnum in progress
elif "<rps" in tag:
assert repairs[ID][0] is not None, \
"repair start before reparandum start " + ID
assert repairs[ID][1] != True, \
"interregnum over before repair start " + ID
assert repairs[ID][2] is None, \
"repair start parsed twice " + ID
repairs[ID][0] = True # reparanudm complete
repairs[ID][1] = True # interregnum complete
repairs[ID][2] = False # repair in progress
elif "<rp " in tag:
assert repairs[ID][0] == True, \
"mid repair word start before reparandum end " + ID
assert repairs[ID][1] == True, \
"mid repair word start before interregnum end " + ID
assert repairs[ID][2] == False, \
"mid repair tag before repair start tag " + ID
elif "<rpn" in tag:
# make sure the rps is order in tag string is before
assert repairs[ID][0] == True, \
"repair end before reparandum end " + ID
assert repairs[ID][1] == True, \
"repair end before interregnum end " + ID
assert repairs[ID][2] == False, \
"repair end before repair start " + ID
repairs[ID][2] = True
# do the replacement of the tag's ID after checking
new_tags.append(tag.replace(ID, id_map[ID]))
if normalize_ID:
tags[i] = "".join(new_tags)
assert all([repairs[ID][2] for ID in repairs.keys()]
), "Unresolved repairs:\n\t" + str(repairs)
def orthography_normalization(word, pos, spelling_dict, lang='en'):
"""Converts the spelling from the transcripts into
one that is consistent for disfluency detection.
Filled pauses are treated specially to make
sure the POS tags are correct.
"""
if lang == 'en':
um = "um"
uh = "uh"
elif lang == 'de':
um = "ähm"
uh = "äh"
else:
raise NotImplementedError(
'No filled pause normalization for lang: ' + lang)
for key in spelling_dict.keys():
if re.match(key, word):
word = spelling_dict[key]
# make sure filled pauses have the right POS tags
if word in [uh, um]:
pos = 'UH'
break
return word, pos
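# Worked example (the spelling_dict here is a hypothetical value; the real
# dictionary is supplied by the caller):
# orthography_normalization('uhm', 'NN', {'^uh+m+$': 'um'})
# -> ('um', 'UH'), since filled pauses are re-tagged as UH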
def clean(myString):
myString = re.sub(r"([\+/\}\[\]]\#|\{\w)", "", myString)
elicitcharacters = "\#)(+\/[]_><,.\"\*%!=}{"
mynewString = ""
for char in myString:
if not char in elicitcharacters:
mynewString += char
if mynewString == "":
return None
else:
return mynewString.lower()
def parse_list(string):
# returns list [1,2] from "[1,2]"
chars = string
Number1 = ""
Number2 = ""
x = False
y = True
for char in chars:
if char == " ":
continue
if char == "[":
x = True
elif char == ",":
x = False
y = True
elif char == "]":
y = False
elif x == True:
Number1 += char
elif y == True:
Number2 += char
return [int(Number1), int(Number2)]
def remove_repairs(tags, repairIDs):
"""Return a list of tags without the repairs with IDs in repairIDs."""
for t in range(0, len(tags)):
new_tag = tags[t]
for repair_class in ["rms", "rm", "i", "rps", "rp",
"rpnsub", "rpndel", "rpnrep"]:
for repairID in repairIDs:
if repair_class == "i":
interreg = re.findall(
'<{} id="{}"/>'.format(repair_class, repairID),
new_tag)
new_tag = new_tag.replace(
'<{} id="{}"/>'.format(repair_class, repairID), "")
if (repair_class == "i" and len(interreg) > 0) \
and not ("<e" in new_tag or "<i" in new_tag):
# assure edit terms are maintained
new_tag += "<e/>"
tags[t] = new_tag
return tags
def find_repair_end(repair, disfluencyTagList):
"""Given a repair object and a disfluency tag list,
find the repair word and tag it in place in the list.
"""
# print "searching for repair in same utt", repair.repairID
# loop stops at first element that's not an <i>
for B in range(len(disfluencyTagList) - 1, -1, -1):
if str(repair.repairID) in disfluencyTagList[B]: # gets deletes/subs
repair_class = repair.classify()
disfluencyTagList[B] = disfluencyTagList[B]\
.replace('<rp id="{}"/>'.format(repair.repairID), "")
if repair_class == "del" and not \
'<rps id="{}"/>'.format(repair.repairID) \
in disfluencyTagList[B]:
disfluencyTagList[B] = disfluencyTagList[B] + \
'<rps id="{}"/>'.format(repair.repairID)
disfluencyTagList[B] = disfluencyTagList[B] + \
'<rpn{} id="{}"/>'.format(repair_class, repair.repairID)
repair.complete = True
# print "completing" + str(repair.repairID)
return True
return False
def find_repair_ends_and_reclassify(problem_rpns, tag_list, word_list,
search_start, partial_disallowed=False):
"""Backwards search to find a possible repair end rpn tag and
re-classify its type if needs be.
Return the repair ends successfully found.
problem_rpns :: list of repair end tags (non-deletes) which are to
be moved back before an edit term.
tag_list :: the disfluency tags for utterance
word_list :: the words for the utterance
search_start :: position in utterance where backwards search starts
non_partial :: repair end cannot be a partial word
"""
resolved = []
unresolved = []
for i in range(search_start, -1, -1):
if "<e/>" in tag_list[i]:
continue # only allow non-edit term words
if partial_disallowed and word_list[i][-1] == "-":
continue # in partial_disallowed setting, no partial word rpns
# if we have got here we may have a possible repair end word
for rpn in problem_rpns:
if rpn in resolved or rpn in unresolved:
continue
rpMid = rpn.replace("rpnsub", "rp").replace("rpnrep", "rp")
rpStart = rpn.replace("rpnsub", "rps").replace("rpnrep", "rps")
# a legit rp tag, can be the repair end
if rpMid in tag_list[i] or rpStart in tag_list[i]:
# get rid of rp mid tags
tag_list[i] = tag_list[i].replace(rpMid, "")
tag_list[i] = tag_list[i] + rpn # add repair end tag
# reclassify it as either repeat or substitution by iterating
# up to this current word
rmMid = rpn.replace("rpnsub", "rm").replace("rpnrep", "rm")
rmStart = rpn.replace("rpnsub", "rms").replace("rpnrep", "rms")
reparandum = []
repair = []
for check in range(0, i + 1):
if rmStart in tag_list[check] or rmMid in tag_list[check]:
reparandum.append(word_list[check])
if rpStart in tag_list[check] or rpMid in tag_list[check]:
repair.append(word_list[check])
if rpn in tag_list[check]:
repair.append(word_list[check])
# it was marked as a repeat, change if no longer a
# repeat
if "rep" in rpn:
if not reparandum == repair:
tag_list[check] = tag_list[check].replace(
rpn, rpn.replace("rpnrep", "rpnsub"))
# else if marked as a sub, change if it is now a repeat
elif reparandum == repair:
tag_list[i] = tag_list[i].replace(
rpn, rpn.replace("rpnsub", "rpnrep"))
break
resolved.append(rpn) # this is a resolved repair end
return resolved
def find_repair_end_in_previous_utts(repair, overallTagList, uttlist):
# print "searching back for repair", repair.repairID
testTagList = None
testUttCaller = None
search = 0
# backwards search if not found- needs to be called at the end too as a
# flush
while not repair.complete == True:
search += 1
if search >= len(overallTagList):
print('Repair not found!')
raise Exception('Repair not found')
# print "search " + str(search)
# get the tag list *search* utterances back
testTagList = overallTagList[-search]
testUttCaller = uttlist[-search][2]
# continue backtracking if not the caller
if not testUttCaller == repair.caller:
continue
# search back in the previous utterance
repair.complete = find_repair_end(repair, testTagList)
# list mutable, so will change this here in place
overallTagList[-search] = testTagList
return
def find_delete_interregna_and_repair_onsets(tag_list, problem_rpns_del,
interreg_start):
"""problem_rpns_del :: list of delete repairs (consisting of their
identifying <rpndel id="x"/>.
tag_list :: list of disfluency tags where reparanda of those repairs
is marked.
interreg_start :: int, where the interregnum is known to start
for these tags
For each repair in problems_rpns_del mark the interregnum and
repair onset/repair end delete word for that repair if possible.
Return a list with those repairs with successfully resolved
interregna and repair stats.
"""
interreg_index_dict = defaultdict(
list) # key repair onset tag, value list of indices for interregnum
resolved = []
for i in range(interreg_start, len(tag_list)):
tag = tag_list[i]
for r in problem_rpns_del:
if r in resolved:
continue
            # interregnum could still be in there for the tag
if r.replace("rpndel", "i") in tag:
# remove as repair start may not be found
tag_list[i] = tag_list[i].replace(r.replace("rpndel", "i"), "")
interreg_index_dict[r].append(i)
elif "<e" in tag: # not marked as an interregnum for this repair
interreg_index_dict[r].append(i)
else:
tag_list[i] += r.replace("rpndel", "rps") + r
# print "interregs found for r",r,interreg_index_dict[r]
# if rps found, mark its interregna
for interreg in interreg_index_dict[r]:
tag_list[interreg] = r.replace(
"rpndel", "i") + tag_list[interreg]
# tag_list[interreg] =
# tag_list[interreg].replace("<e/>","")#turning into
# interregnum
resolved.append(r)
return resolved
def find_interregna_and_repair_onsets(tag_list, problem_rps, interreg_start,
word_list):
"""For each repair in problems_rps mark the interregnum and
repair onset/repair end delete word for that repair if possible.
Return a list with those repairs with successfully
resolved interregna and repair stats.
problem_rps :: list of repair ends (consisting of
their identifying <rps id="x"/>.
tag_list :: list of disfluency tags where reparanda of those
repairs is marked.
interreg_start :: int, where the interregnum is known to start
for these tags
"""
# key repair onset tag, value a list of indices for the interregnum
interreg_index_dict = defaultdict(list)
resolved = []
for i in range(interreg_start, len(tag_list)):
tag = tag_list[i]
for r in problem_rps:
if r in resolved:
continue
            # interregnum could still be in there for the tag
if r.replace("rps", "i") in tag:
# remove as repair start may not be found
tag_list[i] = tag_list[i].replace(r.replace("rps", "i"), "")
interreg_index_dict[r].append(i)
elif "<e" in tag: # not marked as an interregnum for this repair
interreg_index_dict[r].append(i)
elif r.replace("rps", "rp") in tag or r.replace("rps", "rpnsub")\
in tag or r.replace("rps", "rpnrep") in tag:
tag_list[i] = tag_list[i].replace(
r.replace("rps", "rp"), "") # first remove any rps
# if "<rps" in tag_list[i] : continue #don't add if embedded
tag_list[i] = r + tag_list[i] # add the repair start
# now check if classification has changed
reparandum = []
repair = []
for check in range(0, len(word_list)):
if r.replace("rps", "rms") in tag_list[check] or \
r.replace("rps", "rm") in tag_list[check]:
reparandum.append(word_list[check])
if r in tag_list[check] or r.replace("rps", "rp") \
in tag_list[check]:
repair.append(word_list[check])
if r.replace("rps", "rpnrep") in tag_list[check]:
repair.append(word_list[check])
if not reparandum == repair:
tag_list[i] = tag_list[i].replace(
r.replace("rps", "rpnrep"),
r.replace("rps", "rpnsub"))
break
elif r.replace("rps", "rpnsub") in tag_list[check]:
repair.append(word_list[check])
if reparandum == repair:
tag_list[i] = tag_list[i].replace(
r.replace("rps", "rpnsub"),
r.replace("rps", "rpnrep"))
break
# print "interregs found for r",r,interreg_index_dict[r]
# if rps found, mark its interregna
for interreg in interreg_index_dict[r]:
tag_list[interreg] = r.replace(
"rps", "i") + tag_list[interreg]
# tag_list[interreg] =
# tag_list[interreg].replace("<e/>","")#turning into
# interregnum
resolved.append(r)
return resolved
def remove_non_edit_interregna(tags, words, problem_interreg_IDs):
"""Where an interregnum is marked but is not an edit term,
convert the interregnum to <rp(s) repair tags.
"""
phase_dict = dict() # repair mapped to the phase it is currently in
# list of 2 lists with reparandum and repair words
phase_words_dict = defaultdict(list)
for p in problem_interreg_IDs:
phase_dict[p] = "o" # initialize as o for original utterance
phase_words_dict[p] = [[], []]
for t in range(0, len(tags)):
for repairID in problem_interreg_IDs:
if '<rps id="{}"/>'.format(repairID) in tags[t]:
# repair phase already reached, replace start with rps
if phase_dict[repairID] == "rp":
tags[t] = tags[t].replace(
'<rps id="{}"/>'.format(repairID),
'<rp id="{}"/>'.format(repairID))
else:
phase_dict[repairID] = "rp"
if '<rms id="{}"/>'.format(repairID) in tags[t]:
phase_dict[repairID] = "rm"
# reparandum not reached yet or finished
if phase_dict[repairID] == "o":
continue
if '<i id="{}"/>'.format(repairID) in tags[t]:
if not "<e" in tags[t]:
# repair phase not reached yet, repair onset
if phase_dict[repairID] == "rm":
tags[t] = tags[t].replace(
'<i id="{}"/>'.format(repairID),
'<rps id="{}"/>'.format(repairID))
phase_dict[repairID] = "rp"
# repair phase reached, just repair word
elif phase_dict[repairID] == "rp":
if not '<rps id="{}"/>'.format(repairID) in tags[t]:
tags[t] = tags[t].replace(
'<i id="{}"/>'.format(repairID),
'<rp id="{}"/>'.format(repairID))
else:
# repair onset word from above, just get rid of
# interregnum
tags[t] = tags[t].replace(
'<i id="{}"/>'.format(repairID), "")
else:
                # while potentially a good interregnum,
# not if repair phase has already been reached,
# so leave it as a plain edit term
if phase_dict[repairID] == "rp":
tags[t] = tags[t].replace(
'<i id="{}"/>'.format(repairID), "")
if phase_dict[repairID] == "rm" and not '<i id="{}"/>'.format(
repairID) in tags[t] and not "<e/>" in tags[t]:
phase_words_dict[repairID][0].append(words[t])
if phase_dict[repairID] == "rp" and not "<e/>" in tags[t] and \
not '<rpndel id="{}"/>'.format(repairID) in tags[t]:
phase_words_dict[repairID][1].append(words[t])
# reclassify if end of repair
if '<rpndel id="{}"/>'.format(repairID) in tags[t]:
# either a rep or sub, replace delete with appropriate class
if len(phase_words_dict[repairID][1]) > 0:
if phase_words_dict[repairID][0] + [words[t]] == \
phase_words_dict[repairID][1]:
tags[t] = tags[t].replace(
'<rpndel id="{}"/>'.format(repairID),
'<rpnrep id="{}"/>'.format(repairID))
else:
tags[t] = tags[t].replace(
'<rpndel id="{}"/>'.format(repairID),
'<rpnsub id="{}"/>'.format(repairID))
# either way get rid of rp from above
tags[t] = tags[t].replace('<rp id="{}"/>'.format(repairID), "")
phase_dict[repairID] = "o"
elif '<rpnrep id="{}"/>'.format(repairID) in tags[t]:
# if not the same, change to sub, else leave
if phase_words_dict[repairID][0] != \
phase_words_dict[repairID][1]:
tags[t] = tags[t].replace('<rpnrep id="{}"/>'.format(
repairID), '<rpnsub id="{}"/>'.format(
repairID)).\
replace('<rp id="{}"/>'.
format(repairID), "")
# either way get rid of rp from above
tags[t] = tags[t].replace('<rp id="{}"/>'.
format(repairID), "")
phase_dict[repairID] = "o"
elif '<rpnsub id="{}"/>'.format(repairID) in tags[t]:
# if the same, change to rep, else leave
if phase_words_dict[repairID][0] == \
phase_words_dict[repairID][1]:
tags[t] = tags[t].replace(
'<rpnsub id="{}"/>'.format(repairID),
'<rpnrep id="{}"/>'.format(repairID))
# either way get rid of rp from above
tags[t] = tags[t].replace('<rp id="{}"/>'.format(repairID), "")
phase_dict[repairID] = "o"
return
def remove_partial_words(tagList, wordsList, POSList, refList):
"""Consistent with the standard switchboard disfluency detection task,
remove partial words,
and any repairs whose reparanda consist solely of partial words.
"""
repairsToRemoveNoReparandumStart = []
repairsToRemoveNoRepairStart = []
repairsToRemoveNoRepairEnd = []
repairsToRemove = []
wordsToRemove = []
for w in range(0, len(wordsList)):
word = wordsList[w]
# print word, w
if word[-1] == "-":
wordsToRemove.append(w)
if '<rms' in tagList[w]:
# reparandum start cut off, store to see if it can be resolved
# after this point
problem_rms = re.findall(
"<rms id\=\"[0-9]+\"\/>", tagList[w], re.S)
for r in problem_rms:
test = r.replace("rms", "rps")
repairsToRemoveNoReparandumStart.append(test)
if '<rps' in tagList[w]:
# repair start cut off
problem_rps = re.findall(
"<rps id\=\"[0-9]+\"\/>", tagList[w], re.S)
repairsToRemoveNoRepairStart.extend(problem_rps)
problem_rpndels = re.findall(
"<rpndel id\=\"[0-9]+\"\/>", tagList[w], re.S)
# if delete, try to find a non-partial word after this one,
# shift rps+rpndel to this word if so
for r in problem_rpndels:
test = r.replace("rpndel", "rps")
for n in range(w + 1, len(wordsList)):
if "<rps" in tagList[n]:
break # don't make it an embedded repair
if not wordsList[n][-1] == "-" and \
not "<e/>" in tagList[n]:
tagList[n] = test + r + tagList[n]
repairsToRemoveNoRepairStart.remove(test)
break
if "<rpn" in tagList[w]:
# repair end is being cut off, see if it can be moved back, and
# reclassify it if needs be
problem_rpns = [
rpn for rpn in get_tags(tagList[w]) if "<rpn" in rpn]
resolved = find_repair_ends_and_reclassify(
problem_rpns, tagList, wordsList, w - 1,
partial_disallowed=True)
for r in problem_rpns:
test = r.replace("rpn", "rps")
if not r in resolved:
repairsToRemoveNoRepairEnd.append(test)
# all problems resolved here, apart from interregnum onsets, which
# should not cause problems
continue
# get here we have non-partial (complete) words,
# see if problem repairs can be resolved
# try to shift repairs missing reparandum start forward
if '<rm ' in tagList[w]:
# print "repairs to remove no rm", repairsToRemoveNoReparandumStart
rm = re.findall("<rm id\=\"[0-9]+\"\/>", tagList[w], re.S)
for r in rm:
test = r.replace("rm", "rps")
# we have a legit rm word
if test in repairsToRemoveNoReparandumStart:
# print "deleting rm for rms"
# shift the rms tag along one
tagList[w] = tagList[w].replace(r, r.replace("rm", "rms"))
# print tagList[w]
repairsToRemoveNoReparandumStart.remove(test)
# try to shift repair phases with their repair onset word missing along
# one
if '<rp ' in tagList[w]:
rp = re.findall("<rp id\=\"[0-9]+\"\/>", tagList[w], re.S)
for r in rp:
test = r.replace("rp", "rps")
# we have a legit rp word
if test in repairsToRemoveNoRepairStart:
if "<rps" in tagList[w]:
# do not allow embedded repairs at all
repairsToRemove.append(test)
continue
# replace the rp tag with the rps one along one
tagList[w] = tagList[w].replace(r, test)
repairsToRemoveNoRepairStart.remove(test)
# try to shift repair phases forward
if '<rpnsub ' in tagList[w] or '<rpnrep ' in tagList[w]:
            # substitution or repeat repair
rpn = re.findall("<rpnrep id\=\"[0-9]+\"\/>", tagList[w], re.S) + \
re.findall(
"<rpnsub id\=\"[0-9]+\"\/>", tagList[w], re.S)
for r in rpn:
test = r.replace("rpnrep", "rps").replace("rpnsub", "rps")
# we have a legit rp word
if test in repairsToRemoveNoRepairStart:
if "<rps" in tagList[w]:
# do not allow embedded repairs at all
repairsToRemove.append(test)
continue
# make this word the rps one, keeping the end the same
tagList[w] = test + tagList[w] # add the repair start
repairsToRemoveNoRepairStart.remove(test)
repairsToRemove.extend(repairsToRemoveNoReparandumStart +
repairsToRemoveNoRepairStart +
repairsToRemoveNoRepairEnd)
repairIDs = []
for problem in repairsToRemove:
repairID = problem[problem.find("=") + 2:-3]
repairIDs.append(repairID)
tagList = remove_repairs(tagList, repairIDs)
i = len(tagList) - 1
while i >= 0:
if i in wordsToRemove:
del tagList[i]
del wordsList[i]
del POSList[i]
del refList[i]
i -= 1
return tagList, wordsList, POSList, refList
def classify_repair(reparandum, repair, continuation):
"""min edit distance string aligner for repair-> reparandum
Creates table of all possible edits, only considers the paths
from i,j=0 to i=m, j= n
returns mapping from i to j with the max alignment- problem,
there may be several paths. Weights:
rep(string1,string2) 0
repPartial(string1,string2) 1 j- john
repReversePartial(string1,string2) 1 john j-
samePOS(string1,string2) 2
insertIntoReparandum(eps,string) 4
deleteFromReparandum(string1,eps) 4
samePOSclass(string1,string2) 5
    arbitrarySubstitution(string1,string2) 7
"""
# we have the normal del, insert and subs for min edit distance
# we only need define the sub relation for the special cases
# pointers
left = "<"
up = "^"
diag = "\\"
m = len(reparandum)
n = len(repair)
reparandum = [("", "")] + list(reparandum) # add the empty strings
repair = [("", "")] + list(repair)
# print "initial:"
# print reparandum
# print repair
# 2 tuples (word,POS) #initial = the table's content initialised as
# [i,j,"",currentScore,""]
def sub(source, goal, initial):
if source[0] == goal[0]:
return initial[0:2] + ["REP"] + [initial[3], diag] # NO COST
elif source[0][-1] == "-" and source[0][:-1] in goal[0]:
return initial[0:2] + ["REP_complete"] + [initial[3] + 1, diag]
elif goal[0][-1] == "-" and goal[0][:-1] in source[0]:
return initial[0:2] + ["REP_partial"] + [initial[3] + 1, diag]
elif source[1] == goal[1]:
return initial[0:2] + ["SUB_POS"] + [initial[3] + 2, diag]
elif source[1][0] == goal[1][0]:
return initial[0:2] + ["SUB_POS_CLASS"] + [initial[3] + 5, diag]
else:
return initial[0:2] + ["S_ARB"] + [initial[3] + 7, diag]
    def delete(source, initial):
        category = "DEL"
        if source[0][-1] == "-":
            category += "_partial"
        # use the computed category (the original hard-coded "DEL")
        return initial[0:2] + [category] + [initial[3] + 4, up]
    def insert(goal, initial):
        category = "INS"
        if goal[0][-1] == "-":
            category += "_partial"
        # use the computed category (the original hard-coded "INS")
        return initial[0:2] + [category] + [initial[3] + 4, left]
    # initialisation of axes in table, hash from number to number to list (cell)
D = [] # the cost table
ptr = [] # the pointer table
for i in range(0, m + 1):
D.append([0] * (n + 1))
ptr.append([[]] * (n + 1)) # these are mutable, just dummies
# defaultdict(defaultdict(list))
# the pointer table with a list of (pointer,relation) pairs
# populate each of the table axes
D[0][0] = 0
j = 0
for i in range(1, m + 1):
a = delete(reparandum[i], [i, j, "", D[i - 1][j], ""])
D[i][j] = a[3] # delete cost
ptr[i][j] = [(a[-1], a[2])] # delete type
i = 0
for j in range(1, n + 1):
a = insert(repair[j], [i, j, "", D[i][j - 1], ""])
D[i][j] = a[3] # insert cost
ptr[i][j] = [(a[-1], a[2])] # insert type
# for i in range(0,m+1):
# print D[i]
# for i in range(0,m+1):
# print ptr[i]
# main recurrence relation algorithm
for i in range(1, m + 1):
for j in range(1, n + 1):
# print "%%%%%%%"
# print i
# print j
deltest = delete(reparandum[i], [i, j, "", D[i - 1][j], ""])
# print deltest
instest = insert(repair[j], [i, j, "", D[i][j - 1], ""])
# print instest
subtest = sub(
reparandum[i], repair[j], [i, j, "", D[i - 1][j - 1], ""])
# print subtest
# print "%%%%%%%"
# get the min cost set
mincostset = set()
mincostset.add(tuple(deltest))
mincost = deltest[-2]
tests = [instest, subtest] # check the others
for t in tests:
if t[-2] < mincost:
mincost = t[-2]
mincostset = set()
mincostset.add(tuple(t))
elif t[-2] == mincost:
mincostset.add(tuple(t))
# add the pointers and their alignments
ptr[i][j] = []
for a in mincostset:
# print a
ptr[i][j].append((a[-1], a[2]))
D[i][j] = mincost
# print the optimal alignment(s) backtrace-
# there should only be one given the weights as
# we shouldn't allow an ins+del to beat an arbsub
    # return a list of the alignments
# gets them backwards then returns the reverse
# print "cost = " + str(D[m][n])
# for i in range(0,m+1):
# print D[i]
# for p in range(0,m+1):
# print ptr[p]
# return all and rank by best first approach
# if there is a branch, follow and pop the first pointer, effectively
# removing the path
def backtrace(D, ptr, i, j, mymap, mymaps):
if i == 0 and j == 0: # should always get there directly
mymaps.append(mymap)
return
arrow = ptr[i][j][0][0] # get the first one
alignment = ptr[i][j][0][1]
score = D[i][j]
if len(ptr[i][j]) > 1: # more than one!
del ptr[i][j][0] # remove it before copying and recursing
#mymapcopy = list(mymap)
backtrace(D, ptr, i, j, list(mymap), mymaps)
#ptr[i][j] = filter(lambda x: not x[0] == "\\", ptr[i][j])
# coarse approximation
mymap.insert(
0, tuple([max(0, i - 1), max(0, j - 1), alignment, score]))
if arrow == "\\":
backtrace(D, ptr, i - 1, j - 1, mymap, mymaps)
elif arrow == "^":
backtrace(D, ptr, i - 1, j, mymap, mymaps)
elif arrow == "<":
backtrace(D, ptr, i, j - 1, mymap, mymaps)
def rank(mymaps, start, n):
tail = []
for j in range(start, n):
bestscores = []
if len(mymaps) == 1:
return mymaps + tail
# should this recurse to the last mapping to j (i.e. highest value
# for i)? yes
for mymap in mymaps:
for mapping in mymap:
if mapping[1] == j:
bestscore = mapping[3]
elif mapping[1] > j:
break
bestscores.append(bestscore) # should always get one!
best = min(bestscores)
# print "best"
# print best
# maintain all the best for further sorting; separately sort the
# tail?
i = 0
a = 0
while i < len(bestscores):
# print bestscores[i]
if bestscores[i] > best:
tail.append(list(mymaps[a])) # bad score
del mymaps[a]
else:
a += 1
i += 1
if len(tail) > 0:
tail = rank(tail, j, n) # recursively sort the tail
# print "warning no difference!!"
return mymaps # if no difference just return all
mymaps = []
mymap = []
backtrace(D, ptr, m, n, mymap, mymaps)
if len(mymaps) > 1:
# print "ranking"
# print len(mymaps)
# print mymaps
# sorts the list by best first as you pass left to right in the repair
mymaps = rank(mymaps, 0, n)
# for mapping in mymaps:
# print mapping
# print "returning:"
# print mymaps[0]
return mymaps[0] # only returns top, can change this
def graph_viz_repair(maps, reparandum, repair, continuation):
"""Returns a graph viz .dot input file for a
digraph that can be rendered by graphviz
"""
assert isinstance(reparandum, list)
assert isinstance(repair, list)
assert isinstance(continuation, list)
digraphInit = """digraph Alignment {\n
rankdir=LR;\n
node[color=white]\n;
"""
reparandumClusterInit = """subgraph cluster_reparandum {\n
label = "reparandum";\n
style = "invisible";\n
node [color=white];\n
edge[weight=5,constrained=true];\n"""
repairClusterInit = """subgraph cluster_repair{\n
label = "repair";\n
style = "invisible";\n
node [color=white];\n
edge[weight=5,constrained=true];\n"""
reparandumIndex = 0
repairIndex = 0
reparandumNodes = ""
reparandumSequence = ""
if len(repair) == 0:
if len(continuation) == 0:
raw_input("no continuation for rep in classify")
repair = [continuation[0]] # add the first one
for i in range(len(reparandum)):
reparandumSequence += str(i)
reparandumNodes += str(i) + \
"[label=\"" + reparandum[i][0].lower() + "\"];\n"
if i < len(reparandum) - 1:
reparandumSequence += " -> "
else:
reparandumSequence += ";"
repairNodes = ""
repairSequence = ""
for i in range(len(repair)):
repairSequence += "r" + str(i)
repairNodes += "r" + \
str(i) + "[label=\"" + repair[i][0].lower() + "\"];\n"
if i < len(repair) - 1:
repairSequence += " -> "
else:
repairSequence += ";"
# if repeats or subs, they need the same rank, otherwise deletes a bit
# tricky
ranks = ""
alignments = ""
for alignment in maps:
if not alignment[2] == "DEL" and not alignment[2] == "INSERT":
ranks += """{rank="same";""" + \
str(alignment[0]) + "; r" + str(alignment[1]) + "}\n"
alignments += str(alignment[0]) + " -> " + "r" + str(alignment[1]) + \
"""[label=\"""" + \
alignment[2].lower() + """\",color=red,dir=back];\n"""
""" #aim is to produce something in this format:
digraph Alignment {
compound=true
rankdir=LR;
node[color=white];
{rank="same";0; r0}
{rank="same";1; r4}
{rank="same";2; r5}
subgraph cluster_reparandum {
label = "reparandum";
    style=invisible;
node [color=white];
edge[weight=5,constrained=true];
0[label="john"];
1[label="likes"];
2[label="mary"];
edge[weight=15];
0 -> 1 -> 2;
}
subgraph cluster_repair{
label = "repair";
style=invisible;
node [color=white];
edge[weight=5,constrained=true];
r0[label="john"];
r1[label="really"];
r2[label="really"];
r3[label="really"];
r4[label="loves"];
r5[label="mary"];
edge[weight=15];
r0 -> r1 -> r2 -> r3 -> r4 -> r5;
}
edge[constrained=false]
0 -> r0[label="rep",color=red,dir=back];
0 -> r1[label="insert",color=red,dir=back];
0 -> r2[label="insert",color=red,dir=back];
0 -> r3[label="insert",color=red,dir=back];
1 -> r4[label="sublexical",color=red,dir=back];
2 -> r5[label="rep",color=red,dir=back];
}
"""
finalResult = digraphInit + ranks + reparandumClusterInit + \
reparandumNodes + "edge[weight=15];\n" + reparandumSequence + "\n}\n\n"\
+ repairClusterInit + repairNodes + \
"edge[weight=15];\n" + repairSequence + "\n}\n\n" + \
"edge[constrained=false]" + alignments + "\n}\n"
return finalResult
if __name__ == '__main__':
#s = SelfRepair()
reparandum = [("there", "EX"), ("were", "VBD")]
repair = [("they", "PRP")]
continuation = [("a", "DT")]
#repair = [("You","NNP"),("really","RB"),("like","VP"),("him","NP")]
#reparandum = [("Y-","NNP"),("like","VP"),("john","NN")]
# repair = [("Y-","NNP"),("like","VP"),("and","cc"),("I","RB"),
# ("like","VP"),("I","RB"),("like","VP"),("john","NN")]
graph_viz_repair(classify_repair(
reparandum, repair, [("", "")]), reparandum, repair, continuation)

# ===== /skued/voigt.py | repo: KOLANICH-physics/scikit-ued | license: MIT =====
# -*- coding: utf-8 -*-
"""
Voigt and pseudo-voigt curves, as well as related Gaussian and Lorentzian functions
"""
from functools import lru_cache
import numpy as np
from numpy import pi
def gaussian(coordinates, center, fwhm = None, std = None):
"""
Unit integral Gaussian function.
Parameters
----------
coordinates : ndarray or list of ndarrays
Can be either a list of ndarrays, as a meshgrid coordinates list, or a
single ndarray for 1D computation
center : array_like
Center of the gaussian. Should be the same shape as `coordinates.ndim`.
fwhm : float or None, optional
Full-width at half-max of the function. Either `std` or `fwhm` must be provided.
std : float or None, optional
Standard deviation of the function. Either `std` or `fwhm` must be provided.
Returns
-------
out : ndarray
Gaussian function of unit integral.
Raises
------
ValueError : If fwhm and std are not provided.
Notes
-----
In the case where both `std` and `fwhm` are given, `fwhm` takes precedence.
Example
-------
>>> import numpy as np
>>> from skued import gaussian
>>>
>>> span = np.arange(-10, 10, 0.1)
>>> xx, yy = np.meshgrid(span, span)
>>> center = [0,0]
>>> g = gaussian( coordinates = [xx,yy], center = [0,0], std = 1)
>>> g.shape == xx.shape #True
>>> np.sum(g)*0.1**2 #Integral should be unity (spacing = 0.1)
"""
if not any([fwhm, std]):
raise ValueError('Either fwhm or std has to be provided')
if fwhm:
std = fwhm/(2*np.sqrt(2*np.log(2)))
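        # a Gaussian's FWHM relates to its standard deviation via
        # fwhm = 2*sqrt(2*ln 2)*std ~= 2.3548*std; the line above inverts that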
# 1D is a special case, as coordinates are not given as a list of arrays
if not isinstance(coordinates, (list, tuple)): # iterable but not ndarray
return 1/(std * np.sqrt(2*pi)) * np.exp(- (coordinates - center)**2 / (2 * std * std))
# Computation
dim = len(coordinates)
exponent = sum([ (x - c)**2 for x, c in zip(coordinates, center) ])/(2*std*std)
factor = 1/(std*np.sqrt(2*pi))**dim
return factor*np.exp(-exponent)
def lorentzian(coordinates, center, fwhm):
"""
    Unit integral Lorentzian function.
Parameters
----------
coordinates : array-like
Can be either a list of ndarrays, as a meshgrid coordinates list, or a
single ndarray for 1D computation
center : array-like
Center of the lorentzian. Should be the same shape as `coordinates.ndim`.
fwhm : float
Full-width at half-max of the function.
Returns
-------
out : ndarray
Lorentzian function of unit integral.
Notes
-----
The functional form of the Lorentzian is given by:
.. math::
L(x) = \\frac{1}{\pi} \\frac{(\gamma/2)}{(x-c)^2 + (\gamma/2)^2}
where :math:`\gamma` is the full-width at half-maximum, and :math:`c` is the
center.
For n dimensions, the functional form of the Lorentzian is given by:
.. math::
L(x_1, ..., x_n) = \\frac{1}{n \pi} \\frac{(\gamma/2)}{(\sum_i{(x_i - c_i)^2} + (\gamma/2)^2)^{\\frac{1+n}{2}}}
Example
-------
>>> import numpy as np
>>> from skued import lorentzian
>>>
>>> span = np.arange(-10, 10, 0.1)
>>> xx, yy = np.meshgrid(span, span)
>>> center = [0,0]
>>> l = lorentzian( coordinates = [xx,yy], center = [0,0], fwhm = 1)
>>> l.shape == xx.shape #True
>>> np.sum(l)*0.1**2 #Integral should be unity (spacing = 0.1)
"""
width = 0.5*fwhm
# 1D is a special case, as coordinates are not given as a list of arrays
if not isinstance(coordinates, (list, tuple)): # iterable but not ndarray
return (width/pi) / ((coordinates - center)**2 + width**2)
# Computation
#TODO: speedup by creating numpy array, sum over last axis?
dim = len(coordinates)
core = width/(( sum([(x - c)**2 for x,c in zip(coordinates, center)]) + width**2 ))**( (dim + 1)/2)
factor = 1/(dim*pi)
return factor*core
@lru_cache(maxsize = 16)
def _pseudo_voigt_mixing_factor(width_l, width_g):
"""
Returns the proportion of Lorentzian for the computation of a pseudo-Voigt profile.
pseudoVoigt = (1 - eta) Gaussian + eta * Lorentzian
Parameters
----------
width_l, width_g : float
FWHM for the Gaussian and Lorentzian parts, respectively.
Returns
-------
eta : numerical
Proportion of Lorentzian. Between 0 and 1
"""
#Fast formula (see paper in pseudo_voigt docstrings)
#This assumes width_g and width_l are the Gaussian FWHM and Lorentzian FWHM
gamma = (width_g**5 + 2.69*width_l*(width_g**4) +
2.43*(width_g**3)*(width_l**2) + 4.47*(width_g**2)*(width_l**3) +
0.08*width_g*(width_l**4) + width_l**5)**(1/5)
#Proportion of the Voigt that should be Lorentzian
return 1.37*(width_l/gamma) - 0.477*(width_l/gamma)**2 + 0.11*(width_l/gamma)**3
def pseudo_voigt(coordinates, center, fwhm_g, fwhm_l):
"""
Unit integral pseudo-Voigt profile. Deviation from real Voigt
by less than 1% [1]_.
Parameters
----------
coordinates : array_like
Can be either a list of ndarrays, as a meshgrid coordinates list, or a
single ndarray for 1D computation
center : array_like
Center of the pseudo-voigt. Should be the same shape as `coordinates.ndim`.
fwhm_g, fwhm_l : float
Full-width at half-max of the Gaussian and Lorentzian parts respectively.
Returns
-------
out : ndarray
Pseudo-Voigt profile of unit integral.
Example
--------
>>> import numpy as n
>>> span = n.arange(-10, 10, 0.1)
>>> xx, yy = n.meshgrid(span, span)
>>> center = [0,0]
>>> pV = pseudo_voigt( coordinates = [xx,yy], center = [0,0], fwhm_g = 1, fwhm_l = 0.1)
>>> pV.shape == xx.shape #True
>>> n.sum(pV)*0.1**2 #Integral should be unity
References
----------
.. [1] T. Ida et al., Extended pseudo-Voigt function for approximating the Voigt profile.
J. of Appl. Cryst. (2000) vol. 33, pp. 1311-1316
"""
eta = _pseudo_voigt_mixing_factor(fwhm_g, fwhm_l)
return (1 - eta)*gaussian(coordinates, center, fwhm_g) + eta*lorentzian(coordinates, center, fwhm_l)

# ===== /tobby/4.sort/01.py | repo: HongDaeYong/codingStudy | license: none =====
def solution(array, commands):
answer = []
for i,j,k in commands:
ta = array[i-1:j]
ta.sort()
answer.append(ta[k-1])
return answer
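# Quick sanity check (input values assumed from the usual statement of this
# problem, not part of the original file):
if __name__ == '__main__':
    assert solution([1, 5, 2, 6, 3, 7, 4],
                    [[2, 5, 3], [4, 4, 1], [1, 7, 3]]) == [5, 6, 3]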

# ===== /star/myTypes.py | repo: DanielAndreasen/pyStar | license: MIT =====
from typing import Union, List, Tuple
import numpy as np
listLikeType = Union[List, Tuple, np.ndarray]

# ===== /medium/uniquePaths.py | repo: rupafn/leetcode | license: none =====
class Solution:
def uniquePaths(self, m, n):
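        # DP over the grid: paths(i, j) = paths(i-1, j) + paths(i, j-1),
        # since every cell is entered either from above or from the left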
grid = []
        if m == 1 or n == 1:
            # a single row or column admits exactly one path (and this
            # guard also avoids the index error the original hit for n == 1)
            return 1
for i in range(m):
g = []
for j in range(n):
g.append(0)
grid.append(g)
grid[0][1] = 1
grid[1][0] = 1
for i in range(0,m):
for j in range(0,n):
if(i>0 and j>0):
grid[i][j] += grid[i-1][j]+grid[i][j-1]
elif(i==0 and j>0):
grid[i][j] += grid[i][j-1]
elif(i>0 and j==0):
grid[i][j]+= grid[i-1][j]
# print(grid)
# print(grid[m-1][n-1])
return grid[m-1][n-1]
obj = Solution()
print(obj.uniquePaths(3, 2))  # 3
print(obj.uniquePaths(7, 3))  # 28

# ===== /cyclegan_vanilla/main.py | repo: czhang0808/Music-Genre-Transfer-with-Deep-Learning | license: none =====
import argparse
import os
import tensorflow as tf
from model import cyclegan
from style_classifier import Classifer
from style_classifier_new import Classifer_new
tf.set_random_seed(19)
# os.environ["CUDA_VISIBLE_DEVICES"] = os.environ['SGE_GPU']
# os.environ["CUDA_VISIBLE_DEVICES"] = '0'
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dataset_dir', dest='dataset_dir', default='JAZZ2ROCK', help='path of the dataset')
parser.add_argument('--dataset_A_dir', dest='dataset_A_dir', default='JC_J', help='path of the dataset of domain A')
parser.add_argument('--dataset_B_dir', dest='dataset_B_dir', default='JC_C', help='path of the dataset of domain B')
parser.add_argument('--epoch', dest='epoch', type=int, default=100, help='# of epoch')
parser.add_argument('--epoch_step', dest='epoch_step', type=int, default=10, help='# of epoch to decay lr')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=16, help='# images in batch')
parser.add_argument('--train_size', dest='train_size', type=int, default=1e8, help='# images used to train')
parser.add_argument('--load_size', dest='load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--fine_size', dest='fine_size', type=int, default=128, help='then crop to this size')
parser.add_argument('--time_step', dest='time_step', type=int, default=64, help='time step of pianoroll')
parser.add_argument('--pitch_range', dest='pitch_range', type=int, default=84, help='pitch range of pianoroll')
parser.add_argument('--ngf', dest='ngf', type=int, default=64, help='# of gen filters in first conv layer')
parser.add_argument('--ndf', dest='ndf', type=int, default=64, help='# of discri filters in first conv layer')
parser.add_argument('--input_nc', dest='input_nc', type=int, default=1, help='# of input image channels')
parser.add_argument('--output_nc', dest='output_nc', type=int, default=1, help='# of output image channels')
parser.add_argument('--lr', dest='lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--beta1', dest='beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--which_direction', dest='which_direction', default='AtoB', help='AtoB or BtoA')
parser.add_argument('--phase', dest='phase', default='train', help='train, test')
parser.add_argument('--save_freq', dest='save_freq', type=int, default=1000, help='save a model every save_freq iterations')
parser.add_argument('--print_freq', dest='print_freq', type=int, default=100, help='print the debug information every print_freq iterations')
parser.add_argument('--continue_train', dest='continue_train', type=bool, default=False, help='if continue training, load the latest model: 1: true, 0: false')
parser.add_argument('--checkpoint_dir', dest='checkpoint_dir', default='./checkpoint', help='models are saved here')
parser.add_argument('--sample_dir', dest='sample_dir', default='./samples', help='sample are saved here')
parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test sample are saved here')
parser.add_argument('--log_dir', dest='log_dir', default='./log', help='logs are saved here')
parser.add_argument('--L1_lambda', dest='L1_lambda', type=float, default=10.0, help='weight on L1 term in objective')
parser.add_argument('--gamma', dest='gamma', type=float, default=1.0, help='weight of extra discriminators')
parser.add_argument('--use_midi_G', dest='use_midi_G', type=bool, default=False, help='select generator for midinet')
parser.add_argument('--use_midi_D', dest='use_midi_D', type=bool, default=False, help='select disciminator for midinet')
parser.add_argument('--use_lsgan', dest='use_lsgan', type=bool, default=False, help='gan loss defined in lsgan')
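# NOTE: argparse's type=bool is a known pitfall: bool('False') is True, so any
# non-empty command-line value enables these flags. An explicit string-to-bool
# converter (or action='store_true') is the usual fix.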
parser.add_argument('--max_size', dest='max_size', type=int, default=50, help='max size of image pool, 0 means do not use image pool')
parser.add_argument('--sigma_c', dest='sigma_c', type=float, default=0.0, help='sigma of gaussian noise of classifiers')
parser.add_argument('--sigma_d', dest='sigma_d', type=float, default=1.0, help='sigma of gaussian noise of discriminators')
parser.add_argument('--model', dest='model', default='base', help='three different models, base, partial, full')
parser.add_argument('--type', dest='type', default='cyclegan', help='cyclegan or classifier')
args = parser.parse_args()
def main(_):
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
if not os.path.exists(args.sample_dir):
os.makedirs(args.sample_dir)
if not os.path.exists(args.test_dir):
os.makedirs(args.test_dir)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
with tf.Session(config=tfconfig) as sess:
if args.type == 'cyclegan':
model = cyclegan(sess, args)
model.train(args) if args.phase == 'train' else model.test(args)
if args.type == 'classifier':
classifier = Classifer(sess, args)
# classifier = Classifer_new(sess, args)
classifier.train(args) if args.phase == 'train' else classifier.test(args)
if __name__ == '__main__':
tf.app.run()
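
# Example invocations (values assumed; all flags are defined by the parser
# above, so adjust paths and names to your setup):
#   python main.py --type cyclegan --phase train --dataset_A_dir JC_J --dataset_B_dir JC_C
#   python main.py --type classifier --phase test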

# ===== /Graphs/Graph.py | repo: sidhumeher/-ZTM-DS-and-Algo-Python- | license: none =====
'''
Created on Oct 8, 2020
@author: sidteegela
'''
# Graph implementation with Dictionary
class Graph():
    def __init__(self, graph=None):
        # guard fixed: the original unconditionally reassigned graph_dict,
        # overwriting the empty dict with None
        if graph is None:
            graph = {}
        self.graph_dict = graph
def generateEdges(self, graph):
edges = []
for node in graph:
for neighbor in graph[node]:
edges.append((node, neighbor))
return edges
def isolatedNodes(self, graph):
isolatedNodes = []
for node in graph:
if len(graph[node]) == 0:
isolatedNodes.append(node)
return isolatedNodes
def nodes(self):
return list(self.graph_dict.keys())
def addNode(self, node):
if node not in self.graph_dict:
self.graph_dict[node] = []
return
def addEdge(self, node):
        # edge is a 2-element sequence (fromNode, toNode)
nodes = list(node)
if nodes[0] in self.graph_dict:
self.graph_dict[nodes[0]].append(nodes[1])
else:
self.graph_dict[nodes[0]] = [nodes[1]]
def print(self):
nodes = 'Nodes: '
for node in self.graph_dict:
nodes += str(node) + ' '
print(nodes)
edges = 'Edges:'
        for edge in self.generateEdges(self.graph_dict):  # not the module-level graph
edges += str(edge) + ' '
print(edges)
def findPath(self, startNode, endNode, path=None):
if path is None:
path = []
graph = self.graph_dict
        path = path + [startNode]  # copy, do not mutate the caller's list
if startNode == endNode:
return path
if startNode not in graph:
return None
for node in graph[startNode]:
if node not in path:
extendPath = self.findPath(node, endNode, path)
if extendPath:
return extendPath
return None
    def findAllPaths(self, startNode, endNode, path=None):
        if path is None:
            path = []
        graph = self.graph_dict
        path = path + [startNode]  # copy, do not mutate the caller's list
        if startNode == endNode:
            return [path]  # wrap: the result is a list of paths
        if startNode not in graph:
            return []
        paths = []
        for node in graph[startNode]:
            if node not in path:
                extendedPaths = self.findAllPaths(node, endNode, path)
                for p in extendedPaths:
                    paths.append(p)
        return paths
if __name__ == '__main__':
graph = { "a" : ["c"],
"b" : ["c", "e"],
"c" : ["a", "b", "d", "e"],
"d" : ["c"],
"e" : ["c", "b"],
"f" : []
}
g = Graph(graph)
# Graph edges
edges = g.generateEdges(graph)
print(edges)
# Isolated nodes
print(g.isolatedNodes(graph))
# Adding node
g.addNode('g')
# Add edge
    g.addEdge(('g', 'd'))  # the original passed 'g' 'd', i.e. the single string 'gd'
edges = g.generateEdges(graph)
print(edges)
# Find path from a to b
path = g.findPath('a', 'b', [])
print(path)
# Print graph
g.print()
# Find all paths
    paths = g.findAllPaths('a', 'b')
    print(paths)

# ===== /numpy学习/012_sample_数组操作_连接数组.py | repo: whh881114/common_scripts | license: none =====
# -*- coding: UTF-8 -*-
from __future__ import print_function
import numpy as np
"""
numpy.concatenate
numpy.concatenate 函数用于沿指定轴连接相同形状的两个或多个数组,格式如下:
numpy.concatenate((a1, a2, ...), axis)
参数说明:
a1, a2, ...:相同类型的数组
axis:沿着它连接数组的轴,默认为 0
"""
print('First array:')
a = np.array([[1, 2], [3, 4]])
print(a)
print('')
print('Second array:')
b = np.array([[5, 6], [7, 8]])
print(b)
print('')
print('Join the two arrays along axis 0:')
print(np.concatenate((a, b)))
print()
print('Join the two arrays along axis 1:')
print(np.concatenate((a, b), axis=1))
print('- ' * 50)
"""
numpy.stack
numpy.stack 函数用于沿新轴连接数组序列,格式如下:
numpy.stack(arrays, axis)
参数说明:
arrays相同形状的数组序列
axis:返回数组中的轴,输入数组沿着它来堆叠
"""
print('Stack the two arrays along axis 0:')
print(np.stack((a, b), 0))
print('Stack the two arrays along axis 1:')
print(np.stack((a, b), 1))
print('- ' * 50)
"""
numpy.hstack
numpy.hstack 是 numpy.stack 函数的变体,它通过水平堆叠来生成数组。
"""
print('水平堆叠:')
c = np.hstack((a, b))
print(c)
print('- ' * 50)
"""
numpy.vstack
numpy.vstack 是 numpy.stack 函数的变体,它通过垂直堆叠来生成数组。
"""
print('竖直堆叠:')
c = np.vstack((a, b))
print(c)
print('- ' * 50)
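
# Key difference: concatenate joins along an EXISTING axis (the result keeps
# the same number of dimensions), while stack joins along a NEW axis.
# For two (2, 2) arrays: concatenate gives (4, 2) or (2, 4); stack gives (2, 2, 2).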

# ===== /app1/urls.py | repo: 3toe/DjangoSkeleton | license: none =====
from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('new', views.newz),
path('create', views.create),
path('<int:num>', views.show),
path('<int:num>/edit', views.edit),
path('<int:num>/delete', views.destroy)
]
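
# Illustrative request-to-view mapping for the patterns above (assuming the
# imported views exist in app1/views.py):
#   GET /         -> views.index(request)
#   GET /new      -> views.newz(request)
#   GET /7        -> views.show(request, num=7)
#   GET /7/edit   -> views.edit(request, num=7)
#   GET /7/delete -> views.destroy(request, num=7)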

# ===== /bnn_optimization/train.py | repo: CuauSuarez/Bop2ndOrder | license: Apache-2.0 =====
from zookeeper import cli, build_train
from os import path
import click
from bnn_optimization import utils
@cli.command()
@click.option("--tensorboard", default=True)
@build_train(utils.prepare_registry)
def train(build_model, dataset, hparams, output_dir, tensorboard):
import larq as lq
from bnn_optimization import utils, data
import tensorflow as tf
initial_epoch = utils.get_current_epoch(output_dir)
print(output_dir)
model_path = path.join(output_dir, "model")
callbacks = [utils.ModelCheckpoint(filepath=model_path, save_weights_only=True)]
if hasattr(hparams, "learning_rate_schedule"):
callbacks.append(
tf.keras.callbacks.LearningRateScheduler(hparams.learning_rate_schedule)
)
if tensorboard:
callbacks.extend(
[
tf.keras.callbacks.TensorBoard(
log_dir=output_dir,
write_graph=False,
histogram_freq=0,
update_freq=250,
),
]
)
with tf.device("/cpu:0"):
train_data = dataset.train_data(hparams.batch_size)
validation_data = dataset.validation_data(hparams.batch_size)
with utils.get_distribution_scope(hparams.batch_size):
model = build_model(hparams, **dataset.preprocessing.kwargs)
model.compile(
optimizer=hparams.optimizer,
loss="categorical_crossentropy",
metrics=["categorical_accuracy", "top_k_categorical_accuracy"],
)
lq.models.summary(model)
if initial_epoch > 0:
model.load_weights(model_path)
click.echo(f"Loaded model from epoch {initial_epoch}")
model.fit(
train_data,
epochs=hparams.epochs,
steps_per_epoch=dataset.train_examples // hparams.batch_size,
validation_data=validation_data,
validation_steps=dataset.validation_examples // hparams.batch_size,
verbose=2 if tensorboard else 1,
initial_epoch=initial_epoch,
callbacks=callbacks,
)
model_name = build_model.__name__
model.save_weights(path.join(output_dir, f"{model_name}_weights.h5"))
if __name__ == "__main__":
cli()

# ===== /catalogo/logic/catalogo_logic.py | repo: mpedroza96/AntusuSoftwareArchitecture | license: none =====
from ..models import Catalogo
def get_catalogos():
queryset = Catalogo.objects.all()
return (queryset)
def create_catalogo(form):
    # form.save() already persists the instance; the second save() call
    # in the original was redundant
    form.save()
    return ()

# ===== /python-3.4/ABC/140/a.py | repo: kp047i/AtCoder | license: none =====
print(pow(int(input()), 3))

# ===== /examples/tortuga_backtest.py | repo: cgajagon/qstrader | license: MIT =====
import datetime
import numpy as np
import pandas as pd
from qstrader import settings
from qstrader.strategy.base import AbstractStrategy
from qstrader.event import SignalEvent, EventType
from qstrader.compat import queue
from qstrader.trading_session import TradingSession
class TortugaStrategy(AbstractStrategy):
"""
TODO
"""
def __init__(
self, ticker, events_queue,
base_quantity=100, long_window=30
):
self.ticker = ticker
self.events_queue = events_queue
self.base_quantity = base_quantity
self.bars = 0
self.invested = False
self.window = 30
self.max_price = float('-inf')
self.min_price = float('inf')
self.support = float('inf')
self.stop_loss = float('-inf')
def calculate_signals(self, event):
if (
event.type in [EventType.BAR, EventType.TICK] and
event.ticker == self.ticker
):
            if not self.invested:
if self.max_price > float('-inf') and self.support < float('inf') and event.high_price > self.max_price:
order = event.high_price
self.stop_loss = self.support - self.support * 0.03
print("LONG %s: %s, at %s with stop at %s" % (self.ticker, event.time, order, self.stop_loss))
signal = SignalEvent(
self.ticker, "BOT",
suggested_quantity=self.base_quantity
)
self.events_queue.put(signal)
self.invested = True
if event.low_price < self.support:
self.support = event.low_price
#print('Update Support', self.support)
            if self.invested:
if event.high_price > self.max_price and self.support > self.stop_loss:
self.stop_loss = self.support - self.support * 0.03
#print('Update Stop Loss',self.stop_loss)
if self.support < self.stop_loss:
print("SHORT %s: %s, at %s" % (self.ticker, event.time, self.stop_loss))
signal = SignalEvent(
self.ticker, "SLD",
suggested_quantity=self.base_quantity
)
self.events_queue.put(signal)
self.invested = False
if event.high_price > self.max_price:
self.max_price = event.high_price
self.support = float('inf')
#print('Update Max', self.max_price)
self.bars += 1
def run(config, testing, tickers, filename):
# Backtest information
title = ['Tortuga Example on %s' % tickers[0]]
initial_equity = 3000.0
start_date = datetime.datetime(2018, 12, 6)
end_date = datetime.datetime(2020, 1, 1)
# Use the Buy and Hold Strategy
events_queue = queue.Queue()
strategy = TortugaStrategy(tickers[0], events_queue)
# Set up the backtest
backtest = TradingSession(
config, strategy, tickers,
initial_equity, start_date, end_date,
events_queue, title=title, benchmark=tickers[1]
)
results = backtest.start_trading(testing=testing)
return results
if __name__ == "__main__":
# Configuration data
testing = False
config = settings.from_file(
settings.DEFAULT_CONFIG_FILENAME, testing
)
tickers = ["KEYS", "SPY"]
filename = None
run(config, testing, tickers, filename)

# ===== /wiki_history_ip_country_city.py | repo: ZedYeung/pythonalg | license: none =====
from urllib.request import urlopen
from urllib.error import HTTPError  # canonical home of HTTPError
from bs4 import BeautifulSoup
import datetime
import json
import random
import re
random.seed(datetime.datetime.now())
def getLinks(articleUrl):
html = urlopen("http://en.wikipedia.org" + articleUrl)
bs = BeautifulSoup(html, "html.parser")
return bs.find("div", {"id": "bodyContent"}).findAll("a", href=re.compile("^(/wiki/)((?!:).)*$"))
def getHistoryIPs(pageUrl):
# Format of revision history pages is:
# http://en.wikipedia.org/w/index.php?title=Title_in_URL&action=history
pageUrl = pageUrl.replace("/wiki/", "")
historyUrl = "http://en.wikipedia.org/w/index.php?title=" + pageUrl + "&action=history"
print("history url is: " + historyUrl)
html = urlopen(historyUrl)
bs = BeautifulSoup(html, "html.parser")
# finds only the links with class "mw-anonuserlink" which has IP addresses
# instead of usernames
ipAddresses = bs.findAll("a", {"class": "mw-anonuserlink"})
addressList = set()
for ipAddress in ipAddresses:
addressList.add(ipAddress.get_text())
return addressList
def getCountryAndCity(ipAddress):
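    # NOTE: the free freegeoip.net endpoint used below has since been retired
    # (it was folded into a commercial service), so live requests may fail
    # today; the lookup logic is kept for illustration.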
try:
response = urlopen("http://freegeoip.net/json/" + ipAddress).read().decode('utf-8')
# can't work with IPv6
except HTTPError:
return None
responseJson = json.loads(response)
return responseJson.get("country_code") + " " + responseJson.get("city")
links = getLinks("/wiki/Abraham_Lincoln")
while (len(links) > 0):
for link in links:
print("-------------------")
historyIPs = getHistoryIPs(link.attrs["href"])
for historyIP in historyIPs:
countryCity = getCountryAndCity(historyIP)
if countryCity is not None:
print(historyIP + " is from " + countryCity)
newLink = links[random.randint(0, len(links) - 1)].attrs["href"]
links = getLinks(newLink)

# ===== /tests/integration/test_repeated_posts_setting.py | repo: getnikola/nikola | license: MIT =====
"""
Duplicate POSTS in settings.
Should not read each post twice, which causes conflicts.
"""
import pytest
from nikola import __main__
from .helper import append_config, cd
from .test_demo_build import prepare_demo_site
from .test_empty_build import ( # NOQA
test_archive_exists,
test_avoid_double_slash_in_rss,
test_check_files,
test_check_links,
test_index_in_sitemap,
)
@pytest.fixture(scope="module")
def build(target_dir):
"""Fill the site with demo content and build it."""
prepare_demo_site(target_dir)
append_config(
target_dir,
"""
POSTS = (("posts/*.txt", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"))
""",
)
with cd(target_dir):
__main__.main(["build"])

# ===== /collaborative_filtering/models.py | repo: AnastasiaVedernikova/NeighborsInCollegium | license: none =====
from collaborative_filtering.cos_dist import cos_dist
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in 0.20
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsClassifier
import xgboost
data = pd.read_csv("WithoutOneCollegium.csv")
data["cos_dist"] = cos_dist()
y = np.array(data['Oцінка задоволення сусідом'])
data = data.drop('Oцінка задоволення сусідом',1)
x = data.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1)#update
#PCA
neigh_pca = PCA(n_components=15)
neigh_pca.fit(x_train)
train_pca = neigh_pca.transform(x_train)
test_pca = neigh_pca.transform(x_test)
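# NOTE: the PCA-reduced features above (train_pca/test_pca) are computed but
# never used; every model below is fit on the raw x_train/x_test. Substitute
# train_pca/test_pca in the fit/score calls to train on the reduced features.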
#KNN classifier
model = KNeighborsClassifier(n_neighbors=5, algorithm='auto')
model.fit(x_train, y_train)
print(model.score(x_test, y_test))
#LogisticRegression
neigh = LogisticRegression()
neigh.fit(x_train, y_train)
print(neigh.score(x_test, y_test))
#RandomForest
clf = RandomForestClassifier(max_depth=2, random_state=0)
clf.fit(x_train, y_train)
print(clf.score(x_test, y_test))
#LinearRegression
regr = LinearRegression()
regr.fit(x_train, y_train)
print(regr.score(x_test, y_test))
#XGBoost classifier
model = xgboost.XGBRegressor()
model.fit(x_train, y_train)
print(model.score(x_test, y_test))

# ===== /pujcovna/katalog/migrations/0003_vypujcka.py | repo: starkroman/python-012021 | license: none =====
# Generated by Django 3.1.7 on 2021-04-18 19:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('katalog', '0002_zakaznik'),
]
operations = [
migrations.CreateModel(
name='Vypujcka',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datumCasZacatkuVypujcky', models.DateTimeField()),
('datumCasKonceVypujcky', models.DateTimeField()),
('cenaVypujcky', models.IntegerField()),
],
),
]

# ===== /算法和数据结构/02_数组和列表_test.py | repo: ZhMingZh/StudyPython | license: none =====
"""
线性结构:1.内存空间连续 2.下标访问
python常用的array(很少用,不同于Numpy中的array)、
list方法 时间复杂度
下标访问 O(1)
append O(1)
insert O(n)
pop default the last element' O(1)
remove O(n)
"""
class Array(object):
"""用list实现 Array ADT"""
def __init__(self, size=32):
self.size = size
self._items = [None] * size
def __getitem__(self, index):
return self._items[index]
def __setitem__(self, key, value):
self._items[key] = value
def __len__(self):
return self.size
def __iter__(self):
for item in self._items:
yield item
def clear(self, value=None):
for i in range(len(self._items)):
self._items[i] = value
def test_array():
size = 10
a = Array(size)
a[0] = 1
assert a[0] == 1
a.clear()
assert a[0] is None
test_array()

# ===== /security_flaws/user.py | repo: jumblesale/security-flaws | license: none =====
import hashlib
import re
class User:
required_fields = ['username', 'secret']
def __init__(self, username, secret):
self.id = None
self.username = username
self.secret = secret
def __str__(self):
return "{}|{}".format(self.username, self.secret)
def encrypt_secret(plaintext: str) -> str:
"""
a totally cryptographically secure method to encrypt secrets GUARANTEED unbreakable
:param plaintext: the plaintext secret
:return: the unbreakable encrypted version of the secret
"""
hash = hashlib.md5()
hash.update(plaintext.encode('utf-8'))
return hash.hexdigest()
def validate_secret(plaintext: str) -> bool:
result = re.match('^[a-z]{1,5}$', plaintext)
if result is None:
return False
return True
def create_user(username: str, plaintext_secret: str) -> User:
"""
validate and create a User object
:param username: the name of the user
:param plaintext_secret: the plaintext version of the user's secret
:return: a User object representing that user with their secret encrypted
"""
return User(username, encrypt_secret(plaintext_secret))
def create_user_from_dict(d: dict) -> User:
"""
validate and create a User object from a dictionary of values
:param d: a dict containing all the needed fields
:return: a User object if the dict is valid
"""
errors = []
for field in User.required_fields:
if field not in d.keys():
errors.append('{} was not provided'.format(field))
if 'secret' in d:
if d['secret'] == '':
errors.append('no secret provided')
elif validate_secret(d['secret']) is False:
errors.append('"{}" is not a valid secret'.format(d['secret']))
if errors:
raise ValueError(*errors)
return create_user(d['username'], d['secret'])
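# A minimal usage sketch (hypothetical values, assuming only the helpers above):
# create_user_from_dict takes a dict with 'username' and 'secret' and raises a
# ValueError that lists every problem it finds.
if __name__ == '__main__':
    alice = create_user_from_dict({'username': 'alice', 'secret': 'abc'})
    print(alice)  # -> "alice|900150983cd24fb0d6963f7d28e17f72" (md5 of 'abc')
    try:
        create_user_from_dict({'username': 'bob', 'secret': 'TOO LONG 123'})
    except ValueError as e:
        print(e.args)  # ('"TOO LONG 123" is not a valid secret',)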
|
[
"rob@robsollars.co.uk"
] |
rob@robsollars.co.uk
|
2b74879fe93e5c92749085597cf1c6c5cd234493
|
7cdfbe80ac56a042b9b99c1cb17766683da439b4
|
/paper2/setup_files/old_mocks/mock_8.py
|
674592696e13fdfa96eac69492f351278b0a54ef
|
[] |
no_license
|
bacook17/pixcmd
|
1e918cc6b147abe1885f9533836005b9f2b30012
|
fac20ced14492fd32448d2722c377d88145f90a1
|
refs/heads/master
| 2021-01-18T11:54:13.625834
| 2019-07-30T15:13:12
| 2019-07-30T15:13:12
| 67,228,636
| 0
| 0
| null | 2016-09-02T14:18:59
| 2016-09-02T14:18:58
| null |
UTF-8
|
Python
| false
| false
| 8,379
|
py
|
# Mock configuration file for Paper 2
# Ben Cook (bcook@cfa.harvard.edu)
###############################################
# CONFIG FILE for mock run #3
# MOCK Galaxy:
# Metallicity Model: Single FeH
# [Fe/H] = -0.25
# Dust Model: Single Dust
# log E(B-V) = -1.0
# SFH Model: Tau
#              log Npix = 5.0  (matches the mock-galaxy values set below)
#              tau = 2.0
# Distance
# dmod = 29.0
#
# MODEL Galaxy: Matches input model
# WITH Non-Param SFH
# Priors:
# [Fe/H] : [-0.5, 0.25]
# log E(B-V) : [-1.5, 0.0]
# SFH_i : tau_model +/- 1 dex
# distance : [27.0, 33.0]
import pcmdpy_gpu as ppy
import multiprocessing
import time
import numpy as np
###############################################
# IMPLEMENTATION SETTINGS
params = {} # arguments passed to pcmdpy_integrate
sampler_params = {} # arguments passed to dynesty sampler initialization
run_params = {} # arguments passed to sampler's run_nested()
# Whether to use GPU acceleration
params['use_gpu'] = True
# Whether to output progress steps
params['verbose'] = True
# The number of parallel processes to run. Using more threads than available
# CPUs (or GPUs, if gpu=True) will not improve performance
N_threads = 1
# Setup the multiprocessing pool, for parallel evaluation
pool = None
if N_threads > 1:
if params['use_gpu']:
pool = multiprocessing.Pool(processes=N_threads,
initializer=ppy.gpu_utils.initialize_gpu)
time.sleep(10)
else:
pool = multiprocessing.Pool(processes=N_threads)
sampler_params['pool'] = pool
# Initialize the GPU with pycuda
# if params['use_gpu']:
# ppy.gpu_utils.initialize_gpu(n=0)
# Check to see if GPU is available and properly initialized. If not, exit
# if params['use_gpu']:
# assert ppy.gpu_utils._GPU_AVAIL, ('GPU NOT AVAILABLE, SEE ERROR LOGS. ',
# 'QUITTING')
# assert ppy.gpu_utils._CUDAC_AVAIL, ('CUDAC COMPILATION FAILED, SEE ERROR ',
# 'LOGS. QUITTING')
###############################################
# DYNESTY SAMPLER SETTINGS
# These parameters are passed to initialization of
# Dynesty Sampler object
# Whether to use dynamic nested sampling
params['dynamic'] = DYNAMIC = True
# The number of dynesty live points
_nlive = 500
if DYNAMIC:
run_params['nlive_init'] = _nlive
else:
sampler_params['nlive'] = _nlive
# How to bound the prior
sampler_params['bound'] = 'multi'
# How to sample within the prior bounds
sampler_params['method'] = 'unif'
# Number of parallel processes
sampler_params['nprocs'] = N_threads
# Only update the bounding distribution after this many calls
sampler_params['update_interval'] = 100
# Compute multiple realizations of bounding objects
sampler_params['bootstrap'] = 0
# Enlarge volume of bounding ellipsoids
sampler_params['enlarge'] = 1.1
# When should sampler update bounding from unit-cube
sampler_params['first_update'] = {'min_eff': 30.}
###############################################
# DYNESTY RUN_NESTED SETTINGS
# The number of max calls for dynesty
run_params['maxcall'] = 250000
# The error tolerance for dynesty stopping criterion
_dlogz = 0.5
if DYNAMIC:
run_params['dlogz_init'] = _dlogz
else:
run_params['dlogz'] = _dlogz
sampler_params['add_live'] = True
if DYNAMIC:
# How many batches?
run_params['maxbatch'] = 10
# How many live points per batch?
run_params['nlive_batch'] = 100
# weight function parameters
run_params['wt_kwargs'] = {'pfrac': 1.0}
# How many max calls per iteration?
run_params['maxcall_per_iter'] = 10000
# Don't keep boundaries
run_params['save_bounds'] = False
###############################################
# PCMD MODELLING SETTINGS
# The size (Nim x Nim) of the simulated image
params['Nim'] = 512
# The filters (photometry bands) to model. There should be at least 2 filters.
# Default choice: F814W and F475W
params['filters'] = ppy.instrument.default_m31_filters()
# Initialize the isochrone models for the current set of filters
params['iso_model'] = ppy.isochrones.Isochrone_Model(params['filters'])
# Set a custom Galaxy Model with four parts
# Metallicity model
metalmodel = ppy.metalmodels.SingleFeH() # Single Metallicity
# metalmodel = ppy.metalmodels.NormMDF() # Gaussian MDF
# metalmodel = ppy.metalmodels.FixedWidthNormMDF(0.2) # fixed width MDF
# Dust model
dustmodel = ppy.dustmodels.SingleDust() # single dust screen
# dustmodel = ppy.dustmodels.LogNormDust() # lognormal screen
# dustmodel = ppy.dustmodels.FixedWidthLogNormDust(0.1) # fixed width lognorm
# Age model
sfhmodel = ppy.sfhmodels.NonParam() # Fully non-parametric model
# sfhmodel = ppy.sfhmodels.ConstantSFR() # constant Star Formation Rate
# sfhmodel = ppy.sfhmodels.TauModel() # exponential SFR decline
# sfhmodel = ppy.sfhmodels.RisingTau() # Linear x exponential decline
# sfhmodel = ppy.sfhmodels.SSPModel() # single age SSP
# Distance model
# distancemodel = ppy.distancemodels.FixedDistance(26.0) # fixed dmod=26.0 (1.6 Mpc)
distancemodel = ppy.distancemodels.VariableDistance() # dmod floats
params['gal_model'] = ppy.galaxy.CustomGalaxy(
metalmodel,
dustmodel,
sfhmodel,
distancemodel)
# Add the binned hess values and the mean magnitude and color terms
params['like_mode'] = 5
# The hess bins to compute the likelihood in
# The magnitude upper/lower bounds are very important to consider
# relative to distance
magbins = np.arange(10, 45, 0.05)
colorbins = np.arange(-1.5, 5.6, 0.05) # fairly insensitive to distance
params['bins'] = [magbins, colorbins]
# Factor to downsample the isochrones
params['downsample'] = 5
# which magnitude system
params['mag_system'] = 'vega'
# Cut out stars brighter than some limit (of mean luminosity)
params['lum_cut'] = np.inf
# Whether to use a fixed random-number seed
# (decreases stochasticity of likelihood calls)
params['fixed_seed'] = True
# Average counts of "sky noise" to add in each band
params['sky_noise'] = None
params['shot_noise'] = True
###############################################
# PRIOR SETTINGS
# The bounds on the flat prior for each parameter
z_bound = [-0.5, 0.25] # metallicity
dust_med_bound = [-1.5, 0.0] # log dust
# Only set the distance bounds if allowed to float
# dmod_bound = None
dmod_bound = [[27.0, 33.0]]
# Compute the 7-param SFH bound using tau models to bound
Npix_low, tau = 3.0, 3.0
model = ppy.sfhmodels.TauModel(iso_step=-1)
model.set_params([Npix_low, tau])
lower_sfh = np.log10(model.SFH)
Npix_high = 7.0
model.set_params([Npix_high, tau])
upper_sfh = np.log10(model.SFH)
SFH_bounds_arr = np.array([lower_sfh, upper_sfh]).T
SFH_bounds = list(list(bound) for bound in SFH_bounds_arr)
# Create a Prior object with given bounds
prior_bounds = {}
prior_bounds['feh_bounds'] = [z_bound]
prior_bounds['dust_bounds'] = [dust_med_bound]
prior_bounds['age_bounds'] = SFH_bounds
prior_bounds['dmod_bounds'] = dmod_bound
params['prior'] = params['gal_model'].get_flat_prior(**prior_bounds)
###############################################
# DATA / MOCK SETTINGS
# Is the data created manually, or should it be read from a file?
params['data_is_mock'] = True
# scale of mock image (N_mock x N_mock)
N_mock = 256
# model of the mock galaxy
feh = -0.25
log_ebv = -1.0
log_npix = 5.0
tau = 2.0
dmod = 29.0
# Mock data is generated with same model as is fit (except Tau Model)
metalmodel = metalmodel
dustmodel = dustmodel
sfhmodel = ppy.sfhmodels.TauModel()
distancemodel = ppy.distancemodels.VariableDistance() # dmod floats
model_mock = ppy.galaxy.CustomGalaxy(
metalmodel,
dustmodel,
sfhmodel,
distancemodel)
gal_params = np.array([feh, log_ebv, log_npix, tau, dmod])
model_mock.set_params(gal_params)
# Create the mock data
# temporary driver to make mock
driv = ppy.driver.Driver(params['iso_model'], gpu=True)
# The mock data
params['data_pcmd'], _ = driv.simulate(model_mock, N_mock,
fixed_seed=params['fixed_seed'],
shot_noise=params['shot_noise'],
sky_noise=params['sky_noise'],
downsample=params['downsample'],
mag_system=params['mag_system'])
del driv
|
[
"bcook@cfa.harvard.edu"
] |
bcook@cfa.harvard.edu
|
5d5b5ad17d5f573733978c26be47cab77c5833fe
|
12fd8ef25563d3b32f74c3b4c92d20bb3326225c
|
/lists/migrations/0002_item_text.py
|
191f76c3f6916672c5b5c2b08b2a54220bc752a6
|
[] |
no_license
|
robglass/superlists
|
b1b94cbf23f913505eadd6aebc4ab36f31b1f9a0
|
c26b746d494de64f6f6b81e5e70de88bcb570a7e
|
refs/heads/master
| 2021-01-02T23:35:57.787895
| 2017-08-07T03:31:04
| 2017-08-07T03:31:04
| 99,502,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-06 21:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lists', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='item',
name='text',
field=models.TextField(default=''),
),
]
|
[
"rglass@getwellnetwork.com"
] |
rglass@getwellnetwork.com
|
075cba7341bec0541cda7af78f6285dd481f1745
|
17fef77120e7466b002e5722852987dd3f36d510
|
/KodlamaEgzersizleri/Döngüler/fibonacci_serisi.py
|
8ddceaf331da9d68af5a0330c5c6659528ae15a3
|
[
"MIT"
] |
permissive
|
sametcelikbicak/Python
|
20c25d62069d4cf49e6844b7063ab4b63c8d67cf
|
f5652a0b9710c00149b7388721ee9f3cdc916ca5
|
refs/heads/master
| 2021-05-16T15:25:40.211731
| 2021-02-10T07:06:59
| 2021-02-10T07:06:59
| 119,162,689
| 3
| 0
| null | 2021-02-10T07:06:59
| 2018-01-27T12:31:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 188
|
py
|
print("""********************
Fibonacci Serisi
********************""")
a = 1
b = 1
fibonacci = [a, b]
for i in range(20):
a, b = b, a + b
fibonacci.append(b)
print(fibonacci)
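# An equivalent generator-based sketch (an alternative reading, not from the
# original file): yields the same 22 terms that the list above accumulates.
def fib(n):
    a, b = 1, 1
    for _ in range(n):
        yield a
        a, b = b, a + b
print(list(fib(22)))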
|
[
"sametcelikbicak@outlook.com"
] |
sametcelikbicak@outlook.com
|
b7da6c4d3d9b772d466e54a9ff2483eede54b1f7
|
22bc2fb28fe7e384544bd949efea936bc670acf2
|
/4. Detect Communities on Network/chengzhe_xue_task1.py
|
3a97f82e414490d7edddbd8837177171e31ce02a
|
[] |
no_license
|
waoxc/Data-Mining-on-Yelp-Data
|
505894ee7ebcae159453b00bab312858da46e5f9
|
92d71c7c5ed7c97979fe0691d4d6b18c36921fe0
|
refs/heads/master
| 2020-04-21T11:45:50.568208
| 2019-08-09T03:29:55
| 2019-08-09T03:49:27
| 169,537,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
import os
import sys
import time
from pyspark import SparkContext
from itertools import combinations
from pyspark.sql import SQLContext
from graphframes import GraphFrame
os.environ["PYSPARK_SUBMIT_ARGS"] = ("--packages graphframes:graphframes:0.6.0-spark2.3-s_2.11 pyspark-shell")
curr = time.time()
threshold = int(sys.argv[1])
input_path = sys.argv[2]   # renamed: 'input' shadows a built-in
output_path = sys.argv[3]
sc = SparkContext(appName="LPA")
RawRDD = sc.textFile(input_path).map(lambda x: x.split(','))
header = RawRDD.first()
DataRDD = RawRDD.filter(lambda x: x != header)
dic = {}
for x in DataRDD.collect():
if x[0] not in dic:
dic[x[0]] = set()
dic[x[0]].add(x[1])
edgelist = []
verticeSet = set()
for x in combinations(dic.keys(), 2):
if len(dic[x[0]].intersection(dic[x[1]])) >= threshold:
edgelist.append(x)
edgelist.append((x[1], x[0]))
verticeSet.add(x[0])
verticeSet.add(x[1])
verticelist = list(combinations(verticeSet, 1))
sqlContext = SQLContext(sc)
vertices = sqlContext.createDataFrame(verticelist, ["id"])
edges = sqlContext.createDataFrame(edgelist, ["src", "dst"])
g = GraphFrame(vertices, edges)
labeled = g.labelPropagation(maxIter=5)
resRDD = labeled.rdd.map(lambda x: (x['label'], [x['id']]))\
.reduceByKey(lambda x, y: x+y)\
.map(lambda x: (len(x[1]), [sorted(x[1])]))\
.reduceByKey(lambda x, y: x+y)\
.map(lambda x: (x[0], sorted(x[1])))\
.sortByKey()
f = open(output, "w")
for x in resRDD.collect():
communities = x[1]
for community in communities:
f.write(str(community)[1:-1]+'\n')
f.close()
print("runtime", time.time()-curr, "s")
|
[
"chengzhe0607@gmail.com"
] |
chengzhe0607@gmail.com
|
351984131f23d00cf44a4d6d2340458923189143
|
d089487e0b108f71fdfa001ef905d04a38c272a3
|
/shuishiwodi.py
|
7e27d1a3ae6658564ea81660642d71a7dfc9cb01
|
[] |
no_license
|
nn243823163/shuishiwodi
|
2ed0bb09b2d4d4388826f5725eba73b5ad31abde
|
6ef0fbb6c4af5a41aa166ccf4992034ea5e30ec3
|
refs/heads/master
| 2021-01-24T02:15:57.729614
| 2018-02-25T14:02:27
| 2018-02-25T14:02:27
| 122,840,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,429
|
py
|
#coding:utf-8
from Tkinter import *
import ttk
from PIL import Image, ImageTk
from packs_need.spyword import spywords
import random
import tkMessageBox
from tkSimpleDialog import askinteger
str_note = '''游戏规则:
1、游戏有卧底和平民两种身份;
2、平民得到同一词语,卧底得到与之相关的另一词语;
3、每人每轮用一句话描述自己的词语,既不能被卧底察觉,也要给同伴暗示;
4、每轮描述完毕,在场的所有人投票,选出怀疑谁是卧底的人,得票最多的人出局,
若出现平局,平局的人进行描述,大家再投票选出一个卧底;
5、若有卧底撑到最后一轮(场上剩两人),则卧底获胜,反之平民获胜。
'''
##### global variables #####
num = 0
# index of the spy player
spy = 0
# randomly drawn word pair; per-player word list; vote-count list; list of eliminated players
list_rand = ()
word = []
cnt = []
dead = []
sameVote = 0
spyWin = 0
##### callback functions #####
def givewords():
global num,word,cnt,dead,spy,sameVote,spyWin,list_rand
list_rand = random.sample(spywords, 1)[0]
num = int(number_chose.get())
spy = random.randint(0, num - 1)
print 'total players', num
# initialise the three bookkeeping lists
for i in range(0, num):
word.append('a')
cnt.append(0)
dead.append(num + 2)
# hand each player a word; the print below is for debugging; sameVote flags a tied vote; spyWin marks the condition for a spy victory
for i in range(0, num):
if (i == spy):
word[i] = str(list_rand[1])
else:
word[i] = str(list_rand[0])
print (word[i])
tkMessageBox.showinfo('请依次看单词',"第%d玩家的词语是:%s"%(i+1,word[i]))
def startgame():
# player_choice['values'] = range(1,num+1)
for i_lun in range(0,num-2):
for i_ren in range(0,num):
cnt[i_ren] = 0
if i_ren not in dead:
tkMessageBox.showinfo('请发言','请%d玩家发言'%(i_ren+1))
tkMessageBox.showinfo('开始投票','开始投票')
for i_ren in range(0,num):
if i_ren not in dead:
res = askinteger('请%d玩家投票'%(i_ren+1), "%d玩家您觉得卧底是:"%(i_ren+1), initialvalue=0)
print res
cnt[res-1] = cnt[res-1]+1
print cnt
dead[i_lun] = cnt.index(max(cnt))
if dead[i_lun] == spy:
tkMessageBox.showinfo('卧底%d玩家被投出,游戏结束'%(dead[i_lun]+1),'卧底%d玩家被投出,游戏结束'%(dead[i_lun]+1))
spyWin = 0
break
else:
tkMessageBox.showinfo('%d玩家被冤死'%(dead[i_lun]+1),'%d玩家被冤死'%(dead[i_lun]+1))
spyWin = 1
print cnt
tkMessageBox.showinfo('投票结束,进入下一轮','投票结束,进入下一轮')
if spyWin:
tkMessageBox.showinfo('卧底玩家%d获胜'%(spy+1),'卧底玩家%d获胜'%(spy+1))
def overgame():
root.quit()
print 'the word pair was:', list_rand
max_number = 8 # maximum number of players
root = Tk()
root.title('谁是卧底')
frame1 = Frame(root,width=800,bg='green')
frame1.pack(fill=X)
load = Image.open('pic.jpg')
render= ImageTk.PhotoImage(load)
img = Label(frame1,image = render)
img.pack()
text1 = Label(frame1,text = "谁是卧底")
text1.pack()
tex2 = Label(frame1,text = str_note,justify = LEFT,anchor = 'w',bg = 'gray')
tex2.pack(fill = X)
frame2 = Frame(root,width=800,bg='red')
frame2.pack(fill = X)
number_chose = ttk.Combobox(frame2)
number_chose.config(width = 20 )
number_chose['values'] = (3,4,5,6,7,8)
number_chose.set('选择玩家个数')
number_chose.grid(row = 0 ,column=0)
button1 = Button(frame2,width = 20 ,text = '分配单词',command = givewords)
button1.grid(row = 0 ,column = 1)
button2 = Button(frame2,width = 20 ,text = '开始游戏',command = startgame)
button2.grid(row = 0 ,column = 2)
#
# player_choice = ttk.Combobox(frame2)
# player_choice.config(width = 10)
# player_choice.grid(row = 0,column = 3)
# player_choice.set('选择玩家号码')
#
# button3 = Button(frame2,text = '投票',command = givetickets)
# button3.grid(row = 0 ,column = 4)
button4 = Button(frame2,width = 20 ,text = '结束游戏',command = overgame)
button4.grid(row = 0 ,column = 3)
root.mainloop()
|
[
"apple@appledeMacBook-Air.local"
] |
apple@appledeMacBook-Air.local
|
444cff32b708c4600f2a6004bb61df602fc3384e
|
1c955f9b3811b3246e781fbdfe9dddbfd5913d34
|
/django_react_starter/settings.py
|
20d85216bbc4190fbcda65a287dd618ba17c2bda
|
[] |
no_license
|
caisbalderas/django_react_starter
|
4f8a66d7c3c14d8911bfca2c46830155781dbdc5
|
5f36cbe5d5de1b5760b296c05bd73a621d4b7e5a
|
refs/heads/master
| 2021-01-20T17:51:10.048296
| 2017-05-11T21:52:35
| 2017-05-11T21:52:35
| 90,894,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,539
|
py
|
"""
Django settings for django_react_starter project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webpack_loader',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_react_starter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_react_starter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets/'),
)
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'bundles/', # must end with slash
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'TIMEOUT': None,
'IGNORE': [r'.+\.hot-update.js', r'.+\.map']  # raw strings avoid invalid escapes
}
}
|
[
"caisbalderas@gmail.com"
] |
caisbalderas@gmail.com
|
1262314fa860eabc4ef6f1cbb7e7caed363fbdd6
|
ad00cfc0106a6d11b07907aee8f23300943bd97b
|
/Codeforces Scraper/main.py
|
fe2b20ed4676c9a8d7d20858b9190ce6a87e5a37
|
[] |
no_license
|
shreyansh424singh/Scrapping_Assingment
|
8d2e73b8dacdb90d2a715be02c9813d017dfb7fa
|
955b446bc8d6d98d1ead20f62403bc1f9476db99
|
refs/heads/main
| 2023-03-11T20:46:11.115761
| 2021-02-26T12:25:08
| 2021-02-26T12:25:08
| 342,484,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,221
|
py
|
import selenium
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver import ChromeOptions
import shutil, os
import time
from selenium.webdriver.chrome.options import Options
PATH="D:\chromedriver.exe"
driver = webdriver.Chrome(PATH)
print("Enter the contest number")
x=input()
for code in range(ord('A'), ord('Z') + 1):
a=chr(code)
b="https://codeforces.com/problemset/problem/" + str(x) + "/" + a
print(b)
driver.implicitly_wait(5)
driver.get(b)
time.sleep(1)
if (driver.title[14] == a):
path = "C:/Users/HP/OneDrive/Documents/Selenium Dev Club/Codeforces Scraper/" + str(x) + "/" + a
os.makedirs(path)
el = driver.find_element_by_class_name('problem-statement')
el.screenshot('problem.png')
problem_path = path + "/problem.png"
shutil.move("C:/Users/HP/OneDrive/Documents/Selenium Dev Club/Codeforces Scraper/problem.png", problem_path)
inputs = driver.find_elements_by_class_name("input")
a=1
for input in inputs:
txt_path = path + "/input" + str(a) + ".txt"
f = open(txt_path, "w+")
i = driver.find_elements_by_class_name("input")
f.write(i[a-1].text)
lines = f.readlines()
f.close()
txt_path1 = path + "/output" + str(a) + ".txt"
f = open(txt_path1, "w+")
i = driver.find_elements_by_class_name("output")
f.write(i[a-1].text)
f.close()
a=a+1
driver.quit()
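# Usage sketch (the paths above are machine-specific): run `python main.py`,
# type a contest number such as 1462 at the prompt, and for every problem
# letter that exists the script saves problem.png plus input<i>.txt and
# output<i>.txt under <contest>/<letter>/ in the Codeforces Scraper directory.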
|
[
"noreply@github.com"
] |
shreyansh424singh.noreply@github.com
|
c93d5675dde2480fa7850132821f46d033fb4289
|
1baf7ca06f4e4adda9f273ce9a52d858b4bd9577
|
/PyMLServer-Local/MLTrainer.py
|
14fc12203d76af906b2eadc62dc2c460d0c9825b
|
[] |
no_license
|
fabijas/F2MD
|
3909c7de6c6af999b5b51ab43bc226e56716eb75
|
d27dc26cca2169853c897abba96be4252d5d143f
|
refs/heads/master
| 2020-05-23T14:29:58.812048
| 2018-12-11T02:41:47
| 2018-12-11T02:41:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,189
|
py
|
"""
/*******************************************************************************
* @author Joseph Kamel
* @email josephekamel@gmail.com
* @date 28/11/2018
* @version 2.0
*
* SCA (Secure Cooperative Autonomous systems)
* Copyright (c) 2013, 2018 Institut de Recherche Technologique SystemX
* All rights reserved.
*******************************************************************************/
"""
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn import datasets
from sklearn.externals import joblib
from os import listdir
from os.path import isfile, join
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import load_model
from keras.utils import to_categorical
class MlTrainer:
AIType = 'NotSet'
valuesFileStr = 'notSet'
targetFileStr = 'notSet'
savePath = ''
valuesCollection = np.array([])
targetCollection = np.array([])
curDateStr = ''
def setCurDateSrt(self, datastr):
self.curDateStr = datastr
def setSavePath(self, datastr):
self.savePath = datastr
def setValuesCollection(self, datacol):
self.valuesCollection = datacol
def setTargetCollection(self, datacol):
self.targetCollection = datacol
def setAIType(self, datastr):
self.AIType = datastr
def train(self):
if(self.AIType == 'SVM'):
X, y = self.valuesCollection, self.targetCollection
# note: SVC expects 1-D class labels, so y is not one-hot encoded here
clf = SVC(gamma=0.001, C=100.)
clf.fit(X, y)
if(self.AIType == 'MLP_L1N15'):
X, y = self.valuesCollection, self.targetCollection
y = to_categorical(y)
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(25,), random_state=1)
clf.fit(X, y)
if(self.AIType == 'MLP_L3N25'):
X, y = self.valuesCollection, self.targetCollection
y = to_categorical(y)
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(25,25,25,), random_state=1)
clf.fit(X, y)
if(self.AIType == 'LSTM'):
print self.valuesCollection.shape
print self.targetCollection.shape
X, y = self.valuesCollection, self.targetCollection
y = to_categorical(y)
clf = Sequential()
clf.add(LSTM(128, return_sequences=True, input_shape=(X.shape[1], X.shape[2])))
clf.add(LSTM(128, return_sequences=True))
clf.add(LSTM(128, return_sequences=False))
clf.add(Dense(y.shape[1],activation='softmax'))
clf.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])
clf.fit(X, y,epochs=10, batch_size=64)
joblib.dump(clf, self.savePath + '/clf_' + self.AIType + '_'+self.curDateStr+'.pkl')
def loadData(self):
self.valuesCollection = np.load(self.savePath + '/' +self.valuesFileStr)
self.targetCollection = np.load(self.savePath + '/' +self.targetFileStr)
def setFileNames(self):
filesNames = [f for f in listdir(self.savePath) if isfile(join(self.savePath, f))]
for s in filesNames:
if s.endswith(".npy"):
if s.startswith("valuesSave_") and self.valuesFileStr == 'notSet' :
self.valuesFileStr = s  # fixed: previously assigned to targetFileStr by mistake
if s.startswith("targetSave_") and self.targetFileStr == 'notSet' :
self.targetFileStr = s
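# A minimal usage sketch (hypothetical paths, assuming only the class above):
# locate previously saved numpy arrays under savePath, then train and dump a model.
if __name__ == '__main__':
    trainer = MlTrainer()
    trainer.setSavePath('.')            # hypothetical working directory
    trainer.setCurDateSrt('2018-11-28') # hypothetical date tag
    trainer.setFileNames()              # finds valuesSave_*.npy / targetSave_*.npy
    trainer.loadData()
    trainer.setAIType('MLP_L3N25')
    trainer.train()                     # writes ./clf_MLP_L3N25_2018-11-28.pkl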
|
[
"josephekamel@gmail.com"
] |
josephekamel@gmail.com
|
0e3ef96d0f833186b2c509af4b518f523aa78c8e
|
68e7e580432b91a88723ffd90b01fbc2586bed1b
|
/app/admin/forms.py
|
8b3838eefdc0a2dadf997e5884ace395ce41ca56
|
[] |
no_license
|
Pretty-19/Employee-management-flask
|
89d5339993e5d0b0be746a73ee4f9d583c498b43
|
4dd7d7f74ba47d7515c5831ce271ab677ffecada
|
refs/heads/master
| 2023-03-28T06:07:21.141007
| 2020-09-20T12:43:44
| 2020-09-20T12:43:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from ..models import Department, Role
class DepartmentForm(FlaskForm):
"""
Form for admin to add or edit a department
"""
name = StringField('Name', validators=[DataRequired()])
description = StringField('Description', validators=[DataRequired()])
submit = SubmitField('Submit')
class RoleForm(FlaskForm):
"""
Form for admin to add or edit a role
"""
name = StringField('Name', validators=[DataRequired()])
description = StringField('Description', validators=[DataRequired()])
submit = SubmitField('Submit')
class EmployeeAssignForm(FlaskForm):
"""
Form for admin to assign departments and roles to employees
"""
department = QuerySelectField(query_factory=lambda: Department.query.all(),
get_label="name")
role = QuerySelectField(query_factory=lambda: Role.query.all(),
get_label="name")
submit = SubmitField('Submit')
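# A minimal usage sketch (hypothetical view code, assuming a Flask/WTForms
# request context; not part of the original module):
# form = DepartmentForm()
# if form.validate_on_submit():
#     department = Department(name=form.name.data,
#                             description=form.description.data)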
|
[
"hello@parekhjigar.com"
] |
hello@parekhjigar.com
|
c89ede755803e22359be129204f534a099105041
|
185e7257cc5a16a584ffd6dc38d0c13cb5f82fe6
|
/downloads/smr3128-material/labs/src/lora-coverage/lora_rx_node/main.py
|
e6a178666aac7ea2fa7fb4e4129d1ef56cfb0039
|
[] |
no_license
|
diegoleonbarido/iot-smr3128-ictp
|
267812ac6b699b34a3de1ee811de225ba8c7c51c
|
03bb39a540f7dc9a067bc078f4be51efdd5782b6
|
refs/heads/master
| 2020-12-02T23:58:40.537077
| 2017-07-01T16:41:53
| 2017-07-01T16:41:53
| 95,968,900
| 0
| 0
| null | 2017-07-01T14:25:48
| 2017-07-01T14:25:48
| null |
UTF-8
|
Python
| false
| false
| 7,669
|
py
|
# --------------------------------------------------------
# read the GPS position from an Android smartphone;
# data are saved to a CSV file
# see:
# https://docs.pycom.io/pycom_esp32/pycom_esp32/tutorial/includes/lora-mac.html
#
# android app ShareGPS:
# https://play.google.com/store/apps/details?id=com.jillybunch.shareGPS
# http://www.jillybunch.com/sharegps/gpsd-commands.html
#
# caution:
# https://github.com/micropython/micropython/issues/2890
# OSError: [Errno 113] EHOSTUNREACH only when executing connect() in main.py
#
# caution:
# to use two sockets together (wifi and lora):
# 1) open socket1
# 2) work with socket1
# 3) close socket1
# 4) open socket2
# 5) work with socket2
# 6) close socket2
# 7) insert a delay (actually 0.3 sec, time.sleep(0.3))
#
# other info about sockets
# http://stackoverflow.com/questions/1908878/netcat-implementation-in-python
# https://gist.github.com/adventureloop/9bba49b214768ed36717060246d18916
# import os
# import pycom
# import network
# import time
# import socket
# from machine import Pin
# from machine import SD
# import binascii
# from network import LoRa
# import machine
# import json
# import binascii
# import sys
# import utils
#
# # gps data
# lat = 0.0
# lon = 0.0
# alt = 0.0
# gpstime = ""
#
# # rssi data
# timestamp = ""
# rssi = ""
# snr = ""
# sf = ""
#
# # use tcp to send gps request position.
# # It is equivalent to:
# # netcat(192.168.43.1, 2947, '?SHGPS.LOCATION;')
# #
# def GetGpsPosition(host, port):
# global lat
# global lon
# global alt
# global gpstime
#
# data = b'?SHGPS.LOCATION;\r\n'
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.connect((host, port))
# # get data after connection
# rxdata = sock.recv(4096)
# sock.send(data)
# # get answer
# rxdata = sock.recv(4096)
# jsrx = rxdata.decode('ascii')
#
# #jsrx = repr(rxdata)
# # http://stackoverflow.com/questions/4987327/how-do-i-check-if-a-string-is-unicode-or-ascii
# # convert bytes (python 3) or unicode (python 2) to str
# # if str(type(jsrx)) == "<class 'bytes'>":
# # # only possible in Python 3
# # jsrx = jsrx.decode('ascii') # or s = str(s)[2:-1]
# # elif str(type(jsrx)) == "<type 'unicode'>":
# # # only possible in Python 2
# # jsrx = str(jsrx)
#
#
#
# # ------------------- parse json string
# # see:
# # http://stackoverflow.com/questions/7771011/parse-json-in-python
# # http://stackoverflow.com/questions/15010418/parsing-json-array-in-python
# #
# # now jsrx has the json string
# # example of string received from android app:
# # {'lat': 45.70373, 'time': '2017-04-24T12:35:20.000Z', 'alt': 57.45, 'class': 'SHGPS.LOCATION', 'lon': 13.72005, 'mode': 2}
# #
# json_data = json.loads(jsrx)
# lat = json_data["lat"]
# lon = json_data["lon"]
# alt = json_data["alt"]
# gpstime = json_data["time"]
#
# # ===========================
# sock.close()
#
# # ----------------------------- loramacrx.py
# # check if string is empty
# def isNotBlank (myString):
# return bool(myString and myString.strip())
#
#
# # ================================================
# # Start program
# #
# # get the 6-byte unique id of the board (pycom MAC address)
# # get loramac
#
# loramac = binascii.hexlify(network.LoRa().mac())
#
# # initialize LoRa in LORA mode
# # more params can also be given, like frequency, tx power and spreading factor
# lora = LoRa(mode=LoRa.LORA)
#
# # create a raw LoRa socket
# nMsgTx = 1
# tStartMsec = time.ticks_ms()
# LoraStats = "" # get lora stats
#
# # ----------------------------- tstgps5.py
# # expansion board user led
# user_led = Pin("G16", mode=Pin.OUT)
# # expansion board button
# button = Pin("G17", mode=Pin.IN, pull=Pin.PULL_UP)
#
# pycom.heartbeat(False)
# pycom.rgbled(0x007f00) # green
#
# # ---------------------------------
# # setup wypy/lopy as a station
# wlan = network.WLAN(mode=network.WLAN.STA)
# wlan.connect('mrandroid', auth=(network.WLAN.WPA2, 'eatmenow'))
# while not wlan.isconnected():
# time.sleep_ms(50)
# print(wlan.ifconfig())
#
# # ---------------------------------
# # create directory log if not exist
# try:
# os.mkdir('/flash/log')
# except OSError:
# pass
#
# # open file to store csv
# # form name of csv file
# # format: acq<year><month><day><hour><min><sec>
#
# # unpack localtime in year...
# year, month, day, hour, minute, second, ms, dayinyear = time.localtime()
#
# nameCsv = '/flash/log/acq'
# nameCsv = nameCsv + '{:04d}'.format(year)
# nameCsv = nameCsv + '{:02d}'.format(month)
# nameCsv = nameCsv + '{:02d}'.format(day)
# nameCsv = nameCsv + '{:02d}'.format(hour)
# nameCsv = nameCsv + '{:02d}'.format(minute)
# nameCsv = nameCsv + '{:02d}'.format(second)
# nameCsv = nameCsv + 'list.csv'
#
# fCsv = open(nameCsv, 'w')
#
# # ---------------------------------
# pressed = 0
# count = 0
# while True:
#
# GetGpsPosition("192.168.43.1", 2947)
#
# # create a raw LoRa socket
# s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
#
# s.setblocking(False)
# # dataRx = s.recv(64)
# dataRx = s.recv(256)
# LoraStats = lora.stats() # get lora stats (data is tuple)
#
# if isNotBlank (dataRx):
# timestamp=LoraStats[0] # get timestamp value
# rssi=LoraStats[1]
# snr=LoraStats[2]
# sf=LoraStats[3]
#
# msgData=""
# msgCrc=""
# if len(dataRx)>=5:
# msgData = dataRx[:-5] # remove the last 5 char data crc
# msgCrc = dataRx[-4:] # get the last 4 char
#
# # calc crc
# crc8 = utils.crc(msgData)
# # verify crc
# crcOk = False
# if crc8 == msgCrc.decode('utf-8'):
# crcOk = True
#
# # fields_lorastats(LoraStats)
#
# # form csv row
# msg = str(count)
# msg = msg + ',' + loramac.decode('utf8')
# msg = msg + ',' + gpstime
# msg = msg + ',' + str(lat)
# msg = msg + ',' + str(lon)
# msg = msg + ',' + str(alt)
#
# msg = msg + ',' + msgData.decode('utf-8')
# msg = msg + ',' + msgCrc.decode('utf-8')
# msg = msg + ',' + str(crc8)
# msg = msg + ',' + str(crcOk)
# msg = msg + ',' + str(timestamp)
# msg = msg + ',' + str(rssi)
# msg = msg + ',' + str(snr)
# msg = msg + ',' + str(sf)
#
# # # calc crc8 row
# #crc8row = calc(msg.encode('utf-8'))
# crc8row = utils.crc(msg.encode('utf-8'))
#
# # # add crc8row as last item
# msg = msg + ',' + str(crc8row)
#
# # write csv and terminal
# fCsv.write(msg)
# fCsv.write('\n')
# fCsv.flush()
#
# print(msg) # show in repl
#
# count = count + 1
#
# s.close()
# time.sleep(0.3) #<== Try a delay here...
#
# if button() == 0:
# print("Acquisition ended")
# wlan.mode(network.WLAN.AP)
# pycom.rgbled(0x7f0000) # red
# break
from machine import Pin
import pycom
from loracoverage import LoraCoverage
import config
import utils
# expansion board button
button = Pin("G17", mode=Pin.IN, pull=Pin.PULL_UP)
lora_cov = LoraCoverage(config.GPSD_HOST, config.GPSD_PORT, config.WIFI_HOTSPOT_SSID, config.WIFI_HOTSPOT_WPA2, config.LOG_PATH)
lora_cov.start()
pushed = False
def handler(pin):
global pushed
value = pin.value()
if not value and not pushed:
print("Acquisition ended")
lora_cov.stop()
pushed = True
button.callback(Pin.IRQ_FALLING | Pin.IRQ_RISING, handler)
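# Note: button G17 is wired to handler() via the IRQ callback above; the first
# falling edge prints "Acquisition ended" and stops the LoraCoverage logger.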
|
[
"franckalbinet@gmail.com"
] |
franckalbinet@gmail.com
|
20f3df647778fcc404aa09fbeb1717b0f3cc3b00
|
1824a526a49139506fe604e85d656b29b3c8342f
|
/home/models.py
|
5269838b542bfa6141af824ba39587fca828111c
|
[] |
no_license
|
nikoladang/nikolad
|
5f4802f98adde5febd1b17250920f88ea26a9015
|
c5e8228f242058e930232f6773ba306c0a930f0f
|
refs/heads/master
| 2016-08-09T16:20:56.147690
| 2016-01-08T15:45:07
| 2016-01-08T15:45:07
| 49,169,998
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
from __future__ import unicode_literals
from django.db import models
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailadmin.edit_handlers import FieldPanel
class HomePage(Page):
body = RichTextField(blank=True)
content_panels = Page.content_panels + [
FieldPanel('body', classname="full")
]
|
[
"ducthinhdt@gmail.com"
] |
ducthinhdt@gmail.com
|
03f7b27025f5c22560aac438c4792f9b6ff465e8
|
1a48ac7b2dddd18d31e4c39cc89f3386f3a8eb96
|
/markI.py
|
fcc0d1c85e2be192475ed036e2b0bc4cfcf64e37
|
[] |
no_license
|
N-eeraj/finding-the-number
|
24774823e2e87931896876de929b51bc884358c5
|
0477147fbfa1bae0f56472067996d17c2f6ec36e
|
refs/heads/master
| 2022-12-05T13:57:03.188520
| 2020-08-30T18:06:20
| 2020-08-30T18:06:20
| 291,524,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
upper, lower = 100, 1  # avoid shadowing the built-ins max() and min()
lst = list(range(lower, upper + 1))
print('Think of a number between 1 & 100\n')
try:
for i in range(7):
print('Chances :',7-i)
try:
guess=lst[len(lst)//2]
except:
print('\tCheater!!')
exit()
while(True):
x=input('Is the number : '+str(guess)+' (Y/N)').lower()
if(x=='y'):
print('\nI Won!')
exit()
elif(x=='n'):
while(True):
change=input('Is the number Greater or Smaller? (G/S)').lower()
if(change=='g'):
lst=list(filter(lambda x: x>guess,lst))
break
elif(change=='s'):
lst=list(filter(lambda x: x<guess,lst))
break
else:
print('Enter Valid Output\n')
break
else:
print('Enter Valid Output\n')
print('\nI Lost')
except:
print('\n\nExiting')
|
[
"neeraj7rajeena@gmail.com"
] |
neeraj7rajeena@gmail.com
|
17b73a2c27857fe2b59f164ed758914b1f143472
|
0cb2219714575fc9717ecfde4eea3f5e05662038
|
/venv/Scripts/easy_install-3.6-script.py
|
925d72315632bad91e98d96d85c2236e56a99cbb
|
[] |
no_license
|
tamakihime/yomiage
|
ca1ae83b8dda3ead668b841e3164e7757a57f1aa
|
7396afd79ea2b9854f9f8a7391760d800d589bca
|
refs/heads/master
| 2020-03-13T20:47:45.481903
| 2018-04-27T10:26:51
| 2018-04-27T10:26:55
| 131,281,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
#!C:\programing\yomiage\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install-3.6')()
)
|
[
"37037753+tamakihime@users.noreply.github.com"
] |
37037753+tamakihime@users.noreply.github.com
|
ea6c41d81fd688511f7757ac3446d2cbb2bac4ef
|
6671306be3bbcdad75ad5ef3f7cd573c948acd77
|
/book_code/Chapter04/vrp.py
|
25dff81fe9fe25b101bc3d606337545571b8541d
|
[] |
no_license
|
mohantyk/genetic_algos
|
7fa0e4589f53970492a52430358a39a9c2906c9f
|
5aa202dc0367a322c814b8c920736770757cb151
|
refs/heads/main
| 2023-01-13T07:47:43.514934
| 2020-11-11T20:02:34
| 2020-11-11T20:02:34
| 310,965,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,811
|
py
|
import random
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import sys
curr_dir = Path(__file__).resolve().parent
if str(curr_dir) not in sys.path:
sys.path.append(str(curr_dir))
import tsp
class VehicleRoutingProblem:
def __init__(self, tspName, numOfVehicles, depotIndex):
"""
Creates an instance of a VRP
:param tspName: name of the underlying TSP
:param numOfVehicles: number of vehicles used
:param depotIndex: the index of the TSP city that will be used as the depot location
"""
self.tsp = tsp.TravelingSalesmanProblem(tspName)
self.numOfVehicles = numOfVehicles
self.depotIndex = depotIndex
def __len__(self):
"""
returns the number of indices used to internally represent the VRP
:return: the number of indices used to internally represent the VRP
"""
return len(self.tsp) + self.numOfVehicles - 1
def getRoutes(self, indices):
"""
breaks the list of given indices into separate routes,
by detecting the 'separator' indices
:param indices: list of indices, including 'separator' indices
:return: a list of routes, each route being a list of location indices from the tsp problem
"""
# initialize lists:
routes = []
route = []
# loop over all indices in the list:
for i in indices:
# skip depot index:
if i == self.depotIndex:
continue
# index is part of the current route:
if not self.isSeparatorIndex(i):
route.append(i)
# separator index - route is complete:
else:
routes.append(route)
route = [] # reset route
# append the last route (guarding against an empty indices list):
if route or (indices and self.isSeparatorIndex(indices[-1])):
routes.append(route)
return routes
def isSeparatorIndex(self, index):
"""
Determines whether the current index is a separator index
:param index: denotes the index of the location
:return: True if the given index is a separator
"""
# check if the index is larger than the number of the participating locations:
return index >= len(self) - (self.numOfVehicles - 1)
def getRouteDistance(self, indices):
"""Calculates total the distance of the path that starts at the depo location and goes through
the cities described by the given indices
:param indices: a list of ordered city indices describing the given path.
:return: total distance of the path described by the given indices
"""
if not indices:
return 0
# find the distance between the depot location and the first city:
distance = self.tsp.distances[self.depotIndex][indices[0]]
# add the distance between the last city and the depot location:
distance += self.tsp.distances[indices[-1]][self.depotIndex]
# add the distances between the cities along the route:
for i in range(len(indices) - 1):
distance += self.tsp.distances[indices[i]][indices[i + 1]]
return distance
def getTotalDistance(self, indices):
"""Calculates the combined distance of the various paths described by the given indices
:param indices: a list of ordered city indices and separator indices describing one or more paths.
:return: combined distance of the various paths described by the given indices
"""
totalDistance = 0
for route in self.getRoutes(indices):
routeDistance = self.getRouteDistance(route)
#print("- route distance = ", routeDistance)
totalDistance += routeDistance
return totalDistance
def getMaxDistance(self, indices):
"""Calculates the max distance among the distances of the various paths described by the given indices
:param indices: a list of ordered city indices and separator indices describing one or more paths.
:return: max distance among the distances of the various paths described by the given indices
"""
maxDistance = 0
for route in self.getRoutes(indices):
routeDistance = self.getRouteDistance(route)
#print("- route distance = ", routeDistance)
maxDistance = max(routeDistance, maxDistance)
return maxDistance
def getAvgDistance(self, indices):
"""Calculates the average distance among the distances of the various paths described by the given indices
Does not consider empty paths
:param indices: a list of ordered city indices and separator indices describing one or more paths.
:return: average distance among the distances of the non-empty paths described by the given indices
"""
routes = self.getRoutes(indices)
totalDistance = 0
counter = 0
for route in routes:
if route: # consider only routes that are not empty
routeDistance = self.getRouteDistance(route)
# print("- route distance = ", routeDistance)
totalDistance += routeDistance
counter += 1
return totalDistance/counter
def plotData(self, indices):
"""breaks the list of indices into separate routes and plot each route in a different color
:param indices: A list of ordered indices describing the combined routes
:return: the resulting plot
"""
# plot the cities of the underlying TSP:
plt.scatter(*zip(*self.tsp.locations), marker='.', color='red')
# mark the depot location with a large 'X':
d = self.tsp.locations[self.depotIndex]
plt.plot(d[0], d[1], marker='x', markersize=10, color='green')
# break the indices to separate routes and plot each route in a different color:
routes = self.getRoutes(indices)
color = iter(plt.cm.rainbow(np.linspace(0, 1, self.numOfVehicles)))
for route in routes:
route = [self.depotIndex] + route + [self.depotIndex]
stops = [self.tsp.locations[i] for i in route]
plt.plot(*zip(*stops), linestyle='-', color=next(color))
return plt
def main():
# create a problem instance:
vrp = VehicleRoutingProblem("bayg29", 3, 12)
# generate random solution and evaluate it:
randomSolution = random.sample(range(len(vrp)), len(vrp))
print("random solution = ", randomSolution)
print("route breakdown = ", vrp.getRoutes(randomSolution))
print("max distance = ", vrp.getMaxDistance(randomSolution))
# plot the solution:
plot = vrp.plotData(randomSolution)
plot.show()
if __name__ == "__main__":
main()
|
[
"mohantyk@users.noreply.github.com"
] |
mohantyk@users.noreply.github.com
|
594eec0d44450f2013b93bb0c9343590cb61eb62
|
b5698c259c80c9dc9b1cbca9dd82eb7f4d799f61
|
/tests/standalone/PyQt4Plugins.py
|
ed60fa18319e2478ffe600c74930c92f36265623
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
4O4/Nuitka
|
431998d4d424f520d2887e9195ad7d7c61691b40
|
e37d483b05c6b2ff081f3cedaee7aaff5c2ea397
|
refs/heads/master
| 2020-04-02T04:06:10.826960
| 2018-10-18T21:13:16
| 2018-10-18T21:13:16
| 154,000,090
| 0
| 0
|
Apache-2.0
| 2018-10-21T11:17:44
| 2018-10-21T11:17:44
| null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
# Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from PyQt4 import QtGui
print(QtGui.QImageReader.supportedImageFormats())
|
[
"kay.hayen@gmail.com"
] |
kay.hayen@gmail.com
|
979d949c58a5f2a64126f55fb265850838341e56
|
d458a728de293822171faa22d5a75ae844283455
|
/Controller/UpdateAudioAccessToken.py
|
62e916996bad858df1b2c8c5b304afbf6bc13a03
|
[] |
no_license
|
wangqingbaidu/bangu
|
fda4b4c57c96f70078ea2a0c85ecf199a3065e10
|
2a7e19e9f3d55590e952dd3442080ed86209ecc9
|
refs/heads/master
| 2020-08-03T20:49:45.129683
| 2019-01-25T13:35:12
| 2019-01-25T13:35:12
| 73,537,485
| 20
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,878
|
py
|
# -*- coding: UTF-8 -*-
'''
Controller.UpdateAudioAccessToken is a part of the project bangu.
bangu is an open-source project which follows MVC design pattern mainly based on python.
Copyright (C) 2014 - 2016, Vlon Jang(WeChat:wangqingbaidu)
Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China.
The code was mainly developed by Zhiwei Zhang.
As an open-source project, you can use or modify it as you want.
Contact Info: you can send an email to 564326047@qq.com(Vlon)
or visit my website www.wangqingbaidu.cn
Note: Please keep the above information whenever or wherever the codes are used.
'''
import GetBanguHome
import urllib2
import json
from utils.ReadConfig import configurations
from Model import model, ModelDB
from datetime import datetime
import time
from Controller import putErrorlog2DB
def getAudioAccessToken2DB(cfg = configurations.get_basic_settings(), db = model):
try:
if type(cfg) != dict or not cfg.has_key('audio_apikey') or not cfg.has_key('audio_secretkey'):
print 'audio_apikey and audio_secretkey must be contained!'
exit()
url = 'https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s'\
%(cfg['audio_apikey'], cfg['audio_secretkey'])
f = urllib2.urlopen(url)
audio_token = json.loads(f.read())
audio_token['datetime'] = datetime.now()
db.insert_audio_token(audio_token)
except Exception, e:
putErrorlog2DB('ThreadAudioAccessToken2DB', e, db)
def ThreadAudioAccessToken2DB(decay = 901022):
db = ModelDB()
while True:
getAudioAccessToken2DB(db = db)
time.sleep(decay)
if __name__ == '__main__':
getAudioAccessToken2DB()
desc = model.get_latest_audio_token()
print desc
|
[
"wangqingbaidu@gmail.com"
] |
wangqingbaidu@gmail.com
|
b59286c5280c6a4871896a79b181b126dd4ae483
|
deab657823976574365f86a05b72a3392601c6f9
|
/venv/bin/pasteurize
|
07a8472422171e6f351bc46252c3653fedf318c9
|
[] |
no_license
|
alexanderbcook/twitter_stream
|
3df644b99bbdb2d5a9a1107585f0cd1a139c6bd5
|
d43dbc9dce406ab14844052f8cdfd8ea3df65a12
|
refs/heads/master
| 2021-01-12T13:15:10.348539
| 2020-08-22T05:09:53
| 2020-08-22T05:09:53
| 72,162,184
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
#!/Users/alexandercook/Repositories/twitter_stream/venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','pasteurize'
__requires__ = 'future==0.18.2'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.18.2', 'console_scripts', 'pasteurize')()
)
|
[
"alex.chef.cook@gmail.com"
] |
alex.chef.cook@gmail.com
|
|
19ea2e3bf178cee466e63316a2aadd08a30e10dd
|
4d49d7d6e48fb9044753d50c713e32ebafca0f3b
|
/posts/migrations/0002_post_raters.py
|
1b28cb17e43388946e086ddd0f9b5ea611747b3b
|
[] |
no_license
|
dogbeide/inner-space
|
c1658a85a6d8324bf66b2cfaeb93b241a2c29032
|
f5da16af2b912f61ccbc7de308d6cc49443bbac9
|
refs/heads/master
| 2022-12-11T11:28:48.667257
| 2017-12-26T07:39:00
| 2017-12-26T07:39:00
| 108,508,363
| 0
| 0
| null | 2022-12-08T00:45:14
| 2017-10-27T06:32:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-04 00:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='raters',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
]
|
[
"boyowao@gmail.com"
] |
boyowao@gmail.com
|
215941da3f8b9d43ae25e8e9152da92fdb8c47c2
|
0d2efa3fa3c92a84a973f3f1bc4a3cf02d0b9315
|
/aliens/alien_invasion.py
|
e058fd0e5057bbbe84ed64dcdf382931bd2ce567
|
[] |
no_license
|
gyc598652015/alien_invasion_exercice
|
2aa76231f26b1a01f0b75d32e2fca51698efea46
|
8e6cb6b459780339865dac9f2b626dfb1b3eba44
|
refs/heads/master
| 2021-07-01T16:29:07.429887
| 2017-09-21T13:15:58
| 2017-09-21T13:15:58
| 103,915,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,250
|
py
|
import sys
import pygame
from pygame.sprite import Group
from settings import Settings
from ship import Ship
from alien import Alien
from backgrounds import Moon
import game_fuctions as gf
def run_g():
pygame.init()
ai_settings = Settings()
screen = pygame.display.set_mode(
(ai_settings.screen_width, ai_settings.screen_height))
pygame.display.set_caption("Alien Invasion")
ship = Ship(ai_settings, screen)
bullets = Group()
backgrounds = Moon(screen)
alien = Alien(ai_settings, screen)
while True:
gf.check_events(ai_settings, screen, ship, bullets)
ship.update()
gf.update_bullets(bullets)
gf.update_screen(ai_settings, screen, ship, alien, bullets, backgrounds)
##pygame.display.flip()
## Idea for a strategy shooter: bullets keep flying across the large map until they hit a target,
## and all kinds of enemies keep spawning at near and far points of the battlefield. Any enemy left
## alive levels up and revives, so from start to finish the player's challenge is to kill as many
## enemies as quickly as possible while conserving ammunition. There are several ammo types, e.g.
## piercing and explosive; upgrades could include a wider field of view.
run_g()
|
[
"noreply@github.com"
] |
gyc598652015.noreply@github.com
|
ae50af8552902dd297df6471f5b05c6635d73371
|
ad1574230541ae8eda7b432c083bb5d4e55e9c3c
|
/todo/settings.py
|
054a6cae02fbcb22381001c8581fda8cacc41a04
|
[] |
no_license
|
shuvam07/Django_ToDo
|
423294c3810eddf5750dabfc4026c40a9868fbbc
|
0e1803806cbe6271211274845d98fb7bb2827a7a
|
refs/heads/master
| 2020-05-20T16:41:16.650509
| 2019-05-08T21:26:39
| 2019-05-08T21:26:39
| 185,670,908
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,172
|
py
|
"""
Django settings for todo project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR,"templates")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c05^oq%lnmb(f3txe&_el#9wze&j*^@0eqr@np8b#9tzy)%olt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'login',
'posts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"shuvambosana0705@gmail.com"
] |
shuvambosana0705@gmail.com
|
7bcb9b4c0a7061894433e442cc2d3bad50907e0a
|
976e5acf2274202adeb8a1ae537de121fa4af8db
|
/w5/main/serializers.py
|
ab694cf8026445e936dc302ca2106cd68917080f
|
[] |
no_license
|
Aibek21/BFDjangoSpring2021
|
54db7be0cc4b386919a17eef8945e8c38fe24450
|
b0be9e94a7c5d08a9df9ffec3ce8baab890189e4
|
refs/heads/master
| 2023-04-14T04:24:26.092688
| 2021-05-01T08:12:44
| 2021-05-01T08:12:44
| 340,596,245
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,460
|
py
|
from rest_framework import serializers
from main.models import Book, Author, Publisher
class BookSerializer(serializers.ModelSerializer):
# author = AuthorSerializer(required=True)
# publisher = PublisherSerializer()
publication_date = serializers.DateField(read_only=True)
class Meta:
model = Book
# fields = '__all__'
fields = ('id', 'title', 'publication_date', 'num_pages')
# exclude = ('is_active')
# def validate(self, attrs):
# print(attrs)
# return attrs
# def validate_num_pages(self, value):
# if value < 1:
# raise serializers.ValidationError('num_pages must be positive')
# return value
# def validate_title(self, value):
# if '_' in value:
# raise serializers.ValidationError('invalid chars in title')
# return value
# def create(self, validated_data):
# #get author from validated_data
# # create author
# # create book
# #return book
# pass
#
# def update(self, instance, validated_data):
# pass
class PublisherSerializer(serializers.ModelSerializer):
# books_count = serializers.IntegerField()
# max_pages = serializers.IntegerField()
books = BookSerializer(many=True)
class Meta:
model = Publisher
fields = ('id', 'name', 'address', 'website', 'books')
# 'books_count', 'max_pages')
# class Meta:
# model = Publisher
# fields = '__all__'
# def to_internal_value(self, data):
# #return instance
# pass
#
# def to_representation(self, instance):
# #convert instance to dict
# #return data
# pass
#
# def create(self, validated_data):
# #create instance
# #Publisher.objects.create(**validated_data)
# #return instance
# pass
#
# def update(self, instance, validated_data):
# #update instance
# return instance
class BookFullSerializer(BookSerializer):
# author = AuthorSerializer(required=True)
publisher = PublisherSerializer()
class Meta(BookSerializer.Meta):
fields = BookSerializer.Meta.fields + ('publisher',)
class AuthorSerializer(serializers.ModelSerializer):
books = BookSerializer(many=True, read_only=True)
class Meta:
model = Author
fields = ('id', 'first_name', 'last_name', 'email', 'books', 'photo')
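# A minimal usage sketch (hypothetical queryset, not part of the original module):
# book = Book.objects.select_related('publisher').first()
# BookFullSerializer(book).data
# # -> {'id': ..., 'title': ..., 'publication_date': ..., 'num_pages': ...,
# #     'publisher': {'id': ..., 'name': ..., 'address': ..., 'website': ..., 'books': [...]}}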
|
[
"aibekkuralbaev@gmail.com"
] |
aibekkuralbaev@gmail.com
|
dc76b7587d4741e776e260013edd5010476f1ec4
|
d8ddbbeeeb4a742df96580d7c9fcd1b1a04c9c1d
|
/build/robotiq/robotiq_s_model_articulated_msgs/catkin_generated/installspace/robotiq_s_model_articulated_msgs-msg-paths-context.py
|
f4d74a50f29cc96cffedb12872e2d311dc768b40
|
[] |
no_license
|
michaellin/ur5_calibration
|
5019f34d1d3577cfe0f24dc6daf54bf6eb11735f
|
d78f3693d3858c9a2d1a5bf123237fccee754cb6
|
refs/heads/master
| 2018-11-09T12:35:09.246358
| 2017-03-08T08:25:48
| 2017-03-08T08:25:48
| 84,296,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
# generated from genmsg/cmake/pkg-msg-paths.context.in
DEVELSPACE = 'FALSE' == 'TRUE'
INSTALLSPACE = 'TRUE' == 'TRUE'
PROJECT_NAME = 'robotiq_s_model_articulated_msgs'
PKG_MSG_INCLUDE_DIRS = 'msg'
ARG_DEPENDENCIES = 'std_msgs'
|
[
"bdml@bdml-computer.(none)"
] |
bdml@bdml-computer.(none)
|
7cfe6762938f416cde711157596130c86a787de3
|
a09f3b682dd8a66cb64458173918b402875ad123
|
/vini pro1.py
|
d0a84044e4227825c388bb1956ab22c2df1eee34
|
[] |
no_license
|
VINISHAV/python1
|
ab987c5d71e3a16759eaff671c20a8dbd2d856e4
|
4e66083f001a73f0d03684d5a669c9c60029cf63
|
refs/heads/master
| 2020-03-28T13:48:09.740126
| 2019-01-11T08:39:44
| 2019-01-11T08:39:44
| 148,430,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
x = 3
if x == 0:
    print("the value is zero")
elif x > 0:
    print("the value is positive")
else:
    print("the value is negative")
|
[
"noreply@github.com"
] |
VINISHAV.noreply@github.com
|
9271910db1f488ed0caefbe91bec08eba3441507
|
426113e472ae35aa930df52fef2ca6e28aebc815
|
/backend/mjengoapp_15173/settings.py
|
df091973c45d3a5bdd21f65353045bf1751e39c1
|
[] |
no_license
|
crowdbotics-apps/mjengoapp-15173
|
6e7011d1ced5ee1b559e36f47a134534cd01a305
|
8140d8461a89a5adcfe296240687ca0342b24cb2
|
refs/heads/master
| 2023-02-15T05:50:39.505485
| 2020-03-29T04:29:08
| 2020-03-29T04:29:08
| 250,953,209
| 0
| 0
| null | 2023-01-24T01:48:05
| 2020-03-29T04:28:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,797
|
py
|
"""
Django settings for mjengoapp_15173 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mjengoapp_15173.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mjengoapp_15173.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
edda1c6e6876e2f0e8d6bb668e65251a301cc281
|
69e23067da986c688f9ea8729c1dd1706725206d
|
/starscream/starscream/pipelines.py
|
75e40db2673627e55d6a6b3726f0bed56fbf36c6
|
[
"Apache-2.0"
] |
permissive
|
ryanorz/code-camp
|
33487e9db326b4dd94ca469de3fc02c99e470e06
|
041eb6443a3f0963a7778d3ddfb03a7557257094
|
refs/heads/master
| 2020-06-27T22:36:22.698435
| 2017-10-11T09:34:58
| 2017-10-11T09:34:58
| 97,076,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class StarscreamPipeline(object):
def process_item(self, item, spider):
return item
|
[
"ryanorz@126.com"
] |
ryanorz@126.com
|
d2a52fad4987e5055cf9401113094dbeb7f17f0d
|
18d12637d9a590a6c5c52705aaece08633567040
|
/python-2.7-tutorial/input_and_output/saving-structrued_data_with_json.py
|
3b5baed19c2b7f9d4561b1ec9488afe73dacbe5e
|
[] |
no_license
|
seehunter/python-tutor
|
3f6c0bef4226b70a65e3b86ce604e7c171497aa2
|
2886bd2bedf7ee97ab99a16503bb13d3d2214dd6
|
refs/heads/master
| 2020-04-18T02:09:06.146960
| 2019-01-23T08:55:20
| 2019-01-23T08:55:20
| 167,149,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
import json
json.dumps([1,'simple','list'])
# f is a file object; json.dump simply serializes the object x to that file
# json.dump(x, f)
# to decode the object again:
# x = json.load(f)
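# --- A hedged, minimal round-trip sketch (not in the original tutorial) ---
# Assumes only the standard library; the temp-file path is illustrative.
import os
import tempfile
x = [1, 'simple', 'list']
path = os.path.join(tempfile.gettempdir(), 'example.json')
with open(path, 'w') as f:
    json.dump(x, f)           # serialize x to the file
with open(path) as f:
    assert json.load(f) == x  # decode it back; the round trip preserves the data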
|
[
"seehunter@163.com"
] |
seehunter@163.com
|
cae976c4d74640db6db63e6ada9bbc5320df9890
|
79319118fccdb8dcc7139002165c68967ec12d34
|
/mysite/test/views.py
|
f00bb34dc3278a7630aaad1b03c56481685c311e
|
[] |
no_license
|
Coderphobia/DjangoNotes
|
f0fd3630d1e3512602083f17b6e0724d6d72ed20
|
8ed08c7ecd2edaccc92530622ccce3dc6a4187ae
|
refs/heads/master
| 2020-05-28T11:00:54.386325
| 2019-06-04T12:55:56
| 2019-06-04T12:55:56
| 188,977,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,984
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse, Http404
from django.template import Template, Context
from django.template.loader import get_template
import datetime
# Create your views here.
def helloworld(request):
return HttpResponse("Hello World")
def current_time(request):
now = datetime.datetime.now()
html = "<html><body>It is now %s.</body></html>" % now
return HttpResponse(html)
def current_time2(request):
now = datetime.datetime.now()
t = Template('<html><body>It is now {{current_time}}.</body></html>')
c = Context({'current_time': now})
    html = t.render(c)
return HttpResponse(html)
def current_time3(request):
now = datetime.datetime.now()
t = get_template('time.html')
#c = Context({'current_time': now})
c = {'current_time': now}
    html = t.render(c)
return HttpResponse(html)
def current_time4(request):
now = datetime.datetime.now()
return render(request, 'time.html', {'current_time': now})
def current_time5(request):
current_time = datetime.datetime.now()
return render(request, 'time.html', locals())
def current_time6(request):
current_time = datetime.datetime.now()
return render(request, 'current_datetime.html', locals())
def baidu(request):
return render(request, 'baidu.html')
def hours_ahead(request, offset):
try:
offset = int(offset)
except ValueError:
raise Http404()
dt = datetime.datetime.now() + datetime.timedelta(hours=offset)
#assert False
html = "<html><body>In %s hour(s), it will be %s.</body></html>" % (offset, dt)
return HttpResponse(html)
def hours_ahead2(request, offset):
try:
hours_ahead = int(offset)
except ValueError:
raise Http404()
next_time = datetime.datetime.now() + datetime.timedelta(hours=hours_ahead)
return render(request, 'future_time.html', locals())
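# Hedged note (not in the original): both hours_ahead views expect the offset to
# be captured by the URLconf, e.g. url(r'^time/plus/(\d{1,2})/$', hours_ahead)
# in the Django 1.x regex style this notes file targets.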
|
[
"qgqforu@gmail.com"
] |
qgqforu@gmail.com
|
0cb985a4c0465fa01e54452655c6db1c321f9651
|
0934c195728dbc51a6af6f8031d979817beb7df4
|
/weekly_exercises_y1s1/ex9.py
|
3172b43d83ac78d4d959daba59ccedb6051f4e5e
|
[] |
no_license
|
IMDCGP105-1819/portfolio-NormalUnicorn
|
5c25e7bad8ddc08a1310000151a0e3ce9ea564d1
|
c1d65a6c127076f2e343a2c46e2db4b346a17add
|
refs/heads/master
| 2020-03-30T01:17:34.095420
| 2018-12-11T16:54:18
| 2018-12-11T16:54:18
| 150,568,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
from decimal import Decimal
# Decimal is meant to force the savings rate to 2 dp max (note: it is currently unused below)
def maths(deposit, monthly_saved, semi_raise):
savings = 0.0
months = 0
while savings < deposit and months !=250:
if months % 6 == 0:
monthly_saved += monthly_saved*semi_raise
            # works out the monthly saved, accounting for the raise
savings += savings*(0.04/12) + monthly_saved
months += 1
print(savings)
if months < 250:
print("It will take you ", months, " moth(s) to save up for a deposit for your ideal house")
else:
print("You may be dreaming a bit too big there buddy")
def inputs():
total_cost = 1000000
deposit = total_cost*0.2
#Cost of the house, you only need to save up for the deposit
    annual_salary = float(input("Please enter how much you earn annually: (£)"))
monthly_salary = annual_salary/12
#How much is earned every month
monthly_savings = float(input("Please enter how much you plan on saving each month"))
monthly_saving = monthly_savings/100
monthly_saved = monthly_salary*monthly_saving
#How much money is invested to the deposit each month
    annual_raise = float(input("Please enter how much of a raise you receive annually"))
semi_raise = (annual_raise/100)/2
    # How much of a raise the person receives
maths(deposit, monthly_saved, semi_raise)
inputs()
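# Hedged worked note (not in the original): each month the balance compounds at
# 4% annual interest paid monthly, i.e.
#   savings_{t+1} = savings_t * (1 + 0.04/12) + monthly_saved
# and monthly_saved itself grows by semi_raise once every 6 months.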
|
[
"JordanChell@vivaldi.net"
] |
JordanChell@vivaldi.net
|
c9579e73278ca522955a4c96062b360e2fc2c5d5
|
4a67ca308faab57797d2684e87aba8dffa9f09f8
|
/GetIps/GetIps/spiders/Ips.py
|
c1ccc52eb17d5ce79b1f1232b6204355fd15f5ac
|
[] |
no_license
|
KeepCodeing/ScrapyProject
|
827cf82b550d6ebdd6f088b1e20f74c8e8354e59
|
91b2be73bc44dc65cb588a39ade7dcf957ee4224
|
refs/heads/master
| 2020-05-27T06:05:20.174495
| 2019-07-29T12:38:24
| 2019-07-29T12:38:24
| 188,514,261
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class IpsSpider(scrapy.Spider):
name = 'Ips'
allowed_domains = ['xicidaili.com']
start_urls = ['https://www.xicidaili.com/nn/']
# rules = (
# Rule(LinkExtractor(allow=r'/nn/\d+'), callback='parse_item', follow=True),
# )
    def parse(self, response):  # renamed from parse_item: scrapy.Spider invokes parse() by default, and the CrawlSpider rules above are commented out
item = {}
IPs = response.xpath('//tr[@class="odd"or@class=""]//td[2]/text()').getall()
Ports = response.xpath('//tr[@class="odd"or@class=""]//td[3]/text()').getall()
Types = response.xpath('//tr[@class="odd"or@class=""]//td[6]/text()').getall()
zipped = zip(IPs, Ports, Types)
for i in list(zipped):
item['ip'] = i[0]
item['port'] = i[1]
item['type'] = i[2]
yield item
|
[
"3350680655@qq.com"
] |
3350680655@qq.com
|
259962e45cfc2346567f2f271b40f955a66a1a19
|
6a7fdcbba12e549e586338a97897be7923e6a72c
|
/lambda_iterator.py
|
da93415c044d89ac2169f2f01f7c5cdbe698e60b
|
[] |
no_license
|
willw625731/free-energy-automator
|
03e625c0031242258aa952861aaa20b5ef94cd66
|
93095803918740653df0c91bc93760afe3f8c176
|
refs/heads/master
| 2021-01-16T08:03:34.709995
| 2020-02-25T15:25:33
| 2020-02-25T15:25:33
| 243,031,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,897
|
py
|
import sys
import os
import free_ener_utils as fe_utils
import numpy as np
class free_energy_calc:
def __init__(self, top_file1, top_file2, gro_file1, gro_file2, lv_step, lq_step, name, mol,
cpl0='vdw-q', cpl1='none',
mdfile_path='/ddn/data/mzkl37/mdfiles/',
simfile_path='/ddn/data/mzkl37/simfiles/',
execute_path='/ddn/data/mzkl37/free_energy_calc/',
temp='300'):
self.simfile_path = simfile_path
self.top_file1 = self.simfile_path+top_file1
self.top_file2 = self.simfile_path+top_file2
self.gro_file1 = self.simfile_path+gro_file1
self.gro_file2 = self.simfile_path+gro_file2
self.top_final = None
self.gro_final = None
self.mol = None
        self.lv_count = int(round(1.0 / lv_step)) + 1  # np.linspace requires an integer sample count
        self.lq_count = int(round(1.0 / lq_step)) + 1
self.lvs = None
self.lqs = None
self.lvs_in = None
self.lqs_in = None
self.cpl0 = cpl0
self.cpl1 = cpl1
self.mdfile_path = mdfile_path
self.execute_path = execute_path
self.name = name
self.mol = mol
self.mdfiles = ['steep.mdp', 'l-bfgs.mdp', 'nvt.mdp', 'npt.mdp', 'production.mdp']
self.temp = temp
self.other_ls = None
self.lambda_array()
# Join .top files and .gro files
def combine_files(self, top_out, gro_out, mol, count=1):
self.top_final = top_out
self.gro_final = gro_out
fe_utils.top_comp(self.top_file1, self.top_file2, mol, count, top_out)
fe_utils.comb_coords(self.gro_file1, self.gro_file2, gro_out)
# Execute equil.sh script with given lambda parameters and md file
def lambda_run(self, mdfile, lambda_idx):
cmd = '/ddn/data/mzkl37/scripts/equil.sh -i {} -f {} -p {} -l {} -v {} -q {} -a {} -b {} -m {} -t {}'.format(
self.mdfile_path+mdfile, 'confout.gro', self.top_final,
lambda_idx, self.lvs_in, self.lqs_in,
self.cpl0, self.cpl1, self.mol, self.temp)
os.system(cmd)
# Build lambda parameter array, vdw then q
def lambda_array(self, v0=0.0, v1=1.0, q0=0.0, q1=1.0):
self.lvs = list(map(str, np.round(np.linspace(v0, v1, self.lv_count), 3)))
self.lqs = list(map(str, np.round(np.linspace(q0, q1, self.lq_count), 3)))
self.lvs = self.lvs + list(map(str, np.ones(len(self.lqs)-1)))
self.lqs = list(map(str, np.zeros(len(self.lvs)-len(self.lqs)))) + self.lqs
self.lvs_in = "'"+" ".join(self.lvs)+"'"
self.lqs_in = "'"+" ".join(self.lqs)+"'"
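    # Hedged worked example (not in the original): with lv_step = lq_step = 0.5,
    # lambda_array() produces lvs = ['0.0', '0.5', '1.0', '1.0', '1.0'] and
    # lqs = ['0.0', '0.0', '0.0', '0.5', '1.0'] -- vdw is switched on first, then charges.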
def run_sim(self):
os.makedirs(self.execute_path+self.name)
os.chdir(self.execute_path+self.name)
self.combine_files(self.name+'.top', self.name+'.gro', self.mol)
os.makedirs('xvg_out')
# Make lambda folders
for idx, (lv, lq) in enumerate(zip(self.lvs, self.lqs)):
os.makedirs('lambda_lv{}_lq{}'.format(lv, lq))
os.chdir('lambda_lv{}_lq{}'.format(lv, lq))
os.system('cp ../{} .'.format(self.top_final))
os.system('cp ../{} confout.gro'.format(self.gro_final))
            # Run md set for the given lambdas
for md in self.mdfiles:
os.system('echo "MD File:\t{}\nLambdas v:\t{}\nLambdas q:\t{}"'.format(md, lv, lq))
self.lambda_run(md, idx)
os.system('wait')
if not os.path.isfile('confout.gro'):
break
os.system('cp confout.gro {}'.format(self.gro_final))
os.system('cp {} {} ../'.format(self.top_final, self.gro_final))
os.system('cp dhdl.xvg ../xvg_out/run_v{}q{}.xvg'.format(lv, lq))
os.chdir('../')
        os.system('find -name "#*" -exec rm {} +')     # quote the patterns so the shell does not glob-expand them
        os.system('find -name "*.trr*" -exec rm {} +')
|
[
"mzkl37@hamilton2.hpc.dur.ac.uk"
] |
mzkl37@hamilton2.hpc.dur.ac.uk
|
f41f7ea641940aa6f78f1db5d8879f1794d994d1
|
1511ff0083ee477bfb0a0e6620a122a003a20b6d
|
/djangoshop/djangoshop/wsgi.py
|
def624a82e1b5fdbfd0ccd41b5f09d4317832b62
|
[] |
no_license
|
sgb128/djangoshop
|
534eb195004000213d04f9d11b177336c85ac1ca
|
365d0dd1a8f9db5a2e4a7e120f437c89030a65c1
|
refs/heads/main
| 2023-07-05T18:27:47.617547
| 2021-08-06T14:45:37
| 2021-08-06T14:45:37
| 378,599,786
| 0
| 0
| null | 2021-08-18T11:45:29
| 2021-06-20T08:52:15
|
Python
|
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
WSGI config for djangoshop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoshop.settings')
application = get_wsgi_application()
|
[
"sgb128@mail.ru"
] |
sgb128@mail.ru
|
ed2caa5f7cae3b274303c7c204cc98bdaf12e492
|
68a380323165e6770b1530664dde145165d610e5
|
/Engine/Indicator/GoodIndicator.py
|
bf5fb7752e7012e35edf90ff9d9a70e24980c3a3
|
[
"Apache-2.0"
] |
permissive
|
iznake/AI-Trading-Bot
|
d1e8ae95871442f224c3534ebbf363b9cd73c534
|
b04dc97690def91ee1b486dec9576d06d36b0240
|
refs/heads/main
| 2023-09-04T18:04:10.245486
| 2021-10-26T13:40:18
| 2021-10-26T13:40:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
import copy
from .Indicator import *
from ..config import *
class GoodIndicator(Indicator):
def __init__(self, structuralParams, timingParams):
super().__init__(structuralParams, timingParams)
def SingleStep(self, stepId):
super().SingleStep(stepId)
if Binance.granualar_gstreams_ready:
            for n in range( pow(10, 6) ): a = pow(n, 0.5)  # busy-wait loop: burns CPU time before sampling the streams
trials = 10
keys = copy.copy(list(Binance.gstreams.keys()))
for key in keys:
[dataType, symbol, interval] = key.split('.')
with Binance.lock_gstream:
if Binance.gstreams.get(key, None) is not None:
start = dt.datetime.now() - dt.timedelta(days=int(dt.datetime.now().microsecond/600000), hours=int(dt.datetime.now().microsecond/50000), minutes=dt.datetime.now().second, seconds=dt.datetime.now().second) #vicious test.
end = Binance.gstream_datetime + dt.timedelta(seconds=1) # end is exclusive.
dataframe = Binance._singleton.Get_Filed_Data_By_Time( dataType, symbol, interval, start, end )
trials -= 1
if trials <= 0: break
return
|
[
"fleetpro@gmail.com"
] |
fleetpro@gmail.com
|
4211b5916fd39f3365a414838cbb1416b4c3572f
|
2a22a5e133053aca82452b3e52f082ea53ff663c
|
/contact/migrations/0008_auto_20200617_1307.py
|
042c91b29ab1f27dcfc5d5ece07366320979426e
|
[] |
no_license
|
Bhupesh7662/Django-Real-Estate
|
f59305a7bece4fa5949d82f4122f8629aae2495f
|
9b880a1a4ca3484ff2e2d9f6bfab2be9a1bfca73
|
refs/heads/master
| 2023-01-20T16:52:24.996425
| 2020-12-02T13:30:23
| 2020-12-02T13:30:23
| 317,861,362
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
# Generated by Django 3.0.6 on 2020-06-17 07:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('property', '0013_delete_venue'),
('contact', '0007_auto_20200617_1301'),
]
operations = [
migrations.AlterField(
model_name='inquiry',
name='property_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='property.Property'),
),
]
|
[
"bpathak277@gmail.com"
] |
bpathak277@gmail.com
|
4d8023380d287d9e9d17783cf714bc19b6987561
|
8daa41989c8fb9e08ec263d67cfe8e6554259f23
|
/tests/test_led_strip.py
|
755d5cb1c25a5f5039edea1d74d8f2e8c293a447
|
[
"BSD-3-Clause"
] |
permissive
|
splintered-reality/py_trees_ros_tutorials
|
4ac85ad57ccce446f2a8c06f9831cdc41d337782
|
15c906025e60018576445400a1f0b376546d2bde
|
refs/heads/devel
| 2023-02-09T00:05:58.959175
| 2023-02-07T05:28:10
| 2023-02-07T05:29:32
| 170,604,537
| 8
| 5
|
NOASSERTION
| 2023-02-07T05:29:34
| 2019-02-14T01:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,759
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# License: BSD
# https://raw.githubusercontent.com/splintered-reality/py_trees/devel/LICENSE
#
##############################################################################
# Imports
##############################################################################
import py_trees
import py_trees.console as console
import py_trees_ros_tutorials
import rclpy
import rclpy.executors
##############################################################################
# Helpers
##############################################################################
def assert_banner():
print(console.green + "----- Asserts -----" + console.reset)
def assert_details(text, expected, result):
print(console.green + text +
"." * (40 - len(text)) +
console.cyan + "{}".format(expected) +
console.yellow + " [{}]".format(result) +
console.reset)
def setup_module(module):
console.banner("ROS Init")
rclpy.init()
def teardown_module(module):
console.banner("ROS Shutdown")
rclpy.shutdown()
def timeout():
return 3.0
def number_of_iterations():
return 40
##############################################################################
# Tests
##############################################################################
def test_led_strip():
console.banner("Client Success")
mock_led_strip = py_trees_ros_tutorials.mock.led_strip.LEDStrip()
tree_node = rclpy.create_node("tree")
flash_led_strip = py_trees_ros_tutorials.behaviours.FlashLedStrip(name="Flash")
flash_led_strip.setup(node=tree_node)
executor = rclpy.executors.MultiThreadedExecutor(num_threads=4)
executor.add_node(mock_led_strip.node)
executor.add_node(tree_node)
assert_banner()
# send flashing led
spin_iterations = 0
while spin_iterations < number_of_iterations() and flash_led_strip.colour not in mock_led_strip.last_text:
flash_led_strip.tick_once()
executor.spin_once(timeout_sec=0.05)
spin_iterations += 1
assert_details("flashing", flash_led_strip.colour, flash_led_strip.colour if flash_led_strip.colour in mock_led_strip.last_text else mock_led_strip.last_text)
assert(flash_led_strip.colour in mock_led_strip.last_text)
# cancel
flash_led_strip.stop(new_status=py_trees.common.Status.INVALID)
spin_iterations = 0
while spin_iterations < number_of_iterations() and mock_led_strip.last_text:
executor.spin_once(timeout_sec=0.05)
spin_iterations += 1
assert_details("cancelled", "", mock_led_strip.last_text)
assert("" == mock_led_strip.last_text)
executor.shutdown()
tree_node.destroy_node()
mock_led_strip.node.destroy_node()
|
[
"noreply@github.com"
] |
splintered-reality.noreply@github.com
|
ffa0e2dd5e4b986f7c57492a40dd104d76510f7f
|
099ed0bf0d914a901f49d4eea4fed4c6b6d7e539
|
/keyboard.py
|
f6a4005b53e655f6492cf3c118442768ad681cff
|
[
"MIT"
] |
permissive
|
evrardco/neural-snake
|
701030cf7db2f0475b5674ca373ec27d9f631c9a
|
a9b2bc0b736de83b59538ce72df9e0f33f6bba1e
|
refs/heads/main
| 2023-07-12T20:27:04.814028
| 2021-08-22T15:58:58
| 2021-08-22T15:58:58
| 331,132,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
import pygame
from keymap import *
def get_dir():
pressed = pygame.key.get_pressed()
if pressed[KEYMAP[RIGHT]]:
return 0.0
elif pressed[KEYMAP[UP]]:
return 270.0
elif pressed[KEYMAP[LEFT]]:
return 180.0
elif pressed[KEYMAP[DOWN]]:
return 90.0
return None
def update_fps(fps):
pressed = pygame.key.get_pressed()
    if pressed[KEYMAP[FPS_2X]]:
        fps += 2  # note: despite the 2X / 0.5X key names, the adjustment is additive
    elif pressed[KEYMAP[FPS_05X]]:
        fps -= 2
return fps
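# Hedged usage sketch (not in the original); RIGHT/UP/LEFT/DOWN and the FPS
# constants come from keymap.py, which is not shown here:
# fps = 30
# while running:
#     direction = get_dir()   # angle in degrees (0/90/180/270) or None
#     fps = update_fps(fps)   # nudged by +/- 2 per mapped key press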
|
[
"colin.evrard@student.uclouvain.be"
] |
colin.evrard@student.uclouvain.be
|
96d590d3561ee3e50c3c75fbe926be73c668d712
|
b6345c4aea6a98a3aed9a2e99fb425ea535f4e80
|
/47.permutations-ii.py
|
69f921f580deb7ac7bd175269f7110dd64d212bd
|
[] |
no_license
|
ranbix666/leetcode-1
|
b94d256af9ddb9f1403891ed3229474fca92bea1
|
351b4d97db5afdaaa77d90deb24f5b16010d9fb5
|
refs/heads/master
| 2022-12-31T05:07:53.529302
| 2020-10-21T00:49:31
| 2020-10-21T00:49:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
#
# @lc app=leetcode id=47 lang=python
#
# [47] Permutations II
#
# https://leetcode.com/problems/permutations-ii/description/
#
# algorithms
# Medium (42.35%)
# Likes: 1267
# Dislikes: 46
# Total Accepted: 276K
# Total Submissions: 651.5K
# Testcase Example: '[1,1,2]'
#
# Given a collection of numbers that might contain duplicates, return all
# possible unique permutations.
#
# Example:
#
#
# Input: [1,1,2]
# Output:
# [
# [1,1,2],
# [1,2,1],
# [2,1,1]
# ]
#
#
#
# @lc code=start
class Solution(object):
def permuteUnique(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
self.getPer(nums, res, 0)
return res
def getPer(self, nums, res, begin):
        if begin == len(nums):
            res.append(list(nums))
            return  # base case: the loop below would be empty anyway, but returning makes the intent explicit
visit = set()
for i in range(begin, len(nums)):
if nums[i] in visit:
continue
visit.add(nums[i])
nums[begin], nums[i] = nums[i], nums[begin]
self.getPer(nums, res, begin+1)
nums[begin], nums[i] = nums[i], nums[begin]
# res = []
# nums.sort()
# self.generateRes(nums, res, [], [])
# return res
# def generateRes(self, nums, res, rec_res, rec_i):
# if len(rec_res) == len(nums):
# res.append(list(rec_res))
# return
# for i in range(len(nums)):
# if i in rec_i:
# continue
# if i > 0 and (int(i-1) not in rec_i) and nums[i]==nums[i-1]:
# continue
# rec_res.append(nums[i])
# rec_i.append(i)
# self.generateRes(nums, res, rec_res, rec_i)
# rec_res.pop()
# rec_i.pop()
# @lc code=end
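# Hedged usage sketch (not in the original):
# Solution().permuteUnique([1, 1, 2]) -> [[1, 1, 2], [1, 2, 1], [2, 1, 1]]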
|
[
"you@example.com"
] |
you@example.com
|
bf5df2cbfc7ea126dca19cd6923c3374c190031d
|
ae326c4e6a2b2d5b67fa8d175249ef90f6a3021a
|
/leo/core/leoHistory.py
|
9bb867308a3b875170b2b7aa0474918ec9414d45
|
[
"MIT"
] |
permissive
|
frakel/leo-editor
|
f95e6c77d60485d80fddfbeaf35db961cf691177
|
b574118ee3b7ffe8344fa0d00dac603096117ac7
|
refs/heads/master
| 2020-03-28T10:40:24.621077
| 2018-10-23T14:39:31
| 2018-10-23T14:39:31
| 148,132,817
| 0
| 0
|
MIT
| 2018-09-10T09:40:18
| 2018-09-10T09:40:18
| null |
UTF-8
|
Python
| false
| false
| 3,785
|
py
|
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20150514154159.1: * @file leoHistory.py
#@@first
import leo.core.leoGlobals as g
assert g
#@+others
#@+node:ekr.20160514120255.1: ** class NodeHistory
class NodeHistory(object):
'''A class encapsulating knowledge of visited nodes.'''
#@+others
#@+node:ekr.20070615131604.1: *3* NodeHistory.ctor
def __init__(self, c):
'''Ctor for NodeHistory class.'''
self.c = c
self.beadList = []
# a list of (position,chapter) tuples.
self.beadPointer = -1
self.skipBeadUpdate = False
#@+node:ekr.20160426061203.1: *3* NodeHistory.dump
def dump(self):
'''Dump the beadList'''
for i, data in enumerate(self.beadList):
p, chapter = data
p = p.h if p else 'no p'
chapter = chapter.name if chapter else 'main'
mark = '**' if i == self.beadPointer else ' '
print('%s %s %s %s' % (mark, i, chapter, p))
#@+node:ekr.20070615134813: *3* NodeHistory.goNext
def goNext(self):
'''Select the next node, if possible.'''
if self.beadPointer + 1 < len(self.beadList):
self.beadPointer += 1
p, chapter = self.beadList[self.beadPointer]
self.select(p, chapter)
#@+node:ekr.20130915111638.11288: *3* NodeHistory.goPrev
def goPrev(self):
'''Select the previously visited node, if possible.'''
if self.beadPointer > 0:
self.beadPointer -= 1
p, chapter = self.beadList[self.beadPointer]
self.select(p, chapter)
#@+node:ekr.20130915111638.11294: *3* NodeHistory.select
def select(self, p, chapter):
'''
Update the history list when selecting p.
Called only from self.goToNext/PrevHistory
'''
c, cc = self.c, self.c.chapterController
if c.positionExists(p):
self.skipBeadUpdate = True
try:
oldChapter = cc.getSelectedChapter()
if oldChapter != chapter:
cc.selectChapterForPosition(p, chapter=chapter)
c.selectPosition(p) # Calls cc.selectChapterForPosition
finally:
self.skipBeadUpdate = False
# Fix bug #180: Always call self.update here.
self.update(p, change=False)
#@+node:ville.20090724234020.14676: *3* NodeHistory.update
def update(self, p, change=True):
'''
Update the beadList while p is being selected.
Called *only* from c.frame.tree.selectHelper.
'''
c, cc = self.c, self.c.chapterController
if not p or not c.positionExists(p) or self.skipBeadUpdate:
return
# A hack: don't add @chapter nodes.
# These are selected during the transitions to a new chapter.
if p.h.startswith('@chapter '):
return
# Fix bug #180: handle the change flag.
aList, found = [], -1
for i, data in enumerate(self.beadList):
p2, junk_chapter = data
if c.positionExists(p2):
if p == p2:
if change:
pass # We'll append later.
elif found == -1:
found = i
aList.append(data)
else:
pass # Remove any duplicate.
else:
aList.append(data)
if change or found == -1:
data = p.copy(), cc.getSelectedChapter()
aList.append(data)
self.beadPointer = len(aList) - 1
else:
self.beadPointer = found
self.beadList = aList
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@@pagewidth 70
#@-leo
|
[
"edreamleo@gmail.com"
] |
edreamleo@gmail.com
|
5697c11ac5660a77427b27eae49aa855418c464c
|
5246897010a2424b575eb84dee77715464959b4e
|
/{{cookiecutter.repo_name}}/src/{{cookiecutter.repo_name}}/middlewares.py
|
4d45662903bb93ae9b53cca7b74fd140bcad5bed
|
[] |
no_license
|
reubinoff/fastapi-template-cookiecutter
|
3cc015d1e37544231516f0e1e470e25097fffee3
|
e708393068b0d795278b1d0e5107c29a4cfac435
|
refs/heads/develop
| 2023-07-02T18:03:57.366872
| 2021-08-11T08:47:24
| 2021-08-11T08:47:24
| 390,280,466
| 0
| 0
| null | 2021-08-06T14:13:09
| 2021-07-28T08:49:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
import functools
from typing import Optional, Sequence
from sentry_asgi import SentryMiddleware
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
from fastapi import Request, Response
from {{cookiecutter.repo_name}}.api import api_router
from {{cookiecutter.repo_name}}.database.core import engine, sessionmaker
@functools.lru_cache()
def get_middlewares() -> Optional[Sequence[Middleware]]:
middlewares = [
Middleware(BaseHTTPMiddleware, dispatch=db_session_middleware),
Middleware(BaseHTTPMiddleware, dispatch=add_security_headers),
Middleware(SentryMiddleware),
]
return middlewares
async def db_session_middleware(request: Request, call_next):
response = Response("Internal Server Error", status_code=500)
try:
session = sessionmaker(bind=engine)
if not session:
return response
request.state.db = session()
response = await call_next(request)
finally:
request.state.db.close()
return response
async def add_security_headers(request: Request, call_next):
response = await call_next(request)
response.headers["Strict-Transport-Security"] = "max-age=31536000 ; includeSubDomains"
return response
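# Hedged usage sketch (not in the original): the middleware stack can be handed
# to the app at construction time, e.g.
# from fastapi import FastAPI
# app = FastAPI(middleware=get_middlewares())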
|
[
"moshe@ownbackup.com"
] |
moshe@ownbackup.com
|
33692068f84d8ee187ad0ee48512f4f2d57de50c
|
19cedb222593a6883da1e8c9f5259d2354bd0c39
|
/fundamentals/encapsulation.py
|
791ac0dd7034e20a2517992cb15249f936adde48
|
[] |
no_license
|
rkbeatss/object-oriented-python
|
5dc428a93c664b2c7d5a5abfc163742c4c6a5200
|
e88c69f5386f8cf54a60aef1fb90e57afa8aa0c3
|
refs/heads/master
| 2023-01-24T13:46:51.510459
| 2020-11-18T21:14:44
| 2020-11-18T21:14:44
| 314,058,732
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
class Employee:
def __init__(self):
self.__name = "Hubert"
self.__age = 20
def get_employee_info(self) -> str:
return f"{self.__name} is currently {self.__age} years old!"
def get_name(self) -> str:
return self.__name
def get_age(self) -> int:
return self.__age
def set_name(self, name: str) -> None:
# Specific validation logic to setting a person's name only if it's not all uppercase
# Encapsulation ensures that all the state control is concealed within the class
if not name.isupper():
self.__name = name
def set_age(self, age: int) -> None:
self.__age = age
if __name__ == "__main__":
employee = Employee()
print(employee.get_employee_info()) # Print 'Hubert is currently 20 years old!'
employee.set_name("Rupsi")
employee.set_age(23)
print(employee.get_employee_info()) # Print 'Rupsi is currently 23 years old!'
employee.set_name("HUBERT") # Our validation should stop this from being set
print(employee.get_name()) # still prints Rupsi
    employee.__name = "HUBERT" # this only creates a brand-new attribute; name mangling stores the real one as _Employee__name, so the getter is unaffected
    print(employee.get_name()) # still Rupsi =]
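    # A hedged aside (not in the original): Python hides __name only via name
    # mangling; the attribute is still reachable as _Employee__name, which
    # bypasses the setter's validation entirely.
    employee._Employee__name = "HUBERT"
    print(employee.get_name())  # now prints HUBERT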
|
[
"rkaus053@uottawa.ca"
] |
rkaus053@uottawa.ca
|
e3b48da8ce2e3caf7bdbcdad48e198d8fe09d303
|
e446861bb81f50108ca353bbb35fee1b65837b2d
|
/testModules.py
|
96e5291d6f59fc17878fd0500c978afcaf4c084f
|
[] |
no_license
|
wjwABCDEFG/wjw-DecisionTree
|
ef429cdc7dd0ec5e36d25e054b7005d055555699
|
476fd87c485daebb54f50c5977ced7ba3ba23067
|
refs/heads/master
| 2020-07-30T15:14:40.314983
| 2019-09-23T06:27:34
| 2019-09-23T06:27:34
| 210,273,791
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
"""
author:wjw
email:975504808@qq.com
create time:2019/09/19 21:55
测试模块,开发过程中测试每个函数,可删勿理
"""
import re
import utils
import numpy as np
# First data set
# Dataset preprocessing
str_raw = '帅。不好。矮。不上进。不嫁。不帅。好。矮。上进。不嫁。帅。好。矮。上进。嫁。不帅。爆好·高。上进。嫁。帅。不好。矮。上进。不嫁。帅。不好。矮。上进。不嫁。帅。好。高。不上进。嫁。不帅。好。中。上进。嫁。帅。爆好·中。上进。嫁。不帅。不好。高。上进。嫁。帅。好。矮。不上进。不嫁。帅。好。矮。不上进。不嫁。'
list1 = re.split(r'[。·]', str_raw)
list2 = []
for row in range(int(len(list1) / 5)):
list2.append([list1[row * 5 + i] for i in range(5)])
print(list2)
list2 = np.array(list2)
print(type(list2))
print(list2.shape)
datas = list2[:, :-1]
labels = list2[:, -1]
datas_header = ['是否帅', '脾气是否好', '是否高', '是否上进', '结果']
print(datas)
print(labels)
print('=' * 50)
# # Second data set
# datas = np.array([['1', '1'],
# ['1', '1'],
# ['1', '0'],
# ['0', '1'],
# ['0', '1']])
#
# labels = np.array(['yes', 'yes', 'no', 'no', 'no'])
# datas_header = ['no-facing', 'flipper']
# # Test Shannon entropy
# print('Test Shannon entropy')
# testResult = utils.get_shannon_entropy(labels)
# print(testResult)
# # Test conditional entropy
# print('Test conditional entropy')
# testResult = utils.get_conditional_entropy(datas[:, 2], labels)
# print(testResult)
# # Test information gain
# print('Test information gain')
# testResult = utils.get_best_gain(datas, labels)
# print(testResult)
# # Test the tree-building dictionary
# print('Test the tree-building dictionary')
# testResult = utils.create_tree(datas_header, datas, labels)
# print(testResult)
# Test the prediction function
# print('Test the prediction function')
tree_model = utils.create_tree(datas_header, datas, labels)
input_data = ['帅', '好', '矮', '上进']
testResult = utils.predict_result(tree_model, input_data, datas_header)
print(testResult)
# Test the save and load functions
utils.store_tree(tree_model, '嫁不嫁.pkl') # .pkl, .txt, etc. all work, but .pkl is recommended
testResult = utils.restore_tree('嫁不嫁.pkl')
print(testResult)
|
[
"32508332+wjwABCDEFG@users.noreply.github.com"
] |
32508332+wjwABCDEFG@users.noreply.github.com
|
3a80b5901147ca35efec941d97cfb88a4778c4f9
|
d7c8d8a57ea4f60463d0c82ead54f9772c307738
|
/CMSSW_10_6_5/src/workspace/Dilepton-grid/draw_band.py
|
4ad4d32e6b10bfff8a2a4d61bc130267fc6740dc
|
[] |
no_license
|
miguelgallo/DileptonAnalysis
|
e4381a344c74ed4c71ea60deedb5f8f8b5522cef
|
943378660e39b6783fed3c34d64ac48425906000
|
refs/heads/master
| 2021-08-07T20:35:26.998111
| 2020-06-27T02:59:00
| 2020-06-27T02:59:00
| 191,446,046
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,884
|
py
|
import numpy as np
import ROOT
def draw_band( functionprotonName, functionmumuName, graph_matchName, graph_non_matchName, titleName, fileName ):
xi_min = 0.
xi_max = 0.25
file_func = ROOT.TFile("histos_MC/pol_function.root","READ")
res_vs_xi_proton = file_func.Get( functionprotonName )
res_vs_xi_mumu = file_func.Get( functionmumuName )
file_graph = ROOT.TFile("histos_data/output_data_sigma_limitRun.root","READ")
graph_match = file_graph.Get( graph_matchName )
graph_non_match = file_graph.Get( graph_non_matchName )
arr_xi = np.linspace(xi_min,xi_max,100)
f_up = lambda xi: xi + 2*np.sqrt( ( res_vs_xi_mumu.Eval(xi) )**2 + ( res_vs_xi_proton.Eval(xi) )**2 )
f_dw = lambda xi: xi - 2*np.sqrt( ( res_vs_xi_mumu.Eval(xi) )**2 + ( res_vs_xi_proton.Eval(xi) )**2 )
arr_xi_up = np.array( list(map(f_up,arr_xi)) )
arr_xi_dw = np.array( list(map(f_dw,arr_xi)) )
n_points = arr_xi.size
gr_all = ROOT.TGraph(2*n_points)
for i in range(n_points):
gr_all.SetPoint(i,arr_xi[i],arr_xi_up[i])
gr_all.SetPoint(n_points+i,arr_xi[n_points-i-1],arr_xi_dw[n_points-i-1])
gr_all.SetFillColor(16)
gr_all.SetFillStyle(3013)
canvas1 = ROOT.TCanvas()
graph_match.SetMarkerStyle(22)
graph_match.SetMarkerColor(4)
graph_non_match.SetMarkerStyle(23)
graph_non_match.SetMarkerColor(2)
mg = ROOT.TMultiGraph()
mg.SetTitle( titleName )
mg.GetXaxis().SetTitle("#xi(RP)")
mg.GetXaxis().SetLimits(.02, .16)
mg.GetYaxis().SetTitle("#xi(#mu^{+}#mu^{-})")
mg.SetMinimum(.02)
mg.SetMaximum(.16)
mg.Add(gr_all, "ALF")
mg.Add(graph_match, "AP")
mg.Add(graph_non_match, "AP")
mg.Draw("AP")
leg = ROOT.TLegend(.9,.9,.7,.7)
leg.AddEntry(graph_match, "Matching Events" , "p")
leg.AddEntry(graph_non_match, "Non-Matching Events" , "p")
leg.AddEntry(gr_all, "2 Sigma Area" , "f")
leg.Draw()
canvas1.SaveAs( fileName, "png")
canvas1.cd()
canvas1.Close()
return 0
if __name__ == "__main__":
draw_band("f_rp3", "f_pair_left", "g_xi_left_reco_rp3_match", "g_xi_left_reco_rp3_non_match", "#xi Left Correlation - RP 3", "results/xi_corr_rp3_band.png")
draw_band("f_rp23", "f_pair_left", "g_xi_left_reco_rp23_match", "g_xi_left_reco_rp23_non_match", "#xi Left Correlation - RP 23", "results/xi_corr_rp23_band.png")
draw_band("f_rp23_nprot", "f_pair_left", "g_xi_left_reco_rp23_nprot_match", "g_xi_left_reco_rp23_nprot_non_match", "#xi Left Correlation - RP 23_nprot", "results/xi_corr_rp23_nprot_band.png")
draw_band("f_left_single", "f_pair_left", "g_xi_left_reco_single_match", "g_xi_left_reco_single_non_match", "#xi Left Correlation - Single RP", "results/xi_corr_left_single_band.png")
draw_band("f_rp103", "f_pair_right", "g_xi_right_reco_rp103_match", "g_xi_right_reco_rp103_non_match", "#xi Right Correlation - RP 103", "results/xi_corr_rp103_band.png")
draw_band("f_rp123", "f_pair_right", "g_xi_right_reco_rp123_match", "g_xi_right_reco_rp123_non_match", "#xi Right Correlation - RP 123", "results/xi_corr_rp123_band.png")
draw_band("f_rp123_nprot", "f_pair_right", "g_xi_right_reco_rp123_nprot_match", "g_xi_right_reco_rp123_nprot_non_match", "#xi Right Correlation - RP 123_nprot", "results/xi_corr_rp123_nprot_band.png")
draw_band("f_right_single", "f_pair_right", "g_xi_right_reco_single_match", "g_xi_right_reco_single_non_match", "#xi Right Correlation - Single RP", "results/xi_corr_right_single_band.png")
draw_band("f_left_multi", "f_pair_left", "g_xi_left_reco_multi_match", "g_xi_left_reco_multi_non_match", "#xi Left Correlation - Multi RP", "results/xi_corr_left_multi_band.png")
draw_band("f_right_multi", "f_pair_right", "g_xi_right_reco_multi_match", "g_xi_right_reco_multi_non_match", "#xi Right Correlation - Multi RP", "results/xi_corr_right_multi_band.png")
|
[
"miguelalvesgallo@gmail.com"
] |
miguelalvesgallo@gmail.com
|
923ce9012e3347a4b7a85410149a6f8a6311010d
|
eeef25e331fe3bc06d788dc0dd08a028d361edbc
|
/Python/Catalan.py
|
854c3e7bc9896438b789ee4cd0c6a33afaaeba79
|
[] |
no_license
|
prokarius/hello-world
|
d483083172e600832a729ee44bf4a39880e79245
|
c8a4a500e9fbf2f746ecb25c4710359fbae23bc5
|
refs/heads/master
| 2022-04-29T20:31:26.937487
| 2022-03-19T08:07:32
| 2022-03-19T08:07:32
| 93,074,171
| 47
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
# Catalan numbers via the exact recurrence C(0) = 1, C(n+1) = C(n)*(4n+2)/(n+2)
# (Python 2: the integer division below is exact for this recurrence)
catalan = [1]
new = 1
for i in range (5000):
new *= (4*i +2)
new /= (i+2)
catalan.append(new)
numcase = int(raw_input())
while numcase > 0:
Q = int(raw_input())
print catalan[Q]
numcase -= 1
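# A hedged Python 3 sketch of the same idea (the file above is Python 2); the
# recurrence C(n+1) = C(n) * (4n + 2) // (n + 2) stays exact in integers:
# catalan3 = [1]
# for i in range(5000):
#     catalan3.append(catalan3[-1] * (4 * i + 2) // (i + 2))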
|
[
"rollingwithoutslipping@yahoo.com"
] |
rollingwithoutslipping@yahoo.com
|
9355bef8b79c6821c8c46453c1e4318117f9c28a
|
23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6
|
/rootfs/usr/lib/pymodules/python2.6/papyon/service/description/Sharing/common.py
|
03cb051d878d3a76ba9f7a4ae90d9149544ca69c
|
[] |
no_license
|
xinligg/trainmonitor
|
07ed0fa99e54e2857b49ad3435546d13cc0eb17a
|
938a8d8f56dc267fceeb65ef7b867f1cac343923
|
refs/heads/master
| 2021-09-24T15:52:43.195053
| 2018-10-11T07:12:25
| 2018-10-11T07:12:25
| 116,164,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
/usr/share/pyshared/papyon/service/description/Sharing/common.py
|
[
"root@xinli.xinli"
] |
root@xinli.xinli
|
ba49362f56a7b9a8d89443d0589c5603ff8466d7
|
82adfe2b2b6bc3989f8b860513d5ee4b753987e3
|
/pytmcapi/swagger_client/models/task.py
|
b7d540f09e4de4a2341047177ed75f93dbff6431
|
[
"Apache-2.0"
] |
permissive
|
mverrilli/tmc-api-clients
|
9ffc520a21b791d5047dfd74050af9bd6268b7a5
|
0d2752a4c2f43b19da9714072d03c15dccf2619a
|
refs/heads/master
| 2021-05-17T22:45:38.453946
| 2020-04-06T16:32:40
| 2020-04-06T16:32:40
| 250,986,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,855
|
py
|
# coding: utf-8
"""
Talend Management Console Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Task(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'description': 'str',
'workspace': 'WorkspaceInfo',
'version': 'str',
'artifact': 'BaseArtifactVersion',
'tags': 'list[str]',
'connections': 'dict(str, str)',
'parameters': 'dict(str, str)',
'resources': 'dict(str, str)'
}
attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'workspace': 'workspace',
'version': 'version',
'artifact': 'artifact',
'tags': 'tags',
'connections': 'connections',
'parameters': 'parameters',
'resources': 'resources'
}
def __init__(self, id=None, name=None, description=None, workspace=None, version=None, artifact=None, tags=None, connections=None, parameters=None, resources=None): # noqa: E501
"""Task - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._description = None
self._workspace = None
self._version = None
self._artifact = None
self._tags = None
self._connections = None
self._parameters = None
self._resources = None
self.discriminator = None
self.id = id
self.name = name
if description is not None:
self.description = description
if workspace is not None:
self.workspace = workspace
self.version = version
self.artifact = artifact
if tags is not None:
self.tags = tags
if connections is not None:
self.connections = connections
if parameters is not None:
self.parameters = parameters
if resources is not None:
self.resources = resources
@property
def id(self):
"""Gets the id of this Task. # noqa: E501
Id of task # noqa: E501
:return: The id of this Task. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Task.
Id of task # noqa: E501
:param id: The id of this Task. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def name(self):
"""Gets the name of this Task. # noqa: E501
Name of task # noqa: E501
:return: The name of this Task. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Task.
Name of task # noqa: E501
:param name: The name of this Task. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this Task. # noqa: E501
Task description # noqa: E501
:return: The description of this Task. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Task.
Task description # noqa: E501
:param description: The description of this Task. # noqa: E501
:type: str
"""
self._description = description
@property
def workspace(self):
"""Gets the workspace of this Task. # noqa: E501
Task workspace # noqa: E501
:return: The workspace of this Task. # noqa: E501
:rtype: WorkspaceInfo
"""
return self._workspace
@workspace.setter
def workspace(self, workspace):
"""Sets the workspace of this Task.
Task workspace # noqa: E501
:param workspace: The workspace of this Task. # noqa: E501
:type: WorkspaceInfo
"""
self._workspace = workspace
@property
def version(self):
"""Gets the version of this Task. # noqa: E501
Task version # noqa: E501
:return: The version of this Task. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this Task.
Task version # noqa: E501
:param version: The version of this Task. # noqa: E501
:type: str
"""
if version is None:
raise ValueError("Invalid value for `version`, must not be `None`") # noqa: E501
self._version = version
@property
def artifact(self):
"""Gets the artifact of this Task. # noqa: E501
Artifact used in task # noqa: E501
:return: The artifact of this Task. # noqa: E501
:rtype: BaseArtifactVersion
"""
return self._artifact
@artifact.setter
def artifact(self, artifact):
"""Sets the artifact of this Task.
Artifact used in task # noqa: E501
:param artifact: The artifact of this Task. # noqa: E501
:type: BaseArtifactVersion
"""
if artifact is None:
raise ValueError("Invalid value for `artifact`, must not be `None`") # noqa: E501
self._artifact = artifact
@property
def tags(self):
"""Gets the tags of this Task. # noqa: E501
Task tags # noqa: E501
:return: The tags of this Task. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this Task.
Task tags # noqa: E501
:param tags: The tags of this Task. # noqa: E501
:type: list[str]
"""
self._tags = tags
@property
def connections(self):
"""Gets the connections of this Task. # noqa: E501
Task connections # noqa: E501
:return: The connections of this Task. # noqa: E501
:rtype: dict(str, str)
"""
return self._connections
@connections.setter
def connections(self, connections):
"""Sets the connections of this Task.
Task connections # noqa: E501
:param connections: The connections of this Task. # noqa: E501
:type: dict(str, str)
"""
self._connections = connections
@property
def parameters(self):
"""Gets the parameters of this Task. # noqa: E501
Task parameters # noqa: E501
:return: The parameters of this Task. # noqa: E501
:rtype: dict(str, str)
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this Task.
Task parameters # noqa: E501
:param parameters: The parameters of this Task. # noqa: E501
:type: dict(str, str)
"""
self._parameters = parameters
@property
def resources(self):
"""Gets the resources of this Task. # noqa: E501
Task resources # noqa: E501
:return: The resources of this Task. # noqa: E501
:rtype: dict(str, str)
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this Task.
Task resources # noqa: E501
:param resources: The resources of this Task. # noqa: E501
:type: dict(str, str)
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Task, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Task):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"mverrilli@talend.com"
] |
mverrilli@talend.com
|
b8829428b5df4a97033baf34a78d530b43e477c5
|
27ce5035140a5023de0892f4c3837bb03d47a700
|
/run_model.py
|
280b6384df6557d48a20764342706fa8c9ae277b
|
[
"MIT"
] |
permissive
|
matteorr/rel_3d_pose
|
42cb155bdce394a578095a32a9b5793e3564cd68
|
dcb9838bfc575d267a99998982ba756f073bc5d8
|
refs/heads/master
| 2022-05-12T13:08:32.951614
| 2022-05-02T04:53:16
| 2022-05-02T04:53:16
| 143,567,259
| 76
| 8
|
MIT
| 2018-09-21T21:45:44
| 2018-08-04T22:32:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,999
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import os, json
from pprint import pprint
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
from torch.autograd import Variable
from src.data_formats.misc import DatasetMisc
from src.model import LinearModel, weight_init
from src.utils.pose_plotter import PosePlotter
from src.data_formats.human36_17k_config import pose_config
def run_model(opt):
# get misc file used for the specified data format
misc = DatasetMisc(opt['dataset_type'])
# class that takes care of plotting
pose_plotter = PosePlotter(
pose_config['KEYPOINT_NAMES'],
pose_config['SKELETON_NAMES'],
pose_config['KEYPOINT_COLORS'],
pose_config['SKELETON_COLORS'])
# load checkpoint file
ckpt = torch.load(opt['load'])
stat_2d = ckpt['stat_2d']
# load the pretrained model
print("\n==================Model===================")
print("Loading Pretrained Model:")
print(" - Linear size: [{}]".format(opt['linear_size']))
print(" - Num stages: [{}]".format(opt['linear_size']))
print("==========================================\n")
pretrained_model = LinearModel(misc.NUM_KEYPOINTS_2D * 2,
misc.NUM_KEYPOINTS_3D * 3,
opt['linear_size'],
opt['num_stage'],
opt['dropout'],
opt['predict_scale'],
opt['scale_range'],
opt['unnorm_op'],
opt['unnorm_init'])
pretrained_model = pretrained_model.cuda()
pretrained_model.load_state_dict(ckpt['state_dict'])
pretrained_model.eval()
# load the data from a numpy file
print("\n==================Data====================")
print("Loading Data:")
print(" - Data path: [{}]".format(opt['data_dir']))
print(" - Data type: [{}]".format(opt['dataset_type']))
    with open(opt['data_dir'], 'rb') as fp: data = np.load(fp)  # np.load needs a binary-mode handle
num_frames, num_coords = data.shape
num_kpts = int(num_coords/2)
print(" - Num frames: [{}]".format(num_frames))
print(" - Num kpts: [{}]".format(num_kpts))
print("==========================================\n")
# subtract root if specified
if opt['subtract_2d_root']:
root_idx_2d, _ = misc.get_skeleton_root_idx()
# subtract the 2d skeleton center from all coordinates so it is always in 0,0
data_2d_root = data[:, [2 * root_idx_2d, 2 * root_idx_2d + 1]]
data -= np.tile(data_2d_root, num_kpts)
# normalize the inputs according to the stored mean and std
data_mean = stat_2d['mean']
data_std = stat_2d['std']
norm_data = (data - data_mean[np.newaxis, ...]) / data_std[np.newaxis, ...]
norm_data[np.isnan(norm_data)] = 0
norm_data = norm_data.astype(np.float32)
seq_dataset = TensorDataset(torch.from_numpy(norm_data), torch.from_numpy(data))
seq_loader = DataLoader(dataset=seq_dataset,
batch_size=100, shuffle=False,
num_workers=4, drop_last=False)
# predict 3d pose using the model
in_2d_poses = []
out_3d_poses = []
for indx, (norm_data, data) in enumerate(seq_loader):
model_inps = Variable(norm_data.cuda())
model_outs, model_scale = pretrained_model(model_inps)
in_2d_poses.append(data.numpy())
out_3d_poses.append(model_outs.data.cpu().numpy())
in_2d_poses = np.vstack(in_2d_poses)
out_3d_poses = np.vstack(out_3d_poses)
num_frames = out_3d_poses.shape[0]
num_kpts = int(out_3d_poses.shape[1] / 3)
print("\n==================Outputs====================")
print("Predicted Data:")
print(" - Num frames: [{}]".format(num_frames))
print(" - Num keypoints: [{}]".format(num_kpts))
f_no = np.random.randint(num_frames)
########################################################################
## load the 2d groundtruth keypoints in the frame
kpts_2d_x = in_2d_poses[f_no, 0::2]
kpts_2d_y = in_2d_poses[f_no, 1::2]
########################################################################
## get 3d predicted keypoints in the frame
kpts_3d_x = out_3d_poses[f_no, 0::3]
kpts_3d_y = out_3d_poses[f_no, 1::3]
kpts_3d_z = out_3d_poses[f_no, 2::3]
########################################################################
## set the visibility flags (currently all keypoints are assumed visible)
kpts_v = np.ones(np.shape(kpts_2d_x))
pose_plotter.plot_2d(kpts_2d_x, kpts_2d_y, kpts_v)
pose_plotter.plot_3d(kpts_3d_x, kpts_3d_y, kpts_3d_z, kpts_v)
pose_plotter.plot_2d_3d(kpts_2d_x, kpts_2d_y, kpts_3d_x, kpts_3d_y, kpts_3d_z, kpts_v)
if __name__ == "__main__":
"""
uses the function run_model to test a pretrained model on a numpy array
"""
# NOTE: baseball.npy and running.npy contain poses with 17 keypoints
# while random.npy contains poses with 14 keypoints
DEMO_DATA = './demo_data/baseball.npy' # [baseball.npy, running.npy, random.npy]
# NOTE: this model was trained for data with 17 keypoints so is compatible
# with baseball.npy and running.npy, to run a model on random.npy you must
# train a new model with 14 keypoints.
LOAD_PATH = './checkpoint/default_human36_rel'
opts_path = LOAD_PATH + '/opt.json'
model_path = LOAD_PATH + '/test_ckpt_last.pth.tar'
with open(opts_path,'r') as fp: opt = json.load(fp)
opt['data_dir'] = DEMO_DATA
opt['load'] = model_path
print("\n==================Options=================")
pprint(opt, indent=4)
print("==========================================\n")
    run_model(opt)  # run_model plots the poses and returns nothing, so there is no value to capture
|
[
"mronchi@caltech.edu"
] |
mronchi@caltech.edu
|
3fb3765a1e602eb8cf08fe45b9b2285878edc0ad
|
277c29b460cef26db7d43fb80d10667f5fa321f6
|
/SEIR_extended/calculate_healthcare_costs.py
|
8db1abc881046b500f7c826c17601f8db2b33774
|
[] |
no_license
|
zuzancek/corona
|
75e0bd3678f1a01ec0109338067649abf1292bbe
|
da15613e231d4d61a0a6bb739b59454a65c0c36f
|
refs/heads/master
| 2021-07-16T14:56:27.052410
| 2021-05-24T21:34:08
| 2021-05-24T21:34:08
| 248,943,879
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,465
|
py
|
import sys
import numpy as np
import pickle
import pandas as pd
import time
import init as x
from random import sample
import init_healthcare as xx
def get_htc(inflow):
HTC = np.zeros(shape=(x.N_per+1, 16))
HTC[0,0] = x.first_infections_unobserved_asymptomatic
HTC[0,1] = x.first_infections_unobserved_symptomatic
HTC[0,2] = x.first_infections_observed_asymptomatic
HTC[0,3] = x.first_infections_mild
HTC[0,4] = x.first_infections_hospital
HTC[0,5] = x.first_infections_icu
omega_obs = xx.omega_obs
omega_unobs_asymp = xx.omega_asymp
omega_obs_asymp = xx.omega_asymp
prob_hosp_obs = xx.prob_hosp_obs/(1-omega_obs_asymp)
prob_hosp_unobs = xx.prob_hosp_unobs/(1-omega_unobs_asymp)
prob_icu = xx.prob_icu
prob_vent = xx.prob_vent
prob_surv = xx.prob_surv
T_death = xx.T_death-xx.T_hosp
Trec_unobs_asymp = xx.Trec_asymp
Trec_obs_asymp = xx.Trec_asymp
Trec_unobs_symp = xx.Trec_mild
Trec_mild = xx.Trec_mild
Trec_hosp = xx.Trec_hosp-xx.T_hosp
Trec_icu = xx.Trec_icu-xx.T_hosp
for t in range(x.N_per):
# 0: unobserved, asymptomatic
inf_unobs_asymp_in = (1-omega_obs)*omega_unobs_asymp*inflow[t]
inf_unobs_asymp_out = HTC[t,0]/Trec_unobs_asymp
HTC[t+1,0] = HTC[t,0]+inf_unobs_asymp_in-inf_unobs_asymp_out
# 1: unobserved, symptomatic (in fact mild cases only)
inf_unobs_symp_in = (1-omega_obs)*(1-omega_unobs_asymp)*inflow[t]
inf_unobs_symp_out = (prob_hosp_unobs+1/Trec_unobs_symp)*HTC[t,1]
HTC[t+1,1] = HTC[t,1]+inf_unobs_symp_in-inf_unobs_symp_out
# 2: observed, asymptomatic
inf_obs_asymp_in = (omega_obs)*omega_obs_asymp*inflow[t]
inf_obs_asymp_out = HTC[t,2]/Trec_obs_asymp
HTC[t+1,2] = HTC[t,2]+inf_obs_asymp_in-inf_obs_asymp_out
# 3: observed, mild cases
inf_obs_mild_in = omega_obs*(1-omega_obs_asymp)*inflow[t]
inf_obs_mild_out = (prob_hosp_obs+1/Trec_mild)*HTC[t,3]
HTC[t+1,3] = HTC[t,3]+inf_obs_mild_in-inf_obs_mild_out
# 4: serious/hospital cases
inf_hosp_in = prob_hosp_obs*HTC[t,3]+prob_hosp_unobs*HTC[t,1]
inf_hosp_out = (prob_icu+1/Trec_hosp)*HTC[t,4]
HTC[t+1,4] = HTC[t,4]+inf_hosp_in-inf_hosp_out
# 5: ICU cases
inf_icu_in = prob_icu*HTC[t,4]
inf_icu_out = (prob_surv+1/T_death)*HTC[t,5]
HTC[t+1,5] = HTC[t,5]+inf_icu_in-inf_icu_out
# 6: Recovered, unobserved
rec_unobs_in = inf_unobs_asymp_out+1/Trec_unobs_symp*HTC[t,1]
HTC[t+1,6] = HTC[t,6]+rec_unobs_in
# 7: Recovered, observed
rec_obs_in = inf_obs_asymp_out+1/Trec_mild*HTC[t,3]+1/Trec_hosp*HTC[t,4]+prob_surv*HTC[t,5]
HTC[t+1,7] = HTC[t,7]+rec_obs_in
# 8: Dead (observed only)
HTC[t+1,8] = HTC[t,8]+1/T_death*HTC[t,5]
# 9: Observed
HTC[t+1,9] = HTC[t+1,2]+HTC[t+1,3]+HTC[t+1,4]+HTC[t+1,5]
# 10: UnObserved
HTC[t+1,10] = HTC[t+1,0]+HTC[t+1,1]
# 11: Ventilation
HTC[t+1,11] = prob_vent*HTC[t+1,5]
# 12: Hospital
HTC[t+1,12] = HTC[t+1,5]+HTC[t+1,4]
        # 13-15: cumulative infections (total), cumulative observed infections, total recovered
HTC[t+1,13] = HTC[t,13]+inflow[t]
HTC[t+1,14] = HTC[t,14]+omega_obs*inflow[t]+prob_hosp_unobs*HTC[t,1]
HTC[t+1,15] = HTC[t+1,6]+HTC[t+1,7]
return HTC
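# Example usage (a sketch; assumes init.py provides N_per and the seed counts):
# if __name__ == '__main__':
#     inflow = np.full(x.N_per, 100.0)  # constant daily inflow of new infections
#     htc = get_htc(inflow)
#     print(htc[-1, 8])                 # cumulative deaths at the end of the horizon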
|
[
"zuzana.mucka@RRZ.SK"
] |
zuzana.mucka@RRZ.SK
|
1b03c884bc103c0ed31e7a6ba4a78c2c7807ea70
|
06845fc008705076bc3f1e0263e843cb5a453aa0
|
/miSitio/settings.py
|
5f528718536baa44ab6871c41add86effd2a9f4b
|
[] |
no_license
|
burdasparUPNA/my-first-blog
|
61077ddbd3aff08a9acd0086f8099cc6cdb75a6d
|
ff8707384c8d252d39ba1a83ccf1497c83d3b26c
|
refs/heads/master
| 2020-08-26T17:58:28.810675
| 2019-10-23T16:13:46
| 2019-10-23T16:13:46
| 217,095,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,104
|
py
|
"""
Django settings for miSitio project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's&cnx3jv8o^uuskns7et^lovzr1sm3=(=2ccit&+s%=z!#j0tf'
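# e.g. in production, read the key from the environment instead (a sketch):
# SECRET_KEY = os.environ['DJANGO_SECRET_KEY']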
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'miSitio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'miSitio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"burdaspar.110936@e.unavarra.es"
] |
burdaspar.110936@e.unavarra.es
|
099d1b937e15ed45b66b4db94e9a8e584da27c42
|
a43799f16005f8adc1a462a66df74210ec251266
|
/main.py
|
e23b2ca7d03bead6b7e6335505e1b9bd52d1cdf7
|
[] |
no_license
|
limisie/nn-simple-nn
|
f1a0230e23e29073a2907241458f54f688a0b111
|
ead0833ba78f930bca12d871e42792226fd7aa6f
|
refs/heads/main
| 2023-08-15T04:11:53.303777
| 2021-10-13T10:31:37
| 2021-10-13T10:31:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,985
|
py
|
from data import x_train, y_train, x_test, y_test, classes, input_data
from models import Adaline, PerceptronUnipolar, PerceptronBipolar
def convert_for_bipolar(arr):
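    # replaces 0 entries with -1 in place, giving bipolar {-1, 1} labels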
arr[arr == 0] = -1
def n_iteration_stats(n=1000, learning_rate=0.005, weight_range=0.01, allowed_error=0.5):
epochs = 0
times = 0
bipolar = True
if bipolar:
convert_for_bipolar(y_train)
convert_for_bipolar(y_test)
model = PerceptronBipolar(weight_range, learning_rate)
for i in range(n):
model.train(x_train, y_train)
epochs += model.epochs
times += model.time
print(f'epochs: {epochs / n}')
print(f'time: {times / n}')
def parameter_stats(iterations=10, learning_rate=0.5, weight_range=0.5, allowed_error=0.5,
researched_array=[1, 0.8, 0.5, 0.2, 0.1, 0.01, 0.001]):
bipolar = False
if bipolar:
convert_for_bipolar(y_train)
convert_for_bipolar(y_test)
for variable in researched_array:
model = PerceptronUnipolar(variable, learning_rate)
epochs = 0
times = 0
for i in range(iterations):
model.train(x_train, y_train)
epochs += model.epochs
times += model.time
model.evaluate(x_test)
print(model.y)
print(y_test)
print(f'variable: {variable}')
print(f'epochs: {epochs / iterations}')
print(f'time: {times / iterations}')
print('---------------------------')
def adaline_test(learning_rate=0.01, weight_rate=0.5, allowed_error=2):
convert_for_bipolar(y_train)
convert_for_bipolar(y_test)
convert_for_bipolar(classes)
model = Adaline(weight_rate, learning_rate, allowed_error)
model.train(x_train, y_train)
print(model.y)
print(y_train)
model.evaluate(x_test)
print(model.y)
print(y_test)
model.evaluate(input_data)
print(model.y)
print(classes)
if __name__ == '__main__':
adaline_test()
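    # other experiments defined above (sketches; uncomment one to run):
    # n_iteration_stats(n=100)
    # parameter_stats(iterations=5)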
|
[
"k.limisiewicz@gmail.com"
] |
k.limisiewicz@gmail.com
|
df94396508a7f40be4eeb86bc5443e9ec1b828e5
|
4e075be7d0e1a6a2fce6ef7b334e12339f98bbf9
|
/djangoapp/bin/wheel
|
8a3d7b23d99f43e63d9172ed88bcf536ae48ec53
|
[] |
no_license
|
prashantssingh/djangoapp
|
0155ba9fc75f4f11fcbfe5c14c3e3157b522c00e
|
641d64c307c32bbec4c883f74eb46433028e7b17
|
refs/heads/master
| 2020-12-11T01:36:01.433623
| 2020-01-19T05:59:47
| 2020-01-19T05:59:47
| 233,765,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
#!/home/prashant/codebase/src/github/prashantssingh/djangoapp/djangoapp/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"prashant_singh@live.in"
] |
prashant_singh@live.in
|
|
7887e161c034ca72ce90fc63d3f80203c96bc7f8
|
cd0c24cdc07d1842d0ffbcd481b8fcb8aede1344
|
/examples/custom_infer.py
|
f53e4f1e3a9cd78c1099af3308b7efa0df270656
|
[
"Apache-2.0"
] |
permissive
|
songwang41/fastT5
|
ae32ed57c29b312ecf62d55ea32e0bd3df8b6650
|
21c24c198050ba12e63a2eda5a31d5eb182d8938
|
refs/heads/master
| 2023-09-01T12:01:25.962641
| 2021-10-15T06:05:44
| 2021-10-15T06:05:44
| 417,377,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
from fastT5 import (
OnnxT5,
export_and_get_onnx_model,
get_onnx_model,
get_onnx_runtime_sessions,
generate_onnx_representation,
quantize,
)
from transformers import AutoTokenizer
model_or_model_path = "t5-small"
# Step 1. convert the Hugging Face T5 model to ONNX
onnx_model_paths = generate_onnx_representation(model_or_model_path)
# Step 2. (recommended) quantize the converted model for fast inference and to reduce model size.
quant_model_paths = quantize(onnx_model_paths)
# Step 3. set up the ONNX runtime sessions
model_sessions = get_onnx_runtime_sessions(quant_model_paths)
# Step 4. get the ONNX model
model = OnnxT5(model_or_model_path, model_sessions)
# --------common-part--------
tokenizer = AutoTokenizer.from_pretrained(model_or_model_path)
t_input = "translate English to French: The universe is a dark forest."
token = tokenizer(t_input, return_tensors="pt")
input_ids = token["input_ids"]
attention_mask = token["attention_mask"]
# 'set num_beams = 1' for greedy search
tokens = model.generate(input_ids=input_ids, attention_mask=attention_mask, num_beams=2)
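# e.g. the greedy-search variant of the same call (only num_beams changes):
# tokens = model.generate(input_ids=input_ids, attention_mask=attention_mask, num_beams=1)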
output = tokenizer.decode(tokens.squeeze(), skip_special_tokens=True)
print(output)
|
[
"kiranr8k@gmail.com"
] |
kiranr8k@gmail.com
|
f4a69f2772c573ca821813b9ddf51a2201c316df
|
07641678207eb52fc62acdb3dc409712c5d73933
|
/gwas_wrapper/__init__.py
|
a86dc892deccac98d22d159c5253c9ad1a028372
|
[
"MIT"
] |
permissive
|
arvkevi/gwas-wrapper
|
6c44b536528054dd4261a31e6690c139e7302092
|
f4b42535c84f6a354f36e4c5e0754345ff766c03
|
refs/heads/master
| 2021-01-15T08:15:17.654123
| 2016-04-23T15:40:40
| 2016-04-23T15:40:40
| 56,402,953
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30
|
py
|
from gwas_wrapper import GWAS
|
[
"arvkevi@gmail.com"
] |
arvkevi@gmail.com
|
e55724a165065062ec4d801ea86c00fc1f33ec11
|
1062df20cd4e90486f3e301e86b1a19ce9a382cf
|
/DL_inference/network7_256/deepvoxel.py
|
44b42207b74c4dbe1e7004a8a655f9c21721aef2
|
[
"MIT"
] |
permissive
|
Hagtaril/NLOSFeatureEmbeddings
|
cd6dfabee501f6199a33925d5997bc9df86466b3
|
f882ca5684e9b6ffb16052a714714f570e606295
|
refs/heads/main
| 2023-01-28T19:59:43.748460
| 2020-12-11T05:20:02
| 2020-12-11T05:20:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,121
|
py
|
import torch
import torch.nn as nn
import numpy as np
from customer_layers_3 import \
Transient2volumn, \
VisibleNet, \
Rendering
import sys
sys.path.append('../utils_pytorch')
from tfmodule import diffmodule as lct
###########################################################################
def normalize(data_bxcxdxhxw):
b, c, d, h, w = data_bxcxdxhxw.shape
data_bxcxk = data_bxcxdxhxw.reshape(b, c, -1)
data_min = data_bxcxk.min(2, keepdim=True)[0]
data_zmean = data_bxcxk - data_min
    # most entries are zero (the shifted transient volume is sparse)
data_max = data_zmean.max(2, keepdim=True)[0]
data_norm = data_zmean / (data_max + 1e-15)
return data_norm.view(b, c, d, h, w)
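# e.g. a channel whose raw values span [2., 4.] is mapped to [0., 1.] (per-channel min-max)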
################################################################
class DeepVoxels(nn.Module):
def __init__(self,
nf0=16,
in_channels=3,
out_channels=3,
img_sidelength=256,
grid_dim=32,
bin_len=0.01,
wall_size=2.0,
mode='fk',
res0=0):
super(DeepVoxels, self).__init__()
        ####################################
# 4 networks
# 1 downsample
# 2 unet
# 3 occlusion
# 4 render
imsz = 256
assert imsz == img_sidelength
volumnsz = 128
assert volumnsz == grid_dim
sres = imsz // volumnsz
tfull = 512
tsz = 128
volumntsz = 64
tres = tsz // volumntsz
# assert sres == tres
########################################################
basedim = nf0
self.basedim = basedim
# assert not raytracing
self.downnet = Transient2volumn(nf0=basedim, in_channels=in_channels)
print('bin_len %.7f' % bin_len)
self.lct = lct(spatial=imsz // sres, crop=tfull // tres, bin_len=bin_len * tres, \
mode=mode, wall_size=wall_size)
layernum = 0
self.visnet = VisibleNet(nf0=basedim * 1 + 1, layernum=layernum)
self.depth = True
assert out_channels == 6 or out_channels == 2
self.rendernet = Rendering(nf0=(basedim * 1 + 1) * (layernum // 2 * 2 + 1 + 1), out_channels=out_channels // 2)
self.depnet = Rendering(nf0=(basedim * 1 + 1) * (layernum // 2 * 2 + 1 + 1), out_channels=out_channels // 2, isdep=True)
def todev(self, dev):
self.lct.todev(dev, self.basedim * 1 + 1)
def noise(self, data):
gau = 0.05 + 0.03 * torch.randn_like(data) + data
poi = 0.03 * torch.randn_like(data) * gau + gau
return poi
def forward(self, input_voxel, tbes, tens):
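        # noise injection is disabled here; set the flag to True to perturb the raw measurements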
if False:
noisedata = self.noise(input_voxel)
else:
noisedata = input_voxel
###############################
data_norm = normalize(noisedata)
tfre = self.downnet(data_norm)
# lct
tfre2 = self.lct(tfre, tbes, tens)
# resize
x = tfre2
zdim = x.shape[2]
zdimnew = zdim * 100 // 128
x = x[:, :, :zdimnew]
tfre2 = x
tfre2 = nn.ReLU()(tfre2)
tfre2 = normalize(tfre2)
######################################
# unet 2 voxel
tfflat = self.visnet(tfre2)
# render
rendered_img = self.rendernet(tfflat)
if self.depth:
dep_img = self.depnet(tfflat)
rendered_img = torch.cat([rendered_img, dep_img], dim=1)
rendered_img = torch.clamp(rendered_img, 0, 1)
rendered_img = rendered_img * 2 - 1
return rendered_img
#################################################################
if __name__ == '__main__':
basedim = 1
tres = 2
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
frame = 512
in_channels = 1
data = np.zeros((1, in_channels, frame, 256, 256), dtype=np.float32)
from scipy.io import loadmat
data = loadmat(file_name='/home/wenzheng/largestore/nlos-phasor/realdata/resolution0.mat')
rect_data_hxwxt = data['measlr']
rect_data_txhxw = np.transpose(rect_data_hxwxt, axes=[2, 0, 1])
data = rect_data_txhxw.reshape(1, 1, 512, 256, 256)
tfdata = torch.from_numpy(data).to(dev)
model = DeepVoxels(
nf0=basedim,
in_channels=in_channels,
out_channels=2,
img_sidelength=256,
grid_dim=128,
mode='lct')
model = model.to(dev)
model.todev(dev)
re = model(tfdata, [0, 0, 0, 0, 0], [frame // tres, 32, 32, 32, 32])
print('\n')
print(re.shape)
print('\n')
re = re.detach().cpu().numpy()
re = (re + 1) / 2
im = re[0, 0]
dep = re[0, 1]
im = im / np.max(im)
import cv2
cv2.imshow('im', im)
cv2.imshow('dep', dep)
cv2.waitKey()
|
[
"weifangyin@dynamic-oit-vapornet100-10-9-151-26.princeton.edu"
] |
weifangyin@dynamic-oit-vapornet100-10-9-151-26.princeton.edu
|
9d0608b995e4660701d371a2c42be94925b66cd4
|
60e2b0f728bf7b497e241afdacffaa8ee9203213
|
/rede_neural_convolucional/digitos/mnist.py
|
0b6c0882af2d83dc5b7f25954c5f5421138d2d41
|
[] |
no_license
|
yamadayoshi/deep_learning
|
43897d59dc3f89ecd4820050b96acacbf653408e
|
78bbf5b12011a5d17375b50b75203251003cb3d0
|
refs/heads/master
| 2021-02-19T01:02:57.934801
| 2020-03-10T20:02:45
| 2020-03-10T20:02:45
| 245,260,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,020
|
py
|
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout
from keras.utils import np_utils
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
#load mnist data
(X_training, y_training), (X_test, y_test) = mnist.load_data()
#show the first training image using a gray colormap
plt.title('Class ' + str(y_training[0]))
plt.imshow(X_training[0], cmap= 'gray')
#reshape to 28 height, 28 width, 1 channel (grayscale)
previsores_training = X_training.reshape(X_training.shape[0], 28, 28, 1)
previsores_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
#convert to float32 so values can be scaled into the 0-1 range
previsores_training = previsores_training.astype('float32')
previsores_test = previsores_test.astype('float32')
#normalize data to gain performance
previsores_training /= 255
previsores_test /= 255
#normalize output
training_class = np_utils.to_categorical(y_training, 10)
test_class = np_utils.to_categorical(y_test, 10)
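#e.g. label 3 becomes [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]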
classificador = Sequential()
#first convolution layer
classificador.add(Conv2D(filters=32, kernel_size=(3,3), activation='relu', input_shape=(28, 28, 1)))
#normalize features
classificador.add(BatchNormalization())
classificador.add(MaxPooling2D(pool_size=(2,2)))
#second convolution layer
classificador.add(Conv2D(filters=32, kernel_size=(3,3), activation='relu'))
#normalize features
classificador.add(BatchNormalization())
classificador.add(MaxPooling2D(pool_size=(2,2)))
classificador.add(Flatten())
#Neural Net
classificador.add(Dense(units= 128, activation='relu'))
classificador.add(Dropout(0.2))
classificador.add(Dense(units= 128, activation='relu'))
classificador.add(Dropout(0.2))
classificador.add(Dense(units= 10, activation='softmax'))
classificador.compile(loss= 'categorical_crossentropy', optimizer= 'adam', metrics= ['accuracy'])
classificador.fit(previsores_training, training_class, batch_size=125, epochs=5)
result = classificador.evaluate(previsores_test, test_class)
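#result holds [test_loss, test_accuracy] for the metrics configured above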
|
[
"andre.yamada@digiage.com"
] |
andre.yamada@digiage.com
|
fd3b2d8b5fd46efa9b03f3296f898075dcfbd82d
|
999ed80db247794159be1d752bc6f0fc272bd117
|
/tests/common/devices/cisco.py
|
393dab910451d374e9cbc154255e0955b1fbee70
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
ramakristipati/sonic-mgmt
|
7fee876412f0121da96d751f7d199690c73496f3
|
a86f0e5b1742d01b8d8a28a537f79bf608955695
|
refs/heads/master
| 2023-08-31T07:55:38.446663
| 2023-08-31T06:34:53
| 2023-08-31T06:34:53
| 315,448,103
| 2
| 0
|
NOASSERTION
| 2020-11-23T21:44:07
| 2020-11-23T21:44:07
| null |
UTF-8
|
Python
| false
| false
| 59,352
|
py
|
import re
import os
import sys
import logging
import functools
import time
from paramiko import SSHClient, AutoAddPolicy
from tests.common.devices.base import AnsibleHostBase
from ansible.utils.unsafe_proxy import AnsibleUnsafeText
# If the version of the Python interpreter is greater or equal to 3, set the unicode variable to the str class.
if sys.version_info[0] >= 3:
unicode = str
logger = logging.getLogger(__name__)
SAMPLE_COMMAND_DATA = '''
RP/0/RP0/CPU0:vlab-01#show operational LLDP NodeTable Node/NodeName/Rack=0;
Slot=0;Instance=CPU0 Neighbors DeviceTable Device/DeviceID=vlab-02/Interf$
Wed Aug 10 08:45:43.126 UTC
......
"Operational": {
"LLDP": {
"@MajorVersion": "1",
"@MinorVersion": "2",
"NodeTable": {
"Node": {
"Naming": {
"NodeName": {
"Rack": "0",
"Slot": "0",
"Instance": "CPU0"
}
},
"Neighbors": {
"DeviceTable": {
"Device": {
"Naming": {
"DeviceID": "vlab-02",
"InterfaceName": "GigabitEthernet0/0/0/1"
},
"Entry": {
"ReceivingInterfaceName": "GigabitEthernet0/0/0/1",
"ReceivingParentInterfaceName": "Bundle-Ether1",
"DeviceID": "vlab-02",
"ChassisID": "5254.0085.5c1c",
"PortIDDetail": "fortyGigE0/4",
"HeaderVersion": "0",
"HoldTime": "120",
"EnabledCapabilities": "B,R",
"Detail": {
......
'''
def adapt_interface_name(func):
"""Decorator to adapt interface name used in topology to cisco interface name."""
@functools.wraps(func)
def _decorated(self, *args):
args_list = list(args)
new_list = []
for item in args_list:
new_item = item
            if isinstance(new_item, (str, unicode, AnsibleUnsafeText)):
if 'Ethernet' in new_item and 'GigabitEthernet' not in new_item:
new_item = re.sub(r'(^|\s)Ethernet', 'GigabitEthernet0/0/0/', new_item)
elif 'Port-Channel' in new_item:
new_item = re.sub(r'(^|\s)Port-Channel', 'Bundle-Ether', new_item)
new_list.append(new_item)
new_args = tuple(new_list)
return func(self, *new_args)
return _decorated
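# Example mapping applied by the decorator (derived from the regexes above):
#   'Ethernet1'      -> 'GigabitEthernet0/0/0/1'
#   'Port-Channel12' -> 'Bundle-Ether12'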
class CiscoHost(AnsibleHostBase):
"""
@summary: Class for Cisco host
"""
def __init__(self, ansible_adhoc, hostname, ansible_user, ansible_passwd):
'''Initialize an object for interacting with cisco device using ansible modules
Args:
ansible_adhoc (): The pytest-ansible fixture
hostname (string): hostname of the cisco device
ansible_user (string): Username for accessing the cisco CLI interface
ansible_passwd (string): Password for the ansible_user
'''
self.ansible_user = ansible_user
self.ansible_passwd = ansible_passwd
AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
        # Reserved for executing ansible commands on the local device
self.localhost = ansible_adhoc(inventory='localhost', connection='local', host_pattern="localhost")["localhost"]
def __getattr__(self, module_name):
if module_name.startswith('iosxr_'):
evars = {
'ansible_connection': 'network_cli',
'ansible_network_os': module_name.split('_', 1)[0],
'ansible_user': self.ansible_user,
'ansible_password': self.ansible_passwd,
'ansible_ssh_user': self.ansible_user,
'ansible_ssh_pass': self.ansible_passwd,
}
else:
raise Exception("Does not have module: {}".format(module_name))
self.host.options['variable_manager'].extra_vars.update(evars)
return super(CiscoHost, self).__getattr__(module_name)
def __str__(self):
return '<CiscoHost {}>'.format(self.hostname)
def __repr__(self):
return self.__str__()
def commands(self, *args, **kwargs):
return self.iosxr_command(*args, **kwargs)
def config(self, *args, **kwargs):
return self.iosxr_config(*args, **kwargs)
@adapt_interface_name
def shutdown(self, interface_name=None):
out = self.config(
lines=['shutdown'],
parents=['interface {}'.format(interface_name)])
logging.info('Shut interface [%s]' % interface_name)
return out
def shutdown_multiple(self, interfaces):
intf_str = ','.join(interfaces)
return self.shutdown(interface_name=intf_str)
@adapt_interface_name
def no_shutdown(self, interface_name):
out = self.config(
lines=['no shutdown'],
parents=['interface {}'.format(interface_name)])
logging.info('No shut interface [%s]' % interface_name)
return out
def no_shutdown_multiple(self, interfaces):
intf_str = ','.join(interfaces)
return self.no_shutdown(intf_str)
@adapt_interface_name
def rm_member_from_channel_grp(self, interface_name, channel_group):
out = self.config(
lines=['no bundle id {} mode active'.format(channel_group)],
parents=['interface {}'.format(interface_name)])
logging.info('Rm interface {} from bundle-ethernet {}'.format(interface_name, channel_group))
return out
@adapt_interface_name
def add_member_to_channel_grp(self, interface_name, channel_group):
out = self.config(
lines=['bundle id {} mode active'.format(channel_group)],
parents=['interface {}'.format(interface_name)])
logging.info('Add interface {} to bundle-ethernet {}'.format(interface_name, channel_group))
return out
@adapt_interface_name
def check_intf_link_state(self, interface_name):
show_int_result = self.commands(
commands=['show interfaces %s' % interface_name])
return 'line protocol is up' in show_int_result['stdout_lines'][0]
@adapt_interface_name
def set_interface_lacp_rate_mode(self, interface_name, mode):
if mode == 'fast':
command = 'lacp period short'
else:
command = 'no lacp period'
out = self.config(
lines=[command],
parents='interface %s' % interface_name)
return out
def get_lldp_neighbor(self, local_iface=None, remote_device=None):
try:
if (local_iface is not None and remote_device is not None):
command = 'show operational LLDP NodeTable ' \
'Node/NodeName/Rack=0;Slot=0;Instance=CPU0 Neighbors DeviceTable ' \
                        'Device/DeviceID={}/InterfaceName={} json'.format(remote_device, local_iface)
else:
command = 'show operational LLDP json'
output = self.commands(
commands=[command],
module_ignore_errors=True)
logger.debug('cisco lldp output: %s' % (output))
return output['stdout_lines'][0]['Response']['Get']['Operational'] if output['failed'] is False else False
except Exception as e:
logger.error('command {} failed. exception: {}'.format(command, repr(e)))
return False
def config_key_chain(self, name, key):
# create key chain
output = self.config(
lines=['key chain {} key 1'.format(name)])
logger.debug('config key chain: %s' % (output))
# configure key chain parameters
output = self.config(
lines=['accept-lifetime 00:00:00 december 01 2014 infinite',
'send-lifetime 00:00:00 december 01 2014 infinite',
'cryptographic-algorithm HMAC-MD5',
'key-string clear {}'.format(key)],
parents=['key chain {} key 1'.format(name)])
logger.debug('config key chain parameters: %s' % (output))
def remove_key_chain(self, name):
# remove key chain
output = self.config(lines=['no key chain {}'.format(name)])
logger.debug('remove key chain: %s' % (output))
def isis_config_auth(self, key):
key_chain_name = 'ISIS'
self.config_key_chain(key_chain_name, key)
# configure key chain to isis
output = self.config(
lines=['lsp-password keychain {} level 2'.format(key_chain_name),
'interface Bundle-Ether1 hello-password keychain {}'.format(key_chain_name)],
parents=['router isis test'])
logger.debug('config key chain to isis: %s' % (output))
def isis_remove_auth(self, key):
key_chain_name = 'ISIS'
# remove key chain from isis
output = self.config(
lines=['no lsp-password keychain {} level 2'.format(key_chain_name),
'no interface Bundle-Ether1 hello-password keychain {}'.format(key_chain_name)],
parents=['router isis test'])
logger.debug('remove key chain from isis: %s' % (output))
self.remove_key_chain(key_chain_name)
def ping_dest(self, dest):
try:
command = 'ping {} count 5'.format(dest)
output = self.commands(commands=[command])
logger.debug('ping result: %s' % (output))
return re.search('!!!!!', output['stdout'][0]) is not None if output['failed'] is False else False
except Exception as e:
logger.error('command {} failed. exception: {}'.format(command, repr(e)))
return False
def show_command_to_json(self, command, lookup_key=None, lookup_val=None):
"""
This function will pull the show operational command output as json string and convert it json object and return
"""
try:
json_command = command + " json"
output = self.commands(commands=[json_command])
if all([lookup_key, lookup_val]):
return self.extract_key_val_pair_from_json(output['stdout_lines'], lookup_key)
elif lookup_key is not None and lookup_val is None:
return self.extract_val_from_json(output['stdout_lines'], lookup_key)
else:
return output['stdout_lines']
except Exception as e:
return {"error": e}
    def extract_key_val_pair_from_json(self, data, lookup_key):
        """
        Recursively match the provided key at any nesting level and return the
        list of dicts that contain it (i.e. the data at the same level)
        """
        result = []
        def _walk(node):
            if isinstance(node, dict):
                for k, v in list(node.items()):
                    if k == lookup_key:
                        result.append(node)
                    elif isinstance(v, (list, dict)):
                        _walk(v)
            elif isinstance(node, list):
                for ele in node:
                    if isinstance(ele, (list, dict)):
                        _walk(ele)
        _walk(data)
        return result
    def extract_val_from_json(self, json_data, lookup_key):
        """
        Recursively match the provided key at any nesting level and return the
        matched key's values as a list
        """
        result = []
        def _walk(node):
            if isinstance(node, dict):
                for k, v in list(node.items()):
                    if k == lookup_key:
                        result.append(v)
                    elif isinstance(v, (list, dict)):
                        _walk(v)
            elif isinstance(node, list):
                for ele in node:
                    if isinstance(ele, (list, dict)):
                        _walk(ele)
        _walk(json_data)
        return result
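    # Example (a sketch): extract_val_from_json({"a": {"ChassisID": "x"}}, "ChassisID")
    # returns ["x"], while extract_key_val_pair_from_json returns the enclosing dict.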
def _has_cli_cmd_failed(self, cmd_output_obj):
err_out = False
if 'stdout' in cmd_output_obj:
stdout = cmd_output_obj['stdout']
            msg = stdout[-1] if isinstance(stdout, list) else stdout
err_out = 'Cannot advertise' in msg
return ('failed' in cmd_output_obj and cmd_output_obj['failed']) or err_out
def load_configuration(self, config_file, backup_file=None):
if backup_file is None:
out = self.config(
src=config_file,
replace='config',
)
else:
out = self.config(
src=config_file,
replace='line',
backup='yes',
backup_options={
'filename': os.path.basename(backup_file),
'dir_path': os.path.dirname(backup_file),
}
)
return not self._has_cli_cmd_failed(out)
@adapt_interface_name
def get_portchannel_by_member(self, member_intf):
try:
command = 'show lacp {}'.format(member_intf)
output = self.commands(commands=[command])['stdout'][0]
regex_pc = re.compile(r'Bundle-Ether([0-9]+)', re.U)
for line in [item.strip().rstrip() for item in output.splitlines()]:
if regex_pc.match(line):
return re.sub('Bundle-Ether', 'Port-Channel', line)
except Exception as e:
logger.error('Failed to get PortChannel for member interface "{}", exception: {}'.format(
member_intf, repr(e)
))
return None
@adapt_interface_name
def no_isis_interface(self, isis_instance, interface):
out = self.config(
lines=['no interface {}'.format(interface)],
parents=['router isis {}'.format(isis_instance)])
return not self._has_cli_cmd_failed(out)
@adapt_interface_name
def set_isis_metric(self, interface, metric, isis_instance='test'):
out = self.config(
lines=['metric {}'.format(metric)],
parents=['router isis {}'.format(isis_instance),
'interface {}'.format(interface),
'address-family ipv4 unicast'])
return not self._has_cli_cmd_failed(out)
@adapt_interface_name
def no_isis_metric(self, interface, isis_instance='test'):
out = self.config(
lines=['no metric'],
parents=['router isis {}'.format(isis_instance),
'interface {}'.format(interface),
'address-family ipv4 unicast'])
return not self._has_cli_cmd_failed(out)
def get_lldp_neighbors(self):
"""
        run the show lldp neighbors command to get lldp neighbors
"""
try:
logger.info("Gathering LLDP details")
lldp_details = {}
command = "show lldp neighbors"
output = self.commands(commands=[command], module_ignore_errors=True)
header_line = "Device ID Local Intf Hold-time Capability Port ID"
end_line = "Total entries displayed:"
content_idx = 0
if not output['failed']:
output = [line.strip() for line in output['stdout_lines'][0] if len(line) > 0]
for idx, line in enumerate(output):
if end_line in line:
break
if header_line in line:
content_idx = idx
if content_idx != 0 and content_idx < idx:
line = re.split(r'\s+', line.strip())
lldp_details.update(
{line[1]: {'neighbor': line[0], 'local_interface': line[1], 'neighbor_interface': line[4]}}
)
return lldp_details
else:
return "Falied to get lldp neighbors info due to {}".format(output)
except Exception as e:
return "Failed to get lldp neighbors info due to {}".format(str(e))
def get_all_lldp_neighbor_details_for_port(self, physical_port):
"""
:param physical_port:
:return: complete lldp details for the port
"""
try:
command = "show lldp neigh {} detail".format(physical_port)
output = self.commands(commands=[command], module_ignore_errors=True)
if not output['failed']:
logger.debug('cisco lldp output: %s' % (output))
return output['stdout_lines'][0]
return "Failed to get lldp detail info for {} due to {}".format(physical_port, output)
except Exception as e:
return "Failed to get lldp detail info due to {}".format(str(e))
def get_platform_from_cli(self):
"""
run show version command to get device platform info
"""
try:
command = "show version | i ^cisco | utility head -n 1"
output = self.commands(commands=[command], module_ignore_errors=True)
if not output['failed']:
                logger.debug('cisco version output: %s' % (output))
return output['stdout_lines'][0][0].split()[1]
return "Failed to get platform info due to {}".format(output)
except Exception as e:
return "Failed to get platform info due to {}".format(str(e))
def get_version_from_cli(self):
"""
run show version command to get device version info
"""
try:
command = 'show version | in "Version :"'
output = self.commands(commands=[command], module_ignore_errors=True)
if not output['failed']:
                logger.debug('cisco version output: %s' % (output))
return output['stdout'][0].split()[-1].strip()
return "Failed to get version info due to {}".format(output)
except Exception as e:
return "Failed to get version info due to {}".format(str(e))
def get_chassis_id_from_cli(self):
"""
run show lldp command to get device chassis id via cli
"""
try:
command = "show lldp | i Chassis ID:"
output = self.commands(commands=[command], module_ignore_errors=True)
if not output['failed']:
logger.debug('cisco lldp output: %s' % (output))
return output['stdout_lines'][0][0].split()[-1]
return "Failed to get chassis id info due to {}".format(output)
except Exception as e:
return "Failed to get chassis id info due to {}".format(str(e))
def get_mgmt_ip_from_cli(self):
"""
:return ip
"""
try:
# On Cisco devices, the management IP is the IP of the management interface on the RP that is not active
# First, find which RP is the active one
# Example output:
"""
RP/0/RP0/CPU0:IBR02.STR01#show redundancy | i STANDBY
Mon Apr 13 04:44:43.271 UTC
Partner node (0/RP1/CPU0) is in STANDBY role
"""
command = "show redundancy | i STANDBY"
output = (
self.commands(commands=[command], module_ignore_errors=True)["stdout_lines"][0][0].strip().splitlines()
)
standby_rp = output[-1].split("/")[1]
# Get management IP of Standby RP
# Example output:
"""
RP/0/RP0/CPU0:IBR02.STR01#sh run formal interface MgmtEth 0/RP1/CPU0/0 ipv4 address
Mon Apr 13 04:29:46.371 UTC
interface MgmtEth0/RP1/CPU0/0 ipv4 address 10.3.151.104 255.255.255.0
"""
command2 = "show run formal interface MgmtEth 0/" + standby_rp + "/CPU0/0 ipv4 address"
output2 = (
self.commands(commands=[command2], module_ignore_errors=True)["stdout_lines"][0][0]
.strip()
.splitlines()
)
ip = output2[-1].split(" ")[4]
return ip
except IndexError:
command2 = "show ip interface brief | inc MANAGEMENT"
output2 = (
self.commands(commands=[command2], module_ignore_errors=True)["stdout_lines"][0][0]
.strip()
.splitlines()
)
ip = output2[-1].split()[1]
return ip
except Exception as error:
return "Failed to get mgmt ip due to {}".format(str(error))
def parse_lldp_peer_required_info(self, lldp_details=None):
"""
:param lldp_details: Output of "show lldp neighbor interface detail"
:return lldp_peer_info
"""
# Initialize dictionary to return
if "Total entries displayed: 0" in lldp_details:
return None
elif "Warning: " in str.join(" ", lldp_details):
return None
        lldp_required_info = dict()
        platform = None  # set below only for recognized vendor strings
        version = None
# Get Chassis ID:
chassis_dotted = re.findall("Chassis id: (.*)", str.join("\n", lldp_details))[0]
chassis = self.convert_mac_to_standard_format(chassis_dotted) # Convert to XX:XX:XX:XX:XX:XX format
lldp_required_info["chassis_id"] = chassis
# Get peer management IP:
"""
Ignoring as we have issue with Cisco Management IP
ip = re.findall("IPv4 address: (.*)", str.join("\n", lldp_details))[-1]
lldp_required_info['ip'] = ip
"""
# Get peer name:
index = 0
while "System Name:" not in lldp_details[index]:
index += 1
peer_name = lldp_details[index].split(":")[1].strip()
peer_name = peer_name.replace(".str.msn.net", "") # get rid of ".str.msn.net" if it exists
lldp_required_info["name"] = peer_name.lower()
# Get system description
index = 0
while "System Description:" not in lldp_details[index]:
index += 1
index += 1 # System description appears after the line that says: "System Description:"
system_description = lldp_details[index]
# From this description, extract platform:
# Juniper output looks like: Juniper Networks, Inc. jnp10016 internet router, kernel JUNOS 18.2X75-D51.9 ...
if "Juniper" in system_description:
description_list = system_description.split(" ")
platform = description_list[3]
# Arista output looks like "Arista Networks EOS version 4.23.2.1F-DPE running on an Arista Networks DCS-7504"
elif "Arista" in system_description:
description_list = system_description.split()
platform = description_list[-1]
elif "NCS" in system_description:
description_list = system_description.split(",")
platform = description_list[-1].strip()
elif "8000" in system_description:
description_list = system_description.split(",")
platform = description_list[-1].strip()
lldp_required_info["platform"] = platform
# From the same system description, extract version:
# Juniper output looks like "Juniper Networks, Inc. jnp10008 internet router, kernel JUNOS 18.2X75-D51.9, ..."
if "Juniper" in system_description:
description_list = system_description.split(" ")
# Find the word "JUNOS" and the version number is the next word
index = 0
while not description_list[index] == "JUNOS":
index += 1
version = description_list[index + 1][:-1] # Get rid of last character, because it is a ","
# Arista output looks like "Arista Networks EOS version 4.23.2.1F-DPE running on an Arista Networks DCS-7504"
elif "Arista" in system_description:
regex = r"(?<=version ).*(?= running )"
matches = re.search(regex, system_description, re.MULTILINE)
version = matches.group()
elif "NCS" in system_description:
description_list = system_description.split(",")
version = description_list[0].strip()
elif "8000" in system_description:
description_list = system_description.split(",")
version = description_list[0].strip()
lldp_required_info["version"] = version
# Get the peer port ID
peer_port = re.findall("Port id: (.*)", str.join("\n", lldp_details))[-1]
lldp_required_info["port"] = self.elongate_cisco_interface(peer_port)
return lldp_required_info
def convert_interface_prefix(self, list_of_interfaces):
"""
:param list_of_interfaces: List of interfaces which need to be updated for vendor naming convention
:return converted_list_of_interfaces, converted list of interface names
"""
converted_list_of_interfaces = []
for interface in list_of_interfaces:
converted_list_of_interfaces.append(interface.replace("HundredGigE", "Hu"))
return converted_list_of_interfaces
@staticmethod
def convert_pc_to_be(pc_name):
"""
:param pc_name: port-channel to be converted to Cisco BE format
:return: BE formatted ie. portchannel5 returns Bundle-Ether5
"""
pc_name = pc_name.lower()
if "portchannel" in pc_name:
pc_name = pc_name.replace("portchannel", "Bundle-Ether")
elif "port-channel" in pc_name:
pc_name = pc_name.replace("port-channel", "Bundle-Ether")
return pc_name
def get_all_interfaces_in_pc(self, pc_name):
"""
:param pc_name: port-channel/ae used for this test
:return interfaces: list of port channel member interfaces
"""
# Convert PortChannel to Bundle-Ether
try:
pc_name = self.convert_pc_to_be(pc_name)
command = "show lacp {} | begin eceive".format(pc_name)
output = self.commands(commands=[command], module_ignore_errors=True)
if not output["failed"]:
logger.debug("cisco lldp output: %s" % (output))
interface = [
self.elongate_cisco_interface(line.split()[0])
for line in output["stdout_lines"][0]
if "Current" in line
]
return interface
return "Failed to get chassis id info due to {}".format(output)
except Exception as e:
return "Failed to get chassis id info due to {}".format(str(e))
def check_interface_status(self, interface):
"""
:param
interface: str - port number e.g. ae15, Port-channel15
:return:
is_up: boolean , True if interface is up
intf_status_output: str - raw output of show interface OR error message
"""
try:
# Convert PortChannel to Bundle-Ether
pc_name = self.convert_pc_to_be(interface)
command = "show interfaces {}".format(pc_name)
success_criteria = "line protocol is up"
intf_status_output = self.commands(commands=[command], module_ignore_errors=True)
if not intf_status_output["failed"]:
logger.info("Interface status check: {} sent to {}".format(command, self.hostname))
is_up = success_criteria in intf_status_output["stdout"][0].lower()
return is_up, intf_status_output["stdout_lines"][0]
return False, intf_status_output
except Exception as e:
msg = "Failed to execute command due to {}".format(str(e))
logger.error(msg)
return False, msg
def get_isis_adjacency(self):
"""Method to gather isis adjacency details"""
isis_details = {}
try:
logger.info("Gathering ISIS adjacency details")
command = "show isis adjacency"
output = self.commands(commands=[command], module_ignore_errors=True)
if not output["failed"]:
for row in output["stdout_lines"][0][3:-2]:
row = row.split()
isis_details[row[0]] = dict()
isis_details[row[0]]["neighbor"] = row[0]
isis_details[row[0]]["interface_name"] = row[1]
isis_details[row[0]]["state"] = row[3]
return isis_details
except Exception as e:
err = "Failed to get isis for device: {}. Exception: {}".format(self.hostname, e)
logger.exception(err)
isis_details["Result"] = "Exception occurred while collecting isis adjacency information {}".format(err)
return isis_details
def check_isis_adjacency(self, neighbor_device, expected_adjacency_ports=1):
"""
:param device_b: device adjacent to device_a
:param expected_adjacency_ports: number of adjacencies between device a and device a
:return: Boolean, List of adjacency information
"""
adjacency = []
isis_adj = self.get_isis_adjacency()
for isis_neighbor in isis_adj:
if isis_neighbor.lower() == neighbor_device and isis_adj[isis_neighbor]["state"] == "Up":
                adjacency.append(
                    "{}_{}_{}".format(
                        isis_adj[isis_neighbor]["interface_name"],
                        isis_neighbor,
                        isis_adj[isis_neighbor]["state"],
                    )
                )
if len(adjacency) == expected_adjacency_ports:
return True, adjacency
return False, adjacency
def get_isis_database(self, queue=None):
try:
command = "show isis database"
output = self.commands(commands=[command], module_ignore_errors=True)
lsp_entries = {}
if not output["failed"]:
for line in output["stdout_lines"][0]:
if "*" in line:
outline = line.replace("*", "").split()
lsp_entries[outline[0].strip()] = {
"sequence-number": int(outline[1], base=16),
"checksum": int(outline[2], base=16),
}
elif "0x0" in line.lower():
outline = line.split()
lsp_entries[outline[0].strip()] = {
"sequence-number": int(outline[1], base=16),
"checksum": int(outline[2], base=16),
}
if queue:
queue.put(lsp_entries)
return lsp_entries
return output
except Exception as e:
msg = "Failed to get isis database due to {}".format(str(e))
logger.error(msg)
return msg
def get_bgp_status(self):
"""
:return bgp session status
"""
command = "show bgp summary | b Neighbor"
try:
output = self.commands(commands=[command], module_ignore_errors=True)
bgp_status = {}
if not output["failed"]:
for line in output["stdout_lines"][0][1:]:
line = line.strip().split()
if line[-1].isdigit():
neighbor_status = "Established"
else:
neighbor_status = line[-1]
bgp_status[line[0]] = neighbor_status
return bgp_status
return "Failed to get bgp status from device due to {}".format(output)
except Exception as e:
logger.error(str(e))
return "Failed to get bgp status from device due to {}".format(str(e))
def get_bgp_session_details(self, peer_ip):
"""
:param peer_ip: bgp peer ip
:return: dictionary with bgp session details
"""
try:
command = "show bgp neighbor {}".format(peer_ip)
output = self.commands(commands=[command], module_ignore_errors=True)
return output
except Exception as e:
return {"msg": "Failed to get bgp session details due to {}".format(str(e))}
def get_bgp_session_status(self, peer_ip):
"""
:param peer_ip:
:return: bgp session status e.g. Established
"""
bgp_peer_details = self.get_bgp_session_details(peer_ip)
try:
if not bgp_peer_details["failed"]:
for line in bgp_peer_details["stdout_lines"][0]:
if "BGP state" in line:
bgp_session_status = line.strip().split()[3].strip(",")
break
else:
bgp_session_status = "bgp session status not find"
return bgp_session_status
return "Failed to get bgp session status"
except Exception as e:
return "Failed to get bgp session status due to {}".format(str(e))
def is_prefix_advertised_to_peer(self, prefix, peer_ip):
"""
:param prefix:
:param peer_ip:
:return: Boolean status of whether prefix is advertised to the peer or not
"""
try:
command = "show bgp advertised neighbor {} summary | in {}".format(peer_ip, prefix)
output = self.commands(commands=[command], module_ignore_errors=True)
prefix_adv_status = False
if not output["failed"]:
for line in output["stdout_lines"][0]:
if prefix in line:
prefix_adv_status = True
break
return prefix_adv_status, output
except Exception as e:
logger.error(str(e))
return False, "Failed to check is prefix advertised to peer due to {}".format(str(e))
"""
LDP
"""
def get_ldp_oper_neighbor_ips(self):
try:
command = 'show mpls ldp neighbor | include "Peer LDP Identifier:|State:"'
output = self.commands(commands=[command], module_ignore_errors=True)
if not output["failed"]:
ldp_op_list = []
for idx in range(0, len(output["stdout_lines"][0]), 2):
line1 = output["stdout_lines"][0][idx]
line2 = output["stdout_lines"][0][idx + 1]
if "Peer LDP Identifier:" in line1 and "State: Oper" in line2:
ldp_neighbor_ip = line1.split(":")[1].strip()
ldp_op_list.append(ldp_neighbor_ip)
if ldp_op_list:
return True, ldp_op_list
return False, output
except Exception as e:
return False, "Failed to get ldp neighbors ip due to {}".format(str(e))
def get_next_hop_physical_interface_list(self, destination_ip):
try:
destination_ip = destination_ip + "/32"
command = "show route {} | include via".format(destination_ip)
output = self.commands(commands=[command], module_ignore_errors=True)
interface_list = []
if not output["failed"]:
for line in output["stdout_lines"][0]:
if "via" in line:
next_hop = line.split("via")[-1].strip()
if "Bundle-Ether" in next_hop:
physical_interfaces = self.get_all_interfaces_in_pc(next_hop)
interface_list += physical_interfaces
elif "TU." in next_hop:
physical_interfaces = self.get_egress_interface_for_lsp(next_hop)
interface_list += physical_interfaces
else:
interface_list.append(next_hop)
if len(interface_list) > 0:
return interface_list
return output
except Exception as e:
return "Failed to get next hop physical interface due to {}".format(str(e))
def check_remote_ldp_sessions(self):
"""
:param dut: The Device Under Test
:return: boolean, message
"""
# get a list of ldp neighbors marked as operational
result, ldp_neighbor_ips = self.get_ldp_oper_neighbor_ips()
if result:
for neighbor_ip in ldp_neighbor_ips:
ldp_next_hop_interface = self.get_next_hop_physical_interface_list(neighbor_ip)
result, message = self.verify_core_path(ldp_next_hop_interface)
if result:
return result, message
return False, "Could not find an operational ldp session on {} traversing an IBR or OWR".format(self.hostname)
def get_egress_interface_for_lsp(self, lsp_name):
"""
:param dut: The Device Under Test
:param ldp_op_list: list of ldp neighbors marked as operational
:return: boolean, message
"""
# for each operational ldp session check the route to get the next-hop
try:
command = "show mpls traffic-eng tunnels name {} | include Hop0".format(lsp_name)
output = self.commands(commands=[command], module_ignore_errors=True)
if not output["failed"]:
list_of_interface = []
for line in output["stdout_lines"][0]:
if "Hop0" in line:
next_hop_ip = line.split()[1]
list_of_interface = self.get_next_hop_physical_interface_list(next_hop_ip)
if list_of_interface:
return list_of_interface
return output
except Exception as e:
message = "Failed to get egress interface for lsp due to {}".format(str(e))
logger.error(message)
return message
def verify_core_path(self, ldp_int_list):
"""
:param dut: The Device Under Test
:param ldp_int_list: list of next-hop interfaces for operational ldp sessions
:return: boolean, message
"""
# check each next-hop address and see if the neighbor is an IBR-cisco device
lldp_details = self.get_lldp_neighbors()
for interface in ldp_int_list:
if interface in lldp_details:
if (
"ibr" in lldp_details[interface]["neighbor"].lower()
or "owr" in lldp_details[interface]["neighbor"].lower()
):
return True, "The LDP has remote session traversing core path via {}".format(interface)
message = "No interface traversing core path according to lldp details {}".format(lldp_details)
return False, message
"""
MACSEC
"""
def get_macsec_connection_status_details(self, interface):
"""
:param interface: interface of macsec adjacency
:return: boolean, failure message or dict_out containing dictionary of attributes
"""
command = "show macsec mka session interface {} detail".format(interface)
output = self.commands(commands=[command])["stdout"][0]
split_out = output.split("MKA Detailed Status for MKA Session")
if len(split_out) == 1:
return False, "No session found on {}".format(interface)
split_out = split_out[1:]
dict_out = {"pre-shared-key": {}, "fallback-key": {}}
if len(split_out) == 1:
primary_out = split_out[0].splitlines()
for line in primary_out:
if "MKA Cipher Suite" in line:
dict_out["cipher-suite"] = line.split(":")[1].strip()
elif "CAK Name (CKN)" in line:
dict_out["pre-shared-key"]["ckn"] = line.split(":")[1].strip()
elif "Fallback Data:" in line:
dict_out["fallback-key"]["ckn"] = primary_out[primary_out.index(line) + 2].split(":")[1].strip()
elif "MKA Policy Name" in line:
dict_out["name"] = line.split(":")[1].strip()
return True, dict_out
elif len(split_out) == 2:
primary_out = split_out[0].splitlines()
for line in primary_out:
if "MKA Cipher Suite" in line:
dict_out["cipher-suite"] = line.split(":")[1].strip()
elif "CAK Name (CKN)" in line:
dict_out["pre-shared-key"]["ckn"] = line.split(":")[1].strip()
elif "MKA Policy Name" in line:
dict_out["name"] = line.split(":")[1].strip()
fallback_out = split_out[1].splitlines()
for line in fallback_out:
if "CAK Name (CKN)" in line:
dict_out["fallback-key"]["ckn"] = line.split(":")[1].strip()
return True, dict_out
else:
return False, dict_out
def set_rekey_period(self, profile_name, rekey_period_value):
"""
:param profile_name: policy to change rekey value on
:param rekey_period_value: value to set rekey in seconds, value range between 60 and 2592000
:return: boolean, output from rekey implementation
"""
try:
command = "macsec-policy {} sak-rekey-interval seconds {}".format(profile_name, rekey_period_value)
output = self.config(lines=[command])
if not output["failed"]:
return True, output
return False, output
except Exception as e:
return False, "Failed to set rekey period due to {}".format(str(e))
def get_macsec_profile(self, interface):
"""
:param interface: interface of device to capture profile name
:return: profile name
"""
try:
command = "show run int {} macsec psk-keychain".format(interface)
output = self.commands(commands=[command])
"""example of output:
Thu Feb 6 16:32:34.297 UTC
interface HundredGigE0/2/0/11
macsec psk-keychain ptx10k-64hexCAK fallback-psk-keychain ptx10k-64hexCAK-fallback policy macsec-xpn-256
!
"""
if not output["failed"]:
return True, output['stdout_lines'][0][1].split()[-1].strip()
return False, output
except Exception as e:
return False, "Failed to get_macsec_profile due to {}".format(str(e))
def get_macsec_status_logs(self, interface, last_count="30", log_type="ESTABLISHED"):
"""
:param interface: interface of macsec adjacency
:param log_type: ESTABLISHED, FAILURE, ROLLOVER
:param last_count: optional field to capture number of logs
:return: boolean, output from logs
"""
try:
            interface = self.convert_interface_prefix([interface])[0]
if log_type == "ESTABLISHED":
log_type = "established"
elif log_type == "FAILURE":
log_type = "MACSEC_CIPHER_MISMATCH"
elif log_type == "ROLLOVER":
return True, "log {} not supported on device {}".format(log_type, self.hostname)
command = "show logging last {} | include {} | include {}".format(last_count, log_type, interface)
output = self.commands(commands=[command])["stdout"][0]
if not output["failed"]:
return len(output["stdout_lines"][0]) > 0, output["stdout"][0]
return False, str(output)
except Exception as e:
return False, str(e)
def get_key_name(self, interface, key_type):
"""
:param interface:
:param key_type:
:return: string of key name
"""
if key_type == "primary":
command = "show run int {} macsec psk-keychain".format(interface)
output = self.commands(commands=[command])["stdout_lines"][0]
"""example of output:
Thu Feb 6 16:32:34.297 UTC
interface HundredGigE0/2/0/11
macsec psk-keychain ptx10k-64hexCAK fallback-psk-keychain ptx10k-64hexCAK-fallback policy macsec-xpn-256
!
"""
return output[1].split()[2]
if key_type == "fallback":
command = "show run int {} macsec psk-keychain".format(interface)
output = self.commands(commands=[command])["stdout_lines"][0]
"""example of output:
Thu Feb 6 16:32:34.297 UTC
interface HundredGigE0/2/0/11
macsec psk-keychain ptx10k-64hexCAK fallback-psk-keychain ptx10k-64hexCAK-fallback policy macsec-xpn-256
!
"""
return output[1].split()[4]
else:
return "unsupported key_type"
def get_macsec_key_lifetime(self, key_name):
"""
:param string of key chain name:
:return: string of CKN from the key chain name
"""
command = "show running-config forma key chain {} macsec".format(key_name)
output = self.commands(commands=[command])["stdout_lines"][0]
for line in output:
if "lifetime" in line:
return line.split(" ", 7)[-1]
else:
return "macsec key lifetime not find"
def set_macsec_key(self, profile_name, key, key_type, interface):
"""
:param profile_name: macsec profile name used for key
:param key: string key to apply
:param key_type: fallback or primary
:param interface: interface of macsec session
:return: boolean and test_msg string
"""
key_name = self.get_key_name(interface, key_type)
lifetime = self.get_macsec_key_lifetime(key_name)
if lifetime != "macsec key lifetime not find":
commands = [
"no key chain {}".format(key_name),
"key chain {} macsec key {} key-string {} cryptographic-algorithm aes-256-cmac".format(
key_name, key, key
),
"key chain {} macsec key {} lifetime {}".format(key_name, key, lifetime),
]
else:
commands = [
"no key chain {}".format(key_name),
"key chain {} macsec key {} key-string {} cryptographic-algorithm aes-256-cmac".format(
key_name, key, key
),
]
if key_name != "unsupported key_type":
output = self.config(lines=commands)
test_msg = "Output: {}".format(output)
return True, test_msg
else:
test_msg = "Key type {} not supported".format(key_type)
return False, test_msg
def get_macsec_config(self, interface):
"""
:param interface: interface of device to capture profile name
:return: interface config
"""
try:
command = "show running-config formal interface {} macsec psk-keychain".format(interface)
output = self.commands(commands=[command])
# Returning only MACSEC config.
if not output["failed"]:
for config in output["stdout_lines"][0]:
if "psk" in config:
return True, config
# if psk is not found return false
return False, output
except Exception as e:
return False, "Failed to get macsec config due to {}".format(str(e))
def apply_macsec_interface_config(self, commands):
"""
:param commands: List command which need to execute on DTU.
:return: boolean
"""
try:
output = self.config(lines=commands)
if not output["failed"]:
return True, output
return False, output
except Exception as e:
return False, "Failed to apply macsec interface config due to {}".format(str(e))
def delete_macsec_interface_config(self, interface):
"""
:param interface: remove MACSEC from physical interface
:return: bool
"""
try:
command = "no interface {} macsec ".format(interface)
output = self.config(lines=command)
if not output["failed"]:
return True, output
return False, output
except Exception as e:
return False, "Failed to delete macsec interface config due to {}".format(str(e))
"""
RSVP
"""
def check_rsvp_nbr(self, neighbor):
"""
:param neighbor: neighbor of rsvp
:return: boolean, failure message or dict_out containing dictionary of attributes
"""
try:
command = "show rsvp neighbors | include Global"
output = self.commands(commands=[command])["stdout_lines"][0]
"""
Sample output
RP/0/RP0/CPU0:IBR02.STR01#show rsvp neighbors | include Global
Wed Jul 15 14:50:28.721 UTC
Global Neighbor: 10.3.151.95
Global Neighbor: 10.3.151.163
"""
            for line in output:
                if neighbor in line:
                    return True, "RSVP neighbor {} is up".format(neighbor)
            return False, "RSVP neighbor {} is down".format(neighbor)
except Exception as e:
return False, "Failed to get RSVP neighbor status due to {}".format(str(e))
def get_loopback_ipv4_addr(self):
"""
:return: boolean, failure message or str
"""
loopback_ip = "0.0.0.0"
command = "sho running-config formal interface Loopback 99"
output_list = self.commands(commands=[command])["stdout_lines"][0]
for line in output_list:
if "ipv4" in line:
loopback_ip = line.split()[4]
break
return loopback_ip
def remove_int_from_portchan(self, interface, pcnum):
"""
remove interface from interface ether-bundle
:param interface: The interface name
:param pcnum: portchannel number
:return: boolean, message
"""
try:
command = ["no bundle id"]
parents = ["interface {}".format(interface)]
output = self.config(lines=command, parents=parents)
if not output["failed"]:
return True, "remove interface {} from ether-bundle {}".format(interface, pcnum)
else:
return False, "Failed to remove interface {} from ether-bundle {}".format(interface, pcnum)
except Exception as e:
return False, "Failed to remove interface {} from ether-bundle {} due to {}".format(
interface, pcnum, str(e)
)
def put_int_in_portchan(self, interface, pcnum):
"""
Add an interface to an ether-bundle.
:param interface: The interface name
:param pcnum: portchannel number
:return: boolean, message
"""
try:
command = ["interface {} bundle id {} mode active".format(interface, pcnum)]
output = self.config(lines=command)
if not output["failed"]:
return True, "Added interface {} from ether-bundle {}".format(interface, pcnum)
else:
return False, "Failed to add interface {} from ether-bundle {}".format(interface, pcnum)
except Exception as e:
return False, "Failed to add interface {} from ether-bundle {} due to {}".format(interface, pcnum, str(e))
"""
TACACS
"""
def run_configure_command_test(self):
"""
This function is intend to test current account can get into config mode and do harmless config
and confirm the account has priviliage to configure the router.
"""
try:
command = ["alias testversion show version"]
output = self.config(lines=command)
if not output["failed"]:
rollback_command = ["no alias testversion"]
self.config(lines=rollback_command)
return True, output
return False, output
except Exception as e:
return False, "Failed to run configure command test due to {}".format(str(e))
def apply_check_tacacs_config_and_rollback(self, prod_tacacsserver, tacacs_secret, accounting_secret, user, pwd):
"""
:param prod_tacacsserver: production TACACS servers ip address
:param tacacs_secret: TACACS secret key
:param acccounting_secret: TACACS secret key
:param source_address: lab router source IP address
:param user: production username for tacacs test
:param pwd: production password for tacacs test
:return: Boolean, message
This function pushes production TACACS configurations to the router.
At the end it executes a "commit confirmed 120" command on the router.
then, start another ssh session to run "show version" command on the router to test the prod tacacs server.
After 120 seconds the router will automatically restore the original configurations.
"""
try:
prod_configs = ["configure exclusive"]
prod_configs.append("no aaa group server tacacs+ TACACS-DEFAULT ")
prod_configs.append("aaa group server tacacs+ TACACS-DEFAULT ")
prod_configs.append("aaa group server tacacs+ TACACS-DEFAULT vrf MANAGEMENT")
prod_configs.append(
"aaa group server tacacs+ TACACS-DEFAULT server-private {} port 49 key 7 {}".format(
prod_tacacsserver, tacacs_secret
)
)
prod_configs.append("commit confirmed 120")
config_ssh_session = SSHClient()
config_ssh_session.set_missing_host_key_policy(AutoAddPolicy())
config_ssh_session.load_system_host_keys()
config_ssh_session.connect(self.hostname, username=self.ansible_user, password=self.ansible_passwd)
cli_shell = config_ssh_session.invoke_shell()
for line in prod_configs:
cli_shell.send(line + "\n")
output = cli_shell.recv(1024).decode("utf-8")
if "Failed to commit" not in output:
time.sleep(20)
test_ssh_session = SSHClient()
test_ssh_session.set_missing_host_key_policy(AutoAddPolicy())
test_ssh_session.load_system_host_keys()
test_ssh_session.connect(self.hostname, username=user, password=pwd)
stdin, stdout, stderr = test_ssh_session.exec_command("show version")
if not stderr.readlines():
cli_shell.close()
test_ssh_session.close()
return True, stdout.readlines()
else:
return False, output
except Exception as e:
msg = "Failed to apply/check tacacs configuration due to {}".format(str(e))
return False, msg
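# A minimal usage sketch (all values hypothetical) of the commit-confirmed pattern this
# method relies on: the TACACS config is applied provisionally, verified from a second
# SSH session, and rolled back automatically by the router after 120 seconds:
#   ok, detail = device.apply_check_tacacs_config_and_rollback(
#       prod_tacacsserver="10.0.0.10",
#       tacacs_secret="<tacacs-key>",
#       accounting_secret="<accounting-key>",
#       user="netops", pwd="<password>",
#   )
#   # ok is True only if "show version" succeeded while the prod TACACS config was live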
def check_for_aggregate_route_generation(self, agg_prefix):
"""
:param agg_prefix: aggregate prefix
:return: Boolean status of Aggregate prefix
"""
try:
command = "show route | include {}".format(agg_prefix)
agg_route_gen_status = False
output = self.commands(commands=[command])
if not output["failed"]:
for line in output["stdout_lines"][0]:
if agg_prefix in line:
agg_route_gen_status = True
break
return agg_route_gen_status, output["stdout_lines"][0]
except Exception as e:
return False, "Failed to verify the aggregate route due to {}".format(str(e))
def get_list_of_location(self):
    """Return the list of linecard CPU locations in NSHUT state (RPs excluded)."""
    command = "show platform | include NSHUT | include CPU | exclude RP"
output = self.commands(commands=[command])
location_list = []
if not output["failed"]:
for line in output["stdout_lines"][0]:
if "CPU" in line:
location_list.append(line.split()[0])
return location_list
def get_ipfix_export_data_count(self, location):
    """Return the total "Packets exported" count for the IPFIX_MSAZ exporter at a location."""
    try:
packets_exported = 0
command = 'show flow exporter IPFIX_MSAZ location {} | include "Packets exported:"'.format(location)
output = self.commands(commands=[command])
if not output["failed"]:
for line in output["stdout_lines"][0]:
if "Packets exported:" in line:
packets_exported += int(line.split()[2])
return packets_exported
except Exception as e:
return {"error": str(e)}
def is_ipfix_exporting_data(self):
    """Sample the exported-packet counters twice, 30 seconds apart, to verify IPFIX export is active."""
    try:
location_list = self.get_list_of_location()
first_time_packets_exported = 0
second_time_packets_exported = 0
for location in location_list:
location_counter = self.get_ipfix_export_data_count(location)
first_time_packets_exported += location_counter
# wait for 30 seconds
time.sleep(30)
for location in location_list:
location_counter = self.get_ipfix_export_data_count(location)
second_time_packets_exported += location_counter
if first_time_packets_exported == second_time_packets_exported:
return False, "The total packets exported ipfix data are NOT increasing"
else:
return True, "The total packets exported ipfix data are increasing"
except Exception as e:
return False, "Failed to check is ipfix exporting data due to {}".format(str(e))
def apply_sample_filter_to_interface(self, filter_name, interface):
    """Attach the <filter_name>_IPV4 flow monitor with the <filter_name>_SM sampler to an interface (ingress)."""
    try:
command = "interface {} flow ipv4 monitor {}_IPV4 sampler {}_SM ingress".format(
interface, filter_name, filter_name
)
output = self.config(lines=[command])
if not output["failed"]:
return True, output
return False, output
except Exception as e:
return False, "Failed to apply sample filter to interface due to {}".format(str(e))
def reboot_chassis(self):
    """Reload all hardware modules in the chassis without prompting for confirmation."""
    try:
command = "admin hw-module location all reload noprompt"
output = self.commands(commands=[command])
if not output["failed"]:
return True, output
return False, output
except Exception as e:
return False, "Failed to reboot the device {}. due to {}".format(self.hostname, str(e))
authors: ["noreply@github.com"] | author_id: ramakristipati.noreply@github.com
repo_name: aj1143208/fer | path: /utils/logger.py | language: Python | extension: py | length_bytes: 1,205
blob_id: a1fe40ff78a33f1e7c1a7088b726b91d8af85485 | directory_id: fd26eda30e2b9f878102292c21cae7d437e4714b | content_id: 7778de4819ce8717051ab14b2b61846136656ea0
snapshot_id: cacbe29e9a1d6005f80837f17081757d4327d4f7 | revision_id: 31ec248d9f11c7f3a4fddb253b8a433258e77003 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-05-08T13:51:08.360227 | revision_date: 2021-06-03T18:59:42 | committer_date: 2021-06-03T18:59:42
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
from matplotlib import pyplot as plt
import os
class Logger:
    """Collects per-epoch training/validation loss and accuracy and saves curve plots."""
def __init__(self):
self.loss_train = []
self.loss_val = []
self.acc_train = []
self.acc_val = []
def get_logs(self):
return self.loss_train, self.loss_val, self.acc_train, self.acc_val
def restore_logs(self, logs):
self.loss_train, self.loss_val, self.acc_train, self.acc_val = logs
def save_plt(self, hps):
loss_path = os.path.join(hps['model_save_dir'], 'loss.jpg')
acc_path = os.path.join(hps['model_save_dir'], 'acc.jpg')
plt.figure()
plt.plot(self.acc_train, 'g', label='Training Acc')
plt.plot(self.acc_val, 'b', label='Validation Acc')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Acc')
plt.legend()
plt.grid()
plt.savefig(acc_path)
plt.figure()
plt.plot(self.loss_train, 'g', label='Training Loss')
plt.plot(self.loss_val, 'b', label='Validation Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid()
plt.savefig(loss_path)
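# A minimal usage sketch (the save directory is a hypothetical path that must already exist):
#   logger = Logger()
#   for loss, acc in [(0.9, 0.4), (0.6, 0.7)]:  # dummy two-epoch training curve
#       logger.loss_train.append(loss)
#       logger.acc_train.append(acc)
#       logger.loss_val.append(loss + 0.1)
#       logger.acc_val.append(acc - 0.05)
#   logger.save_plt({'model_save_dir': './checkpoints'})  # writes acc.jpg and loss.jpg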
authors: ["yousif.khaireddin@gmail.com"] | author_id: yousif.khaireddin@gmail.com
repo_name: chen40/blog | path: /Blog/index/urls.py | language: Python | extension: py | length_bytes: 102
blob_id: 5d99e8da65f1ef81b8c30662ccbe271785a7fb46 | directory_id: a596602db5a4636492f33468f0269222cfac9894 | content_id: 8e638d2261b565d6af50e802a6e14c9672ec49f0
snapshot_id: 76a78dca11d264d1d2a3921c2969a740e8f6a9a7 | revision_id: 34d357c4ec48237a1a5fb3e8e929e63cbabc416d | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2020-04-04T13:10:28.148949 | revision_date: 2018-11-13T06:47:10 | committer_date: 2018-11-13T06:47:10
github_id: 155,951,594 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
from django.conf.urls import url
from .views import v_index
urlpatterns = [
url(r'^$', v_index),
]
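# The route maps '' to v_index, which lives in index/views.py. A minimal sketch of
# what v_index might look like (the actual implementation is not shown in this snippet):
#   from django.http import HttpResponse
#   def v_index(request):
#       return HttpResponse("index")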
authors: ["chen40@163.com"] | author_id: chen40@163.com
repo_name: jaminGH/huaweicloud-sdk-python-v3 | path: /huaweicloud-sdk-bms/huaweicloudsdkbms/v1/model/sub_jobs.py | language: Python | extension: py | length_bytes: 9,306
blob_id: 9de214237dad71adf0c2d29d72473bff01336613 | directory_id: 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | content_id: f3a3cfa52d06c87e9133020d92bdf36ea254d459
snapshot_id: eeecb3fb0f3396a475995df36d17095038615fba | revision_id: 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-06-18T11:49:13.958677 | revision_date: 2021-07-16T07:57:47 | committer_date: 2021-07-16T07:57:47
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
# coding: utf-8
import re
import six
class SubJobs:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'status': 'str',
'entities': 'Entitie',
'job_id': 'str',
'job_type': 'str',
'begin_time': 'datetime',
'end_time': 'datetime',
'error_code': 'str',
'fail_reason': 'str',
'message': 'str',
'code': 'str'
}
attribute_map = {
'status': 'status',
'entities': 'entities',
'job_id': 'job_id',
'job_type': 'job_type',
'begin_time': 'begin_time',
'end_time': 'end_time',
'error_code': 'error_code',
'fail_reason': 'fail_reason',
'message': 'message',
'code': 'code'
}
def __init__(self, status=None, entities=None, job_id=None, job_type=None, begin_time=None, end_time=None, error_code=None, fail_reason=None, message=None, code=None):
"""SubJobs - a model defined in huaweicloud sdk"""
self._status = None
self._entities = None
self._job_id = None
self._job_type = None
self._begin_time = None
self._end_time = None
self._error_code = None
self._fail_reason = None
self._message = None
self._code = None
self.discriminator = None
if status is not None:
self.status = status
if entities is not None:
self.entities = entities
if job_id is not None:
self.job_id = job_id
if job_type is not None:
self.job_type = job_type
if begin_time is not None:
self.begin_time = begin_time
if end_time is not None:
self.end_time = end_time
if error_code is not None:
self.error_code = error_code
if fail_reason is not None:
self.fail_reason = fail_reason
if message is not None:
self.message = message
if code is not None:
self.code = code
@property
def status(self):
"""Gets the status of this SubJobs.
Job status. SUCCESS: succeeded; RUNNING: running; FAIL: failed; INIT: initializing
:return: The status of this SubJobs.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this SubJobs.
Job status. SUCCESS: succeeded; RUNNING: running; FAIL: failed; INIT: initializing
:param status: The status of this SubJobs.
:type: str
"""
self._status = status
@property
def entities(self):
"""Gets the entities of this SubJobs.
:return: The entities of this SubJobs.
:rtype: Entitie
"""
return self._entities
@entities.setter
def entities(self, entities):
"""Sets the entities of this SubJobs.
:param entities: The entities of this SubJobs.
:type: Entitie
"""
self._entities = entities
@property
def job_id(self):
"""Gets the job_id of this SubJobs.
Job ID
:return: The job_id of this SubJobs.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this SubJobs.
Job ID
:param job_id: The job_id of this SubJobs.
:type: str
"""
self._job_id = job_id
@property
def job_type(self):
"""Gets the job_type of this SubJobs.
Job type, one of: baremetalSingleCreate: create a single bare metal server; baremetalSingleOperate: change the power state of a single bare metal server; baremetalAttachSingleVolume: attach a single shared disk
:return: The job_type of this SubJobs.
:rtype: str
"""
return self._job_type
@job_type.setter
def job_type(self, job_type):
"""Sets the job_type of this SubJobs.
Job type, one of: baremetalSingleCreate: create a single bare metal server; baremetalSingleOperate: change the power state of a single bare metal server; baremetalAttachSingleVolume: attach a single shared disk
:param job_type: The job_type of this SubJobs.
:type: str
"""
self._job_type = job_type
@property
def begin_time(self):
"""Gets the begin_time of this SubJobs.
Start time. Timestamp in ISO 8601 format, for example: 2019-04-25T20:04:47.591Z
:return: The begin_time of this SubJobs.
:rtype: datetime
"""
return self._begin_time
@begin_time.setter
def begin_time(self, begin_time):
"""Sets the begin_time of this SubJobs.
Start time. Timestamp in ISO 8601 format, for example: 2019-04-25T20:04:47.591Z
:param begin_time: The begin_time of this SubJobs.
:type: datetime
"""
self._begin_time = begin_time
@property
def end_time(self):
"""Gets the end_time of this SubJobs.
End time. Timestamp in ISO 8601 format, for example: 2019-04-26T20:04:47.591Z
:return: The end_time of this SubJobs.
:rtype: datetime
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this SubJobs.
End time. Timestamp in ISO 8601 format, for example: 2019-04-26T20:04:47.591Z
:param end_time: The end_time of this SubJobs.
:type: datetime
"""
self._end_time = end_time
@property
def error_code(self):
"""Gets the error_code of this SubJobs.
Error code returned when the job fails to execute
:return: The error_code of this SubJobs.
:rtype: str
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""Sets the error_code of this SubJobs.
Error code returned when the job fails to execute
:param error_code: The error_code of this SubJobs.
:type: str
"""
self._error_code = error_code
@property
def fail_reason(self):
"""Gets the fail_reason of this SubJobs.
Failure reason returned when the job fails to execute
:return: The fail_reason of this SubJobs.
:rtype: str
"""
return self._fail_reason
@fail_reason.setter
def fail_reason(self, fail_reason):
"""Sets the fail_reason of this SubJobs.
Failure reason returned when the job fails to execute
:param fail_reason: The fail_reason of this SubJobs.
:type: str
"""
self._fail_reason = fail_reason
@property
def message(self):
"""Gets the message of this SubJobs.
Error message returned when an error occurs
:return: The message of this SubJobs.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this SubJobs.
Error message returned when an error occurs
:param message: The message of this SubJobs.
:type: str
"""
self._message = message
@property
def code(self):
"""Gets the code of this SubJobs.
Error code returned when an error occurs
:return: The code of this SubJobs.
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this SubJobs.
Error code returned when an error occurs
:param code: The code of this SubJobs.
:type: str
"""
self._code = code
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SubJobs):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
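# A minimal usage sketch (field values are hypothetical; to_str() requires the
# simplejson package to be installed):
#   job = SubJobs(status="RUNNING", job_id="job-123", job_type="baremetalSingleCreate")
#   job.to_dict()["status"]   # -> "RUNNING"
#   print(job)                # JSON string via __repr__ -> to_str()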
authors: ["hwcloudsdk@huawei.com"] | author_id: hwcloudsdk@huawei.com
repo_name: ristovao/2017.2-Receituario-Medico | path: /medical_prescription/dashboardPatient/views/home_patient.py | language: Python | extension: py | length_bytes: 1,762
blob_id: 6edefe7fcee4cbc139a27cd711a075b6fc4476dd | directory_id: 62fb4dc94d1481904ad7539afe40ed211453f0d7 | content_id: b68730e6fd1190f930418b2dedd6e891bb715145
snapshot_id: 3a3b2baaf11d6e374ecbc5b7243fb8175317e639 | revision_id: 5387eb80dfb354e948abe64f7d8bbe087fc4f136 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2020-03-31T23:56:08.809569 | revision_date: 2018-10-12T01:11:04 | committer_date: 2018-10-12T01:11:04
github_id: 152,674,938 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: MIT | gha_event_created_at: 2018-10-12T01:08:07 | gha_created_at: 2018-10-12T01:08:07 | gha_language: null
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime, timedelta
# Django imports
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.views.generic import View
from django.utils.decorators import method_decorator
# local django
from user.decorators import is_patient
from prescription.models import PatientPrescription
from chat.models import Message
class HomePatient(View):
"""
Renders the home page (dashboard) of the health professional.
"""
template_name = 'patient_dashboard.html'
@method_decorator(login_required)
@method_decorator(is_patient)
def dispatch(self, *args, **kwargs):
return super(HomePatient, self).dispatch(*args, **kwargs)
def get(self, request):
patient = request.user.patient
one_week_ago = datetime.today() - timedelta(days=7)
# Set initial date first hour
week_ago = datetime(one_week_ago.year, one_week_ago.month, one_week_ago.day)
prescription_quantity = PatientPrescription.objects.filter(date__gte=week_ago,
patient=patient).count()
# Get six latest prescriptions
latest_prescriptions = PatientPrescription.objects.filter(patient=patient).order_by('-id')[:6]
# Get six latest messages
latest_messages = Message.objects.filter(user_to=patient).order_by('-id')[:6]
return render(request, self.template_name, {'prescription_quantity': prescription_quantity,
'last_prescriptions': latest_prescriptions,
'latest_messages': latest_messages})
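# A hedged sketch of how this view might be wired into a URLconf (route, name and
# import path are assumptions, not taken from this repository snippet):
#   from django.conf.urls import url
#   from dashboardPatient.views.home_patient import HomePatient
#   urlpatterns = [
#       url(r'^dashboard/$', HomePatient.as_view(), name='patient-dashboard'),
#   ]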
authors: ["lucas.hh@hotmail.com"] | author_id: lucas.hh@hotmail.com
repo_name: constans73/ejercicios_basicos | path: /lustro.py | language: Python | extension: py | length_bytes: 85
blob_id: e504d94610a1fa152f33781ddd97b1d5df047ba4 | directory_id: a45331991f550192df9df9075fa718ea72bee1e9 | content_id: a776c162c6ce73936960bc145ee5cb74d9bc8047
snapshot_id: 756173160022212133ca1fe5bfbc7cc2d5747002 | revision_id: eb564eb1a8973786a0dd3a80c55cd419c16020fc | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2022-08-03T11:12:48.964190 | revision_date: 2020-05-23T15:22:15 | committer_date: 2020-05-23T15:22:15
github_id: 266,362,486 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
lustro = ((365 * 5) + 1) * 24 * 3600
print("A lustrum (5 years) consists of", lustro, "seconds")
authors: ["constans73@gmail.com"] | author_id: constans73@gmail.com